author     Ralf Baechle <ralf@linux-mips.org>    1999-10-09 00:00:47 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    1999-10-09 00:00:47 +0000
commit     d6434e1042f3b0a6dfe1b1f615af369486f9b1fa (patch)
tree       e2be02f33984c48ec019c654051d27964e42c441 /arch/sh
parent     609d1e803baf519487233b765eb487f9ec227a18 (diff)
Merge with 2.3.19.
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Makefile                    82
-rw-r--r--  arch/sh/boot/Makefile               41
-rw-r--r--  arch/sh/config.in                   78
-rw-r--r--  arch/sh/defconfig                  101
-rw-r--r--  arch/sh/kernel/Makefile             27
-rw-r--r--  arch/sh/kernel/entry.S             683
-rw-r--r--  arch/sh/kernel/head.S               69
-rw-r--r--  arch/sh/kernel/init_task.c          23
-rw-r--r--  arch/sh/kernel/irq.c               485
-rw-r--r--  arch/sh/kernel/irq_onchip.c        168
-rw-r--r--  arch/sh/kernel/process.c           303
-rw-r--r--  arch/sh/kernel/ptrace.c            476
-rw-r--r--  arch/sh/kernel/semaphore.c         133
-rw-r--r--  arch/sh/kernel/setup.c             188
-rw-r--r--  arch/sh/kernel/sh_ksyms.c           48
-rw-r--r--  arch/sh/kernel/signal.c            597
-rw-r--r--  arch/sh/kernel/sys_sh.c            249
-rw-r--r--  arch/sh/kernel/test-img.c           69
-rw-r--r--  arch/sh/kernel/time.c              224
-rw-r--r--  arch/sh/kernel/traps.c             127
-rw-r--r--  arch/sh/lib/Makefile                14
-rw-r--r--  arch/sh/lib/checksum.c             170
-rw-r--r--  arch/sh/lib/csum_partial_copy.c     75
-rw-r--r--  arch/sh/lib/delay.c                 21
-rw-r--r--  arch/sh/lib/memcpy.S               131
-rw-r--r--  arch/sh/lib/memmove.S              422
-rw-r--r--  arch/sh/lib/memset.S                72
-rw-r--r--  arch/sh/lib/old-checksum.c          19
-rw-r--r--  arch/sh/lib/wordcopy.S            1289
-rw-r--r--  arch/sh/mm/Makefile                 13
-rw-r--r--  arch/sh/mm/extable.c                57
-rw-r--r--  arch/sh/mm/fault.c                 326
-rw-r--r--  arch/sh/mm/init.c                  294
-rw-r--r--  arch/sh/mm/ioremap.c               140
-rw-r--r--  arch/sh/vmlinux.lds.S              114
35 files changed, 7328 insertions, 0 deletions
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
new file mode 100644
index 000000000..c1c6f1e29
--- /dev/null
+++ b/arch/sh/Makefile
@@ -0,0 +1,82 @@
+# $Id$
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture.
+#
+
+#
+# Select the object file format to substitute into the linker script.
+#
+tool-prefix = sh-gniibe-
+oformat = elf
+
+ifdef CONFIG_CROSSCOMPILE
+CROSS_COMPILE = $(tool-prefix)
+endif
+
+LINKFLAGS = # -EL # -static #-N
+MODFLAGS +=
+
+#
+#
+CFLAGS += -m3 # -ml
+LINKFLAGS +=
+LDFLAGS += # -EL
+
+#
+#
+HOSTCC = cc
+
+#
+# Choosing incompatible machines during configuration will result in
+# error messages during linking. Select a default linkscript if
+# none has been chosen above.
+#
+LINKSCRIPT = arch/sh/vmlinux.lds
+LINKFLAGS += -T $(word 1,$(LINKSCRIPT)) -e __stext
+
+ifdef LOADADDR
+LINKFLAGS += -Ttext $(word 1,$(LOADADDR))
+endif
+
+#
+CFLAGS += -pipe
+
+HEAD := arch/sh/kernel/head.o arch/sh/kernel/init_task.o
+
+SUBDIRS := $(SUBDIRS) $(addprefix arch/sh/, kernel mm lib)
+CORE_FILES := arch/sh/kernel/kernel.o arch/sh/mm/mm.o $(CORE_FILES)
+LIBS := $(TOPDIR)/arch/sh/lib/lib.a $(LIBS) $(TOPDIR)/arch/sh/lib/lib.a /home/niibe/lib/gcc-lib/sh-gniibe-elf/egcs-2.91.66/libgcc.a
+
+MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
+vmlinux: arch/sh/vmlinux.lds
+
+arch/sh/vmlinux.lds: arch/sh/vmlinux.lds.S FORCE
+ gcc -E -C -P -I$(HPATH) -imacros $(HPATH)/linux/config.h -Ush arch/sh/vmlinux.lds.S >arch/sh/vmlinux.lds
+
+FORCE: ;
+
+zImage: vmlinux
+ @$(MAKEBOOT) zImage
+
+compressed: zImage
+
+zdisk: vmlinux
+ @$(MAKEBOOT) zdisk
+
+archclean:
+ @$(MAKEBOOT) clean
+ $(MAKE) -C arch/$(ARCH)/kernel clean
+# $(MAKE) -C arch/$(ARCH)/tools clean
+
+archmrproper:
+
+archdep:
+ @$(MAKEBOOT) dep
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
new file mode 100644
index 000000000..e2ae36bde
--- /dev/null
+++ b/arch/sh/boot/Makefile
@@ -0,0 +1,41 @@
+#
+# arch/sh/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+.S.s:
+ $(CPP) $(CFLAGS) $< -o $*.s
+.S.o:
+ $(CC) $(CFLAGS) -c $< -o $*.o
+
+OBJS =
+
+#
+# Drop some uninteresting sections in the kernel.
+#
+drop-sections = .reginfo .mdebug
+strip-flags = $(addprefix --remove-section=,$(drop-sections))
+
+#
+# Fake compressed boot
+#
+zImage: $(CONFIGURE) mkboot $(TOPDIR)/vmlinux
+ $(OBJCOPY) $(strip-flags) $(TOPDIR)/vmlinux zImage.tmp
+ ./mkboot zImage.tmp zImage
+ rm -f zImage.tmp
+
+mkboot: mkboot.c
+ $(HOSTCC) -o $@ $^
+
+# Don't build dependencies; this may die if $(CC) isn't gcc
+dep:
+
+clean:
+ rm -f zImage zImage.tmp mkboot
+
+dummy:
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sh/config.in b/arch/sh/config.in
new file mode 100644
index 000000000..043af8304
--- /dev/null
+++ b/arch/sh/config.in
@@ -0,0 +1,78 @@
+#
+# For a description of the syntax of this configuration file,
+# see the Configure script.
+#
+mainmenu_name "Linux/SuperH Kernel Configuration"
+
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
+
+mainmenu_option next_comment
+comment 'Processor type and features'
+choice 'Processor family' \
+ "SH3 CONFIG_CPU_SH3 \
+ SH4 CONFIG_CPU_SH4" SH3
+bool 'Little Endian' CONFIG_LITTLE_ENDIAN
+hex 'Physical memory start address' CONFIG_MEMORY_START 0c000000
+endmenu
+
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
+ bool 'Kernel module loader' CONFIG_KMOD
+fi
+endmenu
+
+define_bool CONFIG_SERIAL n
+define_bool CONFIG_SH3SCI_SERIAL y
+define_bool CONFIG_SERIAL_CONSOLE y
+
+mainmenu_option next_comment
+comment 'Floppy, IDE, and other block devices'
+
+bool 'Networking support' CONFIG_NET
+bool 'System V IPC' CONFIG_SYSVIPC
+bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
+bool 'Sysctl support' CONFIG_SYSCTL
+
+tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
+
+tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
+if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
+ bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
+fi
+
+tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
+tristate 'Network block device support' CONFIG_BLK_DEV_NBD
+endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
+fi
+
+mainmenu_option next_comment
+comment 'Unix 98 PTY support'
+bool 'Unix98 PTY support' CONFIG_UNIX98_PTYS
+if [ "$CONFIG_UNIX98_PTYS" = "y" ]; then
+ int 'Maximum number of Unix98 PTYs in use (0-2048)' CONFIG_UNIX98_PTY_COUNT 256
+fi
+endmenu
+
+source fs/Config.in
+
+mainmenu_option next_comment
+comment 'Watchdog'
+
+tristate 'Software watchdog' CONFIG_SOFT_WATCHDOG
+endmenu
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+endmenu
diff --git a/arch/sh/defconfig b/arch/sh/defconfig
new file mode 100644
index 000000000..bd830d4a9
--- /dev/null
+++ b/arch/sh/defconfig
@@ -0,0 +1,101 @@
+#
+# Automatically generated make config: don't edit
+#
+
+#
+# Code maturity level options
+#
+# CONFIG_EXPERIMENTAL is not set
+
+#
+# Processor type and features
+#
+CONFIG_CPU_SH3=y
+# CONFIG_CPU_SH4 is not set
+# CONFIG_LITTLE_ENDIAN is not set
+CONFIG_MEMORY_START=0c000000
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+# CONFIG_SERIAL is not set
+CONFIG_SH3SCI_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+
+#
+# Floppy, IDE, and other block devices
+#
+# CONFIG_NET is not set
+CONFIG_SYSVIPC=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_SYSCTL is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK is not set
+# CONFIG_FIREWALL is not set
+# CONFIG_FILTER is not set
+# CONFIG_UNIX is not set
+# CONFIG_INET is not set
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+
+#
+# Unix 98 PTY support
+#
+# CONFIG_UNIX98_PTYS is not set
+
+#
+# Filesystems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_FAT_FS is not set
+# CONFIG_ISO9660_FS is not set
+# CONFIG_JOLIET is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_HPFS_FS is not set
+CONFIG_PROC_FS=y
+# CONFIG_ROMFS_FS is not set
+CONFIG_EXT2_FS=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_SMD_DISKLABEL is not set
+# CONFIG_SGI_DISKLABEL is not set
+# CONFIG_NLS is not set
+
+#
+# Watchdog
+#
+# CONFIG_SOFT_WATCHDOG is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_MAGIC_SYSRQ is not set
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
new file mode 100644
index 000000000..0a2abf858
--- /dev/null
+++ b/arch/sh/kernel/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the Linux/SuperH kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
+
+O_TARGET := kernel.o
+O_OBJS := process.o signal.o entry.o traps.o irq.o irq_onchip.o \
+ ptrace.o setup.o time.o sys_sh.o test-img.o semaphore.o
+OX_OBJS := sh_ksyms.o
+MX_OBJS :=
+
+all: kernel.o head.o init_task.o
+
+entry.o: entry.S
+
+head.o: head.S
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $*.S -o $*.o
+
+clean:
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
new file mode 100644
index 000000000..7fca58b30
--- /dev/null
+++ b/arch/sh/kernel/entry.S
@@ -0,0 +1,683 @@
+/* $Id$
+ *
+ * linux/arch/sh/kernel/entry.S
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+! NOTE:
+! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the address
+! to be jumped to is too far away, but this causes an illegal slot exception.
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * Stack layout in 'ret_from_syscall':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in process.c:copy_thread, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * syscall #
+ * r0
+ * ...
+ * r15
+ * gbr
+ * mach
+ * macl
+ * pr
+ * ssr
+ * spc
+ *
+ */
+
+/*
+ * these are offsets into the task-struct.
+ */
+state = 0
+flags = 4
+sigpending = 8
+addr_limit = 12
+need_resched = 20
+
+PF_TRACESYS = 0x20
+
+ENOSYS = 38
+
+TRA = 0xffffffd0
+EXPEVT = 0xffffffd4
+INTEVT = 0xffffffd8
+
+/* Offsets to the stack */
+SYSCALL_NR = 0
+R0 = 4
+R15 = 64
+
+#define k0 r0
+#define k1 r1
+#define k2 r2
+#define k3 r3
+
+#define kernel_sp r4 /* r4_bank1 */
+#define ksp r4_bank /* r4_bank1 */
+#define k_ex_code r2_bank /* r2_bank1 */
+
+/* Kernel mode register usage:
+ k0 scratch
+ k1 scratch
+ k2 scratch (Exception code)
+ k3 scratch (Return address)
+ k4 Stack base = current+8192
+ k5 reserved
+ k6 reserved
+ k7 reserved
+*/
+
+!
+! TLB Miss / Initial Page write exception handling
+! _and_
+! TLB hits, but the access violates the protection.
+! It can be a valid access, such as stack growth and/or copy-on-write.
+!
+!
+! Find the pmd/pte entry and load the TLB.
+! If it's not found, raise an address error (SEGV).
+!
+! Although this could be written in assembly language (and it'd be faster),
+! this first version depends *much* on the C implementation.
+!
+MMU_TEA = 0xfffffffc ! TLB Exception Address Register
+
+#define DO_FAULT(write) \
+ mov #MMU_TEA,r0; \
+ mov.l @r0,r6; \
+ /* STI */ \
+ mov.l 3f,r1; \
+ stc sr,r0; \
+ and r1,r0; \
+ ldc r0,sr; \
+ /* */ \
+ mov r15,r4; \
+ mov.l 2f,r0; \
+ jmp @r0; \
+ mov #write,r5;
+
+ .balign 4
+tlb_protection_violation_load:
+tlb_miss_load:
+ mov #-1,r0
+ mov.l r0,@r15 ! syscall nr = -1
+ DO_FAULT(0)
+
+ .balign 4
+tlb_protection_violation_store:
+tlb_miss_store:
+initial_page_write:
+ mov #-1,r0
+ mov.l r0,@r15 ! syscall nr = -1
+ DO_FAULT(1)
+
+ .balign 4
+2: .long SYMBOL_NAME(do_page_fault)
+3: .long 0xefffffff ! BL=0
+
+
+ .balign 4
+error: mov #-1,r0
+ ! STI
+ mov.l 2f,r1
+ stc sr,r0
+ and r1,r0
+ ldc r0,sr
+ !
+ mov.l r0,@r15 ! syscall nr = -1
+ mov.l 1f,r1
+ jmp @r1
+ nop
+ .balign 4
+1: .long SYMBOL_NAME(do_exception_error)
+
+reschedule:
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ mov.l 1f,r1
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long SYMBOL_NAME(schedule)
+
+badsys: mov #-ENOSYS,r0
+ bra SYMBOL_NAME(ret_from_syscall)
+ mov.l r0,@(R0,r15)
+
+signal_return:
+ ! We can reach here from an interrupt handler,
+	! so we need to unblock interrupts.
+ mov.l 1f,r1
+ stc sr,r0
+ and r1,r0
+ ldc r0,sr
+ !
+ mov r15,r4
+ mov #0,r5
+ mov.l 2f,r1
+ mova restore_all,r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+1: .long 0xefffffff ! BL=0
+2: .long SYMBOL_NAME(do_signal)
+
+!
+!
+!
+ENTRY(ret_from_fork)
+ bra SYMBOL_NAME(ret_from_syscall)
+ add #4,r15 ! pop down bogus r0
+
+!
+! The immediate value of "trapa" indicates the number of arguments
+! placed on the stack.
+!
+system_call:
+ mov #TRA,r2
+ mov.l @r2,r8
+ ! STI
+ mov.l 2f,r1
+ stc sr,r2
+ and r1,r2
+ ldc r2,sr
+ !
+ mov.l __n_sys,r1
+ cmp/ge r1,r0
+ bt badsys
+ !
+ stc ksp,r1 !
+ mov.l __tsk_flags,r0 !
+ add r0,r1 !
+ mov.l @r1,r0 ! Is it trace?
+ tst #PF_TRACESYS,r0
+ bt 6f
+ ! Trace system call
+ mov #-ENOSYS,r1
+ mov.l r1,@(R0,r15)
+ mov.l 3f,r1
+ jsr @r1
+ nop
+ mova 4f,r0
+ bra 7f
+ lds r0,pr
+ !
+6: mova 1f,r0
+ lds r0,pr
+ ! Build the stack frame if TRA > 0
+7: cmp/pl r8
+ bf 9f
+ shll2 r8 ! x4
+ mov #R15,r0
+ mov.l @(r0,r15),r0 ! get original stack
+8: add #-4,r8
+ mov.l @(r0,r8),r1
+ mov.l r1,@-r15
+ cmp/pl r8
+ bt 8b
+ !
+9: mov.l @(SYSCALL_NR,r15),r0
+ shll2 r0 ! x4
+ mov.l __sct,r1
+ add r1,r0
+ mov.l @r0,r1
+ jmp @r1
+ nop
+ .balign 4
+4: mov.l r0,@(R0,r15) ! save the return value
+ mov.l 3f,r1
+ mova SYMBOL_NAME(ret_from_syscall),r0
+ jmp @r1
+ lds r0,pr
+ .balign 4
+3: .long SYMBOL_NAME(syscall_trace)
+2: .long 0xefffffff ! BL=0
+1: mov.l r0,@(R0,r15) ! save the return value
+ /* fall through */
+
+ENTRY(ret_from_syscall)
+ENTRY(ret_from_irq)
+ mov.l __bh_mask,r0
+ mov.l @r0,r1
+ mov.l __bh_active,r0
+ mov.l @r0,r2
+ tst r2,r1
+ bt ret_with_reschedule
+handle_bottom_half:
+ mov.l __dbh,r0
+ jsr @r0
+ nop
+ret_with_reschedule:
+ stc ksp,r1
+ mov.l __minus8192,r0
+ add r0,r1
+ mov.l @(need_resched,r1),r0
+ tst #0xff,r0
+ bf reschedule
+ mov.l @(sigpending,r1),r0
+ tst #0xff,r0
+ bf signal_return
+ !
+ .balign 4
+restore_all:
+ add #4,r15 ! skip syscall number
+ mov.l @r15+,r0
+ mov.l @r15+,r1
+ mov.l @r15+,r2
+ mov.l @r15+,r3
+ mov.l @r15+,r4
+ mov.l @r15+,r5
+ mov.l @r15+,r6
+ mov.l @r15+,r7
+ stc sr,r14
+ mov.l __blrb_flags,r9 ! BL =1, RB=1
+ or r9,r14
+ ldc r14,sr ! here, change the register bank
+ mov.l @r15+,r8
+ mov.l @r15+,r9
+ mov.l @r15+,r10
+ mov.l @r15+,r11
+ mov.l @r15+,r12
+ mov.l @r15+,r13
+ mov.l @r15+,r14
+ mov.l @r15+,k0
+ ldc.l @r15+,gbr
+ lds.l @r15+,mach
+ lds.l @r15+,macl
+ lds.l @r15+,pr
+ ldc.l @r15+,ssr
+ ldc.l @r15+,spc
+ mov k0,r15
+ rte
+ nop
+
+ .balign 4
+__n_sys: .long NR_syscalls
+__sct: .long SYMBOL_NAME(sys_call_table)
+__bh_mask: .long SYMBOL_NAME(bh_mask)
+__bh_active: .long SYMBOL_NAME(bh_active)
+__dbh: .long SYMBOL_NAME(do_bottom_half)
+__blrb_flags: .long 0x30000000
+__minus8192: .long -8192 ! offset from stackbase to tsk
+__tsk_flags: .long flags-8192 ! offset from stackbase to tsk->flags
+
+
+! Exception Vector Base
+!
+! Should be aligned on a page boundary.
+!
+ .balign 4096,0,4096
+ENTRY(vbr_base)
+ .long 0
+!
+ .balign 256,0,256
+general_exception:
+ mov #EXPEVT,k2
+ mov.l 2f,k3
+ bra handle_exception
+ mov.l @k2,k2
+ .balign 4
+2: .long SYMBOL_NAME(ret_from_syscall)
+!
+!
+ .balign 1024,0,1024
+tlb_miss:
+ mov #EXPEVT,k2
+ mov.l 3f,k3
+ bra handle_exception
+ mov.l @k2,k2
+!
+ .balign 512,0,512
+interrupt:
+ mov #INTEVT,k2
+ mov.l 4f,k3
+ bra handle_exception
+ mov.l @k2,k2
+
+ .balign 4
+3: .long SYMBOL_NAME(ret_from_syscall)
+4: .long SYMBOL_NAME(ret_from_irq)
+
+!
+!
+handle_exception:
+	! Using k0, k1 for scratch registers (r0_bank1 and r1_bank1),
+ ! save all registers onto stack.
+ !
+ mov.l 2f,k1
+ stc ssr,k0 ! from kernel space?
+ shll k0 ! Check MD bit (bit30)
+ shll k0
+ bt/s 1f ! it's from kernel to kernel transition
+ mov r15,k0 ! save original stack to k0 anyway
+ mov kernel_sp,r15 ! change to kernel stack
+1: stc.l spc,@-r15 ! save control registers
+ stc.l ssr,@-r15
+ sts.l pr,@-r15
+ !
+ lds k3,pr ! Set the return address to pr
+ !
+ sts.l macl,@-r15
+ sts.l mach,@-r15
+ stc.l gbr,@-r15
+	mov.l	k0,@-r15	! save original stack and general registers
+ mov.l r14,@-r15
+ !
+ stc sr,r14 ! back to normal register bank, and
+ and k1,r14 ! ..
+ ldc r14,sr ! ...changed here.
+ !
+ mov.l r13,@-r15
+ mov.l r12,@-r15
+ mov.l r11,@-r15
+ mov.l r10,@-r15
+ mov.l r9,@-r15
+ mov.l r8,@-r15
+ mov.l r7,@-r15
+ mov.l r6,@-r15
+ mov.l r5,@-r15
+ mov.l r4,@-r15
+ mov.l r3,@-r15
+ mov.l r2,@-r15
+ mov.l r1,@-r15
+ mov.l r0,@-r15
+ mov.l r0,@-r15 ! push r0 again (for syscall number)
+	! Then dispatch to the handler according to the exception code.
+ stc k_ex_code,r1
+ shlr2 r1
+ shlr r1
+ mov.l 1f,r0
+ add r1,r0
+ mov.l @r0,r0
+ jmp @r0
+ mov.l @r15,r0 ! recovering r0..
+ .balign 4
+1: .long SYMBOL_NAME(exception_handling_table)
+2: .long 0xdfffffff ! RB=0, BL=1
+
+.data
+ENTRY(exception_handling_table)
+ .long 0
+ .long 0
+ .long tlb_miss_load
+ .long tlb_miss_store
+ .long initial_page_write
+ .long tlb_protection_violation_load
+ .long tlb_protection_violation_store
+ .long error ! address_error_load (filled by trap_init)
+ .long error ! address_error_store (filled by trap_init)
+ .long 0
+ .long 0
+ .long system_call ! Unconditional Trap
+ .long error ! reserved_instruction (filled by trap_init)
+ .long error ! illegal_slot_instruction (filled by trap_init)
+ENTRY(nmi_slot)
+ .long error ! Not implemented yet
+ENTRY(user_break_point_trap)
+ .long error ! Not implemented yet
+ENTRY(interrupt_table)
+ ! external hardware
+ .long SYMBOL_NAME(do_IRQ) ! 0000
+ .long SYMBOL_NAME(do_IRQ) ! 0001
+ .long SYMBOL_NAME(do_IRQ) ! 0010
+ .long SYMBOL_NAME(do_IRQ) ! 0011
+ .long SYMBOL_NAME(do_IRQ) ! 0100
+ .long SYMBOL_NAME(do_IRQ) ! 0101
+ .long SYMBOL_NAME(do_IRQ) ! 0110
+ .long SYMBOL_NAME(do_IRQ) ! 0111
+ .long SYMBOL_NAME(do_IRQ) ! 1000
+ .long SYMBOL_NAME(do_IRQ) ! 1001
+ .long SYMBOL_NAME(do_IRQ) ! 1010
+ .long SYMBOL_NAME(do_IRQ) ! 1011
+ .long SYMBOL_NAME(do_IRQ) ! 1100
+ .long SYMBOL_NAME(do_IRQ) ! 1101
+ .long SYMBOL_NAME(do_IRQ) ! 1110
+ .long 0
+ ! Internal hardware
+ .long SYMBOL_NAME(do_IRQ) ! TMU0 tuni0
+ .long SYMBOL_NAME(do_IRQ) ! TMU1 tuni1
+ .long SYMBOL_NAME(do_IRQ) ! TMU2 tuni2
+ .long SYMBOL_NAME(do_IRQ) ! ticpi2
+ .long SYMBOL_NAME(do_IRQ) ! RTC ati
+ .long SYMBOL_NAME(do_IRQ) ! pri
+ .long SYMBOL_NAME(do_IRQ) ! cui
+ .long SYMBOL_NAME(do_IRQ) ! SCI eri
+ .long SYMBOL_NAME(do_IRQ) ! rxi
+ .long SYMBOL_NAME(do_IRQ) ! txi
+ .long SYMBOL_NAME(do_IRQ) ! tei
+ .long SYMBOL_NAME(do_IRQ) ! WDT iti
+ .long SYMBOL_NAME(do_IRQ) ! REF rcmi
+ .long SYMBOL_NAME(do_IRQ) ! rovi
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+ .long SYMBOL_NAME(do_IRQ)
+
+ENTRY(sys_call_table)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+ .long SYMBOL_NAME(sys_open) /* 5 */
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+ .long SYMBOL_NAME(sys_unlink) /* 10 */
+ .long SYMBOL_NAME(sys_execve)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+ .long SYMBOL_NAME(sys_chmod) /* 15 */
+ .long SYMBOL_NAME(sys_lchown)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old break syscall holder */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+ .long SYMBOL_NAME(sys_getpid) /* 20 */
+ .long SYMBOL_NAME(sys_mount)
+ .long SYMBOL_NAME(sys_oldumount)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_getuid)
+ .long SYMBOL_NAME(sys_stime) /* 25 */
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+ .long SYMBOL_NAME(sys_utime) /* 30 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old stty syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old gtty syscall holder */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 35 */ /* old ftime syscall holder */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+ .long SYMBOL_NAME(sys_rmdir) /* 40 */
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old prof syscall holder */
+ .long SYMBOL_NAME(sys_brk) /* 45 */
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid)
+ .long SYMBOL_NAME(sys_getegid) /* 50 */
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_umount) /* recycled never used phys() */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old lock syscall holder */
+ .long SYMBOL_NAME(sys_ioctl)
+ .long SYMBOL_NAME(sys_fcntl) /* 55 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* old mpx syscall holder */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old ulimit syscall holder */
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_olduname */
+ .long SYMBOL_NAME(sys_umask) /* 60 */
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+ .long SYMBOL_NAME(sys_getpgrp) /* 65 */
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+ .long SYMBOL_NAME(sys_setreuid) /* 70 */
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_sigsuspend)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+ .long SYMBOL_NAME(sys_setrlimit) /* 75 */
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+ .long SYMBOL_NAME(sys_getgroups) /* 80 */
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old_select */
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+ .long SYMBOL_NAME(sys_readlink) /* 85 */
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old_mmap */ /* 90 */
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+ .long SYMBOL_NAME(sys_fchown) /* 95 */
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* old profil syscall holder */
+ .long SYMBOL_NAME(sys_statfs)
+ .long SYMBOL_NAME(sys_fstatfs) /* 100 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* ioperm */
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+ .long SYMBOL_NAME(sys_getitimer) /* 105 */
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_uname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 110 */ /* iopl */
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_ni_syscall) /* idle */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86old */
+ .long SYMBOL_NAME(sys_wait4)
+ .long SYMBOL_NAME(sys_swapoff) /* 115 */
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn)
+ .long SYMBOL_NAME(sys_clone) /* 120 */
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* sys_modify_ldt */
+ .long SYMBOL_NAME(sys_adjtimex)
+ .long SYMBOL_NAME(sys_mprotect) /* 125 */
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_create_module)
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+ .long SYMBOL_NAME(sys_get_kernel_syms) /* 130 */
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+ .long SYMBOL_NAME(sys_sysfs) /* 135 */
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* for afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid)
+ .long SYMBOL_NAME(sys_setfsgid)
+ .long SYMBOL_NAME(sys_llseek) /* 140 */
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_msync)
+ .long SYMBOL_NAME(sys_readv) /* 145 */
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_fdatasync)
+ .long SYMBOL_NAME(sys_sysctl)
+ .long SYMBOL_NAME(sys_mlock) /* 150 */
+ .long SYMBOL_NAME(sys_munlock)
+ .long SYMBOL_NAME(sys_mlockall)
+ .long SYMBOL_NAME(sys_munlockall)
+ .long SYMBOL_NAME(sys_sched_setparam)
+ .long SYMBOL_NAME(sys_sched_getparam) /* 155 */
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+ .long SYMBOL_NAME(sys_sched_get_priority_min) /* 160 */
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_mremap)
+ .long SYMBOL_NAME(sys_setresuid)
+ .long SYMBOL_NAME(sys_getresuid) /* 165 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* vm86 */
+ .long SYMBOL_NAME(sys_query_module)
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+ .long SYMBOL_NAME(sys_setresgid) /* 170 */
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+ .long SYMBOL_NAME(sys_rt_sigprocmask) /* 175 */
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend)
+ .long SYMBOL_NAME(sys_pread) /* 180 */
+ .long SYMBOL_NAME(sys_pwrite)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_getcwd)
+ .long SYMBOL_NAME(sys_capget)
+ .long SYMBOL_NAME(sys_capset) /* 185 */
+ .long SYMBOL_NAME(sys_sigaltstack)
+ .long SYMBOL_NAME(sys_sendfile)
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */
+ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
+ .long SYMBOL_NAME(sys_vfork) /* 190 */
+
+ /*
+ * NOTE!! This doesn't have to be exact - we just have
+ * to make sure we have _enough_ of the "sys_ni_syscall"
+ * entries. Don't panic if you notice that this hasn't
+ * been shrunk every time we add a new system call.
+ */
+ .rept NR_syscalls-190
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .endr
+
+/* End of entry.S */
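
For orientation, the system-call path in entry.S above reduces to a bounds check ("cmp/ge r1,r0 ; bt badsys") followed by an indexed jump through sys_call_table. A minimal C sketch of that dispatch, assuming the conventions documented above (r0 carries the syscall number, badsys returns -ENOSYS); the function name and NR_SYSCALLS constant are illustrative, not kernel API:

#include <errno.h>                    /* for ENOSYS */

#define NR_SYSCALLS 190               /* illustrative; the real bound is NR_syscalls */

typedef long (*syscall_fn)(long, long, long, long);
extern syscall_fn sys_call_table[];   /* the table defined in entry.S above */

/* Illustrative only: the real dispatch happens entirely in assembly. */
long dispatch_syscall(unsigned long nr, long a0, long a1, long a2, long a3)
{
	if (nr >= NR_SYSCALLS)
		return -ENOSYS;       /* the badsys path */
	return sys_call_table[nr](a0, a1, a2, a3);
}
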
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S
new file mode 100644
index 000000000..ed466ba38
--- /dev/null
+++ b/arch/sh/kernel/head.S
@@ -0,0 +1,69 @@
+/* $Id$
+ *
+ * arch/sh/kernel/head.S
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * head.S contains the SH startup code (the exception handlers live in entry.S).
+ */
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_CPU_SH3
+/* The following values are assumed to be small enough to fit in an immediate. */
+#define CCR 0xffffffec /* Address of Cache Control Register */
+#define CACHE_INIT 0x00000009 /* 8k-byte cache, flush, enable */
+#elif CONFIG_CPU_SH4
+/* Should fill here. */
+#endif
+
+ENTRY(_stext)
+ ! Switch to register bank 0
+ stc sr,r1 !
+ mov.l 1f,r0 ! RB=0, BL=1
+ and r1,r0
+ ldc r0,sr
+ ! Enable cache
+#ifdef CONFIG_CPU_SH3
+ mov #CCR,r1
+ mov.l @r1,r0
+ cmp/eq #1,r0 ! If it's enabled already, don't flush it
+ bt/s 8f
+ mov #CACHE_INIT,r0
+ mov.l r0,@r1
+#elif CONFIG_CPU_SH4
+ ! Should fill here.
+#endif
+8:
+ !
+ mov.l 2f,r0
+ mov r0,r15 ! Set initial r15 (stack pointer)
+ ldc r0,r4_bank ! and stack base
+ ! Clear BSS area
+ mov.l 3f,r1
+ mov.l 4f,r2
+ mov #0,r0
+9: mov.l r0,@r1
+ cmp/hs r2,r1
+ bf/s 9b
+ add #4,r1
+ ! Start kernel
+ mov.l 5f,r0
+ jmp @r0
+ nop
+
+ .balign 4
+1: .long 0xdfffffff ! RB=0, BL=1
+2: .long SYMBOL_NAME(stack)
+3: .long SYMBOL_NAME(__bss_start)
+4: .long SYMBOL_NAME(_end)
+5: .long SYMBOL_NAME(start_kernel)
+
+.data
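
The BSS-clearing loop labelled "9:" above is the standard startup idiom. In C it would read roughly as follows — a sketch only, since no C environment exists at this point in boot, which is why it is done in assembly:

extern char __bss_start[], _end[];   /* provided by vmlinux.lds.S */

/* Equivalent of the "9:" loop above: zero the kernel BSS one word
 * at a time before jumping to start_kernel(). */
static void clear_bss(void)
{
	unsigned long *p = (unsigned long *)__bss_start;

	while (p < (unsigned long *)_end)
		*p++ = 0;
}
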
diff --git a/arch/sh/kernel/init_task.c b/arch/sh/kernel/init_task.c
new file mode 100644
index 000000000..aacd8f304
--- /dev/null
+++ b/arch/sh/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by having a special
+ * "init_task" linker map entry..
+ */
+union task_union init_task_union
+ __attribute__((__section__(".data.init_task"))) =
+ { INIT_TASK(init_task_union.task) };
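
The 8192-byte alignment requirement exists because the task structure and its kernel stack share a single 8 KB block; entry.S above recovers the task_struct from the stack base kept in r4_bank via its __minus8192 offset. A hedged illustration of the underlying trick (the function name is made up, not a kernel API):

struct task_struct;                   /* opaque here */

/* With task_struct and kernel stack sharing one 8 KB aligned block,
 * masking any kernel stack address recovers the task_struct. */
static inline struct task_struct *task_from_sp(unsigned long sp)
{
	return (struct task_struct *)(sp & ~8191UL);
}
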
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
new file mode 100644
index 000000000..f75af5003
--- /dev/null
+++ b/arch/sh/kernel/irq.c
@@ -0,0 +1,485 @@
+/*
+ * linux/arch/sh/kernel/irq.c
+ *
+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
+ *
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * IRQs are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <linux/irq.h>
+
+
+unsigned int local_bh_count[NR_CPUS];
+unsigned int local_irq_count[NR_CPUS];
+
+/*
+ * Micro-access to controllers is serialized over the whole
+ * system. We never hold this lock when we call the actual
+ * IRQ handler.
+ */
+spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+
+/*
+ * Special irq handlers.
+ */
+
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
+
+/*
+ * Generic, controller-independent functions:
+ */
+
+int get_irq_list(char *buf)
+{
+ int i, j;
+ struct irqaction * action;
+ char *p = buf;
+
+ p += sprintf(p, " ");
+ for (j=0; j<smp_num_cpus; j++)
+ p += sprintf(p, "CPU%d ",j);
+ *p++ = '\n';
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_desc[i].action;
+ if (!action)
+ continue;
+ p += sprintf(p, "%3d: ",i);
+ p += sprintf(p, "%10u ", kstat_irqs(i));
+ p += sprintf(p, " %14s", irq_desc[i].handler->typename);
+ p += sprintf(p, " %s", action->name);
+
+ for (action=action->next; action; action = action->next) {
+ p += sprintf(p, ", %s", action->name);
+ }
+ *p++ = '\n';
+ }
+ return p - buf;
+}
+
+/*
+ * This should really return information about whether
+ * we should do bottom half handling etc. Right now we
+ * end up _always_ checking the bottom half, which is a
+ * waste of time and is not what some drivers would
+ * prefer.
+ */
+int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
+{
+ int status;
+ int cpu = smp_processor_id();
+
+ irq_enter(cpu, irq);
+
+ status = 1; /* Force the "do bottom halves" bit */
+
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+
+ irq_exit(cpu, irq);
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+void disable_irq_nosync(unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ if (!irq_desc[irq].depth++) {
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count[smp_processor_id()]) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
+}
+
+void enable_irq(unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+ switch (irq_desc[irq].depth) {
+ case 1: {
+ unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
+ irq_desc[irq].status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ irq_desc[irq].status = status | IRQ_REPLAY;
+ hw_resend_irq(irq_desc[irq].handler,irq);
+ }
+ irq_desc[irq].handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ irq_desc[irq].depth--;
+ break;
+ case 0:
+ printk("enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's.
+ */
+asmlinkage int do_IRQ(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ int irq;
+ int cpu = smp_processor_id();
+ irq_desc_t *desc;
+ struct irqaction * action;
+ unsigned int status;
+
+ /* Get IRQ number */
+ asm volatile("stc r2_bank,%0\n\t"
+ "shlr2 %0\n\t"
+ "shlr2 %0\n\t"
+ "shlr %0\n\t"
+ "add #-16,%0\n\t"
+ :"=z" (irq));
+
+ kstat.irqs[cpu][irq]++;
+ desc = irq_desc + irq;
+ spin_lock(&irq_controller_lock);
+ irq_desc[irq].handler->ack(irq);
+ /*
+ REPLAY is when Linux resends an IRQ that was dropped earlier
+ WAITING is used by probe to mark irqs that are being tested
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+ spin_unlock(&irq_controller_lock);
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+	 * Since we set PENDING, if another processor is handling
+	 * a different instance of this same irq, the other processor
+	 * will take care of it.
+ */
+ if (!action)
+ return 1;
+
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ handle_IRQ_event(irq, &regs, action);
+ spin_lock(&irq_controller_lock);
+
+ if (!(desc->status & IRQ_PENDING))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ spin_unlock(&irq_controller_lock);
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+ if (!(desc->status & IRQ_DISABLED)){
+ irq_desc[irq].handler->end(irq);
+ }
+ spin_unlock(&irq_controller_lock);
+
+ /*
+ * This should be conditional: we should really get
+ * a return code from the irq handler to tell us
+ * whether the handler wants us to do software bottom
+ * half handling or not..
+ */
+ if (1) {
+ if (bh_active & bh_mask)
+ do_bottom_half();
+ }
+ return 1;
+}
+
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags,
+ const char * devname,
+ void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq >= NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS)
+ return;
+
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now remove it from the list of entries */
+ *pp = action->next;
+ if (irq_desc[irq].action)
+ break;
+ irq_desc[irq].status |= IRQ_DISABLED;
+ irq_desc[irq].handler->shutdown(irq);
+ break;
+ }
+ printk("Trying to free free IRQ%d\n",irq);
+ break;
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+}
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+unsigned long probe_irq_on(void)
+{
+ unsigned int i;
+ unsigned long delay;
+
+ /*
+ * first, enable any unassigned irqs
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i = NR_IRQS-1; i > 0; i--) {
+ if (!irq_desc[i].action) {
+ irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if(irq_desc[i].handler->startup(i))
+ irq_desc[i].status |= IRQ_PENDING;
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ /* about 100ms delay */ synchronize_irq();
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ return 0x12345678;
+}
+
+int probe_irq_off(unsigned long unused)
+{
+ int i, irq_found, nr_irqs;
+
+ if (unused != 0x12345678)
+ printk("Bad IRQ probe from %lx\n", (&unused)[-1]);
+
+ nr_irqs = 0;
+ irq_found = 0;
+ spin_lock_irq(&irq_controller_lock);
+ for (i=0; i<NR_IRQS; i++) {
+ unsigned int status = irq_desc[i].status;
+
+ if (!(status & IRQ_AUTODETECT))
+ continue;
+
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ irq_desc[i].status = status & ~IRQ_AUTODETECT;
+ irq_desc[i].handler->shutdown(i);
+ }
+ spin_unlock_irq(&irq_controller_lock);
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
+
+int setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+		 * This function might sleep, so we want to call it first,
+		 * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if an attempt is
+		 * made to load the wrong driver without actually installing
+		 * a new handler, but is this really a problem? Only the
+		 * sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&irq_controller_lock,flags);
+ p = &irq_desc[irq].action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ irq_desc[irq].depth = 0;
+ irq_desc[irq].status &= ~IRQ_DISABLED;
+ irq_desc[irq].handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&irq_controller_lock,flags);
+ return 0;
+}
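
As a usage note, drivers consume the request_irq()/free_irq() interface implemented above in the usual way. A minimal, hypothetical client — the IRQ number, name, and handler body are made up for illustration:

#define DEMO_IRQ 5                    /* made-up interrupt number */

static void demo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	/* service the (hypothetical) device here */
}

static int demo_init(void)
{
	/* SA_INTERRUPT: run the handler with interrupts disabled */
	return request_irq(DEMO_IRQ, demo_interrupt, SA_INTERRUPT,
			   "demo", NULL);
}

static void demo_exit(void)
{
	free_irq(DEMO_IRQ, NULL);     /* dev_id must match request_irq() */
}
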
diff --git a/arch/sh/kernel/irq_onchip.c b/arch/sh/kernel/irq_onchip.c
new file mode 100644
index 000000000..2eae049e5
--- /dev/null
+++ b/arch/sh/kernel/irq_onchip.c
@@ -0,0 +1,168 @@
+/*
+ * linux/arch/sh/kernel/irq_onchip.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Interrupt handling for on-chip supporting modules (TMU, RTC, etc.).
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/smp.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+
+#include <linux/irq.h>
+
+
+/*
+ * Generic (non-SH-specific) "no controller" code
+ */
+
+static void enable_none(unsigned int irq) { }
+static unsigned int startup_none(unsigned int irq) { return 0; }
+static void disable_none(unsigned int irq) { }
+static void ack_none(unsigned int irq)
+{
+}
+
+/* startup is the same as "enable", shutdown is the same as "disable" */
+#define shutdown_none disable_none
+#define end_none enable_none
+
+struct hw_interrupt_type no_irq_type = {
+ "none",
+ startup_none,
+ shutdown_none,
+ enable_none,
+ disable_none,
+ ack_none,
+ end_none
+};
+
+struct ipr_data {
+ int offset;
+ int priority;
+};
+static struct ipr_data ipr_data[NR_IRQS-TIMER_IRQ];
+
+void set_ipr_data(unsigned int irq, int offset, int priority)
+{
+ ipr_data[irq-TIMER_IRQ].offset = offset;
+ ipr_data[irq-TIMER_IRQ].priority = priority;
+}
+
+static void enable_onChip_irq(unsigned int irq);
+void disable_onChip_irq(unsigned int irq);
+
+/* shutdown is the same as "disable" */
+#define shutdown_onChip_irq disable_onChip_irq
+
+static void mask_and_ack_onChip(unsigned int);
+static void end_onChip_irq(unsigned int irq);
+
+static unsigned int startup_onChip_irq(unsigned int irq)
+{
+ enable_onChip_irq(irq);
+ return 0; /* never anything pending */
+}
+
+static struct hw_interrupt_type onChip_irq_type = {
+ "On-Chip Supporting Module",
+ startup_onChip_irq,
+ shutdown_onChip_irq,
+ enable_onChip_irq,
+ disable_onChip_irq,
+ mask_and_ack_onChip,
+ end_onChip_irq
+};
+
+/*
+ * These have to be protected by the irq controller spinlock
+ * before being called.
+ *
+ *
+ * IPRA 15-12 11-8 7-4 3-0
+ * IPRB 15-12 11-8 7-4 3-0
+ * IPRC 15-12 11-8 7-4 3-0
+ *
+ */
+#define INTC_IPR 0xfffffee2UL /* Word access */
+
+void disable_onChip_irq(unsigned int irq)
+{
+ /* Set priority in IPR to 0 */
+ int offset = ipr_data[irq-TIMER_IRQ].offset;
+ unsigned long intc_ipr_address = INTC_IPR + offset/16;
+ unsigned short mask = 0xffff ^ (0xf << (offset%16));
+ unsigned long __dummy;
+
+ asm volatile("mov.w @%1,%0\n\t"
+ "and %2,%0\n\t"
+ "mov.w %0,@%1"
+ : "=&z" (__dummy)
+ : "r" (intc_ipr_address), "r" (mask)
+ : "memory" );
+}
+
+static void enable_onChip_irq(unsigned int irq)
+{
+ /* Set priority in IPR back to original value */
+ int offset = ipr_data[irq-TIMER_IRQ].offset;
+ int priority = ipr_data[irq-TIMER_IRQ].priority;
+ unsigned long intc_ipr_address = INTC_IPR + offset/16;
+ unsigned short value = (priority << (offset%16));
+ unsigned long __dummy;
+
+ asm volatile("mov.w @%1,%0\n\t"
+ "or %2,%0\n\t"
+ "mov.w %0,@%1"
+ : "=&z" (__dummy)
+ : "r" (intc_ipr_address), "r" (value)
+ : "memory" );
+}
+
+void make_onChip_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ irq_desc[irq].handler = &onChip_irq_type;
+ enable_irq(irq);
+}
+
+static void mask_and_ack_onChip(unsigned int irq)
+{
+ disable_onChip_irq(irq);
+ sti();
+}
+
+static void end_onChip_irq(unsigned int irq)
+{
+ enable_onChip_irq(irq);
+ cli();
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+
+ for (i = TIMER_IRQ; i < NR_IRQS; i++) {
+ irq_desc[i].handler = &onChip_irq_type;
+ }
+}
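
The IPR arithmetic above packs one 4-bit priority per interrupt source into 16-bit registers: each source's field sits at bit position offset%16 within the IPR word located offset/16 bytes past INTC_IPR. A pure-C restatement of the field selection, kept apart from the hardware access; the struct and helper are illustrative, mirroring the expressions in disable_onChip_irq()/enable_onChip_irq():

struct ipr_field {
	unsigned long  address;       /* which 16-bit IPR word */
	unsigned short clear;         /* AND mask that zeroes the field */
	unsigned short set;           /* OR value that restores priority */
};

static struct ipr_field ipr_field(unsigned long base, int offset, int priority)
{
	struct ipr_field f;

	f.address = base + offset / 16;
	f.clear   = 0xffff ^ (0xf << (offset % 16));
	f.set     = priority << (offset % 16);
	return f;
}
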
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
new file mode 100644
index 000000000..744da694b
--- /dev/null
+++ b/arch/sh/kernel/process.c
@@ -0,0 +1,303 @@
+/*
+ * linux/arch/sh/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/mmu_context.h>
+#include <asm/elf.h>
+
+#include <linux/irq.h>
+
+static int hlt_counter=0;
+
+#define HARD_IDLE_TIMEOUT (HZ / 3)
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+/*
+ * The idle loop on a uniprocessor SuperH..
+ */
+void cpu_idle(void *unused)
+{
+ /* endless idle loop with no priority at all */
+ init_idle();
+ current->priority = 0;
+ current->counter = -100;
+
+ while (1) {
+ while (!current->need_resched) {
+ if (hlt_counter)
+ continue;
+ __sti();
+ asm volatile("sleep" : : : "memory");
+ }
+ schedule();
+ check_pgt_cache();
+ }
+}
+
+void machine_restart(char * __unused)
+{ /* Need to set MMU_TTB?? */
+}
+
+void machine_halt(void)
+{
+}
+
+void machine_power_off(void)
+{
+}
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("PC: [<%08lx>]", regs->pc);
+ printk(" SP: %08lx", regs->u_regs[UREG_SP]);
+ printk(" SR: %08lx\n", regs->sr);
+ printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
+ regs->u_regs[0],regs->u_regs[1],
+ regs->u_regs[2],regs->u_regs[3]);
+ printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
+ regs->u_regs[4],regs->u_regs[5],
+ regs->u_regs[6],regs->u_regs[7]);
+ printk("R8 : %08lx R9 : %08lx R10: %08lx R11: %08lx\n",
+ regs->u_regs[8],regs->u_regs[9],
+ regs->u_regs[10],regs->u_regs[11]);
+ printk("R12: %08lx R13: %08lx R14: %08lx\n",
+ regs->u_regs[12],regs->u_regs[13],
+ regs->u_regs[14]);
+ printk("MACH: %08lx MACL: %08lx GBR: %08lx PR: %08lx",
+ regs->mach, regs->macl, regs->gbr, regs->pr);
+}
+
+struct task_struct * alloc_task_struct(void)
+{
+ /* Get two pages */
+ return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
+}
+
+void free_task_struct(struct task_struct *p)
+{
+ free_pages((unsigned long) p, 1);
+}
+
+/*
+ * Create a kernel thread
+ */
+
+/*
+ * This is the mechanism for creating a new kernel thread.
+ *
+ * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
+ * who haven't done an "execve()") should use this: it will work within
+ * a system call from a "real" process, but the process memory space will
+ * not be freed until both the parent and the child have exited.
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{ /* Don't use this with BL=1 (cli), or else the CPU resets! */
+ register unsigned long __sc0 __asm__ ("r0") = __NR_clone;
+ register unsigned long __sc4 __asm__ ("r4") = (long) flags | CLONE_VM;
+ register unsigned long __sc5 __asm__ ("r5") = 0;
+ register unsigned long __sc8 __asm__ ("r8") = (long) arg;
+ register unsigned long __sc9 __asm__ ("r9") = (long) fn;
+ __asm__ __volatile__(
+ "trapa #0\n\t" /* Linux/SH system call */
+ "tst #0xff,r0\n\t" /* child or parent? */
+ "bf 1f\n\t" /* parent - jump */
+ "jsr @r9\n\t" /* call fn */
+ " mov r8,r4\n\t" /* push argument */
+ "mov r0,r4\n\t" /* return value to arg of exit */
+ "mov %2,r0\n\t" /* exit */
+ "trapa #0\n"
+ "1:"
+ :"=z" (__sc0)
+ :"0" (__sc0), "i" (__NR_exit),
+ "r" (__sc4), "r" (__sc5), "r" (__sc8), "r" (__sc9)
+ :"memory");
+ return __sc0;
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ /* nothing to do ... */
+}
+
+void flush_thread(void)
+{
+ /* do nothing */
+ /* Possibly, set clear debug registers */
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+ /* do nothing */
+}
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+{
+ return 0; /* Task didn't use the fpu at all. */
+}
+
+asmlinkage void ret_from_fork(void);
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+
+ childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p)) - 1;
+
+ *childregs = *regs;
+ if (user_mode(regs)) {
+ childregs->u_regs[UREG_SP] = usp;
+ } else {
+ childregs->u_regs[UREG_SP] = (unsigned long)p+2*PAGE_SIZE;
+ }
+ childregs->u_regs[0] = 0; /* Set return value for child */
+
+ p->thread.sp = (unsigned long) childregs;
+ p->thread.pc = (unsigned long) ret_from_fork;
+ if (p->mm)
+ p->mm->context = NO_CONTEXT;
+
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->u_regs[UREG_SP] & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ /* Debug registers will come here. */
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->regs = *regs;
+}
+
+/*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ */
+void __switch_to(struct task_struct *prev, struct task_struct *next)
+{
+ /*
+ * Restore the kernel stack onto kernel mode register
+ * k4 (r4_bank1)
+ */
+ asm volatile("ldc %0,r4_bank"
+ : /* no output */
+ :"r" ((unsigned long)next+8192));
+}
+
+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_fork(SIGCHLD, regs.u_regs[UREG_SP], &regs);
+}
+
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ if (!newsp)
+ newsp = regs.u_regs[UREG_SP];
+ return do_fork(clone_flags, newsp, &regs);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+ regs.u_regs[UREG_SP], &regs);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *ufilename, char **uargv,
+ char **uenvp, unsigned long r7,
+ struct pt_regs regs)
+{
+ int error;
+ char *filename;
+
+ lock_kernel();
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, uargv, uenvp, &regs);
+ if (error == 0)
+ current->flags &= ~PF_DTRACE;
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
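
A hypothetical caller of kernel_thread() as implemented above; the thread body and flags are illustrative, and note the function's caveat about not calling it with BL=1 (interrupts blocked):

/* Spawn a kernel-only thread running demo_thread(). */
static int demo_thread(void *arg)
{
	for (;;) {
		/* periodic kernel-side work using arg ... */
	}
	return 0;
}

static void start_demo_thread(void)
{
	/* flags beyond the implicit CLONE_VM are left at zero here */
	kernel_thread(demo_thread, NULL, 0);
}
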
diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c
new file mode 100644
index 000000000..2d69b5b7c
--- /dev/null
+++ b/arch/sh/kernel/ptrace.c
@@ -0,0 +1,476 @@
+/*
+ * Surely this doesn't work... (we need to design ptrace for SuperH)
+ * linux/arch/sh/kernel/ptrace.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
+/*
+ * Does not yet catch signals sent when the child dies
+ * (that would happen in exit.c or in signal.c).
+ */
+
+/* determines which flags the user has access to. */
+/* 1 = access 0 = no access */
+#define FLAG_MASK 0x00044dd5
+
+/* sets the trap flag. */
+#define TRAP_FLAG 0x100
+
+/*
+ * Offset of eflags on child stack..
+ */
+#define EFL_OFFSET ((EFL-2)*4-sizeof(struct pt_regs))
+
+/*
+ * This routine will get a word off of the process's privileged stack.
+ * The offset is how far from the base addr as stored in the TSS.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)task->thread.sp;
+ stack += offset;
+ return (*((int *)stack));
+}
+
+/*
+ * This routine will put a word on the process's privileged stack.
+ * The offset is how far from the base addr as stored in the TSS.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char * stack;
+
+ stack = (unsigned char *) task->thread.sp;
+ stack += offset;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+static int putreg(struct task_struct *child,
+ unsigned long regno, unsigned long value)
+{
+#if 0
+ switch (regno >> 2) {
+ case ORIG_EAX:
+ return -EIO;
+ case FS:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ child->thread.fs = value;
+ return 0;
+ case GS:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ child->thread.gs = value;
+ return 0;
+ case DS:
+ case ES:
+ if (value && (value & 3) != 3)
+ return -EIO;
+ value &= 0xffff;
+ break;
+ case SS:
+ case CS:
+ if ((value & 3) != 3)
+ return -EIO;
+ value &= 0xffff;
+ break;
+ case EFL:
+ value &= FLAG_MASK;
+ value |= get_stack_long(child, EFL_OFFSET) & ~FLAG_MASK;
+ }
+ if (regno > GS*4)
+ regno -= 2*4;
+ put_stack_long(child, regno - sizeof(struct pt_regs), value);
+#endif
+ return 0;
+}
+
+static unsigned long getreg(struct task_struct *child,
+ unsigned long regno)
+{
+ unsigned long retval = ~0UL;
+
+#if 0
+ switch (regno >> 2) {
+ case FS:
+ retval = child->thread.fs;
+ break;
+ case GS:
+ retval = child->thread.gs;
+ break;
+ case DS:
+ case ES:
+ case SS:
+ case CS:
+ retval = 0xffff;
+ /* fall through */
+ default:
+ if (regno > GS*4)
+ regno -= 2*4;
+ regno = regno - sizeof(struct pt_regs);
+ retval &= get_stack_long(child, regno);
+ }
+#endif
+ return retval;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ struct user * dummy = NULL;
+ unsigned long flags;
+ int i, ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->flags & PF_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->flags |= PF_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ read_unlock(&tasklist_lock); /* FIXME!!! */
+ if (!child)
+ goto out;
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out;
+ if (request == PTRACE_ATTACH) {
+ if (child == current)
+ goto out;
+ if ((!child->dumpable ||
+ (current->uid != child->euid) ||
+ (current->uid != child->suid) ||
+ (current->uid != child->uid) ||
+ (current->gid != child->egid) ||
+ (current->gid != child->sgid) ||
+ (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
+ (current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
+ goto out;
+ /* the same process cannot be attached many times */
+ if (child->flags & PF_PTRACED)
+ goto out;
+ child->flags |= PF_PTRACED;
+
+ write_lock_irqsave(&tasklist_lock, flags);
+ if (child->p_pptr != current) {
+ REMOVE_LINKS(child);
+ child->p_pptr = current;
+ SET_LINKS(child);
+ }
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
+ send_sig(SIGSTOP, child, 1);
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ if (!(child->flags & PF_PTRACED))
+ goto out;
+ if (child->state != TASK_STOPPED) {
+ if (request != PTRACE_KILL)
+ goto out;
+ }
+ if (child->p_pptr != current)
+ goto out;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ goto out;
+ ret = put_user(tmp,(unsigned long *) data);
+ goto out;
+ }
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ goto out;
+
+ tmp = 0; /* Default return condition */
+ if(addr < 17*sizeof(long))
+ tmp = getreg(child, addr);
+#if 0
+ if(addr >= (long) &dummy->u_debugreg[0] &&
+ addr <= (long) &dummy->u_debugreg[7]){
+ addr -= (long) &dummy->u_debugreg[0];
+ addr = addr >> 2;
+ tmp = child->thread.debugreg[addr];
+ };
+#endif
+ ret = put_user(tmp,(unsigned long *) data);
+ goto out;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ goto out;
+ ret = -EIO;
+ goto out;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ goto out;
+
+ if (addr < 17*sizeof(long)) {
+ ret = putreg(child, addr, data);
+ goto out;
+ }
+
+ /* We need to be very careful here. We implicitly
+ want to modify a portion of the task_struct, and we
+ have to be selective about what portions we allow someone
+ to modify. */
+#if 0
+ if(addr >= (long) &dummy->u_debugreg[0] &&
+ addr <= (long) &dummy->u_debugreg[7]){
+
+ if(addr == (long) &dummy->u_debugreg[4]) return -EIO;
+ if(addr == (long) &dummy->u_debugreg[5]) return -EIO;
+ if(addr < (long) &dummy->u_debugreg[4] &&
+ ((unsigned long) data) >= TASK_SIZE-3) return -EIO;
+
+ ret = -EIO;
+ if(addr == (long) &dummy->u_debugreg[7]) {
+ data &= ~DR_CONTROL_RESERVED;
+ for(i=0; i<4; i++)
+ if ((0x5f54 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
+ goto out;
+ };
+
+ addr -= (long) &dummy->u_debugreg;
+ addr = addr >> 2;
+ child->thread.debugreg[addr] = data;
+ ret = 0;
+ goto out;
+ };
+#endif
+ ret = -EIO;
+ goto out;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ if (request == PTRACE_SYSCALL)
+ child->flags |= PF_TRACESYS;
+ else
+ child->flags &= ~PF_TRACESYS;
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET,tmp);
+#endif
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+
+/*
+ * Make the child exit.  Best I can do is send it a SIGKILL.
+ * Perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ long tmp;
+
+ ret = 0;
+ if (child->state == TASK_ZOMBIE) /* already dead */
+ goto out;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ wake_up_process(child);
+ goto out;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~PF_TRACESYS;
+ if ((child->flags & PF_DTRACE) == 0) {
+ /* Spurious delayed TF traps may occur */
+ child->flags |= PF_DTRACE;
+ }
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) | TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+
+ case PTRACE_DETACH: { /* detach a process that was attached. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ child->exit_code = data;
+ write_lock_irqsave(&tasklist_lock, flags);
+ REMOVE_LINKS(child);
+ child->p_pptr = child->p_opptr;
+ SET_LINKS(child);
+ write_unlock_irqrestore(&tasklist_lock, flags);
+ /* make sure the single step bit is not set. */
+#if 0
+ tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
+ put_stack_long(child, EFL_OFFSET, tmp);
+#endif
+ wake_up_process(child);
+ ret = 0;
+ goto out;
+ }
+#if 0
+ case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+ if (!access_ok(VERIFY_WRITE, (unsigned *)data,
+ 17*sizeof(long)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ for ( i = 0; i < 17*sizeof(long); i += sizeof(long) )
+ {
+ __put_user(getreg(child, i),(unsigned long *) data);
+ data += sizeof(long);
+ }
+ ret = 0;
+ goto out;
+ };
+
+ case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+ unsigned long tmp;
+ if (!access_ok(VERIFY_READ, (unsigned *)data,
+ 17*sizeof(long)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ for ( i = 0; i < 17*sizeof(long); i += sizeof(long) )
+ {
+ __get_user(tmp, (unsigned long *) data);
+ putreg(child, i, tmp);
+ data += sizeof(long);
+ }
+ ret = 0;
+ goto out;
+ };
+
+ case PTRACE_GETFPREGS: { /* Get the child FPU state. */
+ if (!access_ok(VERIFY_WRITE, (unsigned *)data,
+ sizeof(struct user_i387_struct)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+ if ( !child->used_math ) {
+ /* Simulate an empty FPU. */
+ child->thread.i387.hard.cwd = 0xffff037f;
+ child->thread.i387.hard.swd = 0xffff0000;
+ child->thread.i387.hard.twd = 0xffffffff;
+ }
+ __copy_to_user((void *)data, &child->thread.i387.hard,
+ sizeof(struct user_i387_struct));
+ goto out;
+ };
+
+ case PTRACE_SETFPREGS: { /* Set the child FPU state. */
+ if (!access_ok(VERIFY_READ, (unsigned *)data,
+ sizeof(struct user_i387_struct)))
+ {
+ ret = -EIO;
+ goto out;
+ }
+ child->used_math = 1;
+ __copy_from_user(&child->thread.i387.hard, (void *)data,
+ sizeof(struct user_i387_struct));
+ ret = 0;
+ goto out;
+ };
+#endif
+ default:
+ ret = -EIO;
+ goto out;
+ }
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if ((current->flags & (PF_PTRACED|PF_TRACESYS))
+ != (PF_PTRACED|PF_TRACESYS))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
new file mode 100644
index 000000000..b9f565dd8
--- /dev/null
+++ b/arch/sh/kernel/semaphore.c
@@ -0,0 +1,133 @@
+/*
+ * Taken straight from the Alpha implementation;
+ * it may well not work correctly here yet.
+ */
+/*
+ * Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/sched.h>
+#include <asm/semaphore-helper.h>
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
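+
+/*
+ * For reference only, not compiled here: a minimal sketch of the
+ * waking_non_zero() helper this code expects from
+ * <asm/semaphore-helper.h>, assuming a global semaphore_wake_lock
+ * spinlock as on other ports without an atomic compare-and-exchange.
+ */
+#if 0
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	/* One waiter consumes each wakeup credit left by __up(). */
+	spin_lock_irqsave(&semaphore_wake_lock, flags);
+	if (sem->waking > 0) {
+		sem->waking--;
+		ret = 1;
+	}
+	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+	return ret;
+}
+#endif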
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative if signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+#define DOWN_VAR \
+ struct task_struct *tsk = current; \
+ wait_queue_t wait; \
+ init_waitqueue_entry(&wait, tsk);
+
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ tsk->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ tsk->state = (task_state); \
+ } \
+ tsk->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __down(struct semaphore * sem)
+{
+ DOWN_VAR
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __down_interruptible(struct semaphore * sem)
+{
+ int ret = 0;
+ DOWN_VAR
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, tsk);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
new file mode 100644
index 000000000..6714fd96d
--- /dev/null
+++ b/arch/sh/kernel/setup.c
@@ -0,0 +1,188 @@
+/*
+ * linux/arch/sh/kernel/setup.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ */
+
+/*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <asm/processor.h>
+#include <linux/console.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+
+/*
+ * Machine setup..
+ */
+
+struct sh_cpuinfo boot_cpu_data = { 0, 0, 0, 0, };
+extern int _text, _etext, _edata, _end, _stext, __bss_start;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+#endif
+
+extern int root_mountflags;
+
+#define COMMAND_LINE_SIZE 1024
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+ char saved_command_line[COMMAND_LINE_SIZE];
+
+extern unsigned char *root_fs_image;
+
+struct resource standard_io_resources[] = {
+ { "dma1", 0x00, 0x1f },
+ { "pic1", 0x20, 0x3f },
+ { "timer", 0x40, 0x5f },
+ { "keyboard", 0x60, 0x6f },
+ { "dma page reg", 0x80, 0x8f },
+ { "pic2", 0xa0, 0xbf },
+ { "dma2", 0xc0, 0xdf },
+ { "fpu", 0xf0, 0xff }
+};
+
+#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
+
+
+/* System RAM - interrupted by the 640kB-1M hole */
+#define code_resource (ram_resources[3])
+#define data_resource (ram_resources[4])
+static struct resource ram_resources[] = {
+ { "System RAM", 0x000000, 0x09ffff, IORESOURCE_BUSY },
+ { "System RAM", 0x100000, 0x100000, IORESOURCE_BUSY },
+ { "Video RAM area", 0x0a0000, 0x0bffff },
+ { "Kernel code", 0x100000, 0 },
+ { "Kernel data", 0, 0 }
+};
+
+/* System ROM resources */
+#define MAXROMS 6
+static struct resource rom_resources[MAXROMS] = {
+ { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
+ { "Video ROM", 0xc0000, 0xc7fff }
+};
+
+
+void __init setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p,
+ unsigned long * memory_end_p)
+{
+ *cmdline_p = command_line;
+ *memory_start_p = (unsigned long) &_end;
+ *memory_end_p = 0x8c400000; /* For my board. */
+ ram_resources[1].end = *memory_end_p-1;
+
+ init_mm.start_code = (unsigned long)&_stext;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ code_resource.start = virt_to_bus(&_text);
+ code_resource.end = virt_to_bus(&_etext)-1;
+ data_resource.start = virt_to_bus(&_etext);
+ data_resource.end = virt_to_bus(&_edata)-1;
+
+ ROOT_DEV = MKDEV(FLOPPY_MAJOR, 0);
+
+ initrd_below_start_ok = 1;
+ initrd_start = (long)&root_fs_image;
+ initrd_end = (long)&__bss_start;
+ mount_initrd = 1;
+
+
+#if 0
+ /* Request the standard RAM and ROM resources - they eat up PCI memory space */
+ request_resource(&iomem_resource, ram_resources+0);
+ request_resource(&iomem_resource, ram_resources+1);
+ request_resource(&iomem_resource, ram_resources+2);
+ request_resource(ram_resources+1, &code_resource);
+ request_resource(ram_resources+1, &data_resource);
+#endif
+
+#if 0
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, standard_io_resources+i);
+#endif
+
+#if 0
+ rd_image_start = (long)root_fs_image;
+ rd_prompt = 0;
+ rd_doload = 1;
+#endif
+
+#if 0
+ ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+#endif
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#if 0
+ if (LOADER_TYPE) {
+ initrd_start = INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
+ initrd_end = initrd_start+INITRD_SIZE;
+ if (initrd_end > memory_end) {
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ initrd_end,memory_end);
+ initrd_start = 0;
+ }
+ }
+#endif
+
+#endif
+}
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+int get_cpuinfo(char *buffer)
+{
+ char *p = buffer;
+
+#ifdef CONFIG_CPU_SH3
+ p += sprintf(p,"cpu family\t: SH3\n"
+ "cache size\t: 8K-byte\n");
+#elif defined(CONFIG_CPU_SH4)
+ p += sprintf(p,"cpu family\t: SH4\n"
+ "cache size\t: ??K-byte\n");
+#endif
+ p += sprintf(p, "bogomips\t: %lu.%02lu\n\n",
+ (loops_per_sec+2500)/500000,
+ ((loops_per_sec+2500)/5000) % 100);
+
+ return p - buffer;
+}
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c
new file mode 100644
index 000000000..2b1b9ea2e
--- /dev/null
+++ b/arch/sh/kernel/sh_ksyms.c
@@ -0,0 +1,48 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/mca.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu(elf_fpregset_t *);
+
+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
+extern struct drive_info_struct drive_info;
+EXPORT_SYMBOL(drive_info);
+#endif
+
+/* platform dependent support */
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(local_bh_count);
+EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy);
+
+EXPORT_SYMBOL(strtok);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+
+#ifdef CONFIG_VT
+EXPORT_SYMBOL(screen_info);
+#endif
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
new file mode 100644
index 000000000..66fa36c85
--- /dev/null
+++ b/arch/sh/kernel/signal.c
@@ -0,0 +1,597 @@
+/*
+ * linux/arch/sh/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
+ int options, unsigned long *ru);
+asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int
+sys_sigsuspend(old_sigset_t mask,
+ unsigned long r5, unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.u_regs[0] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs,&saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.u_regs[0] = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&regs, &saveset))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ return do_sigaltstack(uss, uoss, regs.u_regs[UREG_SP]);
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+struct sigframe
+{
+ struct sigcontext sc;
+ /* FPU should come here: SH-3 has no FPU */
+ unsigned long extramask[_NSIG_WORDS-1];
+ char retcode[4];
+};
+
+struct rt_sigframe
+{
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ struct ucontext uc;
+ /* FPU should come here: SH-3 has no FPU */
+ char retcode[4];
+};
+
+
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *r0_p)
+{
+ unsigned int err = 0;
+
+#define COPY(x) err |= __get_user(regs->x, &sc->x)
+ COPY(u_regs[1]);
+ COPY(u_regs[2]); COPY(u_regs[3]);
+ COPY(u_regs[4]); COPY(u_regs[5]);
+ COPY(u_regs[6]); COPY(u_regs[7]);
+ COPY(u_regs[8]); COPY(u_regs[9]);
+ COPY(u_regs[10]); COPY(u_regs[11]);
+ COPY(u_regs[12]); COPY(u_regs[13]);
+ COPY(u_regs[14]); COPY(u_regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+ regs->syscall_nr = -1; /* disable syscall checks */
+ err |= __get_user(*r0_p, &sc->u_regs[0]);
+
+ return err;
+}
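+
+/*
+ * For clarity: each COPY(x) above expands to an unchecked user-space
+ * fetch whose error results are OR-ed together, e.g. COPY(gbr) is
+ *
+ *	err |= __get_user(regs->gbr, &sc->gbr);
+ *
+ * so one test of "err" at the end catches any faulting access.
+ */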
+
+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct sigframe *frame = (struct sigframe *)regs.u_regs[UREG_SP];
+ sigset_t set;
+ int r0;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->sc, &r0))
+ goto badframe;
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ struct rt_sigframe *frame = (struct rt_sigframe *)regs.u_regs[UREG_SP];
+ sigset_t set;
+ stack_t st;
+ int r0;
+
+ if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->uc.uc_mcontext, &r0))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs.u_regs[UREG_SP]);
+
+ return r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) err |= __put_user(regs->x, &sc->x)
+ COPY(u_regs[0]); COPY(u_regs[1]);
+ COPY(u_regs[2]); COPY(u_regs[3]);
+ COPY(u_regs[4]); COPY(u_regs[5]);
+ COPY(u_regs[6]); COPY(u_regs[7]);
+ COPY(u_regs[8]); COPY(u_regs[9]);
+ COPY(u_regs[10]); COPY(u_regs[11]);
+ COPY(u_regs[12]); COPY(u_regs[13]);
+ COPY(u_regs[14]); COPY(u_regs[15]);
+ COPY(gbr); COPY(mach);
+ COPY(macl); COPY(pr);
+ COPY(sr); COPY(pc);
+#undef COPY
+
+ /* non-iBCS2 extensions.. */
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
+{
+ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void *)((sp - frame_size) & -8ul);
+}
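+
+/*
+ * Note: -8ul is ~7ul, so the "& -8ul" above rounds the frame address
+ * down to an 8-byte boundary (e.g. 0x7fffffb6 & -8ul == 0x7fffffb0).
+ */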
+
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->u_regs[UREG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ err |= __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+ } else {
+		/* This is : mov #__NR_sigreturn,r0 ; trapa #0 */
+#ifdef CONFIG_LITTLE_ENDIAN
+ unsigned long code = 0x00c300e0 | (__NR_sigreturn << 8);
+#else
+ unsigned long code = 0xe000c300 | (__NR_sigreturn << 16);
+#endif
+
+ regs->pr = (unsigned long) frame->retcode;
+ err |= __put_user(code, (long *)(frame->retcode+0));
+ }
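+	/*
+	 * Note: "code" above packs the two 16-bit opcodes of the stub --
+	 * "mov #__NR_sigreturn,r0" (0xe0nn) followed by "trapa #0"
+	 * (0xc300) -- into one long, arranged so the syscall number
+	 * lands in the right byte for the configured endianness.
+	 */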
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->u_regs[UREG_SP] = (unsigned long) frame;
+ regs->u_regs[4] = signal; /* Arg for signal handler */
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+ int signal;
+
+ frame = get_sigframe(ka, regs->u_regs[UREG_SP], sizeof(*frame));
+
+ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+ goto give_sigsegv;
+
+ signal = current->exec_domain
+ && current->exec_domain->signal_invmap
+ && sig < 32
+ ? current->exec_domain->signal_invmap[sig]
+ : sig;
+
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= __copy_to_user(&frame->info, info, sizeof(*info));
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->u_regs[UREG_SP]),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ regs->pr = (unsigned long) ka->sa.sa_restorer;
+ } else {
+		/* This is : mov #__NR_sigreturn,r0 ; trapa #0 */
+#ifdef CONFIG_LITTLE_ENDIAN
+ unsigned long code = 0x00c300e0 | (__NR_sigreturn << 8);
+#else
+ unsigned long code = 0xe000c300 | (__NR_sigreturn << 16);
+#endif
+
+ regs->pr = (unsigned long) frame->retcode;
+ err |= __put_user(code, (long *)(frame->retcode+0));
+ }
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up registers for signal handler */
+ regs->u_regs[UREG_SP] = (unsigned long) frame;
+ regs->u_regs[4] = signal; /* Arg for signal handler */
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+ set_fs(USER_DS);
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
+ current->comm, current->pid, frame, regs->pc, regs->pr);
+#endif
+
+ return;
+
+give_sigsegv:
+ if (sig == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Are we from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* If so, check system call restarting.. */
+ switch (regs->u_regs[0]) {
+ case -ERESTARTNOHAND:
+ regs->u_regs[0] = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->u_regs[0] = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ regs->u_regs[0] = regs->syscall_nr;
+ regs->pc -= 2;
+ }
+ }
+
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+int do_signal(struct pt_regs *regs, sigset_t *oldset)
+{
+ siginfo_t info;
+ struct k_sigaction *ka;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ if (do_coredump(signr, regs))
+ exit_code |= 0x80;
+ /* FALLTHRU */
+
+ default:
+ lock_kernel();
+ sigaddset(&current->signal, signr);
+ recalc_sigpending(current);
+ current->flags |= PF_SIGNALED;
+ do_exit(exit_code);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->syscall_nr >= 0) {
+ /* Restart the system call - no handlers present */
+ if (regs->u_regs[0] == -ERESTARTNOHAND ||
+ regs->u_regs[0] == -ERESTARTSYS ||
+ regs->u_regs[0] == -ERESTARTNOINTR) {
+ regs->u_regs[0] = regs->syscall_nr;
+ regs->pc -= 2;
+ }
+ }
+ return 0;
+}
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
new file mode 100644
index 000000000..6999cff81
--- /dev/null
+++ b/arch/sh/kernel/sys_sh.c
@@ -0,0 +1,249 @@
+/*
+ * linux/arch/sh/kernel/sys_sh.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/SuperH
+ * platform.  Largely taken from the Linux/i386 version.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way Unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ lock_kernel();
+ error = do_pipe(fd);
+ unlock_kernel();
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/i386 didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ down(&current->mm->mmap_sem);
+ lock_kernel();
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
+ if (file)
+ fput(file);
+out:
+ unlock_kernel();
+ up(&current->mm->mmap_sem);
+ return error;
+}
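+
+/*
+ * Illustrative only (hypothetical user-side stub): the six mmap
+ * arguments reach old_mmap() through one memory block instead of
+ * registers:
+ *
+ *	struct mmap_arg_struct a = { addr, len, prot, flags, fd, off };
+ *	ret = syscall(__NR_mmap, &a);
+ */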
+
+extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second,
+ int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void **) ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+
+ if (copy_from_user(&tmp,
+ (struct ipc_kludge *) ptr,
+ sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv (first,
+ (struct msgbuf *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second,
+ (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = sys_shmat (first, (char *) ptr,
+ second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong *) third);
+ }
+ case 1: /* iBCS2 emulator entry point */
+ if (!segment_eq(get_fs(), get_ds()))
+ return -EINVAL;
+ return sys_shmat (first, (char *) ptr,
+ second, (ulong *) third);
+ }
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second,
+ (struct shmid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
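+
+/*
+ * Example dispatch, for illustration: a semop() call from libc
+ * arrives here as
+ *
+ *	sys_ipc(SEMOP, semid, nsops, 0, sops, 0);
+ *
+ * and is routed to sys_semop(semid, sops, nsops) above.
+ */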
+
+/*
+ * Old cruft
+ */
+asmlinkage int sys_uname(struct old_utsname * name)
+{
+ int err;
+ if (!name)
+ return -EFAULT;
+ down(&uts_sem);
+ err=copy_to_user(name, &system_utsname, sizeof (*name));
+ up(&uts_sem);
+ return err?-EFAULT:0;
+}
+
+asmlinkage int sys_olduname(struct oldold_utsname * name)
+{
+ int error;
+
+ if (!name)
+ return -EFAULT;
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ return -EFAULT;
+
+ down(&uts_sem);
+
+ error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+ error |= __put_user(0,name->sysname+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+ error |= __put_user(0,name->nodename+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+ error |= __put_user(0,name->release+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+ error |= __put_user(0,name->version+__OLD_UTS_LEN);
+ error |= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+ error |= __put_user(0,name->machine+__OLD_UTS_LEN);
+
+ up(&uts_sem);
+
+ error = error ? -EFAULT : 0;
+
+ return error;
+}
+
+asmlinkage int sys_pause(void)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
diff --git a/arch/sh/kernel/test-img.c b/arch/sh/kernel/test-img.c
new file mode 100644
index 000000000..daade9f6d
--- /dev/null
+++ b/arch/sh/kernel/test-img.c
@@ -0,0 +1,69 @@
+unsigned char root_fs_image[]
+__attribute__((__section__(".data.disk_image")))
+= {
+0x1f,0x8b,0x08,0x08,0x5d,0xd5,0xc7,0x37,0x00,0x03,0x72,0x2e,0x62,0x69,0x6e,0x00,
+0xed,0xdc,0x3f,0x6c,0x1b,0x55,0x1c,0xc0,0xf1,0xdf,0xf9,0xdc,0x04,0x27,0x69,0xb1,
+0x93,0x14,0x10,0x48,0x91,0xd3,0x02,0x4d,0x8a,0xb8,0xd4,0x21,0x8a,0x09,0x02,0x02,
+0xb5,0x4a,0xab,0x52,0x65,0x69,0x11,0x03,0x42,0xc2,0xb1,0x8f,0xc4,0x92,0xe3,0x03,
+0x9f,0x8d,0xca,0x14,0xd8,0x88,0x2a,0xa6,0x0e,0x88,0xa9,0x20,0xb1,0x87,0x8d,0xa5,
+0x5b,0x86,0xcc,0x90,0x78,0x77,0xd4,0x60,0x75,0xa9,0x40,0xe2,0xdf,0xd0,0x42,0x78,
+0x77,0xef,0x9c,0x38,0x24,0x72,0x49,0x20,0xc9,0x70,0xdf,0x8f,0xf2,0xf3,0xd9,0x77,
+0xbf,0xf3,0xbb,0x67,0xbf,0xdf,0xf9,0x4f,0xf4,0x2c,0x02,0x20,0xac,0xe2,0x2a,0x5e,
+0x53,0x61,0xaa,0x18,0x0e,0xd6,0x19,0xad,0x09,0x49,0x1d,0x5e,0x5e,0x7d,0x75,0x39,
+0xfd,0x6c,0x6d,0x39,0x6d,0x48,0xbf,0x5c,0xfd,0xc9,0xf0,0xf3,0x56,0xd5,0x3a,0x99,
+0xba,0xf7,0xd0,0x76,0x8a,0x53,0x5f,0xc4,0xdf,0xcd,0x24,0x56,0x6e,0x9e,0x59,0xb9,
+0x30,0x3e,0x73,0x3b,0xf7,0x3f,0x76,0x01,0xc0,0x3e,0x79,0x75,0x1f,0x55,0x71,0x4c,
+0x74,0xfd,0x47,0x8f,0xf6,0x70,0x00,0x1c,0xa2,0x8d,0x8d,0x49,0x6f,0xf1,0xc9,0x06,
+0x00,0x00,0x08,0x8d,0xe6,0xfb,0x00,0xef,0x73,0x7c,0x33,0x0e,0xf3,0xfd,0xc7,0xbd,
+0xd7,0xc5,0xff,0xd0,0x31,0x5a,0x5b,0x4e,0xf7,0x05,0xa1,0xb7,0x1c,0x93,0x48,0x4b,
+0x5e,0xe7,0x61,0x1e,0x14,0x80,0x50,0xf0,0xcf,0x3f,0xe7,0x76,0x3b,0xff,0x45,0xe4,
+0x89,0x96,0xbc,0x47,0x54,0xc4,0x54,0x74,0xa9,0xe8,0x56,0xd1,0xa3,0xe2,0xb8,0x8a,
+0x13,0x2a,0x1e,0x15,0xfd,0xfd,0x68,0x42,0x45,0xaf,0x8a,0xbe,0xbd,0xb6,0xaf,0xce,
+0x7f,0x7f,0xaa,0x76,0xef,0x07,0xd1,0x6c,0xbf,0xf5,0xfc,0xd7,0xbf,0xf7,0xae,0x6d,
+0x32,0xda,0x6c,0x6b,0xb6,0x7f,0x56,0x9d,0x77,0x4f,0x05,0xb1,0x5b,0xfb,0x27,0x0f,
+0xa8,0xfd,0x6f,0x06,0xf5,0xf2,0xfe,0x8e,0xfe,0xff,0x63,0xaf,0xff,0xf0,0xc5,0x54,
+0xdb,0xfe,0x7f,0x7a,0xeb,0xf2,0x15,0x53,0xe4,0xe6,0xaa,0x7e,0xed,0x19,0x0b,0xda,
+0xbf,0x75,0xd9,0xd8,0xd6,0xff,0xc7,0xf6,0xdf,0x7c,0xdb,0xf6,0x37,0xbe,0xd6,0x63,
+0x6a,0xe7,0xe3,0xbf,0x7d,0x2f,0xcb,0x1a,0x29,0x16,0x4a,0xd5,0xeb,0xe5,0x7d,0x7c,
+0x73,0xde,0xae,0x7d,0xaf,0x8f,0x3d,0x2a,0xc3,0xda,0xbc,0x1e,0x51,0x6d,0xe9,0x31,
+0xde,0xaf,0x8e,0xac,0xe8,0xb8,0x95,0xe7,0xde,0x77,0xaa,0xa5,0xbc,0x1e,0xf3,0x3d,
+0x62,0x4a,0xde,0xfe,0xc8,0x1f,0xfb,0x3d,0xea,0x49,0x71,0xa7,0x0b,0x25,0x6f,0xfc,
+0xdf,0x36,0x3b,0x65,0xdf,0x07,0x08,0xe0,0x48,0xe8,0xd7,0xb2,0xad,0xfa,0xff,0xd5,
+0xd4,0xf5,0x0f,0x20,0x24,0xf8,0xa7,0x1f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,0x10,0x5e,0xd4,0x3f,
+0x10,0x4a,0x7a,0x4e,0xcf,0xce,0xf9,0x3f,0xde,0xbc,0xb6,0xbb,0x66,0xa7,0xe4,0x9c,
+0x92,0xeb,0x14,0xed,0xa3,0x3d,0x48,0x00,0x07,0x42,0xcf,0xe3,0xdb,0x59,0xff,0xde,
+0x7c,0xd6,0xbb,0x66,0x54,0x0a,0xa5,0x42,0xe5,0x68,0x8f,0x10,0xc0,0x41,0x99,0xbf,
+0x70,0xe5,0x0d,0x23,0xd2,0x32,0x43,0x38,0x22,0x67,0xc5,0x9f,0x32,0x1c,0xff,0x4a,
+0x2d,0xc7,0xd4,0xd5,0x75,0x7f,0xfd,0x98,0x24,0xd5,0xb6,0x21,0x89,0xf9,0x53,0xe1,
+0x83,0x1d,0xe2,0x41,0x18,0xd3,0x3a,0xfc,0x9f,0x11,0x34,0x74,0x78,0xb7,0x07,0x83,
+0xd8,0xd4,0xe1,0x27,0x67,0xd6,0x8d,0x46,0x7c,0xa4,0x51,0x8f,0xd6,0x3a,0x4a,0xbf,
+0x2c,0xc9,0x7b,0xa7,0x3f,0x33,0x16,0xcc,0x5a,0xb4,0x61,0xd6,0xa3,0x4b,0xe2,0xdc,
+0x91,0xee,0xd2,0xef,0x22,0x89,0xa7,0x55,0xbc,0x38,0xd2,0x98,0xff,0xb9,0x1e,0xf1,
+0xb2,0xa6,0xcd,0xf3,0x89,0x85,0xce,0x75,0xa3,0xf6,0x78,0xe3,0xa4,0x97,0x27,0xb1,
+0xc5,0xbf,0x24,0x76,0x6a,0x68,0xa1,0x7b,0xa5,0x6f,0x4d,0x3e,0x34,0x52,0xe9,0x1b,
+0x0f,0xf2,0xa7,0x7f,0x34,0xea,0xcf,0x2c,0xc9,0xe2,0x1f,0x6b,0x6a,0xfb,0xf7,0x27,
+0xd6,0x0d,0xab,0xd7,0xbe,0xb3,0x26,0x03,0x89,0x86,0x0c,0xf4,0xd6,0x33,0x03,0x7d,
+0x4b,0xf2,0x43,0xd7,0xba,0x21,0xb1,0x5a,0xac,0x71,0xdc,0xbb,0x17,0x2f,0x4f,0xed,
+0x7b,0xe6,0xc6,0x83,0xc5,0xdf,0xbc,0xf5,0xaa,0xcd,0x97,0xe5,0x9d,0xcf,0xe7,0x55,
+0xbf,0x2a,0xf2,0xdd,0x93,0x1b,0xea,0xf6,0xb5,0x6b,0xb3,0x05,0x37,0xa9,0xfe,0xae,
+0x56,0x3f,0xb0,0xcb,0x97,0x06,0xbd,0xe9,0xda,0x32,0x39,0xd9,0x25,0xae,0x33,0x67,
+0x57,0x66,0x0b,0xa5,0x99,0x64,0xb5,0x54,0x75,0xab,0xd9,0xa2,0x65,0x59,0xde,0xc6,
+0x4b,0x76,0xb1,0xe8,0x24,0xdf,0x76,0xca,0xc5,0xbc,0x97,0x7c,0x31,0x93,0x79,0x29,
+0x39,0x74,0x71,0xea,0xad,0xe1,0xa4,0x3d,0x93,0x73,0x9f,0x1f,0xb5,0x26,0x52,0xd6,
+0xf8,0x78,0x32,0x35,0x31,0x31,0x71,0xee,0x85,0xd4,0x58,0x72,0xc8,0x5f,0x9d,0xb2,
+0x52,0xd6,0x68,0xb2,0x6c,0x17,0xed,0xac,0x6b,0x0f,0x8b,0x58,0xee,0xc7,0x73,0x95,
+0xec,0xb4,0x5a,0x56,0xca,0x7a,0x39,0xdb,0xbc,0x56,0xb1,0xaf,0x57,0xc4,0x2a,0x3b,
+0xf9,0x6c,0x25,0x2b,0x96,0xbe,0xcc,0x55,0x9c,0xb2,0xab,0x6e,0xe8,0xc5,0xb4,0xab,
+0x2e,0x72,0xce,0xdc,0x9c,0x5d,0xda,0xd3,0xe9,0xfb,0xa9,0xe0,0xf9,0xeb,0xf0,0xfb,
+0x2f,0xe2,0xc5,0xb7,0x2d,0xdb,0x9b,0x9f,0x14,0x07,0x83,0xbc,0x88,0x7e,0x9e,0x0c,
+0x15,0xf2,0xea,0x2e,0x79,0xc3,0x41,0x9e,0xa9,0xc7,0x81,0xd1,0x3a,0x16,0x64,0x6b,
+0x1c,0xc9,0xc8,0xd6,0xb8,0x69,0x9b,0x37,0xfe,0x2f,0xf3,0x5e,0x11,0xfd,0x93,0x0d,
+0x0f,0x6b,0xf7,0xbc,0x6c,0x9b,0x1e,0xef,0xe7,0xa5,0x77,0xc9,0x4b,0xe8,0xfb,0xda,
+0x5c,0xfd,0xa5,0xba,0x78,0x73,0x97,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+0x00,0x00,0x00,0x00,0x00,0x87,0xe8,0x6f,0x20,0x01,0xec,0xc5,0x00,0x00,0x01,0x00,
+};
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
new file mode 100644
index 000000000..6f4598a7e
--- /dev/null
+++ b/arch/sh/kernel/time.c
@@ -0,0 +1,224 @@
+/*
+ * linux/arch/sh/kernel/time.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Some code taken from i386 version.
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#include <linux/timex.h>
+#include <linux/irq.h>
+
+#define TMU_TOCR 0xfffffe90 /* Byte access */
+#define TMU_TSTR 0xfffffe92 /* Byte access */
+
+#define TMU0_TCOR 0xfffffe94 /* Long access */
+#define TMU0_TCNT 0xfffffe98 /* Long access */
+#define TMU0_TCR 0xfffffe9c /* Word access */
+
+#define TMU_TOCR_INIT 0x00
+#define TMU0_TCR_INIT 0x0020
+#define TMU_TSTR_INIT 1
+
+#define CLOCK_MHZ (60/4)
+#define INTERVAL 37500 /* (1000000*CLOCK_MHZ/HZ/2) ??? */
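+
+/*
+ * TMU0 counts down from the TCOR reload value and interrupts on
+ * underflow, so INTERVAL directly sets the tick period: it should
+ * come to 1/HZ seconds worth of timer input clocks.  The right
+ * value depends on the input-clock divider, which is why the
+ * formula above is still marked "???".
+ */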
+
+extern rwlock_t xtime_lock;
+#define TICK_SIZE tick
+
+void do_gettimeofday(struct timeval *tv)
+{
+ extern volatile unsigned long lost_ticks;
+ unsigned long flags;
+ unsigned long usec, sec;
+
+ read_lock_irqsave(&xtime_lock, flags);
+ usec = 0;
+ {
+ unsigned long lost = lost_ticks;
+ if (lost)
+ usec += lost * (1000000 / HZ);
+ }
+ sec = xtime.tv_sec;
+ usec += xtime.tv_usec;
+ read_unlock_irqrestore(&xtime_lock, flags);
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ write_lock_irq(&xtime_lock);
+ xtime = *tv;
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_unlock_irq(&xtime_lock);
+}
+
+/*
+ * Set the RTC to the given time.  Not implemented on this board yet.
+ */
+static int set_rtc_time(unsigned long nowtime)
+{
+/* XXX should be implemented XXXXXXXXXX */
+ int retval = -1;
+
+ return retval;
+}
+
+/* last time the RTC clock got updated */
+static long last_rtc_update = 0;
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ do_timer(regs);
+
+#if 0
+ if (!user_mode(regs))
+ sh_do_profile(regs->pc);
+#endif
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * RTC clock accordingly every ~11 minutes. set_rtc_time() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec >= 500000 - ((unsigned) tick) / 2 &&
+ xtime.tv_usec <= 500000 + ((unsigned) tick) / 2) {
+ if (set_rtc_time(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+ }
+}
+
+/*
+ * This is the timer interrupt proper: clear the TMU0 underflow
+ * flag, then run the common do_timer_interrupt() work under
+ * xtime_lock.
+ */
+static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long __dummy;
+
+ /* Clear UNF bit */
+ asm volatile("mov.w %1,%0\n\t"
+ "and %2,%0\n\t"
+ "mov.w %0,%1"
+ : "=&z" (__dummy)
+ : "m" (__m(TMU0_TCR)), "r" (~0x100));
+
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because as just said we have irq
+ * locally disabled. -arca
+ */
+ write_lock(&xtime_lock);
+
+ do_timer_interrupt(irq, NULL, regs);
+
+ write_unlock(&xtime_lock);
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
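+
+/*
+ * Quick sanity check of the formula: mktime(1970, 1, 1, 0, 0, 0)
+ * yields 0 and mktime(1970, 1, 2, 0, 0, 0) yields 86400, as
+ * expected for seconds since the epoch.
+ */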
+
+static unsigned long get_rtc_time(void)
+{
+/* XXX not implemented yet */
+ return 0;
+}
+
+static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NULL, NULL};
+
+void __init time_init(void)
+{
+ unsigned long __dummy;
+
+ xtime.tv_sec = get_rtc_time();
+ xtime.tv_usec = 0;
+
+ set_ipr_data(TIMER_IRQ, TIMER_IRP_OFFSET, TIMER_PRIORITY);
+ setup_irq(TIMER_IRQ, &irq0);
+
+ /* Start TMU0 */
+ asm volatile("mov %1,%0\n\t"
+ "mov.b %0,%2 ! external clock input\n\t"
+ "mov %3,%0\n\t"
+ "mov.w %0,%4 ! enable timer0 interrupt\n\t"
+ "mov.l %5,%6\n\t"
+ "mov.l %5,%7\n\t"
+ "mov %8,%0\n\t"
+ "mov.b %0,%9"
+ : "=&z" (__dummy)
+ : "i" (TMU_TOCR_INIT), "m" (__m(TMU_TOCR)),
+ "i" (TMU0_TCR_INIT), "m" (__m(TMU0_TCR)),
+ "r" (INTERVAL), "m" (__m(TMU0_TCOR)), "m" (__m(TMU0_TCNT)),
+ "i" (TMU_TSTR_INIT), "m" (__m(TMU_TSTR)));
+#if 0
+ /* Start RTC */
+ asm volatile("");
+#endif
+}
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
new file mode 100644
index 000000000..3d3cba23c
--- /dev/null
+++ b/arch/sh/kernel/traps.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/sh/kernel/traps.c
+ *
+ * SuperH version: Copyright (C) 1999 Niibe Yutaka
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+static inline void console_verbose(void)
+{
+ extern int console_loglevel;
+ console_loglevel = 15;
+}
+
+#define DO_ERROR(trapnr, signr, str, name, tsk) \
+asmlinkage void do_##name(unsigned long r4, unsigned long r5, \
+ unsigned long r6, unsigned long r7, \
+ struct pt_regs regs) \
+{ \
+ unsigned long error_code; \
+ \
+ asm volatile("stc r2_bank,%0": "=r" (error_code)); \
+ sti(); \
+ regs.syscall_nr = -1; \
+ tsk->thread.error_code = error_code; \
+ tsk->thread.trap_no = trapnr; \
+ force_sig(signr, tsk); \
+ die_if_no_fixup(str,&regs,error_code); \
+}
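+
+/*
+ * For reference: DO_ERROR(12, SIGILL, "reserved instruction",
+ * reserved_inst, current) below expands to a do_reserved_inst()
+ * handler that reads the error code from r2_bank, sends SIGILL to
+ * the current task and, for kernel-mode faults, falls back to the
+ * exception-table fixup in die_if_no_fixup().
+ */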
+
+/*
+ * These constants are for searching for possible module text
+ * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
+ * a guess of how much space is likely to be vmalloced.
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define MODULE_RANGE (8*1024*1024)
+
+static void show_registers(struct pt_regs *regs)
+{/* Not implemented yet. */
+}
+
+spinlock_t die_lock;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ printk("%s: %04lx\n", str, err & 0xffff);
+ show_registers(regs);
+ spin_unlock_irq(&die_lock);
+ do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+ if (!user_mode(regs))
+ {
+ unsigned long fixup;
+ fixup = search_exception_table(regs->pc);
+ if (fixup) {
+ regs->pc = fixup;
+ return;
+ }
+ die(str, regs, err);
+ }
+}
+
+DO_ERROR( 7, SIGSEGV, "address error (load)", address_error_load, current)
+DO_ERROR( 8, SIGSEGV, "address error (store)", address_error_store, current)
+DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
+DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
+
+asmlinkage void do_exception_error (unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs regs)
+{
+ long ex;
+ asm volatile("stc r2_bank,%0" : "=r" (ex));
+ die_if_kernel("exception", &regs, ex);
+}
+
+void __init trap_init(void)
+{
+ extern void *vbr_base;
+ extern void *exception_handling_table[14];
+
+ exception_handling_table[7] = (void *)do_address_error_load;
+ exception_handling_table[8] = (void *)do_address_error_store;
+ exception_handling_table[12] = (void *)do_reserved_inst;
+ exception_handling_table[13] = (void *)do_illegal_slot_inst;
+
+	/* NOTE: The VBR value should be in P1
+	   (or P2, the virtual "fixed" address space).
+	   It definitely should not be a physical address. */
+
+ asm volatile("ldc %0,vbr"
+ : /* no output */
+ : "r" (&vbr_base)
+ : "memory");
+}
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
new file mode 100644
index 000000000..a9010ddb2
--- /dev/null
+++ b/arch/sh/lib/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for SuperH-specific library files..
+#
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
+
+L_TARGET = lib.a
+# L_OBJS = checksum.o old-checksum.o semaphore.o delay.o \
+# usercopy.o getuser.o putuser.o
+L_OBJS = delay.o memcpy.o memset.o memmove.o csum_partial_copy.o \
+ wordcopy.o checksum.o # usercopy.o getuser.o putuser.o
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sh/lib/checksum.c b/arch/sh/lib/checksum.c
new file mode 100644
index 000000000..f076bd3c6
--- /dev/null
+++ b/arch/sh/lib/checksum.c
@@ -0,0 +1,170 @@
+/*
+ * Taken from:
+ * arch/alpha/lib/checksum.c
+ *
+ * This file contains network checksum routines that are better done
+ * in an architecture-specific manner due to speed..
+ */
+
+#include <linux/string.h>
+
+#include <asm/byteorder.h>
+
+static inline unsigned short from64to16(unsigned long long x)
+{
+ /* add up 32-bit words for 33 bits */
+ x = (x & 0xffffffff) + (x >> 32);
+ /* add up 16-bit and 17-bit words for 17+c bits */
+ x = (x & 0xffff) + (x >> 16);
+ /* add up 16-bit and 2-bit for 16+c bit */
+ x = (x & 0xffff) + (x >> 16);
+ /* add up carry.. */
+ x = (x & 0xffff) + (x >> 16);
+ return x;
+}
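+
+/*
+ * Worked example: folding x = 0x10000ffff proceeds as
+ *	0x0000ffff + 0x1    = 0x00010000	(64 -> 33 bits)
+ *	0x0000     + 0x0001 = 0x00000001	(high half folded in)
+ * and the remaining folds leave 0x0001 -- every carry ends up
+ * wrapped back into the 16-bit sum.
+ */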
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented.
+ */
+unsigned short int csum_tcpudp_magic(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ return ~from64to16(saddr + daddr + sum +
+ ((unsigned long) ntohs(len) << 16) +
+ ((unsigned long) proto << 8));
+}
+
+unsigned int csum_tcpudp_nofold(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ unsigned long long result;
+
+ result = (saddr + daddr + sum +
+ ((unsigned long) ntohs(len) << 16) +
+ ((unsigned long) proto << 8));
+
+	/* Fold down to 32 bits so we don't lose anything in the
+	   typedef-less network stack. */
+ /* 64 to 33 */
+ result = (result & 0xffffffff) + (result >> 32);
+ /* 33 to 32 */
+ result = (result & 0xffffffff) + (result >> 32);
+ return result;
+}
+
+/*
+ * Do a 64-bit checksum on an arbitrary memory area..
+ *
+ * This isn't a great routine, but it's not _horrible_ either. The
+ * inner loop could be unrolled a bit further, and there are better
+ * ways to do the carry, but this is reasonable.
+ */
+static inline unsigned long do_csum(const unsigned char * buff, int len)
+{
+ int odd, count;
+ unsigned long long result = 0;
+
+ if (len <= 0)
+ goto out;
+ odd = 1 & (unsigned long) buff;
+ if (odd) {
+ result = *buff << 8;
+ len--;
+ buff++;
+ }
+ count = len >> 1; /* nr of 16-bit words.. */
+ if (count) {
+ if (2 & (unsigned long) buff) {
+ result += *(unsigned short *) buff;
+ count--;
+ len -= 2;
+ buff += 2;
+ }
+ count >>= 1; /* nr of 32-bit words.. */
+ if (count) {
+ if (4 & (unsigned long) buff) {
+ result += *(unsigned int *) buff;
+ count--;
+ len -= 4;
+ buff += 4;
+ }
+ count >>= 1; /* nr of 64-bit words.. */
+ if (count) {
+ unsigned long carry = 0;
+ do {
+ unsigned long w = *(unsigned long *) buff;
+ count--;
+ buff += 8;
+ result += carry;
+ result += w;
+ carry = (w > result);
+ } while (count);
+ result += carry;
+ result = (result & 0xffffffff) + (result >> 32);
+ }
+ if (len & 4) {
+ result += *(unsigned int *) buff;
+ buff += 4;
+ }
+ }
+ if (len & 2) {
+ result += *(unsigned short *) buff;
+ buff += 2;
+ }
+ }
+ if (len & 1)
+ result += *buff;
+ result = from64to16(result);
+ if (odd)
+ result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
+out:
+ return result;
+}
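+
+/*
+ * Worked example (illustrative, little-endian): do_csum() over the
+ * three bytes { 0x01, 0x02, 0x03 } at an even address reads the
+ * 16-bit word 0x0201, adds the odd trailing byte 0x03, and returns
+ * 0x0204.  From an odd start address the first byte is accumulated
+ * shifted left by 8 and the final result is byte-swapped, so the
+ * same data yields the same sum regardless of alignment.
+ */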
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+{
+ return ~do_csum(iph,ihl*4);
+}
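+
+/*
+ * (A header whose checksum field is already correct sums to 0xffff,
+ * so ip_fast_csum() returns 0 for a valid header; the receive-side
+ * check is thus simply: if (ip_fast_csum(iph, ihl)) goto drop;)
+ */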
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+{
+ unsigned long long result = do_csum(buff, len);
+
+ /* add in old sum, and carry.. */
+ result += sum;
+ /* 32+c bits -> 32 bits */
+ result = (result & 0xffffffff) + (result >> 32);
+ return result;
+}
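+
+/*
+ * Typical use (illustrative): checksum a UDP datagram by summing the
+ * header-plus-payload first and folding the pseudo-header in last:
+ *
+ *	unsigned int sum = csum_partial(udp, len, 0);
+ *	unsigned short check =
+ *		csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
+ */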
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+unsigned short ip_compute_csum(unsigned char * buff, int len)
+{
+ return ~from64to16(do_csum(buff,len));
+}
diff --git a/arch/sh/lib/csum_partial_copy.c b/arch/sh/lib/csum_partial_copy.c
new file mode 100644
index 000000000..1fb36ab05
--- /dev/null
+++ b/arch/sh/lib/csum_partial_copy.c
@@ -0,0 +1,75 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * MIPS specific IP/TCP/UDP checksumming routines
+ *
+ * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de>
+ * Lots of code moved from tcp.c and ip.c; see those files
+ * for more names.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * $Id: csum_partial_copy.c,v 1.2 1998/09/16 13:29:32 ralf Exp $
+ */
+#include <net/checksum.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+
+/*
+ * copy while checksumming, otherwise like csum_partial
+ */
+unsigned int csum_partial_copy(const char *src, char *dst,
+ int len, unsigned int sum)
+{
+ /*
+ * It's 2:30 am and I don't feel like doing it real ...
+ * This is lots slower than the real thing (tm)
+ */
+ sum = csum_partial(src, len, sum);
+ memcpy(dst, src, len);
+
+ return sum;
+}
+
+/*
+ * Copy from userspace and compute checksum. If we catch an exception
+ * then zero the rest of the buffer.
+ */
+unsigned int csum_partial_copy_from_user (const char *src, char *dst,
+ int len, unsigned int sum,
+ int *err_ptr)
+{
+ int missing;
+
+ missing = copy_from_user(dst, src, len);
+ if (missing) {
+ memset(dst + len - missing, 0, missing);
+ *err_ptr = -EFAULT;
+ }
+
+ return csum_partial(dst, len, sum);
+}
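+
+/*
+ * Note on the error path above: the un-copied tail of the buffer is
+ * zeroed before checksumming, so the returned sum stays well defined
+ * even on a fault; callers must still check *err_ptr and drop the
+ * packet.
+ */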
+
+/*
+ * Copy to userspace and compute checksum.
+ */
+unsigned int csum_partial_copy_to_user (const char *src, char *dst,
+ int len, unsigned int sum,
+ int *err_ptr)
+{
+ sum = csum_partial(src, len, sum);
+
+ if (copy_to_user(dst, src, len)) {
+ *err_ptr = -EFAULT;
+ return sum;
+ }
+
+ return sum;
+}
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
new file mode 100644
index 000000000..cadd7367e
--- /dev/null
+++ b/arch/sh/lib/delay.c
@@ -0,0 +1,21 @@
+/*
+ * Precise Delay Loops for SuperH
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ */
+
+#include <linux/sched.h>
+#include <linux/delay.h>
+
+inline void __const_udelay(unsigned long xloops)
+{
+ xloops *= current_cpu_data.loops_per_sec;
+ __delay(xloops);
+}
+
+#if 0
+void __udelay(unsigned long usecs)
+{
+ __const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
+}
+#endif
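+
+/*
+ * Arithmetic behind the constant above: 2^32 / 10^6 = 4294.967296,
+ * truncated to 4294 = 0x10c6.  Scaling a microsecond count by this
+ * 32.32 fixed-point reciprocal expresses it as a fraction of a
+ * second, and the multiply by loops_per_sec in __const_udelay() then
+ * turns that fraction into delay-loop iterations (other ports keep
+ * the high 32 bits of the 64-bit product here).
+ */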
diff --git a/arch/sh/lib/memcpy.S b/arch/sh/lib/memcpy.S
new file mode 100644
index 000000000..3a4aca919
--- /dev/null
+++ b/arch/sh/lib/memcpy.S
@@ -0,0 +1,131 @@
+! Taken from newlib-1.8.0
+
+!
+! Fast SH memcpy
+!
+! by Toshiyasu Morita (tm@netcom.com)
+! hacked by J"orn Rernnecke (amylaar@cygnus.co.uk) ("o for o-umlaut)
+!
+! Entry: r4: destination pointer
+! r5: source pointer
+! r6: byte count
+!
+! Exit: r0: destination pointer
+! r1-r7: trashed
+!
+! Notes: Usually one wants to do small reads and write a longword, but
+! unfortunately it is difficult in some cases to concatenate bytes
+! into a longword on the SH, so this does a longword read and small
+! writes.
+!
+! This implementation makes two assumptions about how it is called:
+!
+! 1.: If the byte count is nonzero, the address of the last byte to be
+! copied is unsigned greater than the address of the first byte to
+! be copied. This could be easily swapped for a signed comparison,
+! but the algorithm used needs some comparison.
+!
+! 2.: When there are two or three bytes in the last word of an 11-or-more
+! byte memory chunk to be copied, the rest of the word can be read
+! without side effects.
+! This could easily be changed by increasing the minimum size of
+! a fast memcpy and the amount subtracted from r7 before L_2l_loop to 2;
+! however, this would cost a few extra cycles on average.
+!
+
+#include <linux/linkage.h>
+ENTRY(memcpy)
+ ! Big endian version copies with decreasing addresses.
+ mov r4,r0
+ add r6,r0
+ sub r4,r5
+ mov #11,r1
+ cmp/hs r1,r6
+ bf/s L_small
+ add #-1,r5
+ mov r5,r3
+ add r0,r3
+ shlr r3
+ bt/s L_even
+ mov r4,r7
+ mov.b @(r0,r5),r2
+ add #-1,r3
+ mov.b r2,@-r0
+L_even:
+ tst #1,r0
+ add #-1,r5
+ bf/s L_odddst
+ add #8,r7
+ tst #2,r0
+ bt L_al4dst
+ add #-1,r3
+ mov.w @(r0,r5),r1
+ mov.w r1,@-r0
+L_al4dst:
+ shlr r3
+ bt L_al4both
+ mov.w @(r0,r5),r1
+ swap.w r1,r1
+ add #4,r7
+ add #-4,r5
+ .align 2
+L_2l_loop:
+ mov.l @(r0,r5),r2
+ xtrct r2,r1
+ mov.l r1,@-r0
+ cmp/hs r7,r0
+ mov.l @(r0,r5),r1
+ xtrct r1,r2
+ mov.l r2,@-r0
+ bt L_2l_loop
+ bra L_cleanup
+ add #5,r5
+
+ nop ! avoid nop in executed code.
+L_al4both:
+ add #-2,r5
+ .align 2
+L_al4both_loop:
+ mov.l @(r0,r5),r1
+ cmp/hs r7,r0
+ bt/s L_al4both_loop
+ mov.l r1,@-r0
+ bra L_cleanup
+ add #3,r5
+
+ nop ! avoid nop in executed code.
+L_odddst:
+ shlr r3
+ bt L_al4src
+ mov.w @(r0,r5),r1
+ mov.b r1,@-r0
+ shlr8 r1
+ mov.b r1,@-r0
+L_al4src:
+ add #-2,r5
+ .align 2
+L_odd_loop:
+ mov.l @(r0,r5),r2
+ cmp/hs r7,r0
+ mov.b r2,@-r0
+ shlr8 r2
+ mov.w r2,@-r0
+ shlr16 r2
+ mov.b r2,@-r0
+ bt L_odd_loop
+
+ add #3,r5
+L_cleanup:
+L_small:
+ cmp/eq r4,r0
+ bt L_ready
+ add #1,r4
+ .align 2
+L_cleanup_loop:
+ mov.b @(r0,r5),r2
+ cmp/eq r4,r0
+ mov.b r2,@-r0
+ bf L_cleanup_loop
+L_ready:
+ rts
+ nop
diff --git a/arch/sh/lib/memmove.S b/arch/sh/lib/memmove.S
new file mode 100644
index 000000000..e95dc3709
--- /dev/null
+++ b/arch/sh/lib/memmove.S
@@ -0,0 +1,422 @@
+#include <linux/linkage.h>
+ENTRY(memmove)
+ mov.l r8,@-r15
+ mov.l r9,@-r15
+ mov.l r14,@-r15
+ sts.l pr,@-r15
+ add #-28,r15
+ mov r15,r14
+ mov.l r4,@r14
+ mov.l r5,@(4,r14)
+ mov.l r6,@(8,r14)
+ mov.l @r14,r1
+ mov.l r1,@(12,r14)
+ mov.l @(4,r14),r1
+ mov.l r1,@(16,r14)
+ mov.l @(12,r14),r1
+ mov.l @(16,r14),r2
+ sub r2,r1
+ mov.l @(8,r14),r2
+ cmp/hs r2,r1
+ bt .L54
+ bra .L2
+ nop
+.L54:
+ mov.l @(8,r14),r1
+ mov #15,r2
+ cmp/gt r2,r1
+ bt .LF100
+ bra .L52
+ nop
+.LF100:
+ mov.l @(12,r14),r2
+ neg r2,r1
+ mov #3,r2
+ and r1,r2
+ mov.l @(8,r14),r1
+ mov r1,r9
+ sub r2,r9
+ mov r9,r2
+ mov.l r2,@(8,r14)
+.L4:
+ mov.l @(12,r14),r2
+ neg r2,r1
+ mov #3,r2
+ and r1,r2
+ mov.l r2,@(20,r14)
+.L7:
+ mov.l @(20,r14),r1
+ cmp/pl r1
+ bt .L9
+ bra .L6
+ nop
+ .align 2
+.L9:
+ mov r14,r2
+ mov r14,r1
+ add #24,r1
+ mov.l @(16,r14),r2
+ mov.b @r2,r3
+ mov.b r3,@r1
+ mov.l @(16,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(20,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(12,r14),r1
+ mov r14,r2
+ mov r14,r3
+ add #24,r3
+ mov.b @r3,r2
+ mov.b r2,@r1
+ mov.l @(12,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(12,r14)
+ bra .L7
+ nop
+ .align 2
+.L8:
+.L6:
+ bra .L5
+ nop
+ .align 2
+.L10:
+ bra .L4
+ nop
+ .align 2
+.L5:
+ nop
+.L11:
+ mov.l @(16,r14),r1
+ mov #3,r2
+ and r1,r2
+ tst r2,r2
+ bf .L14
+ mov r15,r2
+ mov.l @(12,r14),r1
+ mov.l @(16,r14),r2
+ mov.l @(8,r14),r7
+ mov r7,r3
+ shlr2 r3
+ mov r1,r4
+ mov r2,r5
+ mov r3,r6
+ mov.l .L46,r8
+ jsr @r8
+ nop
+ bra .L15
+ nop
+ .align 2
+.L14:
+ mov r15,r2
+ mov.l @(12,r14),r1
+ mov.l @(16,r14),r2
+ mov.l @(8,r14),r7
+ mov r7,r3
+ shlr2 r3
+ mov r1,r4
+ mov r2,r5
+ mov r3,r6
+ mov.l .L47,r8
+ jsr @r8
+ nop
+.L15:
+ mov.l @(8,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l @(16,r14),r2
+ add r2,r1
+ mov.l r1,@(16,r14)
+ mov.l @(8,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l @(12,r14),r2
+ add r2,r1
+ mov.l r1,@(12,r14)
+ mov.l @(8,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov.l r2,@(8,r14)
+.L13:
+.L52:
+ bra .L3
+ nop
+ .align 2
+.L16:
+ bra .L11
+ nop
+ .align 2
+.L12:
+.L3:
+ nop
+.L17:
+ mov.l @(8,r14),r1
+ mov.l r1,@(20,r14)
+.L20:
+ mov.l @(20,r14),r1
+ cmp/pl r1
+ bt .L22
+ bra .L19
+ nop
+ .align 2
+.L22:
+ mov r14,r2
+ mov r14,r1
+ add #24,r1
+ mov.l @(16,r14),r2
+ mov.b @r2,r3
+ mov.b r3,@r1
+ mov.l @(16,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(20,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(12,r14),r1
+ mov r14,r2
+ mov r14,r3
+ add #24,r3
+ mov.b @r3,r2
+ mov.b r2,@r1
+ mov.l @(12,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(12,r14)
+ bra .L20
+ nop
+ .align 2
+.L21:
+.L19:
+ bra .L18
+ nop
+ .align 2
+.L23:
+ bra .L17
+ nop
+ .align 2
+.L18:
+ bra .L24
+ nop
+ .align 2
+.L2:
+ mov.l @(16,r14),r1
+ mov.l @(8,r14),r2
+ add r2,r1
+ mov.l r1,@(16,r14)
+ mov.l @(12,r14),r1
+ mov.l @(8,r14),r2
+ add r2,r1
+ mov.l r1,@(12,r14)
+ mov.l @(8,r14),r1
+ mov #15,r2
+ cmp/gt r2,r1
+ bt .LF101
+ bra .L53
+ nop
+.LF101:
+ mov.l @(12,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov.l @(8,r14),r1
+ mov r1,r9
+ sub r2,r9
+ mov r9,r2
+ mov.l r2,@(8,r14)
+.L26:
+ mov.l @(12,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov.l r2,@(20,r14)
+.L29:
+ mov.l @(20,r14),r1
+ cmp/pl r1
+ bt .L31
+ bra .L28
+ nop
+ .align 2
+.L31:
+ mov.l @(16,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(16,r14)
+ mov r14,r2
+ mov r14,r1
+ add #24,r1
+ mov.l @(16,r14),r2
+ mov.b @r2,r3
+ mov.b r3,@r1
+ mov.l @(12,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(20,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(12,r14),r1
+ mov r14,r2
+ mov r14,r3
+ add #24,r3
+ mov.b @r3,r2
+ mov.b r2,@r1
+ bra .L29
+ nop
+ .align 2
+.L30:
+.L28:
+ bra .L27
+ nop
+ .align 2
+.L32:
+ bra .L26
+ nop
+ .align 2
+.L27:
+ nop
+.L33:
+ mov.l @(16,r14),r1
+ mov #3,r2
+ and r1,r2
+ tst r2,r2
+ bf .L36
+ mov r15,r2
+ mov.l @(12,r14),r1
+ mov.l @(16,r14),r2
+ mov.l @(8,r14),r7
+ mov r7,r3
+ shlr2 r3
+ mov r1,r4
+ mov r2,r5
+ mov r3,r6
+ mov.l .L48,r8
+ jsr @r8
+ nop
+ bra .L37
+ nop
+ .align 2
+.L36:
+ mov r15,r2
+ mov.l @(12,r14),r1
+ mov.l @(16,r14),r2
+ mov.l @(8,r14),r7
+ mov r7,r3
+ shlr2 r3
+ mov r1,r4
+ mov r2,r5
+ mov r3,r6
+ mov.l .L49,r8
+ jsr @r8
+ nop
+.L37:
+ mov.l @(8,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l @(16,r14),r2
+ mov r2,r9
+ sub r1,r9
+ mov r9,r1
+ mov.l r1,@(16,r14)
+ mov.l @(8,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l @(12,r14),r2
+ mov r2,r9
+ sub r1,r9
+ mov r9,r1
+ mov.l r1,@(12,r14)
+ mov.l @(8,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov.l r2,@(8,r14)
+.L35:
+.L53:
+ bra .L25
+ nop
+ .align 2
+.L38:
+ bra .L33
+ nop
+ .align 2
+.L34:
+.L25:
+ nop
+.L39:
+ mov.l @(8,r14),r1
+ mov.l r1,@(20,r14)
+.L42:
+ mov.l @(20,r14),r1
+ cmp/pl r1
+ bt .L44
+ bra .L41
+ nop
+ .align 2
+.L44:
+ mov.l @(16,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(16,r14)
+ mov r14,r2
+ mov r14,r1
+ add #24,r1
+ mov.l @(16,r14),r2
+ mov.b @r2,r3
+ mov.b r3,@r1
+ mov.l @(12,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(20,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(12,r14),r1
+ mov r14,r2
+ mov r14,r3
+ add #24,r3
+ mov.b @r3,r2
+ mov.b r2,@r1
+ bra .L42
+ nop
+ .align 2
+.L43:
+.L41:
+ bra .L24
+ nop
+ .align 2
+.L45:
+ bra .L39
+ nop
+ .align 2
+.L40:
+.L24:
+ mov.l @r14,r1
+ mov r1,r0
+ bra .L1
+ nop
+ .align 2
+.L1:
+ add #28,r14
+ mov r14,r15
+ lds.l @r15+,pr
+ mov.l @r15+,r14
+ mov.l @r15+,r9
+ mov.l @r15+,r8
+ rts
+ nop
+.L50:
+ .align 2
+.L46:
+ .long __wordcopy_fwd_aligned
+.L47:
+ .long __wordcopy_fwd_dest_aligned
+.L48:
+ .long __wordcopy_bwd_aligned
+.L49:
+ .long __wordcopy_bwd_dest_aligned
+.Lfe1:
diff --git a/arch/sh/lib/memset.S b/arch/sh/lib/memset.S
new file mode 100644
index 000000000..c97648a4a
--- /dev/null
+++ b/arch/sh/lib/memset.S
@@ -0,0 +1,72 @@
+! Taken from newlib-1.8.0
+
+!
+! Fast SH memset
+!
+! by Toshiyasu Morita (tm@netcom.com)
+!
+! Entry: r4: destination pointer
+! r5: fill value
+! r6: byte count
+!
+! Exit: r0-r3: trashed
+!
+#include <linux/linkage.h>
+
+ENTRY(memset)
+ mov r4,r3 ! Save return value
+
+ mov r6,r0 ! Check explicitly for zero
+ cmp/eq #0,r0
+ bt L_exit
+
+ mov #12,r0 ! Check for small number of bytes
+ cmp/gt r6,r0
+ bt L_store_byte_loop
+
+ neg r4,r0 ! Align destination
+ add #4,r0
+ and #3,r0
+ tst r0,r0
+ bt L_dup_bytes
+ .balignw 4,0x0009
+L_align_loop:
+ mov.b r5,@r4
+ add #-1,r6
+ add #1,r4
+ dt r0
+ bf L_align_loop
+
+L_dup_bytes:
+ extu.b r5,r5 ! Duplicate bytes across longword
+ swap.b r5,r0
+ or r0,r5
+ swap.w r5,r0
+ or r0,r5
+
+ mov r6,r2 ! Calculate number of double longwords
+ shlr2 r2
+ shlr r2
+
+ .balignw 4,0x0009
+L_store_long_loop:
+ mov.l r5,@r4 ! Store double longs to memory
+ dt r2
+ mov.l r5,@(4,r4)
+ add #8,r4
+ bf L_store_long_loop
+
+ mov #7,r0
+ and r0,r6
+ tst r6,r6
+ bt L_exit
+ .balignw 4,0x0009
+L_store_byte_loop:
+ mov.b r5,@r4 ! Store bytes to memory
+ add #1,r4
+ dt r6
+ bf L_store_byte_loop
+
+L_exit:
+ rts
+ mov r3,r0
diff --git a/arch/sh/lib/old-checksum.c b/arch/sh/lib/old-checksum.c
new file mode 100644
index 000000000..ae3a38043
--- /dev/null
+++ b/arch/sh/lib/old-checksum.c
@@ -0,0 +1,19 @@
+/*
+ * FIXME: old compatibility stuff, will be removed soon.
+ */
+
+#include <net/checksum.h>
+
+unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum)
+{
+ int src_err=0, dst_err=0;
+
+ sum = csum_partial_copy_generic ( src, dst, len, sum, &src_err, &dst_err);
+
+ if (src_err || dst_err)
+ printk("old csum_partial_copy_fromuser(), tell mingo to convert me.\n");
+
+ return sum;
+}
+
+
diff --git a/arch/sh/lib/wordcopy.S b/arch/sh/lib/wordcopy.S
new file mode 100644
index 000000000..c116623d0
--- /dev/null
+++ b/arch/sh/lib/wordcopy.S
@@ -0,0 +1,1289 @@
+#include <linux/linkage.h>
+ENTRY(_wordcopy_fwd_aligned)
+ mov.l r14,@-r15
+ add #-20,r15
+ mov r15,r14
+ mov.l r4,@r14
+ mov.l r5,@(4,r14)
+ mov.l r6,@(8,r14)
+ mov.l @(8,r14),r2
+ mov #7,r1
+ and r2,r1
+ mov #0,r2
+ mov #7,r3
+ sub r2,r1
+ cmp/hi r3,r1
+ bf .L29
+ bra .L2
+ nop
+.L29:
+ mova .L22,r0
+ add r1,r1
+ mov.w @(r0,r1),r1
+ add r0,r1
+ jmp @r1
+ nop
+ .align 2
+ .align 2
+.L22:
+ .word .L15-.L22
+ .word .L18-.L22
+ .word .L3-.L22
+ .word .L5-.L22
+ .word .L7-.L22
+ .word .L9-.L22
+ .word .L11-.L22
+ .word .L13-.L22
+ .align 2
+.L3:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-24,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-28,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #6,r2
+ mov.l r2,@(8,r14)
+ bra .L4
+ nop
+ .align 2
+.L5:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-20,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-24,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #5,r2
+ mov.l r2,@(8,r14)
+ bra .L6
+ nop
+ .align 2
+.L7:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-20,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #4,r2
+ mov.l r2,@(8,r14)
+ bra .L8
+ nop
+ .align 2
+.L9:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #3,r2
+ mov.l r2,@(8,r14)
+ bra .L10
+ nop
+ .align 2
+.L11:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #2,r2
+ mov.l r2,@(8,r14)
+ bra .L12
+ nop
+ .align 2
+.L13:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(8,r14)
+ bra .L14
+ nop
+ .align 2
+.L15:
+ bra .L16
+ nop
+ bra .L1
+ nop
+ .align 2
+.L16:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@r14
+ bra .L17
+ nop
+ .align 2
+.L18:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #4,r2
+ mov.l r2,@(4,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(8,r14)
+ bra .L19
+ nop
+ bra .L20
+ nop
+ .align 2
+.L19:
+ bra .L21
+ nop
+ .align 2
+.L23:
+.L2:
+ nop
+.L24:
+.L21:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L17:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #4,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L14:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #8,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L12:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #12,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L10:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #16,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #16,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L8:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #20,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #20,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L6:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #24,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #24,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L4:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #28,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #28,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #32,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #32,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@(8,r14)
+.L26:
+ mov.l @(8,r14),r1
+ tst r1,r1
+ bf .L27
+ bra .L25
+ nop
+ .align 2
+.L27:
+ bra .L21
+ nop
+ .align 2
+.L25:
+ nop
+.L20:
+ mov.l @r14,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L1:
+ add #20,r14
+ mov r14,r15
+ mov.l @r15+,r14
+ rts
+ nop
+.Lfe1:
+ .size __wordcopy_fwd_aligned,.Lfe1-__wordcopy_fwd_aligned
+ .global ___lshrsi3
+ .global ___ashlsi3
+ .align 2
+ .global __wordcopy_fwd_dest_aligned
+ .type __wordcopy_fwd_dest_aligned,@function
+__wordcopy_fwd_dest_aligned:
+ mov.l r8,@-r15
+ mov.l r9,@-r15
+ mov.l r14,@-r15
+ sts.l pr,@-r15
+ add #-40,r15
+ mov r15,r14
+ mov.l r4,@r14
+ mov.l r5,@(4,r14)
+ mov.l r6,@(8,r14)
+ mov.l @(4,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov r2,r1
+ mov r1,r2
+ shll2 r2
+ add r2,r2
+ mov.l r2,@(28,r14)
+ mov.l @(28,r14),r2
+ neg r2,r1
+ add #32,r1
+ mov.l r1,@(32,r14)
+ mov.l @(4,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l r1,@(4,r14)
+ mov.l @(8,r14),r2
+ mov #3,r1
+ and r2,r1
+ mov #0,r2
+ mov #3,r3
+ sub r2,r1
+ cmp/hi r3,r1
+ bf .L53
+ bra .L31
+ nop
+.L53:
+ mova .L43,r0
+ add r1,r1
+ mov.w @(r0,r1),r1
+ add r0,r1
+ jmp @r1
+ nop
+ .align 2
+ .align 2
+.L43:
+ .word .L36-.L43
+ .word .L39-.L43
+ .word .L32-.L43
+ .word .L34-.L43
+ .align 2
+.L32:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #2,r2
+ mov.l r2,@(8,r14)
+ bra .L33
+ nop
+ .align 2
+.L34:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(8,r14)
+ bra .L35
+ nop
+ .align 2
+.L36:
+ bra .L37
+ nop
+ bra .L30
+ nop
+ .align 2
+.L37:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #4,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@r14
+ bra .L38
+ nop
+ .align 2
+.L39:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #8,r2
+ mov.l r2,@(4,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(8,r14)
+ bra .L40
+ nop
+ bra .L41
+ nop
+ .align 2
+.L40:
+ bra .L42
+ nop
+ .align 2
+.L44:
+.L31:
+ nop
+.L45:
+.L42:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r8
+ mov.l .L49,r1
+ mov.l @(20,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L50,r1
+ mov.l @(24,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L38:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #4,r8
+ mov.l .L49,r1
+ mov.l @(24,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L50,r1
+ mov.l @(12,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L35:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #8,r8
+ mov.l .L49,r1
+ mov.l @(12,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L50,r1
+ mov.l @(16,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L33:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #12,r8
+ mov.l .L49,r1
+ mov.l @(16,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L50,r1
+ mov.l @(20,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #16,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #16,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@(8,r14)
+.L47:
+ mov.l @(8,r14),r1
+ tst r1,r1
+ bf .L48
+ bra .L46
+ nop
+ .align 2
+.L48:
+ bra .L42
+ nop
+ .align 2
+.L46:
+ nop
+.L41:
+ mov.l @r14,r8
+ mov.l .L49,r1
+ mov.l @(20,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L50,r1
+ mov.l @(24,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L30:
+ add #40,r14
+ mov r14,r15
+ lds.l @r15+,pr
+ mov.l @r15+,r14
+ mov.l @r15+,r9
+ mov.l @r15+,r8
+ rts
+ nop
+.L51:
+ .align 2
+.L49:
+ .long ___lshrsi3
+.L50:
+ .long ___ashlsi3
+.Lfe2:
+ .size __wordcopy_fwd_dest_aligned,.Lfe2-__wordcopy_fwd_dest_aligned
+ .align 2
+ .global __wordcopy_bwd_aligned
+ .type __wordcopy_bwd_aligned,@function
+__wordcopy_bwd_aligned:
+ mov.l r14,@-r15
+ add #-20,r15
+ mov r15,r14
+ mov.l r4,@r14
+ mov.l r5,@(4,r14)
+ mov.l r6,@(8,r14)
+ mov.l @(8,r14),r2
+ mov #7,r1
+ and r2,r1
+ mov #0,r2
+ mov #7,r3
+ sub r2,r1
+ cmp/hi r3,r1
+ bf .L82
+ bra .L55
+ nop
+.L82:
+ mova .L75,r0
+ add r1,r1
+ mov.w @(r0,r1),r1
+ add r0,r1
+ jmp @r1
+ nop
+ .align 2
+ .align 2
+.L75:
+ .word .L68-.L75
+ .word .L71-.L75
+ .word .L56-.L75
+ .word .L58-.L75
+ .word .L60-.L75
+ .word .L62-.L75
+ .word .L64-.L75
+ .word .L66-.L75
+ .align 2
+.L56:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #6,r2
+ mov.l r2,@(8,r14)
+ bra .L57
+ nop
+ .align 2
+.L58:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #5,r2
+ mov.l r2,@(8,r14)
+ bra .L59
+ nop
+ .align 2
+.L60:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #4,r2
+ mov.l r2,@(8,r14)
+ bra .L61
+ nop
+ .align 2
+.L62:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-20,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #16,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #3,r2
+ mov.l r2,@(8,r14)
+ bra .L63
+ nop
+ .align 2
+.L64:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-24,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-20,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #20,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #2,r2
+ mov.l r2,@(8,r14)
+ bra .L65
+ nop
+ .align 2
+.L66:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-28,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-24,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #24,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(8,r14)
+ bra .L67
+ nop
+ .align 2
+.L68:
+ bra .L69
+ nop
+ bra .L54
+ nop
+ .align 2
+.L69:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-32,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-28,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #28,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ bra .L70
+ nop
+ .align 2
+.L71:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-36,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-32,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #32,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(8,r14)
+ bra .L72
+ nop
+ bra .L73
+ nop
+ .align 2
+.L72:
+ bra .L74
+ nop
+ .align 2
+.L76:
+.L55:
+ nop
+.L77:
+.L74:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #28,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #28,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L70:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #24,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #24,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L67:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #20,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #20,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L65:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #16,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #16,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L63:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #12,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L61:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #8,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+.L59:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r2
+ mov r2,r1
+ add #4,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L57:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r1
+ mov.l @(12,r14),r2
+ mov.l r2,@r1
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-32,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-32,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@(8,r14)
+.L79:
+ mov.l @(8,r14),r1
+ tst r1,r1
+ bf .L80
+ bra .L78
+ nop
+ .align 2
+.L80:
+ bra .L74
+ nop
+ .align 2
+.L78:
+ nop
+.L73:
+ mov.l @r14,r2
+ mov r2,r1
+ add #28,r1
+ mov.l @(16,r14),r2
+ mov.l r2,@r1
+.L54:
+ add #20,r14
+ mov r14,r15
+ mov.l @r15+,r14
+ rts
+ nop
+.Lfe3:
+ .size __wordcopy_bwd_aligned,.Lfe3-__wordcopy_bwd_aligned
+ .align 2
+ .global __wordcopy_bwd_dest_aligned
+ .type __wordcopy_bwd_dest_aligned,@function
+__wordcopy_bwd_dest_aligned:
+ mov.l r8,@-r15
+ mov.l r9,@-r15
+ mov.l r14,@-r15
+ sts.l pr,@-r15
+ add #-40,r15
+ mov r15,r14
+ mov.l r4,@r14
+ mov.l r5,@(4,r14)
+ mov.l r6,@(8,r14)
+ mov.l @(4,r14),r1
+ mov #3,r2
+ and r1,r2
+ mov r2,r1
+ mov r1,r2
+ shll2 r2
+ add r2,r2
+ mov.l r2,@(28,r14)
+ mov.l @(28,r14),r2
+ neg r2,r1
+ add #32,r1
+ mov.l r1,@(32,r14)
+ mov.l @(4,r14),r1
+ mov #-4,r2
+ and r2,r1
+ mov.l r1,@(4,r14)
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #4,r2
+ mov.l r2,@(4,r14)
+ mov.l @(8,r14),r2
+ mov #3,r1
+ and r2,r1
+ mov #0,r2
+ mov #3,r3
+ sub r2,r1
+ cmp/hi r3,r1
+ bf .L106
+ bra .L84
+ nop
+.L106:
+ mova .L96,r0
+ add r1,r1
+ mov.w @(r0,r1),r1
+ add r0,r1
+ jmp @r1
+ nop
+ .align 2
+ .align 2
+.L96:
+ .word .L89-.L96
+ .word .L92-.L96
+ .word .L85-.L96
+ .word .L87-.L96
+ .align 2
+.L85:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #2,r2
+ mov.l r2,@(8,r14)
+ bra .L86
+ nop
+ .align 2
+.L87:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-8,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #1,r2
+ mov.l r2,@(8,r14)
+ bra .L88
+ nop
+ .align 2
+.L89:
+ bra .L90
+ nop
+ bra .L83
+ nop
+ .align 2
+.L90:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-20,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-12,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #16,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ bra .L91
+ nop
+ .align 2
+.L92:
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-24,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@r14
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #20,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #16,r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-1,r2
+ mov.l r2,@(8,r14)
+ bra .L93
+ nop
+ bra .L94
+ nop
+ .align 2
+.L93:
+ bra .L95
+ nop
+ .align 2
+.L97:
+.L84:
+ nop
+.L98:
+.L95:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #12,r1
+ mov.l @r1,r2
+ mov.l r2,@(24,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #12,r8
+ mov.l .L102,r1
+ mov.l @(12,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L103,r1
+ mov.l @(16,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L91:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #8,r1
+ mov.l @r1,r2
+ mov.l r2,@(20,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #8,r8
+ mov.l .L102,r1
+ mov.l @(24,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L103,r1
+ mov.l @(12,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L88:
+ mov.l @(4,r14),r2
+ mov r2,r1
+ add #4,r1
+ mov.l @r1,r2
+ mov.l r2,@(16,r14)
+ mov.l @r14,r1
+ mov r1,r8
+ add #4,r8
+ mov.l .L102,r1
+ mov.l @(20,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L103,r1
+ mov.l @(24,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L86:
+ mov.l @(4,r14),r1
+ mov.l @r1,r2
+ mov.l r2,@(12,r14)
+ mov.l @r14,r8
+ mov.l .L102,r1
+ mov.l @(16,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L103,r1
+ mov.l @(20,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+ mov.l @(4,r14),r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@(4,r14)
+ mov.l @r14,r1
+ mov r1,r2
+ add #-16,r2
+ mov.l r2,@r14
+ mov.l @(8,r14),r1
+ mov r1,r2
+ add #-4,r2
+ mov.l r2,@(8,r14)
+.L100:
+ mov.l @(8,r14),r1
+ tst r1,r1
+ bf .L101
+ bra .L99
+ nop
+ .align 2
+.L101:
+ bra .L95
+ nop
+ .align 2
+.L99:
+ nop
+.L94:
+ mov.l @r14,r1
+ mov r1,r8
+ add #12,r8
+ mov.l .L102,r1
+ mov.l @(12,r14),r4
+ mov.l @(28,r14),r5
+ jsr @r1
+ nop
+ mov r0,r9
+ mov.l .L103,r1
+ mov.l @(16,r14),r4
+ mov.l @(32,r14),r5
+ jsr @r1
+ nop
+ mov.l r0,@(36,r14)
+ mov.l @(36,r14),r1
+ or r9,r1
+ mov.l r1,@r8
+.L83:
+ add #40,r14
+ mov r14,r15
+ lds.l @r15+,pr
+ mov.l @r15+,r14
+ mov.l @r15+,r9
+ mov.l @r15+,r8
+ rts
+ nop
+.L104:
+ .align 2
+.L102:
+ .long ___lshrsi3
+.L103:
+ .long ___ashlsi3
+.Lfe4:
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
new file mode 100644
index 000000000..c89c2b9e3
--- /dev/null
+++ b/arch/sh/mm/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux SuperH-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mm.o
+O_OBJS := init.o fault.o ioremap.o extable.o
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sh/mm/extable.c b/arch/sh/mm/extable.c
new file mode 100644
index 000000000..35c4451eb
--- /dev/null
+++ b/arch/sh/mm/extable.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/sh/mm/extable.c
+ * Taken from:
+ * linux/arch/i386/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+static inline unsigned long
+search_one_table(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid->fixup;
+ else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ return 0;
+}
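+
+/*
+ * Note: this is a plain binary search, so it relies on the
+ * __ex_table entries being sorted by faulting-instruction address;
+ * entries are emitted in the order the code that owns them is
+ * linked, which keeps the table ascending.
+ */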
+
+unsigned long
+search_exception_table(unsigned long addr)
+{
+ unsigned long ret;
+
+#ifndef CONFIG_MODULES
+ /* There is only the kernel to search. */
+ ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
+ if (ret) return ret;
+#else
+ /* The kernel is the last "module" -- no need to treat it special. */
+ struct module *mp;
+ for (mp = module_list; mp != NULL; mp = mp->next) {
+ if (mp->ex_table_start == NULL)
+ continue;
+ ret = search_one_table(mp->ex_table_start,
+ mp->ex_table_end - 1, addr);
+ if (ret) return ret;
+ }
+#endif
+
+ return 0;
+}
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
new file mode 100644
index 000000000..c1348d5b4
--- /dev/null
+++ b/arch/sh/mm/fault.c
@@ -0,0 +1,326 @@
+/*
+ * linux/arch/sh/mm/fault.c
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Based on linux/arch/i386/mm/fault.c:
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+
+extern void die(const char *,struct pt_regs *,long);
+
+/*
+ * Ugly, ugly, but the goto's result in better assembly..
+ */
+int __verify_write(const void * addr, unsigned long size)
+{
+ struct vm_area_struct * vma;
+ unsigned long start = (unsigned long) addr;
+
+ if (!size)
+ return 1;
+
+ vma = find_vma(current->mm, start);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start > start)
+ goto check_stack;
+
+good_area:
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ size--;
+ size += start & ~PAGE_MASK;
+ size >>= PAGE_SHIFT;
+ start &= PAGE_MASK;
+
+ for (;;) {
+ if (handle_mm_fault(current, vma, start, 1) <= 0)
+ goto bad_area;
+ if (!size)
+ break;
+ size--;
+ start += PAGE_SIZE;
+ if (start < vma->vm_end)
+ continue;
+ vma = vma->vm_next;
+ if (!vma || vma->vm_start != start)
+ goto bad_area;
+ if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+ }
+ return 1;
+
+check_stack:
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, start) == 0)
+ goto good_area;
+
+bad_area:
+ return 0;
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
+ unsigned long address)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long page;
+ unsigned long fixup;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || !mm)
+ goto no_context;
+
+ down(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ if (writeaccess) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ {
+ int fault = handle_mm_fault(tsk, vma, address, writeaccess);
+ if (fault < 0)
+ goto out_of_memory;
+ if (!fault)
+ goto do_sigbus;
+ }
+
+ up(&mm->mmap_sem);
+ return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up(&mm->mmap_sem);
+
+ if (user_mode(regs)) {
+ tsk->thread.address = address;
+ tsk->thread.error_code = writeaccess;
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ fixup = search_exception_table(regs->pc);
+ if (fixup != 0) {
+ regs->pc = fixup;
+ return;
+ }
+
+/*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ *
+ */
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+ printk(KERN_ALERT "pc = %08lx\n", regs->pc);
+ page = (unsigned long)mm->pgd;
+ page = ((unsigned long *) __va(page))[address >> 22];
+ printk(KERN_ALERT "*pde = %08lx\n", page);
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte = %08lx\n", page);
+ }
+ die("Oops", regs, writeaccess);
+ do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+ up(&mm->mmap_sem);
+ printk("VM: killing process %s\n", tsk->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ goto no_context;
+
+do_sigbus:
+ up(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->thread.address = address;
+ tsk->thread.error_code = writeaccess;
+ tsk->thread.trap_no = 14;
+ force_sig(SIGBUS, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+}
+
+void update_mmu_cache(struct vm_area_struct * vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned long asid;
+ unsigned long pteval;
+
+ asid = get_asid();
+
+ save_and_cli(flags);
+ address &= PAGE_MASK;
+ /* Set PTEH register */
+ asm volatile ("mov.l %0,%1"
+ : /* no output */
+ : "r" (address | asid), "m" (__m(MMU_PTEH)));
+
+ pteval = pte_val(pte);
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+ pteval |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */
+ /* Set PTEL register */
+ asm volatile ("mov.l %0,%1"
+ : /* no output */
+ : "r" (pteval), "m" (__m(MMU_PTEL)));
+
+ /* Load the TLB */
+ asm volatile ("ldtlb" : /* no output */ : /* no input */
+ : "memory");
+ restore_flags(flags);
+}
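+
+/* (ldtlb loads PTEH/PTEL into the TLB entry currently selected by
+ * MMUCR, and both registers are shared with the TLB-miss handler,
+ * which is why save_and_cli() brackets the whole sequence above.) */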
+
+static __inline__ void __flush_tlb_page(unsigned long asid, unsigned long page)
+{
+ unsigned long addr, data;
+
+ addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000) | MMU_PAGE_ASSOC_BIT;
+ data = page | asid; /* VALID bit is off */
+ __asm__ __volatile__ ("mov.l %0,%1"
+ : /* no output */
+ : "r" (data), "m" (__m(addr)));
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long asid;
+
+ if (vma->vm_mm->context != NO_CONTEXT) {
+ page &= PAGE_MASK;
+ asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
+ __flush_tlb_page (asid, page);
+ }
+}
+
+void flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+ int size;
+
+ save_and_cli(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
+ get_new_mmu_context(mm);
+ if (mm == current->mm)
+ set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+ } else {
+ unsigned long asid;
+
+ asid = mm->context & MMU_CONTEXT_ASID_MASK;
+ start &= PAGE_MASK;
+ end += (PAGE_SIZE - 1);
+ end &= PAGE_MASK;
+ while (start < end) {
+ __flush_tlb_page (asid, start);
+ start += PAGE_SIZE;
+ }
+ }
+ restore_flags(flags);
+ }
+}
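+
+/* (The MMU_NTLB_ENTRIES/4 cut-off above is a heuristic: invalidating
+ * page-by-page only pays off for small ranges; past a quarter of the
+ * TLB it is cheaper to drop the whole context and take a fresh ASID,
+ * just as flush_tlb_mm() below does.) */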
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	/* Invalidate all the TLB entries of this process. */
+	/* Instead of flushing TLB entries, we get a new MMU context. */
+ if (mm->context != NO_CONTEXT) {
+ unsigned long flags;
+
+ save_and_cli(flags);
+ get_new_mmu_context(mm);
+ if (mm == current->mm)
+ set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
+ restore_flags(flags);
+ }
+}
+
+void flush_tlb_all(void)
+{
+ unsigned long flags, __dummy;
+
+ save_and_cli(flags);
+ asm volatile("mov.l %1,%0\n\t"
+ "or #4,%0\n\t" /* Set TF-bit to flush */
+ "mov.l %0,%1"
+ : "=&z" (__dummy)
+ : "m" (__m(MMUCR)));
+ restore_flags(flags);
+}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
new file mode 100644
index 000000000..3a6bfc1a2
--- /dev/null
+++ b/arch/sh/mm/init.c
@@ -0,0 +1,294 @@
+/*
+ * linux/arch/sh/mm/init.c
+ *
+ * Copyright (C) 1999 Niibe Yutaka
+ *
+ * Based on linux/arch/i386/mm/init.c:
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Cache of MMU context last used.
+ */
+unsigned long mmu_context_cache;
+
+static unsigned long totalram = 0;
+
+extern void show_net_buffers(void);
+extern unsigned long init_smp_mappings(unsigned long);
+
+void __bad_pte_kernel(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+}
+
+void __bad_pte(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+}
+
+pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pmd_none(*pmd)) {
+ if (pte) {
+ clear_page((unsigned long)pte);
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ return pte + offset;
+ }
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+ return NULL;
+ }
+ free_page((unsigned long)pte);
+ if (pmd_bad(*pmd)) {
+ __bad_pte_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
+{
+ unsigned long pte;
+
+ pte = (unsigned long) __get_free_page(GFP_KERNEL);
+ if (pmd_none(*pmd)) {
+ if (pte) {
+ clear_page(pte);
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(pte);
+ return (pte_t *)(pte + offset);
+ }
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+ return NULL;
+ }
+ free_page(pte);
+ if (pmd_bad(*pmd)) {
+ __bad_pte(pmd);
+ return NULL;
+ }
+ return (pte_t *) (pmd_page(*pmd) + offset);
+}
+
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+ if(pgtable_cache_size > high) {
+ do {
+ if(pgd_quicklist)
+ free_pgd_slow(get_pgd_fast()), freed++;
+ if(pmd_quicklist)
+ free_pmd_slow(get_pmd_fast()), freed++;
+ if(pte_quicklist)
+ free_pte_slow(get_pte_fast()), freed++;
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
+}
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * for a process dying in kernel mode, possibly leaving an inode
+ * unused etc..
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+pte_t * __bad_pagetable(void)
+{
+ extern char empty_bad_page_table[PAGE_SIZE];
+ unsigned long page = (unsigned long)empty_bad_page_table;
+
+ clear_page(page);
+ return (pte_t *)empty_bad_page_table;
+}
+
+pte_t __bad_page(void)
+{
+ extern char empty_bad_page[PAGE_SIZE];
+ unsigned long page = (unsigned long)empty_bad_page;
+
+ clear_page(page);
+ return pte_mkdirty(mk_pte(page, PAGE_SHARED));
+}
+
+void show_mem(void)
+{
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0, cached = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (!page_count(mem_map+i))
+ free++;
+ else
+ shared += page_count(mem_map+i) - 1;
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+#ifdef CONFIG_NET
+ show_net_buffers();
+#endif
+}
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+
+/* References to section boundaries */
+
+extern char _text, _etext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+pgd_t swapper_pg_dir[1024];
+
+/*
+ * paging_init() sets up the page tables
+ *
+ * This routine also unmaps the page at virtual kernel address 0, so
+ * that we can trap those pesky NULL-reference errors in the kernel.
+ */
+unsigned long __init
+paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ pgd_t * pg_dir;
+
+ start_mem = PAGE_ALIGN(start_mem);
+
+	/* We don't need a kernel mapping, as the hardware supports that. */
+ pg_dir = swapper_pg_dir;
+
+ /* Unmap the original low memory mappings to detect NULL reference */
+ pgd_val(pg_dir[0]) = 0;
+
+ /* Enable MMU */
+ __asm__ __volatile__ ("mov.l %0,%1"
+ : /* no output */
+ : "r" (MMU_CONTROL_INIT), "m" (__m(MMUCR)));
+
+ return free_area_init(start_mem, end_mem);
+}
+
+unsigned long empty_bad_page[1024];
+unsigned long empty_bad_page_table[1024];
+unsigned long empty_zero_page[1024];
+
+void __init mem_init(unsigned long start_mem, unsigned long end_mem)
+{
+ int codepages = 0;
+ int reservedpages = 0;
+ int datapages = 0;
+ int initpages = 0;
+ unsigned long tmp;
+
+ end_mem &= PAGE_MASK;
+ high_memory = (void *) end_mem;
+ max_mapnr = num_physpages = MAP_NR(end_mem);
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ /* Mark (clear "reserved" bit) usable pages in the mem_map[] */
+ /* Note that all are marked reserved already. */
+ tmp = start_mem = PAGE_ALIGN(start_mem);
+ while (tmp < end_mem) {
+ clear_bit(PG_reserved, &mem_map[MAP_NR(tmp)].flags);
+ clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+ tmp += PAGE_SIZE;
+ }
+
+ for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
+ if (PageReserved(mem_map+MAP_NR(tmp))) {
+ if (tmp >= (unsigned long) &_text && tmp < (unsigned long) &_edata) {
+ if (tmp < (unsigned long) &_etext)
+ codepages++;
+ else
+ datapages++;
+ } else if (tmp >= (unsigned long) &__init_begin
+ && tmp < (unsigned long) &__init_end)
+ initpages++;
+ else if (tmp >= (unsigned long) &__bss_start
+ && tmp < (unsigned long) start_mem)
+ datapages++;
+ else
+ reservedpages++;
+ continue;
+ }
+ set_page_count(mem_map+MAP_NR(tmp), 1);
+ totalram += PAGE_SIZE;
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
+#endif
+ free_page(tmp);
+ }
+ printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codepages << (PAGE_SHIFT-10),
+ reservedpages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10));
+}
+
+void free_initmem(void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+ set_page_count(mem_map+MAP_NR(addr), 1);
+ free_page(addr);
+ totalram += PAGE_SIZE;
+ }
+ printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+ val->totalram = totalram;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages << PAGE_SHIFT;
+ val->bufferram = atomic_read(&buffermem);
+ return;
+}
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
new file mode 100644
index 000000000..f786379cf
--- /dev/null
+++ b/arch/sh/mm/ioremap.c
@@ -0,0 +1,140 @@
+/*
+ * arch/sh/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ */
+
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ if (!pte_none(*pte))
+ printk("remap_area_pte: page already exists\n");
+ set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
+ _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ do {
+ pte_t * pte = pte_alloc_kernel(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address, address + phys_addr, flags);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ while (address < end) {
+ pmd_t *pmd = pmd_alloc_kernel(dir, address);
+ if (!pmd)
+ return -ENOMEM;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ return -ENOMEM;
+ set_pgdir(address, *dir);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_all();
+ return 0;
+}
+
+/*
+ * Generic mapping function (not visible outside):
+ */
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void * addr;
+ struct vm_struct * area;
+ unsigned long offset, last_addr;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0xA0000 && last_addr <= 0x100000)
+ return phys_to_virt(phys_addr);
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (phys_addr < virt_to_phys(high_memory))
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr) - phys_addr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
+ vfree(addr);
+ return NULL;
+ }
+ return (void *) (offset + (char *)addr);
+}
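+
+/*
+ * Typical use (illustrative; phys_base and CTRL_REG are placeholders
+ * for a real device):
+ *
+ *	void *regs = __ioremap(phys_base, 0x100, 0);
+ *	if (regs) {
+ *		writeb(1, regs + CTRL_REG);
+ *		iounmap(regs);
+ *	}
+ */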
+
+void iounmap(void *addr)
+{
+ if (addr > high_memory)
+ return vfree((void *) (PAGE_MASK & (unsigned long) addr));
+}
diff --git a/arch/sh/vmlinux.lds.S b/arch/sh/vmlinux.lds.S
new file mode 100644
index 000000000..a812c91f8
--- /dev/null
+++ b/arch/sh/vmlinux.lds.S
@@ -0,0 +1,114 @@
+/* ld script to make SuperH Linux kernel
+ * Written by Niibe Yutaka
+ */
+#include <linux/config.h>
+#ifdef CONFIG_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-shl", "elf32-shl", "elf32-shl")
+#else
+OUTPUT_FORMAT("elf32-sh", "elf32-sh", "elf32-sh")
+#endif
+OUTPUT_ARCH(sh)
+ENTRY(_start)
+SECTIONS
+{
+ . = 0x80000000 + CONFIG_MEMORY_START + 0x1000;
+ __text = .; /* Text and read-only data */
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0
+ .text.lock : { *(.text.lock) } /* out-of-line lock text */
+ .rodata : { *(.rodata) }
+ .kstrtab : { *(.kstrtab) }
+
+ . = ALIGN(16); /* Exception table */
+ ___start___ex_table = .;
+ ___ex_table : { *(__ex_table) }
+ ___stop___ex_table = .;
+
+ ___start___ksymtab = .; /* Kernel symbol table */
+ ___ksymtab : { *(__ksymtab) }
+ ___stop___ksymtab = .;
+
+ __etext = .; /* End of text section */
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ __edata = .; /* End of data section */
+
+ . = ALIGN(8192); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+ /* stack */
+ .stack : { _stack = .; __stack = .; }
+
+ . = ALIGN(4096); /* Init code and data */
+ ___init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : { *(.data.init) }
+ . = ALIGN(16);
+ ___setup_start = .;
+ .setup.init : { *(.setup.init) }
+ ___setup_end = .;
+ ___initcall_start = .;
+ .initcall.init : { *(.initcall.init) }
+ ___initcall_end = .;
+ . = ALIGN(4096);
+ ___init_end = .;
+
+ . = ALIGN(4096);
+ .data.page_aligned : { *(.data.idt) }
+
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+ . = ALIGN(4096);
+ .data.disk_image : { *(.data.disk_image) }
+
+ . = ALIGN(4);
+ ___bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ . = ALIGN(4);
+ __end = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging section are relative to the beginning
+ of the section so we begin .debug at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* These must appear regardless of . */
+}