summary refs log tree commit diff stats
path: root/arch/arm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm')
-rw-r--r--arch/arm/Makefile198
-rw-r--r--arch/arm/boot/Makefile35
-rw-r--r--arch/arm/boot/compressed/Makefile33
-rw-r--r--arch/arm/boot/compressed/Makefile.debug16
-rw-r--r--arch/arm/boot/compressed/head-nexuspci.S95
-rw-r--r--arch/arm/boot/compressed/head.S124
-rw-r--r--arch/arm/boot/compressed/misc.c308
-rw-r--r--arch/arm/boot/install.sh61
-rw-r--r--arch/arm/config.in145
-rw-r--r--arch/arm/defconfig264
-rw-r--r--arch/arm/kernel/Makefile47
-rw-r--r--arch/arm/kernel/armksyms.c178
-rw-r--r--arch/arm/kernel/calls.S194
-rw-r--r--arch/arm/kernel/dma.c199
-rw-r--r--arch/arm/kernel/ecard.c604
-rw-r--r--arch/arm/kernel/entry-armo.S643
-rw-r--r--arch/arm/kernel/entry-armv.S671
-rw-r--r--arch/arm/kernel/entry-common.S283
-rw-r--r--arch/arm/kernel/head-armo.S63
-rw-r--r--arch/arm/kernel/head-armv.S312
-rw-r--r--arch/arm/kernel/iic.c160
-rw-r--r--arch/arm/kernel/init_task.c23
-rw-r--r--arch/arm/kernel/ioport.c98
-rw-r--r--arch/arm/kernel/irq.c327
-rw-r--r--arch/arm/kernel/oldlatches.c53
-rw-r--r--arch/arm/kernel/process.c239
-rw-r--r--arch/arm/kernel/ptrace.c745
-rw-r--r--arch/arm/kernel/setup-ebsa110.c143
-rw-r--r--arch/arm/kernel/setup.c292
-rw-r--r--arch/arm/kernel/signal.c515
-rw-r--r--arch/arm/kernel/sys_arm.c372
-rw-r--r--arch/arm/kernel/time.c154
-rw-r--r--arch/arm/kernel/traps.c306
-rw-r--r--arch/arm/lib/Makefile55
-rw-r--r--arch/arm/lib/backtrace.S100
-rw-r--r--arch/arm/lib/bitops.S152
-rw-r--r--arch/arm/lib/checksum.S600
-rw-r--r--arch/arm/lib/delay.S43
-rw-r--r--arch/arm/lib/floppydma.S57
-rw-r--r--arch/arm/lib/fp_support.c22
-rw-r--r--arch/arm/lib/getconsdata.c31
-rw-r--r--arch/arm/lib/getconstants.c74
-rw-r--r--arch/arm/lib/getconstants.h17
-rw-r--r--arch/arm/lib/io-acorn.S215
-rw-r--r--arch/arm/lib/io-ebsa110.S149
-rw-r--r--arch/arm/lib/ll_char_wr.S157
-rw-r--r--arch/arm/lib/loaders.S53
-rw-r--r--arch/arm/lib/memcpy.S312
-rw-r--r--arch/arm/lib/memfastset.S35
-rw-r--r--arch/arm/lib/string.S139
-rw-r--r--arch/arm/lib/system.S20
-rw-r--r--arch/arm/lib/testm.c81
-rw-r--r--arch/arm/lib/uaccess-armo.S230
-rw-r--r--arch/arm/lib/uaccess.S631
-rw-r--r--arch/arm/mm/Makefile36
-rw-r--r--arch/arm/mm/extable.c55
-rw-r--r--arch/arm/mm/fault-armo.c159
-rw-r--r--arch/arm/mm/fault-armv.c200
-rw-r--r--arch/arm/mm/init.c215
-rw-r--r--arch/arm/mm/mm-a5k.c7
-rw-r--r--arch/arm/mm/mm-arc.c7
-rw-r--r--arch/arm/mm/mm-ebsa110.c7
-rw-r--r--arch/arm/mm/mm-nexuspci.c7
-rw-r--r--arch/arm/mm/mm-rpc.c80
-rw-r--r--arch/arm/mm/proc-arm2,3.S494
-rw-r--r--arch/arm/mm/proc-arm6,7.S436
-rw-r--r--arch/arm/mm/proc-sa110.S305
-rw-r--r--arch/arm/mm/small_page.c201
-rw-r--r--arch/arm/vmlinux.lds58
69 files changed, 13340 insertions, 0 deletions
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
new file mode 100644
index 000000000..5c1efb76a
--- /dev/null
+++ b/arch/arm/Makefile
@@ -0,0 +1,198 @@
+#
+# arch/arm/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to do have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995, 1996 by Russell King
+
+ifeq ($(CONFIG_CPU_ARM2),y)
+PROCESSOR = armo
+ASFLAGS_PROC += -m2
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mcpu=arm2
+ASFLAGS_PROC += -m2
+else
+CFLAGS_PROC += -m2
+ASFLAGS_PROC += -m2
+endif
+endif
+
+ifeq ($(CONFIG_CPU_ARM3),y)
+PROCESSOR = armo
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mcpu=arm3
+ASFLAGS_PROC += -m3
+else
+CFLAGS_PROC += -m3
+ASFLAGS_PROC += -m3
+endif
+endif
+
+ifeq ($(CONFIG_CPU_ARM6),y)
+PROCESSOR = armv
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mcpu=arm6
+ASFLAGS_PROC += -m6
+else
+CFLAGS_PROC += -m6
+ASFLAGS_PROC += -m6
+endif
+endif
+
+ifeq ($(CONFIG_CPU_SA110),y)
+PROCESSOR = armv
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mcpu=strongarm110
+ASFLAGS_PROC += -m6
+else
+CFLAGS_PROC += -m6
+ASFLAGS_PROC += -m6
+endif
+endif
+
+# Processor Architecture
+# CFLAGS_PROC - processor dependent CFLAGS
+# PROCESSOR - processor type
+# TEXTADDR - Uncompressed kernel link text address
+# ZTEXTADDR - Compressed kernel link text address
+# ZRELADDR - Compressed kernel relocating address (point at which uncompressed kernel is loaded).
+#
+
+HEAD := arch/arm/kernel/head-$(PROCESSOR).o arch/arm/kernel/init_task.o
+COMPRESSED_HEAD = head.o
+
+ifeq ($(PROCESSOR),armo)
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mapcs-26 -mshort-load-bytes
+endif
+TEXTADDR = 0x02080000
+ZTEXTADDR = 0x01800000
+ZRELADDR = 0x02080000
+endif
+
+ifeq ($(CONFIG_ARCH_A5K),y)
+MACHINE = a5k
+COMPRESSED_EXTRA = $(TOPDIR)/arch/arm/lib/ll_char_wr.o
+endif
+
+ifeq ($(CONFIG_ARCH_ARC),y)
+MACHINE = arc
+COMPRESSED_EXTRA = $(TOPDIR)/arch/arm/lib/ll_char_wr.o
+endif
+
+ifeq ($(PROCESSOR),armv)
+ifeq ($(CONFIG_BINUTILS_NEW),y)
+CFLAGS_PROC += -mapcs-32 -mshort-load-bytes
+endif
+TEXTADDR = 0xC0008000
+endif
+
+ifeq ($(CONFIG_ARCH_RPC),y)
+MACHINE = rpc
+COMPRESSED_EXTRA = $(TOPDIR)/arch/arm/lib/ll_char_wr.o
+ZTEXTADDR = 0x10008000
+ZRELADDR = 0x10008000
+endif
+
+ifeq ($(CONFIG_ARCH_EBSA110),y)
+MACHINE = ebsa110
+ZTEXTADDR = 0x00008000
+ZRELADDR = 0x00008000
+endif
+
+ifeq ($(CONFIG_ARCH_NEXUSPCI),y)
+MACHINE = nexuspci
+TEXTADDR = 0xc0000000
+ZTEXTADDR = 0x40200000
+ZRELADDR = 0x40000000
+COMPRESSED_EXTRA = $(TOPDIR)/arch/arm/lib/ll_char_wr_scc.o
+COMPRESSED_HEAD = head-nexuspci.o
+endif
+
+OBJDUMP = $(CROSS_COMPILE)objdump
+PERL = perl
+LD = $(CROSS_COMPILE)ld -m elf_arm
+CPP = $(CC) -E
+OBJCOPY = $(CROSS_COMPILE)objcopy -O binary -R .note -R .comment -S
+ARCHCC := $(word 1,$(CC))
+GCCLIB := `$(ARCHCC) $(CFLAGS_PROC) --print-libgcc-file-name`
+GCCARCH := -B/usr/src/bin/arm/arm-linuxelf-
+HOSTCFLAGS := $(CFLAGS:-fomit-frame-pointer=)
+ifeq ($(CONFIG_FRAME_POINTER),y)
+CFLAGS := $(CFLAGS:-fomit-frame-pointer=)
+endif
+CFLAGS := $(CFLAGS_PROC) $(CFLAGS) -pipe
+ASFLAGS := $(ASFLAGS_PROC) $(ASFLAGS) -D__ASSEMBLY__
+LINKFLAGS = -T $(TOPDIR)/arch/arm/vmlinux.lds -e stext -Ttext $(TEXTADDR)
+ZLINKFLAGS = -Ttext $(ZTEXTADDR)
+
+SUBDIRS := $(SUBDIRS:drivers=) arch/arm/lib arch/arm/kernel arch/arm/mm arch/arm/drivers
+CORE_FILES := arch/arm/kernel/kernel.o arch/arm/mm/mm.o $(CORE_FILES)
+LIBS := arch/arm/lib/lib.a $(LIBS) $(GCCLIB)
+
+DRIVERS := arch/arm/drivers/block/block.a \
+ arch/arm/drivers/char/char.a \
+ drivers/misc/misc.a \
+ arch/arm/drivers/net/net.a
+
+ifeq ($(CONFIG_SCSI),y)
+DRIVERS := $(DRIVERS) arch/arm/drivers/scsi/scsi.a
+endif
+
+ifneq ($(CONFIG_CD_NO_IDESCSI)$(CONFIG_BLK_DEV_IDECD)$(CONFIG_BLK_DEV_SR),)
+DRIVERS := $(DRIVERS) drivers/cdrom/cdrom.a
+endif
+
+ifeq ($(CONFIG_SOUND),y)
+DRIVERS := $(DRIVERS) arch/arm/drivers/sound/sound.a
+endif
+
+symlinks::
+ $(RM) include/asm-arm/arch include/asm-arm/proc
+ (cd include/asm-arm; ln -sf arch-$(MACHINE) arch; ln -sf proc-$(PROCESSOR) proc)
+
+mrproper::
+ rm -f include/asm-arm/arch include/asm-arm/proc
+ @$(MAKE) -C arch/$(ARCH)/drivers mrproper
+
+arch/arm/kernel: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/arm/kernel
+
+arch/arm/mm: dummy
+ $(MAKE) linuxsubdirs SUBDIRS=arch/arm/mm
+
+MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
+zImage: vmlinux
+ @$(MAKEBOOT) zImage
+
+zinstall: vmlinux
+ @$(MAKEBOOT) zinstall
+
+Image: vmlinux
+ @$(MAKEBOOT) Image
+
+install: vmlinux
+ @$(MAKEBOOT) install
+
+# My testing targets (that short circuit a few dependencies)
+#
+zImg:; @$(MAKEBOOT) zImage
+Img:; @$(MAKEBOOT) Image
+i:; @$(MAKEBOOT) install
+zi:; @$(MAKEBOOT) zinstall
+
+archclean:
+ @$(MAKEBOOT) clean
+ @$(MAKE) -C arch/arm/lib clean
+
+archdep:
+ @$(MAKEBOOT) dep
+sed -e /^MACHINE..*=/s,= .*,= rpc,;/^PROCESSOR..*=/s,= .*,= armv, linux/arch/arm/Makefile.normal
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
new file mode 100644
index 000000000..e6050bf13
--- /dev/null
+++ b/arch/arm/boot/Makefile
@@ -0,0 +1,35 @@
+#
+# arch/arm/boot/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995, 1996 Russell King
+#
+
+SYSTEM =$(TOPDIR)/vmlinux
+
+Image: $(CONFIGURE) $(SYSTEM)
+ $(OBJCOPY) $(SYSTEM) $@
+
+zImage: $(CONFIGURE) compressed/vmlinux
+ $(OBJCOPY) compressed/vmlinux $@
+
+compressed/vmlinux: $(TOPDIR)/vmlinux dep
+ @$(MAKE) -C compressed vmlinux
+
+install: $(CONFIGURE) Image
+ sh ./install.sh $(VERSION).$(PATCHLEVEL).$(SUBLEVEL) Image $(TOPDIR)/System.map "$(INSTALL_PATH)"
+
+zinstall: $(CONFIGURE) zImage
+ sh ./install.sh $(VERSION).$(PATCHLEVEL).$(SUBLEVEL) zImage $(TOPDIR)/System.map "$(INSTALL_PATH)"
+
+tools/build: tools/build.c
+ $(HOSTCC) $(HOSTCFLAGS) -o $@ $< -I$(TOPDIR)/include
+
+clean:
+ rm -f Image zImage tools/build
+ @$(MAKE) -C compressed clean
+
+dep:
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
new file mode 100644
index 000000000..8e49f5dd0
--- /dev/null
+++ b/arch/arm/boot/compressed/Makefile
@@ -0,0 +1,33 @@
+#
+# linux/arch/arm/boot/compressed/Makefile
+#
+# create a compressed vmlinuz image from the original vmlinux
+#
+# With this config, max compressed image size = 640k
+# Uncompressed image size = 1.3M (text+data)
+
+SYSTEM =$(TOPDIR)/vmlinux
+HEAD =$(COMPRESSED_HEAD)
+OBJS =$(HEAD) misc.o $(COMPRESSED_EXTRA)
+CFLAGS =-O2 -DSTDC_HEADERS $(CFLAGS_PROC)
+ARFLAGS =rc
+
+all: vmlinux
+
+vmlinux: piggy.o $(OBJS)
+ $(LD) $(ZLINKFLAGS) -o vmlinux $(OBJS) piggy.o
+
+$(HEAD): $(HEAD:.o=.S)
+ $(CC) -traditional -DLOADADDR=$(ZRELADDR) -c $(HEAD:.o=.S)
+
+piggy.o: $(SYSTEM)
+ tmppiggy=_tmp_$$$$piggy; \
+ rm -f $$tmppiggy $$tmppiggy.gz $$tmppiggy.lnk; \
+ $(OBJCOPY) $(SYSTEM) $$tmppiggy; \
+ gzip -f -9 < $$tmppiggy > $$tmppiggy.gz; \
+ echo "SECTIONS { .data : { input_len = .; LONG(input_data_end - input_data) input_data = .; *(.data) input_data_end = .; }}" > $$tmppiggy.lnk; \
+ $(LD) -m elf_arm -r -o piggy.o -b binary $$tmppiggy.gz -b elf32-arm -T $$tmppiggy.lnk; \
+ rm -f $$tmppiggy $$tmppiggy.gz $$tmppiggy.lnk;
+
+clean:; rm -f vmlinux core
+
diff --git a/arch/arm/boot/compressed/Makefile.debug b/arch/arm/boot/compressed/Makefile.debug
new file mode 100644
index 000000000..3c87b0569
--- /dev/null
+++ b/arch/arm/boot/compressed/Makefile.debug
@@ -0,0 +1,16 @@
+#
+# linux/arch/arm/boot/compressed/Makefile
+#
+# create a compressed vmlinux image from the original vmlinux
+#
+
+COMPRESSED_EXTRA=../../lib/ll_char_wr.o
+OBJECTS=misc-debug.o $(COMPRESSED_EXTRA)
+
+CFLAGS=-D__KERNEL__ -O2 -DSTDC_HEADERS -DSTANDALONE_DEBUG -Wall -I../../../../include -c
+
+test-gzip: piggy.o $(OBJECTS)
+ $(CC) -o $@ $(OBJECTS) piggy.o
+
+misc-debug.o: misc.c
+ $(CC) $(CFLAGS) -o $@ misc.c
diff --git a/arch/arm/boot/compressed/head-nexuspci.S b/arch/arm/boot/compressed/head-nexuspci.S
new file mode 100644
index 000000000..92840fbda
--- /dev/null
+++ b/arch/arm/boot/compressed/head-nexuspci.S
@@ -0,0 +1,95 @@
+/*
+ * linux/arch/arm/boot/compressed/head-nexuspci.S
+ *
+ * Copyright (C) 1996 Philip Blundell
+ */
+
+#define ARM_CP p15
+#define ARM610_REG_CONTROL cr1
+#define ARM_REG_ZERO cr0
+
+ .text
+
+start: b skip1
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+ b go_uncompress
+skip1: mov sp, #0x40000000
+ add sp, sp, #0x200000
+ mov r2, #0x20000000
+ mov r1, #0x1a
+ str r1, [r2]
+
+ MOV r0, #0x30
+ MCR ARM_CP, 0, r0, ARM610_REG_CONTROL, ARM_REG_ZERO
+
+ mov r2, #0x10000000
+
+ mov r1, #42
+ strb r1, [r2, #8]
+
+ mov r1, #48
+ strb r1, [r2, #8]
+
+ mov r1, #16
+ strb r1, [r2, #8]
+
+ mov r1, #0x93
+ strb r1, [r2, #0]
+ mov r1, #0x17
+ strb r1, [r2, #0]
+
+ mov r1, #0xbb
+ strb r1, [r2, #0x4]
+
+ mov r1, #0x78
+ strb r1, [r2, #0x10]
+
+ mov r1, #160
+ strb r1, [r2, #0x8]
+
+ mov r1, #5
+ strb r1, [r2, #0x8]
+
+ mov r0, #0x50
+ bl _ll_write_char
+
+ mov r4, #0x40000000
+ mov r1, #0x00200000
+ add r4, r4, r1
+copylp:
+ ldr r3, [r1]
+ str r3, [r4, r1]
+ subs r1, r1, #4
+ bne copylp
+
+ add pc, r4, #0x28
+
+
+/*
+ * Uncompress the kernel
+ */
+go_uncompress:
+ mov r0, #0x40000000
+ add r0, r0, #0x300000
+ bl _decompress_kernel
+
+ mov r0, #0x40000000
+ add r1, r0, #0x300000
+ mov r2, #0x100000
+
+clp2: ldr r3, [r1, r2]
+ str r3, [r0, r2]
+ subs r2, r2, #4
+ bne clp2
+
+ mov r2, #0x40000000
+ mov r0, #0
+ mov r1, #3
+ add pc, r2, #0x20 @ call via EXEC entry
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
new file mode 100644
index 000000000..98853511b
--- /dev/null
+++ b/arch/arm/boot/compressed/head.S
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/arm/boot/compressed/head.S
+ *
+ * Copyright (C) 1996,1997,1998 Russell King
+ */
+#include <linux/linkage.h>
+
+ .text
+/*
+ * sort out different calling conventions
+ */
+ .align
+ .globl _start
+_start:
+start: mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ mov r0, r0
+ teq r0, #0
+ beq 2f
+ mov r4, #0x02000000
+ add r4, r4, #0x7C000
+ mov r3, #0x4000
+ sub r3, r3, #4
+1: ldmia r0!, {r5 - r12}
+ stmia r4!, {r5 - r12}
+ subs r3, r3, #32
+ bpl 1b
+2: adr r2, LC0
+ ldmia r2, {r2, r3, r4, r5, r6, sp}
+ add r2, r2, #3
+ add r3, r3, #3
+ add sp, sp, #3
+ bic r2, r2, #3
+ bic r3, r3, #3
+ bic sp, sp, #3
+ adr r7, start
+ sub r6, r7, r6
+/*
+ * Relocate pointers
+ */
+ add r2, r2, r6
+ add r3, r3, r6
+ add r5, r5, r6
+ add sp, sp, r6
+/*
+ * Clear zero-init
+ */
+ mov r6, #0
+1: str r6, [r2], #4
+ cmp r2, r3
+ blt 1b
+ str r1, [r5] @ save architecture
+/*
+ * Uncompress the kernel
+ */
+ mov r1, #0x8000
+ add r2, r2, r1, lsl #1 @ Add 64k for malloc
+ sub r1, r1, #1
+ add r2, r2, r1
+ bic r5, r2, r1 @ decompress kernel to after end of the compressed
+ mov r0, r5
+ bl SYMBOL_NAME(decompress_kernel)
+ add r0, r0, #7
+ bic r2, r0, #7
+/*
+ * Now move the kernel to the correct location (r5 -> r4, len r0)
+ */
+ mov r0, r4 @ r0 = start of real kernel
+ mov r1, r5 @ r1 = start of kernel image
+ add r3, r5, r2 @ r3 = end of kernel
+ adr r4, movecode
+ adr r5, movecodeend
+1: ldmia r4!, {r6 - r12, lr}
+ stmia r3!, {r6 - r12, lr}
+ cmp r4, r5
+ blt 1b
+ mrc p15, 0, r5, c0, c0
+ eor r5, r5, #0x44 << 24
+ eor r5, r5, #0x01 << 16
+ eor r5, r5, #0xa1 << 8
+ movs r5, r5, lsr #4
+ mov r5, #0
+ mcreq p15, 0, r5, c7, c5, 0 @ flush I cache
+ ldr r5, LC0 + 12 @ get architecture
+ ldr r5, [r5]
+ add pc, r1, r2 @ Call move code
+
+/*
+ * r0 = length, r1 = to, r2 = from
+ */
+movecode: add r3, r1, r2
+ mov r4, r0
+1: ldmia r1!, {r6 - r12, lr}
+ stmia r0!, {r6 - r12, lr}
+ cmp r1, r3
+ blt 1b
+ mrc p15, 0, r0, c0, c0
+ eor r0, r0, #0x44 << 24
+ eor r0, r0, #0x01 << 16
+ eor r0, r0, #0xa1 << 8
+ movs r0, r0, lsr #4
+ mov r0, #0
+ mcreq p15, 0, r0, c7, c5, 0 @ flush I cache
+ mov r1, r5 @ call kernel correctly
+ mov pc, r4 @ call via EXEC entry
+movecodeend:
+
+LC0: .word SYMBOL_NAME(_edata)
+ .word SYMBOL_NAME(_end)
+ .word LOADADDR
+ .word SYMBOL_NAME(architecture)
+ .word start
+ .word SYMBOL_NAME(user_stack)+4096
+ .align
+
+ .bss
+SYMBOL_NAME(architecture):
+ .space 4
+ .align
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
new file mode 100644
index 000000000..181583b75
--- /dev/null
+++ b/arch/arm/boot/compressed/misc.c
@@ -0,0 +1,308 @@
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ *
+ * Modified for ARM Linux by Russell King
+ */
+
+#include <asm/uaccess.h>
+#include <asm/arch/uncompress.h>
+#include <asm/proc/uncompress.h>
+
+#ifdef STANDALONE_DEBUG
+#define puts printf
+#endif
+
+#define __ptr_t void *
+
+/*
+ * Optimised C version of memzero for the ARM.
+ */
+extern __inline__ __ptr_t __memzero (__ptr_t s, size_t n)
+{
+ union { void *vp; unsigned long *ulp; unsigned char *ucp; } u;
+ int i;
+
+ u.vp = s;
+
+ for (i = n >> 5; i > 0; i--) {
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ }
+
+ if (n & 1 << 4) {
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ }
+
+ if (n & 1 << 3) {
+ *u.ulp++ = 0;
+ *u.ulp++ = 0;
+ }
+
+ if (n & 1 << 2)
+ *u.ulp++ = 0;
+
+ if (n & 1 << 1) {
+ *u.ucp++ = 0;
+ *u.ucp++ = 0;
+ }
+
+ if (n & 1)
+ *u.ucp++ = 0;
+ return s;
+}
+
+#define memzero(s,n) __memzero(s,n)
+
+extern __inline__ __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
+ size_t __n)
+{
+ int i = 0;
+ unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
+
+ for (i = __n >> 3; i > 0; i--) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1 << 2) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1 << 1) {
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1)
+ *d++ = *s++;
+
+ return __dest;
+}
+
+/*
+ * gzip delarations
+ */
+#define OF(args) args
+#define STATIC static
+
+typedef unsigned char uch;
+typedef unsigned short ush;
+typedef unsigned long ulg;
+
+#define WSIZE 0x8000 /* Window size must be at least 32k, */
+ /* and a power of two */
+
+static uch *inbuf; /* input buffer */
+static uch window[WSIZE]; /* Sliding window buffer */
+
+static unsigned insize; /* valid bytes in inbuf */
+static unsigned inptr; /* index of next byte to be processed in inbuf */
+static unsigned outcnt; /* bytes in output buffer */
+
+/* gzip flag byte */
+#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
+#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
+#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
+#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
+#define COMMENT 0x10 /* bit 4 set: file comment present */
+#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
+#define RESERVED 0xC0 /* bit 6,7: reserved */
+
+#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+static int fill_inbuf(void);
+static void flush_window(void);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+extern char input_data[];
+extern int input_len;
+
+static uch *output_data;
+static ulg output_ptr;
+static ulg bytes_out = 0;
+
+static void *malloc(int size);
+static void free(void *where);
+static void error(char *m);
+static void gzip_mark(void **);
+static void gzip_release(void **);
+
+static void puts(const char *);
+
+extern int end;
+static ulg free_mem_ptr;
+static ulg free_mem_ptr_end;
+
+#define HEAP_SIZE 0x2000
+
+#include "../../../../lib/inflate.c"
+
+#ifndef STANDALONE_DEBUG
+static void *malloc(int size)
+{
+ void *p;
+
+ if (size <0) error("Malloc error\n");
+ if (free_mem_ptr <= 0) error("Memory error\n");
+
+ free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */
+
+ p = (void *)free_mem_ptr;
+ free_mem_ptr += size;
+
+ if (free_mem_ptr >= free_mem_ptr_end)
+ error("Out of memory");
+ return p;
+}
+
+static void free(void *where)
+{ /* gzip_mark & gzip_release do the free */
+}
+
+static void gzip_mark(void **ptr)
+{
+ *ptr = (void *) free_mem_ptr;
+}
+
+static void gzip_release(void **ptr)
+{
+ free_mem_ptr = (long) *ptr;
+}
+#else
+static void gzip_mark(void **ptr)
+{
+}
+
+static void gzip_release(void **ptr)
+{
+}
+#endif
+
+/* ===========================================================================
+ * Fill the input buffer. This is called only when the buffer is empty
+ * and at least one byte is really needed.
+ */
+int fill_inbuf()
+{
+ if (insize != 0)
+ error("ran out of input data\n");
+
+ inbuf = input_data;
+ insize = input_len;
+ inptr = 1;
+ return inbuf[0];
+}
+
+/* ===========================================================================
+ * Write the output window window[0..outcnt-1] and update crc and bytes_out.
+ * (Used for the decompressed data only.)
+ */
+void flush_window()
+{
+ ulg c = crc;
+ unsigned n;
+ uch *in, *out, ch;
+
+ in = window;
+ out = &output_data[output_ptr];
+ for (n = 0; n < outcnt; n++) {
+ ch = *out++ = *in++;
+ c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
+ }
+ crc = c;
+ bytes_out += (ulg)outcnt;
+ output_ptr += (ulg)outcnt;
+ outcnt = 0;
+}
+
+static void error(char *x)
+{
+ int ptr;
+
+ puts("\n\n");
+ puts(x);
+ puts("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+#define STACK_SIZE (4096)
+
+ulg user_stack [STACK_SIZE];
+
+#ifndef STANDALONE_DEBUG
+
+ulg decompress_kernel(ulg output_start)
+{
+ free_mem_ptr = (ulg)&end;
+ free_mem_ptr_end = output_start;
+
+ proc_decomp_setup ();
+ arch_decomp_setup ();
+
+ output_data = (uch *)output_start; /* Points to kernel start */
+
+ makecrc();
+ puts("Uncompressing Linux...");
+ gunzip();
+ puts("done.\nNow booting the kernel\n");
+ return output_ptr;
+}
+#else
+
+char output_buffer[1500*1024];
+
+int main()
+{
+ output_data = output_buffer;
+
+ makecrc();
+ puts("Uncompressing Linux...");
+ gunzip();
+ puts("done.\n");
+ return 0;
+}
+#endif
+
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
new file mode 100644
index 000000000..133eae430
--- /dev/null
+++ b/arch/arm/boot/install.sh
@@ -0,0 +1,61 @@
+#!/bin/sh
+#
+# arch/arm/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for arm architecture
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+# User may have a custom install script
+
+if [ -x /sbin/installkernel ]; then
+ exec /sbin/installkernel "$@"
+fi
+
+if [ "$2" = "zImage" ]; then
+# Compressed install
+ echo "Installing compressed kernel"
+ if [ -f $4/vmlinuz-$1 ]; then
+ mv $4/vmlinuz-$1 $4/vmlinuz.old
+ fi
+
+ if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.old
+ fi
+
+ cat $2 > $4/vmlinuz-$1
+ cp $3 $4/System.map-$1
+else
+# Normal install
+ echo "Installing normal kernel"
+ if [ -f $4/vmlinux-$1 ]; then
+ mv $4/vmlinux-$1 $4/vmlinux.old
+ fi
+
+ if [ -f $4/System.map ]; then
+ mv $4/System.map $4/System.old
+ fi
+
+ cat $2 > $4/vmlinux-$1
+ cp $3 $4/System.map
+fi
+
+if [ -x /sbin/loadmap ]; then
+ /sbin/loadmap --rdev /dev/ima
+else
+ echo "You have to install it yourself"
+fi
diff --git a/arch/arm/config.in b/arch/arm/config.in
new file mode 100644
index 000000000..2b5303196
--- /dev/null
+++ b/arch/arm/config.in
@@ -0,0 +1,145 @@
+#
+# For a description of the syntax of this configuration file,
+# see the Configure script.
+#
+mainmenu_name "Linux Kernel Configuration"
+
+define_bool CONFIG_ARM y
+
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
+
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
+ bool 'Kernel daemon support (e.g. autoload of modules)' CONFIG_KERNELD
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'General setup'
+choice 'ARM system type' \
+ "Archimedes CONFIG_ARCH_ARC \
+ A5000 CONFIG_ARCH_A5K \
+ RiscPC CONFIG_ARCH_RPC \
+ EBSA-110 CONFIG_ARCH_EBSA110 \
+ NexusPCI CONFIG_ARCH_NEXUSPCI" RiscPC
+if [ "$CONFIG_ARCH_ARC" = "y" -o "$CONFIG_ARCH_A5K" = "y" -o "$CONFIG_ARCH_RPC" = "y" ]; then
+ define_bool CONFIG_ARCH_ACORN y
+else
+ define_bool CONFIG_ARCH_ACORN n
+fi
+if [ "$CONFIG_ARCH_NEXUSPCI" = "y" ]; then
+ define_bool CONFIG_PCI y
+else
+ define_bool CONFIG_PCI n
+fi
+if [ "$CONFIG_ARCH_NEXUSPCI" = "y" -o "$CONFIG_ARCH_EBSA110" = "y" ]; then
+ define_bool CONFIG_CPU_SA110 y
+else
+ if [ "$CONFIG_ARCH_A5K" = "y" ]; then
+ define_bool CONFIG_CPU_ARM3 y
+ else
+ choice 'ARM cpu type' \
+ "ARM2 CONFIG_CPU_ARM2 \
+ ARM3 CONFIG_CPU_ARM3 \
+ ARM6/7 CONFIG_CPU_ARM6 \
+ StrongARM CONFIG_CPU_SA110" StrongARM
+ fi
+fi
+bool 'Compile kernel with frame pointer (for useful debugging)' CONFIG_FRAME_POINTER
+bool 'Use new compilation options (for GCC 2.8)' CONFIG_BINUTILS_NEW
+bool 'Debug kernel errors' CONFIG_DEBUG_ERRORS
+bool 'Networking support' CONFIG_NET
+bool 'System V IPC' CONFIG_SYSVIPC
+bool 'Sysctl support' CONFIG_SYSCTL
+tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
+tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+# tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA
+ define_bool CONFIG_BINFMT_JAVA n
+fi
+tristate 'Parallel port support' CONFIG_PARPORT
+if [ "$CONFIG_PARPORT" != "n" ]; then
+ dep_tristate ' PC-style hardware' CONFIG_PARPORT_PC $CONFIG_PARPORT
+fi
+endmenu
+
+source arch/arm/drivers/block/Config.in
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
+fi
+
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support?' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ source arch/arm/drivers/scsi/Config.in
+fi
+endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Network device support'
+
+ bool 'Network device support?' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ source arch/arm/drivers/net/Config.in
+ fi
+ endmenu
+fi
+
+# mainmenu_option next_comment
+# comment 'ISDN subsystem'
+#
+# tristate 'ISDN support' CONFIG_ISDN
+# if [ "$CONFIG_ISDN" != "n" ]; then
+# source drivers/isdn/Config.in
+# fi
+# endmenu
+
+# Conditionally compile in the Uniform CD-ROM driver
+if [ "$CONFIG_BLK_DEV_IDECD" = "y" -o "$CONFIG_BLK_DEV_SR" = "y" ]; then
+ define_bool CONFIG_CDROM y
+else
+ if [ "$CONFIG_BLK_DEV_IDECD" = "m" -o "$CONFIG_BLK_DEV_SR" = "m" ]; then
+ define_bool CONFIG_CDROM m
+ else
+ define_bool CONFIG_CDROM n
+ fi
+fi
+
+source fs/Config.in
+
+source fs/nls/Config.in
+
+source arch/arm/drivers/char/Config.in
+
+if [ "$CONFIG_ARCH_ACORN" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Sound'
+
+ tristate 'Sound support' CONFIG_SOUND
+ if [ "$CONFIG_SOUND" != "n" ]; then
+ source arch/arm/drivers/sound/Config.in
+ fi
+ endmenu
+fi
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC
+bool 'Kernel profiling support' CONFIG_PROFILE
+if [ "$CONFIG_PROFILE" = "y" ]; then
+ int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
+fi
+bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
+endmenu
diff --git a/arch/arm/defconfig b/arch/arm/defconfig
new file mode 100644
index 000000000..c3a14ebcd
--- /dev/null
+++ b/arch/arm/defconfig
@@ -0,0 +1,264 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_ARM=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_KERNELD=y
+
+#
+# General setup
+#
+# CONFIG_ARCH_ARC is not set
+# CONFIG_ARCH_A5K is not set
+CONFIG_ARCH_RPC=y
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_NEXUSPCI is not set
+CONFIG_ARCH_ACORN=y
+# CONFIG_PCI is not set
+# CONFIG_CPU_ARM2 is not set
+# CONFIG_CPU_ARM3 is not set
+# CONFIG_CPU_ARM6 is not set
+CONFIG_CPU_SA110=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_BINUTILS_NEW is not set
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSCTL=y
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_ELF=m
+# CONFIG_BINFMT_JAVA is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+
+#
+# Floppy, IDE, and other block devices
+#
+CONFIG_BLK_DEV_FD=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+# CONFIG_BLK_DEV_HD_IDE is not set
+CONFIG_BLK_DEV_IDEDISK=y
+CONFIG_BLK_DEV_IDECD=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_BLK_DEV_IDESCSI is not set
+# CONFIG_BLK_DEV_IDE_PCMCIA is not set
+CONFIG_BLK_DEV_IDE_CARDS=y
+CONFIG_BLK_DEV_IDE_ICSIDE=y
+# CONFIG_BLK_DEV_IDE_RAPIDE is not set
+# CONFIG_BLK_DEV_XD is not set
+
+#
+# Additional Block Devices
+#
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_MD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PARIDE_PARPORT=y
+# CONFIG_PARIDE is not set
+CONFIG_BLK_DEV_PART=y
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# Networking options
+#
+CONFIG_PACKET=m
+# CONFIG_NETLINK is not set
+# CONFIG_FIREWALL is not set
+# CONFIG_NET_ALIAS is not set
+# CONFIG_FILTER is not set
+CONFIG_UNIX=y
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_IP_ACCT is not set
+# CONFIG_IP_MASQUERADE is not set
+# CONFIG_IP_ROUTER is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_ALIAS is not set
+# CONFIG_SYN_COOKIES is not set
+
+#
+# (it is safe to leave these untouched)
+#
+# CONFIG_INET_RARP is not set
+CONFIG_IP_NOSR=y
+# CONFIG_SKB_LARGE is not set
+# CONFIG_IPV6 is not set
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_LLC is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_CPU_IS_SLOW is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+CONFIG_BLK_DEV_SR=y
+# CONFIG_BLK_DEV_SR_VENDOR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI low-level drivers
+#
+CONFIG_SCSI_ACORNSCSI_3=m
+CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE=y
+CONFIG_SCSI_ACORNSCSI_SYNC=y
+CONFIG_SCSI_CUMANA_2=m
+CONFIG_SCSI_POWERTECSCSI=m
+
+#
+# The following drives are not fully supported
+#
+CONFIG_SCSI_CUMANA_1=m
+CONFIG_SCSI_ECOSCSI=m
+CONFIG_SCSI_OAK1=m
+CONFIG_SCSI_PPA=m
+CONFIG_SCSI_PPA_HAVE_PEDANTIC=2
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_PPP=m
+
+#
+# CCP compressors for PPP are only built as modules.
+#
+# CONFIG_SLIP is not set
+CONFIG_ETHER1=m
+CONFIG_ETHER3=m
+CONFIG_ETHERH=m
+CONFIG_CDROM=y
+
+#
+# Filesystems
+#
+# CONFIG_QUOTA is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_EXT2_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=y
+CONFIG_PROC_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFSD=y
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+# CONFIG_CODA_FS is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_NTFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_ADFS_FS=y
+# CONFIG_MAC_PARTITION is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_KOI8_R is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_SERIAL=y
+# CONFIG_SERIAL_CONSOLE is not set
+# CONFIG_SERIAL_EXTENDED is not set
+CONFIG_ATOMWIDE_SERIAL=y
+CONFIG_DUALSP_SERIAL=y
+CONFIG_MOUSE=y
+CONFIG_PRINTER=m
+CONFIG_PRINTER_READBACK=y
+# CONFIG_UMISC is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_RPCMOUSE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_VIDC is not set
+# CONFIG_AUDIO is not set
+# DSP_BUFFSIZE is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PROFILE is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
new file mode 100644
index 000000000..90e71345a
--- /dev/null
+++ b/arch/arm/kernel/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+
+HEAD_OBJ = head-$(PROCESSOR).o
+ENTRY_OBJ = entry-$(PROCESSOR).o
+
+O_TARGET := kernel.o
+O_OBJS := $(ENTRY_OBJ) ioport.o irq.o process.o ptrace.o signal.o sys_arm.o time.o traps.o
+
+all: kernel.o $(HEAD_OBJ) init_task.o
+
+ifeq ($(CONFIG_MODULES),y)
+OX_OBJS = armksyms.o
+else
+O_OBJS += armksyms.o
+endif
+
+ifdef CONFIG_ARCH_ACORN
+ O_OBJS += setup.o ecard.o iic.o dma.o
+ ifdef CONFIG_ARCH_ARC
+ O_OBJS += oldlatches.o
+ endif
+endif
+
+ifeq ($(MACHINE),ebsa110)
+ O_OBJS += setup-ebsa110.o dma.o
+endif
+
+ifeq ($(MACHINE),nexuspci)
+ O_OBJS += setup-ebsa110.o
+endif
+
+$(HEAD_OBJ): $(HEAD_OBJ:.o=.S)
+ $(CC) -D__ASSEMBLY__ -traditional -c $(HEAD_OBJ:.o=.S) -o $@
+
+include $(TOPDIR)/Rules.make
+
+$(ENTRY_OBJ:.o=.S): ../lib/constants.h
+
+.PHONY: ../lib/constants.h
+
+../lib/constants.h:
+ $(MAKE) -C ../lib constants.h
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
new file mode 100644
index 000000000..19666ac1e
--- /dev/null
+++ b/arch/arm/kernel/armksyms.c
@@ -0,0 +1,178 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#include <asm/ecard.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+extern void dump_thread(struct pt_regs *, struct user *);
+extern int dump_fpu(struct pt_regs *, struct user_fp_struct *);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+ * doesn't really matter since they're not versioned).
+ */
+extern void __gcc_bcmp(void);
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __cmpdi2(void);
+extern void __divdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __moddi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __negdi2(void);
+extern void __ucmpdi2(void);
+extern void __udivdi3(void);
+extern void __udivmoddi4(void);
+extern void __udivsi3(void);
+extern void __umoddi3(void);
+extern void __umodsi3(void);
+
+extern void inswb(unsigned int port, void *to, int len);
+extern void outswb(unsigned int port, const void *to, int len);
+
+/*
+ * floating point math emulator support.
+ * These will not change. If they do, then a new version
+ * of the emulator will have to be compiled...
+ * fp_current is never actually dereferenced - it is just
+ * used as a pointer to pass back for send_sig().
+ */
+extern void (*fp_save)(unsigned char *);
+extern void (*fp_restore)(unsigned char *);
+extern void fp_setup(void);
+extern void fpreturn(void);
+extern void fpundefinstr(void);
+extern void fp_enter(void);
+extern void fp_printk(void);
+extern struct task_struct *fp_current;
+extern void fp_send_sig(int);
+
+/* platform dependent support */
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(udelay);
+EXPORT_SYMBOL(dma_str);
+EXPORT_SYMBOL(xchg_str);
+
+/* expansion card support */
+#ifdef CONFIG_ARCH_ACORN
+EXPORT_SYMBOL(ecard_startfind);
+EXPORT_SYMBOL(ecard_find);
+EXPORT_SYMBOL(ecard_readchunk);
+EXPORT_SYMBOL(ecard_address);
+#endif
+
+/* processor dependencies */
+EXPORT_SYMBOL(processor);
+
+/* io */
+EXPORT_SYMBOL(outswb);
+EXPORT_SYMBOL(outsw);
+EXPORT_SYMBOL(inswb);
+EXPORT_SYMBOL(insw);
+
+#ifdef CONFIG_ARCH_RPC
+EXPORT_SYMBOL(drambank);
+#endif
+
+/* dma */
+EXPORT_SYMBOL(enable_dma);
+EXPORT_SYMBOL(set_dma_mode);
+EXPORT_SYMBOL(set_dma_addr);
+EXPORT_SYMBOL(set_dma_count);
+EXPORT_SYMBOL(get_dma_residue);
+
+/*
+ * floating point math emulator support.
+ * These symbols will never change their calling convention...
+ */
+EXPORT_SYMBOL_NOVERS(fpreturn);
+EXPORT_SYMBOL_NOVERS(fpundefinstr);
+EXPORT_SYMBOL_NOVERS(fp_enter);
+EXPORT_SYMBOL_NOVERS(fp_save);
+EXPORT_SYMBOL_NOVERS(fp_restore);
+EXPORT_SYMBOL_NOVERS(fp_setup);
+
+const char __kstrtab_fp_printk[] __attribute__((section(".kstrtab"))) = __MODULE_STRING(fp_printk);
+const struct module_symbol __ksymtab_fp_printk __attribute__((section("__ksymtab"))) =
+{ (unsigned long)&printk, __kstrtab_fp_printk };
+
+const char __kstrtab_fp_send_sig[] __attribute__((section(".kstrtab"))) = __MODULE_STRING(fp_send_sig);
+const struct module_symbol __ksymtab_fp_send_sig __attribute__((section("__ksymtab"))) =
+{ (unsigned long)&send_sig, __kstrtab_fp_send_sig };
+
+//EXPORT_SYMBOL_NOVERS(fp_current);
+
+ /*
+ * string / mem functions
+ */
+EXPORT_SYMBOL_NOVERS(strcpy);
+EXPORT_SYMBOL_NOVERS(strncpy);
+EXPORT_SYMBOL_NOVERS(strcat);
+EXPORT_SYMBOL_NOVERS(strncat);
+EXPORT_SYMBOL_NOVERS(strcmp);
+EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL_NOVERS(strchr);
+EXPORT_SYMBOL_NOVERS(strlen);
+EXPORT_SYMBOL_NOVERS(strnlen);
+EXPORT_SYMBOL_NOVERS(strspn);
+EXPORT_SYMBOL_NOVERS(strpbrk);
+EXPORT_SYMBOL_NOVERS(strtok);
+EXPORT_SYMBOL_NOVERS(strrchr);
+EXPORT_SYMBOL_NOVERS(memset);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(memcmp);
+EXPORT_SYMBOL_NOVERS(memscan);
+EXPORT_SYMBOL_NOVERS(memzero);
+
+ /* user mem (segment) */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_strlen_user);
+#elif defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+EXPORT_SYMBOL(uaccess_kernel);
+EXPORT_SYMBOL(uaccess_user);
+#endif
+
+ /* gcc lib functions */
+EXPORT_SYMBOL_NOVERS(__gcc_bcmp);
+EXPORT_SYMBOL_NOVERS(__ashldi3);
+EXPORT_SYMBOL_NOVERS(__ashrdi3);
+EXPORT_SYMBOL_NOVERS(__cmpdi2);
+EXPORT_SYMBOL_NOVERS(__divdi3);
+EXPORT_SYMBOL_NOVERS(__divsi3);
+EXPORT_SYMBOL_NOVERS(__lshrdi3);
+EXPORT_SYMBOL_NOVERS(__moddi3);
+EXPORT_SYMBOL_NOVERS(__modsi3);
+EXPORT_SYMBOL_NOVERS(__muldi3);
+EXPORT_SYMBOL_NOVERS(__negdi2);
+EXPORT_SYMBOL_NOVERS(__ucmpdi2);
+EXPORT_SYMBOL_NOVERS(__udivdi3);
+EXPORT_SYMBOL_NOVERS(__udivmoddi4);
+EXPORT_SYMBOL_NOVERS(__udivsi3);
+EXPORT_SYMBOL_NOVERS(__umoddi3);
+EXPORT_SYMBOL_NOVERS(__umodsi3);
+
+ /* bitops */
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(change_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
+EXPORT_SYMBOL(find_next_zero_bit);
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
new file mode 100644
index 000000000..0d02eb85a
--- /dev/null
+++ b/arch/arm/kernel/calls.S
@@ -0,0 +1,194 @@
+/*
+ * linux/arch/arm/lib/calls.h
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#ifndef NR_SYSCALLS
+#define NR_syscalls 256
+#define NR_SYSCALLS 182
+#else
+
+/* 0 */ .long SYMBOL_NAME(sys_setup)
+ .long SYMBOL_NAME(sys_exit)
+ .long SYMBOL_NAME(sys_fork_wrapper)
+ .long SYMBOL_NAME(sys_read)
+ .long SYMBOL_NAME(sys_write)
+/* 5 */ .long SYMBOL_NAME(sys_open)
+ .long SYMBOL_NAME(sys_close)
+ .long SYMBOL_NAME(sys_waitpid)
+ .long SYMBOL_NAME(sys_creat)
+ .long SYMBOL_NAME(sys_link)
+/* 10 */ .long SYMBOL_NAME(sys_unlink)
+ .long SYMBOL_NAME(sys_execve_wrapper)
+ .long SYMBOL_NAME(sys_chdir)
+ .long SYMBOL_NAME(sys_time)
+ .long SYMBOL_NAME(sys_mknod)
+/* 15 */ .long SYMBOL_NAME(sys_chmod)
+ .long SYMBOL_NAME(sys_chown)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_break */
+ .long SYMBOL_NAME(sys_stat)
+ .long SYMBOL_NAME(sys_lseek)
+/* 20 */ .long SYMBOL_NAME(sys_getpid)
+ .long SYMBOL_NAME(sys_mount_wrapper)
+ .long SYMBOL_NAME(sys_umount)
+ .long SYMBOL_NAME(sys_setuid)
+ .long SYMBOL_NAME(sys_getuid)
+/* 25 */ .long SYMBOL_NAME(sys_stime)
+ .long SYMBOL_NAME(sys_ptrace)
+ .long SYMBOL_NAME(sys_alarm)
+ .long SYMBOL_NAME(sys_fstat)
+ .long SYMBOL_NAME(sys_pause)
+/* 30 */ .long SYMBOL_NAME(sys_utime)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_stty */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_getty */
+ .long SYMBOL_NAME(sys_access)
+ .long SYMBOL_NAME(sys_nice)
+/* 35 */ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_ftime */
+ .long SYMBOL_NAME(sys_sync)
+ .long SYMBOL_NAME(sys_kill)
+ .long SYMBOL_NAME(sys_rename)
+ .long SYMBOL_NAME(sys_mkdir)
+/* 40 */ .long SYMBOL_NAME(sys_rmdir)
+ .long SYMBOL_NAME(sys_dup)
+ .long SYMBOL_NAME(sys_pipe)
+ .long SYMBOL_NAME(sys_times)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_prof */
+/* 45 */ .long SYMBOL_NAME(sys_brk)
+ .long SYMBOL_NAME(sys_setgid)
+ .long SYMBOL_NAME(sys_getgid)
+ .long SYMBOL_NAME(sys_signal)
+ .long SYMBOL_NAME(sys_geteuid)
+/* 50 */ .long SYMBOL_NAME(sys_getegid)
+ .long SYMBOL_NAME(sys_acct)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_phys */
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_lock */
+ .long SYMBOL_NAME(sys_ioctl)
+/* 55 */ .long SYMBOL_NAME(sys_fcntl)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_mpx */
+ .long SYMBOL_NAME(sys_setpgid)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_ulimit */
+ .long SYMBOL_NAME(sys_olduname)
+/* 60 */ .long SYMBOL_NAME(sys_umask)
+ .long SYMBOL_NAME(sys_chroot)
+ .long SYMBOL_NAME(sys_ustat)
+ .long SYMBOL_NAME(sys_dup2)
+ .long SYMBOL_NAME(sys_getppid)
+/* 65 */ .long SYMBOL_NAME(sys_getpgrp)
+ .long SYMBOL_NAME(sys_setsid)
+ .long SYMBOL_NAME(sys_sigaction)
+ .long SYMBOL_NAME(sys_sgetmask)
+ .long SYMBOL_NAME(sys_ssetmask)
+/* 70 */ .long SYMBOL_NAME(sys_setreuid)
+ .long SYMBOL_NAME(sys_setregid)
+ .long SYMBOL_NAME(sys_sigsuspend_wrapper)
+ .long SYMBOL_NAME(sys_sigpending)
+ .long SYMBOL_NAME(sys_sethostname)
+/* 75 */ .long SYMBOL_NAME(sys_setrlimit)
+ .long SYMBOL_NAME(sys_getrlimit)
+ .long SYMBOL_NAME(sys_getrusage)
+ .long SYMBOL_NAME(sys_gettimeofday)
+ .long SYMBOL_NAME(sys_settimeofday)
+/* 80 */ .long SYMBOL_NAME(sys_getgroups)
+ .long SYMBOL_NAME(sys_setgroups)
+ .long SYMBOL_NAME(old_select)
+ .long SYMBOL_NAME(sys_symlink)
+ .long SYMBOL_NAME(sys_lstat)
+/* 85 */ .long SYMBOL_NAME(sys_readlink)
+ .long SYMBOL_NAME(sys_uselib)
+ .long SYMBOL_NAME(sys_swapon)
+ .long SYMBOL_NAME(sys_reboot)
+ .long SYMBOL_NAME(old_readdir)
+/* 90 */ .long SYMBOL_NAME(old_mmap)
+ .long SYMBOL_NAME(sys_munmap)
+ .long SYMBOL_NAME(sys_truncate)
+ .long SYMBOL_NAME(sys_ftruncate)
+ .long SYMBOL_NAME(sys_fchmod)
+/* 95 */ .long SYMBOL_NAME(sys_fchown)
+ .long SYMBOL_NAME(sys_getpriority)
+ .long SYMBOL_NAME(sys_setpriority)
+ .long SYMBOL_NAME(sys_ni_syscall) /* was sys_profil */
+ .long SYMBOL_NAME(sys_statfs)
+/* 100 */ .long SYMBOL_NAME(sys_fstatfs)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long _sys_ioperm */
+ .long SYMBOL_NAME(sys_socketcall)
+ .long SYMBOL_NAME(sys_syslog)
+ .long SYMBOL_NAME(sys_setitimer)
+/* 105 */ .long SYMBOL_NAME(sys_getitimer)
+ .long SYMBOL_NAME(sys_newstat)
+ .long SYMBOL_NAME(sys_newlstat)
+ .long SYMBOL_NAME(sys_newfstat)
+ .long SYMBOL_NAME(sys_uname)
+/* 110 */ .long SYMBOL_NAME(sys_iopl)
+ .long SYMBOL_NAME(sys_vhangup)
+ .long SYMBOL_NAME(sys_idle)
+ .long SYMBOL_NAME(sys_syscall) /* call a syscall */
+ .long SYMBOL_NAME(sys_wait4)
+/* 115 */ .long SYMBOL_NAME(sys_swapoff)
+ .long SYMBOL_NAME(sys_sysinfo)
+ .long SYMBOL_NAME(sys_ipc)
+ .long SYMBOL_NAME(sys_fsync)
+ .long SYMBOL_NAME(sys_sigreturn_wrapper)
+ .long SYMBOL_NAME(sys_clone_wapper)
+ .long SYMBOL_NAME(sys_setdomainname)
+ .long SYMBOL_NAME(sys_newuname)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long SYMBOL_NAME(sys_modify_ldt) */
+ .long SYMBOL_NAME(sys_adjtimex)
+/* 125 */ .long SYMBOL_NAME(sys_mprotect)
+ .long SYMBOL_NAME(sys_sigprocmask)
+ .long SYMBOL_NAME(sys_create_module)
+ .long SYMBOL_NAME(sys_init_module)
+ .long SYMBOL_NAME(sys_delete_module)
+/* 130 */ .long SYMBOL_NAME(sys_get_kernel_syms)
+ .long SYMBOL_NAME(sys_quotactl)
+ .long SYMBOL_NAME(sys_getpgid)
+ .long SYMBOL_NAME(sys_fchdir)
+ .long SYMBOL_NAME(sys_bdflush)
+/* 135 */ .long SYMBOL_NAME(sys_sysfs)
+ .long SYMBOL_NAME(sys_personality)
+ .long SYMBOL_NAME(sys_ni_syscall) /* .long _sys_afs_syscall */
+ .long SYMBOL_NAME(sys_setfsuid)
+ .long SYMBOL_NAME(sys_setfsgid)
+/* 140 */ .long SYMBOL_NAME(sys_llseek_wrapper)
+ .long SYMBOL_NAME(sys_getdents)
+ .long SYMBOL_NAME(sys_select)
+ .long SYMBOL_NAME(sys_flock)
+ .long SYMBOL_NAME(sys_msync)
+/* 145 */ .long SYMBOL_NAME(sys_readv)
+ .long SYMBOL_NAME(sys_writev)
+ .long SYMBOL_NAME(sys_getsid)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_ni_syscall)
+/* 150 */ .long SYMBOL_NAME(sys_mlock)
+ .long SYMBOL_NAME(sys_munlock)
+ .long SYMBOL_NAME(sys_mlockall)
+ .long SYMBOL_NAME(sys_munlockall)
+ .long SYMBOL_NAME(sys_sched_setparam)
+/* 155 */ .long SYMBOL_NAME(sys_sched_getparam)
+ .long SYMBOL_NAME(sys_sched_setscheduler)
+ .long SYMBOL_NAME(sys_sched_getscheduler)
+ .long SYMBOL_NAME(sys_sched_yield)
+ .long SYMBOL_NAME(sys_sched_get_priority_max)
+/* 160 */ .long SYMBOL_NAME(sys_sched_get_priority_min)
+ .long SYMBOL_NAME(sys_sched_rr_get_interval)
+ .long SYMBOL_NAME(sys_nanosleep)
+ .long SYMBOL_NAME(sys_mremap)
+ .long SYMBOL_NAME(sys_setresuid)
+/* 165 */ .long SYMBOL_NAME(sys_getresuid)
+ .long SYMBOL_NAME(sys_ni_syscall)
+ .long SYMBOL_NAME(sys_query_module)
+ .long SYMBOL_NAME(sys_poll)
+ .long SYMBOL_NAME(sys_nfsservctl)
+/* 170 */ .long SYMBOL_NAME(sys_setresgid)
+ .long SYMBOL_NAME(sys_getresgid)
+ .long SYMBOL_NAME(sys_prctl)
+ .long SYMBOL_NAME(sys_rt_sigreturn_wrapper)
+ .long SYMBOL_NAME(sys_rt_sigaction)
+/* 175 */ .long SYMBOL_NAME(sys_rt_sigprocmask)
+ .long SYMBOL_NAME(sys_rt_sigpending)
+ .long SYMBOL_NAME(sys_rt_sigtimedwait)
+ .long SYMBOL_NAME(sys_rt_sigqueueinfo)
+ .long SYMBOL_NAME(sys_rt_sigsuspend_wrapper)
+/* 180 */ .long SYMBOL_NAME(sys_pread)
+ .long SYMBOL_NAME(sys_pwrite)
+ .space (NR_syscalls - 182) * 4
+#endif
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
new file mode 100644
index 000000000..3c165c41d
--- /dev/null
+++ b/arch/arm/kernel/dma.c
@@ -0,0 +1,199 @@
+/*
+ * linux/arch/arm/kernel/dma.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/malloc.h>
+#include <linux/mman.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#define KERNEL_ARCH_DMA
+#include <asm/dma.h>
+
+static unsigned long dma_address[8];
+static unsigned long dma_count[8];
+static char dma_direction[8] = { -1, -1, -1, -1, -1, -1, -1};
+
+#if defined(CONFIG_ARCH_A5K) || defined(CONFIG_ARCH_RPC)
+#define DMA_PCIO
+#endif
+#if defined(CONFIG_ARCH_ARC) && defined(CONFIG_BLK_DEV_FD)
+#define DMA_OLD
+#endif
+
+void enable_dma (unsigned int dmanr)
+{
+ switch (dmanr) {
+#ifdef DMA_PCIO
+ case 2: {
+ void *fiqhandler_start;
+ unsigned int fiqhandler_length;
+ extern void floppy_fiqsetup (unsigned long len, unsigned long addr,
+ unsigned long port);
+ switch (dma_direction[dmanr]) {
+ case 1: {
+ extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
+ fiqhandler_start = &floppy_fiqin_start;
+ fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
+ break;
+ }
+ case 0: {
+ extern unsigned char floppy_fiqout_start, floppy_fiqout_end;
+ fiqhandler_start = &floppy_fiqout_start;
+ fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
+ break;
+ }
+ default:
+ printk ("enable_dma: dma%d not initialised\n", dmanr);
+ return;
+ }
+ memcpy ((void *)0x1c, fiqhandler_start, fiqhandler_length);
+ flush_page_to_ram(0);
+ floppy_fiqsetup (dma_count[dmanr], dma_address[dmanr], (int)PCIO_FLOPPYDMABASE);
+ enable_irq (64);
+ return;
+ }
+#endif
+#ifdef DMA_OLD
+ case 0: { /* Data DMA */
+ switch (dma_direction[dmanr]) {
+ case 1: /* read */
+ {
+ extern unsigned char fdc1772_dma_read, fdc1772_dma_read_end;
+ extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
+ unsigned long flags;
+#ifdef DEBUG
+ printk("enable_dma fdc1772 data read\n");
+#endif
+ save_flags(flags);
+ cliIF();
+
+ memcpy ((void *)0x1c, (void *)&fdc1772_dma_read,
+ &fdc1772_dma_read_end - &fdc1772_dma_read);
+ fdc1772_setupdma(dma_count[dmanr],dma_address[dmanr]); /* Sets data pointer up */
+ enable_irq (64);
+ restore_flags(flags);
+ }
+ break;
+
+ case 0: /* write */
+ {
+ extern unsigned char fdc1772_dma_write, fdc1772_dma_write_end;
+ extern void fdc1772_setupdma(unsigned int count,unsigned int addr);
+ unsigned long flags;
+
+#ifdef DEBUG
+ printk("enable_dma fdc1772 data write\n");
+#endif
+ save_flags(flags);
+ cliIF();
+ memcpy ((void *)0x1c, (void *)&fdc1772_dma_write,
+ &fdc1772_dma_write_end - &fdc1772_dma_write);
+ fdc1772_setupdma(dma_count[dmanr],dma_address[dmanr]); /* Sets data pointer up */
+ enable_irq (64);
+
+ restore_flags(flags);
+ }
+ break;
+ default:
+ printk ("enable_dma: dma%d not initialised\n", dmanr);
+ return;
+ }
+ }
+ break;
+
+ case 1: { /* Command end FIQ - actually just sets a flag */
+ /* Need to build a branch at the FIQ address */
+ extern void fdc1772_comendhandler(void);
+ unsigned long flags;
+
+ /*printk("enable_dma fdc1772 command end FIQ\n");*/
+ save_flags(flags);
+ cliIF();
+
+ *((unsigned int *)0x1c)=0xea000000 | (((unsigned int)fdc1772_comendhandler-(0x1c+8))/4); /* B fdc1772_comendhandler */
+
+ restore_flags(flags);
+ }
+ break;
+#endif
+ case DMA_0:
+ case DMA_1:
+ case DMA_2:
+ case DMA_3:
+ case DMA_S0:
+ case DMA_S1:
+ arch_enable_dma (dmanr - DMA_0);
+ break;
+
+ default:
+ printk ("enable_dma: dma %d not supported\n", dmanr);
+ }
+}
+
+void set_dma_mode (unsigned int dmanr, char mode)
+{
+ if (dmanr < 8) {
+ if (mode == DMA_MODE_READ)
+ dma_direction[dmanr] = 1;
+ else if (mode == DMA_MODE_WRITE)
+ dma_direction[dmanr] = 0;
+ else
+ printk ("set_dma_mode: dma%d: invalid mode %02X not supported\n",
+ dmanr, mode);
+ } else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_mode (dmanr - DMA_0, mode);
+ else
+ printk ("set_dma_mode: dma %d not supported\n", dmanr);
+}
+
+void set_dma_addr (unsigned int dmanr, unsigned int addr)
+{
+ if (dmanr < 8)
+ dma_address[dmanr] = (unsigned long)addr;
+ else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_addr (dmanr - DMA_0, addr);
+ else
+ printk ("set_dma_addr: dma %d not supported\n", dmanr);
+}
+
+void set_dma_count (unsigned int dmanr, unsigned int count)
+{
+ if (dmanr < 8)
+ dma_count[dmanr] = (unsigned long)count;
+ else if (dmanr < MAX_DMA_CHANNELS)
+ arch_set_dma_count (dmanr - DMA_0, count);
+ else
+ printk ("set_dma_count: dma %d not supported\n", dmanr);
+}
+
+int get_dma_residue (unsigned int dmanr)
+{
+ if (dmanr < 8) {
+ switch (dmanr) {
+#if defined(CONFIG_ARCH_A5K) || defined(CONFIG_ARCH_RPC)
+ case 2: {
+ extern int floppy_fiqresidual (void);
+ return floppy_fiqresidual ();
+ }
+#endif
+#if defined(CONFIG_ARCH_ARC) && defined(CONFIG_BLK_DEV_FD)
+ case 0: {
+ extern unsigned int fdc1772_bytestogo;
+ return fdc1772_bytestogo;
+ }
+#endif
+ default:
+ return -1;
+ }
+ } else if (dmanr < MAX_DMA_CHANNELS)
+ return arch_dma_count (dmanr - DMA_0);
+ return -1;
+}
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
new file mode 100644
index 000000000..cc18252b3
--- /dev/null
+++ b/arch/arm/kernel/ecard.c
@@ -0,0 +1,604 @@
+/*
+ * linux/arch/arm/kernel/ecard.c
+ *
+ * Find all installed expansion cards, and handle interrupts from them.
+ *
+ * Copyright 1995,1996,1997 Russell King
+ *
+ * Created from information from Acorns RiscOS3 PRMs
+ *
+ * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether podule slot.
+ * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
+ * 12-Sep-1997 RMK Created new handling of interrupt enables/disables - cards can
+ * now register their own routine to control interrupts (recommended).
+ * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled on reset from
+ * Linux. (Caused cards not to respond under RiscOS without hard reset).
+ */
+
+#define ECARD_C
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+
+#include <asm/irq-no.h>
+#include <asm/ecard.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+#include <asm/arch/irq.h>
+
+#ifdef CONFIG_ARCH_ARC
+#include <asm/arch/oldlatches.h>
+#else
+#define oldlatch_init()
+#endif
+
+#define BLACKLIST_NAME(m,p,s) { m, p, NULL, s }
+#define BLACKLIST_LOADER(m,p,l) { m, p, l, NULL }
+#define BLACKLIST_NOLOADER(m,p) { m, p, noloader, blacklisted_str }
+#define BUS_ADDR(x) ((((unsigned long)(x)) << 2) + IO_BASE)
+
+extern unsigned long atomwide_serial_loader[], oak_scsi_loader[], noloader[];
+static const char blacklisted_str[] = "*loader blacklisted - not 32-bit compliant*";
+
+static const struct expcard_blacklist {
+ unsigned short manufacturer;
+ unsigned short product;
+ const loader_t loader;
+ const char *type;
+} blacklist[] = {
+/* Cards without names */
+ BLACKLIST_NAME(MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1"),
+
+/* Cards with corrected loader */
+ BLACKLIST_LOADER(MANU_ATOMWIDE, PROD_ATOMWIDE_3PSERIAL, atomwide_serial_loader),
+ BLACKLIST_LOADER(MANU_OAK, PROD_OAK_SCSI, oak_scsi_loader),
+
+/* Unsupported cards with no loader */
+BLACKLIST_NOLOADER(MANU_ALSYSTEMS, PROD_ALSYS_SCSIATAPI),
+BLACKLIST_NOLOADER(MANU_MCS, PROD_MCS_CONNECT32)
+};
+
+extern int setup_arm_irq(int, struct irqaction *);
+
+/*
+ * from linux/arch/arm/kernel/irq.c
+ */
+extern void do_ecard_IRQ(int irq, struct pt_regs *);
+
+static ecard_t expcard[MAX_ECARDS];
+static signed char irqno_to_expcard[16];
+static unsigned int ecard_numcards, ecard_numirqcards;
+static unsigned int have_expmask;
+static unsigned long kmem;
+
+static void ecard_def_irq_enable (ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask |= 1 << irqnr;
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static void ecard_def_irq_disable (ecard_t *ec, int irqnr)
+{
+#ifdef HAS_EXPMASK
+ if (irqnr < 4 && have_expmask) {
+ have_expmask &= ~(1 << irqnr);
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static void ecard_def_fiq_enable (ecard_t *ec, int fiqnr)
+{
+ panic ("ecard_def_fiq_enable called - impossible");
+}
+
+static void ecard_def_fiq_disable (ecard_t *ec, int fiqnr)
+{
+ panic ("ecard_def_fiq_disable called - impossible");
+}
+
+static expansioncard_ops_t ecard_default_ops = {
+ ecard_def_irq_enable,
+ ecard_def_irq_disable,
+ ecard_def_fiq_enable,
+ ecard_def_fiq_disable
+};
+
+/*
+ * Enable and disable interrupts from expansion cards.
+ * (interrupts are disabled for these functions).
+ *
+ * They are not meant to be called directly, but via enable/disable_irq.
+ */
+void ecard_enableirq (unsigned int irqnr)
+{
+ if (irqnr < MAX_ECARDS && irqno_to_expcard[irqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[irqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->irqenable)
+ ec->ops->irqenable (ec, irqnr);
+ else
+ printk (KERN_ERR "ecard: rejecting request to "
+ "enable IRQs for %d\n", irqnr);
+ }
+}
+
+void ecard_disableirq (unsigned int irqnr)
+{
+ if (irqnr < MAX_ECARDS && irqno_to_expcard[irqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[irqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops && ec->ops->irqdisable)
+ ec->ops->irqdisable (ec, irqnr);
+ }
+}
+
+void ecard_enablefiq (unsigned int fiqnr)
+{
+ if (fiqnr < MAX_ECARDS && irqno_to_expcard[fiqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[fiqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->claimed && ec->ops->fiqenable)
+ ec->ops->fiqenable (ec, fiqnr);
+ else
+ printk (KERN_ERR "ecard: rejecting request to "
+ "enable FIQs for %d\n", fiqnr);
+ }
+}
+
+void ecard_disablefiq (unsigned int fiqnr)
+{
+ if (fiqnr < MAX_ECARDS && irqno_to_expcard[fiqnr] != -1) {
+ ecard_t *ec = expcard + irqno_to_expcard[fiqnr];
+
+ if (!ec->ops)
+ ec->ops = &ecard_default_ops;
+
+ if (ec->ops->fiqdisable)
+ ec->ops->fiqdisable (ec, fiqnr);
+ }
+}
+
+static void *ecard_malloc(int len)
+{
+ int r;
+
+ len = (len + 3) & ~3;
+
+ if (kmem) {
+ r = kmem;
+ kmem += len;
+ return (void *)r;
+ } else
+ return kmalloc(len, GFP_KERNEL);
+}
+
+static void ecard_irq_noexpmask(int intr_no, void *dev_id, struct pt_regs *regs)
+{
+ const int num_cards = ecard_numirqcards;
+ int i, called = 0;
+
+ mask_irq (IRQ_EXPANSIONCARD);
+ for (i = 0; i < num_cards; i++) {
+ if (expcard[i].claimed && expcard[i].irq &&
+ (!expcard[i].irqmask ||
+ expcard[i].irqaddr[0] & expcard[i].irqmask)) {
+ do_ecard_IRQ(expcard[i].irq, regs);
+ called ++;
+ }
+ }
+ cli ();
+ unmask_irq (IRQ_EXPANSIONCARD);
+ if (called == 0)
+ printk (KERN_WARNING "Wild interrupt from backplane?\n");
+}
+
+#ifdef HAS_EXPMASK
+static unsigned char priority_masks[] =
+{
+ 0xf0, 0xf1, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff
+};
+
+static unsigned char first_set[] =
+{
+ 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
+ 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00
+};
+
+static void ecard_irq_expmask (int intr_no, void *dev_id, struct pt_regs *regs)
+{
+ const unsigned int statusmask = 15;
+ unsigned int status;
+
+ status = EXPMASK_STATUS & statusmask;
+ if (status) {
+ unsigned int irqno;
+ ecard_t *ec;
+again:
+ irqno = first_set[status];
+ ec = expcard + irqno_to_expcard[irqno];
+ if (ec->claimed) {
+ unsigned int oldexpmask;
+ /*
+ * this ugly code is so that we can operate a prioritorising system.
+ * Card 0 highest priority
+ * Card 1
+ * Card 2
+ * Card 3 lowest priority
+ * Serial cards should go in 0/1, ethernet/scsi in 2/3
+ * otherwise you will lose serial data at high speeds!
+ */
+ oldexpmask = have_expmask;
+ EXPMASK_ENABLE = (have_expmask &= priority_masks[irqno]);
+ sti ();
+ do_ecard_IRQ (ec->irq, regs);
+ cli ();
+ EXPMASK_ENABLE = have_expmask = oldexpmask;
+ status = EXPMASK_STATUS & statusmask;
+ if (status)
+ goto again;
+ } else {
+ printk (KERN_WARNING "card%d: interrupt from unclaimed card???\n", irqno);
+ EXPMASK_ENABLE = (have_expmask &= ~(1 << irqno));
+ }
+ } else
+ printk (KERN_WARNING "Wild interrupt from backplane (masks)\n");
+}
+
+static int ecard_checkirqhw (void)
+{
+ int found;
+
+ EXPMASK_ENABLE = 0x00;
+ EXPMASK_STATUS = 0xff;
+ found = ((EXPMASK_STATUS & 15) == 0);
+ EXPMASK_ENABLE = 0xff;
+
+ return found;
+}
+#endif
+
+static void ecard_readbytes (void *addr, ecard_t *ec, int off, int len, int useld)
+{
+ extern int ecard_loader_read(int off, volatile unsigned int pa, loader_t loader);
+ unsigned char *a = (unsigned char *)addr;
+
+ if (ec->slot_no == 8) {
+ static unsigned int lowaddress;
+ unsigned int laddr, haddr;
+ unsigned char byte = 0; /* keep gcc quiet */
+
+ laddr = off & 4095; /* number of bytes to read from offset + base addr */
+ haddr = off >> 12; /* offset into card from base addr */
+
+ if (haddr > 256)
+ return;
+
+ /*
+ * If we require a low address or address 0, then reset, and start again...
+ */
+ if (!off || lowaddress > laddr) {
+ outb (0, ec->podaddr);
+ lowaddress = 0;
+ }
+ while (lowaddress <= laddr) {
+ byte = inb (ec->podaddr + haddr);
+ lowaddress += 1;
+ }
+ while (len--) {
+ *a++ = byte;
+ if (len) {
+ byte = inb (ec->podaddr + haddr);
+ lowaddress += 1;
+ }
+ }
+ } else {
+ if (!useld || !ec->loader) {
+ while(len--)
+ *a++ = inb(ec->podaddr + (off++));
+ } else {
+ while(len--) {
+ *(unsigned long *)0x108 = 0; /* hack for some loaders!!! */
+ *a++ = ecard_loader_read(off++, BUS_ADDR(ec->podaddr), ec->loader);
+ }
+ }
+ }
+}
+
+/*
+ * This is called to reset the loaders for each expansion card on reboot.
+ *
+ * This is required to make sure that the card is in the correct state
+ * that RiscOS expects it to be.
+ */
+void ecard_reset (int card)
+{
+ extern int ecard_loader_reset (volatile unsigned int pa, loader_t loader);
+
+ if (card >= ecard_numcards)
+ return;
+
+ if (card < 0) {
+ for (card = 0; card < ecard_numcards; card++)
+ if (expcard[card].loader)
+ ecard_loader_reset (BUS_ADDR(expcard[card].podaddr),
+ expcard[card].loader);
+ } else
+ if (expcard[card].loader)
+ ecard_loader_reset (BUS_ADDR(expcard[card].podaddr),
+ expcard[card].loader);
+
+#ifdef HAS_EXPMASK
+ if (have_expmask) {
+ have_expmask |= ~0;
+ EXPMASK_ENABLE = have_expmask;
+ }
+#endif
+}
+
+static unsigned int ecard_startcard;
+
+void ecard_startfind (void)
+{
+ ecard_startcard = 0;
+}
+
+ecard_t *ecard_find (int cld, const card_ids *cids)
+{
+ int card;
+ if (!cids) {
+ for (card = ecard_startcard; card < ecard_numcards; card++)
+ if (!expcard[card].claimed &&
+ ((expcard[card].cld.ecld ^ cld) & 0x78) == 0)
+ break;
+ } else {
+ for (card = ecard_startcard; card < ecard_numcards; card++) {
+ unsigned int manufacturer, product;
+ int i;
+
+ if (expcard[card].claimed)
+ continue;
+
+ manufacturer = expcard[card].cld.manufacturer;
+ product = expcard[card].cld.product;
+
+ for (i = 0; cids[i].manufacturer != 65535; i++)
+ if (manufacturer == cids[i].manufacturer &&
+ product == cids[i].product)
+ break;
+
+ if (cids[i].manufacturer != 65535)
+ break;
+ }
+ }
+ ecard_startcard = card + 1;
+ return card < ecard_numcards ? &expcard[card] : NULL;
+}
+
+int ecard_readchunk (struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
+{
+ struct ex_chunk_dir excd;
+ int index = 16;
+ int useld = 0;
+
+ while(1) {
+ ecard_readbytes(&excd, ec, index, 8, useld);
+ index += 8;
+ if (c_id(&excd) == 0) {
+ if (!useld && ec->loader) {
+ useld = 1;
+ index = 0;
+ continue;
+ }
+ return 0;
+ }
+ if (c_id(&excd) == 0xf0) { /* link */
+ index = c_start(&excd);
+ continue;
+ }
+ if (c_id(&excd) == 0x80) { /* loader */
+ if (!ec->loader) {
+ ec->loader = (loader_t)ecard_malloc(c_len(&excd));
+ ecard_readbytes(ec->loader, ec, (int)c_start(&excd), c_len(&excd), useld);
+ }
+ continue;
+ }
+ if (c_id(&excd) == id && num-- == 0)
+ break;
+ }
+
+ if (c_id(&excd) & 0x80) {
+ switch (c_id(&excd) & 0x70) {
+ case 0x70:
+ ecard_readbytes((unsigned char *)excd.d.string, ec,
+ (int)c_start(&excd), c_len(&excd), useld);
+ break;
+ case 0x00:
+ break;
+ }
+ }
+ cd->start_offset = c_start(&excd);
+ memcpy (cd->d.string, excd.d.string, 256);
+ return 1;
+}
+
+unsigned int ecard_address (ecard_t *ec, card_type_t memc, card_speed_t speed)
+{
+ switch (ec->slot_no) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return (memc ? MEMCECIO_BASE : IOCECIO_BASE + (speed << 17)) + (ec->slot_no << 12);
+#ifdef IOCEC4IO_BASE
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ return (memc ? 0 : IOCEC4IO_BASE + (speed << 17)) + ((ec->slot_no - 4) << 12);
+#endif
+#ifdef MEMCEC8IO_BASE
+ case 8:
+ return MEMCEC8IO_BASE;
+#endif
+ }
+ return 0;
+}
+
+/*
+ * Probe for an expansion card.
+ *
+ * If bit 1 of the first byte of the card is set,
+ * then the card does not exist.
+ */
+static int ecard_probe (int card, int freeslot)
+{
+ ecard_t *ec = expcard + freeslot;
+ struct ex_ecld excld;
+ const char *card_desc = NULL;
+ int i;
+
+ irqno_to_expcard[card] = -1;
+
+ ec->slot_no = card;
+ if ((ec->podaddr = ecard_address (ec, 0, ECARD_SYNC)) == 0)
+ return 0;
+
+ excld.r_ecld = 2;
+ ecard_readbytes (&excld, ec, 0, 16, 0);
+ if (excld.r_ecld & 2)
+ return 0;
+
+ irqno_to_expcard[card] = freeslot;
+
+ ec->irq = -1;
+ ec->fiq = -1;
+ ec->cld.ecld = e_ecld(&excld);
+ ec->cld.manufacturer = e_manu(&excld);
+ ec->cld.product = e_prod(&excld);
+ ec->cld.country = e_country(&excld);
+ ec->cld.fiqmask = e_fiqmask(&excld);
+ ec->cld.irqmask = e_irqmask(&excld);
+ ec->cld.fiqaddr = e_fiqaddr(&excld);
+ ec->cld.irqaddr = e_irqaddr(&excld);
+ ec->fiqaddr =
+ ec->irqaddr = (unsigned char *)BUS_ADDR(ec->podaddr);
+ ec->fiqmask = 4;
+ ec->irqmask = 1;
+ ec->ops = &ecard_default_ops;
+
+ for (i = 0; i < sizeof (blacklist) / sizeof (*blacklist); i++)
+ if (blacklist[i].manufacturer == ec->cld.manufacturer &&
+ blacklist[i].product == ec->cld.product) {
+ ec->loader = blacklist[i].loader;
+ card_desc = blacklist[i].type;
+ break;
+ }
+
+ if (card != 8) {
+ ec->irq = 32 + card;
+#if 0
+ ec->fiq = 96 + card;
+#endif
+ } else {
+ ec->irq = 11;
+ ec->fiq = -1;
+ }
+
+ if ((ec->cld.ecld & 0x78) == 0) {
+ struct in_chunk_dir incd;
+ printk ("\n %d: [%04X:%04X] ", card, ec->cld.manufacturer, ec->cld.product);
+ if (e_is (&excld)) {
+ ec->fiqmask = e_fiqmask (&excld);
+ ec->irqmask = e_irqmask (&excld);
+ ec->fiqaddr += e_fiqaddr (&excld);
+ ec->irqaddr += e_irqaddr (&excld);
+ }
+ if (!card_desc && e_cd (&excld) && ecard_readchunk (&incd, ec, 0xf5, 0))
+ card_desc = incd.d.string;
+ if (card_desc)
+ printk ("%s", card_desc);
+ else
+ printk ("*Unknown*");
+ } else
+ printk("\n %d: Simple card %d\n", card, (ec->cld.ecld >> 3) & 15);
+ return 1;
+}
+
+static struct irqaction irqexpansioncard = { ecard_irq_noexpmask, SA_INTERRUPT, 0, "expansion cards", NULL, NULL };
+
+/*
+ * Initialise the expansion card system.
+ * Locate all hardware - interrupt management and
+ * actual cards.
+ */
+unsigned long ecard_init(unsigned long start_mem)
+{
+ int i, nc = 0;
+
+ kmem = (start_mem | 3) & ~3;
+ memset (expcard, 0, sizeof (expcard));
+
+#ifdef HAS_EXPMASK
+ if (ecard_checkirqhw()) {
+ printk (KERN_DEBUG "Expansion card interrupt management hardware found\n");
+ irqexpansioncard.handler = ecard_irq_expmask;
+ have_expmask = -1;
+ }
+#endif
+ printk("Installed expansion cards:");
+
+ /*
+ * First of all, probe all cards on the expansion card interrupt line
+ */
+ for (i = 0; i < 4; i++)
+ if (ecard_probe (i, nc))
+ nc += 1;
+ else
+ have_expmask &= ~(1<<i);
+
+ ecard_numirqcards = nc;
+
+ /*
+ * Now probe other cards with different interrupt lines
+ */
+#ifdef MEMCEC8IO_BASE
+ if (ecard_probe (8, nc))
+ nc += 1;
+#endif
+ printk("\n");
+ ecard_numcards = nc;
+
+ if (nc && setup_arm_irq(IRQ_EXPANSIONCARD, &irqexpansioncard)) {
+ printk ("Could not allocate interrupt for expansion cards\n");
+ return kmem;
+ }
+
+#ifdef HAS_EXPMASK
+ if (nc && have_expmask)
+ EXPMASK_ENABLE = have_expmask;
+#endif
+ oldlatch_init ();
+ start_mem = kmem;
+ kmem = 0;
+ return start_mem;
+}
diff --git a/arch/arm/kernel/entry-armo.S b/arch/arm/kernel/entry-armo.S
new file mode 100644
index 000000000..20c1b8e7c
--- /dev/null
+++ b/arch/arm/kernel/entry-armo.S
@@ -0,0 +1,643 @@
+/*
+ * linux/arch/arm/kernel/entry-armo.S
+ *
+ * Copyright (C) 1995,1996,1997,1998 Russell King.
+ *
+ * Low-level vector interface routines
+ *
+ * Design issues:
+ * - We have several modes that each vector can be called from,
+ * each with its own set of registers. On entry to any vector,
+ * we *must* save the registers used in *that* mode.
+ *
+ * - This code must be as fast as possible.
+ *
+ * There are a few restrictions on the vectors:
+ * - the SWI vector cannot be called from *any* non-user mode
+ *
+ * - the FP emulator is *never* called from *any* non-user mode undefined
+ * instruction.
+ *
+ * Ok, so this file may be a mess, but its as efficient as possible while
+ * adhering to the above criteria.
+ */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/hardware.h>
+
+#include "../lib/constants.h"
+
+ .text
+
+@ Offsets into task structure
+@ ---------------------------
+@
+#define STATE 0
+#define COUNTER 4
+#define PRIORITY 8
+#define FLAGS 12
+#define SIGPENDING 16
+
+#define PF_TRACESYS 0x20
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH 0
+#define BAD_DATA 1
+#define BAD_ADDREXCPTN 2
+#define BAD_IRQ 3
+#define BAD_UNDEFINSTR 4
+
+@ OS version number used in SWIs
+@ RISC OS is 0
+@ RISC iX is 8
+@
+#define OS_NUMBER 9
+
+@
+@ Stack format (ensured by USER_* and SVC_*)
+@
+#define S_OLD_R0 64
+#define S_PSR 60
+#define S_PC 60
+#define S_LR 56
+#define S_SP 52
+#define S_IP 48
+#define S_FP 44
+#define S_R10 40
+#define S_R9 36
+#define S_R8 32
+#define S_R7 28
+#define S_R6 24
+#define S_R5 20
+#define S_R4 16
+#define S_R3 12
+#define S_R2 8
+#define S_R1 4
+#define S_R0 0
+
+#ifdef IOC_BASE
+/* IOC / IOMD based hardware */
+ .equ ioc_base_high, IOC_BASE & 0xff000000
+ .equ ioc_base_low, IOC_BASE & 0x00ff0000
+ .macro disable_fiq
+ mov r12, #ioc_base_high
+ .if ioc_base_low
+ orr r12, r12, #ioc_base_low
+ .endif
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #ioc_base_high @ point at IOC
+ .if ioc_base_low
+ orr r4, r4, #ioc_base_low
+ .endif
+ ldrb \irqnr, [r4, #0x24] @ get high priority first
+ adr \base, irq_prio_h
+ teq \irqnr, #0
+ ldreqb \irqnr, [r4, #0x14] @ get low priority
+ adreq \base, irq_prio_l
+ .endm
+
+/*
+ * Interrupt table (incorporates priority)
+ */
+ .macro irq_prio_table
+irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .endm
+#else
+#error Unknown architecture
+#endif
+
+/*=============================================================================
+ * For entry-common.S
+ */
+
+ .macro save_user_regs
+ str r0, [sp, #-4]!
+ str lr, [sp, #-4]!
+ sub sp, sp, #15*4
+ stmia sp, {r0 - lr}^
+ mov r0, r0
+ .endm
+
+ .macro restore_user_regs
+ ldmia sp, {r0 - lr}^
+ mov r0, r0
+ add sp, sp, #15*4
+ ldr lr, [sp], #8
+ movs pc, lr
+ .endm
+
+ .macro mask_pc, rd, rm
+ bic \rd, \rm, #PCMASK
+ .endm
+
+ .macro arm700_bug_check, instr, temp
+ .endm
+
+ .macro enable_irqs, temp
+ teqp pc, #0x00000003
+ .endm
+
+ .macro initialise_traps_extra
+ .endm
+
+ .macro get_current_task, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+
+ /*
+ * Like adr, but force SVC mode (if required)
+ */
+ .macro adrsvc, cond, reg, label
+ adr\cond \reg, \label
+ orr\cond \reg, \reg, #3
+ .endm
+
+#if 0
+/*
+ * Uncomment these if you wish to get more debugging info about data aborts.
+ */
+#define FAULT_CODE_LDRSTRPOST 0x80
+#define FAULT_CODE_LDRSTRPRE 0x40
+#define FAULT_CODE_LDRSTRREG 0x20
+#define FAULT_CODE_LDMSTM 0x10
+#define FAULT_CODE_LDCSTC 0x08
+#endif
+#define FAULT_CODE_PREFETCH 0x04
+#define FAULT_CODE_WRITE 0x02
+#define FAULT_CODE_USER 0x01
+
+
+#define SVC_SAVE_ALL \
+ str sp, [sp, #-16]! ;\
+ str lr, [sp, #8] ;\
+ str lr, [sp, #4] ;\
+ stmfd sp!, {r0 - r12} ;\
+ mov r0, #-1 ;\
+ str r0, [sp, #S_OLD_R0] ;\
+ mov fp, #0
+
+#define SVC_IRQ_SAVE_ALL \
+ str sp, [sp, #-16]! ;\
+ str lr, [sp, #4] ;\
+ ldr lr, .LCirq ;\
+ ldr lr, [lr] ;\
+ str lr, [sp, #8] ;\
+ stmfd sp!, {r0 - r12} ;\
+ mov r0, #-1 ;\
+ str r0, [sp, #S_OLD_R0] ;\
+ mov fp, #0
+
+#define USER_RESTORE_ALL \
+ ldmia sp, {r0 - lr}^ ;\
+ mov r0, r0 ;\
+ add sp, sp, #15*4 ;\
+ ldr lr, [sp], #8 ;\
+ movs pc, lr
+
+#define SVC_RESTORE_ALL \
+ ldmfd sp, {r0 - pc}^
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ */
+_unexp_fiq: ldr sp, .LCfiq
+ mov r12, #IOC_BASE
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ teqp pc, #0x0c000003
+ mov r0, r0
+ stmfd sp!, {r0 - r3, ip, lr}
+ adr r0, Lfiqmsg
+ bl SYMBOL_NAME(printk)
+ ldmfd sp!, {r0 - r3, ip, lr}
+ teqp pc, #0x0c000001
+ mov r0, r0
+ movs pc, lr
+
+Lfiqmsg: .ascii "*** Unexpeced FIQ\n\0"
+ .align
+
+.LCfiq: .word __temp_fiq
+.LCirq: .word __temp_irq
+
+/*=============================================================================
+ * Undefined instruction handler
+ *-----------------------------------------------------------------------------
+ * Handles floating point instructions
+ */
+vector_undefinstr:
+ tst lr,#3
+ bne __und_svc
+ save_user_regs
+ mov fp, #0
+ teqp pc, #I_BIT | MODE_SVC
+.Lbug_undef:
+ adr r1, .LC2
+ ldmia r1, {r1, r4}
+ ldr r1, [r1]
+ get_current_task r2
+ teq r1, r2
+ stmnefd sp!, {ip, lr}
+ blne SYMBOL_NAME(math_state_restore)
+ ldmnefd sp!, {ip, lr}
+ ldr pc, [r4] @ Call FP module USR entry point
+
+ .globl SYMBOL_NAME(fpundefinstr)
+SYMBOL_NAME(fpundefinstr): @ Called by FP module on undefined instr
+SYMBOL_NAME(fpundefinstrsvc):
+ mov r0, lr
+ mov r1, sp
+ teqp pc, #MODE_SVC
+ bl SYMBOL_NAME(do_undefinstr)
+ b ret_from_exception @ Normal FP exit
+
+__und_svc: SVC_SAVE_ALL @ Non-user mode
+ mask_pc r0, lr
+ and r2, lr, #3
+ sub r0, r0, #4
+ mov r1, sp
+ bl SYMBOL_NAME(do_undefinstr)
+ SVC_RESTORE_ALL
+
+.LC2: .word SYMBOL_NAME(last_task_used_math)
+ .word SYMBOL_NAME(fp_enter)
+
+/*=============================================================================
+ * Prefetch abort handler
+ *-----------------------------------------------------------------------------
+ */
+
+vector_prefetch:
+ sub lr, lr, #4
+ tst lr, #3
+ bne __pabt_invalid
+ save_user_regs
+	teqp	pc, #0x00000003			@ NOT a problem - doesn't change mode
+ mask_pc r0, lr @ Address of abort
+ mov r1, #FAULT_CODE_PREFETCH|FAULT_CODE_USER @ Error code
+ mov r2, sp @ Tasks registers
+ bl SYMBOL_NAME(do_PrefetchAbort)
+ teq r0, #0 @ If non-zero, we believe this abort..
+ bne ret_from_sys_call
+#ifdef DEBUG_UNDEF
+ adr r0, t
+ bl SYMBOL_NAME(printk)
+#endif
+ ldr lr, [sp,#S_PC] @ program to test this on. I think its
+ b .Lbug_undef @ broken at the moment though!)
+
+__pabt_invalid: SVC_SAVE_ALL
+ mov r0, sp @ Prefetch aborts are definitely *not*
+ mov r1, #BAD_PREFETCH @ allowed in non-user modes. We cant
+ and r2, lr, #3 @ recover from this problem.
+ b SYMBOL_NAME(bad_mode)
+
+#ifdef DEBUG_UNDEF
+t: .ascii "*** undef ***\r\n\0"
+ .align
+#endif
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen).
+ * In order to debug the reason for address exceptions in non-user modes,
+ * we have to obtain all the registers so that we can see what's going on.
+ */
+
+vector_addrexcptn:
+ sub lr, lr, #8
+ tst lr, #3
+ bne Laddrexcptn_not_user
+ save_user_regs
+ teq pc, #0x00000003
+ mask_pc r0, lr @ Point to instruction
+ mov r1, sp @ Point to registers
+ mov r2, #0x400
+ mov lr, pc
+ bl SYMBOL_NAME(do_excpt)
+ b ret_from_exception
+
+Laddrexcptn_not_user:
+ SVC_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne Laddrexcptn_illegal_mode
+	teqp	pc, #0x00000003			@ NOT a problem - doesn't change mode
+ mask_pc r0, lr
+ mov r1, sp
+ orr r2, r2, #0x400
+ bl SYMBOL_NAME(do_excpt)
+	ldmia	sp, {r0 - lr}			@ I can't remember the reason I changed this...
+ add sp, sp, #15*4
+ movs pc, lr
+
+Laddrexcptn_illegal_mode:
+ mov r0, sp
+ str lr, [sp, #-4]!
+ orr r1, r2, #0x0c000000
+ teqp r1, #0 @ change into mode (wont be user mode)
+ mov r0, r0
+ mov r1, r8 @ Any register from r8 - r14 can be banked
+ mov r2, r9
+ mov r3, r10
+ mov r4, r11
+ mov r5, r12
+ mov r6, r13
+ mov r7, r14
+ teqp pc, #0x04000003 @ back to svc
+ mov r0, r0
+ stmfd sp!, {r1-r7}
+ ldmia r0, {r0-r7}
+ stmfd sp!, {r0-r7}
+ mov r0, sp
+ mov r1, #BAD_ADDREXCPTN
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Interrupt (IRQ) handler
+ *-----------------------------------------------------------------------------
+ * Note: if in user mode, then *no* kernel routine is running, so don't have
+ * to save svc lr
+ * (r13 points to irq temp save area)
+ */
+
+vector_IRQ:	ldr	r13, .LCirq		@ I'll leave this one in just in case...
+ sub lr, lr, #4
+ str lr, [r13]
+ tst lr, #3
+ bne __irq_svc
+ teqp pc, #0x08000003
+ mov r0, r0
+ ldr lr, .LCirq
+ ldr lr, [lr]
+ save_user_regs
+
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ orr lr, lr, #3 @ Force SVC
+ bne do_IRQ
+ b ret_with_reschedule
+
+ irq_prio_table
+
+__irq_svc: teqp pc, #0x08000003
+ mov r0, r0
+ SVC_IRQ_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne __irq_invalid
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ orr lr, lr, #3 @ Force SVC
+ bne do_IRQ @ Returns to 1b
+ SVC_RESTORE_ALL
+
+__irq_invalid: mov r0, sp
+ mov r1, #BAD_IRQ
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Data abort handler code
+ *-----------------------------------------------------------------------------
+ *
+ * This handles both exceptions from user and SVC modes, computes the address
+ * range of the problem, and does any correction that is required. It then
+ * calls the kernel data abort routine.
+ *
+ * This is where I wish that the ARM would tell you which address aborted.
+ */
+
+vector_data: sub lr, lr, #8 @ Correct lr
+ tst lr, #3
+ bne Ldata_not_user
+ save_user_regs
+	teqp	pc, #0x00000003			@ NOT a problem - doesn't change mode
+ mask_pc r0, lr
+ mov r2, #FAULT_CODE_USER
+ bl Ldata_do
+ b ret_from_exception
+
+Ldata_not_user:
+ SVC_SAVE_ALL
+ and r2, lr, #3
+ teq r2, #3
+ bne Ldata_illegal_mode
+ tst lr, #0x08000000
+	teqeqp	pc, #0x00000003			@ NOT a problem - doesn't change mode
+ mask_pc r0, lr
+ mov r2, #0
+ bl Ldata_do
+ SVC_RESTORE_ALL
+
+Ldata_illegal_mode:
+ mov r0, sp
+ mov r1, #BAD_DATA
+ b SYMBOL_NAME(bad_mode)
+
+Ldata_do: mov r3, sp
+ ldr r4, [r0] @ Get instruction
+ tst r4, #1 << 20 @ Check to see if it is a write instruction
+ orreq r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
+	mov	r1, r4, lsr #22			@ Now branch to the relevant processing routine
+ and r1, r1, #15 << 2
+ add pc, pc, r1
+ movs pc, lr
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_ldrstr_post @ ldr rd, [rn], #m
+ b Ldata_ldrstr_numindex @ ldr rd, [rn, #m] @ RegVal
+ b Ldata_ldrstr_post @ ldr rd, [rn], rm
+ b Ldata_ldrstr_regindex @ ldr rd, [rn, rm]
+ b Ldata_ldmstm @ ldm*a rn, <rlist>
+ b Ldata_ldmstm @ ldm*b rn, <rlist>
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_ldrstr_post @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+ b Ldata_ldcstc_pre @ ldc rd, [rn, #m]
+ b Ldata_unknown
+Ldata_unknown: @ Part of jumptable
+ ldr r3, [sp, #15 * 4]
+ str r3, [sp, #-4]!
+ mov r1, r1, lsr #2
+ mov r2, r0
+ mov r3, r4
+ adr r0, Ltt
+ bl SYMBOL_NAME(printk)
+Llpxx: b Llpxx
+
+Ltt: .ascii "Unknown data abort code %d [pc=%p, *pc=%p]\nLR=%p\0"
+ .align
+
+Ldata_ldrstr_post:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRPOST
+ orr r2, r2, #FAULT_CODE_LDRSTRPOST
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldrstr_numindex:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r4, lsl #20
+ tst r4, #1 << 23
+ addne r0, r0, r1, lsr #20
+ subeq r0, r0, r1, lsr #20
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRPRE
+ orr r2, r2, #FAULT_CODE_LDRSTRPRE
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldrstr_regindex:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ and r7, r4, #15
+ teq r7, #15 @ Check for PC
+ ldr r7, [r3, r7, lsl #2] @ Get Rm
+ biceq r7, r7, #PCMASK
+ and r8, r4, #0x60 @ Get shift types
+ mov r9, r4, lsr #7 @ Get shift amount
+ and r9, r9, #31
+ teq r8, #0
+ moveq r7, r7, lsl r9
+ teq r8, #0x20 @ LSR shift
+ moveq r7, r7, lsr r9
+ teq r8, #0x40 @ ASR shift
+ moveq r7, r7, asr r9
+ teq r8, #0x60 @ ROR shift
+ moveq r7, r7, ror r9
+ tst r4, #1 << 23
+ addne r0, r0, r7
+ subeq r0, r0, r7 @ Apply correction
+ mov r1, r0
+#ifdef FAULT_CODE_LDRSTRREG
+ orr r2, r2, #FAULT_CODE_LDRSTRREG
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldmstm:
+ mov r7, #0x11
+ orr r7, r7, r7, lsl #8
+ and r0, r4, r7
+ and r1, r4, r7, lsl #1
+ add r0, r0, r1, lsr #1
+ and r1, r4, r7, lsl #2
+ add r0, r0, r1, lsr #2
+ and r1, r4, r7, lsl #3
+ add r0, r0, r1, lsr #3
+ add r0, r0, r0, lsr #8
+ add r0, r0, r0, lsr #4
+ and r7, r0, #15 @ r7 = no. of registers to transfer.
+ mov r5, r4, lsr #14 @ Get Rn
+ and r5, r5, #15 << 2
+ ldr r0, [r3, r5] @ Get reg
+ eor r6, r4, r4, lsl #2
+ tst r6, #1 << 23 @ Check inc/dec ^ writeback
+ rsbeq r7, r7, #0
+ add r7, r0, r7, lsl #2 @ Do correction (signed)
+ subne r1, r7, #1
+ subeq r1, r0, #1
+ moveq r0, r7
+ tst r4, #1 << 21 @ Check writeback
+ strne r7, [r3, r5]
+ eor r6, r4, r4, lsl #1
+ tst r6, #1 << 24 @ Check Pre/Post ^ inc/dec
+ addeq r0, r0, #4
+ addeq r1, r1, #4
+ teq r5, #15*4 @ CHECK FOR PC
+ biceq r1, r1, #PCMASK
+ biceq r0, r0, #PCMASK
+#ifdef FAULT_CODE_LDMSTM
+ orr r2, r2, #FAULT_CODE_LDMSTM
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+Ldata_ldcstc_pre:
+ mov r0, r4, lsr #14 @ Get Rn
+ and r0, r0, #15 << 2 @ Mask out reg.
+ teq r0, #15 << 2
+ ldr r0, [r3, r0] @ Get register
+ biceq r0, r0, #PCMASK
+ mov r1, r4, lsl #24 @ Get offset
+ tst r4, #1 << 23
+ addne r0, r0, r1, lsr #24
+ subeq r0, r0, r1, lsr #24
+ mov r1, r0
+#ifdef FAULT_CODE_LDCSTC
+ orr r2, r2, #FAULT_CODE_LDCSTC
+#endif
+ b SYMBOL_NAME(do_DataAbort)
+
+#include "entry-common.S"
+
+ .data
+
+__temp_irq: .word 0 @ saved lr_irq
+__temp_fiq: .space 128
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
new file mode 100644
index 000000000..a2de41f33
--- /dev/null
+++ b/arch/arm/kernel/entry-armv.S
@@ -0,0 +1,671 @@
+/*
+ * linux/arch/arm/kernel/entry-armv.S
+ *
+ * Copyright (C) 1996,1997,1998 Russell King.
+ * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
+ *
+ * Low-level vector interface routines
+ *
+ * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
+ * it to save wrong values... Be aware!
+ */
+#include <linux/config.h>	/* for CONFIG_ARCH_EBSA110 */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/hardware.h>
+
+#include "../lib/constants.h"
+
+ .text
+
+@ Offsets into task structure
+@ ---------------------------
+@
+#define STATE 0
+#define COUNTER 4
+#define PRIORITY 8
+#define FLAGS 12
+#define SIGPENDING 16
+
+#define PF_TRACESYS 0x20
+
+@ Bad Abort numbers
+@ -----------------
+@
+#define BAD_PREFETCH 0
+#define BAD_DATA 1
+#define BAD_ADDREXCPTN 2
+#define BAD_IRQ 3
+#define BAD_UNDEFINSTR 4
+
+@ OS version number used in SWIs
+@ RISC OS is 0
+@ RISC iX is 8
+@
+#define OS_NUMBER 9
+
+@
+@ Stack format (ensured by USER_* and SVC_*)
+@
+#define S_FRAME_SIZE 72
+#define S_OLD_R0 68
+#define S_PSR 64
+#define S_PC 60
+#define S_LR 56
+#define S_SP 52
+#define S_IP 48
+#define S_FP 44
+#define S_R10 40
+#define S_R9 36
+#define S_R8 32
+#define S_R7 28
+#define S_R6 24
+#define S_R5 20
+#define S_R4 16
+#define S_R3 12
+#define S_R2 8
+#define S_R1 4
+#define S_R0 0
+
+#ifdef IOC_BASE
+/* IOC / IOMD based hardware */
+ .equ ioc_base_high, IOC_BASE & 0xff000000
+ .equ ioc_base_low, IOC_BASE & 0x00ff0000
+ .macro disable_fiq
+ mov r12, #ioc_base_high
+ .if ioc_base_low
+ orr r12, r12, #ioc_base_low
+ .endif
+ strb r12, [r12, #0x38] @ Disable FIQ register
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #ioc_base_high @ point at IOC
+ .if ioc_base_low
+ orr r4, r4, #ioc_base_low
+ .endif
+ ldrb \irqnr, [r4, #0x24] @ get high priority first
+ adr \base, irq_prio_h
+ teq \irqnr, #0
+#ifdef IOMD_BASE
+ ldreqb \irqnr, [r4, #0x1f4] @ get dma
+ adreq \base, irq_prio_d
+ teqeq \irqnr, #0
+#endif
+ ldreqb \irqnr, [r4, #0x14] @ get low priority
+ adreq \base, irq_prio_l
+ .endm
+
+/*
+ * Interrupt table (incorporates priority)
+ */
+ .macro irq_prio_table
+irq_prio_l: .byte 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+ .byte 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+#ifdef IOMD_BASE
+irq_prio_d: .byte 0,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 20,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 23,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 22,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+ .byte 21,16,17,16,18,16,17,16,19,16,17,16,18,16,17,16
+#endif
+irq_prio_h: .byte 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .byte 13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
+ .endm
+
+#elif defined(CONFIG_ARCH_EBSA110)
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, base
+ mov r4, #0xf3000000
+ ldrb \irqnr, [r4] @ get interrupts
+ adr \base, irq_prio_ebsa110
+ .endm
+
+ .macro irq_prio_table
+irq_prio_ebsa110:
+ .byte 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 4, 4, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+
+ .byte 7, 0, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 4, 4, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+ .byte 5, 5, 1, 1, 2, 2, 2, 2, 3, 3, 1, 1, 2, 2, 2, 2
+
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .byte 6, 6, 6, 6, 2, 2, 2, 2, 3, 3, 6, 6, 2, 2, 2, 2
+ .endm
+
+#else
+#error Unknown architecture
+#endif
+
+/*============================================================================
+ * For entry-common.S
+ */
+
+ .macro save_user_regs
+ sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ Calling r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Calling sp, lr
+ mov r7, r0
+ mrs r6, spsr
+ mov r5, lr
+ stmia r8, {r5, r6, r7} @ Save calling PC, CPSR, OLD_R0
+ .endm
+
+ .macro restore_user_regs
+ mrs r0, cpsr @ disable IRQs
+ orr r0, r0, #I_BIT
+ msr cpsr, r0
+ ldr r0, [sp, #S_PSR] @ Get calling cpsr
+ msr spsr, r0 @ save in spsr_svc
+ ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
+ mov r0, r0
+ add sp, sp, #S_PC
+ ldr lr, [sp], #S_FRAME_SIZE - S_PC @ Get PC and jump over PC, PSR, OLD_R0
+ movs pc, lr @ return & move spsr_svc into cpsr
+ .endm
+
+ .macro mask_pc, rd, rm
+ .endm
+
+ .macro arm700_bug_check, instr, temp
+ and \temp, \instr, #0x0f000000 @ check for SWI
+ teq \temp, #0x0f000000
+ bne .Larm700bug
+ .endm
+
+ .macro enable_irqs, temp
+ mrs \temp, cpsr
+ bic \temp, \temp, #I_BIT
+ msr cpsr, \temp
+ .endm
+
+ .macro initialise_traps_extra
+ mrs r0, cpsr
+ bic r0, r0, #31
+ orr r0, r0, #0xd3
+ msr cpsr, r0
+ .endm
+
+
+.Larm700bug: str lr, [r8]
+ ldr r0, [sp, #S_PSR] @ Get calling cpsr
+ msr spsr, r0
+ ldmia sp, {r0 - lr}^ @ Get calling r0 - lr
+ mov r0, r0
+ add sp, sp, #S_PC
+ ldr lr, [sp], #S_FRAME_SIZE - S_PC @ Get PC and jump over PC, PSR, OLD_R0
+ movs pc, lr
+
+
+ .macro get_current_task, rd
+ mov \rd, sp, lsr #13
+ mov \rd, \rd, lsl #13
+ .endm
+
+ /*
+ * Like adr, but force SVC mode (if required)
+ */
+ .macro adrsvc, cond, reg, label
+ adr\cond \reg, \label
+ .endm
+
+/*=============================================================================
+ * Undefined FIQs
+ *-----------------------------------------------------------------------------
+ * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
+ * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
+ * Basically to switch modes, we *HAVE* to clobber one register... brain
+ * damage alert! I don't think that we can execute any code in here in any
+ * other mode than FIQ... Ok you can switch to another mode, but you can't
+ * get out of that mode without clobbering one register.
+ */
+_unexp_fiq: disable_fiq
+ subs pc, lr, #4
+
+/*=============================================================================
+ * Interrupt entry dispatcher
+ *-----------------------------------------------------------------------------
+ * Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+vector_IRQ: @
+ @ save mode specific registers
+ @
+ ldr r13, .LCirq
+ sub lr, lr, #4
+ str lr, [r13] @ save lr_IRQ
+ mrs lr, spsr
+ str lr, [r13, #4] @ save spsr_IRQ
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr @ switch to SVC mode
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __irq_invalid @ 4 - 15
+ b __irq_usr @ 0 (USR_26 / USR_32)
+ b __irq_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __irq_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __irq_svc @ 3 (SVC_26 / SVC_32)
+/*
+ *------------------------------------------------------------------------------------------------
+ * Undef instr entry dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ */
+.LCirq: .word __temp_irq
+.LCund: .word __temp_und
+.LCabt: .word __temp_abt
+
+vector_undefinstr:
+ @
+ @ save mode specific registers
+ @
+ ldr r13, [pc, #.LCund - . - 8]
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __und_invalid @ 4 - 15
+ b __und_usr @ 0 (USR_26 / USR_32)
+ b __und_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __und_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __und_svc @ 3 (SVC_26 / SVC_32)
+/*
+ *------------------------------------------------------------------------------------------------
+ * Prefetch abort dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+vector_prefetch:
+ @
+ @ save mode specific registers
+ @
+ sub lr, lr, #4
+ ldr r13, .LCabt
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ adds pc, pc, lr, lsl #2 @ Changes mode and branches
+ b __pabt_invalid @ 4 - 15
+ b __pabt_usr @ 0 (USR_26 / USR_32)
+ b __pabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __pabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __pabt_invalid @ 3 (SVC_26 / SVC_32)
+/*
+ *------------------------------------------------------------------------------------------------
+ * Data abort dispatcher - dispatches it to the correct handler for the processor mode
+ *------------------------------------------------------------------------------------------------
+ * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
+ */
+vector_data: @
+ @ save mode specific registers
+ @
+ sub lr, lr, #8
+ ldr r13, .LCabt
+ str lr, [r13]
+ mrs lr, spsr
+ str lr, [r13, #4]
+ @
+	@ now branch to the relevant MODE handling routine
+ @
+ mrs sp, cpsr
+ bic sp, sp, #31
+ orr sp, sp, #0x13
+ msr spsr, sp
+ and lr, lr, #15
+ cmp lr, #4
+ addlts pc, pc, lr, lsl #2 @ Changes mode & branches
+ b __dabt_invalid @ 4 - 15
+ b __dabt_usr @ 0 (USR_26 / USR_32)
+ b __dabt_invalid @ 1 (FIQ_26 / FIQ_32)
+ b __dabt_invalid @ 2 (IRQ_26 / IRQ_32)
+ b __dabt_svc @ 3 (SVC_26 / SVC_32)
+
+/*=============================================================================
+ * Undefined instruction handler
+ *-----------------------------------------------------------------------------
+ * Handles floating point instructions
+ */
+__und_usr:	sub	sp, sp, #S_FRAME_SIZE		@ Allocate frame size in one go
+		stmia	sp, {r0 - r12}			@ Save r0 - r12
+		add	r8, sp, #S_PC
+		stmdb	r8, {sp, lr}^			@ Save user sp, lr above the r0-r12 slots
+		ldr	r4, .LCund			@ __temp_und: pc, cpsr saved by vector_undefinstr
+		ldmia	r4, {r5 - r7}
+		stmia	r8, {r5 - r7}			@ Save USR pc, cpsr, old_r0
+		mov	fp, #0				@ terminate backtraces at this frame
+
+		adr	r1, .LC2
+		ldmia	r1, {r1, r4}			@ r1 = &last_task_used_math, r4 = &fp_enter
+		ldr	r1, [r1]
+		get_current_task r2
+		teq	r1, r2				@ lazy FP context switch if owner changed
+		blne	SYMBOL_NAME(math_state_restore)
+		adrsvc	al, r9, SYMBOL_NAME(fpreturn)	@ fix: was "adrsvc, al" - stray comma broke the macro call
+		adrsvc	al, lr, SYMBOL_NAME(fpundefinstr)
+		ldr	pc, [r4]			@ Call FP module USR entry point
+
+ .globl SYMBOL_NAME(fpundefinstr)
+SYMBOL_NAME(fpundefinstr): @ Called by FP module on undefined instr
+ mov r0, lr
+ mov r1, sp
+ mrs r4, cpsr @ Enable interrupts
+ bic r4, r4, #I_BIT
+ msr cpsr, r4
+ bl SYMBOL_NAME(do_undefinstr)
+ b ret_from_exception @ Normal FP exit
+
+__und_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ mov r6, lr
+ ldr r7, .LCund
+ ldmia r7, {r7 - r9}
+ add r5, sp, #S_FRAME_SIZE
+ add r4, sp, #S_SP
+	stmia	r4, {r5 - r9}			@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+
+ adr r1, .LC2
+ ldmia r1, {r1, r4}
+ ldr r1, [r1]
+ mov r2, sp, lsr #13
+ mov r2, r2, lsl #13
+ teq r1, r2
+ blne SYMBOL_NAME(math_state_restore)
+ adrsvc al, r9, SYMBOL_NAME(fpreturnsvc)
+ adrsvc al, lr, SYMBOL_NAME(fpundefinstrsvc)
+ ldr pc, [r4] @ Call FP module SVC entry point
+
+ .globl SYMBOL_NAME(fpundefinstrsvc)
+SYMBOL_NAME(fpundefinstrsvc):
+ mov r0, r5 @ unsigned long pc
+ mov r1, sp @ struct pt_regs *regs
+ bl SYMBOL_NAME(do_undefinstr)
+
+ .globl SYMBOL_NAME(fpreturnsvc)
+SYMBOL_NAME(fpreturnsvc):
+ ldr lr, [sp, #S_PSR] @ Get SVC cpsr
+ msr spsr, lr
+ ldmia sp, {r0 - pc}^ @ Restore SVC registers
+
+.LC2: .word SYMBOL_NAME(last_task_used_math)
+ .word SYMBOL_NAME(fp_enter)
+
+__und_invalid: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - lr}
+ mov r7, r0
+ ldr r4, .LCund
+ ldmia r4, {r5, r6} @ Get UND/IRQ/FIQ/ABT pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7} @ Save UND/IRQ/FIQ/ABT pc, cpsr, old_r0
+ mov r0, sp @ struct pt_regs *regs
+ mov r1, #BAD_UNDEFINSTR @ int reason
+ and r2, r6, #31 @ int mode
+ b SYMBOL_NAME(bad_mode) @ Does not ever return...
+/*=============================================================================
+ * Prefetch abort handler
+ *-----------------------------------------------------------------------------
+ */
+pabtmsg: .ascii "Pabt: %08lX\n\0"
+ .align
+__pabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ Save r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^ @ Save sp_usr lr_usr
+ ldr r4, .LCabt
+ ldmia r4, {r5 - r7} @ Get USR pc, cpsr
+ stmia r8, {r5 - r7} @ Save USR pc, cpsr, old_r0
+
+ mrs r7, cpsr @ Enable interrupts if they were
+ bic r7, r7, #I_BIT @ previously
+ msr cpsr, r7
+ mov r0, r5 @ address (pc)
+ mov r1, sp @ regs
+ bl SYMBOL_NAME(do_PrefetchAbort) @ call abort handler
+ teq r0, #0 @ Does this still apply???
+ bne ret_from_exception @ Return from exception
+#ifdef DEBUG_UNDEF
+ adr r0, t
+ bl SYMBOL_NAME(printk)
+#endif
+ mov r0, r5
+ mov r1, sp
+ and r2, r6, #31
+ bl SYMBOL_NAME(do_undefinstr)
+ ldr lr, [sp, #S_PSR] @ Get USR cpsr
+ msr spsr, lr
+ ldmia sp, {r0 - pc}^ @ Restore USR registers
+
+__pabt_invalid: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - lr} @ Save XXX r0 - lr
+ mov r7, r0 @ OLD R0
+ ldr r4, .LCabt
+ ldmia r4, {r5 - r7} @ Get XXX pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5 - r7} @ Save XXX pc, cpsr, old_r0
+ mov r0, sp @ Prefetch aborts are definitely *not*
+ mov r1, #BAD_PREFETCH @ allowed in non-user modes. We can't
+ and r2, r6, #31 @ recover from this problem.
+ b SYMBOL_NAME(bad_mode)
+
+#ifdef DEBUG_UNDEF
+t: .ascii "*** undef ***\r\n\0"
+ .align
+#endif
+
+/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
+ * Interrupt (IRQ) handler
+ *-----------------------------------------------------------------------------
+ */
+__irq_usr: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ add r8, sp, #S_PC
+ stmdb r8, {sp, lr}^
+ ldr r4, .LCirq
+ ldmia r4, {r5 - r7} @ get saved PC, SPSR
+ stmia r8, {r5 - r7} @ save pc, psr, old_r0
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrsvc ne, lr, 1b
+ bne do_IRQ
+ b ret_with_reschedule
+
+ irq_prio_table
+
+__irq_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ mov r6, lr
+ ldr r7, .LCirq
+ ldmia r7, {r7 - r9}
+ add r5, sp, #S_FRAME_SIZE
+ add r4, sp, #S_SP
+ stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+1: get_irqnr_and_base r6, r5
+ teq r6, #0
+ ldrneb r0, [r5, r6] @ get IRQ number
+ movne r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adrsvc ne, lr, 1b
+ bne do_IRQ
+ ldr r0, [sp, #S_PSR]
+ msr spsr, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+__irq_invalid: sub sp, sp, #S_FRAME_SIZE @ Allocate space on stack for frame
+ stmfd sp, {r0 - lr} @ Save r0 - lr
+ mov r7, #-1
+ ldr r4, .LCirq
+ ldmia r4, {r5, r6} @ get saved pc, psr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7}
+ mov fp, #0
+ mov r0, sp
+ mov r1, #BAD_IRQ
+ b SYMBOL_NAME(bad_mode)
+
+/*=============================================================================
+ * Data abort handler code
+ *-----------------------------------------------------------------------------
+ */
+.LCprocfns: .word SYMBOL_NAME(processor)
+
+__dabt_usr: sub sp, sp, #S_FRAME_SIZE @ Allocate frame size in one go
+ stmia sp, {r0 - r12} @ save r0 - r12
+ add r3, sp, #S_PC
+ stmdb r3, {sp, lr}^
+ ldr r0, .LCabt
+ ldmia r0, {r0 - r2} @ Get USR pc, cpsr
+ stmia r3, {r0 - r2} @ Save USR pc, cpsr, old_r0
+ mov fp, #0
+ mrs r2, cpsr @ Enable interrupts if they were
+ bic r2, r2, #I_BIT @ previously
+ msr cpsr, r2
+ ldr r2, .LCprocfns
+ mov lr, pc
+ ldr pc, [r2, #8] @ call processor specific code
+ mov r3, sp
+ bl SYMBOL_NAME(do_DataAbort)
+ b ret_from_sys_call
+
+__dabt_svc: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - r12} @ save r0 - r12
+ ldr r2, .LCabt
+ add r0, sp, #S_FRAME_SIZE
+ add r5, sp, #S_SP
+ mov r1, lr
+ ldmia r2, {r2 - r4} @ get pc, cpsr
+ stmia r5, {r0 - r4} @ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+ tst r3, #I_BIT
+ mrseq r0, cpsr @ Enable interrupts if they were
+ biceq r0, r0, #I_BIT @ previously
+ msreq cpsr, r0
+ mov r0, r2
+ ldr r2, .LCprocfns
+ mov lr, pc
+ ldr pc, [r2, #8] @ call processor specific code
+ mov r3, sp
+ bl SYMBOL_NAME(do_DataAbort)
+ ldr r0, [sp, #S_PSR]
+ msr spsr, r0
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+
+__dabt_invalid: sub sp, sp, #S_FRAME_SIZE
+ stmia sp, {r0 - lr} @ Save SVC r0 - lr [lr *should* be intact]
+ mov r7, r0
+ ldr r4, .LCabt
+ ldmia r4, {r5, r6} @ Get SVC pc, cpsr
+ add r4, sp, #S_PC
+ stmia r4, {r5, r6, r7} @ Save SVC pc, cpsr, old_r0
+ mov r0, sp
+ mov r1, #BAD_DATA
+ and r2, r6, #31
+ b SYMBOL_NAME(bad_mode)
+
+
+#include "entry-common.S"
+
+ .data
+
+__temp_irq: .word 0 @ saved lr_irq
+ .word 0 @ saved spsr_irq
+ .word -1 @ old_r0
+__temp_und: .word 0 @ Saved lr_und
+ .word 0 @ Saved spsr_und
+ .word -1 @ old_r0
+__temp_abt: .word 0 @ Saved lr_abt
+ .word 0 @ Saved spsr_abt
+ .word -1 @ old_r0
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
new file mode 100644
index 000000000..5725e1781
--- /dev/null
+++ b/arch/arm/kernel/entry-common.S
@@ -0,0 +1,283 @@
+/*
+ *=============================================================================
+ * Low-level interface code
+ *-----------------------------------------------------------------------------
+ * Trap initialisation
+ *-----------------------------------------------------------------------------
+ *
+ * Note - FIQ code has changed. The default is a couple of words in 0x1c, 0x20
+ * that call _unexp_fiq. However, we now copy the FIQ routine to 0x1c (removes
+ * some excess cycles).
+ *
+ * What we need to put into 0-0x1c are ldrs to branch to 0xC0000000
+ * (the kernel).
+ * 0x1c onwards is reserved for FIQ, so I think that I will allocate 0xe0 onwards for
+ * the actual address to jump to.
+ */
+/*
+ * these go into 0x00
+ */
+.Lbranches: swi SYS_ERROR0
+ ldr pc, .Lbranches + 0xe4
+ ldr pc, .Lbranches + 0xe8
+ ldr pc, .Lbranches + 0xec
+ ldr pc, .Lbranches + 0xf0
+ ldr pc, .Lbranches + 0xf4
+ ldr pc, .Lbranches + 0xf8
+ ldr pc, .Lbranches + 0xfc
+/*
+ * this is put into 0xe4 and above
+ */
+.Ljump_addresses:
+ .word vector_undefinstr @ 0xe4
+ .word vector_swi @ 0xe8
+ .word vector_prefetch @ 0xec
+ .word vector_data @ 0xf0
+ .word vector_addrexcptn @ 0xf4
+ .word vector_IRQ @ 0xf8
+ .word _unexp_fiq @ 0xfc
+/*
+ * initialise the trap system
+ */
+ENTRY(trap_init)
+ stmfd sp!, {r4 - r7, lr}
+ initialise_traps_extra
+ mov r0, #0xe4
+ adr r1, .Ljump_addresses
+ ldmia r1, {r1 - r6}
+ stmia r0, {r1 - r6}
+ mov r0, #0
+ adr r1, .Lbranches
+ ldmia r1, {r1 - r7}
+ stmia r0, {r1 - r7}
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ *
+ * We now handle sys-call tracing, and the errno in the task structure.
+ * Still have a problem with >4 arguments for functions. There's only
+ * a couple of functions in the code that have 5 arguments, so I'm not
+ * too worried.
+ */
+
+#include "calls.S"
+
+vector_swi: save_user_regs
+ mov fp, #0
+ mask_pc lr, lr
+ ldr r6, [lr, #-4]! @ get SWI instruction
+ arm700_bug_check r6, r7
+ enable_irqs r7
+
+ bic r6, r6, #0xff000000 @ mask off SWI op-code
+ eor r6, r6, #OS_NUMBER<<20 @ check OS number
+ cmp r6, #NR_SYSCALLS @ check upper syscall limit
+ bcs 2f
+
+ get_current_task r5
+ ldr ip, [r5, #FLAGS] @ check for syscall tracing
+ tst ip, #PF_TRACESYS
+ bne 1f
+
+ adr ip, SYMBOL_NAME(sys_call_table)
+ str r4, [sp, #-4]! @ new style: (r0 = arg1, r5 = arg5)
+ mov lr, pc
+ ldr pc, [ip, r6, lsl #2] @ call sys routine
+ add sp, sp, #4
+ str r0, [sp, #S_R0] @ returned r0
+ b ret_from_sys_call
+
+1: ldr r7, [sp, #S_IP] @ save old IP
+ mov r0, #0
+ str r7, [sp, #S_IP] @ trace entry [IP = 0]
+ bl SYMBOL_NAME(syscall_trace)
+ str r7, [sp, #S_IP]
+ ldmia sp, {r0 - r3} @ have to reload r0 - r3
+ adr ip, SYMBOL_NAME(sys_call_table)
+ str r4, [sp, #-4]! @ new style: (r0 = arg1, r5 = arg5)
+ mov lr, pc
+ ldr pc, [ip, r6, lsl #2] @ call sys routine
+ add sp, sp, #4
+ str r0, [sp, #S_R0] @ returned r0
+ mov r0, #1
+ str r0, [sp, #S_IP] @ trace exit [IP = 1]
+ bl SYMBOL_NAME(syscall_trace)
+ str r7, [sp, #S_IP]
+ b ret_from_sys_call
+
+2: tst r6, #0x00f00000 @ is it a Unix SWI?
+ bne 3f
+ cmp r6, #(KSWI_SYS_BASE - KSWI_BASE)
+ bcc 4f @ not private func
+ bic r0, r6, #0x000f0000
+ mov r1, sp
+ bl SYMBOL_NAME(arm_syscall)
+ b ret_from_sys_call
+
+3: eor r0, r6, #OS_NUMBER<<20 @ Put OS number back
+ mov r1, sp
+ bl SYMBOL_NAME(deferred)
+ ldmfd sp, {r0 - r3}
+ b ret_from_sys_call
+
+4: bl SYMBOL_NAME(sys_ni_syscall)
+ str r0, [sp, #0] @ returned r0
+ b ret_from_sys_call
+
+@ r0 = syscall number
+@ r1 = syscall r0
+@ r5 = syscall r4
+@ ip = syscall table
+SYMBOL_NAME(sys_syscall):
+ mov r6, r0
+ eor r6, r6, #OS_NUMBER << 20
+ cmp r6, #NR_SYSCALLS @ check range
+ movgt r0, #-ENOSYS
+ movgt pc, lr
+ add sp, sp, #4 @ take off the save of our r4
+ ldmib sp, {r0 - r4} @ get our args
+ str r4, [sp, #-4]! @ Put our arg on the stack
+ ldr pc, [ip, r6, lsl #2]
+
+ENTRY(sys_call_table)
+#include "calls.S"
+
+/*============================================================================
+ * Special system call wrappers
+ */
+sys_fork_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_fork)
+
+sys_execve_wrapper:
+ add r3, sp, #4
+ b SYMBOL_NAME(sys_execve)
+
+sys_mount_wrapper:
+ mov r6, lr
+ add r5, sp, #4
+ str r5, [sp]
+ str r4, [sp, #-4]!
+ bl SYMBOL_NAME(sys_compat_mount)
+ add sp, sp, #4
+ RETINSTR(mov,pc,r6)
+
+sys_clone_wapper:
+ add r2, sp, #4
+ b SYMBOL_NAME(sys_clone)
+
+sys_llseek_wrapper:
+ mov r6, lr
+ add r5, sp, #4
+ str r5, [sp]
+ str r4, [sp, #-4]!
+ bl SYMBOL_NAME(sys_compat_llseek)
+ add sp, sp, #4
+ RETINSTR(mov,pc,r6)
+
+sys_sigsuspend_wrapper:
+ add r3, sp, #4
+ b SYMBOL_NAME(sys_sigsuspend)
+
+sys_rt_sigsuspend_wrapper:
+ add r2, sp, #4
+ b SYMBOL_NAME(sys_rt_sigsuspend)
+
+sys_sigreturn_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_sigreturn)
+
+sys_rt_sigreturn_wrapper:
+ add r0, sp, #4
+ b SYMBOL_NAME(sys_rt_sigreturn)
+
+/*============================================================================
+ * All exits to user mode from the kernel go through this code.
+ */
+
+ .globl ret_from_sys_call
+
+ .globl SYMBOL_NAME(fpreturn)
+SYMBOL_NAME(fpreturn):
+ret_from_exception:
+ adr r0, 1f
+ ldmia r0, {r0, r1}
+ ldr r0, [r0]
+ ldr r1, [r1]
+ tst r0, r1
+ blne SYMBOL_NAME(do_bottom_half)
+ret_from_intr: ldr r0, [sp, #S_PSR]
+ tst r0, #3
+ beq ret_with_reschedule
+ b ret_from_all
+
+ret_signal: mov r1, sp
+ adrsvc al, lr, ret_from_all
+ b SYMBOL_NAME(do_signal)
+
+2: bl SYMBOL_NAME(schedule)
+
+ret_from_sys_call:
+ adr r0, 1f
+ ldmia r0, {r0, r1}
+ ldr r0, [r0]
+ ldr r1, [r1]
+ tst r0, r1
+ adrsvc ne, lr, ret_from_intr
+ bne SYMBOL_NAME(do_bottom_half)
+
+ret_with_reschedule:
+ ldr r0, 1f + 8
+ ldr r0, [r0]
+ teq r0, #0
+ bne 2b
+
+ get_current_task r1
+ ldr r1, [r1, #SIGPENDING]
+ teq r1, #0
+ bne ret_signal
+
+ret_from_all: restore_user_regs
+
+1: .word SYMBOL_NAME(bh_mask)
+ .word SYMBOL_NAME(bh_active)
+ .word SYMBOL_NAME(need_resched)
+
+/*============================================================================
+ * FP support
+ */
+
+1: .word SYMBOL_NAME(fp_save)
+ .word SYMBOL_NAME(fp_restore)
+
+.Lfpnull: mov pc, lr
+
+
+/*
+ * Function to call when switching tasks to save FP state
+ */
+ENTRY(fpe_save)
+ ldr r1, 1b
+ ldr pc, [r1]
+
+/*
+ * Function to call when switching tasks to restore FP state
+ */
+ENTRY(fpe_restore)
+ ldr r1, 1b + 4
+ ldr pc, [r1]
+
+
+ .data
+
+ENTRY(fp_enter)
+ .word SYMBOL_NAME(fpundefinstr)
+ .word SYMBOL_NAME(fpundefinstrsvc)
+
+ENTRY(fp_save)
+ .word .Lfpnull
+ENTRY(fp_restore)
+ .word .Lfpnull
+
diff --git a/arch/arm/kernel/head-armo.S b/arch/arm/kernel/head-armo.S
new file mode 100644
index 000000000..7bd69ed5f
--- /dev/null
+++ b/arch/arm/kernel/head-armo.S
@@ -0,0 +1,63 @@
+/*
+ * linux/arch/arm/kernel/head.S
+ *
+ * Copyright (C) 1994, 1995, 1996, 1997 Russell King
+ *
+ * 26-bit kernel startup code
+ */
+#include <linux/linkage.h>
+
+ .text
+ .align
+/*
+ * Entry point.
+ */
+ENTRY(stext)
+ENTRY(_stext)
+__entry: cmp pc, #0x02000000
+ ldrlt pc, LC1 @ if 0x01800000, call at 0x02080000
+ teq r0, #0 @ Check for old calling method
+ blne Loldparams @ Move page if old
+ adr r5, LC0
+ ldmia r5, {r5, r6, sl, sp} @ Setup stack
+ mov r4, #0
+1: cmp r5, sl @ Clear BSS
+ strcc r4, [r5], #4
+ bcc 1b
+ mov r0, #0xea000000 @ Point undef instr to continuation
+ adr r5, Lcontinue - 12
+ orr r5, r0, r5, lsr #2
+ str r5, [r4, #4]
+ mov r2, r4
+ ldr r5, Larm2_id
+ swp r0, r0, [r2] @ check for swp (ARM2 can't)
+ ldr r5, Larm250_id
+ mrc 15, 0, r0, c0, c0 @ check for CP#15 (ARM250 can't)
+ mov r5, r0 @ Use processor ID if we do have CP#15
+Lcontinue: str r5, [r6]
+ mov r5, #0xeb000000 @ Point undef instr vector to itself
+ sub r5, r5, #2
+ str r5, [r4, #4]
+ mov fp, #0
+ b SYMBOL_NAME(start_kernel)
+
+LC1: .word SYMBOL_NAME(_stext)
+LC0: .word SYMBOL_NAME(_edata)
+ .word SYMBOL_NAME(arm_id)
+ .word SYMBOL_NAME(_end)
+ .word SYMBOL_NAME(init_task_union)+8192
+Larm2_id: .long 0x41560200
+Larm250_id: .long 0x41560250
+ .align
+
+Loldparams: mov r4, #0x02000000
+ add r3, r4, #0x00080000
+ add r4, r4, #0x0007c000
+1: ldmia r0!, {r5 - r12}
+ stmia r4!, {r5 - r12}
+ cmp r4, r3
+ blt 1b
+ movs pc, lr
+
+ .align 13
+ENTRY(this_must_match_init_task)
diff --git a/arch/arm/kernel/head-armv.S b/arch/arm/kernel/head-armv.S
new file mode 100644
index 000000000..0af401e43
--- /dev/null
+++ b/arch/arm/kernel/head-armv.S
@@ -0,0 +1,312 @@
+/*
+ * linux/arch/arm/kernel/head32.S
+ *
+ * Copyright (C) 1994, 1995, 1996, 1997 Russell King
+ *
+ * Kernel 32 bit startup code for ARM6 / ARM7 / StrongARM
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+ .text
+ .align
+
+ .globl SYMBOL_NAME(swapper_pg_dir)
+ .equ SYMBOL_NAME(swapper_pg_dir), 0xc0004000
+
+ .globl __stext
+/*
+ * Entry point and restart point. Entry *must* be called with r0 == 0,
+ * MMU off.
+ *
+ * r1 = 0 -> ebsa (Ram @ 0x00000000)
+ * r1 = 1 -> RPC (Ram @ 0x10000000)
+ * r1 = 2 -> ebsit (???)
+ * r1 = 3 -> nexuspci
+ */
+ENTRY(stext)
+ENTRY(_stext)
+__entry:
+ teq r0, #0 @ check for illegal entry...
+ bne .Lerror @ loop indefinitely
+ cmp r1, #4 @ Unknown machine architecture
+ bge .Lerror
+@
+@ First thing to do is to get the page tables set up so that we can call the kernel
+@ in the correct place. This is relocatable code...
+@
+ mrc p15, 0, r9, c0, c0 @ get Processor ID
+@
+@ Read processor ID register (CP#15, CR0).
+@ NOTE: ARM2 & ARM250 cause an undefined instruction exception...
+@ Values are:
+@ XX01XXXX = ARMv4 architecture (StrongARM)
+@ XX00XXXX = ARMv3 architecture
+@ 4156061X = ARM 610
+@ 4156030X = ARM 3
+@ 4156025X = ARM 250
+@ 4156020X = ARM 2
+@
+ adr r10, .LCProcTypes
+1: ldmia r10!, {r5, r6, r8} @ Get Set, Mask, MMU Flags
+ teq r5, #0 @ End of list?
+ beq .Lerror
+ eor r5, r5, r9
+ tst r5, r6
+ addne r10, r10, #8
+ bne 1b
+
+ adr r4, .LCMachTypes
+ add r4, r4, r1, lsl #4
+ ldmia r4, {r4, r5, r6} @ r4 = page dir in physical ram
+
+ mov r0, r4
+ mov r1, #0
+ add r2, r0, #0x4000
+1: str r1, [r0], #4 @ Clear page table
+ teq r0, r2
+ bne 1b
+@
+@ Add enough entries to allow the kernel to be called.
+@ It will sort out the real mapping in paging_init
+@
+ add r0, r4, #0x3000
+ mov r1, #0x0000000c @ SECT_CACHEABLE | SECT_BUFFERABLE
+ orr r1, r1, r8
+ add r1, r1, r5
+ str r1, [r0], #4
+ add r1, r1, #1 << 20
+ str r1, [r0], #4
+ add r1, r1, #1 << 20
+@
+@ Map in IO space
+@
+ add r0, r4, #0x3800
+ orr r1, r6, r8
+ add r2, r0, #0x0800
+1: str r1, [r0], #4
+ add r1, r1, #1 << 20
+ teq r0, r2
+ bne 1b
+@
+@ Map in screen at 0x02000000 & SCREEN2_BASE
+@
+ teq r5, #0
+ addne r0, r4, #0x80 @ 02000000
+ movne r1, #0x02000000
+ orrne r1, r1, r8
+ strne r1, [r0]
+ addne r0, r4, #0x3600 @ d8000000
+ strne r1, [r0]
+@
+@ The following should work on both v3 and v4 implementations
+@
+ mov lr, pc
+ mov pc, r10 @ Call processor flush (returns ctrl reg)
+ adr r5, __entry
+ sub r10, r10, r5 @ Make r10 PIC
+ ldr lr, .Lbranch
+ mcr p15, 0, r0, c1, c0 @ Enable MMU & caches. In 3 instructions
+ @ we lose this page!
+ mov pc, lr
+
+.Lerror: mov r0, #0x02000000
+ mov r1, #0x11
+ orr r1, r1, r1, lsl #8
+ orr r1, r1, r1, lsl #16
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ str r1, [r0], #4
+ b .Lerror
+
+.Lbranch: .long .Lalready_done_mmap @ Real address of routine
+
+ @ EBSA (pg dir phys, phys ram start, phys i/o)
+.LCMachTypes: .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 @ Address of page tables (physical)
+ .long 0 @ Address of RAM
+ .long 0xe0000000 @ I/O address
+ .long 0
+
+ @ RPC
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 + 0x10000000
+ .long 0x10000000
+ .long 0x03000000
+ .long 0
+
+ @ EBSIT ???
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000
+ .long 0
+ .long 0xe0000000
+ .long 0
+
+ @ NexusPCI
+ .long SYMBOL_NAME(swapper_pg_dir) - 0xc0000000 + 0x40000000
+ .long 0x40000000
+ .long 0x10000000
+ .long 0
+
+.LCProcTypes: @ ARM6 / 610
+ .long 0x41560600
+ .long 0xffffff00
+ .long 0x00000c12
+ b .Larmv3_flush_early @ arm v3 flush & ctrl early setup
+ mov pc, lr
+
+ @ ARM7 / 710
+ .long 0x41007000
+ .long 0xfffff000
+ .long 0x00000c12
+ b .Larmv3_flush_late @ arm v3 flush & ctrl late setup
+ mov pc, lr
+
+ @ StrongARM
+ .long 0x4401a100
+ .long 0xfffffff0
+ .long 0x00000c02
+ b .Larmv4_flush_early
+ b .Lsa_fastclock
+
+ .long 0
+
+.LC0: .long SYMBOL_NAME(_edata)
+ .long SYMBOL_NAME(arm_id)
+ .long SYMBOL_NAME(_end)
+ .long SYMBOL_NAME(init_task_union)+8192
+ .align
+
+.Larmv3_flush_early:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0 @ flush caches on v3
+ mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mov r0, #0x3d @ ....S..DPWC.M
+ orr r0, r0, #0x100
+ mov pc, lr
+
+.Larmv3_flush_late:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0 @ flush caches on v3
+ mcr p15, 0, r0, c5, c0 @ flush TLBs on v3
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mov r0, #0x7d @ ....S.LDPWC.M
+ orr r0, r0, #0x100
+ mov pc, lr
+
+.Larmv4_flush_early:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7 @ flush I,D caches on v4
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
+ mcr p15, 0, r0, c8, c7 @ flush I,D TLBs on v4
+ mcr p15, 0, r4, c2, c0 @ load page table pointer
+ mov r0, #0x1f @ Domains 0, 1 = client
+ mcr p15, 0, r0, c3, c0 @ load domain access register
+ mrc p15, 0, r0, c1, c0 @ get control register v4
+ bic r0, r0, #0x0e00
+ bic r0, r0, #0x0002
+ orr r0, r0, #0x003d @ I...S..DPWC.M
+ orr r0, r0, #0x1100 @ v4 supports separate I cache
+ mov pc, lr
+
+.Lsa_fastclock: mcr p15, 0, r4, c15, c1, 2 @ Enable clock switching
+ mov pc, lr
+
+.Lalready_done_mmap:
+ adr r5, __entry @ Add base back in
+ add r10, r10, r5
+ adr r5, .LC0
+ ldmia r5, {r5, r6, r8, sp} @ Setup stack
+ mov r4, #0
+1: cmp r5, r8 @ Clear BSS
+ strcc r4, [r5],#4
+ bcc 1b
+
+ str r9, [r6] @ Save processor ID
+ mov lr, pc
+ add pc, r10, #4 @ Call post-processor init
+ mov fp, #0
+ b SYMBOL_NAME(start_kernel)
+
+#if 1
+/*
+ * Useful debugging routines
+ */
+ .globl _printhex8
+_printhex8: mov r1, #8
+ b printhex
+
+ .globl _printhex4
+_printhex4: mov r1, #4
+ b printhex
+
+ .globl _printhex2
+_printhex2: mov r1, #2
+printhex: ldr r2, =hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ strb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0, lsr #4
+ cmp r1, #10
+ addlt r1, r1, #'0'
+ addge r1, r1, #'a' - 10
+ strb r1, [r3, #-1]!
+ teq r3, r2
+ bne 1b
+ mov r0, r2
+
+ .globl _printascii
+_printascii:
+#ifdef CONFIG_ARCH_RPC
+ mov r3, #0xe0000000
+ orr r3, r3, #0x00010000
+ orr r3, r3, #0x00000fe0
+#else
+ mov r3, #0xf0000000
+ orr r3, r3, #0x0be0
+#endif
+ b 3f
+1: ldrb r2, [r3, #0x18]
+ tst r2, #0x10
+ beq 1b
+ strb r1, [r3]
+2: ldrb r2, [r3, #0x14]
+ and r2, r2, #0x60
+ teq r2, #0x60
+ bne 2b
+ teq r1, #'\n'
+ moveq r1, #'\r'
+ beq 1b
+3: teq r0, #0
+ ldrneb r1, [r0], #1
+ teqne r1, #0
+ bne 1b
+ mov pc, lr
+
+ .ltorg
+
+ .globl _printch
+_printch:
+#ifdef CONFIG_ARCH_RPC
+ mov r3, #0xe0000000
+ orr r3, r3, #0x00010000
+ orr r3, r3, #0x00000fe0
+#else
+ mov r3, #0xf0000000
+ orr r3, r3, #0x0be0
+#endif
+ mov r1, r0
+ mov r0, #0
+ b 1b
+
+ .bss
+hexbuf: .space 16
+
+#endif
+
+ .text
+ .align 13
+ENTRY(this_must_match_init_task)
diff --git a/arch/arm/kernel/iic.c b/arch/arm/kernel/iic.c
new file mode 100644
index 000000000..10a25e01b
--- /dev/null
+++ b/arch/arm/kernel/iic.c
@@ -0,0 +1,160 @@
+/*
+ * linux/arch/arm/kernel/iic.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * IIC is used to get the current time from the CMOS rtc.
+ */
+
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+/*
+ * if delay loop has been calibrated then us that,
+ * else use IOC timer 1.
+ */
+static void iic_delay (void)
+{
+ extern unsigned long loops_per_sec;
+ if (loops_per_sec != (1 << 12)) {
+ udelay(10);
+ return;
+ } else {
+ unsigned long flags;
+ save_flags_cli(flags);
+
+ outb(254, IOC_T1LTCHL);
+ outb(255, IOC_T1LTCHH);
+ outb(0, IOC_T1GO);
+ outb(1<<6, IOC_IRQCLRA); /* clear T1 irq */
+ outb(4, IOC_T1LTCHL);
+ outb(0, IOC_T1LTCHH);
+ outb(0, IOC_T1GO);
+ while ((inb(IOC_IRQSTATA) & (1<<6)) == 0);
+ restore_flags(flags);
+ }
+}
+
+static inline void iic_start (void)
+{
+ unsigned char out;
+
+ out = inb(IOC_CONTROL) | 0xc2;
+
+ outb(out, IOC_CONTROL);
+ iic_delay();
+
+ outb(out ^ 1, IOC_CONTROL);
+ iic_delay();
+}
+
+static inline void iic_stop (void)
+{
+ unsigned char out;
+
+ out = inb(IOC_CONTROL) | 0xc3;
+
+ iic_delay();
+ outb(out ^ 1, IOC_CONTROL);
+
+ iic_delay();
+ outb(out, IOC_CONTROL);
+}
+
+static int iic_sendbyte (unsigned char b)
+{
+ unsigned char out, in;
+ int i;
+
+ out = (inb(IOC_CONTROL) & 0xfc) | 0xc0;
+
+ outb(out, IOC_CONTROL);
+ for (i = 7; i >= 0; i--) {
+ unsigned char c;
+ c = out | ((b & (1 << i)) ? 1 : 0);
+
+ outb(c, IOC_CONTROL);
+ iic_delay();
+
+ outb(c | 2, IOC_CONTROL);
+ iic_delay();
+
+ outb(c, IOC_CONTROL);
+ }
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+
+ outb(out | 3, IOC_CONTROL);
+ iic_delay();
+
+ in = inb(IOC_CONTROL) & 1;
+
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+
+ outb(out, IOC_CONTROL);
+ iic_delay();
+
+ if(in) {
+ printk("No acknowledge from RTC\n");
+ return 1;
+ } else
+ return 0;
+}
+
+static unsigned char iic_recvbyte (void)
+{
+ unsigned char out, in;
+ int i;
+
+ out = (inb(IOC_CONTROL) & 0xfc) | 0xc0;
+
+ outb(out, IOC_CONTROL);
+ in = 0;
+ for (i = 7; i >= 0; i--) {
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+ outb(out | 3, IOC_CONTROL);
+ iic_delay();
+ in = (in << 1) | (inb(IOC_CONTROL) & 1);
+ outb(out | 1, IOC_CONTROL);
+ iic_delay();
+ }
+ outb(out, IOC_CONTROL);
+ iic_delay();
+ outb(out | 2, IOC_CONTROL);
+ iic_delay();
+
+ return in;
+}
+
+void iic_control (unsigned char addr, unsigned char loc, unsigned char *buf, int len)
+{
+ iic_start();
+
+ if (iic_sendbyte(addr & 0xfe))
+ goto error;
+
+ if (iic_sendbyte(loc))
+ goto error;
+
+ if (addr & 1) {
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (iic_sendbyte (buf[i]))
+ break;
+ } else {
+ int i;
+
+ iic_stop();
+ iic_start();
+ iic_sendbyte(addr|1);
+ for (i = 0; i < len; i++)
+ buf[i] = iic_recvbyte ();
+ }
+error:
+ iic_stop();
+}
diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c
new file mode 100644
index 000000000..acc206942
--- /dev/null
+++ b/arch/arm/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static struct vm_area_struct init_mmap = INIT_MMAP;
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS;
+struct mm_struct init_mm = INIT_MM;
+
+/*
+ * Initial task structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union task_union init_task_union __attribute__((__section__(".text"))) = { INIT_TASK };
diff --git a/arch/arm/kernel/ioport.c b/arch/arm/kernel/ioport.c
new file mode 100644
index 000000000..defa74335
--- /dev/null
+++ b/arch/arm/kernel/ioport.c
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/arm/kernel/ioport.c
+ *
+ * This contains the io-permission bitmap code - written by obz, with changes
+ * by Linus.
+ *
+ * Modifications for ARM processor Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+asmlinkage void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
+{
+ int mask;
+ unsigned long *bitmap_base = bitmap + (base >> 5);
+ unsigned short low_index = base & 0x1f;
+ int length = low_index + extent;
+
+ if (low_index != 0) {
+ mask = (~0 << low_index);
+ if (length < 32)
+ mask &= ~(~0 << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ length -= 32;
+ }
+
+ mask = (new_value ? ~0 : 0);
+ while (length >= 32) {
+ *bitmap_base++ = mask;
+ length -= 32;
+ }
+
+ if (length > 0) {
+ mask = ~(~0 << length);
+ if (new_value)
+ *bitmap_base++ |= mask;
+ else
+ *bitmap_base++ &= ~mask;
+ }
+}
+
+/*
+ * this changes the io permissions bitmap in the current task.
+ */
+asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+{
+ if (from + num <= from)
+ return -EINVAL;
+#ifndef __arm__
+ if (from + num > IO_BITMAP_SIZE*32)
+ return -EINVAL;
+#endif
+ if (!suser())
+ return -EPERM;
+
+#ifdef IODEBUG
+ printk("io: from=%d num=%d %s\n", from, num, (turn_on ? "on" : "off"));
+#endif
+#ifndef __arm__
+ set_bitmap((unsigned long *)current->tss.io_bitmap, from, num, !turn_on);
+#endif
+ return 0;
+}
+
+unsigned int *stack;
+
+/*
+ * sys_iopl has to be used when you want to access the IO ports
+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
+ *
+ * Here we just change the eflags value on the stack: we allow
+ * only the super-user to do it. This depends on the stack-layout
+ * on system-call entry - see also fork() and the signal handling
+ * code.
+ */
+asmlinkage int sys_iopl(long ebx,long ecx,long edx,
+ long esi, long edi, long ebp, long eax, long ds,
+ long es, long fs, long gs, long orig_eax,
+ long eip,long cs,long eflags,long esp,long ss)
+{
+ unsigned int level = ebx;
+
+ if (level > 3)
+ return -EINVAL;
+ if (!suser())
+ return -EPERM;
+ *(&eflags) = (eflags & 0xffffcfff) | (level << 12);
+ return 0;
+}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
new file mode 100644
index 000000000..e0fb7540a
--- /dev/null
+++ b/arch/arm/kernel/irq.c
@@ -0,0 +1,327 @@
+/*
+ * linux/arch/arm/kernel/irq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Modifications for ARM processor Copyright (C) 1995, 1996 Russell King.
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+/*
+ * IRQ's are in fact implemented a bit like signal handlers for the kernel.
+ * Naturally it's not a 1:1 relation, but there are similarities.
+ */
+#include <linux/config.h> /* for CONFIG_DEBUG_ERRORS */
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/irq-no.h>
+#include <asm/arch/irq.h>
+
+unsigned int local_irq_count[NR_CPUS];
+#ifdef __SMP__
+atomic_t __arm_bh_counter;
+#else
+int __arm_bh_counter;
+#endif
+
+spinlock_t irq_controller_lock;
+
+#ifndef SMP
+#define irq_enter(cpu, irq) (++local_irq_count[cpu])
+#define irq_exit(cpu, irq) (--local_irq_count[cpu])
+#else
+#error SMP not supported
+#endif
+
+void disable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+#ifdef cliIF
+ save_flags(flags);
+ cliIF();
+#endif
+ mask_irq(irq_nr);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+void enable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&irq_controller_lock, flags);
+#ifdef cliIF
+ save_flags (flags);
+ cliIF();
+#endif
+ unmask_irq(irq_nr);
+ spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+
+struct irqaction *irq_action[NR_IRQS];
+
+/*
+ * Bitmask indicating valid interrupt numbers
+ */
+unsigned long validirqs[NR_IRQS / 32] = {
+ 0x003fffff, 0x000001ff, 0x000000ff, 0x00000000
+};
+
+int get_irq_list(char *buf)
+{
+ int i;
+ struct irqaction * action;
+ char *p = buf;
+
+ for (i = 0 ; i < NR_IRQS ; i++) {
+ action = irq_action[i];
+ if (!action)
+ continue;
+ p += sprintf(p, "%3d: %10u %s",
+ i, kstat.interrupts[i], action->name);
+ for (action = action->next; action; action = action->next) {
+ p += sprintf(p, ", %s", action->name);
+ }
+ *p++ = '\n';
+ }
+ return p - buf;
+}
+
+/*
+ * do_IRQ handles all normal device IRQ's
+ */
+asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
+{
+ struct irqaction * action;
+ int status, cpu;
+
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+ if (irq != IRQ_EXPANSIONCARD)
+#endif
+ {
+ spin_lock(&irq_controller_lock);
+ mask_and_ack_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+
+ cpu = smp_processor_id();
+ irq_enter(cpu, irq);
+ kstat.interrupts[irq]++;
+
+ /* Return with this interrupt masked if no action */
+ status = 0;
+ action = *(irq + irq_action);
+ if (action) {
+ if (!(action->flags & SA_INTERRUPT))
+ __sti();
+
+ do {
+ status |= action->flags;
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ __cli();
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+ if (irq != IRQ_KEYBOARDTX && irq != IRQ_EXPANSIONCARD)
+#endif
+ {
+ spin_lock(&irq_controller_lock);
+ unmask_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+ }
+
+ irq_exit(cpu, irq);
+ /*
+ * This should be conditional: we should really get
+ * a return code from the irq handler to tell us
+ * whether the handler wants us to do software bottom
+ * half handling or not..
+ *
+ * ** IMPORTANT NOTE: do_bottom_half() ENABLES IRQS!!! **
+ * ** WE MUST DISABLE THEM AGAIN, ELSE IDE DISKS GO **
+ * ** AWOL **
+ */
+ if (1) {
+ if (bh_active & bh_mask)
+ do_bottom_half();
+ __cli();
+ }
+}
+
+#if defined(HAS_IOMD) || defined(HAS_IOC)
+void do_ecard_IRQ(int irq, struct pt_regs *regs)
+{
+ struct irqaction * action;
+
+ action = *(irq + irq_action);
+ if (action) {
+ do {
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
+ } else {
+ spin_lock(&irq_controller_lock);
+ mask_irq (irq);
+ spin_unlock(&irq_controller_lock);
+ }
+}
+#endif
+
+int setup_arm_irq(int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+
+ p = irq_action + irq;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ))
+ return -EBUSY;
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ if (new->flags & SA_SAMPLE_RANDOM)
+ rand_initialize_irq(irq);
+
+ save_flags_cli(flags);
+ *p = new;
+
+ if (!shared) {
+ spin_lock(&irq_controller_lock);
+ unmask_irq(irq);
+ spin_unlock(&irq_controller_lock);
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * Using "struct sigaction" is slightly silly, but there
+ * are historical reasons and it works well, so..
+ */
+int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irq_flags, const char * devname, void *dev_id)
+{
+ unsigned long retval;
+ struct irqaction *action;
+
+ if (irq >= NR_IRQS || !(validirqs[irq >> 5] & (1 << (irq & 31))))
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irq_flags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ retval = setup_arm_irq(irq, action);
+
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction * action, **p;
+ unsigned long flags;
+
+ if (irq >= NR_IRQS || !(validirqs[irq >> 5] & (1 << (irq & 31)))) {
+ printk(KERN_ERR "Trying to free IRQ%d\n",irq);
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+ return;
+ }
+ for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found it - now free it */
+ save_flags_cli (flags);
+ *p = action->next;
+ restore_flags (flags);
+ kfree(action);
+ return;
+ }
+ printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+}
+
+unsigned long probe_irq_on (void)
+{
+ unsigned int i, irqs = 0;
+ unsigned long delay;
+
+ /* first snaffle up any unassigned irqs */
+ for (i = 15; i > 0; i--) {
+ if (!irq_action[i]) {
+ enable_irq(i);
+ irqs |= 1 << i;
+ }
+ }
+
+ /* wait for spurious interrupts to mask themselves out again */
+ for (delay = jiffies + HZ/10; delay > jiffies; )
+ /* min 100ms delay */;
+
+ /* now filter out any obviously spurious interrupts */
+ return irqs & get_enabled_irqs();
+}
+
+int probe_irq_off (unsigned long irqs)
+{
+ unsigned int i;
+
+ irqs &= ~get_enabled_irqs();
+ if (!irqs)
+ return 0;
+ i = ffz (~irqs);
+ if (irqs != (irqs & (1 << i)))
+ i = -i;
+ return i;
+}
+
+__initfunc(void init_IRQ(void))
+{
+ irq_init_irq();
+}
diff --git a/arch/arm/kernel/oldlatches.c b/arch/arm/kernel/oldlatches.c
new file mode 100644
index 000000000..c4674cd35
--- /dev/null
+++ b/arch/arm/kernel/oldlatches.c
@@ -0,0 +1,53 @@
+/* Support for the latches on the old Archimedes which control the floppy,
+ * hard disc and printer
+ *
+ * (c) David Alan Gilbert 1995/1996
+ */
+#include <linux/kernel.h>
+
+#include <asm/io.h>
+#include <asm/hardware.h>
+
+#ifdef LATCHAADDR
+/*
+ * They are static so that everyone who accesses them has to go through here
+ */
+static unsigned char LatchACopy;
+
+/* newval=(oldval & ~mask)|newdata */
+void oldlatch_aupdate(unsigned char mask,unsigned char newdata)
+{
+ LatchACopy=(LatchACopy & ~mask)|newdata;
+ outb(LatchACopy, LATCHAADDR);
+#ifdef DEBUG
+ printk("oldlatch_A:0x%2x\n",LatchACopy);
+#endif
+
+}
+#endif
+
+#ifdef LATCHBADDR
+static unsigned char LatchBCopy;
+
+/* newval=(oldval & ~mask)|newdata */
+void oldlatch_bupdate(unsigned char mask,unsigned char newdata)
+{
+ LatchBCopy=(LatchBCopy & ~mask)|newdata;
+ outb(LatchBCopy, LATCHBADDR);
+#ifdef DEBUG
+ printk("oldlatch_B:0x%2x\n",LatchBCopy);
+#endif
+}
+#endif
+
+void oldlatch_init(void)
+{
+ printk("oldlatch: init\n");
+#ifdef LATCHAADDR
+ oldlatch_aupdate(0xff,0xff);
+#endif
+#ifdef LATCHBADDR
+ oldlatch_bupdate(0xff,0x8); /* That's no FDC reset...*/
+#endif
+ return ;
+}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
new file mode 100644
index 000000000..7f45e7c3c
--- /dev/null
+++ b/arch/arm/kernel/process.c
@@ -0,0 +1,239 @@
+/*
+ * linux/arch/arm/kernel/process.c
+ *
+ * Copyright (C) 1996 Russell King - Converted to ARM.
+ * Original Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/io.h>
+
+extern void fpe_save(struct fp_soft_struct *);
+extern char *processor_modes[];
+
+asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
+
+static int hlt_counter=0;
+
+void disable_hlt(void)
+{
+ hlt_counter++;
+}
+
+void enable_hlt(void)
+{
+ hlt_counter--;
+}
+
+/*
+ * The idle loop on an ARM.
+ */
+asmlinkage int sys_idle(void)
+{
+ int ret = -EPERM;
+
+ lock_kernel();
+ if (current->pid != 0)
+ goto out;
+ /* endless idle loop with no priority at all */
+ current->priority = -100;
+ for (;;)
+ {
+ if (!hlt_counter && !need_resched)
+ proc_idle ();
+ run_task_queue(&tq_scheduler);
+ schedule();
+ }
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+__initfunc(void reboot_setup(char *str, int *ints))
+{
+}
+
+/*
+ * This routine reboots the machine by resetting the expansion cards via
+ * their loaders, turning off the processor cache (if ARM3), copying the
+ * first instruction of the ROM to 0, and executing it there.
+ */
+void machine_restart(char * __unused)
+{
+ proc_hard_reset ();
+ arch_hard_reset ();
+}
+
+void machine_halt(void)
+{
+}
+
+void machine_power_off(void)
+{
+}
+
+
+void show_regs(struct pt_regs * regs)
+{
+ unsigned long flags;
+
+ flags = condition_codes(regs);
+
+ printk("\n"
+ "pc : [<%08lx>]\n"
+ "lr : [<%08lx>]\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ instruction_pointer(regs),
+ regs->ARM_lr, regs->ARM_sp,
+ regs->ARM_ip, regs->ARM_fp);
+ printk( "r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->ARM_r10, regs->ARM_r9,
+ regs->ARM_r8);
+ printk( "r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->ARM_r7, regs->ARM_r6,
+ regs->ARM_r5, regs->ARM_r4);
+ printk( "r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->ARM_r3, regs->ARM_r2,
+ regs->ARM_r1, regs->ARM_r0);
+ printk("Flags: %c%c%c%c",
+ flags & CC_N_BIT ? 'N' : 'n',
+ flags & CC_Z_BIT ? 'Z' : 'z',
+ flags & CC_C_BIT ? 'C' : 'c',
+ flags & CC_V_BIT ? 'V' : 'v');
+ printk(" IRQs %s FIQs %s Mode %s\n",
+ interrupts_enabled(regs) ? "on" : "off",
+ fast_interrupts_enabled(regs) ? "on" : "off",
+ processor_modes[processor_mode(regs)]);
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+{ int ctrl, transbase, dac;
+ __asm__ (
+" mrc p15, 0, %0, c1, c0\n"
+" mrc p15, 0, %1, c2, c0\n"
+" mrc p15, 0, %2, c3, c0\n"
+ : "=r" (ctrl), "=r" (transbase), "=r" (dac));
+ printk("Control: %04X Table: %08X DAC: %08X",
+ ctrl, transbase, dac);
+ }
+#endif
+ printk ("Segment %s\n", get_fs() == get_ds() ? "kernel" : "user");
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+}
+
+void flush_thread(void)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ current->debugreg[i] = 0;
+ if (last_task_used_math == current)
+ last_task_used_math = NULL;
+ current->used_math = 0;
+ current->flags &= ~PF_USEDFPU;
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ struct context_save_struct * save;
+
+ childregs = ((struct pt_regs *)((unsigned long)p + 8192)) - 1;
+ *childregs = *regs;
+ childregs->ARM_r0 = 0;
+
+ save = ((struct context_save_struct *)(childregs)) - 1;
+ copy_thread_css (save);
+ p->tss.save = save;
+ /*
+ * Save current math state in p->tss.fpstate.soft if not already there.
+ */
+ if (last_task_used_math == current)
+ fpe_save (&p->tss.fpstate.soft);
+
+ return 0;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
+{
+ int fpvalid = 0;
+
+ if (current->used_math) {
+ if (last_task_used_math == current)
+ fpe_save (&current->tss.fpstate.soft);
+
+ memcpy (fp, &current->tss.fpstate.soft, sizeof (fp));
+ }
+
+ return fpvalid;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ int i;
+
+ dump->magic = CMAGIC;
+ dump->start_code = current->mm->start_code;
+ dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
+
+ dump->u_tsize = (current->mm->end_code - current->mm->start_code) >> PAGE_SHIFT;
+ dump->u_dsize = (current->mm->brk - current->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ dump->u_ssize = 0;
+
+ for (i = 0; i < 8; i++)
+ dump->u_debugreg[i] = current->debugreg[i];
+
+ if (dump->start_stack < 0x04000000)
+ dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
+
+ dump->regs = *regs;
+ dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
+}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
new file mode 100644
index 000000000..5fa67df6c
--- /dev/null
+++ b/arch/arm/kernel/ptrace.c
@@ -0,0 +1,745 @@
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* edited for ARM by Russell King */
+
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+
+/*
+ * does not yet catch signals sent when the child dies
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Breakpoint SWI instruction: SWI &9F0001
+ */
+#define BREAKINST 0xef9f0001
+
+/* change a pid into a task struct. */
+static inline struct task_struct * get_task(int pid)
+{
+ int i;
+
+ for (i = 1; i < NR_TASKS; i++) {
+ if (task[i] != NULL && (task[i]->pid == pid))
+ return task[i];
+ }
+ return NULL;
+}
+
+/*
+ * this routine will get a word off the process's privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long get_stack_long(struct task_struct *task, int offset)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)((unsigned long)task + 8192 - sizeof(struct pt_regs));
+ stack += offset << 2;
+ return *(unsigned long *)stack;
+}
+
+/*
+ * this routine will put a word on the process's privileged stack.
+ * the offset is how far from the base addr as stored in the TSS.
+ * this routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long put_stack_long(struct task_struct *task, int offset,
+ unsigned long data)
+{
+ unsigned char *stack;
+
+ stack = (unsigned char *)((unsigned long)task + 8192 - sizeof(struct pt_regs));
+ stack += offset << 2;
+ *(unsigned long *) stack = data;
+ return 0;
+}
+
+/*
+ * This routine gets a long from any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ */
+static unsigned long get_long(struct task_struct * tsk,
+ struct vm_area_struct * vma, unsigned long addr)
+{
+ pgd_t *pgdir;
+ pmd_t *pgmiddle;
+ pte_t *pgtable;
+ unsigned long page;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none(*pgdir)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return 0;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return 0;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+
+ if(MAP_NR(page) >= max_mapnr)
+ return 0;
+ page += addr & ~PAGE_MASK;
+ return *(unsigned long *)page;
+}
+
+/*
+ * This routine puts a long into any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ *
+ * Now keeps R/W state of the page so that a text page stays readonly
+ * even if a debugger scribbles breakpoints into it. -M.U-
+ */
+static void put_long(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long addr,
+ unsigned long data)
+{
+ pgd_t *pgdir;
+ pmd_t *pgmiddle;
+ pte_t *pgtable;
+ unsigned long page;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (!pgd_present(*pgdir)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+ if (!pte_write(*pgtable)) {
+ handle_mm_fault(tsk, vma, addr, 1);
+ goto repeat;
+ }
+
+ if (MAP_NR(page) < max_mapnr) {
+ page += addr & ~PAGE_MASK;
+ *(unsigned long *)page = data;
+ __flush_entry_to_ram(page);
+ }
+ set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ flush_tlb();
+}
+
+static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
+{
+ struct vm_area_struct * vma;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(tsk->mm,addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
+ return NULL;
+ vma->vm_offset -= vma->vm_start - addr;
+ vma->vm_start = addr;
+ return vma;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls get_long() to read a long.
+ */
+static int read_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long * result)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
+ unsigned long low,high;
+ struct vm_area_struct * vma_high = vma;
+
+ if (addr + sizeof(long) >= vma->vm_end) {
+ vma_high = vma->vm_next;
+ if (!vma_high || vma_high->vm_start != vma->vm_end)
+ return -EIO;
+ }
+ low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
+ high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
+ switch (addr & (sizeof(long)-1)) {
+ case 1:
+ low >>= 8;
+ low |= high << 24;
+ break;
+ case 2:
+ low >>= 16;
+ low |= high << 16;
+ break;
+ case 3:
+ low >>= 24;
+ low |= high << 8;
+ break;
+ }
+ *result = low;
+ } else
+ *result = get_long(tsk, vma, addr);
+ return 0;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls put_long() to write a long.
+ */
+static int write_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long data)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
+ unsigned long low,high;
+ struct vm_area_struct * vma_high = vma;
+
+ if (addr + sizeof(long) >= vma->vm_end) {
+ vma_high = vma->vm_next;
+ if (!vma_high || vma_high->vm_start != vma->vm_end)
+ return -EIO;
+ }
+ low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
+ high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
+ switch (addr & (sizeof(long)-1)) {
+ case 0: /* shouldn't happen, but safety first */
+ low = data;
+ break;
+ case 1:
+ low &= 0x000000ff;
+ low |= data << 8;
+ high &= ~0xff;
+ high |= data >> 24;
+ break;
+ case 2:
+ low &= 0x0000ffff;
+ low |= data << 16;
+ high &= ~0xffff;
+ high |= data >> 16;
+ break;
+ case 3:
+ low &= 0x00ffffff;
+ low |= data << 24;
+ high &= ~0xffffff;
+ high |= data >> 8;
+ break;
+ }
+ put_long(tsk, vma, addr & ~(sizeof(long)-1),low);
+ put_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1),high);
+ } else
+ put_long(tsk, vma, addr, data);
+ return 0;
+}
+
+/*
+ * Get value of register `rn' (in the instruction)
+ */
+static unsigned long ptrace_getrn (struct task_struct *child, unsigned long insn)
+{
+ unsigned int reg = (insn >> 16) & 15;
+ unsigned long val;
+
+ if (reg == 15)
+ val = pc_pointer (get_stack_long (child, reg));
+ else
+ val = get_stack_long (child, reg);
+
+printk ("r%02d=%08lX ", reg, val);
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in an ALU instruction)
+ */
+static unsigned long ptrace_getaluop2 (struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+printk ("op2=");
+ if (insn & 1 << 25) {
+ val = insn & 255;
+ shift = (insn >> 8) & 15;
+ type = 3;
+printk ("(imm)");
+ } else {
+ val = get_stack_long (child, insn & 15);
+
+ if (insn & (1 << 4))
+ shift = (int)get_stack_long (child, (insn >> 8) & 15);
+ else
+ shift = (insn >> 7) & 31;
+
+ type = (insn >> 5) & 3;
+printk ("(r%02ld)", insn & 15);
+ }
+printk ("sh%dx%d", type, shift);
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ __asm__ __volatile__("mov %0, %0, ror %1" : "=r" (val) : "0" (val), "r" (shift));
+ break;
+ }
+printk ("=%08lX ", val);
+ return val;
+}
+
+/*
+ * Get value of operand 2 (in a LDR instruction)
+ */
+static unsigned long ptrace_getldrop2 (struct task_struct *child, unsigned long insn)
+{
+ unsigned long val;
+ int shift;
+ int type;
+
+ val = get_stack_long (child, insn & 15);
+ shift = (insn >> 7) & 31;
+ type = (insn >> 5) & 3;
+
+printk ("op2=r%02ldsh%dx%d", insn & 15, shift, type);
+ switch (type) {
+ case 0: val <<= shift; break;
+ case 1: val >>= shift; break;
+ case 2:
+ val = (((signed long)val) >> shift);
+ break;
+ case 3:
+ __asm__ __volatile__("mov %0, %0, ror %1" : "=r" (val) : "0" (val), "r" (shift));
+ break;
+ }
+printk ("=%08lX ", val);
+ return val;
+}
+#undef pc_pointer
+#define pc_pointer(x) ((x) & 0x03fffffc)
+int ptrace_set_bpt (struct task_struct *child)
+{
+ unsigned long insn, pc, alt;
+ int i, nsaved = 0, res;
+
+ pc = pc_pointer (get_stack_long (child, 15/*REG_PC*/));
+
+ res = read_long (child, pc, &insn);
+ if (res < 0)
+ return res;
+
+ child->debugreg[nsaved++] = alt = pc + 4;
+printk ("ptrace_set_bpt: insn=%08lX pc=%08lX ", insn, pc);
+ switch (insn & 0x0e100000) {
+ case 0x00000000:
+ case 0x00100000:
+ case 0x02000000:
+ case 0x02100000: /* data processing */
+ printk ("data ");
+ switch (insn & 0x01e0f000) {
+ case 0x0000f000:
+ alt = ptrace_getrn(child, insn) & ptrace_getaluop2(child, insn);
+ break;
+ case 0x0020f000:
+ alt = ptrace_getrn(child, insn) ^ ptrace_getaluop2(child, insn);
+ break;
+ case 0x0040f000:
+ alt = ptrace_getrn(child, insn) - ptrace_getaluop2(child, insn);
+ break;
+ case 0x0060f000:
+ alt = ptrace_getaluop2(child, insn) - ptrace_getrn(child, insn);
+ break;
+ case 0x0080f000:
+ alt = ptrace_getrn(child, insn) + ptrace_getaluop2(child, insn);
+ break;
+ case 0x00a0f000:
+ alt = ptrace_getrn(child, insn) + ptrace_getaluop2(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x00c0f000:
+ alt = ptrace_getrn(child, insn) - ptrace_getaluop2(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x00e0f000:
+ alt = ptrace_getaluop2(child, insn) - ptrace_getrn(child, insn) +
+ (get_stack_long (child, 16/*REG_PSR*/) & CC_C_BIT ? 1 : 0);
+ break;
+ case 0x0180f000:
+ alt = ptrace_getrn(child, insn) | ptrace_getaluop2(child, insn);
+ break;
+ case 0x01a0f000:
+ alt = ptrace_getaluop2(child, insn);
+ break;
+ case 0x01c0f000:
+ alt = ptrace_getrn(child, insn) & ~ptrace_getaluop2(child, insn);
+ break;
+ case 0x01e0f000:
+ alt = ~ptrace_getaluop2(child, insn);
+ break;
+ }
+ break;
+
+ case 0x04100000: /* ldr */
+ if ((insn & 0xf000) == 0xf000) {
+printk ("ldr ");
+ alt = ptrace_getrn(child, insn);
+ if (insn & 1 << 24) {
+ if (insn & 1 << 23)
+ alt += ptrace_getldrop2 (child, insn);
+ else
+ alt -= ptrace_getldrop2 (child, insn);
+ }
+ if (read_long (child, alt, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ }
+ break;
+
+ case 0x06100000: /* ldr imm */
+ if ((insn & 0xf000) == 0xf000) {
+printk ("ldrimm ");
+ alt = ptrace_getrn(child, insn);
+ if (insn & 1 << 24) {
+ if (insn & 1 << 23)
+ alt += insn & 0xfff;
+ else
+ alt -= insn & 0xfff;
+ }
+ if (read_long (child, alt, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ }
+ break;
+
+ case 0x08100000: /* ldm */
+ if (insn & (1 << 15)) {
+ unsigned long base;
+ int nr_regs;
+printk ("ldm ");
+
+ if (insn & (1 << 23)) {
+ nr_regs = insn & 65535;
+
+ nr_regs = (nr_regs & 0x5555) + ((nr_regs & 0xaaaa) >> 1);
+ nr_regs = (nr_regs & 0x3333) + ((nr_regs & 0xcccc) >> 2);
+ nr_regs = (nr_regs & 0x0707) + ((nr_regs & 0x7070) >> 4);
+ nr_regs = (nr_regs & 0x000f) + ((nr_regs & 0x0f00) >> 8);
+ nr_regs <<= 2;
+
+ if (!(insn & (1 << 24)))
+ nr_regs -= 4;
+ } else {
+ if (insn & (1 << 24))
+ nr_regs = -4;
+ else
+ nr_regs = 0;
+ }
+
+ base = ptrace_getrn (child, insn);
+
+ if (read_long (child, base + nr_regs, &alt) < 0)
+ alt = pc + 4; /* not valid */
+ else
+ alt = pc_pointer (alt);
+ break;
+ }
+ break;
+
+ case 0x0a000000:
+ case 0x0a100000: { /* bl or b */
+ signed long displ;
+printk ("b/bl ");
+ /* It's a branch/branch link: instead of trying to
+ * figure out whether the branch will be taken or not,
+ * we'll put a breakpoint at either location. This is
+ * simpler, more reliable, and probably not a whole lot
+ * slower than the alternative approach of emulating the
+ * branch.
+ */
+ displ = (insn & 0x00ffffff) << 8;
+ displ = (displ >> 6) + 8;
+ if (displ != 0 && displ != 4)
+ alt = pc + displ;
+ }
+ break;
+ }
+printk ("=%08lX\n", alt);
+ if (alt != pc + 4)
+ child->debugreg[nsaved++] = alt;
+
+ for (i = 0; i < nsaved; i++) {
+ res = read_long (child, child->debugreg[i], &insn);
+ if (res >= 0) {
+ child->debugreg[i + 2] = insn;
+ res = write_long (child, child->debugreg[i], BREAKINST);
+ }
+ if (res < 0) {
+ child->debugreg[4] = 0;
+ return res;
+ }
+ }
+ child->debugreg[4] = nsaved;
+ return 0;
+}
+
+/* Ensure no single-step breakpoint is pending. Returns non-zero
+ * value if child was being single-stepped.
+ */
+int ptrace_cancel_bpt (struct task_struct *child)
+{
+ int i, nsaved = child->debugreg[4];
+
+ child->debugreg[4] = 0;
+
+ if (nsaved > 2) {
+ printk ("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
+ nsaved = 2;
+ }
+ for (i = 0; i < nsaved; i++)
+ write_long (child, child->debugreg[i], child->debugreg[i + 2]);
+ return nsaved != 0;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->flags & PF_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->flags |= PF_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ if (pid == 1) /* you may not mess with init */
+ goto out;
+ ret = -ESRCH;
+ if (!(child = get_task(pid)))
+ goto out;
+ ret = -EPERM;
+ if (request == PTRACE_ATTACH) {
+ if (child == current)
+ goto out;
+ if ((!child->dumpable ||
+ (current->uid != child->euid) ||
+ (current->uid != child->suid) ||
+ (current->uid != child->uid) ||
+ (current->gid != child->egid) ||
+ (current->gid != child->sgid) ||
+ (current->gid != child->gid)) && !suser())
+ goto out;
+ /* the same process cannot be attached many times */
+ if (child->flags & PF_PTRACED)
+ goto out;
+ child->flags |= PF_PTRACED;
+ if (child->p_pptr != current) {
+ REMOVE_LINKS(child);
+ child->p_pptr = current;
+ SET_LINKS(child);
+ }
+ send_sig(SIGSTOP, child, 1);
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ if (!(child->flags & PF_PTRACED))
+ goto out;
+ if (child->state != TASK_STOPPED) {
+ if (request != PTRACE_KILL)
+ goto out;
+ }
+ if (child->p_pptr != current)
+ goto out;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+
+ ret = read_long(child, addr, &tmp);
+ if (ret >= 0)
+ ret = put_user(tmp, (unsigned long *)data);
+ goto out;
+ }
+
+ case PTRACE_PEEKUSR: { /* read the word at location addr in the USER area. */
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user))
+ goto out;
+
+ tmp = 0; /* Default return condition */
+ if (addr < sizeof (struct pt_regs))
+ tmp = get_stack_long(child, (int)addr >> 2);
+ ret = put_user(tmp, (unsigned long *)data);
+ goto out;
+ }
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = write_long(child,addr,data);
+ goto out;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 || addr >= sizeof(struct user))
+ goto out;
+
+ if (addr < sizeof (struct pt_regs))
+ ret = put_stack_long(child, (int)addr >> 2, data);
+ goto out;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ if (request == PTRACE_SYSCALL)
+ child->flags |= PF_TRACESYS;
+ else
+ child->flags &= ~PF_TRACESYS;
+ child->exit_code = data;
+ wake_up_process (child);
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt (child);
+ ret = 0;
+ goto out;
+
+ /* make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL:
+ if (child->state == TASK_ZOMBIE) /* already dead */
+ return 0;
+ wake_up_process (child);
+ child->exit_code = SIGKILL;
+ ptrace_cancel_bpt (child);
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt (child);
+ ret = 0;
+ goto out;
+
+ case PTRACE_SINGLESTEP: /* execute single instruction. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->debugreg[4] = -1;
+ child->flags &= ~PF_TRACESYS;
+ wake_up_process(child);
+ child->exit_code = data;
+ /* give it a chance to run. */
+ ret = 0;
+ goto out;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ goto out;
+ child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ wake_up_process (child);
+ child->exit_code = data;
+ REMOVE_LINKS(child);
+ child->p_pptr = child->p_opptr;
+ SET_LINKS(child);
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt (child);
+ ret = 0;
+ goto out;
+
+ default:
+ ret = -EIO;
+ goto out;
+ }
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if ((current->flags & (PF_PTRACED|PF_TRACESYS))
+ != (PF_PTRACED|PF_TRACESYS))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/arm/kernel/setup-ebsa110.c b/arch/arm/kernel/setup-ebsa110.c
new file mode 100644
index 000000000..285284b7d
--- /dev/null
+++ b/arch/arm/kernel/setup-ebsa110.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/setup-ebsa110.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+/*
+ * This file obtains various parameters about the system that the kernel
+ * is running on.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/ldt.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/utsname.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/pgtable.h>
+
+#ifndef CONFIG_CMDLINE
+#define CONFIG_CMDLINE "root=nfs rw console=ttyS1,38400n8"
+#endif
+#define MEM_SIZE (16*1024*1024)
+
+#define COMMAND_LINE_SIZE 256
+
+unsigned char aux_device_present;
+unsigned long arm_id;
+extern int root_mountflags;
+extern int _etext, _edata, _end;
+
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+static inline void setup_ramdisk (void)
+{
+ rd_image_start = 0;
+ rd_prompt = 1;
+ rd_doload = 1;
+}
+#else
+#define setup_ramdisk()
+#endif
+
+static char default_command_line[] = CONFIG_CMDLINE;
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+ char saved_command_line[COMMAND_LINE_SIZE];
+
+struct processor processor;
+extern const struct processor sa110_processor_functions;
+
+void setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p, unsigned long * memory_end_p)
+{
+ unsigned long memory_start, memory_end;
+ char c = ' ', *to = command_line, *from;
+ int len = 0;
+
+ memory_start = (unsigned long)&_end;
+ memory_end = 0xc0000000 + MEM_SIZE;
+ from = default_command_line;
+
+ processor = sa110_processor_functions;
+ processor._proc_init ();
+
+ ROOT_DEV = 0x00ff;
+ setup_ramdisk();
+
+ init_task.mm->start_code = TASK_SIZE;
+ init_task.mm->end_code = TASK_SIZE + (unsigned long) &_etext;
+ init_task.mm->end_data = TASK_SIZE + (unsigned long) &_edata;
+ init_task.mm->brk = TASK_SIZE + (unsigned long) &_end;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c == ' ' &&
+ from[0] == 'm' &&
+ from[1] == 'e' &&
+ from[2] == 'm' &&
+ from[3] == '=') {
+ memory_end = simple_strtoul(from+4, &from, 0);
+ if ( *from == 'K' || *from == 'k' ) {
+ memory_end = memory_end << 10;
+ from++;
+ } else if ( *from == 'M' || *from == 'm' ) {
+ memory_end = memory_end << 20;
+ from++;
+ }
+ memory_end = memory_end + PAGE_OFFSET;
+ }
+ c = *from++;
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *to++ = c;
+ }
+
+ *to = '\0';
+ *cmdline_p = command_line;
+ *memory_start_p = memory_start;
+ *memory_end_p = memory_end;
+ strcpy (system_utsname.machine, "sa110");
+}
+
+int get_cpuinfo(char * buffer)
+{
+ int len;
+
+ len = sprintf (buffer, "CPU:\n"
+ "Type\t\t: %s\n"
+ "Revision\t: %d\n"
+ "Manufacturer\t: %s\n"
+ "32bit modes\t: %s\n"
+ "BogoMips\t: %lu.%02lu\n",
+ "sa110",
+ (int)arm_id & 15,
+ "DEC",
+ "yes",
+ (loops_per_sec+2500) / 500000,
+ ((loops_per_sec+2500) / 5000) % 100);
+ return len;
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
new file mode 100644
index 000000000..ac304fb3e
--- /dev/null
+++ b/arch/arm/kernel/setup.c
@@ -0,0 +1,292 @@
+/*
+ * linux/arch/arm/kernel/setup.c
+ *
+ * Copyright (C) 1995, 1996, 1997 Russell King
+ */
+
+/*
+ * This file obtains various parameters about the system that the kernel
+ * is running on.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/utsname.h>
+#include <linux/blk.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/hardware.h>
+#include <asm/pgtable.h>
+#include <asm/arch/mmu.h>
+#include <asm/procinfo.h>
+#include <asm/io.h>
+#include <asm/setup.h>
+
+struct drive_info_struct { char dummy[32]; } drive_info;
+struct screen_info screen_info;
+struct processor processor;
+unsigned char aux_device_present;
+
+extern const struct processor arm2_processor_functions;
+extern const struct processor arm250_processor_functions;
+extern const struct processor arm3_processor_functions;
+extern const struct processor arm6_processor_functions;
+extern const struct processor arm7_processor_functions;
+extern const struct processor sa110_processor_functions;
+
+/*
+ * Table of known CPU IDs: { id value, mask applied to arm_id, feature
+ * flags, manufacturer string, name string, processor function table }.
+ * get_processor_type() scans it with (id ^ arm_id) & mask; the final
+ * all-zero entry matches anything and acts as the "unknown" sentinel.
+ */
+struct armversions armidlist[] = {
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ { 0x41560200, 0xfffffff0, F_MEMC , "ARM/VLSI", "arm2" , &arm2_processor_functions },
+ { 0x41560250, 0xfffffff0, F_MEMC , "ARM/VLSI", "arm250" , &arm250_processor_functions },
+ { 0x41560300, 0xfffffff0, F_MEMC|F_CACHE, "ARM/VLSI", "arm3" , &arm3_processor_functions },
+#endif
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ { 0x41560600, 0xfffffff0, F_MMU|F_32BIT , "ARM/VLSI", "arm6" , &arm6_processor_functions },
+ { 0x41560610, 0xfffffff0, F_MMU|F_32BIT , "ARM/VLSI", "arm610" , &arm6_processor_functions },
+ { 0x41007000, 0xffffff00, F_MMU|F_32BIT , "ARM/VLSI", "arm7" , &arm7_processor_functions },
+ { 0x41007100, 0xffffff00, F_MMU|F_32BIT , "ARM/VLSI", "arm710" , &arm7_processor_functions },
+ { 0x4401a100, 0xfffffff0, F_MMU|F_32BIT , "DEC", "sa110" , &sa110_processor_functions },
+#endif
+ { 0x00000000, 0x00000000, 0 , "***", "*unknown*" , NULL }
+};
+
+static struct param_struct *params = (struct param_struct *)PARAMS_BASE;
+
+unsigned long arm_id;
+unsigned int vram_half_sam;
+int armidindex;
+int ioebpresent;
+int memc_ctrl_reg;
+int number_ide_drives;
+int number_mfm_drives;
+
+extern int bytes_per_char_h;
+extern int bytes_per_char_v;
+extern int root_mountflags;
+extern int _etext, _edata, _end;
+extern unsigned long real_end_mem;
+
+/*-------------------------------------------------------------------------
+ * Early initialisation routines for various configurable items in the
+ * kernel. Each one either supplies a setup_ function, or defines this
+ * symbol to be empty if not configured.
+ */
+
+/*
+ * Risc-PC specific initialisation
+ */
+#ifdef CONFIG_ARCH_RPC
+
+extern void init_dram_banks(struct param_struct *params);
+
+/*
+ * Risc PC setup: initialise the DRAM bank layout, derive the VRAM
+ * half-SAM size from the boot parameters, and crank the ROM access
+ * speed up to maximum.
+ */
+static void setup_rpc (struct param_struct *params)
+{
+ init_dram_banks(params);
+
+ switch (params->u1.s.pages_in_vram) {
+ case 256:
+ vram_half_sam = 1024;
+ break;
+ case 512: /* falls through to the default value */
+ default:
+ vram_half_sam = 2048;
+ }
+
+ /*
+ * Set ROM speed to maximum
+ */
+ outb (0x1d, IOMD_ROMCR0);
+}
+#else
+#define setup_rpc(x)
+#endif
+
+/*
+ * ram disk
+ */
+#ifdef CONFIG_BLK_DEV_RAM
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+/*
+ * Pull ramdisk settings out of the boot parameter block.
+ * NOTE(review): both flags are tested with "== 0", i.e. a SET flag bit
+ * yields 0 — verify the FLAG_RDPROMPT/FLAG_RDLOAD polarity against the
+ * boot loader before changing anything here.
+ */
+static void setup_ramdisk (struct param_struct *params)
+{
+ rd_image_start = params->u1.s.rd_start;
+ rd_prompt = (params->u1.s.flags & FLAG_RDPROMPT) == 0;
+ rd_doload = (params->u1.s.flags & FLAG_RDLOAD) == 0;
+}
+#else
+#define setup_ramdisk(p)
+#endif
+
+/*
+ * initial ram disk
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Record the initrd location from the boot parameters; disable the
+ * initrd (initrd_start = 0) if it would extend past the end of RAM.
+ */
+static void setup_initrd (struct param_struct *params, unsigned long memory_end)
+{
+ initrd_start = params->u1.s.initrd_start;
+ initrd_end = params->u1.s.initrd_start + params->u1.s.initrd_size;
+
+ if (initrd_end > memory_end) {
+ printk ("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx) - disabling initrd\n",
+ initrd_end, memory_end);
+ initrd_start = 0;
+ }
+}
+#else
+#define setup_initrd(p,m)
+#endif
+
+/* Detect an IOEB chip and record it in the current CPU's feature bits. */
+static inline void check_ioeb_present(void)
+{
+ if (((*IOEB_BASE) & 15) == 5)
+ armidlist[armidindex].features |= F_IOEB;
+}
+
+/*
+ * Match arm_id against the armidlist table (masked compare) and install
+ * the matching processor function table.  If only the terminating
+ * "unknown" entry matches, there is no console yet, so signal failure
+ * by filling the screen with a pattern and hanging.
+ */
+static void get_processor_type (void)
+{
+ for (armidindex = 0; ; armidindex ++)
+ if (!((armidlist[armidindex].id ^ arm_id) &
+ armidlist[armidindex].mask))
+ break;
+
+ if (armidlist[armidindex].id == 0) {
+ int i;
+
+ /* unknown CPU: paint the screen and halt */
+ for (i = 0; i < 3200; i++)
+ ((unsigned long *)SCREEN2_BASE)[i] = 0x77113322;
+
+ while (1);
+ }
+ processor = *armidlist[armidindex].proc;
+}
+
+#define COMMAND_LINE_SIZE 256
+
+static char command_line[COMMAND_LINE_SIZE] = { 0, };
+ char saved_command_line[COMMAND_LINE_SIZE];
+
+/*
+ * Main architecture setup: identify the CPU, read the boot parameter
+ * block (video geometry, root device, drive counts, command line),
+ * compute the memory range — honouring a "mem=" command-line option —
+ * and hand the parsed command line and memory bounds back to the
+ * generic start_kernel() code.
+ */
+void setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p, unsigned long * memory_end_p)
+{
+ static unsigned char smptrap;
+ unsigned long memory_start, memory_end;
+ char c = ' ', *to = command_line, *from;
+ int len = 0;
+
+ /* guard against being entered more than once */
+ if (smptrap == 1)
+ return;
+ smptrap = 1;
+
+ get_processor_type ();
+ check_ioeb_present ();
+ processor._proc_init ();
+
+ bytes_per_char_h = params->u1.s.bytes_per_char_h;
+ bytes_per_char_v = params->u1.s.bytes_per_char_v;
+ from = params->commandline;
+ ROOT_DEV = to_kdev_t (params->u1.s.rootdev);
+ ORIG_X = params->u1.s.video_x;
+ ORIG_Y = params->u1.s.video_y;
+ ORIG_VIDEO_COLS = params->u1.s.video_num_cols;
+ ORIG_VIDEO_LINES = params->u1.s.video_num_rows;
+ memc_ctrl_reg = params->u1.s.memc_control_reg;
+ number_ide_drives = (params->u1.s.adfsdrives >> 6) & 3;
+ number_mfm_drives = (params->u1.s.adfsdrives >> 3) & 3;
+
+ setup_rpc (params);
+ setup_ramdisk (params);
+
+ if (!(params->u1.s.flags & FLAG_READONLY))
+ root_mountflags &= ~MS_RDONLY;
+
+ /* free memory starts just past the kernel image */
+ memory_start = MAPTOPHYS((unsigned long)&_end);
+ memory_end = GET_MEMORY_END(params);
+
+ init_task.mm->start_code = TASK_SIZE;
+ init_task.mm->end_code = TASK_SIZE + (unsigned long) &_etext;
+ init_task.mm->end_data = TASK_SIZE + (unsigned long) &_edata;
+ init_task.mm->brk = TASK_SIZE + (unsigned long) &_end;
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ /*
+ * Copy the command line into command_line[], intercepting any
+ * "mem=<size>[KkMm]" option.  c starts as ' ' so an option at
+ * the very beginning of the line also matches.
+ */
+ for (;;) {
+ if (c == ' ' &&
+ from[0] == 'm' &&
+ from[1] == 'e' &&
+ from[2] == 'm' &&
+ from[3] == '=') {
+ memory_end = simple_strtoul(from+4, &from, 0);
+ if (*from == 'K' || *from == 'k') {
+ memory_end = memory_end << 10;
+ from++;
+ } else if (*from == 'M' || *from == 'm') {
+ memory_end = memory_end << 20;
+ from++;
+ }
+ /* mem= gives a size; convert to an end address */
+ memory_end = memory_end + PAGE_OFFSET;
+ }
+ c = *from++;
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *to++ = c;
+ }
+
+ *to = '\0';
+ *cmdline_p = command_line;
+ *memory_start_p = memory_start;
+ *memory_end_p = memory_end;
+
+ setup_initrd (params, memory_end);
+
+ strcpy (system_utsname.machine, armidlist[armidindex].name);
+}
+
+/* Test a feature bit of the CPU entry selected by get_processor_type(). */
+#define ISSET(bit) (armidlist[armidindex].features & bit)
+
+/*
+ * Render /proc/cpuinfo from the detected armidlist entry: CPU type,
+ * revision, manufacturer, BogoMips, plus memory-system and IOEB info.
+ * Returns the number of characters written into buffer.
+ */
+int get_cpuinfo(char * buffer)
+{
+ int len;
+
+ len = sprintf (buffer, "CPU:\n"
+ "Type\t\t: %s\n"
+ "Revision\t: %d\n"
+ "Manufacturer\t: %s\n"
+ "32bit modes\t: %s\n"
+ "BogoMips\t: %lu.%02lu\n",
+ armidlist[armidindex].name,
+ (int)arm_id & 15,
+ armidlist[armidindex].manu,
+ ISSET (F_32BIT) ? "yes" : "no",
+ (loops_per_sec+2500) / 500000,
+ ((loops_per_sec+2500) / 5000) % 100);
+ len += sprintf (buffer + len,
+ "\nHardware:\n"
+ "Mem System\t: %s\n"
+ "IOEB\t\t: %s\n",
+ ISSET(F_MEMC) ? "MEMC" :
+ ISSET(F_MMU) ? "MMU" : "*unknown*",
+ ISSET(F_IOEB) ? "present" : "absent"
+ );
+ return len;
+}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
new file mode 100644
index 000000000..0cba3dd07
--- /dev/null
+++ b/arch/arm/kernel/signal.c
@@ -0,0 +1,515 @@
+/*
+ * linux/arch/arm/kernel/signal.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h> /* for CONFIG_CPU_ARM6 and CONFIG_CPU_SA110 */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+
+#include <asm/ucontext.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn))
+#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn))
+
+asmlinkage int sys_wait4(pid_t pid, unsigned long * stat_addr,
+ int options, unsigned long *ru);
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs);
+extern int ptrace_cancel_bpt (struct task_struct *);
+extern int ptrace_set_bpt (struct task_struct *);
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ */
+/*
+ * Atomically install the new (blockable-only) signal mask and sleep
+ * until a signal is delivered; the old mask is restored by do_signal.
+ * Always returns -EINTR in r0 unless a handler runs first.
+ */
+asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask, struct pt_regs *regs)
+{
+
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->ARM_r0;
+ }
+}
+
+/*
+ * POSIX.1b version of sigsuspend: the new mask is a full sigset_t
+ * copied from user space.  SIGKILL/SIGSTOP cannot be blocked.
+ */
+asmlinkage int
+sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, struct pt_regs *regs)
+{
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ regs->ARM_r0 = -EINTR;
+
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return regs->ARM_r0;
+ }
+}
+
+/*
+ * Old-style sigaction: translate the user's old_sigaction (single-word
+ * mask) to/from a k_sigaction and let do_sigaction do the real work.
+ */
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ /* copy the previous action back out to user space */
+ if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+/* Frame pushed on the user stack by setup_frame(). */
+struct sigframe
+{
+ struct sigcontext sc; /* saved user registers */
+ unsigned long extramask[_NSIG_WORDS-1]; /* rest of the blocked mask */
+ unsigned long retcode; /* sigreturn SWI trampoline */
+};
+
+/* Frame pushed on the user stack by setup_rt_frame(). */
+struct rt_sigframe
+{
+ struct siginfo *pinfo; /* points at info below */
+ void *puc; /* points at uc below */
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long retcode; /* rt_sigreturn SWI trampoline */
+};
+
+/*
+ * Reload the user register set from a sigcontext saved on the signal
+ * stack.  Returns the restored r0 so the caller can hand it back as
+ * the syscall result.  If a ptrace breakpoint was pending, raise
+ * SIGTRAP now that the registers are back.
+ */
+static int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ __get_user(regs->ARM_r0, &sc->arm_r0);
+ __get_user(regs->ARM_r1, &sc->arm_r1);
+ __get_user(regs->ARM_r2, &sc->arm_r2);
+ __get_user(regs->ARM_r3, &sc->arm_r3);
+ __get_user(regs->ARM_r4, &sc->arm_r4);
+ __get_user(regs->ARM_r5, &sc->arm_r5);
+ __get_user(regs->ARM_r6, &sc->arm_r6);
+ __get_user(regs->ARM_r7, &sc->arm_r7);
+ __get_user(regs->ARM_r8, &sc->arm_r8);
+ __get_user(regs->ARM_r9, &sc->arm_r9);
+ __get_user(regs->ARM_r10, &sc->arm_r10);
+ __get_user(regs->ARM_fp, &sc->arm_fp);
+ __get_user(regs->ARM_ip, &sc->arm_ip);
+ __get_user(regs->ARM_sp, &sc->arm_sp);
+ __get_user(regs->ARM_lr, &sc->arm_lr);
+ __get_user(regs->ARM_pc, &sc->arm_pc); /* security! */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ __get_user(regs->ARM_cpsr, &sc->arm_cpsr); /* security! */
+#endif
+
+ /* send SIGTRAP if we're single-stepping */
+ if (ptrace_cancel_bpt (current))
+ send_sig (SIGTRAP, current, 1);
+
+ return regs->ARM_r0;
+}
+
+/*
+ * Return from a signal handler: the sigframe built by setup_frame()
+ * sits at the user sp.  Restore the blocked mask and registers; a
+ * bad frame kills the task with SIGSEGV.
+ */
+asmlinkage int sys_sigreturn(struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ sigset_t set;
+
+ frame = (struct sigframe *)regs->ARM_sp;
+
+ if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.oldmask)
+ || (_NSIG_WORDS > 1
+ && __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ return restore_sigcontext(regs, &frame->sc);
+
+badframe:
+ lock_kernel();
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Return from an rt signal handler: like sys_sigreturn, but the mask
+ * and context live inside the ucontext of the rt_sigframe.
+ */
+asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ sigset_t set;
+
+ frame = (struct rt_sigframe *)regs->ARM_sp;
+
+ if (verify_area(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ return restore_sigcontext(regs, &frame->uc.uc_mcontext);
+
+badframe:
+ lock_kernel();
+ do_exit(SIGSEGV);
+}
+
+/*
+ * Save the current user register set, trap info and the old blocked
+ * mask into a sigcontext on the user stack (inverse of
+ * restore_sigcontext).
+ */
+static void
+setup_sigcontext(struct sigcontext *sc, /*struct _fpstate *fpstate,*/
+ struct pt_regs *regs, unsigned long mask)
+{
+ __put_user (regs->ARM_r0, &sc->arm_r0);
+ __put_user (regs->ARM_r1, &sc->arm_r1);
+ __put_user (regs->ARM_r2, &sc->arm_r2);
+ __put_user (regs->ARM_r3, &sc->arm_r3);
+ __put_user (regs->ARM_r4, &sc->arm_r4);
+ __put_user (regs->ARM_r5, &sc->arm_r5);
+ __put_user (regs->ARM_r6, &sc->arm_r6);
+ __put_user (regs->ARM_r7, &sc->arm_r7);
+ __put_user (regs->ARM_r8, &sc->arm_r8);
+ __put_user (regs->ARM_r9, &sc->arm_r9);
+ __put_user (regs->ARM_r10, &sc->arm_r10);
+ __put_user (regs->ARM_fp, &sc->arm_fp);
+ __put_user (regs->ARM_ip, &sc->arm_ip);
+ __put_user (regs->ARM_sp, &sc->arm_sp);
+ __put_user (regs->ARM_lr, &sc->arm_lr);
+ __put_user (regs->ARM_pc, &sc->arm_pc); /* security! */
+#if defined(CONFIG_CPU_ARM6) || defined(CONFIG_CPU_SA110)
+ __put_user (regs->ARM_cpsr, &sc->arm_cpsr); /* security! */
+#endif
+
+ __put_user (current->tss.trap_no, &sc->trap_no);
+ __put_user (current->tss.error_code, &sc->error_code);
+ __put_user (mask, &sc->oldmask);
+}
+
+/*
+ * Build an old-style signal frame on the user stack and redirect the
+ * user pc to the handler.  The return path is either the caller's
+ * SA_RESTORER stub or a sigreturn SWI written into the frame itself.
+ * An unwritable stack kills the task with SIGSEGV.
+ */
+static void setup_frame(int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ unsigned long retcode;
+
+ frame = (struct sigframe *)regs->ARM_sp - 1;
+
+ /* fixed typo: was VERIFT_WRITE (cf. setup_rt_frame below) */
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto segv_and_exit;
+
+ setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);
+
+ if (_NSIG_WORDS > 1) {
+ __copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+ }
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = (unsigned long)ka->sa.sa_restorer; /* security! */
+ } else {
+ retcode = (unsigned long)&frame->retcode;
+ __put_user(SWI_SYS_SIGRETURN, &frame->retcode);
+ /* make sure the I-cache sees the new instruction */
+ __flush_entry_to_ram (&frame->retcode);
+ }
+
+ /* translate the signal number for foreign exec domains */
+ if (current->exec_domain && current->exec_domain->signal_invmap && sig < 32)
+ regs->ARM_r0 = current->exec_domain->signal_invmap[sig];
+ else
+ regs->ARM_r0 = sig;
+ regs->ARM_sp = (unsigned long)frame;
+ regs->ARM_lr = retcode;
+ regs->ARM_pc = (unsigned long)ka->sa.sa_handler; /* security! */
+ return;
+
+segv_and_exit:
+ lock_kernel();
+ do_exit (SIGSEGV);
+}
+
+/*
+ * Build a POSIX.1b (rt) signal frame on the user stack: siginfo plus
+ * a full ucontext, then redirect the user pc to the handler, just as
+ * setup_frame() does for old-style frames.
+ */
+static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ unsigned long retcode;
+
+ frame = (struct rt_sigframe *)regs->ARM_sp - 1;
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto segv_and_exit;
+
+ __put_user(&frame->info, &frame->pinfo);
+ __put_user(&frame->uc, &frame->puc);
+ __copy_to_user(&frame->info, info, sizeof(*info));
+
+ /* Clear all the bits of the ucontext we don't use. */
+ __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));
+
+ setup_sigcontext(&frame->uc.uc_mcontext, /*&frame->fpstate,*/
+ regs, set->sig[0]);
+ __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. If provided, use a stub
+ already in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER) {
+ retcode = (unsigned long)ka->sa.sa_restorer; /* security! */
+ } else {
+ retcode = (unsigned long)&frame->retcode;
+ __put_user(SWI_SYS_RT_SIGRETURN, &frame->retcode);
+ /* make sure the I-cache sees the new instruction */
+ __flush_entry_to_ram (&frame->retcode);
+ }
+
+ /* translate the signal number for foreign exec domains */
+ if (current->exec_domain && current->exec_domain->signal_invmap && sig < 32)
+ regs->ARM_r0 = current->exec_domain->signal_invmap[sig];
+ else
+ regs->ARM_r0 = sig;
+ regs->ARM_sp = (unsigned long)frame;
+ regs->ARM_lr = retcode;
+ regs->ARM_pc = (unsigned long)ka->sa.sa_handler; /* security! */
+ return;
+
+segv_and_exit:
+ lock_kernel();
+ do_exit (SIGSEGV);
+}
+
+/*
+ * OK, we're invoking a handler: push the appropriate frame (rt if the
+ * handler asked for SA_SIGINFO), honour SA_ONESHOT, and unless
+ * SA_NODEFER is set block the handler's mask plus the signal itself
+ * for the duration of the handler.
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ /* Set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ *
+ * Returns 1 if a handler frame was set up, 0 otherwise.  swi_instr
+ * detects whether we interrupted a SWI (syscall) so -ERESTART* results
+ * can rewind the pc by one instruction and retry the call.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ unsigned long instr, *pc = (unsigned long *)(instruction_pointer(regs)-4);
+ struct k_sigaction *ka;
+ siginfo_t info;
+ int single_stepping, swi_instr;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ /* pull any ptrace breakpoint while we fiddle with the regs */
+ single_stepping = ptrace_cancel_bpt (current);
+ swi_instr = (!get_user (instr, pc) && (instr & 0x0f000000) == 0x0f000000);
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq (&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq (&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+ single_stepping |= ptrace_cancel_bpt (current);
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ lock_kernel();
+ if (current->binfmt
+ && current->binfmt->core_dump
+ && current->binfmt->core_dump(signr, regs))
+ exit_code |= 0x80;
+ unlock_kernel();
+ /* FALLTHRU */
+
+ default:
+ lock_kernel();
+ sigaddset(&current->signal, signr);
+ current->flags |= PF_SIGNALED;
+ do_exit(exit_code);
+ /* NOTREACHED */
+ }
+ }
+
+ /* Are we from a system call? */
+ if (swi_instr) {
+ switch (regs->ARM_r0) {
+ case -ERESTARTNOHAND:
+ regs->ARM_r0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->ARM_r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ /* rewind to re-execute the SWI */
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc -= 4;
+ }
+ }
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ if (single_stepping)
+ ptrace_set_bpt (current);
+ return 1;
+ }
+
+ /* no handler ran: restart an interrupted syscall transparently */
+ if (swi_instr &&
+ (regs->ARM_r0 == -ERESTARTNOHAND ||
+ regs->ARM_r0 == -ERESTARTSYS ||
+ regs->ARM_r0 == -ERESTARTNOINTR)) {
+ regs->ARM_r0 = regs->ARM_ORIG_r0;
+ regs->ARM_pc -= 4;
+ }
+ if (single_stepping)
+ ptrace_set_bpt (current);
+ return 0;
+}
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
new file mode 100644
index 000000000..ab514903d
--- /dev/null
+++ b/arch/arm/kernel/sys_arm.c
@@ -0,0 +1,372 @@
+/*
+ * linux/arch/arm/kernel/sys_arm.c
+ *
+ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/arm
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/*
+ * Constant strings used in inlined functions in header files
+ */
+/* proc/system.h */
+const char xchg_str[] = "xchg";
+/* arch/dma.h */
+const char dma_str[] = "%s: dma %d not supported\n";
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ *
+ * Returns 0 and writes the two descriptors to fildes, or a
+ * negative errno.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ lock_kernel();
+ error = do_pipe(fd);
+ unlock_kernel();
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. ARM Linux didn't use to be able to handle more than
+ * 4 system call parameters, so these system calls used a memory
+ * block for parameter passing..
+ */
+
+/* User-space layout of the single-struct argument to old_mmap(). */
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+/*
+ * Legacy mmap: copy the argument block from user space, resolve the
+ * file descriptor (unless MAP_ANONYMOUS) and call do_mmap().
+ */
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct a;
+
+ lock_kernel();
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ if (a.fd >= NR_OPEN || !(file = current->files->fd[a.fd]))
+ goto out;
+ }
+ /* these flags are meaningless from user space */
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ error = do_mmap(file, a.addr, a.len, a.prot, a.flags, a.offset);
+out:
+ unlock_kernel();
+ return error;
+}
+
+
+extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
+
+/* User-space layout of the single-struct argument to old_select(). */
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+/*
+ * Legacy select: unpack the argument block from user space and forward
+ * to sys_select().
+ */
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ *
+ * The high 16 bits of `call' carry a version number for backward
+ * compatibility; the low 16 bits select the sem/msg/shm operation.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ lock_kernel();
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ /* semaphore calls */
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ ret = sys_semop (first, (struct sembuf *)ptr, second);
+ goto out;
+ case SEMGET:
+ ret = sys_semget (first, second, third);
+ goto out;
+ case SEMCTL: {
+ union semun fourth;
+ ret = -EINVAL;
+ if (!ptr)
+ goto out;
+ ret = -EFAULT;
+ if (get_user(fourth.__pad, (void **) ptr))
+ goto out;
+ ret = sys_semctl (first, second, third, fourth);
+ goto out;
+ }
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ /* message queue calls */
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ ret = sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ goto out;
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ /* old style: msgp/msgtyp packed in an ipc_kludge */
+ struct ipc_kludge tmp;
+ ret = -EINVAL;
+ if (!ptr)
+ goto out;
+ ret = -EFAULT;
+ if (copy_from_user(&tmp,(struct ipc_kludge *) ptr,
+ sizeof (tmp)))
+ goto out;
+ ret = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
+ goto out;
+ }
+ case 1: default:
+ ret = sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
+ goto out;
+ }
+ case MSGGET:
+ ret = sys_msgget ((key_t) first, second);
+ goto out;
+ case MSGCTL:
+ ret = sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ goto out;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ /* shared memory calls */
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ case 0: default: {
+ ulong raddr;
+ ret = sys_shmat (first, (char *) ptr, second, &raddr);
+ if (ret)
+ goto out;
+ /* hand the attach address back via *third */
+ ret = put_user (raddr, (ulong *) third);
+ goto out;
+ }
+ case 1: /* iBCS2 emulator entry point */
+ ret = -EINVAL;
+ if (!segment_eq(get_fs(), get_ds()))
+ goto out;
+ ret = sys_shmat (first, (char *) ptr, second, (ulong *) third);
+ goto out;
+ }
+ case SHMDT:
+ ret = sys_shmdt ((char *)ptr);
+ goto out;
+ case SHMGET:
+ ret = sys_shmget (first, second, third);
+ goto out;
+ case SHMCTL:
+ ret = sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ goto out;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ else
+ ret = -EINVAL;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+/* Fork a new task - this creates a new program thread.
+ * This is called indirectly via a small wrapper
+ *
+ * The child reuses the parent's current sp; returns the do_fork()
+ * result (child pid in the parent, or negative errno).
+ */
+asmlinkage int sys_fork(struct pt_regs *regs)
+{
+ int ret;
+
+ lock_kernel();
+ ret = do_fork(SIGCHLD, regs->ARM_sp, regs);
+ unlock_kernel();
+
+ return ret;
+}
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper
+ *
+ * A newsp of 0 means "share the caller's current stack pointer".
+ */
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, struct pt_regs *regs)
+{
+ int ret;
+
+ lock_kernel();
+ if (!newsp)
+ newsp = regs->ARM_sp;
+
+ ret = do_fork(clone_flags, newsp, regs);
+ unlock_kernel();
+ return ret;
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper
+ *
+ * Copies the filename into kernel space with getname(), then lets the
+ * generic do_execve() do the work.  Returns 0 on success or a
+ * negative errno.
+ */
+asmlinkage int sys_execve(char *filenamei, char **argv, char **envp, struct pt_regs *regs)
+{
+ int error;
+ char * filename;
+
+ lock_kernel();
+ filename = getname(filenamei);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
+
+/*
+ * Detect the old function calling standard
+ *
+ * Returns non-zero if the instruction two words before the current pc
+ * is "mov r3, sp" (0xe1a0300d), the signature of the old user-space
+ * syscall stubs that passed extra arguments via memory at *r3.
+ */
+static inline unsigned long old_calling_standard (struct pt_regs *regs)
+{
+ unsigned long instr, *pcv = (unsigned long *)(instruction_pointer(regs) - 8);
+ return (!get_user (instr, pcv) && instr == 0xe1a0300d);
+}
+
+/* Compatibility functions - we used to pass 5 parameters as r0, r1, r2, *r3, *(r3+4)
+ * We now use r0 - r4, and return an error if the old style calling standard is used.
+ * Eventually these functions will disappear.
+ */
+asmlinkage int
+sys_compat_llseek (unsigned int fd, unsigned long offset_high, unsigned long offset_low,
+ loff_t *result, unsigned int origin, struct pt_regs *regs)
+{
+ extern int sys_llseek (unsigned int, unsigned long, unsigned long, loff_t *, unsigned int);
+
+ /* reject binaries still using the old 4-register convention */
+ if (old_calling_standard (regs)) {
+ printk (KERN_NOTICE "%s (%d): unsupported llseek call standard\n",
+ current->comm, current->pid);
+ return -EINVAL;
+ }
+ return sys_llseek (fd, offset_high, offset_low, result, origin);
+}
+
+/* Same compatibility check, wrapping sys_mount(). */
+asmlinkage int
+sys_compat_mount (char *devname, char *dirname, char *type, unsigned long flags, void *data,
+ struct pt_regs *regs)
+{
+ extern int sys_mount (char *, char *, char *, unsigned long, void *);
+
+ if (old_calling_standard (regs)) {
+ printk (KERN_NOTICE "%s (%d): unsupported mount call standard\n",
+ current->comm, current->pid);
+ return -EINVAL;
+ }
+ return sys_mount (devname, dirname, type, flags, data);
+}
+
+/*
+ * Obsolete uname: copy the whole utsname struct to user space.
+ * Logs a one-time notice per boot identifying the calling program.
+ */
+asmlinkage int sys_uname (struct old_utsname * name)
+{
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete uname call\n",
+ current->comm, current->pid);
+ }
+
+ if (name && !copy_to_user (name, &system_utsname, sizeof (*name)))
+ return 0;
+ return -EFAULT;
+}
+
+/*
+ * Even older uname variant with fixed-width, NUL-terminated fields:
+ * copy each utsname field individually, truncated to __OLD_UTS_LEN.
+ */
+asmlinkage int sys_olduname(struct oldold_utsname * name)
+{
+ int error;
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete olduname call\n",
+ current->comm, current->pid);
+ }
+
+ if (!name)
+ return -EFAULT;
+
+ if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
+ return -EFAULT;
+
+ /* each copy/put returns 0 on success; any failure makes error != 0 */
+ error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
+ error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
+ error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
+ error -= __put_user(0,name->release+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
+ error -= __put_user(0,name->version+__OLD_UTS_LEN);
+ error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
+ error -= __put_user(0,name->machine+__OLD_UTS_LEN);
+ error = error ? -EFAULT : 0;
+
+ return error;
+}
+
+/*
+ * Obsolete pause: sleep interruptibly until a signal arrives, then
+ * return -ERESTARTNOHAND so the signal code converts it to -EINTR.
+ */
+asmlinkage int sys_pause(void)
+{
+ static int warned = 0;
+
+ if (warned == 0) {
+ warned ++;
+ printk (KERN_NOTICE "%s (%d): obsolete pause call\n",
+ current->comm, current->pid);
+ }
+
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ return -ERESTARTNOHAND;
+}
+
+
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
new file mode 100644
index 000000000..b1c679ec5
--- /dev/null
+++ b/arch/arm/kernel/time.c
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
+ *
+ * This file contains the ARM-specific time handling details:
+ * reading the RTC at bootup, etc...
+ *
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+#include <linux/timex.h>
+#include <asm/irq-no.h>
+#include <asm/hardware.h>
+
+extern int setup_arm_irq(int, struct irqaction *);
+extern volatile unsigned long lost_ticks;
+
+/* change this if you have some constant time drift */
+#define USECS_PER_JIFFY (1000000/HZ)
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10)
+#endif
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines where long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+#include <asm/arch/time.h>
+
+static unsigned long do_gettimeoffset(void)
+{
+ return gettimeoffset ();
+}
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_flags_cli (flags);
+ *tv = xtime;
+ tv->tv_usec += do_gettimeoffset();
+
+ /*
+ * xtime is atomically updated in timer_bh. lost_ticks is
+ * nonzero if the timer bottom half hasn't executed yet.
+ */
+ if (lost_ticks)
+ tv->tv_usec += USECS_PER_JIFFY;
+
+ restore_flags(flags);
+
+ if (tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli ();
+ /* This is revolting. We need to set the xtime.tv_usec
+ * correctly. However, the value in this location is
+ * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ tv->tv_usec -= do_gettimeoffset();
+
+ if (tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = MAXPHASE;
+ time_esterror = MAXPHASE;
+ sti ();
+}
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick.
+ */
+static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ if (reset_timer ())
+ do_timer(regs);
+
+ update_rtc ();
+}
+
+static struct irqaction irqtimer0 = { timer_interrupt, 0, 0, "timer", NULL, NULL};
+
+void time_init(void)
+{
+ xtime.tv_sec = setup_timer();
+ xtime.tv_usec = 0;
+
+ setup_arm_irq(IRQ_TIMER0, &irqtimer0);
+}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
new file mode 100644
index 000000000..7ff7436c5
--- /dev/null
+++ b/arch/arm/kernel/traps.c
@@ -0,0 +1,306 @@
+/*
+ * linux/arch/arm/kernel/traps.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
+ */
+
+/*
+ * 'traps.c' handles hardware exceptions after we have saved some state in
+ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably
+ * kill the offending process.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/pgtable.h>
+
+extern void fpe_save(struct fp_soft_struct *);
+extern void fpe_restore(struct fp_soft_struct *);
+extern void die_if_kernel(char *str, struct pt_regs *regs, int err, int ret);
+extern void c_backtrace (unsigned long fp, int pmode);
+extern int ptrace_cancel_bpt (struct task_struct *);
+
+char *processor_modes[]=
+{ "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
+ "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
+ "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
+ "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
+};
+
+static char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+
+static inline void console_verbose(void)
+{
+ extern int console_loglevel;
+ console_loglevel = 15;
+}
+
+int kstack_depth_to_print = 200;
+
+static int verify_stack_pointer (unsigned long stackptr, int size)
+{
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ if (stackptr < 0x02048000 || stackptr + size > 0x03000000)
+ return -EFAULT;
+#else
+ if (stackptr < 0xc0000000 || stackptr + size > (unsigned long)high_memory)
+ return -EFAULT;
+#endif
+ return 0;
+}
+
+static void dump_stack (unsigned long *start, unsigned long *end, int offset, int max)
+{
+ unsigned long *p;
+ int i;
+
+ for (p = start + offset, i = 0; i < max && p < end; i++, p++) {
+ if (i && (i & 7) == 0)
+ printk ("\n ");
+ printk ("%08lx ", *p);
+ }
+ printk ("\n");
+}
+
+/*
+ * These constants are for searching for possible module text
+ * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
+ * a guess of how much space is likely to be vmalloced.
+ */
+#define VMALLOC_OFFSET (8*1024*1024)
+#define MODULE_RANGE (8*1024*1024)
+
+static void dump_instr (unsigned long pc)
+{
+ unsigned long module_start, module_end;
+ int pmin = -2, pmax = 3, ok = 0;
+ extern char start_kernel, _etext;
+
+ module_start = VMALLOC_START;
+ module_end = module_start + MODULE_RANGE;
+
+ if ((pc >= (unsigned long) &start_kernel) &&
+ (pc <= (unsigned long) &_etext)) {
+ if (pc + pmin < (unsigned long) &start_kernel)
+ pmin = ((unsigned long) &start_kernel) - pc;
+ if (pc + pmax > (unsigned long) &_etext)
+ pmax = ((unsigned long) &_etext) - pc;
+ ok = 1;
+ } else if (pc >= module_start && pc <= module_end) {
+ if (pc + pmin < module_start)
+ pmin = module_start - pc;
+ if (pc + pmax > module_end)
+ pmax = module_end - pc;
+ ok = 1;
+ }
+ printk ("Code: ");
+ if (ok) {
+ int i;
+ for (i = pmin; i < pmax; i++)
+ printk("%08lx ", ((unsigned long *)pc)[i]);
+ printk ("\n");
+ } else
+ printk ("pc not in code space\n");
+}
+
+/*
+ * This function is protected against kernel-mode re-entrancy. If it
+ * is re-entered it will hang the system since we can't guarantee in
+ * this case that any of the functions that it calls are safe any more.
+ * Even the panic function could be a problem, but we'll give it a go.
+ */
+void die_if_kernel(char *str, struct pt_regs *regs, int err, int ret)
+{
+ static int died = 0;
+ unsigned long cstack, sstack, frameptr;
+
+ if (user_mode(regs))
+ return;
+
+ switch (died) {
+ case 2:
+ while (1);
+ case 1:
+ died ++;
+ panic ("die_if_kernel re-entered. Major kernel corruption. Please reboot me!");
+ break;
+ case 0:
+ died ++;
+ break;
+ }
+
+ console_verbose ();
+ printk ("Internal error: %s: %x\n", str, err);
+ printk ("CPU: %d", smp_processor_id());
+ show_regs (regs);
+ printk ("Process %s (pid: %d, stackpage=%08lx)\nStack: ",
+ current->comm, current->pid, 4096+(unsigned long)current);
+
+ cstack = (unsigned long)(regs + 1);
+ sstack = 4096+(unsigned long)current;
+
+ if (*(unsigned long *)sstack != STACK_MAGIC)
+ printk ("*** corrupted stack page\n ");
+
+ if (verify_stack_pointer (cstack, 4))
+ printk ("%08lx invalid kernel stack pointer\n", cstack);
+ else if(cstack > sstack + 4096)
+ printk("(sp overflow)\n");
+ else if(cstack < sstack)
+ printk("(sp underflow)\n");
+ else
+ dump_stack ((unsigned long *)sstack, (unsigned long *)sstack + 1024,
+ cstack - sstack, kstack_depth_to_print);
+
+ frameptr = regs->ARM_fp;
+ if (frameptr) {
+ if (verify_stack_pointer (frameptr, 4))
+ printk ("Backtrace: invalid frame pointer\n");
+ else {
+ printk("Backtrace: \n");
+ c_backtrace (frameptr, processor_mode(regs));
+ }
+ }
+
+ dump_instr (instruction_pointer(regs));
+ died = 0;
+ if (ret != -1)
+ do_exit (ret);
+ else {
+ cli ();
+ while (1);
+ }
+}
+
+void bad_user_access_alignment (const void *ptr)
+{
+ void *pc;
+ __asm__("mov %0, lr\n": "=r" (pc));
+ printk (KERN_ERR "bad_user_access_alignment called: ptr = %p, pc = %p\n", ptr, pc);
+ current->tss.error_code = 0;
+ current->tss.trap_no = 11;
+ force_sig (SIGBUS, current);
+/* die_if_kernel("Oops - bad user access alignment", regs, mode, SIGBUS);*/
+}
+
+asmlinkage void do_undefinstr (int address, struct pt_regs *regs, int mode)
+{
+ current->tss.error_code = 0;
+ current->tss.trap_no = 6;
+ force_sig (SIGILL, current);
+ die_if_kernel("Oops - undefined instruction", regs, mode, SIGILL);
+}
+
+asmlinkage void do_excpt (int address, struct pt_regs *regs, int mode)
+{
+ current->tss.error_code = 0;
+ current->tss.trap_no = 11;
+ force_sig (SIGBUS, current);
+ die_if_kernel("Oops - address exception", regs, mode, SIGBUS);
+}
+
+asmlinkage void do_unexp_fiq (struct pt_regs *regs)
+{
+#ifndef CONFIG_IGNORE_FIQ
+ printk ("Hmm. Unexpected FIQ received, but trying to continue\n");
+ printk ("You may have a hardware problem...\n");
+#endif
+}
+
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, int proc_mode)
+{
+ printk (KERN_CRIT "Bad mode in %s handler detected: mode %s\n",
+ handler[reason],
+ processor_modes[proc_mode]);
+ die_if_kernel ("Oops", regs, 0, -1);
+}
+
+/*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task.
+ *
+ * We no longer save/restore the math state on every context switch
+ * any more. We only do this now if it actually gets used.
+ */
+asmlinkage void math_state_restore (void)
+{
+ if (last_task_used_math == current)
+ return;
+ if (last_task_used_math)
+ /*
+ * Save current fp state into last_task_used_math->tss.fpe_save
+ */
+ fpe_save (&last_task_used_math->tss.fpstate.soft);
+ last_task_used_math = current;
+ if (current->used_math) {
+ /*
+ * Restore current fp state from current->tss.fpe_save
+ */
+ fpe_restore (&current->tss.fpstate.soft);
+ } else {
+ /*
+ * initialise fp state
+ */
+ fpe_restore (&init_task.tss.fpstate.soft);
+ current->used_math = 1;
+ }
+}
+
+asmlinkage void arm_syscall (int no, struct pt_regs *regs)
+{
+ switch (no) {
+ case 0: /* branch through 0 */
+ printk ("[%d] %s: branch through zero\n", current->pid, current->comm);
+ force_sig (SIGILL, current);
+ if (user_mode(regs)) {
+ show_regs (regs);
+ c_backtrace (regs->ARM_fp, processor_mode(regs));
+ }
+ die_if_kernel ("Oops", regs, 0, SIGILL);
+ break;
+
+ case 1: /* SWI_BREAK_POINT */
+ regs->ARM_pc -= 4; /* Decrement PC by one instruction */
+ ptrace_cancel_bpt (current);
+ force_sig (SIGTRAP, current);
+ break;
+
+ default:
+ printk ("[%d] %s: arm syscall %d\n", current->pid, current->comm, no);
+ force_sig (SIGILL, current);
+ if (user_mode(regs)) {
+ show_regs (regs);
+ c_backtrace (regs->ARM_fp, processor_mode(regs));
+ }
+ die_if_kernel ("Oops", regs, no, SIGILL);
+ break;
+ }
+}
+
+asmlinkage void deferred(int n, struct pt_regs *regs)
+{
+ printk ("[%d] %s: old system call %X\n", current->pid, current->comm, n);
+ show_regs (regs);
+ force_sig (SIGILL, current);
+}
+
+asmlinkage void arm_malalignedptr(const char *str, void *pc, volatile void *ptr)
+{
+ printk ("Mal-aligned pointer in %s: %p (PC=%p)\n", str, ptr, pc);
+}
+
+asmlinkage void arm_invalidptr (const char *function, int size)
+{
+ printk ("Invalid pointer size in %s (PC=%p) size %d\n",
+ function, __builtin_return_address(0), size);
+}
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
new file mode 100644
index 000000000..10fad6b43
--- /dev/null
+++ b/arch/arm/lib/Makefile
@@ -0,0 +1,55 @@
+#
+# linux/arch/arm/lib/Makefile
+#
+# Copyright (C) 1995-1998 Russell King
+#
+
+L_TARGET := lib.a
+L_OBJS := backtrace.o bitops.o delay.o fp_support.o \
+ loaders.o memcpy.o memfastset.o system.o string.o uaccess.o
+
+ifeq ($(PROCESSOR),armo)
+ L_OBJS += uaccess-armo.o
+endif
+
+ifdef CONFIG_INET
+ L_OBJS += checksum.o
+endif
+
+ifdef CONFIG_ARCH_ACORN
+ L_OBJS += ll_char_wr.o io-acorn.o
+ ifdef CONFIG_ARCH_A5K
+ L_OBJS += floppydma.o
+ endif
+ ifdef CONFIG_ARCH_RPC
+ L_OBJS += floppydma.o
+ endif
+endif
+
+ifdef CONFIG_ARCH_EBSA110
+ L_OBJS += io-ebsa110.o
+endif
+
+include $(TOPDIR)/Rules.make
+
+constants.h: getconstants
+ ./getconstants > constants.h
+
+getconstants: getconstants.c getconstants.h
+ $(HOSTCC) -D__KERNEL__ -o getconstants getconstants.c
+
+getconstants.h: getconsdata.c
+ $(CC) $(CFLAGS) -c getconsdata.c
+ $(PERL) extractinfo.perl $(OBJDUMP) > $@
+
+%.o: %.S
+ifndef $(CONFIG_BINUTILS_NEW)
+ $(CC) $(CFLAGS) -D__ASSEMBLY__ -E $< | tr ';$$' '\n#' > ..tmp.$<.s
+ $(CC) $(CFLAGS:-pipe=) -c -o $@ ..tmp.$<.s
+ $(RM) ..tmp.$<.s
+else
+ $(CC) $(CFLAGS) -D__ASSEMBLY__ -c -o $@ $<
+endif
+
+clean:
+ $(RM) getconstants constants.h getconstants.h
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
new file mode 100644
index 000000000..d48055b70
--- /dev/null
+++ b/arch/arm/lib/backtrace.S
@@ -0,0 +1,100 @@
+/*
+ * linux/arch/arm/lib/backtrace.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ fp is 0 or stack frame
+
+#define frame r4
+#define next r5
+#define save r6
+#define mask r7
+#define offset r8
+
+ENTRY(__backtrace)
+ mov r1, #0x10
+ mov r0, fp
+
+ENTRY(c_backtrace)
+ stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+ tst r1, #0x10 @ 26 or 32-bit?
+ moveq mask, #0xfc000003
+ movne mask, #0
+ tst mask, r0
+ movne r0, #0
+ movs frame, r0
+1: moveq r0, #-2
+ LOADREGS(eqfd, sp!, {r4 - r8, pc})
+
+2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
+ ldr r0, [sp], #4
+ adr r1, 2b - 4
+ sub offset, r0, r1
+
+3: tst frame, mask @ Check for address exceptions...
+ bne 1b
+
+ ldmda frame, {r0, r1, r2, r3} @ fp, sp, lr, pc
+ mov next, r0
+
+ sub save, r3, offset @ Correct PC for prefetching
+ bic save, save, mask
+ adr r0, .Lfe
+ mov r1, save
+ bic r2, r2, mask
+ bl SYMBOL_NAME(printk)
+
+ sub r0, frame, #16
+ ldr r1, [save, #4]
+ mov r3, r1, lsr #10
+ ldr r2, .Ldsi+4
+ teq r3, r2 @ Check for stmia sp!, {args}
+ addeq save, save, #4 @ next instruction
+ bleq .Ldumpstm
+
+ ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction
+ mov r3, r1, lsr #10
+ ldr r2, .Ldsi
+ teq r3, r2
+ bleq .Ldumpstm
+
+ teq frame, next
+ movne frame, next
+ teqne frame, #0
+ bne 3b
+ LOADREGS(fd, sp!, {r4 - r8, pc})
+
+
+#define instr r4
+#define reg r5
+#define stack r6
+
+.Ldumpstm: stmfd sp!, {instr, reg, stack, lr}
+ mov stack, r0
+ mov instr, r1
+ mov reg, #9
+
+1: mov r3, #1
+ tst instr, r3, lsl reg
+ beq 2f
+ ldr r2, [stack], #-4
+ mov r1, reg
+ adr r0, .Lfp
+ bl SYMBOL_NAME(printk)
+2: subs reg, reg, #1
+ bpl 1b
+
+ mov r0, stack
+ LOADREGS(fd, sp!, {instr, reg, stack, pc})
+
+.Lfe: .ascii "Function entered at [<%p>] from [<%p>]\n"
+ .byte 0
+.Lfp: .ascii " r%d = %p\n"
+ .byte 0
+ .align
+.Ldsi: .word 0x00e92dd8 >> 2
+ .word 0x00e92d00 >> 2
diff --git a/arch/arm/lib/bitops.S b/arch/arm/lib/bitops.S
new file mode 100644
index 000000000..4c1f4b0aa
--- /dev/null
+++ b/arch/arm/lib/bitops.S
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/arm/lib/bitops.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ Purpose : Function to set a bit
+@ Prototype: int set_bit(int bit,int *addr)
+
+ENTRY(set_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ orr r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_set_bit)
+ add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #7 @ Get bit offset
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ orr r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Function to clear a bit
+@ Prototype: int clear_bit(int bit,int *addr)
+
+ENTRY(clear_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ bic r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_clear_bit)
+ add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #7 @ Get bit offset
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ bic r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+/* Purpose : Function to change a bit
+ * Prototype: int change_bit(int bit,int *addr)
+ */
+ENTRY(change_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ eor r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_change_bit)
+ add r1, r1, r0, lsr #3
+ and r3, r0, #7
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ eor r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Find a 'zero' bit
+@ Prototype: int find_first_zero_bit(char *addr,int maxbit);
+
+ENTRY(find_first_zero_bit)
+ mov r2, #0 @ Initialise bit position
+Lfindzbit1lp: ldrb r3, [r0, r2, lsr #3] @ Check byte, if 0xFF, then all bits set
+ teq r3, #0xFF
+ bne Lfoundzbit
+ add r2, r2, #8
+ cmp r2, r1 @ Check to see if we have come to the end
+ bcc Lfindzbit1lp
+ add r0, r1, #1 @ Make sure that we flag an error
+ RETINSTR(mov,pc,lr)
+Lfoundzbit: tst r3, #1 @ Check individual bits
+ moveq r0, r2
+ RETINSTR(moveq,pc,lr)
+ tst r3, #2
+ addeq r0, r2, #1
+ RETINSTR(moveq,pc,lr)
+ tst r3, #4
+ addeq r0, r2, #2
+ RETINSTR(moveq,pc,lr)
+ tst r3, #8
+ addeq r0, r2, #3
+ RETINSTR(moveq,pc,lr)
+ tst r3, #16
+ addeq r0, r2, #4
+ RETINSTR(moveq,pc,lr)
+ tst r3, #32
+ addeq r0, r2, #5
+ RETINSTR(moveq,pc,lr)
+ tst r3, #64
+ addeq r0, r2, #6
+ RETINSTR(moveq,pc,lr)
+ add r0, r2, #7
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Find next 'zero' bit
+@ Prototype: int find_next_zero_bit(char *addr,int maxbit,int offset)
+
+ENTRY(find_next_zero_bit)
+ tst r2, #7
+ beq Lfindzbit1lp @ If new byte, goto old routine
+ ldrb r3, [r0, r2, lsr#3]
+ orr r3, r3, #0xFF00 @ Set top bits so we won't get confused
+ stmfd sp!, {r4}
+ and r4, r2, #7
+ mov r3, r3, lsr r4 @ Shift right by no. of bits
+ ldmfd sp!, {r4}
+ and r3, r3, #0xFF
+ teq r3, #0xFF
+ orreq r2, r2, #7
+ addeq r2, r2, #1
+ beq Lfindzbit1lp @ If all bits are set, goto old routine
+ b Lfoundzbit
diff --git a/arch/arm/lib/checksum.S b/arch/arm/lib/checksum.S
new file mode 100644
index 000000000..f273f960d
--- /dev/null
+++ b/arch/arm/lib/checksum.S
@@ -0,0 +1,600 @@
+/*
+ * linux/arch/arm/lib/checksum.S
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+
+/* Function: __u32 csum_partial(const char *src, int len, __u32)
+ * Params : r0 = buffer, r1 = len, r2 = checksum
+ * Returns : r0 = new checksum
+ */
+
+ENTRY(csum_partial)
+ tst r0, #2
+ beq 1f
+ subs r1, r1, #2
+ addmi r1, r1, #2
+ bmi 3f
+ bic r0, r0, #3
+ ldr r3, [r0], #4
+ adds r2, r2, r3, lsr #16
+ adcs r2, r2, #0
+1: adds r2, r2, #0
+ bics ip, r1, #31
+ beq 3f
+ stmfd sp!, {r4 - r6}
+2: ldmia r0!, {r3 - r6}
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6
+ ldmia r0!, {r3 - r6}
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6
+ sub ip, ip, #32
+ teq ip, #0
+ bne 2b
+ adcs r2, r2, #0
+ ldmfd sp!, {r4 - r6}
+3: ands ip, r1, #0x1c
+ beq 5f
+4: ldr r3, [r0], #4
+ adcs r2, r2, r3
+ sub ip, ip, #4
+ teq ip, #0
+ bne 4b
+ adcs r2, r2, #0
+5: ands ip, r1, #3
+ moveq r0, r2
+ RETINSTR(moveq,pc,lr)
+ mov ip, ip, lsl #3
+ rsb ip, ip, #32
+ ldr r3, [r0]
+ mov r3, r3, lsl ip
+ adds r2, r2, r3, lsr ip
+ adc r0, r2, #0
+ RETINSTR(mov,pc,lr)
+
+/* Function: __u32 csum_partial_copy_from_user (const char *src, char *dst, int len, __u32 sum, int *err_ptr)
+ * Params : r0 = src, r1 = dst, r2 = len, r3 = sum, [sp, #0] = &err
+ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ */
+
+#define USER_LDR(instr...) \
+9999: instr; \
+ .section __ex_table, "a"; \
+ .align 3; \
+ .long 9999b, 6001f; \
+ .previous;
+
+ENTRY(csum_partial_copy_from_user)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ cmp r2, #4
+ blt .too_small_user
+ tst r1, #2 @ Test destination alignment
+ beq .dst_aligned_user
+ subs r2, r2, #2 @ We don't know if SRC is aligned...
+USER_LDR( ldrbt ip, [r0], #1)
+USER_LDR( ldrbt r8, [r0], #1)
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ adcs r3, r3, #0
+ strb ip, [r1], #1
+ mov ip, ip, lsr #8
+ strb ip, [r1], #1 @ Destination now aligned
+.dst_aligned_user:
+ tst r0, #3
+ bne .src_not_aligned_user
+ adds r3, r3, #0
+ bics ip, r2, #15 @ Routine for src & dst aligned
+ beq 2f
+1:
+USER_LDR( ldrt r4, [r0], #4)
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r4, [r0], #4)
+USER_LDR( ldrt r5, [r0], #4)
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r4, [r0], #4)
+ str r4, [r1], #4
+ adcs r3, r3, r4
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+USER_LDR( ldrt r4, [r0], #4)
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+.exit: tst r2, #1
+ strneb r4, [r1], #1
+ andne r4, r4, #255
+ adcnes r3, r3, r4
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+.too_small_user:
+ teq r2, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ cmp r2, #2
+ blt .too_small_user1
+USER_LDR( ldrbt ip, [r0], #1)
+USER_LDR( ldrbt r8, [r0], #1)
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ strb ip, [r1], #1
+ strb r8, [r1], #1
+ tst r2, #1
+.too_small_user1:
+USER_LDR( ldrnebt ip, [r0], #1)
+ strneb ip, [r1], #1
+ adcnes r3, r3, ip
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+.src_not_aligned_user:
+ cmp r2, #4
+ blt .too_small_user
+ and ip, r0, #3
+ bic r0, r0, #3
+USER_LDR( ldrt r4, [r0], #4)
+ cmp ip, #2
+ beq .src2_aligned_user
+ bhi .src3_aligned_user
+ mov r4, r4, lsr #8
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, r8, lsl #24
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #8
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #8
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #8
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b .exit
+
+.src2_aligned_user:
+ mov r4, r4, lsr #16
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, r8, lsl #16
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #16
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #16
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #16
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+USER_LDR( ldrb r4, [r0], #1)
+ b .exit
+
+.src3_aligned_user:
+ mov r4, r4, lsr #24
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, r8, lsl #8
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #24
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #24
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #24
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+USER_LDR( ldrt r4, [r0], #4)
+ strb r4, [r1], #1
+ adcs r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ b .exit
+
+ .section .fixup,"ax"
+ .align 4
+6001: mov r4, #-EFAULT
+ ldr r5, [sp, #4*8]
+ str r4, [r5]
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+/* Function: __u32 csum_partial_copy (const char *src, char *dst, int len, __u32 sum)
+ * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
+ * Returns : r0 = new checksum
+ */
+ENTRY(csum_partial_copy)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ cmp r2, #4
+ blt Ltoo_small
+ tst r1, #2 @ Test destination alignment
+ beq Ldst_aligned
+ subs r2, r2, #2 @ We don't know if SRC is aligned...
+ ldrb ip, [r0], #1
+ ldrb r8, [r0], #1
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ adcs r3, r3, #0
+ strb ip, [r1], #1
+ mov ip, ip, lsr #8
+ strb ip, [r1], #1 @ Destination now aligned
+Ldst_aligned: tst r0, #3
+ bne Lsrc_not_aligned
+ adds r3, r3, #0
+ bics ip, r2, #15 @ Routine for src & dst aligned
+ beq 3f
+1: ldmia r0!, {r4, r5, r6, r7}
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+3: ands ip, r2, #12
+ beq 5f
+ tst ip, #8
+ beq 4f
+ ldmia r0!, {r4, r5}
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ tst ip, #4
+ beq 5f
+4: ldr r4, [r0], #4
+ str r4, [r1], #4
+ adcs r3, r3, r4
+5: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ ldr r4, [r0], #4
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b Lexit
+
+Ltoo_small: teq r2, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ cmp r2, #2
+ blt Ltoo_small1
+ ldrb ip, [r0], #1
+ ldrb r8, [r0], #1
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ strb ip, [r1], #1
+ strb r8, [r1], #1
+Lexit: tst r2, #1
+Ltoo_small1: ldrneb ip, [r0], #1
+ strneb ip, [r1], #1
+ adcnes r3, r3, ip
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+Lsrc_not_aligned:
+ cmp r2, #4
+ blt Ltoo_small
+ and ip, r0, #3
+ bic r0, r0, #3
+ ldr r4, [r0], #4
+ cmp ip, #2
+ beq Lsrc2_aligned
+ bhi Lsrc3_aligned
+ mov r4, r4, lsr #8
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, r8, lsl #24
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #8
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #8
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #24
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #8
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b Lexit
+
+Lsrc2_aligned: mov r4, r4, lsr #16
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, r8, lsl #16
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #16
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #16
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #16
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #16
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ ldrb r4, [r0], #1
+ b Lexit
+
+Lsrc3_aligned: mov r4, r4, lsr #24
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, r8, lsl #8
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #24
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #24
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #8
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #24
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ ldr r4, [r0], #4
+ strb r4, [r1], #1
+ adcs r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ b Lexit
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
new file mode 100644
index 000000000..72dab5a95
--- /dev/null
+++ b/arch/arm/lib/delay.S
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/arm/lib/delay.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+LC0: .word SYMBOL_NAME(loops_per_sec)
+
+ENTRY(udelay)
+ mov r2, #0x1000
+ orr r2, r2, #0x00c6
+ mul r1, r0, r2
+ ldr r2, LC0
+ ldr r2, [r2]
+ mov r1, r1, lsr #11
+ mov r2, r2, lsr #11
+ mul r0, r1, r2
+ movs r0, r0, lsr #10
+ RETINSTR(moveq,pc,lr)
+
+@ Delay routine
+ENTRY(__delay)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ bcs SYMBOL_NAME(__delay)
+ RETINSTR(mov,pc,lr)
+
diff --git a/arch/arm/lib/floppydma.S b/arch/arm/lib/floppydma.S
new file mode 100644
index 000000000..08fdccb27
--- /dev/null
+++ b/arch/arm/lib/floppydma.S
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/lib/floppydma.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+ .global SYMBOL_NAME(floppy_fiqin_end)
+ENTRY(floppy_fiqin_start)
+ subs r9, r9, #1
+ ldrgtb r12, [r11, #-4]
+ ldrleb r12, [r11], #0
+ strb r12, [r10], #1
+ subs pc, lr, #4
+SYMBOL_NAME(floppy_fiqin_end):
+
+ .global SYMBOL_NAME(floppy_fiqout_end)
+ENTRY(floppy_fiqout_start)
+ subs r9, r9, #1
+ ldrgeb r12, [r10], #1
+ movlt r12, #0
+ strleb r12, [r11], #0
+ subles pc, lr, #4
+ strb r12, [r11, #-4]
+ subs pc, lr, #4
+SYMBOL_NAME(floppy_fiqout_end):
+
+@ Params:
+@ r0 = length
+@ r1 = address
+@ r2 = floppy port
+@ Puts these into R9_fiq, R10_fiq, R11_fiq
+ENTRY(floppy_fiqsetup)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ MODE(r3,ip,I_BIT|F_BIT|DEFAULT_FIQ) @ disable FIQs, IRQs, FIQ mode
+ mov r0, r0
+ mov r9, r0
+ mov r10, r1
+ mov r11, r2
+ RESTOREMODE(r3) @ back to normal
+ mov r0, r0
+ LOADREGS(ea,fp,{fp, sp, pc})
+
+ENTRY(floppy_fiqresidual)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ MODE(r3,ip,I_BIT|F_BIT|DEFAULT_FIQ) @ disable FIQs, IRQs, FIQ mode
+ mov r0, r0
+ mov r0, r9
+ RESTOREMODE(r3)
+ mov r0, r0
+ LOADREGS(ea,fp,{fp, sp, pc})
diff --git a/arch/arm/lib/fp_support.c b/arch/arm/lib/fp_support.c
new file mode 100644
index 000000000..aaac3c766
--- /dev/null
+++ b/arch/arm/lib/fp_support.c
@@ -0,0 +1,22 @@
+/*
+ * linux/arch/arm/lib/fp_support.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/sched.h>
+#include <linux/linkage.h>
+
+extern void (*fp_save)(struct fp_soft_struct *);
+
+asmlinkage void fp_setup(void)
+{
+ struct task_struct *p;
+
+ p = &init_task;
+ do {
+ fp_save(&p->tss.fpstate.soft);
+ p = p->next_task;
+ }
+ while (p != &init_task);
+}
diff --git a/arch/arm/lib/getconsdata.c b/arch/arm/lib/getconsdata.c
new file mode 100644
index 000000000..901c1ad16
--- /dev/null
+++ b/arch/arm/lib/getconsdata.c
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/arm/lib/getconsdata.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n)
+#define OFF_MM(n) (unsigned long)&(((struct mm_struct *)0)->n)
+
+#ifdef KERNEL_DOMAIN
+unsigned long kernel_domain = KERNEL_DOMAIN;
+#endif
+#ifdef USER_DOMAIN
+unsigned long user_domain = USER_DOMAIN;
+#endif
+unsigned long addr_limit = OFF_TSK(addr_limit);
+unsigned long tss_memmap = OFF_TSK(tss.memmap);
+unsigned long mm = OFF_TSK(mm);
+unsigned long pgd = OFF_MM(pgd);
+unsigned long tss_save = OFF_TSK(tss.save);
+unsigned long tss_fpesave = OFF_TSK(tss.fpstate.soft.save);
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+unsigned long tss_memcmap = OFF_TSK(tss.memcmap);
+#endif
diff --git a/arch/arm/lib/getconstants.c b/arch/arm/lib/getconstants.c
new file mode 100644
index 000000000..edb67a5d3
--- /dev/null
+++ b/arch/arm/lib/getconstants.c
@@ -0,0 +1,74 @@
+/*
+ * linux/arch/arm/lib/getconstants.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <stdio.h>
+#include <linux/unistd.h>
+
+void printdef(char *def, int no)
+{
+ printf("#define %s\t%d\n", def, no);
+}
+
+#include "getconstants.h"
+
+int main()
+{
+ printf("/*\n * contants.h generated by getconstants\n * DO NOT EDIT!\n */\n");
+
+ printf("#define _current\t_%s\n", "current_set");
+
+#ifdef _PAGE_PRESENT
+ printdef("PAGE_PRESENT", _PAGE_PRESENT);
+#endif
+#ifdef _PAGE_RW
+ printdef("PAGE_RW", _PAGE_RW);
+#endif
+#ifdef _PAGE_USER
+ printdef("PAGE_USER", _PAGE_USER);
+#endif
+#ifdef _PAGE_ACCESSED
+ printdef("PAGE_ACCESSED", _PAGE_ACCESSED);
+#endif
+#ifdef _PAGE_DIRTY
+ printdef("PAGE_DIRTY", _PAGE_DIRTY);
+#endif
+#ifdef _PAGE_READONLY
+ printdef("PAGE_READONLY", _PAGE_READONLY);
+#endif
+#ifdef _PAGE_NOT_USER
+ printdef("PAGE_NOT_USER", _PAGE_NOT_USER);
+#endif
+#ifdef _PAGE_OLD
+ printdef("PAGE_OLD", _PAGE_OLD);
+#endif
+#ifdef _PAGE_CLEAN
+ printdef("PAGE_CLEAN", _PAGE_CLEAN);
+#endif
+ printdef("TSS_MEMMAP", (int)tss_memmap);
+ printdef("TSS_SAVE", (int)tss_save);
+#ifdef __HAS_tss_memcmap
+ printdef("TSS_MEMCMAP", (int)tss_memcmap);
+#endif
+#ifdef __HAS_addr_limit
+ printdef("ADDR_LIMIT", (int)addr_limit);
+#endif
+#ifdef __HAS_kernel_domain
+ printdef("KERNEL_DOMAIN", kernel_domain);
+#endif
+#ifdef __HAS_user_domain
+ printdef("USER_DOMAIN", user_domain);
+#endif
+ printdef("TSS_FPESAVE", (int)tss_fpesave);
+ printdef("MM", (int)mm);
+ printdef("PGD", (int)pgd);
+
+ printf("#define KSWI_BASE 0x900000\n");
+ printf("#define KSWI_SYS_BASE 0x9F0000\n");
+ printf("#define SYS_ERROR0 0x9F0000\n");
+ return 0;
+}
diff --git a/arch/arm/lib/getconstants.h b/arch/arm/lib/getconstants.h
new file mode 100644
index 000000000..ef9637781
--- /dev/null
+++ b/arch/arm/lib/getconstants.h
@@ -0,0 +1,17 @@
+/*
+ * *** THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT! ***
+ */
+unsigned long addr_limit = 56;
+#define __HAS_addr_limit
+unsigned long tss_memmap = 640;
+#define __HAS_tss_memmap
+unsigned long mm = 1676;
+#define __HAS_mm
+unsigned long pgd = 8;
+#define __HAS_pgd
+unsigned long tss_save = 636;
+#define __HAS_tss_save
+unsigned long tss_fpesave = 492;
+#define __HAS_tss_fpesave
+unsigned long tss_memcmap = 644;
+#define __HAS_tss_memcmap
diff --git a/arch/arm/lib/io-acorn.S b/arch/arm/lib/io-acorn.S
new file mode 100644
index 000000000..172783b02
--- /dev/null
+++ b/arch/arm/lib/io-acorn.S
@@ -0,0 +1,215 @@
+/*
+ * linux/arch/arm/lib/io-acorn.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/config.h> /* for CONFIG_CPU_ARM2 and CONFIG_CPU_ARM3 */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/hardware.h>
+
+ .text
+ .align
+
+#define OUT(reg) \
+ mov r8, reg, lsl $16 ;\
+ orr r8, r8, r8, lsr $16 ;\
+ str r8, [r3, r0, lsl $2] ;\
+ mov r8, reg, lsr $16 ;\
+ orr r8, r8, r8, lsl $16 ;\
+ str r8, [r3, r0, lsl $2]
+
+#define IN(reg) \
+ ldr reg, [r0] ;\
+ and reg, reg, ip ;\
+ ldr lr, [r0] ;\
+ orr reg, reg, lr, lsl $16
+
+ .equ pcio_base_high, PCIO_BASE & 0xff000000
+ .equ pcio_base_low, PCIO_BASE & 0x00ff0000
+ .equ io_base_high, IO_BASE & 0xff000000
+ .equ io_base_low, IO_BASE & 0x00ff0000
+
+ .equ addr_io_diff_hi, pcio_base_high - io_base_high
+ .equ addr_io_diff_lo, pcio_base_low - io_base_low
+
+ .macro addr reg, off
+ tst \off, #0x80000000
+ .if addr_io_diff_hi
+ movne \reg, #IO_BASE
+ moveq \reg, #pcio_base_high
+ .if pcio_base_low
+ addeq \reg, \reg, #pcio_base_low
+ .endif
+ .else
+ mov \reg, #IO_BASE
+ addeq \reg, \reg, #addr_io_diff_lo
+ .endif
+ .endm
+
+@ Purpose: read a block of data from a hardware register to memory.
+@ Proto : insw(int from_port, void *to, int len_in_words);
+@ Proto : inswb(int from_port, void *to, int len_in_bytes);
+@ Notes : increments to
+
+ENTRY(insw)
+ mov r2, r2, lsl#1
+ENTRY(inswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r10 ,fp ,ip ,lr ,pc}
+ sub fp, ip, #4
+ addr r3, r0
+ add r0, r3, r0, lsl #2
+ tst r1, #3
+ beq Linswok
+ tst r1, #1
+ bne Linsw_notaligned
+ cmp r2, #1
+ ldrge r4, [r0]
+ strgeb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ ldmleea fp, {r4 - r10, fp, sp, pc}^
+ sub r2, r2, #2
+Linswok: mov ip, #0xFF
+ orr ip, ip, ip, lsl #8
+Linswlp: subs r2, r2, #64
+ bmi Linsw_toosmall
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ bne Linswlp
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+Linsw_toosmall:
+ adds r2, r2, #32
+ bmi Linsw_toosmall2
+Linsw2lp: IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ LOADREGS(eqea, fp, {r4 - r10, fp, sp, pc})
+ b Linsw_notaligned
+Linsw_toosmall2:
+ add r2, r2, #32
+Linsw_notaligned:
+ cmp r2, #1
+ LOADREGS(ltea, fp, {r4 - r10, fp, sp, pc})
+ ldr r4, [r0]
+ strb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ subs r2, r2, #2
+ bgt Linsw_notaligned
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+
+@ Purpose: write a block of data from memory to a hardware register.
+@ Proto : outsw(int to_reg, void *from, int len_in_words);
+@ Proto : outswb(int to_reg, void *from, int len_in_bytes);
+@ Notes : increments from
+
+ENTRY(outsw)
+ mov r2, r2, LSL#1
+ENTRY(outswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ addr r3, r0
+ tst r1, #2
+ beq 1f
+ ldr r4, [r1], #2
+ mov r4, r4, lsl #16
+ orr r4, r4, r4, lsr #16
+ str r4, [r3, r0, lsl #2]
+ subs r2, r2, #2
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+1: subs r2, r2, #32
+ blt 2f
+ ldmia r1!, {r4, r5, r6, r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ ldmia r1!, {r4, r5, r6, r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ bne 1b
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+2: adds r2, r2, #32
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+3: ldr r4, [r1],#2
+ mov r4, r4, lsl#16
+ orr r4, r4, r4, lsr#16
+ str r4, [r3, r0, lsl#2]
+ subs r2, r2, #2
+ bgt 3b
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+
+@ Purpose: write a memc register
+@ Proto : void memc_write(int register, int value);
+@ Returns: nothing
+
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ENTRY(memc_write)
+ cmp r0, #7
+ RETINSTR(movgt,pc,lr)
+ mov r0, r0, lsl #17
+ mov r1, r1, lsl #15
+ mov r1, r1, lsr #17
+ orr r0, r0, r1, lsl #2
+ add r0, r0, #0x03600000
+ strb r0, [r0]
+ RETINSTR(mov,pc,lr)
+#define CPSR2SPSR(rt)
+#else
+#define CPSR2SPSR(rt) \
+ mrs rt, cpsr; \
+ msr spsr, rt
+#endif
+
+@ Purpose: call an expansion card loader to read bytes.
+@ Proto : char ecard_loader_read(int offset, char *card_base, char *loader);
+@ Returns: byte read
+
+ENTRY(ecard_loader_read)
+ stmfd sp!, {r4 - r12, lr}
+ mov r11, r1
+ mov r1, r0
+ CPSR2SPSR(r0)
+ mov lr, pc
+ mov pc, r2
+ LOADREGS(fd, sp!, {r4 - r12, pc})
+
+@ Purpose: call an expansion card loader to reset the card
+@ Proto : void ecard_loader_reset(int card_base, char *loader);
+@ Returns: nothing
+
+ENTRY(ecard_loader_reset)
+ stmfd sp!, {r4 - r12, lr}
+ mov r11, r0
+ CPSR2SPSR(r0)
+ mov lr, pc
+ add pc, r1, #8
+ LOADREGS(fd, sp!, {r4 - r12, pc})
diff --git a/arch/arm/lib/io-ebsa110.S b/arch/arm/lib/io-ebsa110.S
new file mode 100644
index 000000000..e0b8229a4
--- /dev/null
+++ b/arch/arm/lib/io-ebsa110.S
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/arm/lib/io-ebsa110.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+ .align
+
+#define OUT(reg) \
+ mov r8, reg, lsl $16 ;\
+ orr r8, r8, r8, lsr $16 ;\
+ str r8, [r3, r0, lsl $2] ;\
+ mov r8, reg, lsr $16 ;\
+ orr r8, r8, r8, lsl $16 ;\
+ str r8, [r3, r0, lsl $2]
+
+#define IN(reg) \
+ ldr reg, [r0] ;\
+ and reg, reg, ip ;\
+ ldr lr, [r0] ;\
+ orr reg, reg, lr, lsl $16
+
+@ Purpose: read a block of data from a hardware register to memory.
+@ Proto : insw(int from_port, void *to, int len_in_words);
+@ Proto : inswb(int from_port, void *to, int len_in_bytes);
+@ Notes : increments to
+
+ENTRY(insw)
+ mov r2, r2, lsl#1
+ENTRY(inswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r10 ,fp ,ip ,lr ,pc}
+ sub fp, ip, #4
+ cmp r0, #0x00c00000
+ movge r3, #0
+ movlt r3, #0xf0000000
+ add r0, r3, r0, lsl #2
+ tst r1, #3
+ beq Linswok
+ tst r1, #1
+ bne Linsw_notaligned
+ cmp r2, #1
+ ldrge r4, [r0]
+ strgeb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ ldmleea fp, {r4 - r10, fp, sp, pc}^
+ sub r2, r2, #2
+Linswok: mov ip, #0xFF
+ orr ip, ip, ip, lsl #8
+Linswlp: subs r2, r2, #64
+ bmi Linsw_toosmall
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ bne Linswlp
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+Linsw_toosmall:
+ add r2, r2, #32
+ bmi Linsw_toosmall2
+Linsw2lp: IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ LOADREGS(eqea, fp, {r4 - r10, fp, sp, pc})
+ b Linsw_notaligned
+Linsw_toosmall2:
+ add r2, r2, #32
+Linsw_notaligned:
+ cmp r2, #1
+ LOADREGS(ltea, fp, {r4 - r10, fp, sp, pc})
+ ldr r4, [r0]
+ strb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ subs r2, r2, #2
+ bgt Linsw_notaligned
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+
+@ Purpose: write a block of data from memory to a hardware register.
+@ Proto : outsw(int to_reg, void *from, int len_in_words);
+@ Proto : outswb(int to_reg, void *from, int len_in_bytes);
+@ Notes : increments from
+
+ENTRY(outsw)
+ mov r2, r2, LSL#1
+ENTRY(outswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ cmp r0, #0x00c00000
+ movge r3, #0
+ movlt r3, #0xf0000000
+ tst r1, #2
+ beq Loutsw32lp
+ ldr r4, [r1], #2
+ mov r4, r4, lsl #16
+ orr r4, r4, r4, lsr #16
+ str r4, [r3, r0, lsl #2]
+ sub r2, r2, #2
+ teq r2, #0
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+Loutsw32lp: subs r2,r2,#32
+ blt Loutsw_toosmall
+ ldmia r1!,{r4,r5,r6,r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ ldmia r1!,{r4,r5,r6,r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+ b Loutsw32lp
+Loutsw_toosmall:
+ adds r2,r2,#32
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+Llpx: ldr r4,[r1],#2
+ mov r4,r4,LSL#16
+ orr r4,r4,r4,LSR#16
+ str r4,[r3,r0,LSL#2]
+ subs r2,r2,#2
+ bgt Llpx
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+
diff --git a/arch/arm/lib/ll_char_wr.S b/arch/arm/lib/ll_char_wr.S
new file mode 100644
index 000000000..7df08d93b
--- /dev/null
+++ b/arch/arm/lib/ll_char_wr.S
@@ -0,0 +1,157 @@
+/*
+ * linux/arch/arm/lib/ll_char_wr.S
+ *
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King.
+ *
+ * 10-04-96 RMK Various cleanups & reduced register usage.
+ */
+
+@ Regs: [] = corruptable
+@ {} = used
+@ () = dont use
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+#define BOLD 0x01
+#define ITALIC 0x02
+#define UNDERLINE 0x04
+#define FLASH 0x08
+#define INVERSE 0x10
+
+LC0: .word SYMBOL_NAME(bytes_per_char_h)
+ .word SYMBOL_NAME(video_size_row)
+ .word SYMBOL_NAME(cmap_80)
+ .word SYMBOL_NAME(con_charconvtable)
+
+ENTRY(ll_write_char)
+ stmfd sp!, {r4 - r7, lr}
+@
+@ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+ eor ip, r1, #UNDERLINE << 24
+/*
+ * calculate colours
+ */
+ tst r1, #INVERSE << 24
+ moveq r2, r1, lsr #8
+ moveq r3, r1, lsr #16
+ movne r2, r1, lsr #16
+ movne r3, r1, lsr #8
+ and r3, r3, #255
+ and r2, r2, #255
+/*
+ * calculate offset into character table
+ */
+ and r1, r1, #255
+ mov r1, r1, lsl #3
+/*
+ * calculate offset required for each row [maybe I should make this an argument to this fn.
+ * Have to see what the register usage is like in the calling routines.]
+ */
+ adr r4, LC0
+ ldmia r4, {r4, r5, r6, lr}
+ ldr r4, [r4]
+ ldr r5, [r5]
+/*
+ * Go to resolution-dependent routine...
+ */
+ cmp r4, #4
+ blt Lrow1bpp
+ eor r2, r3, r2 @ Create eor mask to change colour from bg
+ orr r3, r3, r3, lsl #8 @ to fg.
+ orr r3, r3, r3, lsl #16
+ add r0, r0, r5, lsl #3 @ Move to bottom of character
+ add r1, r1, #7
+ ldrb r7, [r6, r1]
+ tst ip, #UNDERLINE << 24
+ eoreq r7, r7, #255
+ teq r4, #8
+ beq Lrow8bpplp
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+ orr r3, r3, r3, lsl #4
+Lrow4bpplp: ldr r7, [lr, r7, lsl #2]
+ mul r7, r2, r7
+ tst r1, #7 @ avoid using r7 directly after
+ eor ip, r3, r7
+ str ip, [r0, -r5]!
+ LOADREGS(eqfd, sp!, {r4 - r7, pc})
+ sub r1, r1, #1
+ ldrb r7, [r6, r1]
+ ldr r7, [lr, r7, lsl #2]
+ mul r7, r2, r7
+ tst r1, #7 @ avoid using r7 directly after
+ eor ip, r3, r7
+ str ip, [r0, -r5]!
+ subne r1, r1, #1
+ ldrneb r7, [r6, r1]
+ bne Lrow4bpplp
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+Lrow8bpplp: mov ip, r7, lsr #4
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15
+ eor r4, r3, r4
+ ldr ip, [lr, ip, lsl #2]
+ mul ip, r2, ip
+ tst r1, #7
+ eor ip, r3, ip
+ sub r0, r0, r5
+ stmia r0, {r4, ip}
+ LOADREGS(eqfd, sp!, {r4 - r7, pc})
+ sub r1, r1, #1
+ ldrb r7, [r6, r1]
+ mov ip, r7, lsr #4
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15
+ eor r4, r3, r4
+ ldr ip, [lr, ip, lsl #2]
+ mul ip, r2, ip
+ tst r1, #7
+ eor ip, r3, ip
+ sub r0, r0, r5
+ stmia r0, {r4, ip}
+ subne r1, r1, #1
+ ldrneb r7, [r6, r1]
+ bne Lrow8bpplp
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+Lrow1bpp: add r6, r6, r1
+ ldmia r6, {r4, r7}
+ tst ip, #INVERSE << 24
+ mvnne r4, r4
+ mvnne r7, r7
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ tst ip, #UNDERLINE << 24
+ mvneq r7, r7
+ strb r7, [r0], r5
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+ .bss
+ENTRY(con_charconvtable)
+ .space 1024
diff --git a/arch/arm/lib/loaders.S b/arch/arm/lib/loaders.S
new file mode 100644
index 000000000..760e2e311
--- /dev/null
+++ b/arch/arm/lib/loaders.S
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/lib/loaders.S
+ *
+ * This file contains the ROM loaders for buggy cards
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Oak SCSI
+ */
+
+ENTRY(oak_scsi_loader)
+ b Loak_scsi_read
+ .word 0
+Loak_scsi_reset: bic r10, r11, #0x00ff0000
+ ldr r2, [r10]
+ RETINSTR(mov,pc,lr)
+
+Loak_scsi_read: mov r2, r1, lsr #3
+ and r2, r2, #15 << 9
+ bic r10, r11, #0x00ff0000
+ ldr r2, [r10, r2]
+ mov r2, r1, lsl #20
+ ldrb r0, [r11, r2, lsr #18]
+ ldr r2, [r10]
+ RETINSTR(mov,pc,lr)
+
+ENTRY(atomwide_serial_loader)
+ b Latomwide_serial_read
+ .word 0
+Latomwide_serial_reset: mov r2, #0x3c00
+ strb r2, [r11, r2]
+ RETINSTR(mov,pc,lr)
+
+Latomwide_serial_read: cmp r1, #0x8000
+ RETINSTR(movhi,pc,lr)
+ add r0, r1, #0x800
+ mov r0, r0, lsr #11
+ mov r3, #0x3c00
+ strb r0, [r11, r3]
+ mov r2, r1, lsl #21
+ ldrb r0, [r11, r2, lsr #19]
+ strb r2, [r11, r3]
+ RETINSTR(mov,pc,lr)
+
+/*
+ * Cards we don't know about yet
+ */
+ENTRY(noloader)
+ mov r0, r0
+ mov r0, #0
+ RETINSTR(mov,pc,lr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
new file mode 100644
index 000000000..209768f9f
--- /dev/null
+++ b/arch/arm/lib/memcpy.S
@@ -0,0 +1,312 @@
+/*
+ * linux/arch/arm/lib/memcpy.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ * Except memcpy/memmove routine.
+ */
+
+#include <asm/assembler.h>
+#include <linux/linkage.h>
+
+ .text
+#define ENTER \
+ mov ip,sp ;\
+ stmfd sp!,{r4-r9,fp,ip,lr,pc} ;\
+ sub fp,ip,#4
+
+#define EXIT \
+ LOADREGS(ea, fp, {r4 - r9, fp, sp, pc})
+
+#define EXITEQ \
+ LOADREGS(eqea, fp, {r4 - r9, fp, sp, pc})
+
+# Prototype: void memcpy(void *to,const void *from,unsigned long n);
+# ARM3: can't use memcopy here!!!
+
+ENTRY(memcpy)
+ENTRY(memmove)
+ ENTER
+ cmp r1, r0
+ bcc 19f
+ subs r2, r2, #4
+ blt 6f
+ ands ip, r0, #3
+ bne 7f
+ ands ip, r1, #3
+ bne 8f
+
+1: subs r2, r2, #8
+ blt 5f
+ subs r2, r2, #0x14
+ blt 3f
+2: ldmia r1!,{r3 - r9, ip}
+ stmia r0!,{r3 - r9, ip}
+ subs r2, r2, #32
+ bge 2b
+ cmn r2, #16
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6}
+ subge r2, r2, #0x10
+3: adds r2, r2, #0x14
+4: ldmgeia r1!, {r3 - r5}
+ stmgeia r0!, {r3 - r5}
+ subges r2, r2, #12
+ bge 4b
+5: adds r2, r2, #8
+ blt 6f
+ subs r2, r2, #4
+ ldrlt r3, [r1], #4
+ strlt r3, [r0], #4
+ ldmgeia r1!, {r3, r4}
+ stmgeia r0!, {r3, r4}
+ subge r2, r2, #4
+
+6: adds r2, r2, #4
+ EXITEQ
+ cmp r2, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ EXIT
+
+7: rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ subs r2, r2, ip
+ blt 6b
+ ands ip, r1, #3
+ beq 1b
+8: bic r1, r1, #3
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt 15f
+ beq 11f
+ cmp r2, #12
+ blt 10f
+ sub r2, r2, #12
+9: mov r3, r7, lsr #8
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 9b
+ adds r2, r2, #12
+ blt 1b
+10: mov r3, r7, lsr #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #24
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 10b
+ sub r1, r1, #3
+ b 6b
+
+11: cmp r2, #12
+ blt 13f /* */
+ sub r2, r2, #12
+12: mov r3, r7, lsr #16
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7,LSL#16
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 12b
+ adds r2, r2, #12
+ blt 14f
+13: mov r3, r7, lsr #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #16
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 13b
+14: sub r1, r1, #2
+ b 6b
+
+15: cmp r2, #12
+ blt 17f
+ sub r2, r2, #12
+16: mov r3, r7, lsr #24
+ ldmia r1!,{r4 - r7}
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 16b
+ adds r2, r2, #12
+ blt 18f
+17: mov r3, r7, lsr #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl#8
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 17b
+18: sub r1, r1, #1
+ b 6b
+
+
+19: add r1, r1, r2
+ add r0, r0, r2
+ subs r2, r2, #4
+ blt 24f
+ ands ip, r0, #3
+ bne 25f
+ ands ip, r1, #3
+ bne 26f
+
+20: subs r2, r2, #8
+ blt 23f
+ subs r2, r2, #0x14
+ blt 22f
+21: ldmdb r1!, {r3 - r9, ip}
+ stmdb r0!, {r3 - r9, ip}
+ subs r2, r2, #32
+ bge 21b
+22: cmn r2, #16
+ ldmgedb r1!, {r3 - r6}
+ stmgedb r0!, {r3 - r6}
+ subge r2, r2, #16
+ adds r2, r2, #20
+ ldmgedb r1!, {r3 - r5}
+ stmgedb r0!, {r3 - r5}
+ subge r2, r2, #12
+23: adds r2, r2, #8
+ blt 24f
+ subs r2, r2, #4
+ ldrlt r3, [r1, #-4]!
+ strlt r3, [r0, #-4]!
+ ldmgedb r1!, {r3, r4}
+ stmgedb r0!, {r3, r4}
+ subge r2, r2, #4
+
+24: adds r2, r2, #4
+ EXITEQ
+ cmp r2, #2
+ ldrb r3, [r1, #-1]!
+ strb r3, [r0, #-1]!
+ ldrgeb r3, [r1, #-1]!
+ strgeb r3, [r0, #-1]!
+ ldrgtb r3, [r1, #-1]!
+ strgtb r3, [r0, #-1]!
+ EXIT
+
+25: cmp ip, #2
+ ldrb r3, [r1, #-1]!
+ strb r3, [r0, #-1]!
+ ldrgeb r3, [r1, #-1]!
+ strgeb r3, [r0, #-1]!
+ ldrgtb r3, [r1, #-1]!
+ strgtb r3, [r0, #-1]!
+ subs r2, r2, ip
+ blt 24b
+ ands ip, r1, #3
+ beq 20b
+
+26: bic r1, r1, #3
+ ldr r3, [r1], #0
+ cmp ip, #2
+ blt 34f
+ beq 30f
+ cmp r2, #12
+ blt 28f
+ sub r2, r2, #12
+27: mov r7, r3, lsl #8
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r4, lsr #24
+ mov r4, r4, lsl #8
+ orr r4, r4, r3, lsr #24
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 27b
+ adds r2, r2, #12
+ blt 29f
+28: mov ip, r3, lsl #8
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #24
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 28b
+29: add r1, r1, #3
+ b 24b
+
+30: cmp r2, #12
+ blt 32f
+ sub r2, r2, #12
+31: mov r7, r3, lsl #16
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r4, lsr #16
+ mov r4, r4, lsl #16
+ orr r4, r4, r3, lsr #16
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 31b
+ adds r2, r2, #12
+ blt 33f
+32: mov ip, r3, lsl #16
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #16
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 32b
+33: add r1, r1, #2
+ b 24b
+
+34: cmp r2, #12
+ blt 36f
+ sub r2, r2, #12
+35: mov r7, r3, lsl #24
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r4, lsr #8
+ mov r4, r4, lsl #24
+ orr r4, r4, r3, lsr #8
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 35b
+ adds r2, r2, #12
+ blt 37f
+36: mov ip, r3, lsl #24
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #8
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 36b
+37: add r1, r1, #1
+ b 24b
+
+ .align
+
diff --git a/arch/arm/lib/memfastset.S b/arch/arm/lib/memfastset.S
new file mode 100644
index 000000000..a7e8a5d29
--- /dev/null
+++ b/arch/arm/lib/memfastset.S
@@ -0,0 +1,35 @@
+/*
+ * linux/arch/arm/lib/memfastset.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+@ Prototype: void memsetl (unsigned long *d, unsigned long c, size_t n);
+
+ENTRY(memsetl)
+ stmfd sp!, {lr}
+ cmp r2, #16
+ blt 5f
+ mov r3, r1
+ mov ip, r1
+ mov lr, r1
+ subs r2, r2, #32
+ bmi 2f
+1: stmia r0!, {r1, r3, ip, lr}
+ stmia r0!, {r1, r3, ip, lr}
+ LOADREGS(eqfd, sp!, {pc})
+ subs r2, r2, #32
+ bpl 1b
+2: adds r2, r2, #16
+ bmi 4f
+3: stmia r0!, {r1, r3, ip, lr}
+ LOADREGS(eqfd, sp!, {pc})
+ subs r2, r2, #16
+ bpl 3b
+4: add r2, r2, #16
+5: subs r2, r2, #4
+ strge r1, [r0], #4
+ bgt 5b
+ LOADREGS(fd, sp!, {pc})
diff --git a/arch/arm/lib/string.S b/arch/arm/lib/string.S
new file mode 100644
index 000000000..b54c902a4
--- /dev/null
+++ b/arch/arm/lib/string.S
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/arm/lib/string.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+# Prototype: char *strrchr(const char *s,char c);
+
+@ r0 = pointer, r1 = length
+ .global memzero
+memzero: stmfd sp!, {lr}
+ mov r2, #0
+ mov r3, #0
+ mov ip, #0
+ mov lr, #0
+1: subs r1, r1, #4*8
+ stmgeia r0!, {r2, r3, ip, lr}
+ stmgeia r0!, {r2, r3, ip, lr}
+ bgt 1b
+ LOADREGS(fd, sp!, {pc})
+
+ .global __page_memcpy
+__page_memcpy: stmfd sp!, {r4, r5, lr}
+1: subs r2, r2, #4*8
+ ldmgeia r1!, {r3, r4, r5, ip}
+ stmgeia r0!, {r3, r4, r5, ip}
+ ldmgeia r1!, {r3, r4, r5, ip}
+ stmgeia r0!, {r3, r4, r5, ip}
+ bgt 1b
+ LOADREGS(fd, sp!, {r4, r5, pc})
+
+ .global memset
+memset: mov r3, r0
+ cmp r2, #16
+ blt 6f
+ ands ip, r3, #3
+ beq 1f
+ cmp ip, #2
+ strltb r1, [r3], #1 @ Align destination
+ strleb r1, [r3], #1
+ strb r1, [r3], #1
+ rsb ip, ip, #4
+ sub r2, r2, ip
+1: orr r1, r1, r1, lsl #8
+ orr r1, r1, r1, lsl #16
+ cmp r2, #256
+ blt 4f
+ stmfd sp!, {r4, r5, lr}
+ mov r4, r1
+ mov r5, r1
+ mov lr, r1
+ mov ip, r2, lsr #6
+ sub r2, r2, ip, lsl #6
+2: stmia r3!, {r1, r4, r5, lr} @ 64 bytes at a time.
+ stmia r3!, {r1, r4, r5, lr}
+ stmia r3!, {r1, r4, r5, lr}
+ stmia r3!, {r1, r4, r5, lr}
+ subs ip, ip, #1
+ bne 2b
+ teq r2, #0
+ LOADREGS(eqfd, sp!, {r4, r5, pc}) @ Now <64 bytes to go.
+ tst r2, #32
+ stmneia r3!, {r1, r4, r5, lr}
+ stmneia r3!, {r1, r4, r5, lr}
+ tst r2, #16
+ stmneia r3!, {r1, r4, r5, lr}
+ ldmia sp!, {r4, r5}
+3: tst r2, #8
+ stmneia r3!, {r1, lr}
+ tst r2, #4
+ strne r1, [r3], #4
+ tst r2, #2
+ strneb r1, [r3], #1
+ strneb r1, [r3], #1
+ tst r2, #1
+ strneb r1, [r3], #1
+ LOADREGS(fd, sp!, {pc})
+
+4: movs ip, r2, lsr #3
+ beq 3b
+ sub r2, r2, ip, lsl #3
+ stmfd sp!, {lr}
+ mov lr, r1
+ subs ip, ip, #4
+5: stmgeia r3!, {r1, lr}
+ stmgeia r3!, {r1, lr}
+ stmgeia r3!, {r1, lr}
+ stmgeia r3!, {r1, lr}
+ subges ip, ip, #4
+ bge 5b
+ tst ip, #2
+ stmneia r3!, {r1, lr}
+ stmneia r3!, {r1, lr}
+ tst ip, #1
+ stmneia r3!, {r1, lr}
+ teq r2, #0
+ LOADREGS(eqfd, sp!, {pc})
+ b 3b
+
+6: subs r2, r2, #1
+ strgeb r1, [r3], #1
+ bgt 6b
+ RETINSTR(mov, pc, lr)
+
+ENTRY(strrchr)
+ stmfd sp!, {lr}
+ mov r3, #0
+1: ldrb r2, [r0], #1
+ teq r2, r1
+ moveq r3, r0
+ teq r2, #0
+ bne 1b
+ mov r0, r3
+ LOADREGS(fd, sp!, {pc})
+
+ENTRY(strchr)
+ stmfd sp!,{lr}
+ mov r3, #0
+1: ldrb r2, [r0], #1
+ teq r2, r1
+ teqne r2, #0
+ bne 1b
+ teq r2, #0
+ moveq r0, #0
+ subne r0, r0, #1
+ LOADREGS(fd, sp!, {pc})
+
+ENTRY(memchr)
+ stmfd sp!, {lr}
+1: ldrb r3, [r0], #1
+ teq r3, r1
+ beq 2f
+ subs r2, r2, #1
+ bpl 1b
+2: movne r0, #0
+ subeq r0, r0, #1
+ LOADREGS(fd, sp!, {pc})
diff --git a/arch/arm/lib/system.S b/arch/arm/lib/system.S
new file mode 100644
index 000000000..54ea4d9a4
--- /dev/null
+++ b/arch/arm/lib/system.S
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/arm/lib/system.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * 07/06/96: Now support tasks running in SVC mode.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+ENTRY(abort)
+ adr r0, .abort_msg
+ mov r1, lr
+ b SYMBOL_NAME(panic)
+
+.abort_msg: .ascii "Eek! Got to an abort() from %p! "
+ .ascii "(Please report to rmk@ecs.soton.ac.uk)\n\0"
+ .align
diff --git a/arch/arm/lib/testm.c b/arch/arm/lib/testm.c
new file mode 100644
index 000000000..88e815605
--- /dev/null
+++ b/arch/arm/lib/testm.c
@@ -0,0 +1,81 @@
+char buffer[1036];
+char buffer2[1036];
+
+int main ()
+{
+ char *p;
+ int i, o, o2, l;
+
+ printf ("Testing memset\n");
+ for (l = 1; l < 1020; l ++) {
+ for (o = 0; o < 4; o++) {
+ p = buffer + o + 4;
+ for (i = 0; i < l + 12; i++)
+ buffer[i] = 0x55;
+
+ memset (p, 0xaa, l);
+
+ for (i = 0; i < l; i++)
+ if (p[i] != 0xaa)
+ printf ("Error: %X+%d\n", p, i);
+ if (p[-1] != 0x55 || p[-2] != 0x55 || p[-3] != 0x55 || p[-4] != 0x55)
+ printf ("Error before %X\n", p);
+ if (p[l] != 0x55 || p[l+1] != 0x55 || p[l+2] != 0x55 || p[l+3] != 0x55)
+ printf ("Error at end: %p: %02X %02X %02X %02X\n", p+l, p[l], p[l+1], p[l+2], p[l+3]);
+ }
+ }
+
+ printf ("Testing memcpy s > d\n");
+ for (l = 1; l < 1020; l++) {
+ for (o = 0; o < 4; o++) {
+ for (o2 = 0; o2 < 4; o2++) {
+ char *d, *s;
+
+ for (i = 0; i < l + 12; i++)
+ buffer[i] = (i & 0x3f) + 0x40;
+ for (i = 0; i < 1036; i++)
+ buffer2[i] = 0;
+
+ s = buffer + o;
+ d = buffer2 + o2 + 4;
+
+ memcpy (d, s, l);
+
+ for (i = 0; i < l; i++)
+ if (s[i] != d[i])
+ printf ("Error at %X+%d -> %X+%d (%02X != %02X)\n", s, i, d, i, s[i], d[i]);
+ if (d[-1] || d[-2] || d[-3] || d[-4])
+ printf ("Error before %X\n", d);
+ if (d[l] || d[l+1] || d[l+2] || d[l+3])
+ printf ("Error after %X\n", d+l);
+ }
+ }
+ }
+
+ printf ("Testing memcpy s < d\n");
+ for (l = 1; l < 1020; l++) {
+ for (o = 0; o < 4; o++) {
+ for (o2 = 0; o2 < 4; o2++) {
+ char *d, *s;
+
+ for (i = 0; i < l + 12; i++)
+ buffer2[i] = (i & 0x3f) + 0x40;
+ for (i = 0; i < 1036; i++)
+ buffer[i] = 0;
+
+ s = buffer2 + o;
+ d = buffer + o2 + 4;
+
+ memcpy (d, s, l);
+
+ for (i = 0; i < l; i++)
+ if (s[i] != d[i])
+ printf ("Error at %X+%d -> %X+%d (%02X != %02X)\n", s, i, d, i, s[i], d[i]);
+ if (d[-1] || d[-2] || d[-3] || d[-4])
+ printf ("Error before %X\n", d);
+ if (d[l] || d[l+1] || d[l+2] || d[l+3])
+ printf ("Error after %X\n", d+l);
+ }
+ }
+ }
+}
diff --git a/arch/arm/lib/uaccess-armo.S b/arch/arm/lib/uaccess-armo.S
new file mode 100644
index 000000000..1a740493a
--- /dev/null
+++ b/arch/arm/lib/uaccess-armo.S
@@ -0,0 +1,230 @@
+/*
+ * arch/arm/lib/uaccess-armo.S
+ *
+ * Copyright (C) 1998 Russell King
+ *
+ * Note! Some code fragments found in here have a special calling
+ * convention - they are not APCS compliant!
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+
+#define USER(x...) \
+9999: x; \
+ .section __ex_table,"a"; \
+ .align 3; \
+ .long 9999b,9001f; \
+ .previous
+
+ .globl SYMBOL_NAME(uaccess_user)
+SYMBOL_NAME(uaccess_user): @ dispatch table used when accesses target user space
+ .word uaccess_user_put_byte @ NOTE(review): order presumably mirrors a struct of fn ptrs in <asm/uaccess.h> -- confirm
+ .word uaccess_user_get_byte
+ .word uaccess_user_put_half
+ .word uaccess_user_get_half
+ .word uaccess_user_put_word
+ .word uaccess_user_get_word
+ .word __arch_copy_from_user
+ .word __arch_copy_to_user
+ .word __arch_clear_user
+ .word __arch_strncpy_from_user
+ .word __arch_strlen_user
+
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_byte: @ store one byte to user space (strbt = unprivileged store)
+ stmfd sp!, {lr}
+USER( strbt r0, [r1]) @ on fault, __ex_table sends us to 9001 below
+ ldmfd sp!, {pc}^ @ 26-bit return; ^ also restores caller's flags
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_half: @ two byte stores: no halfword store on ARM2/3
+ stmfd sp!, {lr}
+USER( strbt r0, [r1], #1) @ low byte first (little-endian)
+ mov r0, r0, lsr #8
+USER( strbt r0, [r1])
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_word:
+ stmfd sp!, {lr}
+USER( strt r0, [r1])
+ ldmfd sp!, {pc}^
+
+9001: mov r2, #-EFAULT @ shared fixup for all three put routines
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_byte: @ load one byte from user space (ldrbt = unprivileged load)
+ stmfd sp!, {lr}
+USER( ldrbt r0, [r0]) @ on fault, __ex_table sends us to 9001 below
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_half: @ word load then mask: no halfword load on ARM2/3
+ stmfd sp!, {lr}
+USER( ldrt r0, [r0]) @ NOTE(review): word-sized access for a half -- confirm OK near segment end
+ mov r0, r0, lsl #16 @ keep only the low 16 bits
+ mov r0, r0, lsr #16
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_word:
+ stmfd sp!, {lr}
+USER( ldrt r0, [r0])
+ ldmfd sp!, {pc}^
+
+9001: mov r1, #-EFAULT @ shared fixup for all three get routines
+ ldmfd sp!, {pc}^
+
+
+
+ .globl SYMBOL_NAME(uaccess_kernel)
+SYMBOL_NAME(uaccess_kernel): @ same table layout as uaccess_user, for KERNEL_DS accesses (no fault handling needed)
+ .word uaccess_kernel_put_byte
+ .word uaccess_kernel_get_byte
+ .word uaccess_kernel_put_half
+ .word uaccess_kernel_get_half
+ .word uaccess_kernel_put_word
+ .word uaccess_kernel_get_word
+ .word uaccess_kernel_copy @ copy_from and copy_to are the same routine here
+ .word uaccess_kernel_copy
+ .word uaccess_kernel_clear
+ .word uaccess_kernel_strncpy_from
+ .word uaccess_kernel_strlen
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_byte: @ kernel-space variants: plain stores, error stays untouched
+ stmfd sp!, {lr}
+ strb r0, [r1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_half: @ two byte stores: no halfword store on ARM2/3
+ stmfd sp!, {lr}
+ strb r0, [r1] @ low byte (little-endian)
+ mov r0, r0, lsr #8
+ strb r0, [r1, #1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_word:
+ stmfd sp!, {lr}
+ str r0, [r1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_byte:
+ stmfd sp!, {lr}
+ ldrb r0, [r0]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_half: @ word load then mask to 16 bits
+ stmfd sp!, {lr}
+ ldr r0, [r0]
+ mov r0, r0, lsl #16
+ mov r0, r0, lsr #16
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_word:
+ stmfd sp!, {lr}
+ ldr r0, [r0]
+ ldmfd sp!, {pc}^
+
+
+/* Prototype: int uaccess_kernel_copy(void *to, const char *from, size_t n)
+ * Purpose : copy a block to kernel memory from kernel memory
+ * Params : to - kernel memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+uaccess_kernel_copy: @ args already match memcpy(dst, src, n)
+ stmfd sp!, {lr}
+ bl SYMBOL_NAME(memcpy)
+ mov r0, #0 @ kernel<->kernel copies cannot fault: always 0 bytes left
+ ldmfd sp!, {pc}^
+
+/* Prototype: int uaccess_kernel_clear(void *addr, size_t sz)
+ * Purpose : clear some kernel memory
+ * Params : addr - kernel memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+uaccess_kernel_clear:
+ stmfd sp!, {lr}
+ mov r2, #0
+ cmp r1, #4
+ blt 3f @ sz < 4: go straight to the trailing-byte code (r1 = sz there)
+ ands ip, r0, #3
+ beq 1f
+ cmp ip, #2 @ was #1: ip=1 needs 3 bytes, ip=2 needs 2 -- le/lt vs 2 gives exactly 4-ip stores
+ strb r2, [r0], #1
+ strleb r2, [r0], #1
+ strltb r2, [r0], #1
+ rsb ip, ip, #4
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+ bmi 2f
+ str r2, [r0], #4
+ str r2, [r0], #4
+ b 1b
+2: adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+ strpl r2, [r0], #4
+3: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+ strneb r2, [r0], #1
+ strneb r2, [r0], #1
+ tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+ strneb r2, [r0], #1
+ mov r0, #0
+ ldmfd sp!, {pc}^
+
+/* Prototype: size_t uaccess_kernel_strncpy_from(char *dst, char *src, size_t len)
+ * Purpose : copy a string from kernel memory to kernel memory
+ * Params : dst - kernel memory destination
+ * : src - kernel memory source
+ * : len - maximum length of string
+ * Returns : number of characters copied
+ */
+uaccess_kernel_strncpy_from:
+ stmfd sp!, {lr}
+ mov ip, r2 @ remember len; result = len - bytes left
+1: subs r2, r2, #1
+ bmi 2f @ ran out of space: string truncated, no NUL written
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ teq r3, #0
+ bne 1b
+2: subs r0, ip, r2 @ NOTE(review): count includes the copied NUL -- confirm callers expect that
+ ldmfd sp!, {pc}^
+
+/* Prototype: int uaccess_kernel_strlen(char *str)
+ * Purpose : get length of a string in kernel memory
+ * Params : str - address of string in kernel memory
+ * Returns : length of string *including terminator*, or zero on error
+ */
+uaccess_kernel_strlen: @ kernel strings cannot fault here, so no error path exists
+ stmfd sp!, {lr}
+ mov r2, r0 @ remember start: length = end - start
+1: ldrb r1, [r0], #1
+ teq r1, #0
+ bne 1b
+ sub r0, r0, r2 @ includes the NUL, as documented above
+ ldmfd sp!, {pc}^
+
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 000000000..a1524bee9
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,631 @@
+/*
+ * linux/arch/arm/lib/uaccess.S
+ *
+ * Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * Routines to block copy data to/from user memory
+ * These are highly optimised both for the 4k page size
+ * and for various alignments.
+ */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+
+#define USER(x...) \
+9999: x; \
+ .section __ex_table,"a"; \
+ .align 3; \
+ .long 9999b,9001f; \
+ .previous
+
+#define PAGE_SHIFT 12
+
+/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.c2u_dest_not_aligned: @ copy 1-3 bytes so the user destination becomes word-aligned
+ rsb ip, ip, #4 @ ip = number of bytes to reach alignment
+ cmp ip, #2 @ conditions below select 1 (always), 2 (ge) or 3 (gt) stores
+ ldrb r3, [r1], #1
+USER( strbt r3, [r0], #1) // May fault
+ ldrgeb r3, [r1], #1
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #1
+USER( strgtbt r3, [r0], #1) // May fault
+ sub r2, r2, ip
+ b .c2u_dest_aligned
+
+ENTRY(__arch_copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr} @ saved r2 (= n) is popped into r0 by the fault fixup
+ cmp r2, #4
+ blt .c2u_not_enough
+ ands ip, r0, #3
+ bne .c2u_dest_not_aligned
+.c2u_dest_aligned:
+
+ ands ip, r1, #3 @ dispatch on source alignment (0..3)
+ bne .c2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.c2u_0fupi: subs r2, r2, #4 @ both pointers word-aligned
+ addmi ip, r2, #4
+ bmi .c2u_0nowords
+ ldr r3, [r1], #4
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT // On each page, use a ld/st??t instruction
+ rsb ip, ip, #0 @ ip = bytes until the next user page boundary
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .c2u_0rem8lp
+
+.c2u_0cpy8lp: ldmia r1!, {r3 - r6} @ main loop: 32 bytes per iteration
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ ldmia r1!, {r3 - r6}
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #32
+ bpl .c2u_0cpy8lp
+.c2u_0rem8lp: cmn ip, #16 @ mop up 16/8/4-byte remainders within this page
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6} // Shouldn't fault
+ tst ip, #8
+ ldmneia r1!, {r3 - r4}
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_0fupi
+.c2u_0nowords: teq ip, #0
+ beq .c2u_finished
+.c2u_nowords: cmp ip, #2 @ 1-3 trailing bytes, same ge/gt trick as the head
+ ldrb r3, [r1], #1
+USER( strbt r3, [r0], #1) // May fault
+ ldrgeb r3, [r1], #1
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #1
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_not_enough:
+ movs ip, r2
+ bne .c2u_nowords
+.c2u_finished: mov r0, #0 @ success: 0 bytes not copied
+ LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+
+.c2u_src_not_aligned: @ unaligned source: read aligned words, shift/merge into r3
+ bic r1, r1, #3 @ round src down; r7 carries the partial word across iterations
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt .c2u_3fupi
+ beq .c2u_2fupi
+.c2u_1fupi: subs r2, r2, #4 @ source offset 1: take 3 bytes of r7, 1 of the next word
+ addmi ip, r2, #4
+ bmi .c2u_1nowords
+ mov r3, r7, lsr #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #24
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_1rem8lp
+
+.c2u_1cpy8lp: mov r3, r7, lsr #8 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_1cpy8lp
+.c2u_1rem8lp: tst ip, #8
+ movne r3, r7, lsr #8
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #24
+ movne r4, r4, lsr #8
+ orrne r4, r4, r7, lsl #24
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #24
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_1fupi
+.c2u_1nowords: mov r3, r7, lsr #8 @ trailing bytes: all 1-3 are already in r3
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ movge r3, r3, lsr #8
+USER( strgebt r3, [r0], #1) // May fault
+ movgt r3, r3, lsr #8
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_2fupi: subs r2, r2, #4 @ source offset 2: take 2 bytes of r7, 2 of the next word
+ addmi ip, r2, #4
+ bmi .c2u_2nowords
+ mov r3, r7, lsr #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #16
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_2rem8lp
+
+.c2u_2cpy8lp: mov r3, r7, lsr #16
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_2cpy8lp
+.c2u_2rem8lp: tst ip, #8
+ movne r3, r7, lsr #16
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #16
+ movne r4, r4, lsr #16
+ orrne r4, r4, r7, lsl #16
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #16
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_2fupi
+.c2u_2nowords: mov r3, r7, lsr #16 @ only 2 bytes live in r3: a 3rd comes from [r1]
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ movge r3, r3, lsr #8
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #0 @ third byte = low byte of the next source word
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_3fupi: subs r2, r2, #4 @ source offset 3: take 1 byte of r7, 3 of the next word
+ addmi ip, r2, #4
+ bmi .c2u_3nowords
+ mov r3, r7, lsr #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #8
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_3rem8lp
+
+.c2u_3cpy8lp: mov r3, r7, lsr #24
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_3cpy8lp
+.c2u_3rem8lp: tst ip, #8
+ movne r3, r7, lsr #24
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #8
+ movne r4, r4, lsr #24
+ orrne r4, r4, r7, lsl #8
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #8
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_3fupi
+.c2u_3nowords: mov r3, r7, lsr #24 @ only 1 byte live in r3: rest from the next word
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ ldrge r3, [r1], #0 @ bytes 2-3 come from the next (word-aligned) source word
+USER( strgebt r3, [r0], #1) // May fault
+ movgt r3, r3, lsr #8
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) @ fault: return saved n as bytes not copied
+ .previous
+
+
+
+/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+.cfu_dest_not_aligned: @ mirror of .c2u_dest_not_aligned: faults on the user LOADS now
+ rsb ip, ip, #4
+ cmp ip, #2
+USER( ldrbt r3, [r1], #1) // May fault
+ strb r3, [r0], #1
+USER( ldrgebt r3, [r1], #1) // May fault
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #1) // May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip
+ b .cfu_dest_aligned
+
+ENTRY(__arch_copy_from_user)
+ stmfd sp!, {r2, r4 - r7, lr} @ saved r2 (= n) is popped into r0 by the fault fixup
+ cmp r2, #4
+ blt .cfu_not_enough
+ ands ip, r0, #3
+ bne .cfu_dest_not_aligned
+.cfu_dest_aligned:
+ ands ip, r1, #3 @ dispatch on user-source alignment (0..3)
+ bne .cfu_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.cfu_0fupi: subs r2, r2, #4 @ both pointers word-aligned
+ addmi ip, r2, #4
+ bmi .cfu_0nowords
+USER( ldrt r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT // On each page, use a ld/st??t instruction
+ rsb ip, ip, #0 @ ip = bytes until the next user (source) page boundary
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .cfu_0rem8lp
+
+.cfu_0cpy8lp: ldmia r1!, {r3 - r6} // Shouldn't fault
+ stmia r0!, {r3 - r6}
+ ldmia r1!, {r3 - r6} // Shouldn't fault
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #32
+ bpl .cfu_0cpy8lp
+.cfu_0rem8lp: cmn ip, #16 @ mop up 16/8/4-byte remainders within this page
+ ldmgeia r1!, {r3 - r6} // Shouldn't fault
+ stmgeia r0!, {r3 - r6}
+ tst ip, #8
+ ldmneia r1!, {r3 - r4} // Shouldn't fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ ldrnet r3, [r1], #4 // Shouldn't fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_0fupi
+.cfu_0nowords: teq ip, #0
+ beq .cfu_finished
+.cfu_nowords: cmp ip, #2 @ 1-3 trailing bytes
+USER( ldrbt r3, [r1], #1) // May fault
+ strb r3, [r0], #1
+USER( ldrgebt r3, [r1], #1) // May fault
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #1) // May fault
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_not_enough:
+ movs ip, r2
+ bne .cfu_nowords
+.cfu_finished: mov r0, #0 @ success: 0 bytes not copied
+ LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+
+.cfu_src_not_aligned: @ unaligned user source: read aligned words, shift/merge into r3
+ bic r1, r1, #3 @ round src down; r7 carries the partial word across iterations
+USER( ldrt r7, [r1], #4) // May fault
+ cmp ip, #2
+ bgt .cfu_3fupi
+ beq .cfu_2fupi
+.cfu_1fupi: subs r2, r2, #4 @ source offset 1: take 3 bytes of r7, 1 of the next word
+ addmi ip, r2, #4
+ bmi .cfu_1nowords
+ mov r3, r7, lsr #8
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_1rem8lp
+
+.cfu_1cpy8lp: mov r3, r7, lsr #8 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_1cpy8lp
+.cfu_1rem8lp: tst ip, #8
+ movne r3, r7, lsr #8
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #24
+ movne r4, r4, lsr #8
+ orrne r4, r4, r7, lsl #24
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #8
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_1fupi
+.cfu_1nowords: mov r3, r7, lsr #8 @ trailing bytes: all 1-3 are already in r3
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r3, lsr #8
+ strgeb r3, [r0], #1
+ movgt r3, r3, lsr #8
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_2fupi: subs r2, r2, #4 @ source offset 2: take 2 bytes of r7, 2 of the next word
+ addmi ip, r2, #4
+ bmi .cfu_2nowords
+ mov r3, r7, lsr #16
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_2rem8lp
+
+.cfu_2cpy8lp: mov r3, r7, lsr #16
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_2cpy8lp
+.cfu_2rem8lp: tst ip, #8
+ movne r3, r7, lsr #16
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #16
+ movne r4, r4, lsr #16
+ orrne r4, r4, r7, lsl #16
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #16
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_2fupi
+.cfu_2nowords: mov r3, r7, lsr #16 @ only 2 bytes live in r3: a 3rd comes from [r1]
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r3, lsr #8
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #0) // May fault
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_3fupi: subs r2, r2, #4 @ source offset 3: take 1 byte of r7, 3 of the next word
+ addmi ip, r2, #4
+ bmi .cfu_3nowords
+ mov r3, r7, lsr #24
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_3rem8lp
+
+.cfu_3cpy8lp: mov r3, r7, lsr #24
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_3cpy8lp
+.cfu_3rem8lp: tst ip, #8
+ movne r3, r7, lsr #24
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #8
+ movne r4, r4, lsr #24
+ orrne r4, r4, r7, lsl #8
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #24
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_3fupi
+.cfu_3nowords: mov r3, r7, lsr #24 @ only 1 byte live in r3: rest from the next word
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+USER( ldrget r3, [r1], #0) // May fault
+ strgeb r3, [r0], #1
+ movgt r3, r3, lsr #8
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) @ fault: return saved n as bytes not copied
+ .previous
+
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+ENTRY(__arch_clear_user)
+ stmfd sp!, {r1, lr} @ saved r1 (= sz) is popped into r0 by the fault fixup
+ mov r2, #0
+ cmp r1, #4
+ blt 2f @ sz < 4: go straight to the trailing-byte code (r1 = sz there)
+ ands ip, r0, #3
+ beq 1f
+ cmp ip, #2 @ was #1: ip=1 needs 3 bytes, ip=2 needs 2 -- le/lt vs 2 gives exactly 4-ip stores
+USER( strbt r2, [r0], #1)
+USER( strlebt r2, [r0], #1)
+USER( strltbt r2, [r0], #1)
+ rsb ip, ip, #4
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+USER( strplt r2, [r0], #4)
+USER( strplt r2, [r0], #4)
+ bpl 1b
+ adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+USER( strplt r2, [r0], #4)
+2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+USER( strnebt r2, [r0], #1)
+USER( strnebt r2, [r0], #1)
+ tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+USER( strnebt r2, [r0], #1)
+ mov r0, #0
+ LOADREGS(fd,sp!, {r1, pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, pc})
+ .previous
+
+/* Prototype: int __arch_strlen_user(char *str)
+ * Purpose : get length of a string in user memory
+ * Params : str - address of string in user memory
+ * Returns : length of string *including terminator*, or zero on error
+ */
+ENTRY(__arch_strlen_user)
+ stmfd sp!, {lr}
+ mov r2, r0 @ remember start: length = end - start
+1:
+USER( ldrbt r1, [r0], #1)
+ teq r1, #0
+ bne 1b
+ sub r0, r0, r2 @ includes the NUL, as documented above
+ LOADREGS(fd,sp!, {pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: mov r0, #0 @ fault while scanning: report length 0
+ LOADREGS(fd,sp!,{pc})
+ .previous
+
+/* Prototype: size_t __arch_strncpy_from_user(char *dst, char *src, size_t len)
+ * Purpose : copy a string from user memory to kernel memory
+ * Params : dst - kernel memory destination
+ * : src - user memory source
+ * : len - maximum length of string
+ * Returns : number of characters copied
+ */
+ENTRY(__arch_strncpy_from_user)
+ stmfd sp!, {lr}
+ mov ip, r2 @ remember len; result = len - bytes left
+1: subs r2, r2, #1
+ bmi 2f @ ran out of space: string truncated, no NUL written
+USER( ldrbt r3, [r1], #1)
+ strb r3, [r0], #1
+ teq r3, #0
+ bne 1b
+2: subs r0, ip, r2 @ NOTE(review): count includes the copied NUL -- confirm callers expect that
+ LOADREGS(fd,sp!, {pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: mov r0, #-EFAULT @ fault on the user source
+ LOADREGS(fd,sp!, {pc})
+ .previous
+
+ .align
+
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
new file mode 100644
index 000000000..0488da561
--- /dev/null
+++ b/arch/arm/mm/Makefile
@@ -0,0 +1,36 @@
+#
+# Makefile for the linux arm-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mm.o
+O_OBJS := init.o extable.o fault-$(PROCESSOR).o mm-$(MACHINE).o
+
+ifeq ($(PROCESSOR),armo)
+  O_OBJS += proc-arm2,3.o
+endif
+
+ifeq ($(PROCESSOR),armv)
+  O_OBJS += small_page.o proc-arm6,7.o proc-sa110.o
+endif
+
+include $(TOPDIR)/Rules.make
+
+proc-arm2,3.o: ../lib/constants.h
+proc-arm6,7.o: ../lib/constants.h
+proc-sa110.o: ../lib/constants.h
+
+.PHONY: ../lib/constants.h
+../lib/constants.h:
+ @$(MAKE) -C ../lib constants.h
+
+%.o: %.S
+ifndef CONFIG_BINUTILS_NEW # ifndef takes a variable NAME; "$(CONFIG_BINUTILS_NEW)" tested the wrong variable
+ $(CC) $(CFLAGS) -D__ASSEMBLY__ -E $< | tr ';$$' '\n#' > ..tmp.s
+ $(CC) $(CFLAGS:-pipe=) -c -o $@ ..tmp.s
+ $(RM) ..tmp.s
+endif
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
new file mode 100644
index 000000000..e603b6362
--- /dev/null
+++ b/arch/arm/mm/extable.c
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/arm/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+static inline unsigned long
+search_one_table(const struct exception_table_entry *first,
+		 const struct exception_table_entry *last,
+		 unsigned long value)
+{
+	while (first <= last) { /* binary search: table is sorted by insn address */
+		const struct exception_table_entry *probe;
+		long delta;
+
+		probe = first + (last - first) / 2;
+		delta = probe->insn - value;
+		if (delta == 0)
+			return probe->fixup; /* exact hit: hand back the fixup PC */
+		if (delta < 0)
+			first = probe + 1;
+		else
+			last = probe - 1;
+	}
+	return 0; /* no entry covers this address */
+}
+
+unsigned long
+search_exception_table(unsigned long addr) /* map a faulting kernel PC to its fixup PC, 0 if none */
+{
+	unsigned long ret;
+
+#ifndef CONFIG_MODULES
+	/* There is only the kernel to search. */
+	ret = search_one_table(__start___ex_table, __stop___ex_table-1, addr);
+	if (ret) return ret;
+#else
+	/* The kernel is the last "module" -- no need to treat it special. */
+	struct module *mp;
+	for (mp = module_list; mp != NULL; mp = mp->next) {
+		if (mp->ex_table_start == NULL) /* module without an exception table */
+			continue;
+		ret = search_one_table(mp->ex_table_start,
+				       mp->ex_table_end - 1, addr);
+		if (ret) return ret;
+	}
+#endif
+
+	return 0;
+}
diff --git a/arch/arm/mm/fault-armo.c b/arch/arm/mm/fault-armo.c
new file mode 100644
index 000000000..a0fd65df2
--- /dev/null
+++ b/arch/arm/mm/fault-armo.c
@@ -0,0 +1,159 @@
+/*
+ * linux/arch/arm/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Modifications for ARM processor (c) 1995, 1996 Russell King
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#define FAULT_CODE_FORCECOW 0x80
+#define FAULT_CODE_PREFETCH 0x04
+#define FAULT_CODE_WRITE 0x02
+#define FAULT_CODE_USER 0x01
+
+extern void die_if_kernel(char *msg, struct pt_regs *regs, unsigned int err, unsigned int ret);
+
+static void kernel_page_fault (unsigned long addr, int mode, struct pt_regs *regs,
+			       struct task_struct *tsk, struct mm_struct *mm) /* fatal: dump page tables and die */
+{
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	pgd_t *pgd;
+	if (addr < PAGE_SIZE)
+		printk (KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+	else
+		printk (KERN_ALERT "Unable to handle kernel paging request");
+	printk (" at virtual address %08lx\n", addr);
+	printk (KERN_ALERT "current->tss.memmap = %08lX\n", tsk->tss.memmap);
+	pgd = pgd_offset (mm, addr);
+	printk (KERN_ALERT "*pgd = %08lx", pgd_val (*pgd));
+	if (!pgd_none (*pgd)) { /* walk pgd -> pmd -> pte as far as entries exist */
+		pmd_t *pmd;
+		pmd = pmd_offset (pgd, addr);
+		printk (", *pmd = %08lx", pmd_val (*pmd));
+		if (!pmd_none (*pmd))
+			printk (", *pte = %08lx", pte_val (*pte_offset (pmd, addr)));
+	}
+	printk ("\n");
+	die_if_kernel ("Oops", regs, mode, SIGKILL);
+	do_exit (SIGKILL); /* never returns */
+}
+
+static void
+handle_dataabort (unsigned long addr, int mode, struct pt_regs *regs) /* resolve one faulting address */
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	unsigned long fixup;
+
+	lock_kernel();
+	tsk = current;
+	mm = tsk->mm;
+
+	down(&mm->mmap_sem);
+	vma = find_vma (mm, addr);
+	if (!vma)
+		goto bad_area;
+	if (addr >= vma->vm_start)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack (vma, addr)) /* try to grow the stack vma down */
+		goto bad_area;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	if (!(mode & FAULT_CODE_WRITE)) { /* not a write: need read or exec permission */
+		if (!(vma->vm_flags & (VM_READ|VM_EXEC)))
+			goto bad_area;
+	} else {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	}
+	handle_mm_fault (tsk, vma, addr, mode & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW));
+	up(&mm->mmap_sem);
+	goto out;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up(&mm->mmap_sem);
+	if (mode & FAULT_CODE_USER) {
+extern int console_loglevel;
+cli(); /* NOTE(review): debug leftover -- irqs left disabled on this path */
+		tsk->tss.error_code = mode;
+		tsk->tss.trap_no = 14;
+console_loglevel = 9; /* NOTE(review): debug leftover -- forces verbose console */
+		printk ("%s: memory violation at pc=0x%08lx, lr=0x%08lx (bad address=0x%08lx, code %d)\n",
+			tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
+//#ifdef DEBUG
+		show_regs (regs);
+		c_backtrace (regs->ARM_fp, 0);
+//#endif
+		force_sig(SIGSEGV, tsk);
+while (1); /* NOTE(review): deliberate hang (debug leftover?) -- the SIGSEGV is never delivered */
+		goto out;
+	}
+
+	/* Are we prepared to handle this kernel fault? */
+	if ((fixup = search_exception_table(regs->ARM_pc)) != 0) {
+		printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
+			tsk->comm, regs->ARM_pc, addr, fixup);
+		regs->ARM_pc = fixup; /* resume at the fixup handler */
+		goto out;
+	}
+
+
+	kernel_page_fault (addr, mode, regs, tsk, mm);
+out:
+	unlock_kernel();
+}
+
+/*
+ * Handle a data abort. Note that we have to handle a range of addresses
+ * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force
+ * a copy-on-write
+ */
+asmlinkage void
+do_DataAbort (unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
+{
+	handle_dataabort (min_addr, mode, regs);
+
+	if ((min_addr ^ max_addr) >> PAGE_SHIFT) /* range spans two pages: fault the second too */
+		handle_dataabort (max_addr, mode | FAULT_CODE_FORCECOW, regs);
+}
+
+asmlinkage int
+do_PrefetchAbort (unsigned long addr, int mode, struct pt_regs *regs) /* instruction-fetch fault */
+{
+#if 0
+	if (the memc mapping for this page exists - can check now...) {
+		printk ("Page in, but got abort (undefined instruction?)\n");
+		return 0;
+	}
+#endif
+	handle_dataabort (addr, mode, regs); /* same resolution path as a data fault */
+	return 1;
+}
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
new file mode 100644
index 000000000..2925761fb
--- /dev/null
+++ b/arch/arm/mm/fault-armv.c
@@ -0,0 +1,200 @@
+/*
+ * linux/arch/arm/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Modifications for ARM processor (c) 1995, 1996 Russell King
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#define FAULT_CODE_READ 0x02
+#define FAULT_CODE_USER 0x01
+
+extern void die_if_kernel(char *msg, struct pt_regs *regs, unsigned int err, unsigned int ret);
+
+static void kernel_page_fault (unsigned long addr, int mode, struct pt_regs *regs,
+			       struct task_struct *tsk, struct mm_struct *mm) /* fatal: dump page tables and die (duplicate of the armo version) */
+{
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	pgd_t *pgd;
+	if (addr < PAGE_SIZE)
+		printk (KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+	else
+		printk (KERN_ALERT "Unable to handle kernel paging request");
+	printk (" at virtual address %08lx\n", addr);
+	printk (KERN_ALERT "current->tss.memmap = %08lX\n", tsk->tss.memmap);
+	pgd = pgd_offset (mm, addr);
+	printk (KERN_ALERT "*pgd = %08lx", pgd_val (*pgd));
+	if (!pgd_none (*pgd)) { /* walk pgd -> pmd -> pte as far as entries exist */
+		pmd_t *pmd;
+		pmd = pmd_offset (pgd, addr);
+		printk (", *pmd = %08lx", pmd_val (*pmd));
+		if (!pmd_none (*pmd))
+			printk (", *pte = %08lx", pte_val (*pte_offset (pmd, addr)));
+	}
+	printk ("\n");
+	die_if_kernel ("Oops", regs, mode, SIGKILL);
+	do_exit (SIGKILL); /* never returns */
+}
+
+static void page_fault (unsigned long addr, int mode, struct pt_regs *regs) /* resolve one faulting address */
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	unsigned long fixup;
+
+	lock_kernel();
+	tsk = current;
+	mm = tsk->mm;
+
+	down(&mm->mmap_sem);
+	vma = find_vma (mm, addr);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= addr)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack (vma, addr)) /* try to grow the stack vma down */
+		goto bad_area;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	if (mode & FAULT_CODE_READ) { /* read? */
+		if (!(vma->vm_flags & (VM_READ|VM_EXEC)))
+			goto bad_area;
+	} else {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	}
+	handle_mm_fault (tsk, vma, addr & PAGE_MASK, !(mode & FAULT_CODE_READ)); /* write_access = !read */
+	up(&mm->mmap_sem);
+	goto out;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up(&mm->mmap_sem);
+	if (mode & FAULT_CODE_USER) {
+		tsk->tss.error_code = mode;
+		tsk->tss.trap_no = 14;
+		printk ("%s: memory violation at pc=0x%08lx, lr=0x%08lx (bad address=0x%08lx, code %d)\n",
+			tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
+#ifdef DEBUG
+		show_regs (regs);
+		c_backtrace (regs->ARM_fp, regs->ARM_cpsr);
+#endif
+		force_sig(SIGSEGV, tsk);
+		goto out;
+	}
+
+	/* Are we prepared to handle this kernel fault? */
+	if ((fixup = search_exception_table(regs->ARM_pc)) != 0) {
+		printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
+			tsk->comm, regs->ARM_pc, addr, fixup);
+		regs->ARM_pc = fixup; /* resume at the fixup handler */
+		goto out;
+	}
+
+	kernel_page_fault (addr, mode, regs, tsk, mm);
+out:
+	unlock_kernel();
+}
+
+/*
+ * Handle a data abort. Note that we have to handle a range of addresses
+ * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force
+ * a copy-on-write
+ */
+asmlinkage void
+do_DataAbort (unsigned long addr, int fsr, int error_code, struct pt_regs *regs) /* dispatch on fault status register */
+{
+	if (user_mode(regs))
+		error_code |= FAULT_CODE_USER;
+
+#define DIE(signr,nam)\
+	force_sig(signr, current);\
+	die_if_kernel(nam, regs, fsr, signr);\
+	break;
+
+	switch (fsr & 15) { /* low 4 bits of the FSR encode the fault type */
+	case 2:
+		DIE(SIGKILL, "Terminal exception")
+	case 0:
+		DIE(SIGSEGV, "Vector exception")
+	case 1:
+	case 3:
+		DIE(SIGBUS, "Alignment exception")
+	case 12:
+	case 14:
+		DIE(SIGBUS, "External abort on translation")
+	case 9:
+	case 11:
+		DIE(SIGSEGV, "Domain fault")
+	case 13:/* permission fault on section */
+#ifndef DEBUG /* NOTE(review): guards a debug stack dump yet tests !DEBUG -- sense looks inverted, confirm */
+	{
+		unsigned int i, j, a;
+static int count=2; /* NOTE(review): debug leftover -- hangs the machine on the 3rd such fault */
+if (count-- == 0) while (1);
+		a = regs->ARM_sp;
+		for (j = 0; j < 10; j++) { /* dump 10 lines x 8 words of the faulting stack */
+			printk ("%08x: ", a);
+			for (i = 0; i < 8; i += 1, a += 4)
+				printk ("%08lx ", *(unsigned long *)a);
+			printk ("\n");
+		}
+	}
+#endif
+		DIE(SIGSEGV, "Permission fault")
+
+	case 15:/* permission fault on page */
+	case 5:	/* page-table entry descriptor fault */
+	case 7:	/* first-level descriptor fault */
+		page_fault (addr, error_code, regs);
+		break;
+	case 4:
+	case 6:
+		DIE(SIGBUS, "External abort on linefetch")
+	case 8:
+	case 10:
+		DIE(SIGBUS, "External abort on non-linefetch")
+	}
+}
+
+asmlinkage int
+do_PrefetchAbort (unsigned long addr, struct pt_regs *regs) /* instruction-fetch fault */
+{
+#if 0
+	/* does this still apply ? */
+	if (the memc mapping for this page exists - can check now...) {
+		printk ("Page in, but got abort (undefined instruction?)\n");
+		return 0;
+	}
+#endif
+	page_fault (addr, FAULT_CODE_USER|FAULT_CODE_READ, regs); /* treat as a user read fault */
+	return 1;
+}
+
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
new file mode 100644
index 000000000..b9e777a32
--- /dev/null
+++ b/arch/arm/mm/init.c
@@ -0,0 +1,215 @@
+/*
+ * linux/arch/arm/mm/init.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/hardware.h>
+#include <asm/proc/mm-init.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+extern char _etext, _stext, _edata, __bss_start, _end;
+extern char __init_begin, __init_end;
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * for a process dying in kernel mode, possibly leaving a inode
+ * unused etc..
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+#if PTRS_PER_PTE != 1
+unsigned long *empty_bad_page_table;
+
+pte_t *__bad_pagetable(void)
+{
+ int i;
+ pte_t bad_page;
+
+ bad_page = BAD_PAGE;
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ empty_bad_page_table[i] = (unsigned long)pte_val(bad_page);
+ return (pte_t *) empty_bad_page_table;
+}
+#endif
+
+unsigned long *empty_zero_page;
+unsigned long *empty_bad_page;
+
+pte_t __bad_page(void)
+{
+ memzero (empty_bad_page, PAGE_SIZE);
+ return pte_nocache(pte_mkdirty(mk_pte((unsigned long) empty_bad_page, PAGE_SHARED)));
+}
+
+void show_mem(void)
+{
+ extern void show_net_buffers(void);
+ int i,free = 0,total = 0,reserved = 0;
+ int shared = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = MAP_NR(high_memory);
+ while (i-- > 0) {
+ total++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (!atomic_read(&mem_map[i].count))
+ free++;
+ else
+ shared += atomic_read(&mem_map[i].count) - 1;
+ }
+ printk("%d pages of RAM\n",total);
+ printk("%d free pages\n",free);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ show_buffers();
+#ifdef CONFIG_NET
+ show_net_buffers();
+#endif
+}
+
+/*
+ * paging_init() sets up the page tables...
+ */
+unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ extern unsigned long free_area_init(unsigned long, unsigned long);
+
+ start_mem = PAGE_ALIGN(start_mem);
+ empty_zero_page = (unsigned long *)start_mem;
+ start_mem += PAGE_SIZE;
+ empty_bad_page = (unsigned long *)start_mem;
+ start_mem += PAGE_SIZE;
+#if PTRS_PER_PTE != 1
+ empty_bad_page_table = (unsigned long *)start_mem;
+ start_mem += PTRS_PER_PTE * sizeof (void *);
+#endif
+ memzero (empty_zero_page, PAGE_SIZE);
+ start_mem = setup_pagetables (start_mem, end_mem);
+
+ flush_tlb_all ();
+ update_mm_cache_all ();
+
+ return free_area_init (start_mem, end_mem);
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void mem_init(unsigned long start_mem, unsigned long end_mem)
+{
+ extern void sound_init(void);
+ int codepages = 0;
+ int reservedpages = 0;
+ int datapages = 0;
+ int initpages = 0;
+ unsigned long tmp;
+
+ end_mem &= PAGE_MASK;
+ high_memory = (void *)end_mem;
+ max_mapnr = num_physpages = MAP_NR(end_mem);
+
+ /* mark usable pages in the mem_map[] */
+ mark_usable_memory_areas(&start_mem, end_mem);
+
+ for (tmp = PAGE_OFFSET; tmp < end_mem ; tmp += PAGE_SIZE) {
+ if (PageReserved(mem_map+MAP_NR(tmp))) {
+ if (tmp >= KERNTOPHYS(_stext) &&
+ tmp < KERNTOPHYS(_edata)) {
+ if (tmp < KERNTOPHYS(_etext))
+ codepages++;
+ else
+ datapages++;
+ } else if (tmp >= KERNTOPHYS(__init_begin)
+ && tmp < KERNTOPHYS(__init_end))
+ initpages++;
+ else if (tmp >= KERNTOPHYS(__bss_start)
+ && tmp < (unsigned long) start_mem)
+ datapages++;
+ else
+ reservedpages++;
+ continue;
+ }
+ atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
+#endif
+ free_page(tmp);
+ }
+ printk ("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+ (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codepages << (PAGE_SHIFT-10),
+ reservedpages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10),
+ initpages << (PAGE_SHIFT-10));
+}
+
+void free_initmem (void)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+ atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+ free_page(addr);
+ }
+ printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+ int i;
+
+ i = MAP_NR(high_memory);
+ val->totalram = 0;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages << PAGE_SHIFT;
+ val->bufferram = buffermem;
+ while (i-- > 0) {
+ if (PageReserved(mem_map+i))
+ continue;
+ val->totalram++;
+ if (!atomic_read(&mem_map[i].count))
+ continue;
+ val->sharedram += atomic_read(&mem_map[i].count) - 1;
+ }
+ val->totalram <<= PAGE_SHIFT;
+ val->sharedram <<= PAGE_SHIFT;
+}
+
diff --git a/arch/arm/mm/mm-a5k.c b/arch/arm/mm/mm-a5k.c
new file mode 100644
index 000000000..3906a29ec
--- /dev/null
+++ b/arch/arm/mm/mm-a5k.c
@@ -0,0 +1,7 @@
+/*
+ * arch/arm/mm/mm-a5k.c
+ *
+ * Extra MM routines for the Archimedes architecture
+ *
+ * Copyright (C) 1998 Russell King
+ */
diff --git a/arch/arm/mm/mm-arc.c b/arch/arm/mm/mm-arc.c
new file mode 100644
index 000000000..4a4b4718c
--- /dev/null
+++ b/arch/arm/mm/mm-arc.c
@@ -0,0 +1,7 @@
+/*
+ * arch/arm/mm/mm-arc.c
+ *
+ * Extra MM routines for the Archimedes architecture
+ *
+ * Copyright (C) 1998 Russell King
+ */
diff --git a/arch/arm/mm/mm-ebsa110.c b/arch/arm/mm/mm-ebsa110.c
new file mode 100644
index 000000000..907a3f399
--- /dev/null
+++ b/arch/arm/mm/mm-ebsa110.c
@@ -0,0 +1,7 @@
+/*
+ * arch/arm/mm/mm-ebsa110.c
+ *
+ * Extra MM routines for the EBSA-110 architecture
+ *
+ * Copyright (C) 1998 Russell King
+ */
diff --git a/arch/arm/mm/mm-nexuspci.c b/arch/arm/mm/mm-nexuspci.c
new file mode 100644
index 000000000..bbae80b19
--- /dev/null
+++ b/arch/arm/mm/mm-nexuspci.c
@@ -0,0 +1,7 @@
+/*
+ * arch/arm/mm/mm-nexuspci.c
+ *
+ * Extra MM routines for the NexusPCI architecture
+ *
+ * Copyright (C) 1998 Russell King
+ */
diff --git a/arch/arm/mm/mm-rpc.c b/arch/arm/mm/mm-rpc.c
new file mode 100644
index 000000000..5eccb1f81
--- /dev/null
+++ b/arch/arm/mm/mm-rpc.c
@@ -0,0 +1,80 @@
+/*
+ * arch/arm/mm/mm-rpc.c
+ *
+ * Extra MM routines for RiscPC architecture
+ *
+ * Copyright (C) 1998 Russell King
+ */
+
+#include <asm/setup.h>
+
+#define NR_DRAM_BANKS 4
+#define NR_VRAM_BANKS 1
+
+#define NR_BANKS (NR_DRAM_BANKS + NR_VRAM_BANKS)
+
+#define FIRST_BANK 0
+#define FIRST_DRAM_BANK 0
+#define FIRST_VRAM_BANK NR_DRAM_BANKS
+
+#define BANK_SHIFT 26
+#define FIRST_DRAM_ADDR 0x10000000
+
+#define PHYS_TO_BANK(x) (((x) >> BANK_SHIFT) & (NR_DRAM_BANKS - 1))
+#define BANK_TO_PHYS(x) ((FIRST_DRAM_ADDR) + \
+ (((x) - FIRST_DRAM_BANK) << BANK_SHIFT))
+
+struct ram_bank {
+ unsigned int virt_addr; /* virtual address of the *end* of this bank + 1 */
+ signed int phys_offset; /* offset to physical address of this bank */
+};
+
+static struct ram_bank rambank[NR_BANKS];
+
+/*
+ * Return the physical (0x10000000 -> 0x20000000) address of
+ * the virtual (0xc0000000 -> 0xd0000000) address
+ */
+unsigned long __virt_to_phys(unsigned long vpage)
+{
+ unsigned int bank = FIRST_BANK;
+
+ while (bank < NR_BANKS && vpage >= rambank[bank].virt_addr)
+ bank ++;
+
+ return vpage - rambank[bank].phys_offset;
+}
+
+/*
+ * Return the virtual (0xc0000000 -> 0xd0000000) address of
+ * the physical (0x10000000 -> 0x20000000) address
+ */
+unsigned long __phys_to_virt(unsigned long phys)
+{
+ unsigned int bank;
+
+ if (phys >= FIRST_DRAM_ADDR)
+ bank = PHYS_TO_BANK(phys);
+ else
+ bank = FIRST_VRAM_BANK;
+
+ return phys + rambank[bank].phys_offset;
+}
+
+void init_dram_banks(struct param_struct *params)
+{
+ unsigned int bank;
+ unsigned int bytes = 0;
+
+ for (bank = FIRST_DRAM_BANK; bank < NR_DRAM_BANKS; bank++) {
+ rambank[bank].phys_offset = PAGE_OFFSET + bytes
+ - BANK_TO_PHYS(bank);
+
+ bytes += params->u1.s.pages_in_bank[bank - FIRST_DRAM_BANK] * PAGE_SIZE;
+
+ rambank[bank].virt_addr = PAGE_OFFSET + bytes;
+ }
+
+ rambank[4].phys_offset = 0xd6000000;
+ rambank[4].virt_addr = 0xd8000000;
+}
diff --git a/arch/arm/mm/proc-arm2,3.S b/arch/arm/mm/proc-arm2,3.S
new file mode 100644
index 000000000..916bab104
--- /dev/null
+++ b/arch/arm/mm/proc-arm2,3.S
@@ -0,0 +1,494 @@
+/*
+ * linux/arch/arm/mm/arm2,3.S: MMU functions for ARM2,3
+ *
+ * (C) 1997 Russell King
+ *
+ * These are the low level assembler for performing cache
+ * and memory functions on ARM2, ARM250 and ARM3 processors.
+ */
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include "../lib/constants.h"
+
+/*
+ * Code common to all processors - MEMC specific not processor
+ * specific!
+ */
+
+LC1: .word SYMBOL_NAME(page_nr)
+/*
+ * Function: arm2_3_update_map (struct task_struct *tsk)
+ *
+ * Params : tsk Task structure to be updated
+ *
+ * Purpose : Re-generate memc maps for task from its pseudo page tables
+ */
+_arm2_3_update_map:
+ mov ip, sp
+ stmfd sp!, {r4 - r6, fp, ip, lr, pc}
+ sub fp, ip, #4
+ add r1, r0, #TSS_MEMCMAP
+ ldr r2, LC1
+ ldr r2, [r2]
+ mov r3, #0x03f00000
+ orr r3, r3, #0x00000f00
+ orr r4, r3, #1
+ orr r5, r3, #2
+ orr r6, r3, #3
+1: stmia r1!, {r3, r4, r5, r6} @ Default mapping (null mapping)
+ add r3, r3, #4
+ add r4, r4, #4
+ add r5, r5, #4
+ add r6, r6, #4
+ stmia r1!, {r3, r4, r5, r6} @ Default mapping (null mapping)
+ add r3, r3, #4
+ add r4, r4, #4
+ add r5, r5, #4
+ add r6, r6, #4
+ subs r2, r2, #8
+ bhi 1b
+
+ adr r2, Lphystomemc32 @ r2 = conversion table to logical page number
+ ldr r4, [r0, #TSS_MEMMAP] @ r4 = active mem map
+ add r5, r4, #32 << 2 @ r5 = end of active mem map
+ add r0, r0, #TSS_MEMCMAP @ r0 = memc map
+
+ mov r6, #0
+2: ldmia r4!, {r1, r3}
+ tst r1, #PAGE_PRESENT
+ blne update_map_pgd
+ add r6, r6, #32 << 2
+ tst r3, #PAGE_PRESENT
+ blne update_map_pgd3
+ add r6, r6, #32 << 2
+ cmp r4, r5
+ blt 2b
+ ldmea fp, {r4 - r6, fp, sp, pc}^
+
+@ r0,r2,r3,r4,r5 = preserve
+@ r1,ip = available
+@ r0 = memc map
+@ r1 = pgd entry
+@ r2 = conversion table
+@ r6 = logical page no << 2
+
+update_map_pgd3:
+ mov r1, r3
+update_map_pgd: stmfd sp!, {r3, r4, r5, lr}
+ bic r4, r1, #3 @ r4 = page table
+ sub r5, r6, #1 << 2
+ add ip, r4, #32 << 2 @ ip = end of page table
+
+1: ldr r1, [r4], #4 @ get entry
+ add r5, r5, #1 << 2
+ tst r1, #PAGE_PRESENT @ page present?
+ blne Lconvertmemc @ yes
+ ldr r1, [r4], #4 @ get entry
+ add r5, r5, #1 << 2
+ tst r1, #PAGE_PRESENT @ page present?
+ blne Lconvertmemc @ yes
+ ldr r1, [r4], #4 @ get entry
+ add r5, r5, #1 << 2
+ tst r1, #PAGE_PRESENT @ page present?
+ blne Lconvertmemc @ yes
+ ldr r1, [r4], #4 @ get entry
+ add r5, r5, #1 << 2
+ tst r1, #PAGE_PRESENT @ page present?
+ blne Lconvertmemc @ yes
+ cmp r4, ip
+ blt 1b
+ ldmfd sp!, {r3, r4, r5, pc}^
+
+Lconvertmemc: mov r3, r1, lsr #13 @
+ and r3, r3, #0x3fc @ Convert to memc physical page no
+ ldr r3, [r2, r3] @
+
+ tst r1, #PAGE_OLD|PAGE_NOT_USER @ check for MEMC read
+ biceq r3, r3, #0x200 @
+ tsteq r1, #PAGE_READONLY|PAGE_CLEAN @ check for MEMC write
+ biceq r3, r3, #0x300 @
+
+ orr r3, r3, r5, lsl #13
+ and r1, r5, #0x01800000 >> 13
+ orr r3, r3, r1
+
+ and r1, r3, #255
+ str r3, [r0, r1, lsl #2]
+ movs pc, lr
+
+/*
+ * Function: arm2_3_update_cache (struct task_struct *tsk, unsigned long addr, pte_t pte)
+ * Params : tsk Task to update
+ * address Address of fault.
+ * pte New PTE at address
+ * Purpose : Update the mapping for this address.
+ * Notes : does the ARM3 run faster if you dont use the result in the next instruction?
+ */
+_arm2_3_update_cache:
+ tst r2, #PAGE_PRESENT
+ moveqs pc, lr
+ mov r3, r2, lsr #13 @ Physical page no.
+ adr ip, Lphystomemc32 @ Convert to logical page number
+ and r3, r3, #0x3fc
+ mov r1, r1, lsr #15
+ ldr r3, [ip, r3] @ Convert to memc phys page no.
+ tst r2, #PAGE_OLD|PAGE_NOT_USER
+ biceq r3, r3, #0x200
+ tsteq r2, #PAGE_READONLY|PAGE_CLEAN
+ biceq r3, r3, #0x300
+ mov ip, sp, lsr #13
+ orr r3, r3, r1, lsl #15
+ mov ip, ip, lsl #13
+ and r1, r1, #0x300
+ teq ip, r0
+ orr r3, r3, r1, lsl #2
+ add r0, r0, #TSS_MEMCMAP
+ and r2, r3, #255
+ streqb r3, [r3]
+ str r3, [r0, r2, lsl #2]
+ movs pc, lr
+
+#define PCD(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, aa, ab, ac, ad, ae, af) \
+ .long a0| 0x03800300; .long a1| 0x03800300;\
+ .long a2| 0x03800300; .long a3| 0x03800300;\
+ .long a4| 0x03800300; .long a5| 0x03800300;\
+ .long a6| 0x03800300; .long a7| 0x03800300;\
+ .long a8| 0x03800300; .long a9| 0x03800300;\
+ .long aa| 0x03800300; .long ab| 0x03800300;\
+ .long ac| 0x03800300; .long ad| 0x03800300;\
+ .long ae| 0x03800300; .long af| 0x03800300
+
+@ Table to map from page number to vidc page number
+Lphystomemc32: PCD(0x00,0x08,0x10,0x18,0x20,0x28,0x30,0x38,0x40,0x48,0x50,0x58,0x60,0x68,0x70,0x78)
+ PCD(0x01,0x09,0x11,0x19,0x21,0x29,0x31,0x39,0x41,0x49,0x51,0x59,0x61,0x69,0x71,0x79)
+ PCD(0x04,0x0C,0x14,0x1C,0x24,0x2C,0x34,0x3C,0x44,0x4C,0x54,0x5C,0x64,0x6C,0x74,0x7C)
+ PCD(0x05,0x0D,0x15,0x1D,0x25,0x2D,0x35,0x3D,0x45,0x4D,0x55,0x5D,0x65,0x6D,0x75,0x7D)
+ PCD(0x02,0x0A,0x12,0x1A,0x22,0x2A,0x32,0x3A,0x42,0x4A,0x52,0x5A,0x62,0x6A,0x72,0x7A)
+ PCD(0x03,0x0B,0x13,0x1B,0x23,0x2B,0x33,0x3B,0x43,0x4B,0x53,0x5B,0x63,0x6B,0x73,0x7B)
+ PCD(0x06,0x0E,0x16,0x1E,0x26,0x2E,0x36,0x3E,0x46,0x4E,0x56,0x5E,0x66,0x6E,0x76,0x7E)
+ PCD(0x07,0x0F,0x17,0x1F,0x27,0x2F,0x37,0x3F,0x47,0x4F,0x57,0x5F,0x67,0x6F,0x77,0x7F)
+ PCD(0x80,0x88,0x90,0x98,0xA0,0xA8,0xB0,0xB8,0xC0,0xC8,0xD0,0xD8,0xE0,0xE8,0xF0,0xF8)
+ PCD(0x81,0x89,0x91,0x99,0xA1,0xA9,0xB1,0xB9,0xC1,0xC9,0xD1,0xD9,0xE1,0xE9,0xF1,0xF9)
+ PCD(0x84,0x8C,0x94,0x9C,0xA4,0xAC,0xB4,0xBC,0xC4,0xCC,0xD4,0xDC,0xE4,0xEC,0xF4,0xFC)
+ PCD(0x85,0x8D,0x95,0x9D,0xA5,0xAD,0xB5,0xBD,0xC5,0xCD,0xD5,0xDD,0xE5,0xED,0xF5,0xFD)
+ PCD(0x82,0x8A,0x92,0x9A,0xA2,0xAA,0xB2,0xBA,0xC2,0xCA,0xD2,0xDA,0xE2,0xEA,0xF2,0xFA)
+ PCD(0x83,0x8B,0x93,0x9B,0xA3,0xAB,0xB3,0xBB,0xC3,0xCB,0xD3,0xDB,0xE3,0xEB,0xF3,0xFB)
+ PCD(0x86,0x8E,0x96,0x9E,0xA6,0xAE,0xB6,0xBE,0xC6,0xCE,0xD6,0xDE,0xE6,0xEE,0xF6,0xFE)
+ PCD(0x87,0x8F,0x97,0x9F,0xA7,0xAF,0xB7,0xBF,0xC7,0xCF,0xD7,0xDF,0xE7,0xEF,0xF7,0xFF)
+
+/*
+ * Function: arm2_3_data_abort ()
+ *
+ * Params : r0 = address of aborted instruction
+ *
+ * Purpose :
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR
+ * : r2 != 0 if writing
+ */
+
+_arm2_3_data_abort:
+ movs pc, lr
+
+_arm2_3_check_bugs:
+ movs pc, lr
+
+/*
+ * Processor specific - ARM2
+ */
+
+LC0: .word SYMBOL_NAME(page_nr)
+/*
+ * Function: arm2_switch_to (struct task_struct *prev, struct task_struct *next)
+ *
+ * Params : prev Old task structure
+ * : next New task structure for process to run
+ *
+ * Purpose : Perform a task switch, saving the old processes state, and restoring
+ * the new.
+ *
+ * Notes : We don't fiddle with the FP registers here - we postpone this until
+ * the new task actually uses FP. This way, we don't swap FP for tasks
+ * that do not require it.
+ */
+_arm2_switch_to:
+ stmfd sp!, {r4 - r9, fp, lr} @ Store most regs on stack
+ str sp, [r0, #TSS_SAVE] @ Save sp_SVC
+ ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
+ mov r4, r1
+ add r0, r1, #TSS_MEMCMAP @ Remap MEMC
+ ldr r1, LC0
+ ldr r1, [r1]
+1: ldmia r0!, {r2, r3, r5, r6}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb r5, [r5]
+ strb r6, [r6]
+ ldmia r0!, {r2, r3, r5, r6}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb r5, [r5]
+ strb r6, [r6]
+ subs r1, r1, #8
+ bhi 1b
+ ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
+
+/*
+ * Function: arm2_remap_memc (struct task_struct *tsk)
+ *
+ * Params : tsk Task structure specifing the new mapping structure
+ *
+ * Purpose : remap MEMC tables
+ */
+_arm2_remap_memc:
+ stmfd sp!, {lr}
+ add r0, r0, #TSS_MEMCMAP
+ ldr r1, LC0
+ ldr r1, [r1]
+1: ldmia r0!, {r2, r3, ip, lr}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb ip, [ip]
+ strb lr, [lr]
+ ldmia r0!, {r2, r3, ip, lr}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb ip, [ip]
+ strb lr, [lr]
+ subs r1, r1, #8
+ bhi 1b
+ ldmfd sp!, {pc}^
+
+/*
+ * Function: arm2_xchg_1 (int new, volatile void *ptr)
+ *
+ * Params : new New value to store at...
+ * : ptr pointer to byte-wide location
+ *
+ * Purpose : Performs an exchange operation
+ *
+ * Returns : Original byte data at 'ptr'
+ *
+ * Notes : This will have to be changed if we ever use multi-processing using these
+ * processors, but that is very unlikely...
+ */
+_arm2_xchg_1: mov r2, pc
+ orr r2, r2, #I_BIT
+ teqp r2, #0
+ ldrb r2, [r1]
+ strb r0, [r1]
+ mov r0, r2
+ movs pc, lr
+
+/*
+ * Function: arm2_xchg_4 (int new, volatile void *ptr)
+ *
+ * Params : new New value to store at...
+ * : ptr pointer to word-wide location
+ *
+ * Purpose : Performs an exchange operation
+ *
+ * Returns : Original word data at 'ptr'
+ *
+ * Notes : This will have to be changed if we ever use multi-processing using these
+ * processors, but that is very unlikely...
+ */
+_arm2_xchg_4: mov r2, pc
+ orr r2, r2, #I_BIT
+ teqp r2, #0
+ ldr r2, [r1]
+ str r0, [r1]
+ mov r0, r2
+/*
+ * fall through
+ */
+/*
+ * Function: arm2_proc_init (void)
+ * : arm2_proc_fin (void)
+ *
+ * Purpose : Initialise / finalise processor specifics (none required)
+ */
+_arm2_proc_init:
+_arm2_proc_fin: movs pc, lr
+/*
+ * Function: arm3_switch_to (struct task_struct *prev, struct task_struct *next)
+ *
+ * Params : prev Old task structure
+ * : next New task structure for process to run
+ *
+ * Purpose : Perform a task switch, saving the old processes state, and restoring
+ * the new.
+ *
+ * Notes : We don't fiddle with the FP registers here - we postpone this until
+ * the new task actually uses FP. This way, we don't swap FP for tasks
+ * that do not require it.
+ */
+_arm3_switch_to:
+ stmfd sp!, {r4 - r9, fp, lr} @ Store most regs on stack
+ str sp, [r0, #TSS_SAVE] @ Save sp_SVC
+ ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
+ mov r4, r1
+ add r0, r1, #TSS_MEMCMAP @ Remap MEMC
+ ldr r1, LC0
+ ldr r1, [r1]
+1: ldmia r0!, {r2, r3, r5, r6}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb r5, [r5]
+ strb r6, [r6]
+ ldmia r0!, {r2, r3, r5, r6}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb r5, [r5]
+ strb r6, [r6]
+ subs r1, r1, #8
+ bhi 1b
+ mcr p15, 0, r0, c1, c0, 0 @ flush cache
+ ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
+/*
+ * Function: arm3_remap_memc (struct task_struct *tsk)
+ *
+ * Params : tsk Task structure specifing the new mapping structure
+ *
+ * Purpose : remap MEMC tables
+ */
+_arm3_remap_memc:
+ stmfd sp!, {lr}
+ add r0, r0, #TSS_MEMCMAP
+ ldr r1, LC0
+ ldr r1, [r1]
+1: ldmia r0!, {r2, r3, ip, lr}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb ip, [ip]
+ strb lr, [lr]
+ ldmia r0!, {r2, r3, ip, lr}
+ strb r2, [r2]
+ strb r3, [r3]
+ strb ip, [ip]
+ strb lr, [lr]
+ subs r1, r1, #8
+ bhi 1b
+ mcr p15, 0, r0, c1, c0, 0 @ flush cache
+ ldmfd sp!, {pc}^
+
+/*
+ * Function: arm3_proc_init (void)
+ *
+ * Purpose : Initialise the cache control registers
+ */
+_arm3_proc_init:
+ mov r0, #0x001f0000
+ orr r0, r0, #0x0000ff00
+ orr r0, r0, #0x000000ff
+ mcr p15, 0, r0, c3, c0
+ mcr p15, 0, r0, c4, c0
+ mov r0, #0
+ mcr p15, 0, r0, c5, c0
+ mov r0, #3
+ mcr p15, 0, r0, c1, c0
+ mcr p15, 0, r0, c2, c0
+ movs pc, lr
+
+/*
+ * Function: arm3_proc_fin (void)
+ *
+ * Purpose : Finalise processor (disable caches)
+ */
+_arm3_proc_fin: mov r0, #2
+ mcr p15, 0, r0, c2, c0
+ movs pc, lr
+
+/*
+ * Function: arm3_xchg_1 (int new, volatile void *ptr)
+ *
+ * Params : new New value to store at...
+ * : ptr pointer to byte-wide location
+ *
+ * Purpose : Performs an exchange operation
+ *
+ * Returns : Original byte data at 'ptr'
+ */
+_arm3_xchg_1: swpb r0, r0, [r1]
+ movs pc, lr
+
+/*
+ * Function: arm3_xchg_4 (int new, volatile void *ptr)
+ *
+ * Params : new New value to store at...
+ * : ptr pointer to word-wide location
+ *
+ * Purpose : Performs an exchange operation
+ *
+ * Returns : Original word data at 'ptr'
+ */
+_arm3_xchg_4: swp r0, r0, [r1]
+ movs pc, lr
+
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ * come through these
+ */
+_arm2_name:
+ .ascii "arm2\0"
+ .align
+
+ .globl SYMBOL_NAME(arm2_processor_functions)
+SYMBOL_NAME(arm2_processor_functions):
+ .word _arm2_name @ 0
+ .word _arm2_switch_to @ 4
+ .word _arm2_3_data_abort @ 8
+ .word _arm2_3_check_bugs @ 12
+ .word _arm2_proc_init @ 16
+ .word _arm2_proc_fin @ 20
+
+ .word _arm2_remap_memc @ 24
+ .word _arm2_3_update_map @ 28
+ .word _arm2_3_update_cache @ 32
+ .word _arm2_xchg_1 @ 36
+ .word SYMBOL_NAME(abort) @ 40
+ .word _arm2_xchg_4 @ 44
+
+_arm250_name:
+ .ascii "arm250\0"
+ .align
+
+ .globl SYMBOL_NAME(arm250_processor_functions)
+SYMBOL_NAME(arm250_processor_functions):
+ .word _arm250_name @ 0
+ .word _arm2_switch_to @ 4
+ .word _arm2_3_data_abort @ 8
+ .word _arm2_3_check_bugs @ 12
+ .word _arm2_proc_init @ 16
+ .word _arm2_proc_fin @ 20
+
+ .word _arm2_remap_memc @ 24
+ .word _arm2_3_update_map @ 28
+ .word _arm2_3_update_cache @ 32
+ .word _arm3_xchg_1 @ 36
+ .word SYMBOL_NAME(abort) @ 40
+ .word _arm3_xchg_4 @ 44
+
+_arm3_name:
+ .ascii "arm3\0"
+ .align
+
+ .globl SYMBOL_NAME(arm3_processor_functions)
+SYMBOL_NAME(arm3_processor_functions):
+ .word _arm3_name @ 0
+ .word _arm3_switch_to @ 4
+ .word _arm2_3_data_abort @ 8
+ .word _arm2_3_check_bugs @ 12
+ .word _arm3_proc_init @ 16
+ .word _arm3_proc_fin @ 20
+
+ .word _arm3_remap_memc @ 24
+ .word _arm2_3_update_map @ 28
+ .word _arm2_3_update_cache @ 32
+ .word _arm3_xchg_1 @ 36
+ .word SYMBOL_NAME(abort) @ 40
+ .word _arm3_xchg_4 @ 44
+
diff --git a/arch/arm/mm/proc-arm6,7.S b/arch/arm/mm/proc-arm6,7.S
new file mode 100644
index 000000000..776d0d57c
--- /dev/null
+++ b/arch/arm/mm/proc-arm6,7.S
@@ -0,0 +1,436 @@
+/*
+ * linux/arch/arm/mm/arm6.S: MMU functions for ARM6
+ *
+ * (C) 1997 Russell King
+ *
+ * These are the low level assembler for performing cache and TLB
+ * functions on the ARM6 & ARM7.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "../lib/constants.h"
+
+/*
+ * Function: arm6_7_flush_cache_all (void)
+ * : arm6_7_flush_cache_page (unsigned long address, int size, int flags)
+ *
+ * Params : address Area start address
+ * : size size of area
+ * : flags b0 = I cache as well
+ *
+ * Purpose : Flush all cache lines
+ */
+_arm6_7_flush_cache:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ flush cache
+_arm6_7_null:
+ mov pc, lr
+
+/*
+ * Function: arm6_7_flush_tlb_all (void)
+ *
+ * Purpose : flush all TLB entries in all caches
+ */
+_arm6_7_flush_tlb_all:
+ mov r0, #0
+ mcr p15, 0, r0, c5, c0, 0 @ flush TLB
+ mov pc, lr
+
+/*
+ * Function: arm6_7_flush_tlb_page (unsigned long address, int end, int flags)
+ *
+ * Params : address Area start address
+ * : end Area end address
+ * : flags b0 = I cache as well
+ *
+ * Purpose : flush a TLB entry
+ */
+_arm6_7_flush_tlb_area:
+1: mcr p15, 0, r0, c6, c0, 0 @ flush TLB
+ add r0, r0, #4096
+ cmp r0, r1
+ blt 1b
+ mov pc, lr
+
+@LC0: .word _current
+/*
+ * Function: arm6_7_switch_to (struct task_struct *prev, struct task_struct *next)
+ *
+ * Params : prev Old task structure
+ * : next New task structure for process to run
+ *
+ * Purpose : Perform a task switch, saving the old processes state, and restoring
+ * the new.
+ *
+ * Notes : We don't fiddle with the FP registers here - we postpone this until
+ * the new task actually uses FP. This way, we don't swap FP for tasks
+ * that do not require it.
+ */
+_arm6_7_switch_to:
+ stmfd sp!, {r4 - r9, fp, lr} @ Store most regs on stack
+ mrs ip, cpsr
+ stmfd sp!, {ip} @ Save cpsr_SVC
+ str sp, [r0, #TSS_SAVE] @ Save sp_SVC
+ ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
+ ldr r0, [r1, #ADDR_LIMIT]
+ teq r0, #0
+ moveq r0, #KERNEL_DOMAIN
+ movne r0, #USER_DOMAIN
+ mcr p15, 0, r0, c3, c0 @ Set domain reg
+ ldr r0, [r1, #TSS_MEMMAP] @ Page table pointer
+ mov r1, #0
+ mcr p15, 0, r1, c7, c0, 0 @ flush cache
+ mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
+ mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
+ ldmfd sp!, {ip}
+ msr spsr, ip @ Save tasks CPSR into SPSR for this return
+ ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
+
+/*
+ * Function: arm6_7_data_abort ()
+ *
+ * Params : r0 = address of aborted instruction
+ *
+ * Purpose : obtain information about current aborted instruction
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR
+ * : r2 != 0 if writing
+ * : sp = pointer to registers
+ */
+
+Lukabttxt: .ascii "Unknown data abort code %d [pc=%p, *pc=%p] LR=%p\0"
+ .align
+
+msg: .ascii "DA*%p=%p\n\0"
+ .align
+
+_arm6_data_abort:
+ ldr r4, [r0] @ read instruction causing problem
+ mov r2, r4, lsr #19 @ r2 b1 = L
+ and r1, r4, #15 << 24
+ add pc, pc, r1, lsr #22 @ Now branch to the relevent processing routine
+ movs pc, lr
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_earlyldrpost @ ldr rd, [rn], #m
+ b Ldata_simple @ ldr rd, [rn, #m] @ RegVal
+ b Ldata_earlyldrpost @ ldr rd, [rn], rm
+ b Ldata_simple @ ldr rd, [rn, rm]
+ b Ldata_ldmstm @ ldm*a rn, <rlist>
+ b Ldata_ldmstm @ ldm*b rn, <rlist>
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_simple @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+ b Ldata_simple @ ldc rd, [rn, #m]
+ b Ldata_unknown
+Ldata_unknown: @ Part of jumptable
+ ldr r3, [sp, #15 * 4] @ Get PC
+ str r3, [sp, #-4]!
+ mov r1, r1, lsr #2
+ mov r3, r4
+ mov r2, r0
+ adr r0, Lukabttxt
+ bl SYMBOL_NAME(panic)
+Lstop: b Lstop
+
+_arm7_data_abort:
+ ldr r4, [r0] @ read instruction causing problem
+ mov r2, r4, lsr #19 @ r2 b1 = L
+ and r1, r4, #15 << 24
+ add pc, pc, r1, lsr #22 @ Now branch to the relevent processing routine
+ movs pc, lr
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_lateldrpostconst @ ldr rd, [rn], #m
+ b Ldata_lateldrpreconst @ ldr rd, [rn, #m] @ RegVal
+ b Ldata_lateldrpostreg @ ldr rd, [rn], rm
+ b Ldata_lateldrprereg @ ldr rd, [rn, rm]
+ b Ldata_ldmstm @ ldm*a rn, <rlist>
+ b Ldata_ldmstm @ ldm*b rn, <rlist>
+ b Ldata_unknown
+ b Ldata_unknown
+ b Ldata_simple @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
+ b Ldata_simple @ ldc rd, [rn, #m]
+ b Ldata_unknown
+ b Ldata_unknown
+
+Ldata_ldmstm: tst r4, #1 << 21 @ check writeback bit
+ beq Ldata_simple
+
+ mov r7, #0x11
+ orr r7, r7, r7, lsl #8
+ and r0, r4, r7
+ and r1, r4, r7, lsl #1
+ add r0, r0, r1, lsr #1
+ and r1, r4, r7, lsl #2
+ add r0, r0, r1, lsr #2
+ and r1, r4, r7, lsl #3
+ add r0, r0, r1, lsr #3
+ add r0, r0, r0, lsr #8
+ add r0, r0, r0, lsr #4
+ and r7, r0, #15 @ r7 = no. of registers to transfer.
+ and r5, r4, #15 << 16 @ Get Rn
+ ldr r0, [sp, r5, lsr #14] @ Get register
+ eor r6, r4, r4, lsl #2
+ tst r6, #1 << 23 @ Check inc/dec ^ writeback
+ rsbeq r7, r7, #0
+ add r7, r0, r7, lsl #2 @ Do correction (signed)
+ str r7, [sp, r5, lsr #14] @ Put register
+
+Ldata_simple: and r2, r2, #2 @ check read/write bit
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ and r1, r1, #15
+ mov pc, lr
+
+Ldata_earlyldrpost:
+ tst r2, #4
+ and r2, r2, #2 @ check read/write bit
+ orrne r2, r2, #1 @ T bit
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ and r1, r1, #15
+ mov pc, lr
+
+Ldata_lateldrpostconst:
+ movs r1, r4, lsl #20 @ Get offset
+ beq Ldata_earlyldrpost @ if offset is zero, no effect
+ and r5, r4, #15 << 16 @ Get Rn
+ ldr r0, [sp, r5, lsr #14]
+ tst r4, #1 << 23 @ U bit
+ subne r0, r0, r1, lsr #20
+ addeq r0, r0, r1, lsr #20
+ str r0, [sp, r5, lsr #14] @ Put register
+ b Ldata_earlyldrpost
+
+Ldata_lateldrpreconst:
+ tst r4, #1 << 21 @ check writeback bit
+ movnes r1, r4, lsl #20 @ Get offset
+ beq Ldata_simple
+ and r5, r4, #15 << 16 @ Get Rn
+ ldr r0, [sp, r5, lsr #14]
+ tst r4, #1 << 23 @ U bit
+ subne r0, r0, r1, lsr #20
+ addeq r0, r0, r1, lsr #20
+ str r0, [sp, r5, lsr #14] @ Put register
+ b Ldata_simple
+
+Ldata_lateldrpostreg:
+ and r5, r4, #15
+ ldr r1, [sp, r5, lsl #2] @ Get Rm
+ mov r3, r4, lsr #7
+ ands r3, r3, #31
+ and r6, r4, #0x70
+ orreq r6, r6, #8
+ add pc, pc, r6
+ mov r0, r0
+
+ mov r1, r1, lsl r3 @ 0: LSL #!0
+ b 1f
+ b 1f @ 1: LSL #0
+ mov r0, r0
+ b 1f @ 2: MUL?
+ mov r0, r0
+ b 1f @ 3: MUL?
+ mov r0, r0
+ mov r1, r1, lsr r3 @ 4: LSR #!0
+ b 1f
+ mov r1, r1, lsr #32 @ 5: LSR #32
+ b 1f
+ b 1f @ 6: MUL?
+ mov r0, r0
+ b 1f @ 7: MUL?
+ mov r0, r0
+ mov r1, r1, asr r3 @ 8: ASR #!0
+ b 1f
+ mov r1, r1, asr #32 @ 9: ASR #32
+ b 1f
+ b 1f @ A: MUL?
+ mov r0, r0
+ b 1f @ B: MUL?
+ mov r0, r0
+ mov r1, r1, ror r3 @ C: ROR #!0
+ b 1f
+ mov r1, r1, rrx @ D: RRX
+ b 1f
+ mov r0, r0 @ E: MUL?
+ mov r0, r0
+ mov r0, r0 @ F: MUL?
+
+
+1: and r5, r4, #15 << 16 @ Get Rn
+ ldr r0, [sp, r5, lsr #14]
+ tst r4, #1 << 23 @ U bit
+ subne r0, r0, r1
+ addeq r0, r0, r1
+ str r0, [sp, r5, lsr #14] @ Put register
+ b Ldata_earlyldrpost
+
+Ldata_lateldrprereg:
+ tst r4, #1 << 21 @ check writeback bit
+ beq Ldata_simple
+ and r5, r4, #15
+ ldr r1, [sp, r5, lsl #2] @ Get Rm
+ mov r3, r4, lsr #7
+ ands r3, r3, #31
+ and r6, r4, #0x70
+ orreq r6, r6, #8
+ add pc, pc, r6
+ mov r0, r0
+
+ mov r1, r1, lsl r3 @ 0: LSL #!0
+ b 1f
+ b 1f @ 1: LSL #0
+ mov r0, r0
+ b 1f @ 2: MUL?
+ mov r0, r0
+ b 1f @ 3: MUL?
+ mov r0, r0
+ mov r1, r1, lsr r3 @ 4: LSR #!0
+ b 1f
+ mov r1, r1, lsr #32 @ 5: LSR #32
+ b 1f
+ b 1f @ 6: MUL?
+ mov r0, r0
+ b 1f @ 7: MUL?
+ mov r0, r0
+ mov r1, r1, asr r3 @ 8: ASR #!0
+ b 1f
+ mov r1, r1, asr #32 @ 9: ASR #32
+ b 1f
+ b 1f @ A: MUL?
+ mov r0, r0
+ b 1f @ B: MUL?
+ mov r0, r0
+ mov r1, r1, ror r3 @ C: ROR #!0
+ b 1f
+ mov r1, r1, rrx @ D: RRX
+ b 1f
+ mov r0, r0 @ E: MUL?
+ mov r0, r0
+ mov r0, r0 @ F: MUL?
+
+
+1: and r5, r4, #15 << 16 @ Get Rn
+ ldr r0, [sp, r5, lsr #14]
+ tst r4, #1 << 23 @ U bit
+ subne r0, r0, r1
+ addeq r0, r0, r1
+ str r0, [sp, r5, lsr #14] @ Put register
+ b Ldata_simple
+
+/*
+ * Function: arm6_7_check_bugs (void)
+ * : arm6_7_proc_init (void)
+ * : arm6_7_proc_fin (void)
+ *
+ * Notes : This processor does not require these
+ */
+_arm6_7_check_bugs:
+ mrs ip, cpsr
+ bic ip, ip, #F_BIT
+ msr cpsr, ip
+_arm6_7_proc_init:
+_arm6_7_proc_fin:
+ mov pc, lr
+
+/*
+ * Function: arm6_set_pmd ()
+ *
+ * Params : r0 = Address to set
+ * : r1 = value to set
+ *
+ * Purpose : Set a PMD and flush it out of any WB cache
+ */
+_arm6_set_pmd: and r2, r1, #3
+ teq r2, #2
+ andeq r2, r1, #8
+ orreq r1, r1, r2, lsl #1 @ Updatable = Cachable
+ teq r2, #1
+ orreq r1, r1, #16 @ Updatable = 1 if Page table
+ str r1, [r0]
+ mov pc, lr
+
+/*
+ * Function: arm7_set_pmd ()
+ *
+ * Params : r0 = Address to set
+ * : r1 = value to set
+ *
+ * Purpose : Set a PMD and flush it out of any WB cache
+ */
+_arm7_set_pmd: orr r1, r1, #16 @ Updatable bit is always set on ARM7
+ str r1, [r0]
+ mov pc, lr
+
+/*
+ * Function: _arm6_7_reset
+ *
+ * Notes : This sets up everything for a reset
+ */
+_arm6_7_reset: mrs r1, cpsr
+ orr r1, r1, #F_BIT|I_BIT
+ msr cpsr, r1
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ flush cache
+ mcr p15, 0, r0, c5, c0, 0 @ flush TLB
+ mov r1, #F_BIT | I_BIT | 3
+ mov pc, lr
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ * come through these
+ */
+_arm6_name: .ascii "arm6\0"
+ .align
+
+ENTRY(arm6_processor_functions)
+ .word _arm6_name @ 0
+ .word _arm6_7_switch_to @ 4
+ .word _arm6_data_abort @ 8
+ .word _arm6_7_check_bugs @ 12
+ .word _arm6_7_proc_init @ 16
+ .word _arm6_7_proc_fin @ 20
+
+ .word _arm6_7_flush_cache @ 24
+ .word _arm6_7_flush_cache @ 28
+ .word _arm6_7_flush_cache @ 32
+ .word _arm6_7_null @ 36
+ .word _arm6_7_flush_cache @ 40
+ .word _arm6_7_flush_tlb_all @ 44
+ .word _arm6_7_flush_tlb_area @ 48
+ .word _arm6_set_pmd @ 52
+ .word _arm6_7_reset @ 56
+ .word _arm6_7_flush_cache @ 60
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ * come through these
+ */
+_arm7_name: .ascii "arm7\0"
+ .align
+
+ENTRY(arm7_processor_functions)
+ .word _arm7_name @ 0
+ .word _arm6_7_switch_to @ 4
+ .word _arm7_data_abort @ 8
+ .word _arm6_7_check_bugs @ 12
+ .word _arm6_7_proc_init @ 16
+ .word _arm6_7_proc_fin @ 20
+
+ .word _arm6_7_flush_cache @ 24
+ .word _arm6_7_flush_cache @ 28
+ .word _arm6_7_flush_cache @ 32
+ .word _arm6_7_null @ 36
+ .word _arm6_7_flush_cache @ 40
+ .word _arm6_7_flush_tlb_all @ 44
+ .word _arm6_7_flush_tlb_area @ 48
+ .word _arm7_set_pmd @ 52
+ .word _arm6_7_reset @ 56
+ .word _arm6_7_flush_cache @ 60
+
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
new file mode 100644
index 000000000..7d53bf230
--- /dev/null
+++ b/arch/arm/mm/proc-sa110.S
@@ -0,0 +1,305 @@
+/*
+ * linux/arch/arm/mm/sa110.S: MMU functions for SA110
+ *
+ * (C) 1997 Russell King
+ *
+ * These are the low level assembler for performing cache and TLB
+ * functions on the sa110.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include "../lib/constants.h"
+
+ .data
+Lclean_switch: .long 0
+ .text
+
+/*
+ * Function: sa110_flush_cache_all (void)
+ *
+ * Purpose : Flush all cache lines
+ */
+ .align 5
+_sa110_flush_cache_all: @ preserves r0
+ ldr r3, =Lclean_switch @ ping-pong flag: selects which of two flush buffers to walk
+ ldr r2, [r3]
+ ands r2, r2, #1 @ NE if the alternate buffer is due this time
+ eor r2, r2, #1 @ toggle the flag for the next call
+ str r2, [r3]
+ ldr ip, =0xdf000000 @ base of reserved address range used to displace dirty D-cache lines
+ addne ip, ip, #32768 @ use the other 32K half so the previous buffer's lines get evicted
+ add r1, ip, #16384 @ only necessary for 16k
+1: ldr r2, [ip], #32 @ one load per 32-byte line forces clean+replace of a D-cache line
+ teq r1, ip
+ bne 1b
+ mov ip, #0
+ mcr p15, 0, ip, c7, c5, 0 @ flush I cache
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_cache_area (unsigned long address, int end, int flags)
+ *
+ * Params : address Area start address
+ * : end Area end address
+ * : flags b0 = I cache as well
+ *
+ * Purpose : clean & flush all cache lines associated with this area of memory
+ */
+ .align 5
+_sa110_flush_cache_area:
+ sub r3, r1, r0 @ area length
+ cmp r3, #32768 @ beyond this it is cheaper to flush the whole cache
+ bgt _sa110_flush_cache_all
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ flush D entry
+ add r0, r0, #32 @ 32-byte cache lines; loop unrolled two lines per pass
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ flush D entry
+ add r0, r0, #32
+ cmp r0, r1
+ blt 1b
+ tst r2, #1 @ flags b0: also invalidate the I cache?
+ movne r0, #0
+ mcrne p15, 0, r0, c7, c5, 0 @ flush I cache
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_cache_entry (unsigned long address)
+ *
+ * Params : address Address of cache line to flush
+ *
+ * Purpose : clean & flush an entry
+ */
+ .align 5
+_sa110_flush_cache_entry:
+ mov r1, #0
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ mcr p15, 0, r1, c7, c5, 0 @ flush I cache
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_cache_pte (unsigned long address)
+ *
+ * Params : address Address of cache line to clean
+ *
+ * Purpose : Ensure that physical memory reflects cache at this location
+ * for page table purposes.
+ */
+_sa110_flush_cache_pte:
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_ram_page (unsigned long page)
+ *
+ * Params : address Area start address
+ * : size size of area
+ * : flags b0 = I cache as well
+ *
+ * Purpose : clean & flush all cache lines associated with this area of memory
+ */
+ .align 5
+_sa110_flush_ram_page:
+ mov r1, #4096
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ flush D entry
+ add r0, r0, #32
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ mcr p15, 0, r0, c7, c6, 1 @ flush D entry
+ add r0, r0, #32
+ subs r1, r1, #64
+ bne 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c7, c5, 0 @ flush I cache
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_tlb_all (void)
+ *
+ * Purpose : flush all TLB entries in all caches
+ */
+ .align 5
+_sa110_flush_tlb_all:
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c8, c7, 0 @ flush I & D tlbs
+ mov pc, lr
+
+/*
+ * Function: sa110_flush_tlb_area (unsigned long address, int end, int flags)
+ *
+ * Params : address Area start address
+ * : end Area end address
+ * : flags b0 = I cache as well
+ *
+ * Purpose : flush a TLB entry
+ */
+ .align 5
+_sa110_flush_tlb_area:
+ mov r3, #0
+ mcr p15, 0, r3, c7, c10, 4 @ drain WB
+1: cmp r0, r1
+ mcrlt p15, 0, r0, c8, c6, 1 @ flush D TLB entry
+ addlt r0, r0, #4096
+ cmp r0, r1
+ mcrlt p15, 0, r0, c8, c6, 1 @ flush D TLB entry
+ addlt r0, r0, #4096
+ blt 1b
+ tst r2, #1
+ mcrne p15, 0, r3, c8, c5, 0 @ flush I TLB
+ mov pc, lr
+
+ .align 5
+_sa110_flush_icache_area:
+ mov r3, #0
+1: mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
+ add r0, r0, #32
+ cmp r0, r1
+ blt 1b
+ mcr p15, 0, r0, c7, c5, 0 @ flush I cache
+ mov pc, lr
+
+@LC0: .word _current
+/*
+ * Function: sa110_switch_to (struct task_struct *prev, struct task_struct *next)
+ *
+ * Params : prev Old task structure
+ * : next New task structure for process to run
+ *
+ * Purpose : Perform a task switch, saving the old processes state, and restoring
+ * the new.
+ *
+ * Notes : We don't fiddle with the FP registers here - we postpone this until
+ * the new task actually uses FP. This way, we don't swap FP for tasks
+ * that do not require it.
+ */
+ .align 5
+_sa110_switch_to:
+ stmfd sp!, {r4 - r9, fp, lr} @ Store most regs on stack
+ mrs ip, cpsr
+ stmfd sp!, {ip} @ Save cpsr_SVC
+ str sp, [r0, #TSS_SAVE] @ Save sp_SVC
+ ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
+ ldr r0, [r1, #ADDR_LIMIT] @ 0 => kernel thread, else user task
+ teq r0, #0
+ moveq r0, #KERNEL_DOMAIN
+ movne r0, #USER_DOMAIN
+ mcr p15, 0, r0, c3, c0 @ Set segment
+ ldr r0, [r1, #TSS_MEMMAP] @ Page table pointer
+ ldr r3, =Lclean_switch @ ping-pong flush-buffer flag, as in _sa110_flush_cache_all
+ ldr r2, [r3]
+ ands r2, r2, #1 @ NE if the alternate buffer is due
+ eor r2, r2, #1 @ toggle for next time
+ str r2, [r3]
+ ldr r2, =0xdf000000 @ flush buffer base
+ addne r2, r2, #32768 @ other 32K half when flag was set
+ add r1, r2, #16384 @ only necessary for 16k
+1: ldr r3, [r2], #32 @ one load per 32-byte line cleans the D cache by displacement
+ teq r1, r2
+ bne 1b
+ mov r1, #0
+ mcr p15, 0, r1, c7, c5, 0 @ flush I cache
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
+ mcr p15, 0, r1, c8, c7, 0 @ flush TLBs
+ ldmfd sp!, {ip}
+ msr spsr, ip @ put new task's CPSR into SPSR for the exceptional return below
+ ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
+
+/*
+ * Function: sa110_data_abort ()
+ *
+ * Params : r0 = address of aborted instruction
+ *
+ * Purpose : obtain information about current aborted instruction
+ *
+ * Returns : r0 = address of abort
+ * : r1 = FSR
+ * : r2 != 0 if writing
+ */
+ .align 5
+_sa110_data_abort:
+ ldr r2, [r0] @ read instruction causing problem
+ mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ mov r2, r2, lsr #19 @ b1 = L
+ and r3, r2, #0x69 << 2 @ opcode class bits, used only by the disabled check below
+ and r2, r2, #2 @ NOTE(review): r2 holds the L (load) bit, yet the header says r2 != 0 means writing — verify the sense with the abort handler
+// teq r3, #0x21 << 2
+// orreq r2, r2, #1 @ b0 = {LD,ST}RT
+ mrc p15, 0, r1, c5, c0, 0 @ get FSR
+ and r1, r1, #255 @ keep only the fault status field
+ mov pc, lr
+
+/*
+ * Function: sa110_set_pmd ()
+ *
+ * Params : r0 = Address to set
+ * : r1 = value to set
+ *
+ * Purpose : Set a PMD and flush it out of any WB cache
+ */
+ .align 5
+_sa110_set_pmd: str r1, [r0]
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+ mov pc, lr
+
+/*
+ * Function: sa110_check_bugs (void)
+ * : sa110_proc_init (void)
+ * : sa110_proc_fin (void)
+ *
+ * Notes : This processor does not require these
+ */
+_sa110_check_bugs:
+ mrs ip, cpsr
+ bic ip, ip, #F_BIT @ NOTE(review): despite the "not required" note above, this does re-enable FIQs
+ msr cpsr, ip
+_sa110_proc_init:
+_sa110_proc_fin:
+ mov pc, lr
+
+/*
+ * Function: sa110_reset
+ *
+ * Notes : This sets up everything for a reset
+ */
+_sa110_reset: mrs r1, cpsr @ mask IRQ and FIQ for the duration of the reset sequence
+ orr r1, r1, #F_BIT | I_BIT
+ msr cpsr, r1
+ stmfd sp!, {r1, lr}
+ bl _sa110_flush_cache_all
+ bl _sa110_flush_tlb_all
+ mcr p15, 0, ip, c7, c7, 0 @ flush I,D caches -- NOTE(review): ip is undefined here; Rd should be zero for this op — verify
+ mrc p15, 0, r0, c1, c0, 0 @ ctrl register
+ bic r0, r0, #0x1800 @ clear bits 11-12 (I-cache enable and related) — presumably for the caller to write back
+ bic r0, r0, #0x000f @ clear MMU/align/D-cache/WB enables; value returned in r0, never written to CP15 here — confirm caller does
+ ldmfd sp!, {r1, pc}
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ * come through these
+ */
+_sa110_name: .ascii "sa110\0"
+ .align
+
+ENTRY(sa110_processor_functions)
+ .word _sa110_name @ 0
+ .word _sa110_switch_to @ 4
+ .word _sa110_data_abort @ 8
+ .word _sa110_check_bugs @ 12
+ .word _sa110_proc_init @ 16
+ .word _sa110_proc_fin @ 20
+
+ .word _sa110_flush_cache_all @ 24
+ .word _sa110_flush_cache_area @ 28
+ .word _sa110_flush_cache_entry @ 32
+ .word _sa110_flush_cache_pte @ 36
+ .word _sa110_flush_ram_page @ 40
+ .word _sa110_flush_tlb_all @ 44
+ .word _sa110_flush_tlb_area @ 48
+
+ .word _sa110_set_pmd @ 52
+ .word _sa110_reset @ 56
+ .word _sa110_flush_icache_area @ 60
diff --git a/arch/arm/mm/small_page.c b/arch/arm/mm/small_page.c
new file mode 100644
index 000000000..dcf039dcc
--- /dev/null
+++ b/arch/arm/mm/small_page.c
@@ -0,0 +1,201 @@
+/*
+ * linux/arch/arm/mm/small_page.c
+ *
+ * Copyright (C) 1996 Russell King
+ *
+ * Changelog:
+ * 26/01/1996 RMK Cleaned up various areas to make little more generic
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+
+#define SMALL_ALLOC_SHIFT (10)
+#define SMALL_ALLOC_SIZE (1 << SMALL_ALLOC_SHIFT)
+#define NR_BLOCKS (PAGE_SIZE / SMALL_ALLOC_SIZE)
+
+#if NR_BLOCKS != 4
+#error I only support 4 blocks per page!
+#endif
+
+#define USED(pg) ((atomic_read(&(pg)->count) >> 8) & 15)
+#define SET_USED(pg,off) (atomic_read(&(pg)->count) |= 256 << off)
+#define CLEAR_USED(pg,off) (atomic_read(&(pg)->count) &= ~(256 << off))
+#define IS_FREE(pg,off) (!(atomic_read(&(pg)->count) & (256 << off)))
+#define PAGE_PTR(page,block) ((struct free_small_page *)((page) + \
+ ((block) << SMALL_ALLOC_SHIFT)))
+
+struct free_small_page {
+ unsigned long next;
+ unsigned long prev;
+};
+
+/*
+ * To handle allocating small pages, we use the main get_free_page routine,
+ * and split the page up into 4. The page is marked in mem_map as reserved,
+ * so it can't be free'd by free_page. The count field is used to keep track
+ * of which sections of this page are allocated.
+ */
+static unsigned long small_page_ptr;
+
+static unsigned char offsets[1<<NR_BLOCKS] = {
+ 0, /* 0000 */
+ 1, /* 0001 */
+ 0, /* 0010 */
+ 2, /* 0011 */
+ 0, /* 0100 */
+ 1, /* 0101 */
+ 0, /* 0110 */
+ 3, /* 0111 */
+ 0, /* 1000 */
+ 1, /* 1001 */
+ 0, /* 1010 */
+ 2, /* 1011 */
+ 0, /* 1100 */
+ 1, /* 1101 */
+ 0, /* 1110 */
+ 4 /* 1111 */
+};
+
+static inline void clear_page_links(unsigned long page)
+{
+ struct free_small_page *fsp;
+ int i;
+
+ for (i = 0; i < NR_BLOCKS; i++) {
+ fsp = PAGE_PTR(page, i);
+ fsp->next = fsp->prev = 0;
+ }
+}
+
+static inline void set_page_links_prev(unsigned long page, unsigned long prev)
+{
+ struct free_small_page *fsp;
+ unsigned int mask;
+ int i;
+
+ if (!page)
+ return;
+
+ mask = USED(&mem_map[MAP_NR(page)]);
+ for (i = 0; i < NR_BLOCKS; i++) {
+ if (mask & (1 << i))
+ continue;
+ fsp = PAGE_PTR(page, i);
+ fsp->prev = prev;
+ }
+}
+
+static inline void set_page_links_next(unsigned long page, unsigned long next)
+{
+ struct free_small_page *fsp;
+ unsigned int mask;
+ int i;
+
+ if (!page)
+ return;
+
+ mask = USED(&mem_map[MAP_NR(page)]);
+ for (i = 0; i < NR_BLOCKS; i++) {
+ if (mask & (1 << i))
+ continue;
+ fsp = PAGE_PTR(page, i);
+ fsp->next = next;
+ }
+}
+
+unsigned long get_small_page(int priority)
+{
+ struct free_small_page *fsp;
+ unsigned long new_page;
+ unsigned long flags;
+ struct page *page;
+ int offset;
+
+ save_flags(flags);
+ if (!small_page_ptr)
+ goto need_new_page;
+ cli();
+again:
+ page = mem_map + MAP_NR(small_page_ptr);
+ offset = offsets[USED(page)]; /* index of the lowest free quarter in this page */
+ SET_USED(page, offset);
+ new_page = (unsigned long)PAGE_PTR(small_page_ptr, offset);
+ if (USED(page) == 15) { /* page now fully used: unlink it from the free list */
+ fsp = (struct free_small_page *)new_page;
+ set_page_links_prev (fsp->next, 0);
+ small_page_ptr = fsp->next;
+ }
+ restore_flags(flags);
+ return new_page;
+
+need_new_page:
+ new_page = __get_free_page(priority); /* may sleep, hence the re-check below */
+ if (!small_page_ptr) { /* list still empty: install the fresh page */
+ if (new_page) {
+ set_bit (PG_reserved, &mem_map[MAP_NR(new_page)].flags); /* hide it from free_page() */
+ clear_page_links (new_page);
+ cli();
+ small_page_ptr = new_page;
+ goto again;
+ }
+ restore_flags(flags);
+ return 0;
+ }
+ free_page(new_page); /* raced: someone refilled the list while we slept */
+ cli();
+ goto again;
+}
+
+void free_small_page(unsigned long spage)
+{
+ struct free_small_page *ofsp, *cfsp;
+ unsigned long flags;
+ struct page *page;
+ int offset, oldoffset;
+
+ offset = (spage >> SMALL_ALLOC_SHIFT) & (NR_BLOCKS - 1); /* quarter index within its page */
+ spage -= offset << SMALL_ALLOC_SHIFT; /* round down to the page base */
+
+ page = mem_map + MAP_NR(spage);
+ if (!PageReserved(page) || !USED(page)) {
+ printk ("Trying to free non-small page from %p\n", __builtin_return_address(0));
+ return;
+ }
+ if (IS_FREE(page, offset)) {
+ printk ("Trying to free free small page from %p\n", __builtin_return_address(0));
+ return;
+ }
+ save_flags_cli (flags);
+ oldoffset = offsets[USED(page)]; /* lowest free quarter before this free (NR_BLOCKS if page was full) */
+ CLEAR_USED(page, offset);
+ ofsp = PAGE_PTR(spage, oldoffset);
+ cfsp = PAGE_PTR(spage, offset);
+
+ if (oldoffset == NR_BLOCKS) { /* going from totally used to mostly used */
+ cfsp->prev = 0;
+ cfsp->next = small_page_ptr;
+ set_page_links_prev (small_page_ptr, spage);
+ small_page_ptr = spage;
+ } else if (!USED(page)) { /* last quarter freed: unlink and release the whole page */
+ set_page_links_prev (ofsp->next, ofsp->prev);
+ set_page_links_next (ofsp->prev, ofsp->next);
+ if (spage == small_page_ptr)
+ small_page_ptr = ofsp->next;
+ clear_bit (PG_reserved, &page->flags);
+ restore_flags(flags);
+ free_page (spage);
+ } else
+ *cfsp = *ofsp; /* copy list links into the newly freed quarter so every free quarter stays consistent */
+ restore_flags(flags);
+}
diff --git a/arch/arm/vmlinux.lds b/arch/arm/vmlinux.lds
new file mode 100644
index 000000000..787e5c99d
--- /dev/null
+++ b/arch/arm/vmlinux.lds
@@ -0,0 +1,58 @@
+/* ld script to make ARM Linux kernel
+ * Based on the i386 version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+OUTPUT_FORMAT("elf32-arm", "elf32-arm", "elf32-arm")
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ *(.fixup)
+ *(.gnu.warning)
+ } = 0x9090
+ .text.lock : { *(.text.lock) } /* out-of-line lock text */
+ .rodata : { *(.rodata) }
+ .kstrtab : { *(.kstrtab) }
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : { *(__ksymtab) }
+ __stop___ksymtab = .;
+
+ _etext = .; /* End of text section */
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ _edata = .; /* End of data section */
+
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .text.init : { *(.text.init) }
+ .data.init : { *(.data.init) }
+ . = ALIGN(4096);
+ __init_end = .;
+
+ __bss_start = .; /* BSS */
+ .bss : {
+ *(.bss)
+ }
+ _end = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}