summaryrefslogtreecommitdiffstats
path: root/arch/arm/lib
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/lib')
-rw-r--r--arch/arm/lib/Makefile55
-rw-r--r--arch/arm/lib/backtrace.S100
-rw-r--r--arch/arm/lib/bitops.S152
-rw-r--r--arch/arm/lib/checksum.S600
-rw-r--r--arch/arm/lib/delay.S43
-rw-r--r--arch/arm/lib/floppydma.S57
-rw-r--r--arch/arm/lib/fp_support.c22
-rw-r--r--arch/arm/lib/getconsdata.c31
-rw-r--r--arch/arm/lib/getconstants.c74
-rw-r--r--arch/arm/lib/getconstants.h17
-rw-r--r--arch/arm/lib/io-acorn.S215
-rw-r--r--arch/arm/lib/io-ebsa110.S149
-rw-r--r--arch/arm/lib/ll_char_wr.S157
-rw-r--r--arch/arm/lib/loaders.S53
-rw-r--r--arch/arm/lib/memcpy.S312
-rw-r--r--arch/arm/lib/memfastset.S35
-rw-r--r--arch/arm/lib/string.S139
-rw-r--r--arch/arm/lib/system.S20
-rw-r--r--arch/arm/lib/testm.c81
-rw-r--r--arch/arm/lib/uaccess-armo.S230
-rw-r--r--arch/arm/lib/uaccess.S631
21 files changed, 3173 insertions, 0 deletions
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
new file mode 100644
index 000000000..10fad6b43
--- /dev/null
+++ b/arch/arm/lib/Makefile
@@ -0,0 +1,55 @@
+#
+# linux/arch/arm/lib/Makefile
+#
+# Copyright (C) 1995-1998 Russell King
+#
+
+L_TARGET := lib.a
+L_OBJS := backtrace.o bitops.o delay.o fp_support.o \
+ loaders.o memcpy.o memfastset.o system.o string.o uaccess.o
+
+ifeq ($(PROCESSOR),armo)
+ L_OBJS += uaccess-armo.o
+endif
+
+ifdef CONFIG_INET
+ L_OBJS += checksum.o
+endif
+
+ifdef CONFIG_ARCH_ACORN
+ L_OBJS += ll_char_wr.o io-acorn.o
+ ifdef CONFIG_ARCH_A5K
+ L_OBJS += floppydma.o
+ endif
+ ifdef CONFIG_ARCH_RPC
+ L_OBJS += floppydma.o
+ endif
+endif
+
+ifdef CONFIG_ARCH_EBSA110
+ L_OBJS += io-ebsa110.o
+endif
+
+include $(TOPDIR)/Rules.make
+
+constants.h: getconstants
+ ./getconstants > constants.h
+
+getconstants: getconstants.c getconstants.h
+ $(HOSTCC) -D__KERNEL__ -o getconstants getconstants.c
+
+getconstants.h: getconsdata.c
+ $(CC) $(CFLAGS) -c getconsdata.c
+ $(PERL) extractinfo.perl $(OBJDUMP) > $@
+
+%.o: %.S
+ifndef CONFIG_BINUTILS_NEW	# old binutils: preprocess, then translate ';' -> newline, '$' -> '#' by hand
+	$(CC) $(CFLAGS) -D__ASSEMBLY__ -E $< | tr ';$$' '\n#' > ..tmp.$<.s
+	$(CC) $(CFLAGS:-pipe=) -c -o $@ ..tmp.$<.s
+	$(RM) ..tmp.$<.s
+else
+	$(CC) $(CFLAGS) -D__ASSEMBLY__ -c -o $@ $<
+endif
+
+clean:
+ $(RM) getconstants constants.h getconstants.h
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
new file mode 100644
index 000000000..d48055b70
--- /dev/null
+++ b/arch/arm/lib/backtrace.S
@@ -0,0 +1,100 @@
+/*
+ * linux/arch/arm/lib/backtrace.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ fp is 0 or stack frame
+
+#define frame r4
+#define next r5
+#define save r6
+#define mask r7
+#define offset r8
+
+ENTRY(__backtrace)
+ mov r1, #0x10
+ mov r0, fp
+
+ENTRY(c_backtrace)
+ stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
+ tst r1, #0x10 @ 26 or 32-bit?
+ moveq mask, #0xfc000003
+ movne mask, #0
+ tst mask, r0
+ movne r0, #0
+ movs frame, r0
+1: moveq r0, #-2
+ LOADREGS(eqfd, sp!, {r4 - r8, pc})
+
+2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
+ ldr r0, [sp], #4
+ adr r1, 2b - 4
+ sub offset, r0, r1
+
+3: tst frame, mask @ Check for address exceptions...
+ bne 1b
+
+ ldmda frame, {r0, r1, r2, r3} @ fp, sp, lr, pc
+ mov next, r0
+
+ sub save, r3, offset @ Correct PC for prefetching
+ bic save, save, mask
+ adr r0, .Lfe
+ mov r1, save
+ bic r2, r2, mask
+ bl SYMBOL_NAME(printk)
+
+ sub r0, frame, #16
+ ldr r1, [save, #4]
+ mov r3, r1, lsr #10
+ ldr r2, .Ldsi+4
+ teq r3, r2 @ Check for stmia sp!, {args}
+ addeq save, save, #4 @ next instruction
+ bleq .Ldumpstm
+
+ ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction
+ mov r3, r1, lsr #10
+ ldr r2, .Ldsi
+ teq r3, r2
+ bleq .Ldumpstm
+
+ teq frame, next
+ movne frame, next
+ teqne frame, #0
+ bne 3b
+ LOADREGS(fd, sp!, {r4 - r8, pc})
+
+
+#define instr r4
+#define reg r5
+#define stack r6
+
+.Ldumpstm: stmfd sp!, {instr, reg, stack, lr}
+ mov stack, r0
+ mov instr, r1
+ mov reg, #9
+
+1: mov r3, #1
+ tst instr, r3, lsl reg
+ beq 2f
+ ldr r2, [stack], #-4
+ mov r1, reg
+ adr r0, .Lfp
+ bl SYMBOL_NAME(printk)
+2: subs reg, reg, #1
+ bpl 1b
+
+ mov r0, stack
+ LOADREGS(fd, sp!, {instr, reg, stack, pc})
+
+.Lfe: .ascii "Function entered at [<%p>] from [<%p>]\n"
+ .byte 0
+.Lfp: .ascii " r%d = %p\n"
+ .byte 0
+ .align
+.Ldsi: .word 0x00e92dd8 >> 2
+ .word 0x00e92d00 >> 2
diff --git a/arch/arm/lib/bitops.S b/arch/arm/lib/bitops.S
new file mode 100644
index 000000000..4c1f4b0aa
--- /dev/null
+++ b/arch/arm/lib/bitops.S
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/arm/lib/bitops.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ Purpose : Function to set a bit
+@ Prototype: int set_bit(int bit,int *addr)
+
+ENTRY(set_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ orr r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_set_bit)
+ add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #7 @ Get bit offset
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ orr r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Function to clear a bit
+@ Prototype: int clear_bit(int bit,int *addr)
+
+ENTRY(clear_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ bic r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_clear_bit)
+ add r1, r1, r0, lsr #3 @ Get byte offset
+ and r3, r0, #7 @ Get bit offset
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ bic r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+/* Purpose : Function to change a bit
+ * Prototype: int change_bit(int bit,int *addr)
+ */
+ENTRY(change_bit)
+ and r2, r0, #7
+ mov r3, #1
+ mov r3, r3, lsl r2
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1, r0, lsr #3]
+ eor r2, r2, r3
+ strb r2, [r1, r0, lsr #3]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+ENTRY(test_and_change_bit)
+ add r1, r1, r0, lsr #3
+ and r3, r0, #7
+ mov r0, #1
+ SAVEIRQS(ip)
+ DISABLEIRQS(ip)
+ ldrb r2, [r1]
+ tst r2, r0, lsl r3
+ eor r2, r2, r0, lsl r3
+ moveq r0, #0
+ strb r2, [r1]
+ RESTOREIRQS(ip)
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Find a 'zero' bit
+@ Prototype: int find_first_zero_bit(char *addr,int maxbit);
+
+ENTRY(find_first_zero_bit)
+ mov r2, #0 @ Initialise bit position
+Lfindzbit1lp: ldrb r3, [r0, r2, lsr #3] @ Check byte, if 0xFF, then all bits set
+ teq r3, #0xFF
+ bne Lfoundzbit
+ add r2, r2, #8
+ cmp r2, r1 @ Check to see if we have come to the end
+ bcc Lfindzbit1lp
+ add r0, r1, #1 @ Make sure that we flag an error
+ RETINSTR(mov,pc,lr)
+Lfoundzbit: tst r3, #1 @ Check individual bits
+ moveq r0, r2
+ RETINSTR(moveq,pc,lr)
+ tst r3, #2
+ addeq r0, r2, #1
+ RETINSTR(moveq,pc,lr)
+ tst r3, #4
+ addeq r0, r2, #2
+ RETINSTR(moveq,pc,lr)
+ tst r3, #8
+ addeq r0, r2, #3
+ RETINSTR(moveq,pc,lr)
+ tst r3, #16
+ addeq r0, r2, #4
+ RETINSTR(moveq,pc,lr)
+ tst r3, #32
+ addeq r0, r2, #5
+ RETINSTR(moveq,pc,lr)
+ tst r3, #64
+ addeq r0, r2, #6
+ RETINSTR(moveq,pc,lr)
+ add r0, r2, #7
+ RETINSTR(mov,pc,lr)
+
+@ Purpose : Find next 'zero' bit
+@ Prototype: int find_next_zero_bit(char *addr,int maxbit,int offset)
+
+ENTRY(find_next_zero_bit)
+ tst r2, #7
+ beq Lfindzbit1lp @ If new byte, goto old routine
+ ldrb r3, [r0, r2, lsr#3]
+ orr r3, r3, #0xFF00 @ Set top bits so we wont get confused
+ stmfd sp!, {r4}
+ and r4, r2, #7
+ mov r3, r3, lsr r4 @ Shift right by no. of bits
+ ldmfd sp!, {r4}
+ and r3, r3, #0xFF
+ teq r3, #0xFF
+ orreq r2, r2, #7
+ addeq r2, r2, #1
+ beq Lfindzbit1lp @ If all bits are set, goto old routine
+ b Lfoundzbit
diff --git a/arch/arm/lib/checksum.S b/arch/arm/lib/checksum.S
new file mode 100644
index 000000000..f273f960d
--- /dev/null
+++ b/arch/arm/lib/checksum.S
@@ -0,0 +1,600 @@
+/*
+ * linux/arch/arm/lib/iputils.S
+ *
+ * Copyright (C) 1995, 1996, 1997, 1998 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+
+/* Function: __u32 csum_partial(const char *src, int len, __u32)
+ * Params : r0 = buffer, r1 = len, r2 = checksum
+ * Returns : r0 = new checksum
+ */
+
+ENTRY(csum_partial)
+ tst r0, #2
+ beq 1f
+ subs r1, r1, #2
+ addmi r1, r1, #2
+ bmi 3f
+ bic r0, r0, #3
+ ldr r3, [r0], #4
+ adds r2, r2, r3, lsr #16
+ adcs r2, r2, #0
+1: adds r2, r2, #0
+ bics ip, r1, #31
+ beq 3f
+ stmfd sp!, {r4 - r6}
+2: ldmia r0!, {r3 - r6}
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6
+ ldmia r0!, {r3 - r6}
+ adcs r2, r2, r3
+ adcs r2, r2, r4
+ adcs r2, r2, r5
+ adcs r2, r2, r6
+ sub ip, ip, #32
+ teq ip, #0
+ bne 2b
+ adcs r2, r2, #0
+ ldmfd sp!, {r4 - r6}
+3: ands ip, r1, #0x1c
+ beq 5f
+4: ldr r3, [r0], #4
+ adcs r2, r2, r3
+ sub ip, ip, #4
+ teq ip, #0
+ bne 4b
+ adcs r2, r2, #0
+5: ands ip, r1, #3
+ moveq r0, r2
+ RETINSTR(moveq,pc,lr)
+ mov ip, ip, lsl #3
+ rsb ip, ip, #32
+ ldr r3, [r0]
+ mov r3, r3, lsl ip
+ adds r2, r2, r3, lsr ip
+ adc r0, r2, #0
+ RETINSTR(mov,pc,lr)
+
+/* Function: __u32 csum_partial_copy_from_user (const char *src, char *dst, int len, __u32 sum, int *err_ptr)
+ * Params : r0 = src, r1 = dst, r2 = len, r3 = sum, [sp, #0] = &err
+ * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
+ */
+
+#define USER_LDR(instr...) \
+9999: instr; \
+ .section __ex_table, "a"; \
+ .align 3; \
+ .long 9999b, 6001f; \
+ .previous;
+
+ENTRY(csum_partial_copy_from_user)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ cmp r2, #4
+ blt .too_small_user
+ tst r1, #2 @ Test destination alignment
+ beq .dst_aligned_user
+ subs r2, r2, #2 @ We dont know if SRC is aligned...
+USER_LDR( ldrbt ip, [r0], #1)
+USER_LDR( ldrbt r8, [r0], #1)
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ adcs r3, r3, #0
+ strb ip, [r1], #1
+ mov ip, ip, lsr #8
+ strb ip, [r1], #1 @ Destination now aligned
+.dst_aligned_user:
+ tst r0, #3
+ bne .src_not_aligned_user
+ adds r3, r3, #0
+ bics ip, r2, #15 @ Routine for src & dst aligned
+ beq 2f
+1:
+USER_LDR( ldrt r4, [r0], #4)
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r4, [r0], #4)
+USER_LDR( ldrt r5, [r0], #4)
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r4, [r0], #4)
+ str r4, [r1], #4
+ adcs r3, r3, r4
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+USER_LDR( ldrt r4, [r0], #4)
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+.exit: tst r2, #1
+ strneb r4, [r1], #1
+ andne r4, r4, #255
+ adcnes r3, r3, r4
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+.too_small_user:
+ teq r2, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ cmp r2, #2
+ blt .too_small_user1
+USER_LDR( ldrbt ip, [r0], #1)
+USER_LDR( ldrbt r8, [r0], #1)
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ strb ip, [r1], #1
+ strb r8, [r1], #1
+ tst r2, #1
+.too_small_user1:
+USER_LDR( ldrnebt ip, [r0], #1)
+ strneb ip, [r1], #1
+ adcnes r3, r3, ip
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+.src_not_aligned_user:
+ cmp r2, #4
+ blt .too_small_user
+ and ip, r0, #3
+ bic r0, r0, #3
+USER_LDR( ldrt r4, [r0], #4)
+ cmp ip, #2
+ beq .src2_aligned_user
+ bhi .src3_aligned_user
+ mov r4, r4, lsr #8
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, r8, lsl #24
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #8
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #8
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #24
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #8
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b .exit
+
+.src2_aligned_user:
+ mov r4, r4, lsr #16
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, r8, lsl #16
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #16
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #16
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #16
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #16
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+USER_LDR( ldrb r4, [r0], #1)
+ b .exit
+
+.src3_aligned_user:
+ mov r4, r4, lsr #24
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1:
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+USER_LDR( ldrt r7, [r0], #4)
+USER_LDR( ldrt r8, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, r8, lsl #8
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #24
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+USER_LDR( ldrt r5, [r0], #4)
+USER_LDR( ldrt r6, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #24
+ tst ip, #4
+ beq 4f
+3:
+USER_LDR( ldrt r5, [r0], #4)
+ orr r4, r4, r5, lsl #8
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #24
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq .exit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+USER_LDR( ldrt r4, [r0], #4)
+ strb r4, [r1], #1
+ adcs r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ b .exit
+
+ .section .fixup,"ax"
+ .align 4
+6001: mov r4, #-EFAULT
+ ldr r5, [sp, #4*8]
+ str r4, [r5]
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+/* Function: __u32 csum_partial_copy (const char *src, char *dst, int len, __u32 sum)
+ * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum
+ * Returns : r0 = new checksum
+ */
+ENTRY(csum_partial_copy)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ cmp r2, #4
+ blt Ltoo_small
+ tst r1, #2 @ Test destination alignment
+ beq Ldst_aligned
+ subs r2, r2, #2 @ We dont know if SRC is aligned...
+ ldrb ip, [r0], #1
+ ldrb r8, [r0], #1
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ adcs r3, r3, #0
+ strb ip, [r1], #1
+ mov ip, ip, lsr #8
+ strb ip, [r1], #1 @ Destination now aligned
+Ldst_aligned: tst r0, #3
+ bne Lsrc_not_aligned
+ adds r3, r3, #0
+ bics ip, r2, #15 @ Routine for src & dst aligned
+ beq 3f
+1: ldmia r0!, {r4, r5, r6, r7}
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+3: ands ip, r2, #12
+ beq 5f
+ tst ip, #8
+ beq 4f
+ ldmia r0!, {r4, r5}
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ tst ip, #4
+ beq 5f
+4: ldr r4, [r0], #4
+ str r4, [r1], #4
+ adcs r3, r3, r4
+5: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ ldr r4, [r0], #4
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b Lexit
+
+Ltoo_small: teq r2, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ cmp r2, #2
+ blt Ltoo_small1
+ ldrb ip, [r0], #1
+ ldrb r8, [r0], #1
+ orr ip, ip, r8, lsl #8
+ adds r3, r3, ip
+ strb ip, [r1], #1
+ strb r8, [r1], #1
+Lexit: tst r2, #1
+Ltoo_small1: ldrneb ip, [r0], #1
+ strneb ip, [r1], #1
+ adcnes r3, r3, ip
+ adcs r0, r3, #0
+ LOADREGS(ea,fp,{r4 - r8, fp, sp, pc})
+
+Lsrc_not_aligned:
+ cmp r2, #4
+ blt Ltoo_small
+ and ip, r0, #3
+ bic r0, r0, #3
+ ldr r4, [r0], #4
+ cmp ip, #2
+ beq Lsrc2_aligned
+ bhi Lsrc3_aligned
+ mov r4, r4, lsr #8
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, r8, lsl #24
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #8
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #8
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #24
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #8
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ b Lexit
+
+Lsrc2_aligned: mov r4, r4, lsr #16
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, r8, lsl #16
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #16
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #16
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #16
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #16
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ mov r4, r4, lsr #8
+ strb r4, [r1], #1
+ ldrb r4, [r0], #1
+ b Lexit
+
+Lsrc3_aligned: mov r4, r4, lsr #24
+ adds r3, r3, #0
+ bics ip, r2, #15
+ beq 2f
+1: ldmia r0!, {r5, r6, r7, r8}
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, r8, lsl #8
+ stmia r1!, {r4, r5, r6, r7}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ adcs r3, r3, r6
+ adcs r3, r3, r7
+ mov r4, r8, lsr #24
+ sub ip, ip, #16
+ teq ip, #0
+ bne 1b
+2: ands ip, r2, #12
+ beq 4f
+ tst ip, #8
+ beq 3f
+ ldmia r0!, {r5, r6}
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ stmia r1!, {r4, r5}
+ adcs r3, r3, r4
+ adcs r3, r3, r5
+ mov r4, r6, lsr #24
+ tst ip, #4
+ beq 4f
+3: ldr r5, [r0], #4
+ orr r4, r4, r5, lsl #8
+ str r4, [r1], #4
+ adcs r3, r3, r4
+ mov r4, r5, lsr #24
+4: ands r2, r2, #3
+ adceq r0, r3, #0
+ LOADREGS(eqea,fp,{r4 - r8, fp, sp, pc})
+ tst r2, #2
+ beq Lexit
+ adcs r3, r3, r4, lsl #16
+ strb r4, [r1], #1
+ ldr r4, [r0], #4
+ strb r4, [r1], #1
+ adcs r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ b Lexit
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
new file mode 100644
index 000000000..72dab5a95
--- /dev/null
+++ b/arch/arm/lib/delay.S
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/arm/lib/delay.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+LC0: .word SYMBOL_NAME(loops_per_sec)
+
+ENTRY(udelay)
+ mov r2, #0x1000
+ orr r2, r2, #0x00c6
+ mul r1, r0, r2
+ ldr r2, LC0
+ ldr r2, [r2]
+ mov r1, r1, lsr #11
+ mov r2, r2, lsr #11
+ mul r0, r1, r2
+ movs r0, r0, lsr #10
+ RETINSTR(moveq,pc,lr)
+
+@ Delay routine
+ENTRY(__delay)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ RETINSTR(movcc,pc,lr)
+ subs r0, r0, #1
+ bcs SYMBOL_NAME(__delay)
+ RETINSTR(mov,pc,lr)
+
diff --git a/arch/arm/lib/floppydma.S b/arch/arm/lib/floppydma.S
new file mode 100644
index 000000000..08fdccb27
--- /dev/null
+++ b/arch/arm/lib/floppydma.S
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/arm/lib/floppydma.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+ .global SYMBOL_NAME(floppy_fiqin_end)
+ENTRY(floppy_fiqin_start)
+ subs r9, r9, #1
+ ldrgtb r12, [r11, #-4]
+ ldrleb r12, [r11], #0
+ strb r12, [r10], #1
+ subs pc, lr, #4
+SYMBOL_NAME(floppy_fiqin_end):
+
+ .global SYMBOL_NAME(floppy_fiqout_end)
+ENTRY(floppy_fiqout_start)
+ subs r9, r9, #1
+ ldrgeb r12, [r10], #1
+ movlt r12, #0
+ strleb r12, [r11], #0
+ subles pc, lr, #4
+ strb r12, [r11, #-4]
+ subs pc, lr, #4
+SYMBOL_NAME(floppy_fiqout_end):
+
+@ Params:
+@ r0 = length
+@ r1 = address
+@ r2 = floppy port
+@ Puts these into R9_fiq, R10_fiq, R11_fiq
+ENTRY(floppy_fiqsetup)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ MODE(r3,ip,I_BIT|F_BIT|DEFAULT_FIQ) @ disable FIQs, IRQs, FIQ mode
+ mov r0, r0
+ mov r9, r0
+ mov r10, r1
+ mov r11, r2
+ RESTOREMODE(r3) @ back to normal
+ mov r0, r0
+ LOADREGS(ea,fp,{fp, sp, pc})
+
+ENTRY(floppy_fiqresidual)
+ mov ip, sp
+ stmfd sp!, {fp, ip, lr, pc}
+ sub fp, ip, #4
+ MODE(r3,ip,I_BIT|F_BIT|DEFAULT_FIQ) @ disable FIQs, IRQs, FIQ mode
+ mov r0, r0
+ mov r0, r9
+ RESTOREMODE(r3)
+ mov r0, r0
+ LOADREGS(ea,fp,{fp, sp, pc})
diff --git a/arch/arm/lib/fp_support.c b/arch/arm/lib/fp_support.c
new file mode 100644
index 000000000..aaac3c766
--- /dev/null
+++ b/arch/arm/lib/fp_support.c
@@ -0,0 +1,22 @@
+/*
+ * linux/arch/arm/lib/fp_support.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/sched.h>
+#include <linux/linkage.h>
+
+extern void (*fp_save)(struct fp_soft_struct *);
+
+asmlinkage void fp_setup(void)
+{
+ struct task_struct *p;
+
+ p = &init_task;
+ do {
+ fp_save(&p->tss.fpstate.soft);
+ p = p->next_task;
+ }
+ while (p != &init_task);
+}
diff --git a/arch/arm/lib/getconsdata.c b/arch/arm/lib/getconsdata.c
new file mode 100644
index 000000000..901c1ad16
--- /dev/null
+++ b/arch/arm/lib/getconsdata.c
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/arm/lib/getconsdata.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/unistd.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+#define OFF_TSK(n) (unsigned long)&(((struct task_struct *)0)->n)
+#define OFF_MM(n) (unsigned long)&(((struct mm_struct *)0)->n)
+
+#ifdef KERNEL_DOMAIN
+unsigned long kernel_domain = KERNEL_DOMAIN;
+#endif
+#ifdef USER_DOMAIN
+unsigned long user_domain = USER_DOMAIN;
+#endif
+unsigned long addr_limit = OFF_TSK(addr_limit);
+unsigned long tss_memmap = OFF_TSK(tss.memmap);
+unsigned long mm = OFF_TSK(mm);
+unsigned long pgd = OFF_MM(pgd);
+unsigned long tss_save = OFF_TSK(tss.save);
+unsigned long tss_fpesave = OFF_TSK(tss.fpstate.soft.save);
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+unsigned long tss_memcmap = OFF_TSK(tss.memcmap);
+#endif
diff --git a/arch/arm/lib/getconstants.c b/arch/arm/lib/getconstants.c
new file mode 100644
index 000000000..edb67a5d3
--- /dev/null
+++ b/arch/arm/lib/getconstants.c
@@ -0,0 +1,74 @@
+/*
+ * linux/arch/arm/lib/getconstants.c
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <stdio.h>
+#include <linux/unistd.h>
+
+void printdef(char *def, int no)
+{
+ printf("#define %s\t%d\n", def, no);
+}
+
+#include "getconstants.h"
+
+int main()	/* emit constants.h for the assembly sources on stdout */
+{
+	printf("/*\n * constants.h generated by getconstants\n * DO NOT EDIT!\n */\n");
+
+	printf("#define _current\t_%s\n", "current_set");
+
+#ifdef _PAGE_PRESENT
+	printdef("PAGE_PRESENT", _PAGE_PRESENT);
+#endif
+#ifdef _PAGE_RW
+	printdef("PAGE_RW", _PAGE_RW);
+#endif
+#ifdef _PAGE_USER
+	printdef("PAGE_USER", _PAGE_USER);
+#endif
+#ifdef _PAGE_ACCESSED
+	printdef("PAGE_ACCESSED", _PAGE_ACCESSED);
+#endif
+#ifdef _PAGE_DIRTY
+	printdef("PAGE_DIRTY", _PAGE_DIRTY);
+#endif
+#ifdef _PAGE_READONLY
+	printdef("PAGE_READONLY", _PAGE_READONLY);
+#endif
+#ifdef _PAGE_NOT_USER
+	printdef("PAGE_NOT_USER", _PAGE_NOT_USER);
+#endif
+#ifdef _PAGE_OLD
+	printdef("PAGE_OLD", _PAGE_OLD);
+#endif
+#ifdef _PAGE_CLEAN
+	printdef("PAGE_CLEAN", _PAGE_CLEAN);
+#endif
+	printdef("TSS_MEMMAP", (int)tss_memmap);
+	printdef("TSS_SAVE", (int)tss_save);
+#ifdef __HAS_tss_memcmap
+	printdef("TSS_MEMCMAP", (int)tss_memcmap);
+#endif
+#ifdef __HAS_addr_limit
+	printdef("ADDR_LIMIT", (int)addr_limit);
+#endif
+#ifdef __HAS_kernel_domain
+	printdef("KERNEL_DOMAIN", kernel_domain);
+#endif
+#ifdef __HAS_user_domain
+	printdef("USER_DOMAIN", user_domain);
+#endif
+	printdef("TSS_FPESAVE", (int)tss_fpesave);
+	printdef("MM", (int)mm);
+	printdef("PGD", (int)pgd);
+
+	printf("#define KSWI_BASE 0x900000\n");
+	printf("#define KSWI_SYS_BASE 0x9F0000\n");
+	printf("#define SYS_ERROR0 0x9F0000\n");
+	return 0;
+}
diff --git a/arch/arm/lib/getconstants.h b/arch/arm/lib/getconstants.h
new file mode 100644
index 000000000..ef9637781
--- /dev/null
+++ b/arch/arm/lib/getconstants.h
@@ -0,0 +1,17 @@
+/*
+ * *** THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT! ***
+ */
+unsigned long addr_limit = 56;
+#define __HAS_addr_limit
+unsigned long tss_memmap = 640;
+#define __HAS_tss_memmap
+unsigned long mm = 1676;
+#define __HAS_mm
+unsigned long pgd = 8;
+#define __HAS_pgd
+unsigned long tss_save = 636;
+#define __HAS_tss_save
+unsigned long tss_fpesave = 492;
+#define __HAS_tss_fpesave
+unsigned long tss_memcmap = 644;
+#define __HAS_tss_memcmap
diff --git a/arch/arm/lib/io-acorn.S b/arch/arm/lib/io-acorn.S
new file mode 100644
index 000000000..172783b02
--- /dev/null
+++ b/arch/arm/lib/io-acorn.S
@@ -0,0 +1,215 @@
+/*
+ * linux/arch/arm/lib/io.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/config.h> /* for CONFIG_CPU_ARM2 and CONFIG_CPU_ARM3 */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/hardware.h>
+
+ .text
+ .align
+
+#define OUT(reg) \
+ mov r8, reg, lsl $16 ;\
+ orr r8, r8, r8, lsr $16 ;\
+ str r8, [r3, r0, lsl $2] ;\
+ mov r8, reg, lsr $16 ;\
+ orr r8, r8, r8, lsl $16 ;\
+ str r8, [r3, r0, lsl $2]
+
+#define IN(reg) \
+ ldr reg, [r0] ;\
+ and reg, reg, ip ;\
+ ldr lr, [r0] ;\
+ orr reg, reg, lr, lsl $16
+
+ .equ pcio_base_high, PCIO_BASE & 0xff000000
+ .equ pcio_base_low, PCIO_BASE & 0x00ff0000
+ .equ io_base_high, IO_BASE & 0xff000000
+ .equ io_base_low, IO_BASE & 0x00ff0000
+
+ .equ addr_io_diff_hi, pcio_base_high - io_base_high
+ .equ addr_io_diff_lo, pcio_base_low - io_base_low
+
+ .macro addr reg, off
+ tst \off, #0x80000000
+ .if addr_io_diff_hi
+ movne \reg, #IO_BASE
+ moveq \reg, #pcio_base_high
+ .if pcio_base_low
+ addeq \reg, \reg, #pcio_base_low
+ .endif
+ .else
+ mov \reg, #IO_BASE
+ addeq \reg, \reg, #addr_io_diff_lo
+ .endif
+ .endm
+
+@ Purpose: read a block of data from a hardware register to memory.
+@ Proto : insw(int from_port, void *to, int len_in_words);
+@ Proto : inswb(int from_port, void *to, int len_in_bytes);
+@ Notes : increment to
+
+ENTRY(insw)
+ mov r2, r2, lsl#1
+ENTRY(inswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r10 ,fp ,ip ,lr ,pc}
+ sub fp, ip, #4
+ addr r3, r0
+ add r0, r3, r0, lsl #2
+ tst r1, #3
+ beq Linswok
+ tst r1, #1
+ bne Linsw_notaligned
+ cmp r2, #1
+ ldrge r4, [r0]
+ strgeb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ ldmleea fp, {r4 - r10, fp, sp, pc}^
+ sub r2, r2, #2
+Linswok: mov ip, #0xFF
+ orr ip, ip, ip, lsl #8
+Linswlp: subs r2, r2, #64
+ bmi Linsw_toosmall
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ bne Linswlp
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+Linsw_toosmall:
+ adds r2, r2, #32
+ bmi Linsw_toosmall2
+Linsw2lp: IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ LOADREGS(eqea, fp, {r4 - r10, fp, sp, pc})
+ b Linsw_notaligned
+Linsw_toosmall2:
+ add r2, r2, #32
+Linsw_notaligned:
+ cmp r2, #1
+ LOADREGS(ltea, fp, {r4 - r10, fp, sp, pc})
+ ldr r4, [r0]
+ strb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ subs r2, r2, #2
+ bgt Linsw_notaligned
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+
+@ Purpose: write a block of data from memory to a hardware register.
+@ Proto : outsw(int to_reg, void *from, int len_in_words);
+@ Proto : outswb(int to_reg, void *from, int len_in_bytes);
+@ Notes : increments from
+
+ENTRY(outsw)
+ mov r2, r2, LSL#1
+ENTRY(outswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc}
+ sub fp, ip, #4
+ addr r3, r0
+ tst r1, #2
+ beq 1f
+ ldr r4, [r1], #2
+ mov r4, r4, lsl #16
+ orr r4, r4, r4, lsr #16
+ str r4, [r3, r0, lsl #2]
+ subs r2, r2, #2
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+1: subs r2, r2, #32
+ blt 2f
+ ldmia r1!, {r4, r5, r6, r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ ldmia r1!, {r4, r5, r6, r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ bne 1b
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+2: adds r2, r2, #32
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+3: ldr r4, [r1],#2
+ mov r4, r4, lsl#16
+ orr r4, r4, r4, lsr#16
+ str r4, [r3, r0, lsl#2]
+ subs r2, r2, #2
+ bgt 3b
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+
+@ Purpose: write a memc register
+@ Proto : void memc_write(int register, int value);
+@ Returns: nothing
+
+#if defined(CONFIG_CPU_ARM2) || defined(CONFIG_CPU_ARM3)
+ENTRY(memc_write)
+ cmp r0, #7
+ RETINSTR(movgt,pc,lr)
+ mov r0, r0, lsl #17
+ mov r1, r1, lsl #15
+ mov r1, r1, lsr #17
+ orr r0, r0, r1, lsl #2
+ add r0, r0, #0x03600000
+ strb r0, [r0]
+ RETINSTR(mov,pc,lr)
+#define CPSR2SPSR(rt)
+#else
+#define CPSR2SPSR(rt) \
+ mrs rt, cpsr; \
+ msr spsr, rt
+#endif
+
+@ Purpose: call an expansion card loader to read bytes.
+@ Proto : char read_loader(int offset, char *card_base, char *loader);
+@ Returns: byte read
+
+ENTRY(ecard_loader_read)
+ stmfd sp!, {r4 - r12, lr}
+ mov r11, r1
+ mov r1, r0
+ CPSR2SPSR(r0)
+ mov lr, pc
+ mov pc, r2
+ LOADREGS(fd, sp!, {r4 - r12, pc})
+
+@ Purpose: call an expansion card loader to reset the card
+@ Proto : void read_loader(int card_base, char *loader);
+@ Returns: byte read
+
+ENTRY(ecard_loader_reset)
+ stmfd sp!, {r4 - r12, lr}
+ mov r11, r0
+ CPSR2SPSR(r0)
+ mov lr, pc
+ add pc, r1, #8
+ LOADREGS(fd, sp!, {r4 - r12, pc})
diff --git a/arch/arm/lib/io-ebsa110.S b/arch/arm/lib/io-ebsa110.S
new file mode 100644
index 000000000..e0b8229a4
--- /dev/null
+++ b/arch/arm/lib/io-ebsa110.S
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/arm/lib/io-ebsa110.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+ .align
+
+@ OUT: write one 32-bit word as two 16-bit halves (low half first) to the
+@ port at address r3 + (r0 << 2).  Clobbers r8; leaves the flags alone.
+#define OUT(reg) \
+ mov r8, reg, lsl $16 ;\
+ orr r8, r8, r8, lsr $16 ;\
+ str r8, [r3, r0, lsl $2] ;\
+ mov r8, reg, lsr $16 ;\
+ orr r8, r8, r8, lsl $16 ;\
+ str r8, [r3, r0, lsl $2]
+
+@ IN: read two successive 16-bit values from the port at [r0] and pack
+@ them into one word (first read = low half).  ip must hold 0x0000ffff;
+@ clobbers lr; leaves the flags alone.
+#define IN(reg) \
+ ldr reg, [r0] ;\
+ and reg, reg, ip ;\
+ ldr lr, [r0] ;\
+ orr reg, reg, lr, lsl $16
+
+@ Purpose: read a block of data from a hardware register to memory.
+@ Proto : insw(int from_port, void *to, int len_in_words);
+@ Proto : inswb(int from_port, void *to, int len_in_bytes);
+@ Notes : increment to.  r2 = remaining byte count; r0 is translated to
+@         the full port address; ip holds the 0xffff mask used by IN().
+
+ENTRY(insw)
+ mov r2, r2, lsl#1 @ words -> bytes, share inswb's body
+ENTRY(inswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r10 ,fp ,ip ,lr ,pc} @ APCS frame
+ sub fp, ip, #4
+ cmp r0, #0x00c00000 @ select I/O base by port range
+ movge r3, #0
+ movlt r3, #0xf0000000
+ add r0, r3, r0, lsl #2 @ r0 = full port address
+ tst r1, #3
+ beq Linswok @ destination word-aligned
+ tst r1, #1
+ bne Linsw_notaligned @ odd destination: bytewise copy
+ cmp r2, #1 @ halfword-aligned: emit one halfword
+ ldrge r4, [r0]
+ strgeb r4, [r1], #1
+ movgt r4, r4, LSR#8
+ strgtb r4, [r1], #1
+ ldmleea fp, {r4 - r10, fp, sp, pc}^ @ <=1 byte total: done
+ sub r2, r2, #2
+Linswok: mov ip, #0xFF @ ip = 0xffff mask for IN()
+ orr ip, ip, ip, lsl #8
+@ Main loop: 64 bytes per iteration.  IN()/stmia do not touch the flags,
+@ so the subs result is still valid at the branches below.
+Linswlp: subs r2, r2, #64
+ bmi Linsw_toosmall
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ bne Linswlp
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+Linsw_toosmall:
+@ Bug fix: this must be "adds", not "add" -- the following bmi and the
+@ LOADREGS(eqea,...) after Linsw2lp test these flags.  With plain "add"
+@ the stale mi from the subs above made the bmi unconditional and left
+@ Linsw2lp unreachable (compare the "adds" in the outsw paths).
+ adds r2, r2, #32
+ bmi Linsw_toosmall2
+@ Exactly one 32-byte chunk fits (r2 >= 32 here).
+Linsw2lp: IN(r3)
+ IN(r4)
+ IN(r5)
+ IN(r6)
+ IN(r7)
+ IN(r8)
+ IN(r9)
+ IN(r10)
+ stmia r1!, {r3 - r10}
+ LOADREGS(eqea, fp, {r4 - r10, fp, sp, pc}) @ eq: exactly 32 remained
+ b Linsw_notaligned
+Linsw_toosmall2:
+ add r2, r2, #32 @ restore true remaining count
+Linsw_notaligned:
+ cmp r2, #1
+ LOADREGS(ltea, fp, {r4 - r10, fp, sp, pc})
+ ldr r4, [r0] @ r2 >= 1 here, so unconditional is safe
+ strb r4, [r1], #1
+ movgt r4, r4, LSR#8 @ second byte only if r2 >= 2
+ strgtb r4, [r1], #1
+ subs r2, r2, #2
+ bgt Linsw_notaligned
+ LOADREGS(ea, fp, {r4 - r10, fp, sp, pc})
+
+@ Purpose: write a block of data from memory to a hardware register.
+@ Proto : outsw(int to_reg, void *from, int len_in_words);
+@ Proto : outswb(int to_reg, void *from, int len_in_bytes);
+@ Notes : increments from.  r2 = remaining byte count; r3 = I/O base;
+@         the port address used by OUT() is r3 + (r0 << 2).
+
+ENTRY(outsw)
+ mov r2, r2, LSL#1 @ words -> bytes, share outswb's body
+ENTRY(outswb)
+ mov ip, sp
+ stmfd sp!, {r4 - r8, fp, ip, lr, pc} @ APCS frame
+ sub fp, ip, #4
+ cmp r0, #0x00c00000 @ select I/O base by port range
+ movge r3, #0
+ movlt r3, #0xf0000000
+ tst r1, #2 @ source halfword- but not word-aligned?
+ beq Loutsw32lp
+ ldr r4, [r1], #2 @ emit one halfword to word-align the source
+ mov r4, r4, lsl #16
+ orr r4, r4, r4, lsr #16
+ str r4, [r3, r0, lsl #2]
+ sub r2, r2, #2
+ teq r2, #0
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc}) @ done if that was everything
+@ Main loop: 32 bytes per iteration.  OUT/ldmia leave the subs flags
+@ intact, so the eq test below still reflects the updated count.
+Loutsw32lp: subs r2,r2,#32
+ blt Loutsw_toosmall
+ ldmia r1!,{r4,r5,r6,r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ ldmia r1!,{r4,r5,r6,r7}
+ OUT(r4)
+ OUT(r5)
+ OUT(r6)
+ OUT(r7)
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc}) @ eq from the subs: all done
+ b Loutsw32lp
+Loutsw_toosmall:
+ adds r2,r2,#32 @ 0..31 bytes left
+ LOADREGS(eqea, fp, {r4 - r8, fp, sp, pc})
+Llpx: ldr r4,[r1],#2 @ trailing data, one halfword per iteration
+ mov r4,r4,LSL#16
+ orr r4,r4,r4,LSR#16
+ str r4,[r3,r0,LSL#2]
+ subs r2,r2,#2
+ bgt Llpx
+ LOADREGS(ea, fp, {r4 - r8, fp, sp, pc})
+
diff --git a/arch/arm/lib/ll_char_wr.S b/arch/arm/lib/ll_char_wr.S
new file mode 100644
index 000000000..7df08d93b
--- /dev/null
+++ b/arch/arm/lib/ll_char_wr.S
@@ -0,0 +1,157 @@
+/*
+ * linux/arch/arm/lib/ll_char_wr.S
+ *
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * Speedups & 1bpp code (C) 1996 Philip Blundel & Russell King.
+ *
+ * 10-04-96 RMK Various cleanups & reduced register usage.
+ */
+
+@ Regs: [] = corruptable
+@ {} = used
+@ () = dont use
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ Character attribute flags, packed into bits 24+ of the attribute word.
+#define BOLD 0x01
+#define ITALIC 0x02
+#define UNDERLINE 0x04
+#define FLASH 0x08
+#define INVERSE 0x10
+
+@ Addresses of the console parameters this routine needs.
+LC0: .word SYMBOL_NAME(bytes_per_char_h)
+ .word SYMBOL_NAME(video_size_row)
+ .word SYMBOL_NAME(cmap_80)
+ .word SYMBOL_NAME(con_charconvtable)
+
+@ Render one character cell into the framebuffer.
+@ In : r0 = framebuffer address of the character cell
+@      r1 = packed word: bits 0-7 = character code, bits 8-15 and 16-23 =
+@           the two colours (swapped when INVERSE is set), bits 24+ = flags
+@ The depth (bytes_per_char_h) selects the 1bpp, 4bpp or 8bpp renderer;
+@ rows are drawn bottom-up in the multi-bpp paths.
+ENTRY(ll_write_char)
+ stmfd sp!, {r4 - r7, lr}
+@
+@ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+ eor ip, r1, #UNDERLINE << 24 @ ip: UNDERLINE flag pre-inverted for tests below
+/*
+ * calculate colours
+ */
+ tst r1, #INVERSE << 24
+ moveq r2, r1, lsr #8 @ normal: r2 = fg, r3 = bg
+ moveq r3, r1, lsr #16
+ movne r2, r1, lsr #16 @ inverse video: swap the two colours
+ movne r3, r1, lsr #8
+ and r3, r3, #255
+ and r2, r2, #255
+/*
+ * calculate offset into character table
+ */
+ and r1, r1, #255
+ mov r1, r1, lsl #3 @ 8 bytes of font data per character
+/*
+ * calculate offset required for each row [maybe I should make this an argument to this fn.
+ * Have to see what the register usage is like in the calling routines.
+ */
+ adr r4, LC0
+ ldmia r4, {r4, r5, r6, lr} @ r4/r5/r6/lr = the four LC0 pointers
+ ldr r4, [r4] @ r4 = bytes_per_char_h (depth selector)
+ ldr r5, [r5] @ r5 = video_size_row (bytes per scanline)
+/*
+ * Go to resolution-dependent routine...
+ */
+ cmp r4, #4
+ blt Lrow1bpp
+ eor r2, r3, r2 @ Create eor mask to change colour from bg
+ orr r3, r3, r3, lsl #8 @ to fg.
+ orr r3, r3, r3, lsl #16
+ add r0, r0, r5, lsl #3 @ Move to bottom of character
+ add r1, r1, #7 @ start with the last font row
+ ldrb r7, [r6, r1] @ r7 = font bitmap for this row
+ tst ip, #UNDERLINE << 24
+ eoreq r7, r7, #255 @ underline: invert the bottom row
+ teq r4, #8
+ beq Lrow8bpplp
+@
+@ 4bpp renderer: one word per row via con_charconvtable lookup.
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+ orr r3, r3, r3, lsl #4
+Lrow4bpplp: ldr r7, [lr, r7, lsl #2] @ expand 8 font bits to 8 nibbles
+ mul r7, r2, r7 @ apply fg/bg eor mask per pixel
+ tst r1, #7 @ avoid using r7 directly after
+ eor ip, r3, r7
+ str ip, [r0, -r5]! @ write row, stepping up one scanline
+ LOADREGS(eqfd, sp!, {r4 - r7, pc}) @ row 0 done -> return
+ sub r1, r1, #1
+ ldrb r7, [r6, r1]
+ ldr r7, [lr, r7, lsl #2] @ second (unrolled) row
+ mul r7, r2, r7
+ tst r1, #7 @ avoid using r7 directly after
+ eor ip, r3, r7
+ str ip, [r0, -r5]!
+ subne r1, r1, #1
+ ldrneb r7, [r6, r1]
+ bne Lrow4bpplp
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+@
+@ 8bpp renderer: two words per row (4 bits of font per lookup).
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+Lrow8bpplp: mov ip, r7, lsr #4 @ high nibble of the font row
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15 @ low nibble
+ eor r4, r3, r4
+ ldr ip, [lr, ip, lsl #2]
+ mul ip, r2, ip
+ tst r1, #7
+ eor ip, r3, ip
+ sub r0, r0, r5 @ step up one scanline
+ stmia r0, {r4, ip}
+ LOADREGS(eqfd, sp!, {r4 - r7, pc}) @ row 0 done -> return
+ sub r1, r1, #1
+ ldrb r7, [r6, r1]
+ mov ip, r7, lsr #4 @ second (unrolled) row
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15
+ eor r4, r3, r4
+ ldr ip, [lr, ip, lsl #2]
+ mul ip, r2, ip
+ tst r1, #7
+ eor ip, r3, ip
+ sub r0, r0, r5
+ stmia r0, {r4, ip}
+ subne r1, r1, #1
+ ldrneb r7, [r6, r1]
+ bne Lrow8bpplp
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+@
+@ 1bpp renderer: the 8 font rows are written top-down, one byte per row.
+@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+Lrow1bpp: add r6, r6, r1
+ ldmia r6, {r4, r7} @ r4 = rows 0-3, r7 = rows 4-7 of font data
+ tst ip, #INVERSE << 24
+ mvnne r4, r4 @ inverse video: complement all rows
+ mvnne r7, r7
+ strb r4, [r0], r5 @ one row per scanline, top-down
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ tst ip, #UNDERLINE << 24
+ mvneq r7, r7 @ underline: invert the bottom row
+ strb r7, [r0], r5
+ LOADREGS(fd, sp!, {r4 - r7, pc})
+
+ .bss
+@ 1K table expanding 8 font bits / 4 font bits into pixel masks;
+@ filled in at runtime by the console initialisation code (not visible here).
+ENTRY(con_charconvtable)
+ .space 1024
diff --git a/arch/arm/lib/loaders.S b/arch/arm/lib/loaders.S
new file mode 100644
index 000000000..760e2e311
--- /dev/null
+++ b/arch/arm/lib/loaders.S
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/lib/loaders.S
+ *
+ * This file contains the ROM loaders for buggy cards
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Oak SCSI
+ */
+
+@ Loader blob for the Oak SCSI card.  Layout fixed by ecard_loader_*:
+@ entry+0 = read a ROM byte (r1 = offset, r11 = card base, byte -> r0),
+@ entry+8 = reset (reached via ecard_loader_reset's "add pc, r1, #8").
+ENTRY(oak_scsi_loader)
+ b Loak_scsi_read
+ .word 0 @ pad so the reset entry lands at offset 8
+Loak_scsi_reset: bic r10, r11, #0x00ff0000
+ ldr r2, [r10] @ touching this address resets the paging latch
+ @ (presumed -- TODO confirm against card docs)
+ RETINSTR(mov,pc,lr)
+
+Loak_scsi_read: mov r2, r1, lsr #3
+ and r2, r2, #15 << 9 @ page select bits from the offset
+ bic r10, r11, #0x00ff0000
+ ldr r2, [r10, r2] @ select the ROM page (address-triggered)
+ mov r2, r1, lsl #20 @ keep low offset bits...
+ ldrb r0, [r11, r2, lsr #18] @ ...and fetch the byte from the window
+ ldr r2, [r10] @ restore the latch
+ RETINSTR(mov,pc,lr)
+
+@ Loader blob for the Atomwide serial card; same fixed layout:
+@ entry+0 = read (r1 = offset, r11 = card base), entry+8 = reset.
+@ The ROM is banked through a page register at card_base + 0x3c00.
+ENTRY(atomwide_serial_loader)
+ b Latomwide_serial_read
+ .word 0 @ pad so the reset entry lands at offset 8
+Latomwide_serial_reset: mov r2, #0x3c00
+ strb r2, [r11, r2] @ reset the page register
+ RETINSTR(mov,pc,lr)
+
+Latomwide_serial_read: cmp r1, #0x8000 @ 32K ROM limit
+ RETINSTR(movhi,pc,lr) @ out of range: return without reading
+ add r0, r1, #0x800
+ mov r0, r0, lsr #11 @ 2K page number for this offset
+ mov r3, #0x3c00
+ strb r0, [r11, r3] @ select the page
+ mov r2, r1, lsl #21 @ offset within the 2K page...
+ ldrb r0, [r11, r2, lsr #19] @ ...fetch the byte
+ strb r2, [r11, r3] @ write page register back (low byte of r2;
+ @ NOTE(review): restores page 0? confirm)
+ RETINSTR(mov,pc,lr)
+
+/*
+ * Cards we don't know about yet
+ */
+@ Fallback loader: always "reads" zero.
+ENTRY(noloader)
+ mov r0, r0 @ nop pad keeping the entry-point layout
+ mov r0, #0
+ RETINSTR(mov,pc,lr)
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
new file mode 100644
index 000000000..209768f9f
--- /dev/null
+++ b/arch/arm/lib/memcpy.S
@@ -0,0 +1,312 @@
+/*
+ * linux/arch/arm/lib/memcpy.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ * Except memcpy/memmove routine.
+ */
+
+#include <asm/assembler.h>
+#include <linux/linkage.h>
+
+ .text
+@ Standard APCS frame setup/teardown shared by memcpy/memmove below.
+@ ENTER builds the frame; EXIT unwinds it unconditionally, EXITEQ only
+@ when the Z flag is set.
+#define ENTER \
+ mov ip,sp ;\
+ stmfd sp!,{r4-r9,fp,ip,lr,pc} ;\
+ sub fp,ip,#4
+
+#define EXIT \
+ LOADREGS(ea, fp, {r4 - r9, fp, sp, pc})
+
+#define EXITEQ \
+ LOADREGS(eqea, fp, {r4 - r9, fp, sp, pc})
+
+# Prototype: void memcpy(void *to,const void *from,unsigned long n);
+# ARM3: cant use memcopy here!!!
+
+@ memmove-safe copy: forwards when from >= to, backwards otherwise.
+@ Register use: r0 = dst, r1 = src, r2 = bytes remaining minus 4
+@ (biased by the initial subs), r3-r9/ip = data.
+ENTRY(memcpy)
+ENTRY(memmove)
+ ENTER
+ cmp r1, r0
+ bcc 19f @ src below dst: copy descending (overlap-safe)
+ subs r2, r2, #4
+ blt 6f @ fewer than 4 bytes: byte tail only
+ ands ip, r0, #3
+ bne 7f @ word-align the destination first
+ ands ip, r1, #3
+ bne 8f @ dst aligned, src not: funnel-shift copy
+
+@ Both pointers word-aligned: bulk ascending copy.
+1: subs r2, r2, #8
+ blt 5f
+ subs r2, r2, #0x14
+ blt 3f
+@ 32 bytes per iteration.
+2: ldmia r1!,{r3 - r9, ip}
+ stmia r0!,{r3 - r9, ip}
+ subs r2, r2, #32
+ bge 2b
+ cmn r2, #16
+ ldmgeia r1!, {r3 - r6} @ one optional 16-byte chunk
+ stmgeia r0!, {r3 - r6}
+ subge r2, r2, #0x10
+3: adds r2, r2, #0x14
+4: ldmgeia r1!, {r3 - r5} @ 12 bytes per iteration
+ stmgeia r0!, {r3 - r5}
+ subges r2, r2, #12
+ bge 4b
+5: adds r2, r2, #8
+ blt 6f
+ subs r2, r2, #4
+ ldrlt r3, [r1], #4 @ 4..7 bytes: one or two words
+ strlt r3, [r0], #4
+ ldmgeia r1!, {r3, r4}
+ stmgeia r0!, {r3, r4}
+ subge r2, r2, #4
+
+@ Trailing 0..3 bytes (r2 still biased by -4).
+6: adds r2, r2, #4
+ EXITEQ
+ cmp r2, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ EXIT
+
+@ Copy 1-3 bytes to word-align the destination, then retry.
+7: rsb ip, ip, #4
+ cmp ip, #2
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ ldrgeb r3, [r1], #1
+ strgeb r3, [r0], #1
+ ldrgtb r3, [r1], #1
+ strgtb r3, [r0], #1
+ subs r2, r2, ip
+ blt 6b
+ ands ip, r1, #3
+ beq 1b
+@ Destination aligned, source offset by ip = 1, 2 or 3 bytes: read whole
+@ words and funnel-shift into place; r7 carries bytes between iterations.
+8: bic r1, r1, #3
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt 15f @ source offset 3
+ beq 11f @ source offset 2
+ cmp r2, #12
+ blt 10f
+ sub r2, r2, #12
+@ Source offset 1: 16 bytes per iteration.
+9: mov r3, r7, lsr #8
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 9b
+ adds r2, r2, #12
+@ Bug fix: was "blt 1b", which re-entered the aligned bulk copy with r1
+@ three bytes ahead, corrupting the byte tail.  Like the offset-2/-3
+@ paths ("blt 14f" / "blt 18f"), this must go to the source fixup below.
+ blt 100f
+10: mov r3, r7, lsr #8 @ one word per iteration
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #24
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 10b
+100: sub r1, r1, #3 @ point src back at the next unread byte
+ b 6b
+
+@ Source offset 2.
+11: cmp r2, #12
+ blt 13f /* */
+ sub r2, r2, #12
+12: mov r3, r7, lsr #16
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7,LSL#16
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 12b
+ adds r2, r2, #12
+ blt 14f
+13: mov r3, r7, lsr #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #16
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 13b
+14: sub r1, r1, #2 @ point src back at the next unread byte
+ b 6b
+
+@ Source offset 3.
+15: cmp r2, #12
+ blt 17f
+ sub r2, r2, #12
+16: mov r3, r7, lsr #24
+ ldmia r1!,{r4 - r7}
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6}
+ subs r2, r2, #16
+ bge 16b
+ adds r2, r2, #12
+ blt 18f
+17: mov r3, r7, lsr #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl#8
+ str r3, [r0], #4
+ subs r2, r2, #4
+ bge 17b
+18: sub r1, r1, #1 @ point src back at the next unread byte
+ b 6b
+
+
+@ Descending copy (dst overlaps above src): mirror of the code above,
+@ starting from the end of both buffers.
+19: add r1, r1, r2
+ add r0, r0, r2
+ subs r2, r2, #4
+ blt 24f
+ ands ip, r0, #3
+ bne 25f
+ ands ip, r1, #3
+ bne 26f
+
+@ Both aligned: bulk descending copy.
+20: subs r2, r2, #8
+ blt 23f
+ subs r2, r2, #0x14
+ blt 22f
+21: ldmdb r1!, {r3 - r9, ip} @ 32 bytes per iteration
+ stmdb r0!, {r3 - r9, ip}
+ subs r2, r2, #32
+ bge 21b
+22: cmn r2, #16
+ ldmgedb r1!, {r3 - r6} @ one optional 16-byte chunk
+ stmgedb r0!, {r3 - r6}
+ subge r2, r2, #16
+ adds r2, r2, #20
+ ldmgedb r1!, {r3 - r5} @ one optional 12-byte chunk
+ stmgedb r0!, {r3 - r5}
+ subge r2, r2, #12
+23: adds r2, r2, #8
+ blt 24f
+ subs r2, r2, #4
+ ldrlt r3, [r1, #-4]!
+ strlt r3, [r0, #-4]!
+ ldmgedb r1!, {r3, r4}
+ stmgedb r0!, {r3, r4}
+ subge r2, r2, #4
+
+@ Trailing 0..3 bytes, descending.
+24: adds r2, r2, #4
+ EXITEQ
+ cmp r2, #2
+ ldrb r3, [r1, #-1]!
+ strb r3, [r0, #-1]!
+ ldrgeb r3, [r1, #-1]!
+ strgeb r3, [r0, #-1]!
+ ldrgtb r3, [r1, #-1]!
+ strgtb r3, [r0, #-1]!
+ EXIT
+
+@ Align the destination (descending).
+25: cmp ip, #2
+ ldrb r3, [r1, #-1]!
+ strb r3, [r0, #-1]!
+ ldrgeb r3, [r1, #-1]!
+ strgeb r3, [r0, #-1]!
+ ldrgtb r3, [r1, #-1]!
+ strgtb r3, [r0, #-1]!
+ subs r2, r2, ip
+ blt 24b
+ ands ip, r1, #3
+ beq 20b
+
+@ Descending funnel-shift copy for unaligned source; r3 carries bytes.
+26: bic r1, r1, #3
+ ldr r3, [r1], #0
+ cmp ip, #2
+ blt 34f @ source offset 1
+ beq 30f @ source offset 2
+ cmp r2, #12
+ blt 28f
+ sub r2, r2, #12
+@ Source offset 3, 16 bytes per iteration.
+27: mov r7, r3, lsl #8
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #24
+ mov r6, r6, lsl #8
+ orr r6, r6, r5, lsr #24
+ mov r5, r5, lsl #8
+ orr r5, r5, r4, lsr #24
+ mov r4, r4, lsl #8
+ orr r4, r4, r3, lsr #24
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 27b
+ adds r2, r2, #12
+ blt 29f
+28: mov ip, r3, lsl #8
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #24
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 28b
+29: add r1, r1, #3 @ point src back at the next unread byte
+ b 24b
+
+@ Source offset 2.
+30: cmp r2, #12
+ blt 32f
+ sub r2, r2, #12
+31: mov r7, r3, lsl #16
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #16
+ mov r6, r6, lsl #16
+ orr r6, r6, r5, lsr #16
+ mov r5, r5, lsl #16
+ orr r5, r5, r4, lsr #16
+ mov r4, r4, lsl #16
+ orr r4, r4, r3, lsr #16
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 31b
+ adds r2, r2, #12
+ blt 33f
+32: mov ip, r3, lsl #16
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #16
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 32b
+33: add r1, r1, #2 @ point src back at the next unread byte
+ b 24b
+
+@ Source offset 1.
+34: cmp r2, #12
+ blt 36f
+ sub r2, r2, #12
+35: mov r7, r3, lsl #24
+ ldmdb r1!, {r3, r4, r5, r6}
+ orr r7, r7, r6, lsr #8
+ mov r6, r6, lsl #24
+ orr r6, r6, r5, lsr #8
+ mov r5, r5, lsl #24
+ orr r5, r5, r4, lsr #8
+ mov r4, r4, lsl #24
+ orr r4, r4, r3, lsr #8
+ stmdb r0!, {r4, r5, r6, r7}
+ subs r2, r2, #16
+ bge 35b
+ adds r2, r2, #12
+ blt 37f
+36: mov ip, r3, lsl #24
+ ldr r3, [r1, #-4]!
+ orr ip, ip, r3, lsr #8
+ str ip, [r0, #-4]!
+ subs r2, r2, #4
+ bge 36b
+37: add r1, r1, #1 @ point src back at the next unread byte
+ b 24b
+
+ .align
+
diff --git a/arch/arm/lib/memfastset.S b/arch/arm/lib/memfastset.S
new file mode 100644
index 000000000..a7e8a5d29
--- /dev/null
+++ b/arch/arm/lib/memfastset.S
@@ -0,0 +1,35 @@
+/*
+ * linux/arch/arm/lib/memfastset.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+@ Prototype: void memsetl (unsigned long *d, unsigned long c, size_t n);
+@ n is a byte count; any final 1-3 bytes (n not a multiple of 4) are
+@ simply not written.  r1/r3/ip/lr all hold the fill word.
+
+ENTRY(memsetl)
+ stmfd sp!, {lr}
+ cmp r2, #16
+ blt 5f @ small: word-at-a-time loop only
+ mov r3, r1 @ replicate the fill word for stm bursts
+ mov ip, r1
+ mov lr, r1
+ subs r2, r2, #32
+ bmi 2f
+1: stmia r0!, {r1, r3, ip, lr} @ 32 bytes per iteration
+ stmia r0!, {r1, r3, ip, lr}
+ LOADREGS(eqfd, sp!, {pc}) @ eq from the subs: exact multiple, done
+ subs r2, r2, #32
+ bpl 1b
+2: adds r2, r2, #16
+ bmi 4f
+3: stmia r0!, {r1, r3, ip, lr} @ one optional 16-byte chunk
+ LOADREGS(eqfd, sp!, {pc})
+ subs r2, r2, #16
+ bpl 3b
+4: add r2, r2, #16
+5: subs r2, r2, #4 @ trailing words
+ strge r1, [r0], #4
+ bgt 5b
+ LOADREGS(fd, sp!, {pc})
diff --git a/arch/arm/lib/string.S b/arch/arm/lib/string.S
new file mode 100644
index 000000000..b54c902a4
--- /dev/null
+++ b/arch/arm/lib/string.S
@@ -0,0 +1,139 @@
+/*
+ * linux/arch/arm/lib/string.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+# Prototype: char *strrchr(const char *s,char c);
+
+@ r0 = pointer, r1 = length
+@ Zero a buffer in 32-byte bursts.
+@ NOTE(review): only whole 32-byte groups are cleared -- a length that is
+@ not a multiple of 32 leaves the final (length mod 32) bytes untouched.
+@ Callers appear to rely on 32-byte-multiple sizes; confirm.
+ .global memzero
+memzero: stmfd sp!, {lr}
+ mov r2, #0 @ four zero registers for stm bursts
+ mov r3, #0
+ mov ip, #0
+ mov lr, #0
+1: subs r1, r1, #4*8
+ stmgeia r0!, {r2, r3, ip, lr} @ 32 bytes per iteration
+ stmgeia r0!, {r2, r3, ip, lr}
+ bgt 1b
+ LOADREGS(fd, sp!, {pc})
+
+@ Fast word-aligned copy in 32-byte bursts: r0 = dst, r1 = src,
+@ r2 = length in bytes.  Like memzero above, only whole 32-byte groups
+@ are copied, so r2 is assumed to be a multiple of 32 (page copies) --
+@ NOTE(review): confirm callers never pass a partial tail.
+ .global __page_memcpy
+__page_memcpy: stmfd sp!, {r4, r5, lr}
+1: subs r2, r2, #4*8
+ ldmgeia r1!, {r3, r4, r5, ip} @ 32 bytes per iteration
+ stmgeia r0!, {r3, r4, r5, ip}
+ ldmgeia r1!, {r3, r4, r5, ip}
+ stmgeia r0!, {r3, r4, r5, ip}
+ bgt 1b
+ LOADREGS(fd, sp!, {r4, r5, pc})
+
+@ void *memset(void *s, int c, size_t n)
+@ r3 is the write cursor so that r0 (the return value) stays intact.
+ .global memset
+memset: mov r3, r0
+ cmp r2, #16
+ blt 6f @ tiny: plain byte loop
+ ands ip, r3, #3
+ beq 1f
+ cmp ip, #2 @ write 4-ip bytes: lt->3, eq->2, gt->1
+ strltb r1, [r3], #1 @ Align destination
+ strleb r1, [r3], #1
+ strb r1, [r3], #1
+ rsb ip, ip, #4
+ sub r2, r2, ip @ r2 >= 13 from here on
+1: orr r1, r1, r1, lsl #8 @ replicate the byte through the word
+ orr r1, r1, r1, lsl #16
+ cmp r2, #256
+ blt 4f
+ stmfd sp!, {r4, r5, lr}
+ mov r4, r1 @ four copies of the fill word
+ mov r5, r1
+ mov lr, r1
+ mov ip, r2, lsr #6 @ ip = number of 64-byte chunks
+ sub r2, r2, ip, lsl #6
+2: stmia r3!, {r1, r4, r5, lr} @ 64 bytes at a time.
+ stmia r3!, {r1, r4, r5, lr}
+ stmia r3!, {r1, r4, r5, lr}
+ stmia r3!, {r1, r4, r5, lr}
+ subs ip, ip, #1
+ bne 2b
+ teq r2, #0
+ LOADREGS(eqfd, sp!, {r4, r5, pc}) @ Now <64 bytes to go.
+ tst r2, #32
+ stmneia r3!, {r1, r4, r5, lr}
+ stmneia r3!, {r1, r4, r5, lr}
+ tst r2, #16
+ stmneia r3!, {r1, r4, r5, lr}
+ ldmia sp!, {r4, r5} @ saved lr stays pushed for the final pop
+3: tst r2, #8 @ <8 bytes left; lr holds the fill word here
+ stmneia r3!, {r1, lr}
+ tst r2, #4
+ strne r1, [r3], #4
+ tst r2, #2
+ strneb r1, [r3], #1
+ strneb r1, [r3], #1
+ tst r2, #1
+ strneb r1, [r3], #1
+ LOADREGS(fd, sp!, {pc})
+
+@ Medium (16..255 bytes): 8-byte pairs.
+4: movs ip, r2, lsr #3 @ r2 >= 13 here, so ip != 0 and the
+ beq 3b @ beq (which assumes a pushed lr) never fires
+ sub r2, r2, ip, lsl #3
+ stmfd sp!, {lr}
+ mov lr, r1
+ subs ip, ip, #4
+5: stmgeia r3!, {r1, lr} @ four pairs (32 bytes) per iteration
+ stmgeia r3!, {r1, lr}
+ stmgeia r3!, {r1, lr}
+ stmgeia r3!, {r1, lr}
+ subges ip, ip, #4
+ bge 5b
+ tst ip, #2
+ stmneia r3!, {r1, lr}
+ stmneia r3!, {r1, lr}
+ tst ip, #1
+ stmneia r3!, {r1, lr}
+ teq r2, #0
+ LOADREGS(eqfd, sp!, {pc})
+ b 3b @ finish the 0..7 byte tail
+
+6: subs r2, r2, #1 @ byte-at-a-time for n < 16
+ strgeb r1, [r3], #1
+ bgt 6b
+ RETINSTR(mov, pc, lr)
+
+@ char *strrchr(const char *s, char c)
+@ Returns the address of the last occurrence of c, or NULL.
+ENTRY(strrchr)
+ stmfd sp!, {lr}
+ mov r3, #0 @ r3 = last match seen (NULL so far)
+1: ldrb r2, [r0], #1
+ teq r2, r1
+@ Bug fix: was "moveq r3, r0", which recorded the post-incremented
+@ pointer and so returned match+1.  Back up one, exactly as strchr
+@ does with its "subne r0, r0, #1" fixup.
+ subeq r3, r0, #1
+ teq r2, #0
+ bne 1b
+ mov r0, r3
+ LOADREGS(fd, sp!, {pc})
+
+@ char *strchr(const char *s, char c)
+@ Returns the address of the first occurrence of c, or NULL.
+ENTRY(strchr)
+ stmfd sp!,{lr}
+ mov r3, #0
+1: ldrb r2, [r0], #1
+ teq r2, r1 @ stop on a match...
+ teqne r2, #0 @ ...or at the terminator
+ bne 1b
+ teq r2, #0 @ which one stopped us?
+ moveq r0, #0 @ hit NUL first: not found
+ subne r0, r0, #1 @ undo the post-increment: point at the match
+ LOADREGS(fd, sp!, {pc})
+
+@ void *memchr(const void *s, int c, size_t n)
+@ Returns the address of the first byte equal to c, or NULL.
+@ NOTE(review): the byte is tested before the count, so s[0] is read
+@ even when n == 0 -- confirm no caller passes a zero length on an
+@ unmapped boundary.
+ENTRY(memchr)
+ stmfd sp!, {lr}
+1: ldrb r3, [r0], #1
+ teq r3, r1
+ beq 2f @ found (Z set on arrival at 2)
+ subs r2, r2, #1
+ bpl 1b @ exhausted: arrive at 2 with Z clear
+2: movne r0, #0 @ not found
+ subeq r0, r0, #1 @ undo the post-increment: point at the match
+ LOADREGS(fd, sp!, {pc})
diff --git a/arch/arm/lib/system.S b/arch/arm/lib/system.S
new file mode 100644
index 000000000..54ea4d9a4
--- /dev/null
+++ b/arch/arm/lib/system.S
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/arm/lib/system.S
+ *
+ * Copyright (C) 1995, 1996 Russell King
+ *
+ * 07/06/96: Now support tasks running in SVC mode.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+@ Kernel abort(): report where we were called from, then panic.
+@ Tail-calls panic(".abort_msg", lr); never returns.
+ENTRY(abort)
+ adr r0, .abort_msg @ panic format string
+ mov r1, lr @ %p argument: the caller's address
+ b SYMBOL_NAME(panic)
+
+.abort_msg: .ascii "Eek! Got to an abort() from %p! "
+ .ascii "(Please report to rmk@ecs.soton.ac.uk)\n\0"
+ .align
diff --git a/arch/arm/lib/testm.c b/arch/arm/lib/testm.c
new file mode 100644
index 000000000..88e815605
--- /dev/null
+++ b/arch/arm/lib/testm.c
@@ -0,0 +1,81 @@
/*
 * Stand-alone user-space test harness for the memset()/memcpy()
 * implementations in this directory.
 *
 * For every length 1..1019 and every combination of word misalignment
 * (0..3) of source and destination it fills guard bytes around the
 * target area, runs the routine, and reports wrong bytes, under-runs
 * and over-runs.  Apart from the three section banners, silence means
 * success.
 *
 * Fixes over the original:
 *  - <stdio.h>/<string.h> included (printf/memset/memcpy were
 *    implicitly declared);
 *  - buffers and pointers are unsigned char: with a signed "char",
 *    comparisons against 0xaa can never succeed, so every byte was
 *    reported as an error on signed-char targets;
 *  - pointers are printed with %p (passing a pointer for %X is
 *    undefined behaviour);
 *  - main() returns 0.
 */
#include <stdio.h>
#include <string.h>

/* 1020 bytes of payload plus slack for the alignment offsets and the
 * four guard bytes checked on each side of the target area. */
unsigned char buffer[1036];
unsigned char buffer2[1036];

int main (void)
{
	unsigned char *p;
	int i, o, o2, l;

	printf ("Testing memset\n");
	for (l = 1; l < 1020; l ++) {
		for (o = 0; o < 4; o++) {
			p = buffer + o + 4;
			/* 0x55 guard pattern everywhere, 0xaa fill inside. */
			for (i = 0; i < l + 12; i++)
				buffer[i] = 0x55;

			memset (p, 0xaa, l);

			for (i = 0; i < l; i++)
				if (p[i] != 0xaa)
					printf ("Error: %p+%d\n", (void *)p, i);
			if (p[-1] != 0x55 || p[-2] != 0x55 || p[-3] != 0x55 || p[-4] != 0x55)
				printf ("Error before %p\n", (void *)p);
			if (p[l] != 0x55 || p[l+1] != 0x55 || p[l+2] != 0x55 || p[l+3] != 0x55)
				printf ("Error at end: %p: %02X %02X %02X %02X\n", (void *)(p+l), p[l], p[l+1], p[l+2], p[l+3]);
		}
	}

	printf ("Testing memcpy s > d\n");
	for (l = 1; l < 1020; l++) {
		for (o = 0; o < 4; o++) {
			for (o2 = 0; o2 < 4; o2++) {
				unsigned char *d, *s;

				/* Recognisable ramp in the source, zeroed destination. */
				for (i = 0; i < l + 12; i++)
					buffer[i] = (i & 0x3f) + 0x40;
				for (i = 0; i < 1036; i++)
					buffer2[i] = 0;

				s = buffer + o;
				d = buffer2 + o2 + 4;

				memcpy (d, s, l);

				for (i = 0; i < l; i++)
					if (s[i] != d[i])
						printf ("Error at %p+%d -> %p+%d (%02X != %02X)\n", (void *)s, i, (void *)d, i, s[i], d[i]);
				if (d[-1] || d[-2] || d[-3] || d[-4])
					printf ("Error before %p\n", (void *)d);
				if (d[l] || d[l+1] || d[l+2] || d[l+3])
					printf ("Error after %p\n", (void *)(d+l));
			}
		}
	}

	printf ("Testing memcpy s < d\n");
	for (l = 1; l < 1020; l++) {
		for (o = 0; o < 4; o++) {
			for (o2 = 0; o2 < 4; o2++) {
				unsigned char *d, *s;

				for (i = 0; i < l + 12; i++)
					buffer2[i] = (i & 0x3f) + 0x40;
				for (i = 0; i < 1036; i++)
					buffer[i] = 0;

				s = buffer2 + o;
				d = buffer + o2 + 4;

				memcpy (d, s, l);

				for (i = 0; i < l; i++)
					if (s[i] != d[i])
						printf ("Error at %p+%d -> %p+%d (%02X != %02X)\n", (void *)s, i, (void *)d, i, s[i], d[i]);
				if (d[-1] || d[-2] || d[-3] || d[-4])
					printf ("Error before %p\n", (void *)d);
				if (d[l] || d[l+1] || d[l+2] || d[l+3])
					printf ("Error after %p\n", (void *)(d+l));
			}
		}
	}
	return 0;
}
diff --git a/arch/arm/lib/uaccess-armo.S b/arch/arm/lib/uaccess-armo.S
new file mode 100644
index 000000000..1a740493a
--- /dev/null
+++ b/arch/arm/lib/uaccess-armo.S
@@ -0,0 +1,230 @@
+/*
+ * arch/arm/lib/uaccess-armo.S
+ *
+ * Copyright (C) 1998 Russell King
+ *
+ * Note! Some code fragments found in here have a special calling
+ * convention - they are not APCS compliant!
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+@ Wrap a user-space access: the instruction's address is recorded in
+@ __ex_table so a fault vectors to the local 9001 handler instead of
+@ oopsing.
+#define USER(x...) \
+9999: x; \
+ .section __ex_table,"a"; \
+ .align 3; \
+ .long 9999b,9001f; \
+ .previous
+
+@ Jump table of user-mode access primitives; the entry order must match
+@ the uaccess structure declared in <asm/uaccess.h> (not visible here --
+@ TODO confirm).
+ .globl SYMBOL_NAME(uaccess_user)
+SYMBOL_NAME(uaccess_user):
+ .word uaccess_user_put_byte
+ .word uaccess_user_get_byte
+ .word uaccess_user_put_half
+ .word uaccess_user_get_half
+ .word uaccess_user_put_word
+ .word uaccess_user_get_word
+ .word __arch_copy_from_user
+ .word __arch_copy_to_user
+ .word __arch_clear_user
+ .word __arch_strncpy_from_user
+ .word __arch_strlen_user
+
+
+@ User-mode stores via the strBT/strT translated forms; a fault on any
+@ of them lands in 9001 below, which sets r2 = -EFAULT.  On success r2
+@ is passed through unchanged.  Returns with "ldmfd {pc}^" (26-bit
+@ return restoring the saved PSR).
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_byte:
+ stmfd sp!, {lr}
+USER( strbt r0, [r1]) @ may fault
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_half:
+ stmfd sp!, {lr}
+USER( strbt r0, [r1], #1) @ may fault: low byte first...
+ mov r0, r0, lsr #8
+USER( strbt r0, [r1]) @ ...then high byte
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_user_put_word:
+ stmfd sp!, {lr}
+USER( strt r0, [r1]) @ may fault
+ ldmfd sp!, {pc}^
+
+@ Fault fixup for all the puts above.
+9001: mov r2, #-EFAULT
+ ldmfd sp!, {pc}^
+
+@ User-mode loads via the ldrBT/ldrT translated forms; a fault lands in
+@ 9001 below, which sets r1 = -EFAULT.  On success r1 passes through
+@ unchanged.
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_byte:
+ stmfd sp!, {lr}
+USER( ldrbt r0, [r0]) @ may fault
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_half:
+ stmfd sp!, {lr}
+USER( ldrt r0, [r0]) @ may fault: whole word read, then...
+ mov r0, r0, lsl #16 @ ...masked down to the low 16 bits
+ mov r0, r0, lsr #16
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_user_get_word:
+ stmfd sp!, {lr}
+USER( ldrt r0, [r0]) @ may fault
+ ldmfd sp!, {pc}^
+
+@ Fault fixup for all the gets above.
+9001: mov r1, #-EFAULT
+ ldmfd sp!, {pc}^
+
+
+
+@ Same table for tasks whose "user" space is really kernel memory
+@ (set_fs(KERNEL_DS)): plain loads/stores, no fault fixups.  Entry
+@ order must match uaccess_user above.
+ .globl SYMBOL_NAME(uaccess_kernel)
+SYMBOL_NAME(uaccess_kernel):
+ .word uaccess_kernel_put_byte
+ .word uaccess_kernel_get_byte
+ .word uaccess_kernel_put_half
+ .word uaccess_kernel_get_half
+ .word uaccess_kernel_put_word
+ .word uaccess_kernel_get_word
+ .word uaccess_kernel_copy @ copy_from and copy_to are the same here
+ .word uaccess_kernel_copy
+ .word uaccess_kernel_clear
+ .word uaccess_kernel_strncpy_from
+ .word uaccess_kernel_strlen
+
+@ Kernel-to-kernel access primitives: plain loads/stores that cannot
+@ fault, so the error argument (r2 for puts, r1 for gets) is always
+@ passed through unchanged.
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_byte:
+ stmfd sp!, {lr}
+ strb r0, [r1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_half:
+ stmfd sp!, {lr}
+ strb r0, [r1] @ low byte, then high byte
+ mov r0, r0, lsr #8
+ strb r0, [r1, #1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = x, r1 = addr, r2 = error
+@ Out: r2 = error
+uaccess_kernel_put_word:
+ stmfd sp!, {lr}
+ str r0, [r1]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_byte:
+ stmfd sp!, {lr}
+ ldrb r0, [r0]
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_half:
+ stmfd sp!, {lr}
+ ldr r0, [r0] @ word read, masked to the low 16 bits
+ mov r0, r0, lsl #16
+ mov r0, r0, lsr #16
+ ldmfd sp!, {pc}^
+
+@ In : r0 = addr, r1 = error
+@ Out: r0 = x, r1 = error
+uaccess_kernel_get_word:
+ stmfd sp!, {lr}
+ ldr r0, [r0]
+ ldmfd sp!, {pc}^
+
+
+/* Prototype: int uaccess_kernel_copy(void *to, const char *from, size_t n)
+ * Purpose : copy a block to kernel memory from kernel memory
+ * Params : to - kernel memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+uaccess_kernel_copy:
+ stmfd sp!, {lr}
+ bl SYMBOL_NAME(memcpy) @ cannot fault in kernel space...
+ mov r0, #0 @ ...so always report 0 bytes uncopied
+ ldmfd sp!, {pc}^
+
+/* Prototype: int uaccess_kernel_clear(void *addr, size_t sz)
+ * Purpose : clear some kernel memory
+ * Params : addr - kernel memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared (always 0: cannot fault)
+ */
+uaccess_kernel_clear:
+ stmfd sp!, {lr}
+ mov r2, #0
+ cmp r1, #4
+ blt 2f @ tiny: handled by the tail code
+ ands ip, r0, #3
+ beq 1f
+@ Bug fix: this comparison must be against #2, not #1.  We need to
+@ write 4-ip alignment bytes (ip=1 -> 3, ip=2 -> 2, ip=3 -> 1); with
+@ "cmp ip, #1" only 2 (ip=1) or 1 (ip=2) bytes were written, yet the
+@ full 4-ip was subtracted from the count below.  Compare the correct
+@ pattern in memset (string.S).
+ cmp ip, #2
+ strb r2, [r0], #1 @ always at least one byte
+ strleb r2, [r0], #1 @ second byte when ip <= 2
+ strltb r2, [r0], #1 @ third byte when ip == 1
+ rsb ip, ip, #4
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+ bmi 2f
+ str r2, [r0], #4 @ 8 bytes per iteration
+ str r2, [r0], #4
+ b 1b
+2: adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+ strpl r2, [r0], #4
+ tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+ strneb r2, [r0], #1
+ strneb r2, [r0], #1
+ tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+ strneb r2, [r0], #1
+ mov r0, #0
+ ldmfd sp!, {pc}^
+
+/* Prototype: size_t uaccess_kernel_strncpy_from(char *dst, char *src, size_t len)
+ * Purpose : copy a string from kernel memory to kernel memory
+ * Params : dst - kernel memory destination
+ * : src - kernel memory source
+ * : len - maximum length of string
+ * Returns : number of characters copied
+ * Note : on hitting a NUL after k bytes (NUL included) this returns k;
+ * when the length limit stops the copy it returns len + 1, since
+ * r2 finishes at -1 -- NOTE(review): confirm callers expect that.
+ */
+uaccess_kernel_strncpy_from:
+ stmfd sp!, {lr}
+ mov ip, r2 @ remember the original limit
+1: subs r2, r2, #1
+ bmi 2f @ limit exhausted
+ ldrb r3, [r1], #1
+ strb r3, [r0], #1
+ teq r3, #0 @ stop after copying the terminator
+ bne 1b
+2: subs r0, ip, r2 @ bytes copied = limit - remaining
+ ldmfd sp!, {pc}^
+
+/* Prototype: int uaccess_kernel_strlen(char *str)
+ * Purpose : get length of a string in kernel memory
+ * Params : str - address of string in kernel memory
+ * Returns : length of string *including terminator*, or zero on error
+ * (kernel accesses cannot fault, so the error case never occurs here)
+ */
+uaccess_kernel_strlen:
+ stmfd sp!, {lr}
+ mov r2, r0 @ remember the start
+1: ldrb r1, [r0], #1
+ teq r1, #0
+ bne 1b
+ sub r0, r0, r2 @ end (past NUL) - start = len + 1
+ ldmfd sp!, {pc}^
+
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
new file mode 100644
index 000000000..a1524bee9
--- /dev/null
+++ b/arch/arm/lib/uaccess.S
@@ -0,0 +1,631 @@
+/*
+ * linux/arch/arm/lib/uaccess.S
+ *
+ * Copyright (C) 1995, 1996,1997,1998 Russell King
+ *
+ * Routines to block copy data to/from user memory
+ * These are highly optimised both for the 4k page size
+ * and for various alignments.
+ */
+#include <linux/autoconf.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+
+/* Wrap a single user-space access "x": label it 9999 and record the
+ * pair (9999b, 9001f) in the __ex_table section, so that on a fault at
+ * that instruction the page-fault handler resumes at local label 9001
+ * (the routine's .fixup stub). */
+#define USER(x...) \
+9999: x; \
+ .section __ex_table,"a"; \
+ .align 3; \
+ .long 9999b,9001f; \
+ .previous
+
+/* 4k pages: used to compute the distance to the next page boundary */
+#define PAGE_SHIFT 12
+
+/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+ * Purpose : copy a block to user memory from kernel memory
+ * Params : to - user memory
+ * : from - kernel memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+
+.c2u_dest_not_aligned: @ entered with ip = to & 3 (1..3), n >= 4
+ rsb ip, ip, #4 @ ip = bytes (1..3) needed to word-align "to"
+ cmp ip, #2
+ ldrb r3, [r1], #1
+USER( strbt r3, [r0], #1) // May fault
+ ldrgeb r3, [r1], #1
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #1
+USER( strgtbt r3, [r0], #1) // May fault
+ sub r2, r2, ip @ account for the alignment bytes
+ b .c2u_dest_aligned
+
+ENTRY(__arch_copy_to_user)
+ stmfd sp!, {r2, r4 - r7, lr} @ save n (popped as r0 by the fixup) + work regs
+ cmp r2, #4
+ blt .c2u_not_enough
+ ands ip, r0, #3
+ bne .c2u_dest_not_aligned
+.c2u_dest_aligned:
+
+ ands ip, r1, #3
+ bne .c2u_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.c2u_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4 @ underflow: ip = trailing bytes (0..3)
+ bmi .c2u_0nowords
+ ldr r3, [r1], #4
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT // On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT @ ip = bytes to the next page boundary
+ beq .c2u_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2 @ ip = min(ip, remaining)
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .c2u_0rem8lp
+
+.c2u_0cpy8lp: ldmia r1!, {r3 - r6} @ 32 bytes per iteration
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ ldmia r1!, {r3 - r6}
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #32
+ bpl .c2u_0cpy8lp
+.c2u_0rem8lp: cmn ip, #16 @ handle 16/8/4-byte remainders
+ ldmgeia r1!, {r3 - r6}
+ stmgeia r0!, {r3 - r6} // Shouldn't fault
+ tst ip, #8
+ ldmneia r1!, {r3 - r4}
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ ldrne r3, [r1], #4
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_0fupi
+.c2u_0nowords: teq ip, #0
+ beq .c2u_finished
+.c2u_nowords: cmp ip, #2 @ copy the final 1..3 bytes
+ ldrb r3, [r1], #1
+USER( strbt r3, [r0], #1) // May fault
+ ldrgeb r3, [r1], #1
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #1
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_not_enough:
+ movs ip, r2 @ n < 4: bytewise only (if n != 0)
+ bne .c2u_nowords
+.c2u_finished: mov r0, #0 @ all bytes copied
+ LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+
+.c2u_src_not_aligned: @ dest aligned, src offset in ip (1..3); r7 pipelines src words
+ bic r1, r1, #3 @ word-align src; r7 carries the partial word
+ ldr r7, [r1], #4
+ cmp ip, #2
+ bgt .c2u_3fupi @ src offset 3
+ beq .c2u_2fupi @ src offset 2
+.c2u_1fupi: subs r2, r2, #4 @ src offset 1: output = r7>>8 | next<<24
+ addmi ip, r2, #4
+ bmi .c2u_1nowords
+ mov r3, r7, lsr #8
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #24
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ bytes to next page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_1rem8lp
+
+.c2u_1cpy8lp: mov r3, r7, lsr #8 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_1cpy8lp
+.c2u_1rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #8
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #24
+ movne r4, r4, lsr #8
+ orrne r4, r4, r7, lsl #24
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #8
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #24
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_1fupi
+.c2u_1nowords: mov r3, r7, lsr #8 @ up to 3 tail bytes are already in r7
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ movge r3, r3, lsr #8
+USER( strgebt r3, [r0], #1) // May fault
+ movgt r3, r3, lsr #8
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_2fupi: subs r2, r2, #4 @ src offset 2: output = r7>>16 | next<<16
+ addmi ip, r2, #4
+ bmi .c2u_2nowords
+ mov r3, r7, lsr #16
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #16
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ bytes to next page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_2rem8lp
+
+.c2u_2cpy8lp: mov r3, r7, lsr #16 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_2cpy8lp
+.c2u_2rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #16
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #16
+ movne r4, r4, lsr #16
+ orrne r4, r4, r7, lsl #16
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #16
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #16
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_2fupi
+.c2u_2nowords: mov r3, r7, lsr #16 @ only 2 tail bytes live in r7; a 3rd needs a load
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ movge r3, r3, lsr #8
+USER( strgebt r3, [r0], #1) // May fault
+ ldrgtb r3, [r1], #0 @ 3rd byte comes from the next src word
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+.c2u_3fupi: subs r2, r2, #4 @ src offset 3: output = r7>>24 | next<<8
+ addmi ip, r2, #4
+ bmi .c2u_3nowords
+ mov r3, r7, lsr #24
+ ldr r7, [r1], #4
+ orr r3, r3, r7, lsl #8
+USER( strt r3, [r0], #4) // May fault
+ mov ip, r0, lsl #32 - PAGE_SHIFT @ bytes to next page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .c2u_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .c2u_3rem8lp
+
+.c2u_3cpy8lp: mov r3, r7, lsr #24 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7}
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6} // Shouldn't fault
+ subs ip, ip, #16
+ bpl .c2u_3cpy8lp
+.c2u_3rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #24
+ ldmneia r1!, {r4, r7}
+ orrne r3, r3, r4, lsl #8
+ movne r4, r4, lsr #24
+ orrne r4, r4, r7, lsl #8
+ stmneia r0!, {r3 - r4} // Shouldn't fault
+ tst ip, #4
+ movne r3, r7, lsr #24
+ ldrne r7, [r1], #4
+ orrne r3, r3, r7, lsl #8
+ strnet r3, [r0], #4 // Shouldn't fault
+ ands ip, ip, #3
+ beq .c2u_3fupi
+.c2u_3nowords: mov r3, r7, lsr #24 @ only 1 tail byte lives in r7; more need a load
+ teq ip, #0
+ beq .c2u_finished
+ cmp ip, #2
+USER( strbt r3, [r0], #1) // May fault
+ ldrge r3, [r1], #0 @ further bytes come from the next src word
+USER( strgebt r3, [r0], #1) // May fault
+ movgt r3, r3, lsr #8
+USER( strgtbt r3, [r0], #1) // May fault
+ b .c2u_finished
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) @ fault: pop saved n into r0 => report all n bytes NOT copied
+ .previous
+
+
+
+/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+ * Purpose : copy a block from user memory to kernel memory
+ * Params : to - kernel memory
+ * : from - user memory
+ * : n - number of bytes to copy
+ * Returns : Number of bytes NOT copied.
+ */
+.cfu_dest_not_aligned: @ entered with ip = to & 3 (1..3), n >= 4
+ rsb ip, ip, #4 @ ip = bytes (1..3) needed to word-align "to"
+ cmp ip, #2
+USER( ldrbt r3, [r1], #1) // May fault
+ strb r3, [r0], #1
+USER( ldrgebt r3, [r1], #1) // May fault
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #1) // May fault
+ strgtb r3, [r0], #1
+ sub r2, r2, ip @ account for the alignment bytes
+ b .cfu_dest_aligned
+
+ENTRY(__arch_copy_from_user)
+ stmfd sp!, {r2, r4 - r7, lr} @ save n (popped as r0 by the fixup) + work regs
+ cmp r2, #4
+ blt .cfu_not_enough
+ ands ip, r0, #3
+ bne .cfu_dest_not_aligned
+.cfu_dest_aligned:
+ ands ip, r1, #3
+ bne .cfu_src_not_aligned
+/*
+ * Seeing as there has to be at least 8 bytes to copy, we can
+ * copy one word, and force a user-mode page fault...
+ */
+
+.cfu_0fupi: subs r2, r2, #4
+ addmi ip, r2, #4 @ underflow: ip = trailing bytes (0..3)
+ bmi .cfu_0nowords
+USER( ldrt r3, [r1], #4)
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT // On each page, use a ld/st??t instruction
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT @ ip = bytes to next user-page boundary
+ beq .cfu_0fupi
+/*
+ * ip = max no. of bytes to copy before needing another "strt" insn
+ */
+ cmp r2, ip
+ movlt ip, r2 @ ip = min(ip, remaining)
+ sub r2, r2, ip
+ subs ip, ip, #32
+ blt .cfu_0rem8lp
+
+.cfu_0cpy8lp: ldmia r1!, {r3 - r6} // Shouldn't fault
+ stmia r0!, {r3 - r6}
+ ldmia r1!, {r3 - r6} // Shouldn't fault
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #32
+ bpl .cfu_0cpy8lp
+.cfu_0rem8lp: cmn ip, #16 @ handle 16/8/4-byte remainders
+ ldmgeia r1!, {r3 - r6} // Shouldn't fault
+ stmgeia r0!, {r3 - r6}
+ tst ip, #8
+ ldmneia r1!, {r3 - r4} // Shouldn't fault
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ ldrnet r3, [r1], #4 // Shouldn't fault
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_0fupi
+.cfu_0nowords: teq ip, #0
+ beq .cfu_finished
+.cfu_nowords: cmp ip, #2 @ copy the final 1..3 bytes
+USER( ldrbt r3, [r1], #1) // May fault
+ strb r3, [r0], #1
+USER( ldrgebt r3, [r1], #1) // May fault
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #1) // May fault
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_not_enough:
+ movs ip, r2 @ n < 4: bytewise only (if n != 0)
+ bne .cfu_nowords
+.cfu_finished: mov r0, #0 @ all bytes copied
+ LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+
+.cfu_src_not_aligned: @ dest aligned, src offset in ip (1..3); r7 pipelines src words
+ bic r1, r1, #3 @ word-align src; r7 carries the partial word
+USER( ldrt r7, [r1], #4) // May fault
+ cmp ip, #2
+ bgt .cfu_3fupi @ src offset 3
+ beq .cfu_2fupi @ src offset 2
+.cfu_1fupi: subs r2, r2, #4 @ src offset 1: output = r7>>8 | next<<24
+ addmi ip, r2, #4
+ bmi .cfu_1nowords
+ mov r3, r7, lsr #8
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #24
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ bytes to next user-page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_1fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_1rem8lp
+
+.cfu_1cpy8lp: mov r3, r7, lsr #8 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_1cpy8lp
+.cfu_1rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #8
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #24
+ movne r4, r4, lsr #8
+ orrne r4, r4, r7, lsl #24
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #8
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #24
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_1fupi
+.cfu_1nowords: mov r3, r7, lsr #8 @ up to 3 tail bytes are already in r7
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r3, lsr #8
+ strgeb r3, [r0], #1
+ movgt r3, r3, lsr #8
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_2fupi: subs r2, r2, #4 @ src offset 2: output = r7>>16 | next<<16
+ addmi ip, r2, #4
+ bmi .cfu_2nowords
+ mov r3, r7, lsr #16
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #16
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ bytes to next user-page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_2fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_2rem8lp
+
+.cfu_2cpy8lp: mov r3, r7, lsr #16 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_2cpy8lp
+.cfu_2rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #16
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #16
+ movne r4, r4, lsr #16
+ orrne r4, r4, r7, lsl #16
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #16
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #16
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_2fupi
+.cfu_2nowords: mov r3, r7, lsr #16 @ only 2 tail bytes live in r7; a 3rd needs a load
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+ movge r3, r3, lsr #8
+ strgeb r3, [r0], #1
+USER( ldrgtbt r3, [r1], #0) // May fault
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+.cfu_3fupi: subs r2, r2, #4 @ src offset 3: output = r7>>24 | next<<8
+ addmi ip, r2, #4
+ bmi .cfu_3nowords
+ mov r3, r7, lsr #24
+USER( ldrt r7, [r1], #4) // May fault
+ orr r3, r3, r7, lsl #8
+ str r3, [r0], #4
+ mov ip, r1, lsl #32 - PAGE_SHIFT @ bytes to next user-page boundary (see 0-path)
+ rsb ip, ip, #0
+ movs ip, ip, lsr #32 - PAGE_SHIFT
+ beq .cfu_3fupi
+ cmp r2, ip
+ movlt ip, r2
+ sub r2, r2, ip
+ subs ip, ip, #16
+ blt .cfu_3rem8lp
+
+.cfu_3cpy8lp: mov r3, r7, lsr #24 @ 16 bytes per iteration, re-aligning on the fly
+ ldmia r1!, {r4 - r7} // Shouldn't fault
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ stmia r0!, {r3 - r6}
+ subs ip, ip, #16
+ bpl .cfu_3cpy8lp
+.cfu_3rem8lp: tst ip, #8 @ 8- and 4-byte remainders
+ movne r3, r7, lsr #24
+ ldmneia r1!, {r4, r7} // Shouldn't fault
+ orrne r3, r3, r4, lsl #8
+ movne r4, r4, lsr #24
+ orrne r4, r4, r7, lsl #8
+ stmneia r0!, {r3 - r4}
+ tst ip, #4
+ movne r3, r7, lsr #24
+USER( ldrnet r7, [r1], #4) // May fault
+ orrne r3, r3, r7, lsl #8
+ strne r3, [r0], #4
+ ands ip, ip, #3
+ beq .cfu_3fupi
+.cfu_3nowords: mov r3, r7, lsr #24 @ only 1 tail byte lives in r7; more need a load
+ teq ip, #0
+ beq .cfu_finished
+ cmp ip, #2
+ strb r3, [r0], #1
+USER( ldrget r3, [r1], #0) // May fault; NOTE(review): word load here vs ldrgtbt in the 2-path - r1 is word-aligned so both work, confirm intended
+ strgeb r3, [r0], #1
+ movgt r3, r3, lsr #8
+ strgtb r3, [r0], #1
+ b .cfu_finished
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) @ fault: pop saved n into r0 => report all n bytes NOT copied
+ .previous
+
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+ENTRY(__arch_clear_user)
+ stmfd sp!, {r1, lr} @ save sz: the fixup pops it into r0 on fault
+ mov r2, #0 @ r2 = zero value stored throughout
+ cmp r1, #4
+ blt 2f @ < 4 bytes: skip straight to byte tail
+ ands ip, r0, #3 @ ip = addr & 3 (1..3 if misaligned)
+ beq 1f
+ cmp ip, #2 @ BUGFIX: was "cmp ip, #1" - 4-ip bytes are
+USER( strbt r2, [r0], #1) @ needed to align: 1 byte always,
+USER( strlebt r2, [r0], #1) @ a 2nd when ip <= 2,
+USER( strltbt r2, [r0], #1) @ and a 3rd when ip == 1 (matches the
+ rsb ip, ip, #4 @ "sub r1, r1, ip" accounting below)
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+USER( strplt r2, [r0], #4)
+USER( strplt r2, [r0], #4)
+ bpl 1b
+2: adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+USER( strplt r2, [r0], #4)
+ tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+USER( strnebt r2, [r0], #1)
+USER( strnebt r2, [r0], #1)
+ tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+USER( strnebt r2, [r0], #1)
+ mov r0, #0 @ nothing left uncleared
+ LOADREGS(fd,sp!, {r1, pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: LOADREGS(fd,sp!, {r0, pc}) @ fault: report all sz bytes NOT cleared
+ .previous
+
+/* Prototype: int __arch_strlen_user(char *str)
+ * Purpose : get length of a string in user memory
+ * Params : str - address of string in user memory
+ * Returns : length of string *including terminator*, or zero on error
+ */
+ENTRY(__arch_strlen_user)
+ stmfd sp!, {lr}
+ mov r2, r0 @ remember start address
+1:
+USER( ldrbt r1, [r0], #1) @ bytewise scan; faults go to 9001
+ teq r1, #0
+ bne 1b
+ sub r0, r0, r2 @ bytes scanned, including the NUL itself
+ LOADREGS(fd,sp!, {pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: mov r0, #0 @ fault: length 0 signals the error
+ LOADREGS(fd,sp!,{pc})
+ .previous
+
+/* Prototype: size_t __arch_strncpy_from_user(char *dst, char *src, size_t len)
+ * Purpose : copy a string from user memory to kernel memory
+ * Params : dst - kernel memory destination
+ * : src - user memory source
+ * : len - maximum length of string
+ * Returns : number of characters copied
+ */
+ENTRY(__arch_strncpy_from_user)
+ stmfd sp!, {lr}
+ mov ip, r2 @ ip = original max length
+1: subs r2, r2, #1
+ bmi 2f @ space exhausted before NUL was reached
+USER( ldrbt r3, [r1], #1) @ faults go to 9001
+ strb r3, [r0], #1 @ copy byte (including the terminator)
+ teq r3, #0
+ bne 1b
+2: subs r0, ip, r2 @ bytes consumed; NOTE(review): on truncation r2 == -1, so this returns len+1 - confirm callers expect that
+ LOADREGS(fd,sp!, {pc})
+
+ .section .fixup,"ax"
+ .align 0
+9001: mov r0, #-EFAULT @ fault: return -EFAULT
+ LOADREGS(fd,sp!, {pc})
+ .previous
+
+ .align
+