Diffstat (limited to 'arch/mips/lib')
-rw-r--r--  arch/mips/lib/Makefile              15
-rw-r--r--  arch/mips/lib/beep.S                21
-rw-r--r--  arch/mips/lib/checksum.c           131
-rw-r--r--  arch/mips/lib/copy_user.S          201
-rw-r--r--  arch/mips/lib/csum.S                25
-rw-r--r--  arch/mips/lib/csum_partial.S       242
-rw-r--r--  arch/mips/lib/csum_partial_copy.S  518
-rw-r--r--  arch/mips/lib/ide-no.c              73
-rw-r--r--  arch/mips/lib/ide-std.c             91
-rw-r--r--  arch/mips/lib/memcpy.S             824
-rw-r--r--  arch/mips/lib/memset.S             141
-rw-r--r--  arch/mips/lib/memset.c              71
-rw-r--r--  arch/mips/lib/strlen_user.S         43
-rw-r--r--  arch/mips/lib/strncpy_user.S        63
14 files changed, 1788 insertions, 671 deletions
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 7da90c6d0..597202403 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -1,11 +1,7 @@
#
# Makefile for MIPS-specific library files..
#
-# Many of these routines are just left over debugging trash of ancient
-# times when I just make my Tyne beep and so ...
-#
-# ...and for when I need to get the DECStation to use the boot prom to
-# do things... Paul M. Antoine.
+# $Id: Makefile,v 1.9 1998/05/03 00:28:00 ralf Exp $
#
.S.s:
@@ -14,12 +10,7 @@
$(CC) $(CFLAGS) -c $< -o $*.o
L_TARGET = lib.a
-L_OBJS = beep.o checksum.o copy_user.o csum.o dump_tlb.o memset.o memcpy.o \
- strlen_user.o strncpy_user.o tags.o watch.o
-
-#
-# Debug console, works without other support from the kernel
-#
-L_OBJS += tinycon.o
+L_OBJS = csum_partial.o csum_partial_copy.o dump_tlb.o ide-std.o ide-no.o \
+ memset.o memcpy.o strlen_user.o strncpy_user.o tags.o watch.o
include $(TOPDIR)/Rules.make
diff --git a/arch/mips/lib/beep.S b/arch/mips/lib/beep.S
deleted file mode 100644
index e74a63c0e..000000000
--- a/arch/mips/lib/beep.S
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/*
- * Just for debugging...
- */
- LEAF(beep)
- lw t0,beepflag
- bnez t0,1f
- lbu t0,0xb4000061
- xori t0,3
- sb t0,0xb4000061
- li t0,1
- sw t0,beepflag
-1: jr ra
- END(beep)
-
- .bss
-beepflag: .word 0
- .text
-
diff --git a/arch/mips/lib/checksum.c b/arch/mips/lib/checksum.c
deleted file mode 100644
index f3ef6295c..000000000
--- a/arch/mips/lib/checksum.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * MIPS specific IP/TCP/UDP checksumming routines
- *
- * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * $Id: checksum.c,v 1.2 1997/07/29 18:37:35 ralf Exp $
- */
-#include <net/checksum.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
- /* 32 bits --> 16 bits + carry */
- x = (x & 0xffff) + (x >> 16);
- /* 16 bits + carry --> 16 bits including carry */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-static inline unsigned long do_csum(const unsigned char * buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = be16_to_cpu(*buff);
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *) buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long carry = 0;
- do {
- unsigned long w = *(unsigned long *) buff;
- count--;
- buff += 4;
- result += carry;
- result += w;
- carry = (w > result);
- } while (count);
- result += carry;
- result = (result & 0xffff) + (result >> 16);
- }
- if (len & 2) {
- result += *(unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += le16_to_cpu(*buff);
- result = from32to16(result);
- if (odd)
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
- return result;
-}
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
-{
- unsigned long result = do_csum(buff, len);
-
- /* add in old sum, and carry.. */
- result += sum;
- if(sum > result)
- result += 1;
- return result;
-}
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-unsigned int csum_partial_copy(const char *src, char *dst,
- int len, unsigned int sum)
-{
- /*
- * It's 2:30 am and I don't feel like doing it real ...
- * This is lots slower than the real thing (tm)
- */
- sum = csum_partial(src, len, sum);
- memcpy(dst, src, len);
-
- return sum;
-}
-
-/*
- * Copy from userspace and compute checksum. If we catch an exception
- * then zero the rest of the buffer.
- */
-unsigned int csum_partial_copy_from_user (const char *src, char *dst,
- int len, unsigned int sum,
- int *err_ptr)
-{
- int *dst_err_ptr=NULL;
- int missing;
-
- missing = copy_from_user(dst, src, len);
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *err_ptr = -EFAULT;
- }
-
- return csum_partial(dst, len, sum);
-}
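
The removed do_csum() above is the clearest statement of the algorithm the new assembly files implement. A minimal user-space sketch of its two key tricks, the end-around-carry fold and the odd-alignment byte swap, for illustration only (not kernel code; all names here are ours):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator to 16 bits with end-around carry,
     * exactly like the removed from32to16(). */
    static uint16_t fold16(uint32_t x)
    {
    	x = (x & 0xffff) + (x >> 16);	/* 32 -> 16 bits + carry */
    	x = (x & 0xffff) + (x >> 16);	/* absorb the final carry */
    	return (uint16_t)x;
    }

    int main(void)
    {
    	/* Sum a toy buffer as 16-bit words, then fold. */
    	uint16_t words[] = { 0x4500, 0x003c, 0x1c46 };
    	uint32_t sum = 0;
    	for (unsigned i = 0; i < 3; i++)
    		sum += words[i];
    	/* If the buffer started on an odd address, do_csum() swaps the
    	 * result's bytes -- a one's-complement sum commutes with byte
    	 * rotation, so this is the only correction needed. */
    	printf("0x%04x\n", fold16(sum));
    	return 0;
    }
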
diff --git a/arch/mips/lib/copy_user.S b/arch/mips/lib/copy_user.S
deleted file mode 100644
index a7fdc74e5..000000000
--- a/arch/mips/lib/copy_user.S
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * arch/mips/lib/copy_user.S
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1996, 1997 by Ralf Baechle
- *
- * Less stupid user_copy implementation for 32 bit MIPS CPUs.
- *
- * $Id: copy_user.S,v 1.2 1997/08/11 04:26:12 ralf Exp $
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-
-#define BLOCK_SIZE 16
-
-#define EX(addr,handler) \
- .section __ex_table,"a"; \
- PTR addr, handler; \
- .previous
-#define UEX(addr,handler) \
- EX(addr,handler); \
- EX(addr+4,handler)
-
- .set noreorder
- .set noat
-
-/* ---------------------------------------------------------------------- */
-
-/*
- * Bad. We can't fix the alignment for both address parts.
- * Align the source address and copy slowly ...
- */
-not_even_the_same_alignment:
- LONG_SUBU v1,zero,a1
- andi v1,3
- sltu t0,v0,v1
- MOVN(v1,v0,t0)
- beqz v1,src_aligned
- LONG_ADDU v1,a0
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_ADDIU a0,1
- bne a0,v1,1b
- LONG_SUBU v0,1
-src_aligned:
-
-/*
- * Ok. We've fixed the alignment of the copy src for this case.
- * Now let's copy in the usual BLOCK_SIZE byte blocks using unaligned
- * stores.
- * XXX Align the destination address. This is better if the __copy_user
- * encounters an access fault because we never have to deal with an
- * only partially modified destination word. This is required to
- * keep the semantics of the result of copy_*_user().
- */
- ori v1,v0,BLOCK_SIZE-1
- xori v1,BLOCK_SIZE-1
- beqz v1,copy_left_over
- nop
- LONG_SUBU v0,v1
- LONG_ADDU v1,a0
-
-1: lw t0,(a1) # Can cause tlb fault
- EX(1b, fault)
-2: lw t1,4(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t2,8(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t3,12(a1) # Can cause tlb fault
- EX(2b, fault)
-2: usw t0,(a0) # Can cause tlb faults
- UEX(2b, fault)
-2: usw t1,4(a0) # Can cause tlb faults
- UEX(2b, fault_plus_4)
-2: usw t2,8(a0) # Can cause tlb faults
- UEX(2b, fault_plus_8)
-2: usw t3,12(a0) # Can cause tlb faults
- UEX(2b, fault_plus_12)
- LONG_ADDIU a0,BLOCK_SIZE
- bne a0,v1,1b
- LONG_ADDIU a1,BLOCK_SIZE
-9:
- b copy_left_over # < BLOCK_SIZE bytes left
- nop
-
-/* ---------------------------------------------------------------------- */
-
-not_w_aligned:
-/*
- * Ok, src or destination are not 4-byte aligned.
- * Try to fix that. Do at least both addresses have the same alignment?
- */
- xor t0,a0,a1
- andi t0,3
- bnez t0,not_even_the_same_alignment
- nop # delay slot
-
-/*
- * Ok, we can fix the alignment for both operands and go back to the
- * fast path. We have to copy at least one byte, on average 3 bytes
- * bytewise.
- */
- LONG_SUBU v1,zero,a0
- andi v1,3
- sltu t0,v0,v1
- MOVN(v1,v0,t0)
- beqz v1,__copy_user
- LONG_ADDU v1,a0
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_ADDIU a0,1
- bne a0,v1,1b
- LONG_SUBU v0,1
- b align4
- nop
-
-/* ---------------------------------------------------------------------- */
-
-LEAF(__copy_user)
- or t1,a0,a1
- andi t1,3
- bnez t1,not_w_aligned # not word alignment
- move v0,a2
-
-align4:
- ori v1,v0,BLOCK_SIZE-1
- xori v1,BLOCK_SIZE-1
- beqz v1,copy_left_over
- nop
- LONG_SUBU v0,v1
- LONG_ADDU v1,a0
-
-1: lw t0,(a1) # Can cause tlb fault
- EX(1b, fault)
-2: lw t1,4(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t2,8(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t3,12(a1) # Can cause tlb fault
- EX(2b, fault)
-2: sw t0,(a0) # Can cause tlb fault
- EX(2b, fault)
-2: sw t1,4(a0) # Can cause tlb fault
- EX(2b, fault_plus_4)
-2: sw t2,8(a0) # Can cause tlb fault
- EX(2b, fault_plus_8)
-2: sw t3,12(a0) # Can cause tlb fault
- EX(2b, fault_plus_12)
- LONG_ADDIU a0,BLOCK_SIZE
- bne a0,v1,1b
- LONG_ADDIU a1,BLOCK_SIZE
-9:
-
-/*
- * XXX Tune me ...
- */
-copy_left_over:
- beqz v0,3f
- nop
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_SUBU v0,1
- bnez v0,1b
- LONG_ADDIU a0,1
-3:
-
-done: jr ra
- nop
-
- END(__copy_user)
- .set at
- .set reorder
-
-/* ---------------------------------------------------------------------- */
-
-/*
- * Access fault. The number of not copied bytes is in v0. We have to
- * correct the number of the not copied bytes in v0 in case of a access
- * fault in an unrolled loop, then return.
- */
-
-fault: jr ra
-fault_plus_4: LONG_ADDIU v0,4
- jr ra
-fault_plus_8: LONG_ADDIU v0,8
- jr ra
-fault_plus_12: LONG_ADDIU v0,12
- jr ra
diff --git a/arch/mips/lib/csum.S b/arch/mips/lib/csum.S
deleted file mode 100644
index 08224e86b..000000000
--- a/arch/mips/lib/csum.S
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <asm/addrspace.h>
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/*
- * Compute kernel code checksum to check kernel code against corruption
- * (Ancient debugging trash ...)
- */
- LEAF(csum)
- LONG_L t0,cacheflush
- move t8,ra
- jalr t0
- li t0,KSEG1
- la t1,final
- li t2,KSEG1
- or t0,t2
- or t1,t2
- move v0,zero
-1: lw t2,(t0)
- addiu t0,4
- bne t0,t1,1b
- xor v0,t2
- jr t8
- nop
- END(csum)
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 000000000..ce43987d8
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ralf Baechle
+ *
+ * $Id: csum_partial.S,v 1.2 1998/04/22 03:26:19 ralf Exp $
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define dest a1
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+ move a1, t2
+
+ andi t0, a1, 4
+ beqz t0, 1f
+ andi t0, a1, 2
+
+ /* Still a full word to go */
+ ulw t1, (src)
+ addiu src, 4
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ addiu src, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
+
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a2)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial)
+ move sum, zero
+ move t7, zero
+
+ sltiu t8, a1, 0x8
+ bnez t8, small_csumcpy /* < 8 bytes to copy */
+ move t2, a1
+
+ beqz a1, out
+ andi t7, src, 0x1 /* odd buffer? */
+
+hword_align:
+ beqz t7, word_align
+ andi t8, src, 0x2
+
+ lbu t0, (src)
+ subu a1, a1, 0x1
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a1, 56
+
+ lhu t0, (src)
+ subu a1, a1, 0x2
+ ADDC(sum, t0)
+ sltiu t8, a1, 56
+ addu src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a1
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ lw t0, 0x00(src)
+ subu a1, a1, 0x4
+ ADDC(sum, t0)
+ addu src, src, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
+ subu a1, a1, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ addu src, src, 0x8
+ andi t8, src, 0x10
+
+oword_align:
+ beqz t8, begin_movement
+ srl t8, a1, 0x7
+
+ lw t3, 0x08(src)
+ lw t4, 0x0c(src)
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ subu a1, a1, 0x10
+ addu src, src, 0x10
+ srl t8, a1, 0x7
+
+begin_movement:
+ beqz t8, 1f
+ andi t2, a1, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ bnez t8, move_128bytes
+ addu src, src, 0x80
+
+1:
+ beqz t2, 1f
+ andi t2, a1, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+
+1:
+ beqz t2, do_end_words
+ andi t8, a1, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a1, 0x1c
+ addu src, src, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
+
+end_words:
+ lw t0, (src)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ bnez t8, end_words
+ addu src, src, 0x4
+
+maybe_end_cruft:
+ andi t2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, t2
+ beqz t2, out
+ move a1, t2
+
+end_bytes:
+ lb t0, (src)
+ subu a1, a1, 0x1
+ bnez a2, end_bytes
+ addu src, src, 0x1
+
+out:
+ jr ra
+ move v0, sum
+ END(csum_partial)
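
MIPS has no carry flag, so the ADDC macro above recovers the carry with sltu: an unsigned add wrapped around iff the result ended up smaller than an addend. One step of the accumulation, written out in C (a sketch for illustration):

    #include <stdint.h>

    static uint32_t addc(uint32_t sum, uint32_t reg)
    {
    	sum += reg;		/* addu sum, reg     */
    	if (sum < reg)		/* sltu v1, sum, reg */
    		sum += 1;	/* addu sum, v1      */
    	return sum;
    }
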
diff --git a/arch/mips/lib/csum_partial_copy.S b/arch/mips/lib/csum_partial_copy.S
new file mode 100644
index 000000000..62ee35395
--- /dev/null
+++ b/arch/mips/lib/csum_partial_copy.S
@@ -0,0 +1,518 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ralf Baechle
+ *
+ * $Id: csum_partial_copy.S,v 1.3 1998/05/01 06:54:07 ralf Exp $
+ *
+ * Unified implementation of csum_copy_partial and csum_copy_partial_from_user.
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+/*
+ * The fixup routine for csum_partial_copy_from_user depends on copying
+ * strictly in increasing order. Gas expands ulw/usw macros in the wrong order
+ * for little endian machines, so we cannot depend on them.
+ */
+#ifdef __MIPSEB__
+#define ulwL lwl
+#define ulwU lwr
+#endif
+#ifdef __MIPSEL__
+#define ulwL lwr
+#define ulwU lwl
+#endif
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define UEX(insn,reg,addr,handler) \
+9: insn ## L reg, addr; \
+10: insn ## U reg, 3 + addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ PTR 10b, handler; \
+ .previous
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+/* ascending order, destination aligned */
+#define CSUM_BIGCHUNK(src, dst, offset, sum, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ sw t0, (offset + 0x00)(dst); \
+ sw t1, (offset + 0x04)(dst); \
+ sw t2, (offset + 0x08)(dst); \
+ sw t3, (offset + 0x0c)(dst); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ sw t0, (offset + 0x10)(dst); \
+ sw t1, (offset + 0x14)(dst); \
+ sw t2, (offset + 0x18)(dst); \
+ sw t3, (offset + 0x1c)(dst)
+
+/* ascending order, destination unaligned */
+#define UCSUM_BIGCHUNK(src, dst, offset, sum, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ usw t0, (offset + 0x00)(dst); \
+ usw t1, (offset + 0x04)(dst); \
+ usw t2, (offset + 0x08)(dst); \
+ usw t3, (offset + 0x0c)(dst); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ usw t0, (offset + 0x10)(dst); \
+ usw t1, (offset + 0x14)(dst); \
+ usw t2, (offset + 0x18)(dst); \
+ usw t3, (offset + 0x1c)(dst)
+
+#
+# a0: source address
+# a1: destination address
+# a2: length of the area to checksum
+# a3: partial checksum
+#
+
+#define src a0
+#define dest a1
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src/dst alignment and < 8 bytes to go */
+small_csumcpy:
+ move a2, t2
+
+ andi t0, a2, 4
+ beqz t0, 1f
+ andi t0, a2, 2
+
+ /* Still a full word to go */
+ UEX(ulw, t1, 0(src), l_fixup)
+ addiu src, 4
+ usw t1, 0(dest)
+ addiu dest, 4
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a2, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ addiu src, 2
+ ush t1, (dest)
+ addiu dest, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
+ sb t2, (dest)
+
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a3)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial_copy_from_user)
+ addu t5, src, a2 # end address for fixup
+EXPORT(csum_partial_copy)
+ move sum, zero # clear computed sum
+ move t7, zero # clear odd flag
+ xor t0, dest, src
+ andi t0, t0, 0x3
+ beqz t0, can_align
+ sltiu t8, a2, 0x8
+
+ b memcpy_u_src # bad alignment
+ move t2, a2
+
+can_align:
+ bnez t8, small_csumcpy # < 8 bytes to copy
+ move t2, a2
+
+ beqz a2, out
+ andi t7, src, 0x1 # odd buffer?
+
+hword_align:
+ beqz t7, word_align
+ andi t8, src, 0x2
+
+ EX(lbu, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (dest), l_fixup)
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ addu dest, dest, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a2, 56
+
+ EX(lhu, t0, (src), l_fixup)
+ subu a2, a2, 0x2
+ sh t0, (dest)
+ ADDC(sum, t0)
+ sltiu t8, a2, 56
+ addu dest, dest, 0x2
+ addu src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a2
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu a2, a2, 0x4
+ ADDC(sum, t0)
+ sw t0, 0x00(dest)
+ addu src, src, 0x4
+ addu dest, dest, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ subu a2, a2, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ sw t0, 0x00(dest)
+ addu src, src, 0x8
+ sw t1, 0x04(dest)
+ andi t8, src, 0x10
+ addu dest, dest, 0x8
+
+oword_align:
+ beqz t8, begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(src), l_fixup) # assumes subblock ordering
+ EX(lw, t4, 0x0c(src), l_fixup)
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ sw t3, 0x08(dest)
+ subu a2, a2, 0x10
+ sw t4, 0x0c(dest)
+ addu src, src, 0x10
+ sw t0, 0x00(dest)
+ srl t8, a2, 0x7
+ addu dest, dest, 0x10
+ sw t1, -0x0c(dest)
+
+begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu src, src, 0x80
+ bnez t8, move_128bytes
+ addu dest, dest, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+ addu dest, dest, 0x40
+
+1:
+ beqz t2, do_end_words
+ andi t8, a2, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu src, src, 0x20
+ addu dest, dest, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
+
+end_words:
+ EX(lw, t0, (src), l_fixup)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ sw t0, (dest)
+ addu src, src, 0x4
+ bnez t8, end_words
+ addu dest, dest, 0x4
+
+maybe_end_cruft:
+ andi t2, a2, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a2, t2
+ beqz t2, out
+ move a2, t2
+
+end_bytes:
+ EX(lb, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+ addu src, src, 0x1
+ bnez a2, end_bytes
+ addu dest, dest, 0x1
+
+out:
+ jr ra
+ move v0, sum
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+memcpy_u_src:
+ bnez t8, small_memcpy # < 8 bytes?
+ move t2, a2
+
+ beqz a2, out
+ andi t7, src, 0x1 # odd alignment?
+
+u_hword_align:
+ beqz t7, u_word_align
+ andi t8, src, 0x2
+
+ EX(lbu, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ addu dest, dest, 0x1
+ andi t8, src, 0x2
+
+u_word_align:
+ beqz t8, u_dword_align
+ sltiu t8, a2, 56
+
+ EX(lhu, t0, (src), l_fixup)
+ subu a2, a2, 0x2
+ ush t0, (dest)
+ ADDC(sum, t0)
+ sltiu t8, a2, 56
+ addu dest, dest, 0x2
+ addu src, src, 0x2
+
+u_dword_align:
+ bnez t8, u_do_end_words
+ move t8, a2
+
+ andi t8, src, 0x4
+ beqz t8, u_qword_align
+ andi t8, src, 0x8
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu a2, a2, 0x4
+ ADDC(sum, t0)
+ usw t0, 0x00(dest)
+ addu src, src, 0x4
+ addu dest, dest, 0x4
+ andi t8, src, 0x8
+
+u_qword_align:
+ beqz t8, u_oword_align
+ andi t8, src, 0x10
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ subu a2, a2, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ usw t0, 0x00(dest)
+ addu src, src, 0x8
+ usw t1, 0x04(dest)
+ andi t8, src, 0x10
+ addu dest, dest, 0x8
+
+u_oword_align:
+ beqz t8, u_begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(src), l_fixup)
+ EX(lw, t4, 0x0c(src), l_fixup)
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ usw t3, 0x08(dest)
+ subu a2, a2, 0x10
+ usw t4, 0x0c(dest)
+ addu src, src, 0x10
+ usw t0, 0x00(dest)
+ srl t8, a2, 0x7
+ addu dest, dest, 0x10
+ usw t1, -0x0c(dest)
+
+u_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+u_move_128bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x40, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu src, src, 0x80
+ bnez t8, u_move_128bytes
+ addu dest, dest, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+u_move_64bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+ addu dest, dest, 0x40
+
+1:
+ beqz t2, u_do_end_words
+ andi t8, a2, 0x1c
+
+u_move_32bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu src, src, 0x20
+ addu dest, dest, 0x20
+
+u_do_end_words:
+ beqz t8, u_maybe_end_cruft
+ srl t8, t8, 0x2
+
+u_end_words:
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ usw t0, 0x00(dest)
+ addu src, src, 0x4
+ bnez t8, u_end_words
+ addu dest, dest, 0x4
+
+u_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+u_cannot_optimize:
+ j small_csumcpy; move a2, t2
+ beqz t2, out
+ move a2, t2
+
+u_end_bytes:
+ EX(lb, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+ addu src, src, 0x1
+ bnez a2, u_end_bytes
+ addu dest, dest, 0x1
+
+ jr ra
+ move v0, sum
+ END(csum_partial_copy_from_user)
+
+l_fixup:
+ beqz t7, 1f # odd buffer alignment?
+ nop
+ sll v1, sum, 8 # swap bytes
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1: ADDC(sum, a3) # Add csum argument.
+
+ lw t0, THREAD_BUADDR($28) # clear the rest of the buffer
+ nop
+ subu t1, t0, src # where to start clearing
+ addu a0, dest, t1
+ move a1, zero # zero fill
+ j __bzero
+ subu a2, t5, t0 # a2 = srcend - bad = bytes to go
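
A note on the fixup path: because all loads are strictly ascending, the faulting address saved in THREAD_BUADDR tells l_fixup exactly how far the copy got, and everything past that point in the destination is zeroed via __bzero. The arithmetic, sketched in C (function and parameter names are ours):

    #include <string.h>

    /* bad    = faulting source address (THREAD_BUADDR)
     * src    = original source, dst = original destination
     * srcend = src + len (kept in t5 by the prologue above) */
    static unsigned long fixup_zero_tail(unsigned long bad, unsigned long src,
    				     unsigned long dst, unsigned long srcend)
    {
    	unsigned long done = bad - src;			/* bytes copied */
    	memset((void *)(dst + done), 0, srcend - bad);	/* the __bzero call */
    	return done;
    }
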
diff --git a/arch/mips/lib/ide-no.c b/arch/mips/lib/ide-no.c
new file mode 100644
index 000000000..3b6307b51
--- /dev/null
+++ b/arch/mips/lib/ide-no.c
@@ -0,0 +1,73 @@
+/*
+ * arch/mips/lib/ide-no.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Stub IDE routines to keep Linux from crashing on machines which don't
+ * have IDE, like the Indy.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ *
+ * $Id: ide-no.c,v 1.1 1998/05/03 00:28:00 ralf Exp $
+ */
+#include <linux/hdreg.h>
+#include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <asm/ide.h>
+
+static int no_ide_default_irq(ide_ioreg_t base)
+{
+ return 0;
+}
+
+static ide_ioreg_t no_ide_default_io_base(int index)
+{
+ return 0;
+}
+
+static void no_ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base,
+ int *irq)
+{
+}
+
+static int no_ide_request_irq(unsigned int irq,
+ void (*handler)(int,void *, struct pt_regs *),
+ unsigned long flags, const char *device,
+ void *dev_id)
+{
+ panic("no_no_ide_request_irq called - shouldn't happen");
+}
+
+static void no_ide_free_irq(unsigned int irq, void *dev_id)
+{
+ panic("no_ide_free_irq called - shouldn't happen");
+}
+
+static int no_ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ panic("no_ide_check_region called - shouldn't happen");
+}
+
+static void no_ide_request_region(ide_ioreg_t from, unsigned int extent,
+ const char *name)
+{
+ panic("no_ide_request_region called - shouldn't happen");
+}
+
+static void no_ide_release_region(ide_ioreg_t from, unsigned int extent)
+{
+ panic("no_ide_release_region called - shouldn't happen");
+}
+
+struct ide_ops no_ide_ops = {
+ &no_ide_default_irq,
+ &no_ide_default_io_base,
+ &no_ide_init_hwif_ports,
+ &no_ide_request_irq,
+ &no_ide_free_irq,
+ &no_ide_check_region,
+ &no_ide_request_region,
+ &no_ide_release_region
+};
diff --git a/arch/mips/lib/ide-std.c b/arch/mips/lib/ide-std.c
new file mode 100644
index 000000000..47b103c03
--- /dev/null
+++ b/arch/mips/lib/ide-std.c
@@ -0,0 +1,91 @@
+/*
+ * arch/mips/lib/ide-std.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * IDE routines for typical PC-like standard configurations.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ */
+#include <linux/hdreg.h>
+#include <asm/ptrace.h>
+#include <asm/ide.h>
+
+static int std_ide_default_irq(ide_ioreg_t base)
+{
+ switch (base) {
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+ case 0x1e8: return 11;
+ case 0x168: return 10;
+ default:
+ return 0;
+ }
+}
+
+static ide_ioreg_t std_ide_default_io_base(int index)
+{
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ default:
+ return 0;
+ }
+}
+
+static void std_ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base,
+ int *irq)
+{
+ ide_ioreg_t port = base;
+ int i = 8;
+
+ while (i--)
+ *p++ = port++;
+ *p++ = base + 0x206;
+ if (irq != NULL)
+ *irq = 0;
+}
+
+static int std_ide_request_irq(unsigned int irq,
+ void (*handler)(int,void *, struct pt_regs *),
+ unsigned long flags, const char *device,
+ void *dev_id)
+{
+ return request_irq(irq, handler, flags, device, dev_id);
+}
+
+static void std_ide_free_irq(unsigned int irq, void *dev_id)
+{
+ free_irq(irq, dev_id);
+}
+
+static int std_ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ return check_region(from, extent);
+}
+
+static void std_ide_request_region(ide_ioreg_t from, unsigned int extent,
+ const char *name)
+{
+ request_region(from, extent, name);
+}
+
+static void std_ide_release_region(ide_ioreg_t from, unsigned int extent)
+{
+ release_region(from, extent);
+}
+
+struct ide_ops std_ide_ops = {
+ &std_ide_default_irq,
+ &std_ide_default_io_base,
+ &std_ide_init_hwif_ports,
+ &std_ide_request_irq,
+ &std_ide_free_irq,
+ &std_ide_check_region,
+ &std_ide_request_region,
+ &std_ide_release_region
+};
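
Both files export the same struct ide_ops shape, so a platform picks its backend once at setup time and the IDE layer dispatches through the pointer. Roughly (the setup hook name here is made up for illustration):

    #include <asm/ide.h>

    extern struct ide_ops std_ide_ops;	/* from ide-std.c */
    extern struct ide_ops no_ide_ops;	/* from ide-no.c  */
    struct ide_ops *ide_ops;		/* dispatched through by <asm/ide.h> */

    void mips_ide_setup(int have_isa_ide)	/* hypothetical hook */
    {
    	ide_ops = have_isa_ide ? &std_ide_ops : &no_ide_ops;
    }
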
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 95639cb01..2bae5324d 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -1,221 +1,701 @@
-/* memcpy.S: Mips optimized memcpy based upon SparcLinux code.
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
- * Copyright(C) 1995 Linus Torvalds
- * Copyright(C) 1996 David S. Miller
- * Copyright(C) 1996 Eddie C. Dost
+ * $Id: memcpy.S,v 1.3 1998/04/27 06:00:36 ralf Exp $
*
- * derived from:
- * e-mail between David and Eddie.
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected. In order to keep the exception fixup routine
+ * simple, all memory accesses in __copy_user to src resp. dst are strictly
+ * incremental. The fixup routine depends on $at not being changed.
*/
-
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5) \
- lw t0, (offset + 0x18)(src); \
- lw t1, (offset + 0x1c)(src); \
- sw t0, (offset + 0x18)(dst); \
- lw t2, (offset + 0x10)(src); \
- sw t1, (offset + 0x1c)(dst); \
- lw t3, (offset + 0x14)(src); \
- sw t2, (offset + 0x10)(dst); \
- lw t4, (offset + 0x08)(src); \
- sw t3, (offset + 0x14)(dst); \
- lw t5, (offset + 0x0c)(src); \
- sw t4, (offset + 0x08)(dst); \
- lw t0, (offset + 0x00)(src); \
- sw t5, (offset + 0x0c)(dst); \
- lw t1, (offset + 0x04)(src); \
- sw t0, (offset + 0x00)(dst); \
- sw t1, (offset + 0x04)(dst); \
-
- /* Alignment cases are:
- * 1) (src&0x3)=0x0 (dst&0x3)=0x0 can optimize
- * 2) (src&0x3)=0x1 (dst&0x3)=0x1 can optimize
- * 3) (src&0x3)=0x2 (dst&0x3)=0x2 can optimize
- * 4) (src&0x3)=0x3 (dst&0x3)=0x3 can optimize
- * 5) anything else cannot optimize
- */
-
- /* I hate MIPS register names... AIEEE, it's a SPARC! */
-#define o0 a0
-#define o1 a1
-#define o2 a2
-#define o3 a3
-#define o4 t0
-#define o5 t1
-#define o6 sp
-#define o7 ra
-#define g0 zero
-#define g1 t2
-#define g2 t3
-#define g3 t4
-#define g4 t5
-#define g5 t6
-#define g6 t7
-#define g7 t8
+/*
+ * The fixup routine for copy_to_user depends on copying strictly in
+ * increasing order. Gas expands the ulw/usw macros in the wrong order for
+ * little endian machines, so we cannot depend on them.
+ */
+#ifdef __MIPSEB__
+#define uswL swl
+#define uswU swr
+#define ulwL lwl
+#define ulwU lwr
+#endif
+#ifdef __MIPSEL__
+#define uswL swr
+#define uswU swl
+#define ulwL lwr
+#define ulwU lwl
+#endif
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define UEX(insn,reg,addr,handler) \
+9: insn ## L reg, addr; \
+10: insn ## U reg, 3 + addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ PTR 10b, handler; \
+ .previous
+
+/* ascending order, destination aligned */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ EX(sw, t0, (offset + 0x00)(dst), s_fixup); \
+ EX(sw, t1, (offset + 0x04)(dst), s_fixup); \
+ EX(sw, t2, (offset + 0x08)(dst), s_fixup); \
+ EX(sw, t3, (offset + 0x0c)(dst), s_fixup); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ EX(sw, t0, (offset + 0x10)(dst), s_fixup); \
+ EX(sw, t1, (offset + 0x14)(dst), s_fixup); \
+ EX(sw, t2, (offset + 0x18)(dst), s_fixup); \
+ EX(sw, t3, (offset + 0x1c)(dst), s_fixup)
+
+/* ascending order, destination unaligned */
+#define UMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ UEX(usw, t0, (offset + 0x00)(dst), s_fixup); \
+ UEX(usw, t1, (offset + 0x04)(dst), s_fixup); \
+ UEX(usw, t2, (offset + 0x08)(dst), s_fixup); \
+ UEX(usw, t3, (offset + 0x0c)(dst), s_fixup); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ UEX(usw, t0, (offset + 0x10)(dst), s_fixup); \
+ UEX(usw, t1, (offset + 0x14)(dst), s_fixup); \
+ UEX(usw, t2, (offset + 0x18)(dst), s_fixup); \
+ UEX(usw, t3, (offset + 0x1c)(dst), s_fixup)
.text
.set noreorder
.set noat
- .globl bcopy
- .globl amemmove
- .globl memmove
- .globl memcpy
- .align 2
-bcopy:
- move o3, o0
- move o0, o1
- move o1, o3
-
-amemmove:
-memmove:
-memcpy: /* o0=dst o1=src o2=len */
- xor o4, o0, o1
- andi o4, o4, 0x3
- move g6, o0
- beq o4, g0, can_align
- sltiu g7, o2, 0x8
-
- b cannot_optimize
- move g1, o2
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, a0 /* return value */
+__memcpy:
+EXPORT(__copy_user)
+ xor t0, a0, a1
+ andi t0, t0, 0x3
+ move t7, a0
+ beqz t0, can_align
+ sltiu t8, a2, 0x8
+
+ b memcpy_u_src # bad alignment
+ move t2, a2
can_align:
- bne g7, g0, cannot_optimize
- move g1, o2
+ bnez t8, small_memcpy # < 8 bytes to copy
+ move t2, a2
- beq o2, g0, out
- andi g7, o1, 0x1
+ beqz a2, out
+ andi t8, a1, 0x1
hword_align:
- beq g7, g0, word_align
- andi g7, o1, 0x2
+ beqz t8, word_align
+ andi t8, a1, 0x2
- lbu o4, 0x00(o1)
- subu o2, o2, 0x1
- sb o4, 0x00(o0)
- addu o1, o1, 0x1
- addu o0, o0, 0x1
- andi g7, o1, 0x2
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ addu a0, a0, 0x1
+ andi t8, a1, 0x2
word_align:
- beq g7, g0, dword_align
- sltiu g7, o2, 56
+ beqz t8, dword_align
+ sltiu t8, a2, 56
- lhu o4, 0x00(o1)
- subu o2, o2, 0x2
- sh o4, 0x00(o0)
- sltiu g7, o2, 56
- addu o0, o0, 0x2
- addu o1, o1, 0x2
+ EX(lh, t0, (a1), l_fixup)
+ subu a2, a2, 0x2
+ EX(sh, t0, (a0), s_fixup)
+ sltiu t8, a2, 56
+ addu a0, a0, 0x2
+ addu a1, a1, 0x2
dword_align:
- bne g7, g0, do_end_words
- move g7, o2
+ bnez t8, do_end_words
+ move t8, a2
- andi g7, o1, 0x4
- beq g7, zero, qword_align
- andi g7, o1, 0x8
+ andi t8, a1, 0x4
+ beqz t8, qword_align
+ andi t8, a1, 0x8
- lw o4, 0x00(o1)
- subu o2, o2, 0x4
- sw o4, 0x00(o0)
- addu o1, o1, 0x4
- addu o0, o0, 0x4
- andi g7, o1, 0x8
+ EX(lw, t0, 0x00(a1), l_fixup)
+ subu a2, a2, 0x4
+ EX(sw, t0, 0x00(a0), s_fixup)
+ addu a1, a1, 0x4
+ addu a0, a0, 0x4
+ andi t8, a1, 0x8
qword_align:
- beq g7, g0, oword_align
- andi g7, o1, 0x10
-
- lw o4, 0x00(o1)
- lw o5, 0x04(o1)
- subu o2, o2, 0x8
- sw o4, 0x00(o0)
- addu o1, o1, 0x8
- sw o5, 0x04(o0)
- andi g7, o1, 0x10
- addu o0, o0, 0x8
+ beqz t8, oword_align
+ andi t8, a1, 0x10
+
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ subu a2, a2, 0x8
+ EX(sw, t0, 0x00(a0), s_fixup)
+ EX(sw, t1, 0x04(a0), s_fixup)
+ addu a1, a1, 0x8
+ andi t8, a1, 0x10
+ addu a0, a0, 0x8
oword_align:
- beq g7, g0, begin_movement
- srl g7, o2, 0x7
-
- lw g2, 0x08(o1)
- lw g3, 0x0c(o1)
- lw o4, 0x00(o1)
- lw o5, 0x04(o1)
- sw g2, 0x08(o0)
- subu o2, o2, 0x10
- sw g3, 0x0c(o0)
- addu o1, o1, 0x10
- sw o4, 0x00(o0)
- srl g7, o2, 0x7
- addu o0, o0, 0x10
- sw o5, -0x0c(o0)
+ beqz t8, begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x00(a1), l_fixup)
+ EX(lw, t4, 0x04(a1), l_fixup)
+ EX(lw, t0, 0x08(a1), l_fixup)
+ EX(lw, t1, 0x0c(a1), l_fixup)
+ EX(sw, t3, 0x00(a0), s_fixup)
+ EX(sw, t4, 0x04(a0), s_fixup)
+ EX(sw, t0, 0x08(a0), s_fixup)
+ EX(sw, t1, 0x0c(a0), s_fixup)
+ subu a2, a2, 0x10
+ addu a1, a1, 0x10
+ srl t8, a2, 0x7
+ addu a0, a0, 0x10
begin_movement:
- beq g7, g0, 0f
- andi g1, o2, 0x40
+ beqz t8, 0f
+ andi t2, a2, 0x40
move_128bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x40, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x60, o4, o5, g2, g3, g4, g5)
- subu g7, g7, 0x01
- addu o1, o1, 0x80
- bne g7, g0, move_128bytes
- addu o0, o0, 0x80
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x40, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x60, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu a1, a1, 0x80
+ bnez t8, move_128bytes
+ addu a0, a0, 0x80
0:
- beq g1, g0, 1f
- andi g1, o2, 0x20
+ beqz t2, 1f
+ andi t2, a2, 0x20
move_64bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5)
- addu o1, o1, 0x40
- addu o0, o0, 0x40
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ addu a1, a1, 0x40
+ addu a0, a0, 0x40
1:
- beq g1, g0, do_end_words
- andi g7, o2, 0x1c
+ beqz t2, do_end_words
+ andi t8, a2, 0x1c
move_32bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- andi g7, o2, 0x1c
- addu o1, o1, 0x20
- addu o0, o0, 0x20
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu a1, a1, 0x20
+ addu a0, a0, 0x20
do_end_words:
- beq g7, g0, maybe_end_cruft
- srl g7, g7, 0x2
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
end_words:
- lw o4, 0x00(o1)
- subu g7, g7, 0x1
- sw o4, 0x00(o0)
- addu o1, o1, 0x4
- bne g7, g0, end_words
- addu o0, o0, 0x4
+ EX(lw, t0, (a1), l_fixup)
+ subu t8, t8, 0x1
+ EX(sw, t0, (a0), s_fixup)
+ addu a1, a1, 0x4
+ bnez t8, end_words
+ addu a0, a0, 0x4
maybe_end_cruft:
- andi g1, o2, 0x3
+ andi t2, a2, 0x3
-cannot_optimize:
- beq g1, g0, out
- move o2, g1
+small_memcpy:
+ beqz t2, out
+ move a2, t2
end_bytes:
- lbu o4, 0x00(o1)
- subu o2, o2, 0x1
- sb o4, 0x00(o0)
- addu o1, o1, 0x1
- bne o2, g0, end_bytes
- addu o0, o0, 0x1
-
-out:
- jr o7
- move v0, g6
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ bnez a2, end_bytes
+ addu a0, a0, 0x1
+
+out: jr ra
+ move a2, zero
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+memcpy_u_src:
+ bnez t8, small_memcpy # < 8 bytes?
+ move t2, a2
+
+ addiu t0, a1, 7 # t0: how much to align
+ ori t0, 7
+ xori t0, 7
+ subu t0, a1
+
+ UEX(ulw, t1, 0(a1), l_fixup) # dword alignment
+ UEX(ulw, t2, 4(a1), l_fixup)
+ UEX(usw, t1, 0(a0), s_fixup)
+ UEX(usw, t2, 4(a0), s_fixup)
+
+ addu a1, t0 # src
+ addu a0, t0 # dst
+ subu a2, t0 # len
+
+ sltiu t8, a2, 56
+ bnez t8, u_do_end_words
+ andi t8, a2, 0x3c
+
+ andi t8, a1, 8 # now qword aligned?
+
+u_qword_align:
+ beqz t8, u_oword_align
+ andi t8, a1, 0x10
+
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ subu a2, a2, 0x8
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ UEX(usw, t1, 0x04(a0), s_fixup)
+ addu a1, a1, 0x8
+ andi t8, a1, 0x10
+ addu a0, a0, 0x8
+
+u_oword_align:
+ beqz t8, u_begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(a1), l_fixup)
+ EX(lw, t4, 0x0c(a1), l_fixup)
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ UEX(usw, t3, 0x08(a0), s_fixup)
+ UEX(usw, t4, 0x0c(a0), s_fixup)
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ UEX(usw, t1, 0x04(a0), s_fixup)
+ subu a2, a2, 0x10
+ addu a1, a1, 0x10
+ srl t8, a2, 0x7
+ addu a0, a0, 0x10
+
+u_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+u_move_128bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x40, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x60, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu a1, a1, 0x80
+ bnez t8, u_move_128bytes
+ addu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+u_move_64bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ addu a1, a1, 0x40
+ addu a0, a0, 0x40
+
+1:
+ beqz t2, u_do_end_words
+ andi t8, a2, 0x1c
+
+u_move_32bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu a1, a1, 0x20
+ addu a0, a0, 0x20
+
+u_do_end_words:
+ beqz t8, u_maybe_end_cruft
+ srl t8, t8, 0x2
+
+u_end_words:
+ EX(lw, t0, 0x00(a1), l_fixup)
+ subu t8, t8, 0x1
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ addu a1, a1, 0x4
+ bnez t8, u_end_words
+ addu a0, a0, 0x4
+
+u_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+u_cannot_optimize:
+ beqz t2, out
+ move a2, t2
+
+u_end_bytes:
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ bnez a2, u_end_bytes
+ addu a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(memcpy)
+
+/* descending order, destination aligned */
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ sw t0, (offset + 0x10)(dst); \
+ sw t1, (offset + 0x14)(dst); \
+ sw t2, (offset + 0x18)(dst); \
+ sw t3, (offset + 0x1c)(dst); \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ sw t0, (offset + 0x00)(dst); \
+ sw t1, (offset + 0x04)(dst); \
+ sw t2, (offset + 0x08)(dst); \
+ sw t3, (offset + 0x0c)(dst)
+
+/* descending order, destination unaligned */
+#define RUMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ usw t0, (offset + 0x10)(dst); \
+ usw t1, (offset + 0x14)(dst); \
+ usw t2, (offset + 0x18)(dst); \
+ usw t3, (offset + 0x1c)(dst); \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ usw t0, (offset + 0x00)(dst); \
+ usw t1, (offset + 0x04)(dst); \
+ usw t2, (offset + 0x08)(dst); \
+ usw t3, (offset + 0x0c)(dst)
+
+ .align 5
+LEAF(memmove)
+ sltu t0, a0, a1 # dst < src -> memcpy
+ bnez t0, memcpy
+ addu v0, a0, a2
+ sltu t0, v0, a1 # dst + len < src -> non-
+ bnez t0, __memcpy # overlapping, can use memcpy
+ move v0, a0 /* return value */
+ END(memmove)
+
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ addu a0, a2 # dst = dst + len
+ addu a1, a2 # src = src + len
+
+ xor t0, a0, a1
+ andi t0, t0, 0x3
+ move t7, a0
+ beqz t0, r_can_align
+ sltiu t8, a2, 0x8
+
+ b r_memcpy_u_src # bad alignment
+ move t2, a2
+
+r_can_align:
+ bnez t8, r_small_memcpy # < 8 bytes to copy
+ move t2, a2
+
+ beqz a2, r_out
+ andi t8, a1, 0x1
+
+r_hword_align:
+ beqz t8, r_word_align
+ andi t8, a1, 0x2
+
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ subu a0, a0, 0x1
+ andi t8, a1, 0x2
+
+r_word_align:
+ beqz t8, r_dword_align
+ sltiu t8, a2, 56
+
+ lh t0, -2(a1)
+ subu a2, a2, 0x2
+ sh t0, -2(a0)
+ sltiu t8, a2, 56
+ subu a0, a0, 0x2
+ subu a1, a1, 0x2
+
+r_dword_align:
+ bnez t8, r_do_end_words
+ move t8, a2
+
+ andi t8, a1, 0x4
+ beqz t8, r_qword_align
+ andi t8, a1, 0x8
+
+ lw t0, -4(a1)
+ subu a2, a2, 0x4
+ sw t0, -4(a0)
+ subu a1, a1, 0x4
+ subu a0, a0, 0x4
+ andi t8, a1, 0x8
+
+r_qword_align:
+ beqz t8, r_oword_align
+ andi t8, a1, 0x10
+
+ subu a1, a1, 0x8
+ lw t0, 0x04(a1)
+ lw t1, 0x00(a1)
+ subu a0, a0, 0x8
+ sw t0, 0x04(a0)
+ sw t1, 0x00(a0)
+ subu a2, a2, 0x8
+
+ andi t8, a1, 0x10
+
+r_oword_align:
+ beqz t8, r_begin_movement
+ srl t8, a2, 0x7
+
+ subu a1, a1, 0x10
+ lw t3, 0x08(a1) # assumes subblock ordering
+ lw t4, 0x0c(a1)
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x10
+ sw t3, 0x08(a0)
+ sw t4, 0x0c(a0)
+ sw t0, 0x00(a0)
+ sw t1, 0x04(a0)
+ subu a2, a2, 0x10
+ srl t8, a2, 0x7
+
+r_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+r_move_128bytes:
+ RMOVE_BIGCHUNK(a1, a0, -0x80, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x60, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x40, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x20, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ subu a1, a1, 0x80
+ bnez t8, r_move_128bytes
+ subu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+r_move_64bytes:
+ subu a1, a1, 0x40
+ subu a0, a0, 0x40
+ RMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+
+1:
+ beqz t2, r_do_end_words
+ andi t8, a2, 0x1c
+
+r_move_32bytes:
+ subu a1, a1, 0x20
+ subu a0, a0, 0x20
+ RMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+
+r_do_end_words:
+ beqz t8, r_maybe_end_cruft
+ srl t8, t8, 0x2
+
+r_end_words:
+ lw t0, -4(a1)
+ subu t8, t8, 0x1
+ sw t0, -4(a0)
+ subu a1, a1, 0x4
+ bnez t8, r_end_words
+ subu a0, a0, 0x4
+
+r_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+r_small_memcpy:
+ beqz t2, r_out
+ move a2, t2
+
+r_end_bytes:
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ bnez a2, r_end_bytes
+ subu a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+r_memcpy_u_src:
+ bnez t8, r_small_memcpy # < 8 bytes?
+ move t2, a2
+
+ andi t0, a1, 7 # t0: how much to align
+
+ ulw t1, -8(a1) # dword alignment
+ ulw t2, -4(a1)
+ usw t1, -8(a0)
+ usw t2, -4(a0)
+
+ subu a1, t0 # src
+ subu a0, t0 # dst
+ subu a2, t0 # len
+
+ sltiu t8, a2, 56
+ bnez t8, ru_do_end_words
+ andi t8, a2, 0x3c
+
+ andi t8, a1, 8 # now qword aligned?
+
+ru_qword_align:
+ beqz t8, ru_oword_align
+ andi t8, a1, 0x10
+
+ subu a1, a1, 0x8
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x8
+ usw t0, 0x00(a0)
+ usw t1, 0x04(a0)
+ subu a2, a2, 0x8
+
+ andi t8, a1, 0x10
+
+ru_oword_align:
+ beqz t8, ru_begin_movement
+ srl t8, a2, 0x7
+
+ subu a1, a1, 0x10
+ lw t3, 0x08(a1) # assumes subblock ordering
+ lw t4, 0x0c(a1)
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x10
+ usw t3, 0x08(a0)
+ usw t4, 0x0c(a0)
+ usw t0, 0x00(a0)
+ usw t1, 0x04(a0)
+ subu a2, a2, 0x10
+
+ srl t8, a2, 0x7
+
+ru_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+ru_move_128bytes:
+ RUMOVE_BIGCHUNK(a1, a0, -0x80, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x60, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x40, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x20, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ subu a1, a1, 0x80
+ bnez t8, ru_move_128bytes
+ subu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+ru_move_64bytes:
+ subu a1, a1, 0x40
+ subu a0, a0, 0x40
+ RUMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+
+1:
+ beqz t2, ru_do_end_words
+ andi t8, a2, 0x1c
+
+ru_move_32bytes:
+ subu a1, a1, 0x20
+ subu a0, a0, 0x20
+ RUMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+
+ru_do_end_words:
+ beqz t8, ru_maybe_end_cruft
+ srl t8, t8, 0x2
+
+ru_end_words:
+ lw t0, -4(a1)
+ usw t0, -4(a0)
+ subu t8, t8, 0x1
+ subu a1, a1, 0x4
+ bnez t8, ru_end_words
+ subu a0, a0, 0x4
+
+ru_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+ru_cannot_optimize:
+ beqz t2, r_out
+ move a2, t2
+
+ru_end_bytes:
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ bnez a2, ru_end_bytes
+ subu a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
+
+l_fixup: # clear the rest of the buffer
+ lw t0, THREAD_BUADDR($28)
+ nop
+ subu a2, AT, t0 # a2 bytes to go
+ addu a0, t0 # compute start address in a0
+ subu a0, a1
+ j __bzero
+ move a1, zero
+
+s_fixup:
+ jr ra
+ nop
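
The memmove entry above is just a dispatcher: forward copies go through memcpy, overlapping ones fall through into __rmemcpy, which copies from the end down. The intent in C (a sketch; the second non-overlap test in the assembly is folded into the first branch here):

    #include <string.h>

    extern void *__rmemcpy(void *dst, const void *src, size_t n);

    static void *memmove_sketch(void *dst, const void *src, size_t n)
    {
    	if (dst < src)			/* forward copy cannot clobber src */
    		return memcpy(dst, src, n);
    	return __rmemcpy(dst, src, n);	/* copies high addresses first */
    }
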
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
new file mode 100644
index 000000000..32f175756
--- /dev/null
+++ b/arch/mips/lib/memset.S
@@ -0,0 +1,141 @@
+/*
+ * arch/mips/lib/memset.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ *
+ * $Id: memset.S,v 1.2 1998/04/25 17:01:45 ralf Exp $
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define F_FILL64(dst, offset, val, fixup) \
+ EX(sw, val, (offset + 0x00)(dst), fixup); \
+ EX(sw, val, (offset + 0x04)(dst), fixup); \
+ EX(sw, val, (offset + 0x08)(dst), fixup); \
+ EX(sw, val, (offset + 0x0c)(dst), fixup); \
+ EX(sw, val, (offset + 0x10)(dst), fixup); \
+ EX(sw, val, (offset + 0x14)(dst), fixup); \
+ EX(sw, val, (offset + 0x18)(dst), fixup); \
+ EX(sw, val, (offset + 0x1c)(dst), fixup); \
+ EX(sw, val, (offset + 0x20)(dst), fixup); \
+ EX(sw, val, (offset + 0x24)(dst), fixup); \
+ EX(sw, val, (offset + 0x28)(dst), fixup); \
+ EX(sw, val, (offset + 0x2c)(dst), fixup); \
+ EX(sw, val, (offset + 0x30)(dst), fixup); \
+ EX(sw, val, (offset + 0x34)(dst), fixup); \
+ EX(sw, val, (offset + 0x38)(dst), fixup); \
+ EX(sw, val, (offset + 0x3c)(dst), fixup)
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+ .set noreorder
+ .align 5
+LEAF(memset)
+ beqz a1, 1f
+ move v0, a0 /* result */
+
+ andi a1, 0xff /* spread fillword */
+ sll t1, a1, 8
+ or a1, t1
+ sll t1, a1, 16
+ or a1, t1
+1:
+
+EXPORT(__bzero)
+ sltiu t0, a2, 4 /* very small region? */
+ bnez t0, small_memset
+ andi t0, a0, 3 /* aligned? */
+
+ beqz t0, 1f
+ subu t0, 4 /* alignment in bytes */
+
+#ifdef __MIPSEB__
+ EX(swl, a1, (a0), first_fixup) /* make word aligned */
+#endif
+#ifdef __MIPSEL__
+ EX(swr, a1, (a0), first_fixup) /* make word aligned */
+#endif
+ subu a0, t0 /* word align ptr */
+ addu a2, t0 /* correct size */
+
+1: ori t1, a2, 0x3f /* # of full blocks */
+ xori t1, 0x3f
+ beqz t1, memset_partial /* no block to fill */
+ andi t0, a2, 0x3c
+
+ addu t1, a0 /* end address */
+ .set reorder
+1: addiu a0, 64
+ F_FILL64(a0, -64, a1, fwd_fixup)
+ bne t1, a0, 1b
+ .set noreorder
+
+memset_partial:
+ la t1, 2f /* where to start */
+ subu t1, t0
+ jr t1
+ addu a0, t0 /* dest ptr */
+
+ F_FILL64(a0, -64, a1, partial_fixup) /* ... but first do wrds ... */
+2: andi a2, 3 /* 0 <= n <= 3 to go */
+
+ beqz a2, 1f
+ addu a0, a2 /* What's left */
+#ifdef __MIPSEB__
+ EX(swr, a1, -1(a0), last_fixup)
+#endif
+#ifdef __MIPSEL__
+ EX(swl, a1, -1(a0), last_fixup)
+#endif
+1: jr ra
+ move a2, zero
+
+small_memset:
+ beqz a2, 2f
+ addu t1, a0, a2
+
+1: addiu a0, 1 /* fill bytewise */
+ bne t1, a0, 1b
+ sb a1, -1(a0)
+
+2: jr ra /* done */
+ move a2, zero
+ END(memset)
+
+first_fixup:
+ jr ra
+ nop
+
+fwd_fixup:
+ lw t0, THREAD_BUADDR($28)
+ andi a2, 0x3f
+ addu a2, t1
+ jr ra
+ subu a2, t0
+
+partial_fixup:
+ lw t0, THREAD_BUADDR($28)
+ andi a2, 3
+ addu a2, t1
+ jr ra
+ subu a2, t0
+
+last_fixup:
+ jr ra
+ andi v1, a2, 3
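
The first thing memset above does is spread the fill byte across a word so the block loop can store four bytes per sw. In C (sketch):

    #include <stdint.h>

    static uint32_t spread_byte(uint8_t c)
    {
    	uint32_t w = c;
    	w |= w << 8;	/* sll t1, a1, 8 ; or a1, t1 */
    	w |= w << 16;	/* sll t1, a1, 16; or a1, t1 */
    	return w;	/* e.g. 0x5a -> 0x5a5a5a5a   */
    }

The memset_partial tail then computes a jump into the middle of F_FILL64 so that exactly one sw executes per remaining word: each sw is 4 bytes of code, matching the 4 bytes it stores, so subtracting the remaining byte count from the label address lands on the right instruction.
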
diff --git a/arch/mips/lib/memset.c b/arch/mips/lib/memset.c
deleted file mode 100644
index bbdbcbb31..000000000
--- a/arch/mips/lib/memset.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* linux/arch/mips/lib/memset.c
- *
- * This is from GNU libc.
- */
-
-#include <linux/types.h>
-
-#define op_t unsigned long int
-#define OPSIZ (sizeof(op_t))
-
-typedef unsigned char byte;
-
-void *memset(void *dstpp, char c, size_t len)
-{
- long int dstp = (long int) dstpp;
-
- if (len >= 8) {
- size_t xlen;
- op_t cccc;
-
- cccc = (unsigned char) c;
- cccc |= cccc << 8;
- cccc |= cccc << 16;
-
- /* There are at least some bytes to set.
- No need to test for LEN == 0 in this alignment loop. */
- while (dstp % OPSIZ != 0) {
- ((byte *) dstp)[0] = c;
- dstp += 1;
- len -= 1;
- }
-
- /* Write 8 `op_t' per iteration until less
- * than 8 `op_t' remain.
- */
- xlen = len / (OPSIZ * 8);
- while (xlen > 0) {
- ((op_t *) dstp)[0] = cccc;
- ((op_t *) dstp)[1] = cccc;
- ((op_t *) dstp)[2] = cccc;
- ((op_t *) dstp)[3] = cccc;
- ((op_t *) dstp)[4] = cccc;
- ((op_t *) dstp)[5] = cccc;
- ((op_t *) dstp)[6] = cccc;
- ((op_t *) dstp)[7] = cccc;
- dstp += 8 * OPSIZ;
- xlen -= 1;
- }
- len %= OPSIZ * 8;
-
- /* Write 1 `op_t' per iteration until less than
- * OPSIZ bytes remain.
- */
- xlen = len / OPSIZ;
- while (xlen > 0) {
- ((op_t *) dstp)[0] = cccc;
- dstp += OPSIZ;
- xlen -= 1;
- }
- len %= OPSIZ;
- }
-
- /* Write the last few bytes. */
- while (len > 0) {
- ((byte *) dstp)[0] = c;
- dstp += 1;
- len -= 1;
- }
-
- return dstpp;
-}
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index c42144b35..5f44c3eb4 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -5,29 +5,44 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1996 by Ralf Baechle
+ * Copyright (c) 1996, 1998 by Ralf Baechle
+ *
+ * $Id: strlen_user.S,v 1.3 1998/05/03 11:13:45 ralf Exp $
*/
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
#include <asm/sgidefs.h>
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
/*
* Return the size of a string (including the ending 0)
*
* Return 0 for error
*/
-LEAF(__strlen_user)
- move v0,zero
-1: lb t0,(a0)
- LONG_ADDIU v0,1
- LONG_ADDIU a0,1
- bnez t0,1b
- jr ra
- END(__strlen_user)
+LEAF(__strlen_user_asm)
+ lw v0, THREAD_CURDS($28) # pointer ok?
+ subu v0, zero, v0
+ and v0, a0
+ nor v0, zero, v0
+ beqz v0, fault
+EXPORT(__strlen_user_nocheck_asm)
+ move v0, a0
+1: EX(lb, t0, (v0), fault)
+ LONG_ADDIU v0, 1
+ bnez t0, 1b
+ LONG_SUBU v0, a0
+ jr ra
+ END(__strlen_user_asm)
- .section __ex_table,"a"
- PTR 1b,fault
- .previous
+ .section __ex_table,"a"
+ PTR 1b, fault
+ .previous
-fault: move v0,zero
- jr ra
+fault: move v0, zero
+ jr ra
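
The five instructions after the LEAF validate the pointer against the current segment (THREAD_CURDS) with pure mask arithmetic, avoiding a compare-and-branch sequence; only the checking entry runs them, and the nocheck entry falls in just below. The intent, in C (a sketch; the mask derivation is simplified and the names are ours):

    extern long __strlen_user_nocheck(const char *s);	/* the loop above */

    static long strlen_user_sketch(const char *s, unsigned long seg_mask)
    {
    	if ((unsigned long)s & seg_mask)	/* outside the segment? */
    		return 0;			/* error convention: 0  */
    	return __strlen_user_nocheck(s);
    }
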
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index f942740e6..f3240475a 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -6,12 +6,20 @@
* for more details.
*
* Copyright (c) 1996 by Ralf Baechle
+ *
+ * $Id: strncpy_user.S,v 1.3 1998/05/03 11:13:45 ralf Exp $
*/
#include <linux/errno.h>
-
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
/*
* Returns: -EFAULT if exception before terminator, N if the entire
* buffer filled, else strlen.
@@ -19,30 +27,37 @@
/*
 * Ugly special case we have to check: we might get passed a user space
- * pointer which wraps into the kernel space ...
+ * pointer which wraps into the kernel space. We don't deal with that. If
+ * it happens, at most some bytes of the exception handlers will be copied.
*/
-LEAF(__strncpy_from_user)
- move v0,zero
- move v1,a1
- .set noreorder
-1: lbu t0,(v1)
- LONG_ADDIU v1,1
- beqz t0,2f
- sb t0,(a0) # delay slot
- LONG_ADDIU v0,1
- bne v0,a2,1b
- LONG_ADDIU a0,1 # delay slot
- .set reorder
-2: LONG_ADDU t0,a1,v0
- xor t0,a1
- bltz t0,fault
- jr ra # return n
- END(__strncpy_from_user)
+LEAF(__strncpy_from_user_asm)
+ lw v0, THREAD_CURDS($28) # pointer ok?
+ subu v0, zero, v0
+ and v0, a1
+ nor v0, zero, v0
+ beqz v0, fault
+EXPORT(__strncpy_from_user_nocheck_asm)
+ move v0,zero
+ move v1,a1
+ .set noreorder
+1: EX(lbu, t0, (v1), fault)
+ LONG_ADDIU v1,1
+ beqz t0,2f
+ sb t0,(a0)
+ LONG_ADDIU v0,1
+ bne v0,a2,1b
+ LONG_ADDIU a0,1
+ .set reorder
+2: LONG_ADDU t0,a1,v0
+ xor t0,a1
+ bltz t0,fault
+ jr ra # return n
+ END(__strncpy_from_user_asm)
-fault: li v0,-EFAULT
- jr ra
+fault: li v0,-EFAULT
+ jr ra
- .section __ex_table,"a"
- PTR 1b,fault
- .previous
+ .section __ex_table,"a"
+ PTR 1b,fault
+ .previous
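
The check at label 2 is worth spelling out: on 32-bit MIPS the user/kernel split sits at bit 31 of the address, so a copy that wrapped from user into kernel space flips the sign bit between the start and end addresses. That is all the xor/bltz pair tests; in C:

    /* Nonzero if [start, start + copied) crossed the user/kernel
     * boundary, i.e. the sign bit of the address changed. */
    static int crossed_segment(unsigned long start, unsigned long copied)
    {
    	return (long)((start + copied) ^ start) < 0;
    }
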