summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/mips/jazz/setup.c13
-rw-r--r--arch/mips/kernel/entry.S8
-rw-r--r--arch/mips/kernel/mips_ksyms.c4
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/setup.c13
-rw-r--r--arch/mips/kernel/traps.c24
-rw-r--r--arch/mips/kernel/unaligned.c137
-rw-r--r--arch/mips/lib/Makefile15
-rw-r--r--arch/mips/lib/beep.S21
-rw-r--r--arch/mips/lib/checksum.c131
-rw-r--r--arch/mips/lib/copy_user.S201
-rw-r--r--arch/mips/lib/csum.S25
-rw-r--r--arch/mips/lib/csum_partial.S242
-rw-r--r--arch/mips/lib/csum_partial_copy.S518
-rw-r--r--arch/mips/lib/ide-no.c73
-rw-r--r--arch/mips/lib/ide-std.c91
-rw-r--r--arch/mips/lib/memcpy.S824
-rw-r--r--arch/mips/lib/memset.S141
-rw-r--r--arch/mips/lib/memset.c71
-rw-r--r--arch/mips/lib/strlen_user.S43
-rw-r--r--arch/mips/lib/strncpy_user.S63
-rw-r--r--arch/mips/mm/andes.c4
-rw-r--r--arch/mips/mm/fault.c5
-rw-r--r--arch/mips/mm/r4xx0.c6
-rw-r--r--arch/mips/mm/tfp.c5
-rw-r--r--arch/mips/sgi/kernel/indy_mc.c4
-rw-r--r--arch/mips/sgi/kernel/indy_sc.c57
-rw-r--r--arch/mips/sni/setup.c12
-rw-r--r--arch/mips/tools/offset.c3
-rw-r--r--drivers/scsi/sgiwd93.h4
-rw-r--r--drivers/sgi/char/sgiserial.c45
-rw-r--r--include/asm-mips/branch.h13
-rw-r--r--include/asm-mips/ide.h76
-rw-r--r--include/asm-mips/offset.h17
-rw-r--r--include/asm-mips/processor.h11
-rw-r--r--include/asm-mips/stackframe.h17
-rw-r--r--include/asm-mips/string.h7
-rw-r--r--include/asm-mips/system.h129
-rw-r--r--include/asm-mips/uaccess.h247
39 files changed, 2253 insertions, 1071 deletions
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 82fbdd143..7508904a5 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -5,10 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
*
- * $Id: setup.c,v 1.8 1998/03/04 08:29:10 ralf Exp $
+ * $Id: setup.c,v 1.9 1998/03/17 22:07:31 ralf Exp $
*/
+#include <linux/config.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -16,6 +18,7 @@
#include <linux/mm.h>
#include <asm/bootinfo.h>
#include <asm/keyboard.h>
+#include <asm/ide.h>
#include <asm/irq.h>
#include <asm/jazz.h>
#include <asm/ptrace.h>
@@ -43,6 +46,8 @@ extern void jazz_machine_restart(char *command);
extern void jazz_machine_halt(void);
extern void jazz_machine_power_off(void);
+extern struct ide_ops std_ide_ops;
+
void (*board_time_init)(struct irqaction *irq);
__initfunc(static void jazz_time_init(struct irqaction *irq))
@@ -120,4 +125,8 @@ __initfunc(void jazz_setup(void))
_machine_restart = jazz_machine_restart;
_machine_halt = jazz_machine_halt;
_machine_power_off = jazz_machine_power_off;
+
+#ifdef CONFIG_BLK_DEV_IDE
+ ide_ops = &std_ide_ops;
+#endif
}
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 413eb8a2f..40624da25 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -7,7 +7,7 @@
*
* Copyright (C) 1994, 1995 by Ralf Baechle
*
- * $Id: entry.S,v 1.8 1998/03/27 04:47:53 ralf Exp $
+ * $Id: entry.S,v 1.9 1998/04/05 11:23:50 ralf Exp $
*/
/*
@@ -100,6 +100,8 @@ LEAF(spurious_interrupt)
STI
#define __BUILD_clear_cli(exception) \
CLI
+#define __BUILD_clear_kmode(exception) \
+ KMODE
#define __BUILD_clear_fpe(exception) \
cfc1 a1,fcr31; \
li a2,~(0x3f<<12); \
@@ -143,8 +145,8 @@ EXPORT(exception_count_##exception); \
nop; \
END(handle_##exception)
- BUILD_HANDLER(adel,ade,ade,silent) /* #4 */
- BUILD_HANDLER(ades,ade,ade,silent) /* #5 */
+ BUILD_HANDLER(adel,ade,kmode,silent) /* #4 */
+ BUILD_HANDLER(ades,ade,kmode,silent) /* #5 */
BUILD_HANDLER(ibe,ibe,cli,verbose) /* #6 */
BUILD_HANDLER(dbe,dbe,cli,verbose) /* #7 */
BUILD_HANDLER(bp,bp,sti,silent) /* #9 */
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 95bdbdc5c..57668c7a8 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 1996, 1997 by Ralf Baechle
*
- * $Id: mips_ksyms.c,v 1.5 1998/03/17 22:07:35 ralf Exp $
+ * $Id: mips_ksyms.c,v 1.6 1998/03/18 17:18:12 ralf Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
@@ -32,7 +32,6 @@ EXPORT_SYMBOL(EISA_bus);
/*
* String functions
*/
-EXPORT_SYMBOL_NOVERS(bcopy);
EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memcpy);
@@ -54,7 +53,6 @@ EXPORT_SYMBOL(local_irq_count);
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(active_ds);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 64c8cff16..5a5a379ec 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 1994 - 1998 by Ralf Baechle and others.
*
- * $Id: process.c,v 1.7 1998/03/27 04:47:55 ralf Exp $
+ * $Id: process.c,v 1.8 1998/04/05 11:23:51 ralf Exp $
*/
#include <linux/config.h>
#include <linux/errno.h>
@@ -33,8 +33,6 @@
#include <asm/io.h>
#include <asm/elf.h>
-mm_segment_t active_ds = USER_DS;
-
asmlinkage void ret_from_sys_call(void);
/*
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 98f99f9f5..8f80fb327 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -5,10 +5,11 @@
* Copyright (C) 1995, 1996 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
*
- * $Id: setup.c,v 1.5 1997/12/02 23:44:02 ralf Exp $
+ * $Id: setup.c,v 1.6 1997/12/16 05:34:37 ralf Exp $
*/
#include <linux/config.h>
#include <linux/errno.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
@@ -34,6 +35,7 @@
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
+#include <asm/ide.h>
#include <asm/io.h>
#include <asm/vector.h>
#include <asm/stackframe.h>
@@ -86,6 +88,11 @@ int EISA_bus = 0;
struct drive_info_struct drive_info = DEFAULT_DRIVE_INFO;
struct screen_info screen_info = DEFAULT_SCREEN_INFO;
+#ifdef CONFIG_BLK_DEV_IDE
+extern struct ide_ops no_ide_ops;
+struct ide_ops *ide_ops;
+#endif
+
/*
* setup informations
*
@@ -170,8 +177,12 @@ __initfunc(void setup_arch(char **cmdline_p,
atag = bi_TagFind(tag_vram_base);
memcpy(&mips_vram_base, TAGVALPTR(atag), atag->size);
+ /* Save defaults for configuration dependand routines. */
irq_setup = default_irq_setup;
fd_cacheflush = default_fd_cacheflush;
+#ifdef CONFIG_BLK_DEV_IDE
+ ide_ops = &no_ide_ops;
+#endif
switch(mips_machgroup)
{
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b554cad39..7ea3017e5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -8,7 +8,7 @@
* Copyright 1994, 1995, 1996, 1997 by Ralf Baechle
* Modified for R3000 by Paul M. Antoine, 1995, 1996
*
- * $Id: traps.c,v 1.9 1998/03/27 04:47:56 ralf Exp $
+ * $Id: traps.c,v 1.13 1998/04/04 13:59:39 ralf Exp $
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -387,9 +387,8 @@ void do_vcei(struct pt_regs *regs)
{
lock_kernel();
/*
- * Theory says this exception doesn't happen.
- *
- * Murphy is right. It does happen ...
+ * Only possible on R4[04]00[SM]C. No handler because I don't have
+ * such a cpu. Theory says this exception doesn't happen.
*/
panic("Caught VCEI exception - should not happen");
unlock_kernel();
@@ -399,11 +398,10 @@ void do_vced(struct pt_regs *regs)
{
lock_kernel();
/*
- * Theory says this exception doesn't happen.
- *
- * Murphy is right. It does happen ...
+ * Only possible on R4[04]00[SM]C. No handler because I don't have
+ * such a cpu. Theory says this exception doesn't happen.
*/
- panic("Caught VCED exception - should not happen");
+ panic("Caught VCE exception - should not happen");
unlock_kernel();
}
@@ -547,14 +545,8 @@ __initfunc(void trap_init(void))
case CPU_R4400MC:
case CPU_R4000SC:
case CPU_R4400SC:
- /*
- * The following won't work because we _cannot_ perform any
- * load/store before the VCE handler. We deal with this
- * by checking for for vced / vcei exceptions before doing
- * the generic exception handling thing. This costs us
- * several instructions, therefore there should be a special
- * handler for those CPUs which have these exceptions.
- *
+ /* XXX The following won't work because we _cannot_
+ * XXX perform any load/store before the VCE handler.
*/
set_except_vector(14, handle_vcei);
set_except_vector(31, handle_vced);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index ea81ba7db..52205475a 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -5,7 +5,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996 by Ralf Baechle
+ * Copyright (C) 1996, 1998 by Ralf Baechle
+ *
+ * $Id: unaligned.c,v 1.4 1998/05/03 00:24:48 ralf Exp $
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
@@ -34,13 +36,7 @@
* sysmips(MIPS_FIXADE, x);
* ...
*
- * The parameter x is 0 for disabeling software emulation. Set bit 0 for
- * enabeling software emulation and bit 1 for enabeling printing debug
- * messages into syslog to aid finding address errors in programs.
- *
- * The logging feature is an addition over RISC/os and IRIX where only the
- * values 0 and 1 are acceptable values for x. I'll probably remove this
- * hack later on.
+ * The argument x is 0 for disabling software emulation, enabled otherwise.
*
* Below a little program to play around with this feature.
*
@@ -69,12 +65,6 @@
* printf("\n");
* }
*
- * Until I've written the code to handle branch delay slots it may happen
- * that the kernel receives an ades/adel instruction from an insn in a
- * branch delay slot but is unable to handle this case. The kernel knows
- * this fact and therefore will kill the process. For most code you can
- * fix this temporarily by compiling with flags -fno-delayed-branch -Wa,-O0.
- *
* Coprozessor loads are not supported; I think this case is unimportant
* in the practice.
*
@@ -88,19 +78,15 @@
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
-#undef CONF_NO_UNALIGNED_KERNEL_ACCESS
-#undef CONF_LOG_UNALIGNED_ACCESSES
-
#define STR(x) __STR(x)
#define __STR(x) #x
-typedef unsigned long register_t;
-
/*
* User code may only access USEG; kernel code may access the
* entire address space.
@@ -110,10 +96,12 @@ typedef unsigned long register_t;
goto sigbus;
static inline void
-emulate_load_store_insn(struct pt_regs *regs, unsigned long addr, unsigned long pc)
+emulate_load_store_insn(struct pt_regs *regs,
+ unsigned long addr,
+ unsigned long pc)
{
union mips_instruction insn;
- register_t value;
+ unsigned long value, fixup;
regs->regs[0] = 0;
/*
@@ -358,99 +346,70 @@ emulate_load_store_insn(struct pt_regs *regs, unsigned long addr, unsigned long
*/
default:
/*
- * Pheeee... We encountered an yet unknown instruction ...
+ * Pheeee... We encountered an yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
*/
- force_sig(SIGILL, current);
+ goto sigill;
}
return;
fault:
+ /* Did we have an exception handler installed? */
+ fixup = search_exception_table(regs->cp0_epc);
+ if (fixup) {
+ long new_epc;
+ new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc);
+ printk(KERN_DEBUG "%s: Forwarding exception at [<%lx>] (%lx)\n",
+ current->comm, regs->cp0_epc, new_epc);
+ regs->cp0_epc = new_epc;
+ return;
+ }
+
+ lock_kernel();
send_sig(SIGSEGV, current, 1);
+ unlock_kernel();
return;
sigbus:
+ lock_kernel();
send_sig(SIGBUS, current, 1);
+ unlock_kernel();
+ return;
+sigill:
+ lock_kernel();
+ send_sig(SIGILL, current, 1);
+ unlock_kernel();
return;
}
unsigned long unaligned_instructions;
-static inline void
-fix_ade(struct pt_regs *regs, unsigned long pc)
+asmlinkage void do_ade(struct pt_regs *regs)
{
+ unsigned long pc;
+
/*
* Did we catch a fault trying to load an instruction?
+ * This also catches attempts to activate MIPS16 code on
+ * CPUs which don't support it.
*/
- if (regs->cp0_badvaddr == pc) {
- /*
- * Phee... Either the code is severly messed up or the
- * process tried to activate some MIPS16 code.
- */
- force_sig(SIGBUS, current);
- }
+ if (regs->cp0_badvaddr == regs->cp0_epc)
+ goto sigbus;
+
+ pc = regs->cp0_epc + ((regs->cp0_cause & CAUSEF_BD) ? 4 : 0);
+ if (compute_return_epc(regs))
+ return;
+ if ((current->tss.mflags & MF_FIXADE) == 0)
+ goto sigbus;
- /*
- * Ok, this wasn't a failed instruction load. The CPU was capable of
- * reading the instruction and faulted after this. So we don't need
- * to verify_area the address of the instrucion. We still don't
- * know whether the address used was legal and therefore need to do
- * verify_area(). The CPU already did the checking for legal
- * instructions for us, so we don't need to do this.
- */
emulate_load_store_insn(regs, regs->cp0_badvaddr, pc);
unaligned_instructions++;
-}
-
-#define kernel_address(x) ((long)(x) < 0)
-asmlinkage void
-do_ade(struct pt_regs *regs)
-{
- register_t pc = regs->cp0_epc;
- register_t badvaddr __attribute__ ((unused)) = regs->cp0_badvaddr;
- char adels;
+ return;
+sigbus:
lock_kernel();
- adels = (((regs->cp0_cause & CAUSEF_EXCCODE) >>
- CAUSEB_EXCCODE) == 4) ? 'l' : 's';
-
-#ifdef CONF_NO_UNALIGNED_KERNEL_ACCESS
- /*
- * In an ideal world there are no unaligned accesses by the kernel.
- * So be a bit noisy ...
- */
- if (kernel_address(badvaddr) && !user_mode(regs)) {
- show_regs(regs);
- panic("Caught adel%c exception in kernel mode accessing %08lx.",
- adels, badvaddr);
- }
-#endif /* CONF_NO_UNALIGNED_KERNEL_ACCESS */
-
-#ifdef CONF_LOG_UNALIGNED_ACCESSES
- if (current->tss.mflags & MF_LOGADE) {
- register_t logpc = pc;
- if (regs->cp0_cause & CAUSEF_BD)
- logpc += 4;
- printk(KERN_DEBUG
- "Caught adel%c in '%s' at 0x%08lx accessing 0x%08lx.\n",
- adels, current->comm, logpc, regs->cp0_badvaddr);
- }
-#endif /* CONF_LOG_UNALIGNED_ACCESSES */
-
- if (compute_return_epc(regs))
- goto out;
- if(current->tss.mflags & MF_FIXADE) {
- pc += ((regs->cp0_cause & CAUSEF_BD) ? 4 : 0);
- fix_ade(regs, pc);
- goto out;
- }
-
-#ifdef CONF_DEBUG_EXCEPTIONS
- show_regs(regs);
-#endif
-
force_sig(SIGBUS, current);
-
-out:
unlock_kernel();
+
return;
}
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 7da90c6d0..597202403 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -1,11 +1,7 @@
#
# Makefile for MIPS-specific library files..
#
-# Many of these routines are just left over debugging trash of ancient
-# times when I just make my Tyne beep and so ...
-#
-# ...and for when I need to get the DECStation to use the boot prom to
-# do things... Paul M. Antoine.
+# $Id: Makefile,v 1.9 1998/05/03 00:28:00 ralf Exp $
#
.S.s:
@@ -14,12 +10,7 @@
$(CC) $(CFLAGS) -c $< -o $*.o
L_TARGET = lib.a
-L_OBJS = beep.o checksum.o copy_user.o csum.o dump_tlb.o memset.o memcpy.o \
- strlen_user.o strncpy_user.o tags.o watch.o
-
-#
-# Debug console, works without other support from the kernel
-#
-L_OBJS += tinycon.o
+L_OBJS = csum_partial.o csum_partial_copy.o dump_tlb.o ide-std.c ide-no.o \
+ memset.o memcpy.o strlen_user.o strncpy_user.o tags.o watch.o
include $(TOPDIR)/Rules.make
diff --git a/arch/mips/lib/beep.S b/arch/mips/lib/beep.S
deleted file mode 100644
index e74a63c0e..000000000
--- a/arch/mips/lib/beep.S
+++ /dev/null
@@ -1,21 +0,0 @@
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/*
- * Just for debugging...
- */
- LEAF(beep)
- lw t0,beepflag
- bnez t0,1f
- lbu t0,0xb4000061
- xori t0,3
- sb t0,0xb4000061
- li t0,1
- sw t0,beepflag
-1: jr ra
- END(beep)
-
- .bss
-beepflag: .word 0
- .text
-
diff --git a/arch/mips/lib/checksum.c b/arch/mips/lib/checksum.c
deleted file mode 100644
index f3ef6295c..000000000
--- a/arch/mips/lib/checksum.c
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * MIPS specific IP/TCP/UDP checksumming routines
- *
- * Authors: Ralf Baechle, <ralf@waldorf-gmbh.de>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * $Id: checksum.c,v 1.2 1997/07/29 18:37:35 ralf Exp $
- */
-#include <net/checksum.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <asm/string.h>
-#include <asm/uaccess.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
- /* 32 bits --> 16 bits + carry */
- x = (x & 0xffff) + (x >> 16);
- /* 16 bits + carry --> 16 bits including carry */
- x = (x & 0xffff) + (x >> 16);
- return x;
-}
-
-static inline unsigned long do_csum(const unsigned char * buff, int len)
-{
- int odd, count;
- unsigned long result = 0;
-
- if (len <= 0)
- goto out;
- odd = 1 & (unsigned long) buff;
- if (odd) {
- result = be16_to_cpu(*buff);
- len--;
- buff++;
- }
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *) buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long carry = 0;
- do {
- unsigned long w = *(unsigned long *) buff;
- count--;
- buff += 4;
- result += carry;
- result += w;
- carry = (w > result);
- } while (count);
- result += carry;
- result = (result & 0xffff) + (result >> 16);
- }
- if (len & 2) {
- result += *(unsigned short *) buff;
- buff += 2;
- }
- }
- if (len & 1)
- result += le16_to_cpu(*buff);
- result = from32to16(result);
- if (odd)
- result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
- return result;
-}
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
-{
- unsigned long result = do_csum(buff, len);
-
- /* add in old sum, and carry.. */
- result += sum;
- if(sum > result)
- result += 1;
- return result;
-}
-
-/*
- * copy while checksumming, otherwise like csum_partial
- */
-unsigned int csum_partial_copy(const char *src, char *dst,
- int len, unsigned int sum)
-{
- /*
- * It's 2:30 am and I don't feel like doing it real ...
- * This is lots slower than the real thing (tm)
- */
- sum = csum_partial(src, len, sum);
- memcpy(dst, src, len);
-
- return sum;
-}
-
-/*
- * Copy from userspace and compute checksum. If we catch an exception
- * then zero the rest of the buffer.
- */
-unsigned int csum_partial_copy_from_user (const char *src, char *dst,
- int len, unsigned int sum,
- int *err_ptr)
-{
- int *dst_err_ptr=NULL;
- int missing;
-
- missing = copy_from_user(dst, src, len);
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *err_ptr = -EFAULT;
- }
-
- return csum_partial(dst, len, sum);
-}
diff --git a/arch/mips/lib/copy_user.S b/arch/mips/lib/copy_user.S
deleted file mode 100644
index a7fdc74e5..000000000
--- a/arch/mips/lib/copy_user.S
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * arch/mips/lib/copy_user.S
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 1996, 1997 by Ralf Baechle
- *
- * Less stupid user_copy implementation for 32 bit MIPS CPUs.
- *
- * $Id: copy_user.S,v 1.2 1997/08/11 04:26:12 ralf Exp $
- */
-#include <asm/asm.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-
-#define BLOCK_SIZE 16
-
-#define EX(addr,handler) \
- .section __ex_table,"a"; \
- PTR addr, handler; \
- .previous
-#define UEX(addr,handler) \
- EX(addr,handler); \
- EX(addr+4,handler)
-
- .set noreorder
- .set noat
-
-/* ---------------------------------------------------------------------- */
-
-/*
- * Bad. We can't fix the alignment for both address parts.
- * Align the source address and copy slowly ...
- */
-not_even_the_same_alignment:
- LONG_SUBU v1,zero,a1
- andi v1,3
- sltu t0,v0,v1
- MOVN(v1,v0,t0)
- beqz v1,src_aligned
- LONG_ADDU v1,a0
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_ADDIU a0,1
- bne a0,v1,1b
- LONG_SUBU v0,1
-src_aligned:
-
-/*
- * Ok. We've fixed the alignment of the copy src for this case.
- * Now let's copy in the usual BLOCK_SIZE byte blocks using unaligned
- * stores.
- * XXX Align the destination address. This is better if the __copy_user
- * encounters an access fault because we never have to deal with an
- * only partially modified destination word. This is required to
- * keep the semantics of the result of copy_*_user().
- */
- ori v1,v0,BLOCK_SIZE-1
- xori v1,BLOCK_SIZE-1
- beqz v1,copy_left_over
- nop
- LONG_SUBU v0,v1
- LONG_ADDU v1,a0
-
-1: lw t0,(a1) # Can cause tlb fault
- EX(1b, fault)
-2: lw t1,4(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t2,8(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t3,12(a1) # Can cause tlb fault
- EX(2b, fault)
-2: usw t0,(a0) # Can cause tlb faults
- UEX(2b, fault)
-2: usw t1,4(a0) # Can cause tlb faults
- UEX(2b, fault_plus_4)
-2: usw t2,8(a0) # Can cause tlb faults
- UEX(2b, fault_plus_8)
-2: usw t3,12(a0) # Can cause tlb faults
- UEX(2b, fault_plus_12)
- LONG_ADDIU a0,BLOCK_SIZE
- bne a0,v1,1b
- LONG_ADDIU a1,BLOCK_SIZE
-9:
- b copy_left_over # < BLOCK_SIZE bytes left
- nop
-
-/* ---------------------------------------------------------------------- */
-
-not_w_aligned:
-/*
- * Ok, src or destination are not 4-byte aligned.
- * Try to fix that. Do at least both addresses have the same alignment?
- */
- xor t0,a0,a1
- andi t0,3
- bnez t0,not_even_the_same_alignment
- nop # delay slot
-
-/*
- * Ok, we can fix the alignment for both operands and go back to the
- * fast path. We have to copy at least one byte, on average 3 bytes
- * bytewise.
- */
- LONG_SUBU v1,zero,a0
- andi v1,3
- sltu t0,v0,v1
- MOVN(v1,v0,t0)
- beqz v1,__copy_user
- LONG_ADDU v1,a0
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_ADDIU a0,1
- bne a0,v1,1b
- LONG_SUBU v0,1
- b align4
- nop
-
-/* ---------------------------------------------------------------------- */
-
-LEAF(__copy_user)
- or t1,a0,a1
- andi t1,3
- bnez t1,not_w_aligned # not word alignment
- move v0,a2
-
-align4:
- ori v1,v0,BLOCK_SIZE-1
- xori v1,BLOCK_SIZE-1
- beqz v1,copy_left_over
- nop
- LONG_SUBU v0,v1
- LONG_ADDU v1,a0
-
-1: lw t0,(a1) # Can cause tlb fault
- EX(1b, fault)
-2: lw t1,4(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t2,8(a1) # Can cause tlb fault
- EX(2b, fault)
-2: lw t3,12(a1) # Can cause tlb fault
- EX(2b, fault)
-2: sw t0,(a0) # Can cause tlb fault
- EX(2b, fault)
-2: sw t1,4(a0) # Can cause tlb fault
- EX(2b, fault_plus_4)
-2: sw t2,8(a0) # Can cause tlb fault
- EX(2b, fault_plus_8)
-2: sw t3,12(a0) # Can cause tlb fault
- EX(2b, fault_plus_12)
- LONG_ADDIU a0,BLOCK_SIZE
- bne a0,v1,1b
- LONG_ADDIU a1,BLOCK_SIZE
-9:
-
-/*
- * XXX Tune me ...
- */
-copy_left_over:
- beqz v0,3f
- nop
-1: lb $1,(a1)
- EX(1b, fault)
- LONG_ADDIU a1,1
-2: sb $1,(a0)
- EX(2b, fault)
- LONG_SUBU v0,1
- bnez v0,1b
- LONG_ADDIU a0,1
-3:
-
-done: jr ra
- nop
-
- END(__copy_user)
- .set at
- .set reorder
-
-/* ---------------------------------------------------------------------- */
-
-/*
- * Access fault. The number of not copied bytes is in v0. We have to
- * correct the number of the not copied bytes in v0 in case of a access
- * fault in an unrolled loop, then return.
- */
-
-fault: jr ra
-fault_plus_4: LONG_ADDIU v0,4
- jr ra
-fault_plus_8: LONG_ADDIU v0,8
- jr ra
-fault_plus_12: LONG_ADDIU v0,12
- jr ra
diff --git a/arch/mips/lib/csum.S b/arch/mips/lib/csum.S
deleted file mode 100644
index 08224e86b..000000000
--- a/arch/mips/lib/csum.S
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <asm/addrspace.h>
-#include <asm/asm.h>
-#include <asm/regdef.h>
-
-/*
- * Compute kernel code checksum to check kernel code against corruption
- * (Ancient debugging trash ...)
- */
- LEAF(csum)
- LONG_L t0,cacheflush
- move t8,ra
- jalr t0
- li t0,KSEG1
- la t1,final
- li t2,KSEG1
- or t0,t2
- or t1,t2
- move v0,zero
-1: lw t2,(t0)
- addiu t0,4
- bne t0,t1,1b
- xor v0,t2
- jr t8
- nop
- END(csum)
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
new file mode 100644
index 000000000..ce43987d8
--- /dev/null
+++ b/arch/mips/lib/csum_partial.S
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ralf Baechle
+ *
+ * $Id: csum_partial.S,v 1.2 1998/04/22 03:26:19 ralf Exp $
+ */
+#include <sys/asm.h>
+#include <sys/regdef.h>
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+#define CSUM_BIGCHUNK(src, offset, sum, t0, t1, t2, t3) \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+
+/*
+ * a0: source address
+ * a1: length of the area to checksum
+ * a2: partial checksum
+ */
+
+#define src a0
+#define dest a1
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src alignment and < 8 bytes to go */
+small_csumcpy:
+ move a1, t2
+
+ andi t0, a1, 4
+ beqz t0, 1f
+ andi t0, a1, 2
+
+ /* Still a full word to go */
+ ulw t1, (src)
+ addiu src, 4
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a1, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ addiu src, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
+
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a2)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial)
+ move sum, zero
+ move t7, zero
+
+ sltiu t8, a1, 0x8
+ bnez t8, small_csumcpy /* < 8 bytes to copy */
+ move t2, a1
+
+ beqz a1, out
+ andi t7, src, 0x1 /* odd buffer? */
+
+hword_align:
+ beqz t7, word_align
+ andi t8, src, 0x2
+
+ lbu t0, (src)
+ subu a1, a1, 0x1
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a1, 56
+
+ lhu t0, (src)
+ subu a1, a1, 0x2
+ ADDC(sum, t0)
+ sltiu t8, a1, 56
+ addu src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a1
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ lw t0, 0x00(src)
+ subu a1, a1, 0x4
+ ADDC(sum, t0)
+ addu src, src, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
+ subu a1, a1, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ addu src, src, 0x8
+ andi t8, src, 0x10
+
+oword_align:
+ beqz t8, begin_movement
+ srl t8, a1, 0x7
+
+ lw t3, 0x08(src)
+ lw t4, 0x0c(src)
+ lw t0, 0x00(src)
+ lw t1, 0x04(src)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ subu a1, a1, 0x10
+ addu src, src, 0x10
+ srl t8, a1, 0x7
+
+begin_movement:
+ beqz t8, 1f
+ andi t2, a1, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ bnez t8, move_128bytes
+ addu src, src, 0x80
+
+1:
+ beqz t2, 1f
+ andi t2, a1, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+
+1:
+ beqz t2, do_end_words
+ andi t8, a1, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a1, 0x1c
+ addu src, src, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
+
+end_words:
+ lw t0, (src)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ bnez t8, end_words
+ addu src, src, 0x4
+
+maybe_end_cruft:
+ andi t2, a1, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a1, t2
+ beqz t2, out
+ move a1, t2
+
+end_bytes:
+ lb t0, (src)
+ subu a1, a1, 0x1
+ bnez a2, end_bytes
+ addu src, src, 0x1
+
+out:
+ jr ra
+ move v0, sum
+ END(csum_partial)
diff --git a/arch/mips/lib/csum_partial_copy.S b/arch/mips/lib/csum_partial_copy.S
new file mode 100644
index 000000000..62ee35395
--- /dev/null
+++ b/arch/mips/lib/csum_partial_copy.S
@@ -0,0 +1,518 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ralf Baechle
+ *
+ * $Id: csum_partial_copy.S,v 1.3 1998/05/01 06:54:07 ralf Exp $
+ *
+ * Unified implementation of csum_partial_copy and csum_partial_copy_from_user.
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+/*
+ * The fixup routine for csum_partial_copy_from_user depends on copying
+ * strictly in increasing order. Gas expands ulw/usw macros in the wrong order
+ * for little endian machines, so we cannot depend on them.
+ */
+#ifdef __MIPSEB__
+#define ulwL lwl
+#define ulwU lwr
+#endif
+#ifdef __MIPSEL__
+#define ulwL lwr
+#define ulwU lwl
+#endif
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define UEX(insn,reg,addr,handler) \
+9: insn ## L reg, addr; \
+10: insn ## U reg, 3 + addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ PTR 10b, handler; \
+ .previous
+
+#define ADDC(sum,reg) \
+ addu sum, reg; \
+ sltu v1, sum, reg; \
+ addu sum, v1
+
+/* ascending order, destination aligned */
+#define CSUM_BIGCHUNK(src, dst, offset, sum, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ sw t0, (offset + 0x00)(dst); \
+ sw t1, (offset + 0x04)(dst); \
+ sw t2, (offset + 0x08)(dst); \
+ sw t3, (offset + 0x0c)(dst); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ sw t0, (offset + 0x10)(dst); \
+ sw t1, (offset + 0x14)(dst); \
+ sw t2, (offset + 0x18)(dst); \
+ sw t3, (offset + 0x1c)(dst)
+
+/* ascending order, destination unaligned; 2nd half reads 0x10..0x1c */
+#define UCSUM_BIGCHUNK(src, dst, offset, sum, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ usw t0, (offset + 0x00)(dst); \
+ usw t1, (offset + 0x04)(dst); \
+ usw t2, (offset + 0x08)(dst); \
+ usw t3, (offset + 0x0c)(dst); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ ADDC(sum, t0); \
+ ADDC(sum, t1); \
+ ADDC(sum, t2); \
+ ADDC(sum, t3); \
+ usw t0, (offset + 0x10)(dst); \
+ usw t1, (offset + 0x14)(dst); \
+ usw t2, (offset + 0x18)(dst); \
+ usw t3, (offset + 0x1c)(dst)
+
+#
+# a0: source address
+# a1: destination address
+# a2: length of the area to checksum
+# a3: partial checksum
+#
+
+#define src a0
+#define dest a1
+#define sum v0
+
+ .text
+ .set noreorder
+
+/* unknown src/dst alignment and < 8 bytes to go */
+small_csumcpy:
+ move a2, t2
+
+ andi t0, a2, 4
+ beqz t0, 1f
+ andi t0, a2, 2
+
+ /* Still a full word to go */
+ UEX(ulw, t1, 0(src), l_fixup)
+ addiu src, 4
+ usw t1, 0(dest)
+ addiu dest, 4
+ ADDC(sum, t1)
+
+1: move t1, zero
+ beqz t0, 1f
+ andi t0, a2, 1
+
+ /* Still a halfword to go */
+ ulhu t1, (src)
+ addiu src, 2
+ ush t1, (dest)
+ addiu dest, 2
+
+1: beqz t0, 1f
+ sll t1, t1, 16
+
+ lbu t2, (src)
+ nop
+ sb t2, (dest)
+
+#ifdef __MIPSEB__
+ sll t2, t2, 8
+#endif
+ or t1, t2
+
+1: ADDC(sum, t1)
+
+ /* fold checksum */
+ sll v1, sum, 16
+ addu sum, v1
+ sltu v1, sum, v1
+ srl sum, sum, 16
+ addu sum, v1
+
+ /* odd buffer alignment? */
+ beqz t7, 1f
+ nop
+ sll v1, sum, 8
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1:
+ .set reorder
+ /* Add the passed partial csum. */
+ ADDC(sum, a3)
+ jr ra
+ .set noreorder
+
+/* ------------------------------------------------------------------------- */
+
+ .align 5
+LEAF(csum_partial_copy_from_user)
+ addu t5, src, a2 # end address for fixup
+EXPORT(csum_partial_copy)
+ move sum, zero # clear computed sum
+ move t7, zero # clear odd flag
+ xor t0, dest, src
+ andi t0, t0, 0x3
+ beqz t0, can_align
+ sltiu t8, a2, 0x8
+
+ b memcpy_u_src # bad alignment
+ move t2, a2
+
+can_align:
+ bnez t8, small_csumcpy # < 8 bytes to copy
+ move t2, a2
+
+ beqz a2, out
+ andi t7, src, 0x1 # odd buffer?
+
+hword_align:
+ beqz t7, word_align
+ andi t8, src, 0x2
+
+ EX(lbu, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (dest), l_fixup)
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ addu dest, dest, 0x1
+ andi t8, src, 0x2
+
+word_align:
+ beqz t8, dword_align
+ sltiu t8, a2, 56
+
+ EX(lhu, t0, (src), l_fixup)
+ subu a2, a2, 0x2
+ sh t0, (dest)
+ ADDC(sum, t0)
+ sltiu t8, a2, 56
+ addu dest, dest, 0x2
+ addu src, src, 0x2
+
+dword_align:
+ bnez t8, do_end_words
+ move t8, a2
+
+ andi t8, src, 0x4
+ beqz t8, qword_align
+ andi t8, src, 0x8
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu a2, a2, 0x4
+ ADDC(sum, t0)
+ sw t0, 0x00(dest)
+ addu src, src, 0x4
+ addu dest, dest, 0x4
+ andi t8, src, 0x8
+
+qword_align:
+ beqz t8, oword_align
+ andi t8, src, 0x10
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ subu a2, a2, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ sw t0, 0x00(dest)
+ addu src, src, 0x8
+ sw t1, 0x04(dest)
+ andi t8, src, 0x10
+ addu dest, dest, 0x8
+
+oword_align:
+ beqz t8, begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(src), l_fixup) # assumes subblock ordering
+ EX(lw, t4, 0x0c(src), l_fixup)
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ sw t3, 0x08(dest)
+ subu a2, a2, 0x10
+ sw t4, 0x0c(dest)
+ addu src, src, 0x10
+ sw t0, 0x00(dest)
+ srl t8, a2, 0x7
+ addu dest, dest, 0x10
+ sw t1, -0x0c(dest)
+
+begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+move_128bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x40, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu src, src, 0x80
+ bnez t8, move_128bytes
+ addu dest, dest, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+move_64bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ CSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+ addu dest, dest, 0x40
+
+1:
+ beqz t2, do_end_words
+ andi t8, a2, 0x1c
+
+move_32bytes:
+ CSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu src, src, 0x20
+ addu dest, dest, 0x20
+
+do_end_words:
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
+
+end_words:
+ EX(lw, t0, (src), l_fixup)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ sw t0, (dest)
+ addu src, src, 0x4
+ bnez t8, end_words
+ addu dest, dest, 0x4
+
+maybe_end_cruft:
+ andi t2, a2, 0x3
+
+small_memcpy:
+ j small_csumcpy; move a2, t2 # tail jump; the move executes in the delay slot
+ beqz t2, out # unreachable - skipped by the j above
+ move a2, t2 # unreachable
+
+end_bytes: # unreachable: no branch in this file targets it (leftover code)
+ EX(lb, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+ addu src, src, 0x1
+ bnez a2, end_bytes
+ addu dest, dest, 0x1
+
+out:
+ jr ra
+ move v0, sum
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+memcpy_u_src:
+ bnez t8, small_memcpy # < 8 bytes?
+ move t2, a2
+
+ beqz a2, out
+ andi t7, src, 0x1 # odd alignment?
+
+u_hword_align:
+ beqz t7, u_word_align
+ andi t8, src, 0x2
+
+ EX(lbu, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+#ifdef __MIPSEL__
+ sll t0, t0, 8
+#endif
+ ADDC(sum, t0)
+ addu src, src, 0x1
+ addu dest, dest, 0x1
+ andi t8, src, 0x2
+
+u_word_align:
+ beqz t8, u_dword_align
+ sltiu t8, a2, 56
+
+ EX(lhu, t0, (src), l_fixup)
+ subu a2, a2, 0x2
+ ush t0, (dest)
+ ADDC(sum, t0)
+ sltiu t8, a2, 56
+ addu dest, dest, 0x2
+ addu src, src, 0x2
+
+u_dword_align:
+ bnez t8, u_do_end_words
+ move t8, a2
+
+ andi t8, src, 0x4
+ beqz t8, u_qword_align
+ andi t8, src, 0x8
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu a2, a2, 0x4
+ ADDC(sum, t0)
+ usw t0, 0x00(dest)
+ addu src, src, 0x4
+ addu dest, dest, 0x4
+ andi t8, src, 0x8
+
+u_qword_align:
+ beqz t8, u_oword_align
+ andi t8, src, 0x10
+
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ subu a2, a2, 0x8
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ usw t0, 0x00(dest)
+ addu src, src, 0x8
+ usw t1, 0x04(dest)
+ andi t8, src, 0x10
+ addu dest, dest, 0x8
+
+u_oword_align:
+ beqz t8, u_begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(src), l_fixup)
+ EX(lw, t4, 0x0c(src), l_fixup)
+ EX(lw, t0, 0x00(src), l_fixup)
+ EX(lw, t1, 0x04(src), l_fixup)
+ ADDC(sum, t3)
+ ADDC(sum, t4)
+ ADDC(sum, t0)
+ ADDC(sum, t1)
+ usw t3, 0x08(dest)
+ subu a2, a2, 0x10
+ usw t4, 0x0c(dest)
+ addu src, src, 0x10
+ usw t0, 0x00(dest)
+ srl t8, a2, 0x7
+ addu dest, dest, 0x10
+ usw t1, -0x0c(dest)
+
+u_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+u_move_128bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x40, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x60, sum, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu src, src, 0x80
+ bnez t8, u_move_128bytes
+ addu dest, dest, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+u_move_64bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ UCSUM_BIGCHUNK(src, dest, 0x20, sum, t0, t1, t3, t4)
+ addu src, src, 0x40
+ addu dest, dest, 0x40
+
+1:
+ beqz t2, u_do_end_words
+ andi t8, a2, 0x1c
+
+u_move_32bytes:
+ UCSUM_BIGCHUNK(src, dest, 0x00, sum, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu src, src, 0x20
+ addu dest, dest, 0x20
+
+u_do_end_words:
+ beqz t8, u_maybe_end_cruft
+ srl t8, t8, 0x2
+
+u_end_words:
+ EX(lw, t0, 0x00(src), l_fixup)
+ subu t8, t8, 0x1
+ ADDC(sum, t0)
+ usw t0, 0x00(dest)
+ addu src, src, 0x4
+ bnez t8, u_end_words
+ addu dest, dest, 0x4
+
+u_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+u_cannot_optimize:
+ j small_csumcpy; move a2, t2 # tail jump; the move executes in the delay slot
+ beqz t2, out # unreachable - skipped by the j above
+ move a2, t2 # unreachable
+
+u_end_bytes: # unreachable: no branch in this file targets it (leftover code)
+ EX(lb, t0, (src), l_fixup)
+ subu a2, a2, 0x1
+ sb t0, (dest)
+ addu src, src, 0x1
+ bnez a2, u_end_bytes
+ addu dest, dest, 0x1
+
+ jr ra
+ move v0, sum
+ END(csum_partial_copy_from_user)
+
+l_fixup:
+ beqz t7, 1f # odd buffer alignment?
+ nop
+ sll v1, sum, 8 # swap bytes
+ srl sum, sum, 8
+ or sum, v1
+ andi sum, 0xffff
+1: ADDC(sum, a3) # Add csum argument.
+
+ lw t0, THREAD_BUADDR($28) # clear the rest of the buffer
+ nop
+ subu t1, t0, src # where to start clearing
+ addu a0, dest, t1
+ move a1, zero # zero fill
+ j __bzero
+ subu a2, t5, t0 # a2 = bad - srcend bytes to go
diff --git a/arch/mips/lib/ide-no.c b/arch/mips/lib/ide-no.c
new file mode 100644
index 000000000..3b6307b51
--- /dev/null
+++ b/arch/mips/lib/ide-no.c
@@ -0,0 +1,73 @@
+/*
+ * arch/mips/lib/ide-no.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Stub IDE routines to keep Linux from crashing on machines which don't
+ * have IDE like the Indy.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ *
+ * $Id: ide-no.c,v 1.1 1998/05/03 00:28:00 ralf Exp $
+ */
+#include <linux/hdreg.h>
+#include <linux/kernel.h>
+#include <asm/ptrace.h>
+#include <asm/ide.h>
+
+static int no_ide_default_irq(ide_ioreg_t base)
+{
+ return 0;
+}
+
+static ide_ioreg_t no_ide_default_io_base(int index)
+{
+ return 0;
+}
+
+static void no_ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base,
+ int *irq)
+{
+}
+
+static int no_ide_request_irq(unsigned int irq,
+ void (*handler)(int,void *, struct pt_regs *),
+ unsigned long flags, const char *device,
+ void *dev_id) /* stub: machine has no IDE, must never be reached */
+{
+ panic("no_ide_request_irq called - shouldn't happen");
+}
+
+static void no_ide_free_irq(unsigned int irq, void *dev_id)
+{
+ panic("no_ide_free_irq called - shouldn't happen");
+}
+
+static int no_ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ panic("no_ide_check_region called - shouldn't happen");
+}
+
+static void no_ide_request_region(ide_ioreg_t from, unsigned int extent,
+ const char *name)
+{
+ panic("no_ide_request_region called - shouldn't happen");
+}
+
+static void no_ide_release_region(ide_ioreg_t from, unsigned int extent)
+{
+ panic("no_ide_release_region called - shouldn't happen");
+}
+
+struct ide_ops no_ide_ops = {
+ &no_ide_default_irq,
+ &no_ide_default_io_base,
+ &no_ide_init_hwif_ports,
+ &no_ide_request_irq,
+ &no_ide_free_irq,
+ &no_ide_check_region,
+ &no_ide_request_region,
+ &no_ide_release_region
+};
diff --git a/arch/mips/lib/ide-std.c b/arch/mips/lib/ide-std.c
new file mode 100644
index 000000000..47b103c03
--- /dev/null
+++ b/arch/mips/lib/ide-std.c
@@ -0,0 +1,91 @@
+/*
+ * arch/mips/lib/ide-std.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * IDE routines for typical pc-like standard configurations.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ */
+#include <linux/hdreg.h>
+#include <asm/ptrace.h>
+#include <asm/ide.h>
+
+static int std_ide_default_irq(ide_ioreg_t base)
+{
+ switch (base) {
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+ case 0x1e8: return 11;
+ case 0x168: return 10;
+ default:
+ return 0;
+ }
+}
+
+static ide_ioreg_t std_ide_default_io_base(int index)
+{
+ switch (index) {
+ case 0: return 0x1f0;
+ case 1: return 0x170;
+ case 2: return 0x1e8;
+ case 3: return 0x168;
+ default:
+ return 0;
+ }
+}
+
+static void std_ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base,
+ int *irq) /* fill the hwif port table for a PC-style IDE interface */
+{
+ ide_ioreg_t port = base;
+ int i = 8;
+
+ while (i--) /* eight consecutive task-file registers at base.. */
+ *p++ = port++;
+ *p++ = base + 0x206; /* control register at the conventional PC offset */
+ if (irq != NULL)
+ *irq = 0; /* NOTE(review): 0 presumably means auto-probe - confirm */
+}
+
+static int std_ide_request_irq(unsigned int irq,
+ void (*handler)(int,void *, struct pt_regs *),
+ unsigned long flags, const char *device,
+ void *dev_id)
+{
+ return request_irq(irq, handler, flags, device, dev_id);
+}
+
+static void std_ide_free_irq(unsigned int irq, void *dev_id)
+{
+ free_irq(irq, dev_id);
+}
+
+static int std_ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ return check_region(from, extent);
+}
+
+static void std_ide_request_region(ide_ioreg_t from, unsigned int extent,
+ const char *name)
+{
+ request_region(from, extent, name);
+}
+
+static void std_ide_release_region(ide_ioreg_t from, unsigned int extent)
+{
+ release_region(from, extent);
+}
+
+struct ide_ops std_ide_ops = {
+ &std_ide_default_irq,
+ &std_ide_default_io_base,
+ &std_ide_init_hwif_ports,
+ &std_ide_request_irq,
+ &std_ide_free_irq,
+ &std_ide_check_region,
+ &std_ide_request_region,
+ &std_ide_release_region
+};
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 95639cb01..2bae5324d 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -1,221 +1,701 @@
-/* memcpy.S: Mips optimized memcpy based upon SparcLinux code.
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
- * Copyright(C) 1995 Linus Torvalds
- * Copyright(C) 1996 David S. Miller
- * Copyright(C) 1996 Eddie C. Dost
+ * $Id: memcpy.S,v 1.3 1998/04/27 06:00:36 ralf Exp $
*
- * derived from:
- * e-mail between David and Eddie.
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected. In order to keep the exception fixup routine
+ * simple all memory accesses in __copy_user to src resp. dst are strictly
+ * incremental. The fixup routine depends on $at not being changed.
*/
-
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5) \
- lw t0, (offset + 0x18)(src); \
- lw t1, (offset + 0x1c)(src); \
- sw t0, (offset + 0x18)(dst); \
- lw t2, (offset + 0x10)(src); \
- sw t1, (offset + 0x1c)(dst); \
- lw t3, (offset + 0x14)(src); \
- sw t2, (offset + 0x10)(dst); \
- lw t4, (offset + 0x08)(src); \
- sw t3, (offset + 0x14)(dst); \
- lw t5, (offset + 0x0c)(src); \
- sw t4, (offset + 0x08)(dst); \
- lw t0, (offset + 0x00)(src); \
- sw t5, (offset + 0x0c)(dst); \
- lw t1, (offset + 0x04)(src); \
- sw t0, (offset + 0x00)(dst); \
- sw t1, (offset + 0x04)(dst); \
-
- /* Alignment cases are:
- * 1) (src&0x3)=0x0 (dst&0x3)=0x0 can optimize
- * 2) (src&0x3)=0x1 (dst&0x3)=0x1 can optimize
- * 3) (src&0x3)=0x2 (dst&0x3)=0x2 can optimize
- * 4) (src&0x3)=0x3 (dst&0x3)=0x3 can optimize
- * 5) anything else cannot optimize
- */
-
- /* I hate MIPS register names... AIEEE, it's a SPARC! */
-#define o0 a0
-#define o1 a1
-#define o2 a2
-#define o3 a3
-#define o4 t0
-#define o5 t1
-#define o6 sp
-#define o7 ra
-#define g0 zero
-#define g1 t2
-#define g2 t3
-#define g3 t4
-#define g4 t5
-#define g5 t6
-#define g6 t7
-#define g7 t8
+/*
+ * The fixup routine for copy_to_user depends on copying strictly in
+ * increasing order. Gas expands the ulw/usw macros in the wrong order for
+ * little endian machines, so we cannot depend on them.
+ */
+#ifdef __MIPSEB__
+#define uswL swl
+#define uswU swr
+#define ulwL lwl
+#define ulwU lwr
+#endif
+#ifdef __MIPSEL__
+#define uswL swr
+#define uswU swl
+#define ulwL lwr
+#define ulwU lwl
+#endif
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define UEX(insn,reg,addr,handler) \
+9: insn ## L reg, addr; \
+10: insn ## U reg, 3 + addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ PTR 10b, handler; \
+ .previous
+
+/* ascending order, destination aligned */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ EX(sw, t0, (offset + 0x00)(dst), s_fixup); \
+ EX(sw, t1, (offset + 0x04)(dst), s_fixup); \
+ EX(sw, t2, (offset + 0x08)(dst), s_fixup); \
+ EX(sw, t3, (offset + 0x0c)(dst), s_fixup); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ EX(sw, t0, (offset + 0x10)(dst), s_fixup); \
+ EX(sw, t1, (offset + 0x14)(dst), s_fixup); \
+ EX(sw, t2, (offset + 0x18)(dst), s_fixup); \
+ EX(sw, t3, (offset + 0x1c)(dst), s_fixup)
+
+/* ascending order, destination unaligned */
+#define UMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ EX(lw, t0, (offset + 0x00)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x04)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x08)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x0c)(src), l_fixup); \
+ UEX(usw, t0, (offset + 0x00)(dst), s_fixup); \
+ UEX(usw, t1, (offset + 0x04)(dst), s_fixup); \
+ UEX(usw, t2, (offset + 0x08)(dst), s_fixup); \
+ UEX(usw, t3, (offset + 0x0c)(dst), s_fixup); \
+ EX(lw, t0, (offset + 0x10)(src), l_fixup); \
+ EX(lw, t1, (offset + 0x14)(src), l_fixup); \
+ EX(lw, t2, (offset + 0x18)(src), l_fixup); \
+ EX(lw, t3, (offset + 0x1c)(src), l_fixup); \
+ UEX(usw, t0, (offset + 0x10)(dst), s_fixup); \
+ UEX(usw, t1, (offset + 0x14)(dst), s_fixup); \
+ UEX(usw, t2, (offset + 0x18)(dst), s_fixup); \
+ UEX(usw, t3, (offset + 0x1c)(dst), s_fixup)
.text
.set noreorder
.set noat
- .globl bcopy
- .globl amemmove
- .globl memmove
- .globl memcpy
- .align 2
-bcopy:
- move o3, o0
- move o0, o1
- move o1, o3
-
-amemmove:
-memmove:
-memcpy: /* o0=dst o1=src o2=len */
- xor o4, o0, o1
- andi o4, o4, 0x3
- move g6, o0
- beq o4, g0, can_align
- sltiu g7, o2, 0x8
-
- b cannot_optimize
- move g1, o2
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, a0 /* return value */
+__memcpy:
+EXPORT(__copy_user)
+ xor t0, a0, a1
+ andi t0, t0, 0x3
+ move t7, a0
+ beqz t0, can_align
+ sltiu t8, a2, 0x8
+
+ b memcpy_u_src # bad alignment
+ move t2, a2
can_align:
- bne g7, g0, cannot_optimize
- move g1, o2
+ bnez t8, small_memcpy # < 8 bytes to copy
+ move t2, a2
- beq o2, g0, out
- andi g7, o1, 0x1
+ beqz a2, out
+ andi t8, a1, 0x1
hword_align:
- beq g7, g0, word_align
- andi g7, o1, 0x2
+ beqz t8, word_align
+ andi t8, a1, 0x2
- lbu o4, 0x00(o1)
- subu o2, o2, 0x1
- sb o4, 0x00(o0)
- addu o1, o1, 0x1
- addu o0, o0, 0x1
- andi g7, o1, 0x2
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ addu a0, a0, 0x1
+ andi t8, a1, 0x2
word_align:
- beq g7, g0, dword_align
- sltiu g7, o2, 56
+ beqz t8, dword_align
+ sltiu t8, a2, 56
- lhu o4, 0x00(o1)
- subu o2, o2, 0x2
- sh o4, 0x00(o0)
- sltiu g7, o2, 56
- addu o0, o0, 0x2
- addu o1, o1, 0x2
+ EX(lh, t0, (a1), l_fixup)
+ subu a2, a2, 0x2
+ EX(sh, t0, (a0), s_fixup)
+ sltiu t8, a2, 56
+ addu a0, a0, 0x2
+ addu a1, a1, 0x2
dword_align:
- bne g7, g0, do_end_words
- move g7, o2
+ bnez t8, do_end_words
+ move t8, a2
- andi g7, o1, 0x4
- beq g7, zero, qword_align
- andi g7, o1, 0x8
+ andi t8, a1, 0x4
+ beqz t8, qword_align
+ andi t8, a1, 0x8
- lw o4, 0x00(o1)
- subu o2, o2, 0x4
- sw o4, 0x00(o0)
- addu o1, o1, 0x4
- addu o0, o0, 0x4
- andi g7, o1, 0x8
+ EX(lw, t0, 0x00(a1), l_fixup)
+ subu a2, a2, 0x4
+ EX(sw, t0, 0x00(a0), s_fixup)
+ addu a1, a1, 0x4
+ addu a0, a0, 0x4
+ andi t8, a1, 0x8
qword_align:
- beq g7, g0, oword_align
- andi g7, o1, 0x10
-
- lw o4, 0x00(o1)
- lw o5, 0x04(o1)
- subu o2, o2, 0x8
- sw o4, 0x00(o0)
- addu o1, o1, 0x8
- sw o5, 0x04(o0)
- andi g7, o1, 0x10
- addu o0, o0, 0x8
+ beqz t8, oword_align
+ andi t8, a1, 0x10
+
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ subu a2, a2, 0x8
+ EX(sw, t0, 0x00(a0), s_fixup)
+ EX(sw, t1, 0x04(a0), s_fixup)
+ addu a1, a1, 0x8
+ andi t8, a1, 0x10
+ addu a0, a0, 0x8
oword_align:
- beq g7, g0, begin_movement
- srl g7, o2, 0x7
-
- lw g2, 0x08(o1)
- lw g3, 0x0c(o1)
- lw o4, 0x00(o1)
- lw o5, 0x04(o1)
- sw g2, 0x08(o0)
- subu o2, o2, 0x10
- sw g3, 0x0c(o0)
- addu o1, o1, 0x10
- sw o4, 0x00(o0)
- srl g7, o2, 0x7
- addu o0, o0, 0x10
- sw o5, -0x0c(o0)
+ beqz t8, begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x00(a1), l_fixup)
+ EX(lw, t4, 0x04(a1), l_fixup)
+ EX(lw, t0, 0x08(a1), l_fixup)
+ EX(lw, t1, 0x0c(a1), l_fixup)
+ EX(sw, t3, 0x00(a0), s_fixup)
+ EX(sw, t4, 0x04(a0), s_fixup)
+ EX(sw, t0, 0x08(a0), s_fixup)
+ EX(sw, t1, 0x0c(a0), s_fixup)
+ subu a2, a2, 0x10
+ addu a1, a1, 0x10
+ srl t8, a2, 0x7
+ addu a0, a0, 0x10
begin_movement:
- beq g7, g0, 0f
- andi g1, o2, 0x40
+ beqz t8, 0f
+ andi t2, a2, 0x40
move_128bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x40, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x60, o4, o5, g2, g3, g4, g5)
- subu g7, g7, 0x01
- addu o1, o1, 0x80
- bne g7, g0, move_128bytes
- addu o0, o0, 0x80
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x40, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x60, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu a1, a1, 0x80
+ bnez t8, move_128bytes
+ addu a0, a0, 0x80
0:
- beq g1, g0, 1f
- andi g1, o2, 0x20
+ beqz t2, 1f
+ andi t2, a2, 0x20
move_64bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o4, o5, g2, g3, g4, g5)
- addu o1, o1, 0x40
- addu o0, o0, 0x40
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ MOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ addu a1, a1, 0x40
+ addu a0, a0, 0x40
1:
- beq g1, g0, do_end_words
- andi g7, o2, 0x1c
+ beqz t2, do_end_words
+ andi t8, a2, 0x1c
move_32bytes:
- MOVE_BIGCHUNK(o1, o0, 0x00, o4, o5, g2, g3, g4, g5)
- andi g7, o2, 0x1c
- addu o1, o1, 0x20
- addu o0, o0, 0x20
+ MOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu a1, a1, 0x20
+ addu a0, a0, 0x20
do_end_words:
- beq g7, g0, maybe_end_cruft
- srl g7, g7, 0x2
+ beqz t8, maybe_end_cruft
+ srl t8, t8, 0x2
end_words:
- lw o4, 0x00(o1)
- subu g7, g7, 0x1
- sw o4, 0x00(o0)
- addu o1, o1, 0x4
- bne g7, g0, end_words
- addu o0, o0, 0x4
+ EX(lw, t0, (a1), l_fixup)
+ subu t8, t8, 0x1
+ EX(sw, t0, (a0), s_fixup)
+ addu a1, a1, 0x4
+ bnez t8, end_words
+ addu a0, a0, 0x4
maybe_end_cruft:
- andi g1, o2, 0x3
+ andi t2, a2, 0x3
-cannot_optimize:
- beq g1, g0, out
- move o2, g1
+small_memcpy:
+ beqz t2, out
+ move a2, t2
end_bytes:
- lbu o4, 0x00(o1)
- subu o2, o2, 0x1
- sb o4, 0x00(o0)
- addu o1, o1, 0x1
- bne o2, g0, end_bytes
- addu o0, o0, 0x1
-
-out:
- jr o7
- move v0, g6
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ bnez a2, end_bytes
+ addu a0, a0, 0x1
+
+out: jr ra
+ move a2, zero
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+memcpy_u_src:
+ bnez t8, small_memcpy # < 8 bytes?
+ move t2, a2
+
+ addiu t0, a1, 7 # t0: how much to align
+ ori t0, 7
+ xori t0, 7
+ subu t0, a1
+
+ UEX(ulw, t1, 0(a1), l_fixup) # dword alignment
+ UEX(ulw, t2, 4(a1), l_fixup)
+ UEX(usw, t1, 0(a0), s_fixup)
+ UEX(usw, t2, 4(a0), s_fixup)
+
+ addu a1, t0 # src
+ addu a0, t0 # dst
+ subu a2, t0 # len
+
+ sltiu t8, a2, 56
+ bnez t8, u_do_end_words
+ andi t8, a2, 0x3c
+
+ andi t8, a1, 8 # now qword aligned?
+
+u_qword_align:
+ beqz t8, u_oword_align
+ andi t8, a1, 0x10
+
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ subu a2, a2, 0x8
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ UEX(usw, t1, 0x04(a0), s_fixup)
+ addu a1, a1, 0x8
+ andi t8, a1, 0x10
+ addu a0, a0, 0x8
+
+u_oword_align:
+ beqz t8, u_begin_movement
+ srl t8, a2, 0x7
+
+ EX(lw, t3, 0x08(a1), l_fixup)
+ EX(lw, t4, 0x0c(a1), l_fixup)
+ EX(lw, t0, 0x00(a1), l_fixup)
+ EX(lw, t1, 0x04(a1), l_fixup)
+ UEX(usw, t3, 0x08(a0), s_fixup)
+ UEX(usw, t4, 0x0c(a0), s_fixup)
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ UEX(usw, t1, 0x04(a0), s_fixup)
+ subu a2, a2, 0x10
+ addu a1, a1, 0x10
+ srl t8, a2, 0x7
+ addu a0, a0, 0x10
+
+u_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+u_move_128bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x40, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x60, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ addu a1, a1, 0x80
+ bnez t8, u_move_128bytes
+ addu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+u_move_64bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ UMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ addu a1, a1, 0x40
+ addu a0, a0, 0x40
+
+1:
+ beqz t2, u_do_end_words
+ andi t8, a2, 0x1c
+
+u_move_32bytes:
+ UMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+ addu a1, a1, 0x20
+ addu a0, a0, 0x20
+
+u_do_end_words:
+ beqz t8, u_maybe_end_cruft
+ srl t8, t8, 0x2
+
+u_end_words:
+ EX(lw, t0, 0x00(a1), l_fixup)
+ subu t8, t8, 0x1
+ UEX(usw, t0, 0x00(a0), s_fixup)
+ addu a1, a1, 0x4
+ bnez t8, u_end_words
+ addu a0, a0, 0x4
+
+u_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+u_cannot_optimize:
+ beqz t2, out
+ move a2, t2
+
+u_end_bytes:
+ EX(lb, t0, (a1), l_fixup)
+ subu a2, a2, 0x1
+ EX(sb, t0, (a0), s_fixup)
+ addu a1, a1, 0x1
+ bnez a2, u_end_bytes
+ addu a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(memcpy)
+
+/* descending order, destination aligned */
+#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ sw t0, (offset + 0x10)(dst); \
+ sw t1, (offset + 0x14)(dst); \
+ sw t2, (offset + 0x18)(dst); \
+ sw t3, (offset + 0x1c)(dst); \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ sw t0, (offset + 0x00)(dst); \
+ sw t1, (offset + 0x04)(dst); \
+ sw t2, (offset + 0x08)(dst); \
+ sw t3, (offset + 0x0c)(dst)
+
+/* descending order, destination unaligned */
+#define RUMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lw t0, (offset + 0x10)(src); \
+ lw t1, (offset + 0x14)(src); \
+ lw t2, (offset + 0x18)(src); \
+ lw t3, (offset + 0x1c)(src); \
+ usw t0, (offset + 0x10)(dst); \
+ usw t1, (offset + 0x14)(dst); \
+ usw t2, (offset + 0x18)(dst); \
+ usw t3, (offset + 0x1c)(dst); \
+ lw t0, (offset + 0x00)(src); \
+ lw t1, (offset + 0x04)(src); \
+ lw t2, (offset + 0x08)(src); \
+ lw t3, (offset + 0x0c)(src); \
+ usw t0, (offset + 0x00)(dst); \
+ usw t1, (offset + 0x04)(dst); \
+ usw t2, (offset + 0x08)(dst); \
+ usw t3, (offset + 0x0c)(dst)
+
+ .align 5
+LEAF(memmove)
+ sltu t0, a0, a1 # dst < src -> memcpy
+ bnez t0, memcpy
+ addu v0, a0, a2
+ sltu t0, v0, a1 # dst + len < src -> non-
+ bnez t0, __memcpy # overlapping, can use memcpy
+ move v0, a0 /* return value */
+ END(memmove)
+
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ addu a0, a2 # dst = dst + len
+ addu a1, a2 # src = src + len
+
+ xor t0, a0, a1
+ andi t0, t0, 0x3
+ move t7, a0
+ beqz t0, r_can_align
+ sltiu t8, a2, 0x8
+
+ b r_memcpy_u_src # bad alignment
+ move t2, a2
+
+r_can_align:
+ bnez t8, r_small_memcpy # < 8 bytes to copy
+ move t2, a2
+
+ beqz a2, r_out
+ andi t8, a1, 0x1
+
+r_hword_align:
+ beqz t8, r_word_align
+ andi t8, a1, 0x2
+
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ subu a0, a0, 0x1
+ andi t8, a1, 0x2
+
+r_word_align:
+ beqz t8, r_dword_align
+ sltiu t8, a2, 56
+
+ lh t0, -2(a1)
+ subu a2, a2, 0x2
+ sh t0, -2(a0)
+ sltiu t8, a2, 56
+ subu a0, a0, 0x2
+ subu a1, a1, 0x2
+
+r_dword_align:
+ bnez t8, r_do_end_words
+ move t8, a2
+
+ andi t8, a1, 0x4
+ beqz t8, r_qword_align
+ andi t8, a1, 0x8
+
+ lw t0, -4(a1)
+ subu a2, a2, 0x4
+ sw t0, -4(a0)
+ subu a1, a1, 0x4
+ subu a0, a0, 0x4
+ andi t8, a1, 0x8
+
+r_qword_align:
+ beqz t8, r_oword_align
+ andi t8, a1, 0x10
+
+ subu a1, a1, 0x8
+ lw t0, 0x04(a1)
+ lw t1, 0x00(a1)
+ subu a0, a0, 0x8
+ sw t0, 0x04(a0)
+ sw t1, 0x00(a0)
+ subu a2, a2, 0x8
+
+ andi t8, a1, 0x10
+
+r_oword_align:
+ beqz t8, r_begin_movement
+ srl t8, a2, 0x7
+
+ subu a1, a1, 0x10
+ lw t3, 0x08(a1) # assumes subblock ordering
+ lw t4, 0x0c(a1)
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x10
+ sw t3, 0x08(a0)
+ sw t4, 0x0c(a0)
+ sw t0, 0x00(a0)
+ sw t1, 0x04(a0)
+ subu a2, a2, 0x10
+ srl t8, a2, 0x7
+
+r_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+r_move_128bytes:
+ RMOVE_BIGCHUNK(a1, a0, -0x80, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x60, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x40, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, -0x20, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ subu a1, a1, 0x80
+ bnez t8, r_move_128bytes
+ subu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+r_move_64bytes:
+ subu a1, a1, 0x40
+ subu a0, a0, 0x40
+ RMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ RMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+
+1:
+ beqz t2, r_do_end_words
+ andi t8, a2, 0x1c
+
+r_move_32bytes:
+ subu a1, a1, 0x20
+ subu a0, a0, 0x20
+ RMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+
+r_do_end_words:
+ beqz t8, r_maybe_end_cruft
+ srl t8, t8, 0x2
+
+r_end_words:
+ lw t0, -4(a1)
+ subu t8, t8, 0x1
+ sw t0, -4(a0)
+ subu a1, a1, 0x4
+ bnez t8, r_end_words
+ subu a0, a0, 0x4
+
+r_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+r_small_memcpy:
+ beqz t2, r_out
+ move a2, t2
+
+r_end_bytes:
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ bnez a2, r_end_bytes
+ subu a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+/* ------------------------------------------------------------------------- */
+
+/* Bad, bad. At least try to align the source */
+
+r_memcpy_u_src:
+ bnez t8, r_small_memcpy # < 8 bytes?
+ move t2, a2
+
+ andi t0, a1, 7 # t0: how much to align
+
+ ulw t1, -8(a1) # dword alignment
+ ulw t2, -4(a1)
+ usw t1, -8(a0)
+ usw t2, -4(a0)
+
+ subu a1, t0 # src
+ subu a0, t0 # dst
+ subu a2, t0 # len
+
+ sltiu t8, a2, 56
+ bnez t8, ru_do_end_words
+ andi t8, a2, 0x3c
+
+ andi t8, a1, 8 # now qword aligned?
+
+ru_qword_align:
+ beqz t8, ru_oword_align
+ andi t8, a1, 0x10
+
+ subu a1, a1, 0x8
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x8
+ usw t0, 0x00(a0)
+ usw t1, 0x04(a0)
+ subu a2, a2, 0x8
+
+ andi t8, a1, 0x10
+
+ru_oword_align:
+ beqz t8, ru_begin_movement
+ srl t8, a2, 0x7
+
+ subu a1, a1, 0x10
+ lw t3, 0x08(a1) # assumes subblock ordering
+ lw t4, 0x0c(a1)
+ lw t0, 0x00(a1)
+ lw t1, 0x04(a1)
+ subu a0, a0, 0x10
+ usw t3, 0x08(a0)
+ usw t4, 0x0c(a0)
+ usw t0, 0x00(a0)
+ usw t1, 0x04(a0)
+ subu a2, a2, 0x10
+
+ srl t8, a2, 0x7
+
+ru_begin_movement:
+ beqz t8, 0f
+ andi t2, a2, 0x40
+
+ru_move_128bytes:
+ RUMOVE_BIGCHUNK(a1, a0, -0x80, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x60, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x40, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, -0x20, t0, t1, t3, t4)
+ subu t8, t8, 0x01
+ subu a1, a1, 0x80
+ bnez t8, ru_move_128bytes
+ subu a0, a0, 0x80
+
+0:
+ beqz t2, 1f
+ andi t2, a2, 0x20
+
+ru_move_64bytes:
+ subu a1, a1, 0x40
+ subu a0, a0, 0x40
+ RUMOVE_BIGCHUNK(a1, a0, 0x20, t0, t1, t3, t4)
+ RUMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+
+1:
+ beqz t2, ru_do_end_words
+ andi t8, a2, 0x1c
+
+ru_move_32bytes:
+ subu a1, a1, 0x20
+ subu a0, a0, 0x20
+ RUMOVE_BIGCHUNK(a1, a0, 0x00, t0, t1, t3, t4)
+ andi t8, a2, 0x1c
+
+ru_do_end_words:
+ beqz t8, ru_maybe_end_cruft
+ srl t8, t8, 0x2
+
+ru_end_words:
+ lw t0, -4(a1)
+ usw t0, -4(a0)
+ subu t8, t8, 0x1
+ subu a1, a1, 0x4
+ bnez t8, ru_end_words
+ subu a0, a0, 0x4
+
+ru_maybe_end_cruft:
+ andi t2, a2, 0x3
+
+ru_cannot_optimize:
+ beqz t2, r_out
+ move a2, t2
+
+ru_end_bytes:
+ lb t0, -1(a1)
+ subu a2, a2, 0x1
+ sb t0, -1(a0)
+ subu a1, a1, 0x1
+ bnez a2, ru_end_bytes
+ subu a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
+
+l_fixup: # load fault: clear the rest of the buffer
+ lw t0, THREAD_BUADDR($28) # faulting source address, saved in thread struct
+ nop # load delay slot
+ subu a2, AT, t0 # a2 bytes to go (AT preserved - see file header)
+ addu a0, t0 # compute start address in a0: dst + (fault - src)
+ subu a0, a1
+ j __bzero # zero-fill the uncopied tail
+ move a1, zero
+
+s_fixup: # store fault: nothing to clean up
+ jr ra
+ nop
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
new file mode 100644
index 000000000..32f175756
--- /dev/null
+++ b/arch/mips/lib/memset.S
@@ -0,0 +1,141 @@
+/*
+ * include/asm-mips/types.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 by Ralf Baechle
+ *
+ * $Id: memset.S,v 1.2 1998/04/25 17:01:45 ralf Exp $
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+#define F_FILL64(dst, offset, val, fixup) \
+ EX(sw, val, (offset + 0x00)(dst), fixup); \
+ EX(sw, val, (offset + 0x04)(dst), fixup); \
+ EX(sw, val, (offset + 0x08)(dst), fixup); \
+ EX(sw, val, (offset + 0x0c)(dst), fixup); \
+ EX(sw, val, (offset + 0x10)(dst), fixup); \
+ EX(sw, val, (offset + 0x14)(dst), fixup); \
+ EX(sw, val, (offset + 0x18)(dst), fixup); \
+ EX(sw, val, (offset + 0x1c)(dst), fixup); \
+ EX(sw, val, (offset + 0x20)(dst), fixup); \
+ EX(sw, val, (offset + 0x24)(dst), fixup); \
+ EX(sw, val, (offset + 0x28)(dst), fixup); \
+ EX(sw, val, (offset + 0x2c)(dst), fixup); \
+ EX(sw, val, (offset + 0x30)(dst), fixup); \
+ EX(sw, val, (offset + 0x34)(dst), fixup); \
+ EX(sw, val, (offset + 0x38)(dst), fixup); \
+ EX(sw, val, (offset + 0x3c)(dst), fixup)
+
+/*
+ * memset(void *s, int c, size_t n)
+ *
+ * a0: start of area to clear
+ * a1: char to fill with
+ * a2: size of area to clear
+ */
+ .set noreorder
+ .align 5
+LEAF(memset)
+ beqz a1, 1f
+ move v0, a0 /* result */
+
+ andi a1, 0xff /* spread fillword */
+ sll t1, a1, 8
+ or a1, t1
+ sll t1, a1, 16
+ or a1, t1
+1:
+
+EXPORT(__bzero)
+ sltiu t0, a2, 4 /* very small region? */
+ bnez t0, small_memset
+ andi t0, a0, 3 /* aligned? */
+
+ beqz t0, 1f
+ subu t0, 4 /* alignment in bytes */
+
+#ifdef __MIPSEB__
+ EX(swl, a1, (a0), first_fixup) /* make word aligned */
+#endif
+#ifdef __MIPSEL__
+ EX(swr, a1, (a0), first_fixup) /* make word aligned */
+#endif
+ subu a0, t0 /* word align ptr */
+ addu a2, t0 /* correct size */
+
+1: ori t1, a2, 0x3f /* # of full blocks */
+ xori t1, 0x3f
+ beqz t1, memset_partial /* no block to fill */
+ andi t0, a2, 0x3c
+
+ addu t1, a0 /* end address */
+ .set reorder
+1: addiu a0, 64
+ F_FILL64(a0, -64, a1, fwd_fixup)
+ bne t1, a0, 1b
+ .set noreorder
+
+memset_partial:
+ la t1, 2f /* where to start */
+ subu t1, t0
+ jr t1
+ addu a0, t0 /* dest ptr */
+
+ F_FILL64(a0, -64, a1, partial_fixup) /* ... but first do wrds ... */
+2: andi a2, 3 /* 0 <= n <= 3 to go */
+
+ beqz a2, 1f
+ addu a0, a2 /* What's left */
+#ifdef __MIPSEB__
+ EX(swr, a1, -1(a0), last_fixup)
+#endif
+#ifdef __MIPSEL__
+ EX(swl, a1, -1(a0), last_fixup)
+#endif
+1: jr ra
+ move a2, zero
+
+small_memset:
+ beqz a2, 2f
+ addu t1, a0, a2
+
+1: addiu a0, 1 /* fill bytewise */
+ bne t1, a0, 1b
+ sb a1, -1(a0)
+
+2: jr ra /* done */
+ move a2, zero
+ END(memset)
+
+first_fixup:
+ jr ra
+ nop
+
+fwd_fixup:
+ lw t0, THREAD_BUADDR($28)
+ andi a2, 0x3f
+ addu a2, t1
+ jr ra
+ subu a2, t0
+
+partial_fixup:
+ lw t0, THREAD_BUADDR($28)
+ andi a2, 3
+ addu a2, t1
+ jr ra
+ subu a2, t0
+
+last_fixup:
+ jr ra
+ andi v1, a2, 3
diff --git a/arch/mips/lib/memset.c b/arch/mips/lib/memset.c
deleted file mode 100644
index bbdbcbb31..000000000
--- a/arch/mips/lib/memset.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* linux/arch/mips/lib/memset.c
- *
- * This is from GNU libc.
- */
-
-#include <linux/types.h>
-
-#define op_t unsigned long int
-#define OPSIZ (sizeof(op_t))
-
-typedef unsigned char byte;
-
-void *memset(void *dstpp, char c, size_t len)
-{
- long int dstp = (long int) dstpp;
-
- if (len >= 8) {
- size_t xlen;
- op_t cccc;
-
- cccc = (unsigned char) c;
- cccc |= cccc << 8;
- cccc |= cccc << 16;
-
- /* There are at least some bytes to set.
- No need to test for LEN == 0 in this alignment loop. */
- while (dstp % OPSIZ != 0) {
- ((byte *) dstp)[0] = c;
- dstp += 1;
- len -= 1;
- }
-
- /* Write 8 `op_t' per iteration until less
- * than 8 `op_t' remain.
- */
- xlen = len / (OPSIZ * 8);
- while (xlen > 0) {
- ((op_t *) dstp)[0] = cccc;
- ((op_t *) dstp)[1] = cccc;
- ((op_t *) dstp)[2] = cccc;
- ((op_t *) dstp)[3] = cccc;
- ((op_t *) dstp)[4] = cccc;
- ((op_t *) dstp)[5] = cccc;
- ((op_t *) dstp)[6] = cccc;
- ((op_t *) dstp)[7] = cccc;
- dstp += 8 * OPSIZ;
- xlen -= 1;
- }
- len %= OPSIZ * 8;
-
- /* Write 1 `op_t' per iteration until less than
- * OPSIZ bytes remain.
- */
- xlen = len / OPSIZ;
- while (xlen > 0) {
- ((op_t *) dstp)[0] = cccc;
- dstp += OPSIZ;
- xlen -= 1;
- }
- len %= OPSIZ;
- }
-
- /* Write the last few bytes. */
- while (len > 0) {
- ((byte *) dstp)[0] = c;
- dstp += 1;
- len -= 1;
- }
-
- return dstpp;
-}
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index c42144b35..5f44c3eb4 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -5,29 +5,44 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (c) 1996 by Ralf Baechle
+ * Copyright (c) 1996, 1998 by Ralf Baechle
+ *
+ * $Id: strlen_user.S,v 1.3 1998/05/03 11:13:45 ralf Exp $
*/
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
#include <asm/sgidefs.h>
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
/*
* Return the size of a string (including the ending 0)
*
* Return 0 for error
*/
-LEAF(__strlen_user)
- move v0,zero
-1: lb t0,(a0)
- LONG_ADDIU v0,1
- LONG_ADDIU a0,1
- bnez t0,1b
- jr ra
- END(__strlen_user)
+LEAF(__strlen_user_nocheck_asm)
+ lw v0, THREAD_CURDS($28) # pointer ok?
+ subu v0, zero, v0
+ and v0, a0
+ nor v0, zero, v0
+ beqz v0, fault
+EXPORT(__strlen_user_asm)
+ move v0, a0
+1: EX(lb, t0, (v0), fault)
+ LONG_ADDIU v0, 1
+ bnez t0, 1b
+ LONG_SUBU v0, a0
+ jr ra
+ END(__strlen_user_nocheck_asm)
- .section __ex_table,"a"
- PTR 1b,fault
- .previous
+ .section __ex_table,"a"
+ PTR 1b, fault
+ .previous
-fault: move v0,zero
- jr ra
+fault: move v0, zero
+ jr ra
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index f942740e6..f3240475a 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -6,12 +6,20 @@
* for more details.
*
* Copyright (c) 1996 by Ralf Baechle
+ *
+ * $Id: strncpy_user.S,v 1.3 1998/05/03 11:13:45 ralf Exp $
*/
#include <linux/errno.h>
-
#include <asm/asm.h>
+#include <asm/offset.h>
#include <asm/regdef.h>
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
/*
* Returns: -EFAULT if exception before terminator, N if the entire
* buffer filled, else strlen.
@@ -19,30 +27,37 @@
/*
* Ugly special case have to check: we might get passed a user space
- * pointer which wraps into the kernel space ...
+ * pointer which wraps into the kernel space. We don't deal with that. If
+ * it happens at most some bytes of the exceptions handlers will be copied.
*/
-LEAF(__strncpy_from_user)
- move v0,zero
- move v1,a1
- .set noreorder
-1: lbu t0,(v1)
- LONG_ADDIU v1,1
- beqz t0,2f
- sb t0,(a0) # delay slot
- LONG_ADDIU v0,1
- bne v0,a2,1b
- LONG_ADDIU a0,1 # delay slot
- .set reorder
-2: LONG_ADDU t0,a1,v0
- xor t0,a1
- bltz t0,fault
- jr ra # return n
- END(__strncpy_from_user)
+LEAF(__strncpy_from_user_asm)
+ lw v0, THREAD_CURDS($28) # pointer ok?
+ subu v0, zero, v0
+ and v0, a1
+ nor v0, zero, v0
+ beqz v0, fault
+EXPORT(__strncpy_from_user_nocheck_asm)
+ move v0,zero
+ move v1,a1
+ .set noreorder
+1: EX(lbu, t0, (v1), fault)
+ LONG_ADDIU v1,1
+ beqz t0,2f
+ sb t0,(a0)
+ LONG_ADDIU v0,1
+ bne v0,a2,1b
+ LONG_ADDIU a0,1
+ .set reorder
+2: LONG_ADDU t0,a1,v0
+ xor t0,a1
+ bltz t0,fault
+ jr ra # return n
+ END(__strncpy_from_user_asm)
-fault: li v0,-EFAULT
- jr ra
+fault: li v0,-EFAULT
+ jr ra
- .section __ex_table,"a"
- PTR 1b,fault
- .previous
+ .section __ex_table,"a"
+ PTR 1b,fault
+ .previous
diff --git a/arch/mips/mm/andes.c b/arch/mips/mm/andes.c
index 05150aa83..b29dcb274 100644
--- a/arch/mips/mm/andes.c
+++ b/arch/mips/mm/andes.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: andes.c,v 1.3 1998/03/22 23:27:14 ralf Exp $
+ * $Id: andes.c,v 1.4 1998/04/05 11:23:54 ralf Exp $
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -86,7 +86,7 @@ static void andes_add_wired_entry(unsigned long entrylo0, unsigned long entrylo1
/* XXX */
}
-static void andes_user_mode(struct pt_regs *regs)
+static int andes_user_mode(struct pt_regs *regs)
{
return (regs->cp0_status & ST0_KSU) == KSU_USER;
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 98d707542..61649268a 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -97,9 +97,6 @@ bad_area:
(unsigned long) regs->cp0_epc,
(unsigned long) regs->regs[31]);
#endif
-
- current->tss.cp0_badvaddr = address;
- current->tss.error_code = writeaccess;
force_sig(SIGSEGV, tsk);
goto out;
}
@@ -108,6 +105,8 @@ bad_area:
fixup = search_exception_table(regs->cp0_epc);
if (fixup) {
long new_epc;
+
+ tsk->tss.cp0_baduaddr = address;
new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc);
printk(KERN_DEBUG "%s: Exception at [<%lx>] (%lx)\n",
tsk->comm, regs->cp0_epc, new_epc);
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index 91d24a690..7d6d1b073 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: r4xx0.c,v 1.15 1998/04/05 11:23:55 ralf Exp $
+ * $Id: r4xx0.c,v 1.23 1998/04/04 14:02:54 ralf Exp $
*
* To do:
*
@@ -2608,7 +2608,7 @@ __initfunc(static void setup_noscache_funcs(void))
dma_cache_inv = r4k_dma_cache_inv_pc;
}
-static void setup_scache_funcs(void)
+__initfunc(static void setup_scache_funcs(void))
{
switch(sc_lsize) {
case 16:
@@ -2690,7 +2690,7 @@ static void setup_scache_funcs(void)
typedef int (*probe_func_t)(unsigned long);
-static inline void setup_scache(unsigned int config)
+__initfunc(static inline void setup_scache(unsigned int config))
{
probe_func_t probe_scache_kseg1;
int sc_present = 0;
diff --git a/arch/mips/mm/tfp.c b/arch/mips/mm/tfp.c
index d1701c03a..dab618b3e 100644
--- a/arch/mips/mm/tfp.c
+++ b/arch/mips/mm/tfp.c
@@ -1,7 +1,8 @@
-/* $Id: tfp.c,v 1.4 1998/03/27 08:53:42 ralf Exp $
+/*
* tfp.c: MMU and cache routines specific to the r8000 (TFP).
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -87,7 +88,7 @@ static void tfp_add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
static int tfp_user_mode(struct pt_regs *regs)
{
- return regs->cp0_status & ST0_KSU == KSU_USER;
+ return (regs->cp0_status & ST0_KSU) == KSU_USER;
}
__initfunc(void ld_mmu_tfp(void))
diff --git a/arch/mips/sgi/kernel/indy_mc.c b/arch/mips/sgi/kernel/indy_mc.c
index c34cc48f2..73c7ed006 100644
--- a/arch/mips/sgi/kernel/indy_mc.c
+++ b/arch/mips/sgi/kernel/indy_mc.c
@@ -3,13 +3,13 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: indy_mc.c,v 1.2 1998/03/27 08:53:44 ralf Exp $
+ * $Id: indy_mc.c,v 1.2 1998/04/05 11:23:58 ralf Exp $
*/
#include <linux/init.h>
+#include <linux/kernel.h>
#include <asm/addrspace.h>
#include <asm/ptrace.h>
-#include <asm/processor.h>
#include <asm/sgimc.h>
#include <asm/sgihpc.h>
#include <asm/sgialib.h>
diff --git a/arch/mips/sgi/kernel/indy_sc.c b/arch/mips/sgi/kernel/indy_sc.c
index b7466339a..81c18cb09 100644
--- a/arch/mips/sgi/kernel/indy_sc.c
+++ b/arch/mips/sgi/kernel/indy_sc.c
@@ -4,7 +4,7 @@
* Copyright (C) 1997 Ralf Baechle (ralf@gnu.org),
* derived from r4xx0.c by David S. Miller (dm@engr.sgi.com).
*
- * $Id: indy_sc.c,v 1.2 1998/03/27 04:47:57 ralf Exp $
+ * $Id: indy_sc.c,v 1.3 1998/04/05 11:23:58 ralf Exp $
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -23,20 +23,8 @@
#include <asm/sgialib.h>
#include <asm/mmu_context.h>
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
-
-/* Primary cache parameters. */
-static int icache_size, dcache_size; /* Size in bytes */
-static int ic_lsize, dc_lsize; /* LineSize in bytes */
-
-/* Secondary cache (if present) parameters. */
-static scache_size, sc_lsize; /* Again, in bytes */
-
-#include <asm/cacheops.h>
-#include <asm/r4kcache.h>
+/* Secondary cache size in bytes, if present. */
+static unsigned long scache_size;
#undef DEBUG_CACHE
@@ -166,7 +154,6 @@ __initfunc(static inline int indy_sc_probe(void))
volatile unsigned int *cpu_control;
unsigned short cmd = 0xc220;
unsigned long data = 0;
- unsigned long addr;
int i, n;
#ifdef __MIPSEB__
@@ -209,43 +196,17 @@ __initfunc(static inline int indy_sc_probe(void))
DEASSERT(SGIMC_EEPROM_CSEL);
ASSERT(SGIMC_EEPROM_PRE);
ASSERT(SGIMC_EEPROM_SECLOCK);
+
data <<= PAGE_SHIFT;
- printk("R4600/R5000 SCACHE size %dK ", (int) (data >> 10));
- switch(mips_cputype) {
- case CPU_R4600:
- case CPU_R4640:
- sc_lsize = 32;
- break;
+ if (data == 0)
+ return 0;
- default:
- sc_lsize = 128;
- break;
- }
- printk("linesize %d bytes\n", sc_lsize);
scache_size = data;
- if (data == 0) {
- if (mips_cputype == CPU_R5000)
- return -1;
- else
- return 0;
- }
-
- /* Enable r4600/r5000 cache. But flush it first. */
- for(addr = KSEG0; addr < (KSEG0 + dcache_size);
- addr += dc_lsize)
- flush_dcache_line_indexed(addr);
- for(addr = KSEG0; addr < (KSEG0 + icache_size);
- addr += ic_lsize)
- flush_icache_line_indexed(addr);
- for(addr = KSEG0; addr < (KSEG0 + scache_size);
- addr += sc_lsize)
- flush_scache_line_indexed(addr);
- if (mips_cputype == CPU_R4600 ||
- mips_cputype == CPU_R5000)
- return 1;
+ printk("R4600/R5000 SCACHE size %ldK, linesize 32 bytes.\n",
+ scache_size >> 10);
- return 0;
+ return 1;
}
/* XXX Check with wje if the Indy caches can differenciate between
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index ee77d50ba..d9fa7c8de 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -5,11 +5,13 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
*
- * $Id: setup.c,v 1.4 1998/03/04 08:47:29 ralf Exp $
+ * $Id: setup.c,v 1.5 1998/03/17 22:07:43 ralf Exp $
*/
#include <asm/ptrace.h>
+#include <linux/config.h>
+#include <linux/hdreg.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -19,6 +21,7 @@
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/keyboard.h>
+#include <asm/ide.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
@@ -46,6 +49,8 @@ extern void sni_machine_restart(char *command);
extern void sni_machine_halt(void);
extern void sni_machine_power_off(void);
+extern struct ide_ops std_ide_ops;
+
__initfunc(static void sni_irq_setup(void))
{
set_except_vector(0, sni_rm200_pci_handle_int);
@@ -161,4 +166,7 @@ __initfunc(void sni_rm200_pci_setup(void))
*/
request_region(0xcfc,0x04,"PCI config data");
pci_ops = &sni_pci_ops;
+#ifdef CONFIG_BLK_DEV_IDE
+ ide_ops = &std_ide_ops;
+#endif
}
diff --git a/arch/mips/tools/offset.c b/arch/mips/tools/offset.c
index 3ccd434d3..5ea6db0de 100644
--- a/arch/mips/tools/offset.c
+++ b/arch/mips/tools/offset.c
@@ -4,7 +4,7 @@
* Copyright (C) 1996 David S. Miller
* Made portable by Ralf Baechle
*
- * $Id: offset.c,v 1.6 1998/03/27 04:47:58 ralf Exp $
+ * $Id: offset.c,v 1.7 1998/04/05 11:24:07 ralf Exp $
*/
#include <linux/types.h>
@@ -102,6 +102,7 @@ void output_thread_defines(void)
offset("#define THREAD_STATUS ", struct task_struct, tss.cp0_status);
offset("#define THREAD_FPU ", struct task_struct, tss.fpu);
offset("#define THREAD_BVADDR ", struct task_struct, tss.cp0_badvaddr);
+ offset("#define THREAD_BUADDR ", struct task_struct, tss.cp0_baduaddr);
offset("#define THREAD_ECODE ", struct task_struct, tss.error_code);
offset("#define THREAD_TRAPNO ", struct task_struct, tss.trap_no);
offset("#define THREAD_PGDIR ", struct task_struct, tss.pg_dir);
diff --git a/drivers/scsi/sgiwd93.h b/drivers/scsi/sgiwd93.h
index badcebb7e..075db0904 100644
--- a/drivers/scsi/sgiwd93.h
+++ b/drivers/scsi/sgiwd93.h
@@ -1,4 +1,4 @@
-/* $Id: sgiwd93.h,v 1.2 1997/12/01 18:00:19 ralf Exp $
+/* $Id: sgiwd93.h,v 1.3 1998/03/17 22:12:40 ralf Exp $
* sgiwd93.h: SGI WD93 scsi definitions.
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
@@ -28,7 +28,7 @@ int wd33c93_reset(Scsi_Cmnd *, unsigned int);
extern struct proc_dir_entry proc_scsi_sgiwd93;
#define SGIWD93_SCSI {proc_dir: &proc_scsi_sgiwd93, \
- name: "GVP Series II SCSI", \
+ name: "SGI WD93", \
detect: sgiwd93_detect, \
release: sgiwd93_release, \
queuecommand: wd33c93_queuecommand, \
diff --git a/drivers/sgi/char/sgiserial.c b/drivers/sgi/char/sgiserial.c
index 12176d6d2..7239e216b 100644
--- a/drivers/sgi/char/sgiserial.c
+++ b/drivers/sgi/char/sgiserial.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id$
+ * $Id: sgiserial.c,v 1.8 1998/04/05 11:24:34 ralf Exp $
*/
#include <linux/config.h> /* for CONFIG_REMOTE_DEBUG */
@@ -20,6 +20,7 @@
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/delay.h>
+#include <linux/console.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -55,6 +56,19 @@ static int zs_cons_chanout = 0;
static int zs_cons_chanin = 0;
struct sgi_serial *zs_consinfo = 0;
+static struct console sgi_console_driver = {
+ "debug",
+ NULL, /* write */
+ NULL, /* read */
+ NULL, /* device */
+ NULL, /* wait_key */
+ NULL, /* unblank */
+ NULL, /* setup */
+ CON_PRINTBUFFER,
+ -1,
+ 0,
+ NULL
+};
static unsigned char kgdb_regs[16] = {
0, 0, 0, /* write 0, 1, 2 */
(Rx8 | RxENABLE), /* write 3 */
@@ -963,14 +977,14 @@ static void rs_fair_output(void)
/*
* zs_console_print is registered for printk.
*/
-static void zs_console_print(const char *p)
+
+static void zs_console_print(struct console *co, const char *str, unsigned int count)
{
- char c;
- while((c=*(p++)) != 0) {
- if(c == '\n')
+ while(count--) {
+ if(*str == '\n')
rs_put_char('\r');
- rs_put_char(c);
+ rs_put_char(*str++);
}
/* Comment this if you want to have a strict interrupt-driven output */
@@ -1715,7 +1729,7 @@ static inline struct sgi_zslayout *get_zs(int chip)
}
-extern void register_console(void (*proc)(const char *));
+
static inline void
rs_cons_check(struct sgi_serial *ss, int channel)
@@ -1726,6 +1740,7 @@ rs_cons_check(struct sgi_serial *ss, int channel)
i = o = io = 0;
+
/* Is this one of the serial console lines? */
if((zs_cons_chanout != channel) &&
(zs_cons_chanin != channel))
@@ -1733,16 +1748,20 @@ rs_cons_check(struct sgi_serial *ss, int channel)
zs_conschan = ss->zs_channel;
zs_consinfo = ss;
+
/* Register the console output putchar, if necessary */
if((zs_cons_chanout == channel)) {
o = 1;
/* double whee.. */
+
if(!consout_registered) {
- register_console(zs_console_print);
+ sgi_console_driver.write = zs_console_print;
+ register_console(&sgi_console_driver);
consout_registered = 1;
}
}
+
/* If this is console input, we handle the break received
* status interrupt on this line to mean prom_halt().
*/
@@ -1759,6 +1778,7 @@ rs_cons_check(struct sgi_serial *ss, int channel)
panic("Console baud rate weirdness");
}
+
/* Set flag variable for this port so that it cannot be
* opened for other uses by accident.
*/
@@ -1769,9 +1789,11 @@ rs_cons_check(struct sgi_serial *ss, int channel)
printk("zs%d: console I/O\n", ((channel>>1)&1));
msg_printed = 1;
}
+
} else {
printk("zs%d: console %s\n", ((channel>>1)&1),
(i==1 ? "input" : (o==1 ? "output" : "WEIRD")));
+
}
}
@@ -1783,6 +1805,7 @@ int rs_init(void)
int chip, channel, i, flags;
struct sgi_serial *info;
+
/* Setup base handler, and timer table. */
init_bh(SERIAL_BH, do_serial_bh);
timer_table[RS_TIMER].fn = rs_timer;
@@ -1969,6 +1992,7 @@ rs_cons_hook(int chip, int out, int line)
{
int channel;
+
if(chip)
panic("rs_cons_hook called with chip not zero");
if(line != 1 && line != 2)
@@ -1984,10 +2008,11 @@ rs_cons_hook(int chip, int out, int line)
zs_soft[channel].change_needed = 0;
zs_soft[channel].clk_divisor = 16;
zs_soft[channel].zs_baud = get_zsbaud(&zs_soft[channel]);
- if(out)
+ if(out)
zs_cons_chanout = ((chip * 2) + channel);
- else
+ else
zs_cons_chanin = ((chip * 2) + channel);
+
rs_cons_check(&zs_soft[channel], channel);
}
diff --git a/include/asm-mips/branch.h b/include/asm-mips/branch.h
index 08dda213e..d8882cfb1 100644
--- a/include/asm-mips/branch.h
+++ b/include/asm-mips/branch.h
@@ -5,7 +5,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
+ *
+ * $Id: branch.h,v 1.2 1998/04/28 19:37:46 ralf Exp $
*/
#include <asm/ptrace.h>
@@ -15,12 +17,13 @@ extern inline int delay_slot(struct pt_regs *regs)
}
extern int __compute_return_epc(struct pt_regs *regs);
+
extern inline int compute_return_epc(struct pt_regs *regs)
{
- if (delay_slot(regs)) {
- return __compute_return_epc(regs);
+ if (!delay_slot(regs)) {
+ regs->cp0_epc += 4;
+ return 0;
}
- regs->cp0_epc += 4;
- return 0;
+ return __compute_return_epc(regs);
}
diff --git a/include/asm-mips/ide.h b/include/asm-mips/ide.h
index bda27206d..b54a7657c 100644
--- a/include/asm-mips/ide.h
+++ b/include/asm-mips/ide.h
@@ -21,40 +21,36 @@ typedef unsigned short ide_ioreg_t;
#define ide_sti() sti()
+struct ide_ops {
+ int (*ide_default_irq)(ide_ioreg_t base);
+ ide_ioreg_t (*ide_default_io_base)(int index);
+ void (*ide_init_hwif_ports)(ide_ioreg_t *p, ide_ioreg_t base, int *irq);
+ int (*ide_request_irq)(unsigned int irq, void (*handler)(int, void *,
+ struct pt_regs *), unsigned long flags,
+ const char *device, void *dev_id);
+ void (*ide_free_irq)(unsigned int irq, void *dev_id);
+ int (*ide_check_region) (ide_ioreg_t from, unsigned int extent);
+ void (*ide_request_region)(ide_ioreg_t from, unsigned int extent,
+ const char *name);
+ void (*ide_release_region)(ide_ioreg_t from, unsigned int extent);
+};
+
+extern struct ide_ops *ide_ops;
+
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
- switch (base) {
- case 0x1f0: return 14;
- case 0x170: return 15;
- case 0x1e8: return 11;
- case 0x168: return 10;
- default:
- return 0;
- }
+ return ide_ops->ide_default_irq(base);
}
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
- switch (index) {
- case 0: return 0x1f0;
- case 1: return 0x170;
- case 2: return 0x1e8;
- case 3: return 0x168;
- default:
- return 0;
- }
+ return ide_ops->ide_default_io_base(index);
}
-static __inline__ void ide_init_hwif_ports (ide_ioreg_t *p, ide_ioreg_t base, int *irq)
+static __inline__ void ide_init_hwif_ports(ide_ioreg_t *p, ide_ioreg_t base,
+ int *irq)
{
- ide_ioreg_t port = base;
- int i = 8;
-
- while (i--)
- *p++ = port++;
- *p++ = base + 0x206;
- if (irq != NULL)
- *irq = 0;
+ ide_ops->ide_init_hwif_ports(p, base, irq);
}
typedef union {
@@ -68,38 +64,41 @@ typedef union {
} b;
} select_t;
-static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+static __inline__ int ide_request_irq(unsigned int irq, void (*handler)(int,void *, struct pt_regs *),
unsigned long flags, const char *device, void *dev_id)
{
- return request_irq(irq, handler, flags, device, dev_id);
-}
+ return ide_ops->ide_request_irq(irq, handler, flags, device, dev_id);
+}
static __inline__ void ide_free_irq(unsigned int irq, void *dev_id)
{
- free_irq(irq, dev_id);
+ ide_ops->ide_free_irq(irq, dev_id);
}
static __inline__ int ide_check_region (ide_ioreg_t from, unsigned int extent)
{
- return check_region(from, extent);
+ return ide_ops->ide_check_region(from, extent);
}
-static __inline__ void ide_request_region (ide_ioreg_t from, unsigned int extent, const char *name)
+static __inline__ void ide_request_region(ide_ioreg_t from,
+ unsigned int extent, const char *name)
{
- request_region(from, extent, name);
+ ide_ops->ide_request_region(from, extent, name);
}
-static __inline__ void ide_release_region (ide_ioreg_t from, unsigned int extent)
+static __inline__ void ide_release_region(ide_ioreg_t from,
+ unsigned int extent)
{
- release_region(from, extent);
+ ide_ops->ide_release_region(from, extent);
}
/*
* The following are not needed for the non-m68k ports
*/
-static __inline__ int ide_ack_intr (ide_ioreg_t status_port, ide_ioreg_t irq_port)
+static __inline__ int ide_ack_intr (ide_ioreg_t status_port,
+ ide_ioreg_t irq_port)
{
- return(1);
+ return 1;
}
static __inline__ void ide_fix_driveid(struct hd_driveid *id)
@@ -110,7 +109,10 @@ static __inline__ void ide_release_lock (int *ide_lock)
{
}
-static __inline__ void ide_get_lock (int *ide_lock, void (*handler)(int, void *, struct pt_regs *), void *data)
+static __inline__ void ide_get_lock (int *ide_lock,
+ void (*handler)(int, void *,
+ struct pt_regs *),
+ void *data)
{
}
diff --git a/include/asm-mips/offset.h b/include/asm-mips/offset.h
index e211de3cc..32deafe7b 100644
--- a/include/asm-mips/offset.h
+++ b/include/asm-mips/offset.h
@@ -50,7 +50,7 @@
#define TASK_PRIORITY 56
#define TASK_FLAGS 4
#define TASK_SIGPENDING 8
-#define TASK_MM 920
+#define TASK_MM 928
/* MIPS specific thread_struct offsets. */
#define THREAD_REG16 568
@@ -67,13 +67,14 @@
#define THREAD_STATUS 612
#define THREAD_FPU 616
#define THREAD_BVADDR 880
-#define THREAD_ECODE 884
-#define THREAD_TRAPNO 888
-#define THREAD_PGDIR 892
-#define THREAD_MFLAGS 896
-#define THREAD_CURDS 900
-#define THREAD_TRAMP 904
-#define THREAD_OLDCTX 908
+#define THREAD_BUADDR 884
+#define THREAD_ECODE 888
+#define THREAD_TRAPNO 892
+#define THREAD_PGDIR 896
+#define THREAD_MFLAGS 900
+#define THREAD_CURDS 904
+#define THREAD_TRAMP 908
+#define THREAD_OLDCTX 912
/* Linux mm_struct offsets. */
#define MM_COUNT 12
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 765208d3e..98d5e6a86 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -5,7 +5,7 @@
* written by Ralf Baechle
* Modified further for R[236]000 compatibility by Paul M. Antoine
*
- * $Id: processor.h,v 1.8 1998/03/27 04:47:59 ralf Exp $
+ * $Id: processor.h,v 1.13 1998/04/25 05:35:15 ralf Exp $
*/
#ifndef __ASM_MIPS_PROCESSOR_H
#define __ASM_MIPS_PROCESSOR_H
@@ -90,7 +90,7 @@ typedef struct {
*/
struct thread_struct {
/* Saved main processor registers. */
- unsigned long reg16 __attribute__ ((aligned (8)));
+ unsigned long reg16;
unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
unsigned long reg29, reg30, reg31;
@@ -98,10 +98,11 @@ struct thread_struct {
unsigned long cp0_status;
/* Saved fpu/fpu emulator stuff. */
- union mips_fpu_union fpu __attribute__ ((aligned (8)));
+ union mips_fpu_union fpu;
/* Other stuff associated with the thread. */
- unsigned long cp0_badvaddr;
+ unsigned long cp0_badvaddr; /* Last user fault */
+ unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long error_code;
unsigned long trap_no;
unsigned long pg_dir; /* used in tlb refill */
@@ -135,7 +136,7 @@ struct thread_struct {
/* \
* Other stuff associated with the process \
*/ \
- 0, 0, 0, (unsigned long) swapper_pg_dir, \
+ 0, 0, 0, 0, (unsigned long) swapper_pg_dir, \
/* \
* For now the default is to fix address errors \
*/ \
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index e925dcd50..726e82f23 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -3,7 +3,7 @@
*
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Paul M. Antoine.
*
- * $Id: stackframe.h,v 1.6 1998/03/26 07:39:21 ralf Exp $
+ * $Id: stackframe.h,v 1.7 1998/04/28 19:39:15 ralf Exp $
*/
#ifndef __ASM_MIPS_STACKFRAME_H
#define __ASM_MIPS_STACKFRAME_H
@@ -174,10 +174,6 @@
/*
* Move to kernel mode and enable interrupts.
* Set cp0 enable bit as sign that we're running on the kernel stack
- *
- * Note that the mtc0 will be effective on R4000 pipeline stage 7. This
- * means that another three instructions will be executed with interrupts
- * disabled. Arch/mips/mips3/r4xx0.S makes use of this fact.
*/
#define STI \
mfc0 t0,CP0_STATUS; \
@@ -186,4 +182,15 @@
xori t0,0x1e; \
mtc0 t0,CP0_STATUS
+/*
+ * Just move to kernel mode and leave interrupts as they are.
+ * Set cp0 enable bit as sign that we're running on the kernel stack
+ */
+#define KMODE \
+ mfc0 t0,CP0_STATUS; \
+ li t1,ST0_CU0|0x1e; \
+ or t0,t1; \
+ xori t0,0x1e; \
+ mtc0 t0,CP0_STATUS
+
#endif /* __ASM_MIPS_STACKFRAME_H */
diff --git a/include/asm-mips/string.h b/include/asm-mips/string.h
index 39e6b445c..7f4bded24 100644
--- a/include/asm-mips/string.h
+++ b/include/asm-mips/string.h
@@ -7,7 +7,7 @@
*
* Copyright (c) 1994, 1995, 1996, 1997 by Ralf Baechle
*
- * $Id: string.h,v 1.4 1998/03/21 19:31:09 ralf Exp $
+ * $Id: string.h,v 1.7 1998/03/25 00:24:10 ralf Exp $
*/
#ifndef __ASM_MIPS_STRING_H
#define __ASM_MIPS_STRING_H
@@ -128,7 +128,10 @@ extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
#define __HAVE_ARCH_BCOPY
-extern char * bcopy(const char * src, char * dest, int count);
+extern __inline__ char * bcopy(const char * src, char * dest, int count)
+{
+ memmove(dest, src, count);
+}
#define __HAVE_ARCH_MEMSCAN
extern __inline__ void *memscan(void *__addr, int __c, size_t __size)
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index abeaa3343..ab8faf278 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -7,6 +7,8 @@
*
* Copyright (C) 1994, 1995 by Ralf Baechle
* Modified further for R[236]000 by Paul M. Antoine, 1996
+ *
+ * $Id: system.h,v 1.6 1998/05/03 11:13:54 ralf Exp $
*/
#ifndef __ASM_MIPS_SYSTEM_H
#define __ASM_MIPS_SYSTEM_H
@@ -17,18 +19,18 @@
extern __inline__ void
__sti(void)
{
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "mfc0\t$1,$12\n\t"
- "ori\t$1,0x1f\n\t"
- "xori\t$1,0x1e\n\t"
- "mtc0\t$1,$12\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : /* no outputs */
- : /* no inputs */
- : "$1", "memory");
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "mfc0\t$1,$12\n\t"
+ "ori\t$1,0x1f\n\t"
+ "xori\t$1,0x1e\n\t"
+ "mtc0\t$1,$12\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ : /* no inputs */
+ : "$1", "memory");
}
/*
@@ -41,21 +43,21 @@ __sti(void)
extern __inline__ void
__cli(void)
{
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- ".set\tnoat\n\t"
- "mfc0\t$1,$12\n\t"
- "ori\t$1,1\n\t"
- "xori\t$1,1\n\t"
- "mtc0\t$1,$12\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- ".set\tat\n\t"
- ".set\treorder"
- : /* no outputs */
- : /* no inputs */
- : "$1", "memory");
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "mfc0\t$1,$12\n\t"
+ "ori\t$1,1\n\t"
+ "xori\t$1,1\n\t"
+ "mtc0\t$1,$12\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ : /* no inputs */
+ : "$1", "memory");
}
#define __save_flags(x) \
@@ -87,16 +89,16 @@ __asm__ __volatile__( \
extern void __inline__
__restore_flags(int flags)
{
- __asm__ __volatile__(
- ".set\tnoreorder\n\t"
- "mtc0\t%0,$12\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- ".set\treorder"
- : /* no output */
- : "r" (flags)
- : "memory");
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ "mtc0\t%0,$12\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ ".set\treorder"
+ : /* no output */
+ : "r" (flags)
+ : "memory");
}
/*
@@ -113,7 +115,7 @@ __asm__ __volatile__( \
"# prevent instructions being moved around\n\t" \
".set\tnoreorder\n\t" \
".set\treorder" \
- : /* no output */ \
+ : /* no output */ \
: /* no input */ \
: "memory")
@@ -125,47 +127,12 @@ __asm__ __volatile__( \
extern asmlinkage void (*resume)(void *tsk);
#endif /* !defined (__LANGUAGE_ASSEMBLY__) */
-/*
- * FIXME: resume() assumes current == prev
- */
#define switch_to(prev,next) \
do { \
- prev->tss.current_ds = active_ds; \
- active_ds = next->tss.current_ds; \
- resume(next); \
+ resume(next); \
} while(0)
/*
- * The 8 and 16 bit variants have to disable interrupts temporarily.
- * Both are currently unused.
- */
-extern __inline__ unsigned long xchg_u8(volatile char * m, unsigned long val)
-{
- unsigned long flags, retval;
-
- save_flags(flags);
- cli();
- retval = *m;
- *m = val;
- restore_flags(flags);
-
- return retval;
-}
-
-extern __inline__ unsigned long xchg_u16(volatile short * m, unsigned long val)
-{
- unsigned long flags, retval;
-
- save_flags(flags);
- cli();
- retval = *m;
- *m = val;
- restore_flags(flags);
-
- return retval;
-}
-
-/*
* For 32 and 64 bit operands we can take advantage of ll and sc.
* FIXME: This doesn't work for R3000 machines.
*/
@@ -186,8 +153,9 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
".set\tat\n\t"
".set\treorder"
: "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (m), "2" (val));
-#else /* FIXME: Brain-dead approach, but then again, I AM hacking - PMA */
+ : "1" (m), "2" (val)
+ : "memory");
+#else
unsigned long flags, retval;
save_flags(flags);
@@ -218,7 +186,8 @@ extern __inline__ unsigned long xchg_u64(volatile long * m, unsigned long val)
".set\tat\n\t"
".set\treorder"
: "=r" (val), "=r" (m), "=r" (dummy)
- : "1" (m), "2" (val));
+ : "1" (m), "2" (val)
+ : "memory");
return val;
}
@@ -239,14 +208,12 @@ extern void __xchg_called_with_bad_pointer(void);
static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
- case 1:
- return xchg_u8(ptr, x);
- case 2:
- return xchg_u16(ptr, x);
case 4:
return xchg_u32(ptr, x);
+#if defined(__mips64)
case 8:
return xchg_u64(ptr, x);
+#endif
}
__xchg_called_with_bad_pointer();
return x;
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 42a7df686..8b139fe23 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -5,16 +5,15 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
*
- * $Id: uaccess.h,v 1.7 1998/03/21 08:04:33 ralf Exp $
+ * $Id: uaccess.h,v 1.15 1998/05/03 11:13:54 ralf Exp $
*/
#ifndef __ASM_MIPS_UACCESS_H
#define __ASM_MIPS_UACCESS_H
#include <linux/errno.h>
#include <linux/sched.h>
-#include <asm/asm.h>
#define STR(x) __STR(x)
#define __STR(x) #x
@@ -32,11 +31,9 @@
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-extern mm_segment_t active_ds;
-
-#define get_fs() (active_ds)
+#define get_fs() (current->tss.current_ds)
#define get_ds() (KERNEL_DS)
-#define set_fs(x) (active_ds=(x))
+#define set_fs(x) (current->tss.current_ds=(x))
#define segment_eq(a,b) ((a).seg == (b).seg)
@@ -107,24 +104,6 @@ if (__get_user(x,ptr)) return ret; })
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
-#define copy_to_user(to,from,n) __copy_tofrom_user((to),(from),(n),__cu_to)
-#define copy_from_user(to,from,n) __copy_tofrom_user((to),(from),(n),__cu_from)
-
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-
-#define __copy_tofrom_user(to,from,n,v) ({ \
- void * __cu_to; \
- const void * __cu_from; \
- long __cu_len; \
- \
- __cu_to = (to); \
- __cu_from = (from); \
- __cu_len = (n); \
- if (__access_ok(((unsigned long)(v)),__cu_len,__access_mask)) \
- __cu_len = __copy_user(__cu_to, __cu_from, __cu_len); \
- __cu_len; \
-})
-
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
@@ -174,10 +153,11 @@ __asm__ __volatile__( \
"2:\n\t" \
".section\t.fixup,\"ax\"\n" \
"3:\tli\t%0,%3\n\t" \
+ "move\t%1,$0\n\t" \
"j\t2b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b,3b\n\t" \
+ ".word\t1b,3b\n\t" \
".previous" \
:"=r" (__gu_err), "=r" (__gu_val) \
:"o" (__m(__gu_addr)), "i" (-EFAULT)); })
@@ -193,11 +173,13 @@ __asm__ __volatile__( \
"move\t%0,$0\n" \
"3:\t.section\t.fixup,\"ax\"\n" \
"4:\tli\t%0,%4\n\t" \
+ "move\t%1,$0\n\t" \
+ "move\t%D1,$0\n\t" \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b,4b\n\t" \
- STR(PTR)"\t2b,4b\n\t" \
+ ".word\t1b,4b\n\t" \
+ ".word\t2b,4b\n\t" \
".previous" \
:"=r" (__gu_err), "=&r" (__gu_val) \
:"o" (__m(__gu_addr)), "o" (__m(__gu_addr + 4)), \
@@ -257,7 +239,7 @@ __asm__ __volatile__( \
"j\t2b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b,3b\n\t" \
+ ".word\t1b,3b\n\t" \
".previous" \
:"=r" (__pu_err) \
:"r" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); })
@@ -274,8 +256,8 @@ __asm__ __volatile__( \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b,4b\n\t" \
- STR(PTR)"\t2b,4b\n\t" \
+ ".word\t1b,4b\n\t" \
+ ".word\t2b,4b\n\t" \
".previous" \
:"=r" (__pu_err) \
:"r" (__pu_val), "o" (__m(__pu_addr)), "o" (__m(__pu_addr + 4)), \
@@ -293,60 +275,195 @@ if (copy_from_user(to,from,n)) \
return retval; \
})
-#define __copy_to_user(to,from,n) \
- __copy_user((to),(from),(n))
+extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-#define __copy_from_user(to,from,n) \
- __copy_user((to),(from),(n))
+#define __copy_to_user(to,from,n) ({ \
+ void *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ __asm__ __volatile__( \
+ "move\t$4, %1\n\t" \
+ "move\t$5, %2\n\t" \
+ "move\t$6, %3\n\t" \
+ "jal\t__copy_user\n\t" \
+ "move\t%0, $6" \
+ : "=r" (__cu_len) \
+ : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
+ : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
+ "$24", "$31","memory"); \
+ __cu_len; \
+})
-#define __clear_user(addr,size) \
-({ \
- void *__cu_end; \
+#define __copy_from_user(to,from,n) ({ \
+ void *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
__asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- "1:\taddiu\t%0,1\n" \
- "bne\t%0,%1,1b\n\t" \
- "sb\t$0,-1(%0)\n\t" \
- "2:\t.set\treorder\n\t" \
- ".section\t.fixup,\"ax\"\n" \
- "3:\tsubu\t%0,1\n\t" \
- "j\t2b\n\t" \
- ".previous\n\t" \
- ".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b,3b\n\t" \
- ".previous" \
- :"=r" (addr), "=r" (__cu_end) \
- :"0" (addr), "1" (addr + size), "i" (-EFAULT) \
- :"memory"); \
- size = __cu_end - (addr); \
+ "move\t$4, %1\n\t" \
+ "move\t$5, %2\n\t" \
+ "move\t$6, %3\n\t" \
+ ".set\tnoat\n\t" \
+ "addu\t$1, %2, %3\n\t" \
+ ".set\tat\n\t" \
+ "jal\t__copy_user\n\t" \
+ "move\t%0, $6" \
+ : "=r" (__cu_len) \
+ : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
+ : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", "$15", \
+ "$24", "$31","memory"); \
+ __cu_len; \
+})
+
+#define copy_to_user(to,from,n) ({ \
+ void *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
+ __asm__ __volatile__( \
+ "move\t$4, %1\n\t" \
+ "move\t$5, %2\n\t" \
+ "move\t$6, %3\n\t" \
+ "jal\t__copy_user\n\t" \
+ "move\t%0, $6" \
+ : "=r" (__cu_len) \
+ : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
+ : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
+ "$15", "$24", "$31","memory"); \
+ __cu_len; \
+})
+
+#define copy_from_user(to,from,n) ({ \
+ void *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
+ __asm__ __volatile__( \
+ "move\t$4, %1\n\t" \
+ "move\t$5, %2\n\t" \
+ "move\t$6, %3\n\t" \
+ ".set\tnoat\n\t" \
+ "addu\t$1, %2, %3\n\t" \
+ ".set\tat\n\t" \
+ "jal\t__copy_user\n\t" \
+ "move\t%0, $6" \
+ : "=r" (__cu_len) \
+ : "r" (__cu_to), "r" (__cu_from), "r" (__cu_len) \
+ : "$4", "$5", "$6", "$8", "$9", "$10", "$11", "$12", \
+ "$15", "$24", "$31","memory"); \
+ __cu_len; \
})
+extern inline __kernel_size_t
+__clear_user(void *addr, __kernel_size_t size)
+{
+ __kernel_size_t res;
+
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, $0\n\t"
+ "move\t$6, %2\n\t"
+ "jal\t__bzero\n\t"
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+ : "$4", "$5", "$6", "$8", "$9", "$31");
+
+ return res;
+}
+
#define clear_user(addr,n) ({ \
void * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && __access_ok(VERIFY_WRITE, ((unsigned long)(__cl_addr)), __cl_size)) \
-__clear_user(__cl_addr, __cl_size); \
+__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; })
/*
* Returns: -EFAULT if exception before terminator, N if the entire
* buffer filled, else strlen.
*/
-extern long __strncpy_from_user(char *__to, const char *__from, long __to_len);
+extern inline long
+__strncpy_from_user(char *__to, const char *__from, long __len)
+{
+ long res;
+
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ "jal\t__strncpy_from_user_nocheck_asm\n\t"
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
+
+ return res;
+}
+
+extern inline long
+strncpy_from_user(char *__to, const char *__from, long __len)
+{
+ long res;
+
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ "jal\t__strncpy_from_user_asm\n\t"
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
+
+ return res;
+}
-#define strncpy_from_user(dest,src,count) ({ \
- const void * __sc_src = (src); \
- long __sc_res = -EFAULT; \
- if (access_ok(VERIFY_READ, __sc_src, 0)) { \
- __sc_res = __strncpy_from_user(dest, __sc_src, count); \
-} __sc_res; })
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-extern long __strlen_user(const char *);
+extern inline long __strlen_user(const char *s)
+{
+ long res;
-extern inline long strlen_user(const char *str)
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "jal\t__strlen_user_nocheck_asm\n\t"
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", "$8", "$31");
+
+ return res;
+}
+
+extern inline long strlen_user(const char *s)
{
- return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
+ long res;
+
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "jal\t__strlen_user_asm\n\t"
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s)
+ : "$2", "$4", "$8", "$31");
+
+ return res;
}
struct exception_table_entry