author    Ralf Baechle <ralf@linux-mips.org>    1997-01-07 02:33:00 +0000
committer <ralf@linux-mips.org>                 1997-01-07 02:33:00 +0000
commit    beb116954b9b7f3bb56412b2494b562f02b864b1 (patch)
tree      120e997879884e1b9d93b265221b939d2ef1ade1 /arch/mips/mips1
parent    908d4681a1dc3792ecafbe64265783a86c4cccb6 (diff)
Import of Linux/MIPS 2.1.14
Diffstat (limited to 'arch/mips/mips1')
-rw-r--r--  arch/mips/mips1/Makefile          28
-rw-r--r--  arch/mips/mips1/cache.S          162
-rw-r--r--  arch/mips/mips1/cpu.c             91
-rw-r--r--  arch/mips/mips1/fp-context.S     159
-rw-r--r--  arch/mips/mips1/memcpy.S         206
-rw-r--r--  arch/mips/mips1/memset.S          27
-rw-r--r--  arch/mips/mips1/pagetables.c      86
-rw-r--r--  arch/mips/mips1/r3000.S         1125
-rw-r--r--  arch/mips/mips1/showregs.c        32
9 files changed, 1916 insertions, 0 deletions
diff --git a/arch/mips/mips1/Makefile b/arch/mips/mips1/Makefile
new file mode 100644
index 000000000..3e9f13037
--- /dev/null
+++ b/arch/mips/mips1/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for the MIPS I specific parts of the Linux/MIPS kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+.S.s:
+ $(CPP) $(CFLAGS) $< -o $*.s
+.S.o:
+ $(CC) $(CFLAGS) -c $< -o $*.o
+
+all: mips.o
+EXTRA_ASFLAGS = -mips1 -mcpu=r3000
+O_TARGET := mips.o
+O_OBJS := cache.o cpu.o memcpy.o memset.o r3000.o pagetables.o showregs.o \
+	   fp-context.o
+
+r3000.o: r3000.S
+
+fp-context.o: fp-context.S
+
+cache.o: cache.S
+
+clean:
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/mips/mips1/cache.S b/arch/mips/mips1/cache.S
new file mode 100644
index 000000000..788a567e8
--- /dev/null
+++ b/arch/mips/mips1/cache.S
@@ -0,0 +1,162 @@
+# R3000 cache routines lifted from IDT documentation
+# by Ruud Riem-Viis. Adapted for Linux by Didier Frick
+# (dfrick@dial.eunet.ch)
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/processor.h>
+#include <asm/regdef.h>
+#include <asm/cache.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsconfig.h>
+#include <asm/stackframe.h>
+#include <asm/bootinfo.h>
+
+#define MINCACHE 0x00200 /* minimum cache size 512 */
+#define MAXCACHE 0x04000 /* maximum cache size 16K */
+
+/*
+ * Figure out the size of the I- and D-caches, using the diagnostic isolate
+ * swap features. The cache size is left in an environment variable because
+ * the system will want to know it later.
+ * Flush the cache so that it is in a known state.
+ */
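+
+/*
+ * Roughly, the probe below does this (C sketch for illustration only;
+ * read32()/write32() are hypothetical stand-ins for the isolated
+ * loads/stores, and the initial miss test via the status register is
+ * simplified to a data compare):
+ *
+ *	unsigned long probe_cache_size(void)
+ *	{
+ *		unsigned long size;
+ *
+ *		write32(KSEG0, 0xa5a5a5a5);
+ *		if (read32(KSEG0) != 0xa5a5a5a5)	// miss -> no cache
+ *			return 0;
+ *		for (size = MINCACHE; size <= MAXCACHE; size <<= 1)
+ *			write32(KSEG0 + size, 0);	// clear boundaries
+ *		write32(KSEG0, ~0UL);			// marker at base
+ *		for (size = MINCACHE; size <= MAXCACHE; size <<= 1)
+ *			if (read32(KSEG0 + size) != 0)	// aliases base
+ *				return size;		// -> cache size
+ *		return 0;
+ *	}
+ */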
+NESTED(cache_init,8,ra)
+ subu sp, sp, 24 # keep sp aligned on 8 words
+ sw ra, 20(sp) # push return address on stack
+ sw s0, 16(sp) # save s0 on stack
+ mfc0 s0, CP0_STATUS # save sr
+ mtc0 zero, CP0_STATUS
+ nop
+ .set reorder
+ jal cache_size
+ sw v0, mips_dcache_size
+ li v0, ST0_CE # swap caches
+ .set noreorder
+ mtc0 v0, CP0_STATUS
+ nop
+ jal cache_size
+ nop
+ sw v0, mips_icache_size
+ mtc0 zero, CP0_STATUS # swap back caches
+ nop
+ mtc0 s0, CP0_STATUS # restore sr
+ nop
+ .set reorder
+ jal cache_flush
+ lw s0, 16(sp) # restore s0
+ lw ra, 20(sp)
+ addu sp, sp, 24
+ j ra
+ nop
+END(cache_init)
+
+LEAF(cache_size)
+ .set noreorder
+ mfc0 t0, CP0_STATUS # save current SR
+ nop
+ and t0, ~ST0_SR # do not clear parity error bit
+ or v0, t0, ST0_DE # isolate cache
+ mtc0 v0, CP0_STATUS
+ nop
+
+ move v0, zero
+ li v1, 0xa5a5a5a5
+ nop
+ sw v1, KSEG0 # try to write in cache
+ lw t1, KSEG0 # try to read from cache
+ nop
+ mfc0 t2, CP0_STATUS
+ nop
+ .set reorder
+ and t2, (1<<19)
+ bne t2, zero, 3f # cache miss, must be no cache
+ bne v1, t1, 3f # data not equal -> no cache
+
+/*
+ * Clear cache boundaries to a known state.
+ */
+ li v0, MINCACHE
+1:
+ sw zero, KSEG0(v0)
+ sll v0, 1
+ ble v0, MAXCACHE, 1b
+
+ li v0, -1
+ sw v0, KSEG0(zero) # store marker in cache
+ li v0, MINCACHE # MIN cache size
+2:
+ lw v1, KSEG0(v0) # look for marker
+ bne v1, zero, 3f # found marker
+ sll v0, 1 # cache size * 2
+ ble v0, MAXCACHE, 2b # keep looking
+ move v0, zero # must be no cache
+ .set noreorder
+3:
+ mtc0 t0, CP0_STATUS # restore sr
+ nop
+ j ra
+ nop
+ .set reorder
+END(cache_size)
+
+LEAF(cache_flush)
+ lw t1, mips_icache_size
+ lw t2, mips_dcache_size
+ .set noreorder
+ mfc0 t3, CP0_STATUS # save sr
+ nop
+ and t3, ~ST0_SR # do not clear PE
+ beq t1, zero, check_dcache # if no icache, check dcache
+ nop
+ li v0, ST0_DE | ST0_CE # isolate and swap
+ nop
+ mtc0 v0, CP0_STATUS
+ nop
+ li t0, KSEG0
+ .set reorder
+ or t1, t0, t1
+1:
+ sb zero, 0(t0)
+ sb zero, 4(t0)
+ sb zero, 8(t0)
+ sb zero, 12(t0)
+ sb zero, 16(t0)
+ sb zero, 20(t0)
+ sb zero, 24(t0)
+ addu t0, 32
+ sb zero, -4(t0)
+ bne t0, t1, 1b # continue until done
+
+check_dcache:
+ li v0, ST0_DE
+ nop
+ .set noreorder
+ mtc0 v0, CP0_STATUS
+ nop
+ beq t2, zero, flush_done # if no dcache, done
+ .set reorder
+ li t0, KSEG0
+ or t1, t0, t2
+1:
+ sb zero, 0(t0)
+ sb zero, 4(t0)
+ sb zero, 8(t0)
+ sb zero, 12(t0)
+ sb zero, 16(t0)
+ sb zero, 20(t0)
+ sb zero, 24(t0)
+ addu t0, 32
+ sb zero, -4(t0)
+ bne t0, t1, 1b # continue until done
+
+ .set noreorder
+flush_done:
+ mtc0 t3, CP0_STATUS # restore old sr
+ nop
+ j ra
+ nop
+ .set reorder
+END(cache_flush)
+
+
diff --git a/arch/mips/mips1/cpu.c b/arch/mips/mips1/cpu.c
new file mode 100644
index 000000000..fd41ce15b
--- /dev/null
+++ b/arch/mips/mips1/cpu.c
@@ -0,0 +1,91 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 Ralf Baechle
+ */
+#include <linux/sched.h>
+
+#include <asm/cache.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+extern asmlinkage void mips1_cacheflush(void *addr, int nbytes, unsigned int flags);
+
+void (*mips_cache_init)(void);
+
+static void
+mips1_cache_init(void)
+{
+ cacheflush = mips1_cacheflush;
+}
+
+void (*switch_to_user_mode)(struct pt_regs *regs);
+
+static void
+mips1_switch_to_user_mode(struct pt_regs *regs)
+{
+ regs->cp0_status = regs->cp0_status | ST0_KUC;
+}
+
+unsigned long (*thread_saved_pc)(struct thread_struct *t);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+static unsigned long mips1_thread_saved_pc(struct thread_struct *t)
+{
+ return ((unsigned long *)(unsigned long)t->reg29)[13];
+}
+
+unsigned long (*get_wchan)(struct task_struct *p);
+
+static unsigned long mips1_get_wchan(struct task_struct *p)
+{
+ /*
+ * This one depends on the frame size of schedule(). Do a
+ * "disass schedule" in gdb to find the frame size. Also, the
+ * code assumes that sleep_on() follows immediately after
+ * interruptible_sleep_on() and that add_timer() follows
+ * immediately after sleep_on(). Ugly, isn't it?
+ * Maybe adding a wchan field to task_struct would be better,
+ * after all...
+ */
+ unsigned long schedule_frame;
+ unsigned long pc;
+
+ pc = thread_saved_pc(&p->tss);
+ if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
+ schedule_frame = ((unsigned long *)(long)p->tss.reg30)[13];
+ return ((unsigned long *)schedule_frame)[11];
+ }
+ return pc;
+}
+
+void (*pgd_init)(unsigned long page);
+void (*copy_page)(unsigned long to, unsigned long from);
+asmlinkage void (*restore_fp_context)(struct sigcontext *sc);
+asmlinkage void (*save_fp_context)(struct sigcontext *sc);
+
+void
+mips1_cpu_init(void)
+{
+ extern void mips1_cache_init(void);
+ extern void mips1_pgd_init(unsigned long page);
+ extern void mips1_clear_page(unsigned long page);
+ extern void mips1_copy_page(unsigned long to, unsigned long from);
+ extern asmlinkage void mips1_restore_fp_context(struct sigcontext *sc);
+ extern asmlinkage void mips1_save_fp_context(struct sigcontext *sc);
+
+ mips_cache_init = mips1_cache_init;
+ pgd_init = mips1_pgd_init;
+ switch_to_user_mode = mips1_switch_to_user_mode;
+ thread_saved_pc = mips1_thread_saved_pc;
+ get_wchan = mips1_get_wchan;
+ clear_page = mips1_clear_page;
+ copy_page = mips1_copy_page;
+ restore_fp_context = mips1_restore_fp_context;
+ save_fp_context = mips1_save_fp_context;
+}
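+
+/*
+ * How this is meant to be used at boot, sketched (the caller and the
+ * CPU probe are hypothetical; only mips1_cpu_init() is defined here):
+ *
+ *	void cpu_init(void)
+ *	{
+ *		if (cpu_is_r3000())		// assumed boot-time probe
+ *			mips1_cpu_init();	// install the MIPS I hooks
+ *		mips_cache_init();		// now safe to call via pointer
+ *	}
+ */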
diff --git a/arch/mips/mips1/fp-context.S b/arch/mips/mips1/fp-context.S
new file mode 100644
index 000000000..6ff3c6be3
--- /dev/null
+++ b/arch/mips/mips1/fp-context.S
@@ -0,0 +1,159 @@
+/*
+ * Save/restore floating point context for signal handlers.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 by Ralf Baechle
+ */
+#include <asm/asm.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/sigcontext.h>
+
+#define SWC1(r,m) \
+7: swc1 r,m; \
+ .section __ex_table,"a"; \
+ PTR 7b,bad_stack; \
+ .text
+
+#define SW(r,m) \
+7: sw r,m; \
+ .section __ex_table,"a"; \
+ PTR 7b,bad_stack; \
+ .text
+
+#define LWC1(r,m) \
+7: lwc1 r,m; \
+ .section __ex_table,"a"; \
+ PTR 7b,bad_stack; \
+ .text
+
+#define LW(r,m) \
+7: lw r,m; \
+ .section __ex_table,"a"; \
+ PTR 7b,bad_stack; \
+ .text
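+
+/*
+ * Each macro above emits a (faulting insn, fixup) pair into __ex_table.
+ * Conceptually the fault handler resolves it like this C sketch (the
+ * names are illustrative, not the kernel's exact interface):
+ *
+ *	struct ex_entry { unsigned long insn, fixup; };
+ *
+ *	unsigned long search_ex_table(unsigned long epc,
+ *				      struct ex_entry *s, struct ex_entry *e)
+ *	{
+ *		for (; s < e; s++)
+ *			if (s->insn == epc)
+ *				return s->fixup;	// here: bad_stack
+ *		return 0;			// genuine fault
+ *	}
+ */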
+
+ .set noreorder
+/*
+ * Save floating point context
+ */
+LEAF(mips1_save_fp_context)
+ mfc0 t0,CP0_STATUS
+ sll t0,t0,2
+ bgez t0,1f
+ nop # delay slot
+ cfc1 t0,fcr31
+ /*
+ * Store the 32 floating point registers, one per 64-bit save slot
+ */
+ SWC1 ($f0,(SC_FPREGS+0)(a0))
+ SWC1 ($f1,(SC_FPREGS+8)(a0))
+ SWC1 ($f2,(SC_FPREGS+16)(a0))
+ SWC1 ($f3,(SC_FPREGS+24)(a0))
+ SWC1 ($f4,(SC_FPREGS+32)(a0))
+ SWC1 ($f5,(SC_FPREGS+40)(a0))
+ SWC1 ($f6,(SC_FPREGS+48)(a0))
+ SWC1 ($f7,(SC_FPREGS+56)(a0))
+ SWC1 ($f8,(SC_FPREGS+64)(a0))
+ SWC1 ($f9,(SC_FPREGS+72)(a0))
+ SWC1 ($f10,(SC_FPREGS+80)(a0))
+ SWC1 ($f11,(SC_FPREGS+88)(a0))
+ SWC1 ($f12,(SC_FPREGS+96)(a0))
+ SWC1 ($f13,(SC_FPREGS+104)(a0))
+ SWC1 ($f14,(SC_FPREGS+112)(a0))
+ SWC1 ($f15,(SC_FPREGS+120)(a0))
+ SWC1 ($f16,(SC_FPREGS+128)(a0))
+ SWC1 ($f17,(SC_FPREGS+136)(a0))
+ SWC1 ($f18,(SC_FPREGS+144)(a0))
+ SWC1 ($f19,(SC_FPREGS+152)(a0))
+ SWC1 ($f20,(SC_FPREGS+160)(a0))
+ SWC1 ($f21,(SC_FPREGS+168)(a0))
+ SWC1 ($f22,(SC_FPREGS+176)(a0))
+ SWC1 ($f23,(SC_FPREGS+184)(a0))
+ SWC1 ($f24,(SC_FPREGS+192)(a0))
+ SWC1 ($f25,(SC_FPREGS+200)(a0))
+ SWC1 ($f26,(SC_FPREGS+208)(a0))
+ SWC1 ($f27,(SC_FPREGS+216)(a0))
+ SWC1 ($f28,(SC_FPREGS+224)(a0))
+ SWC1 ($f29,(SC_FPREGS+232)(a0))
+ SWC1 ($f30,(SC_FPREGS+240)(a0))
+ SWC1 ($f31,(SC_FPREGS+248)(a0))
+ SW (t0,SC_FPC_CSR(a0))
+ cfc1 t0,$0 # implementation/version
+ jr ra
+ .set nomacro
+	SW	(t0,SC_FPC_EIR(a0))	# delay slot
+ .set macro
+
+1: jr ra
+ .set nomacro
+ nop # delay slot
+ .set macro
+ END(mips1_save_fp_context)
+
+/*
+ * Restore fpu state:
+ * - fp gp registers
+ * - cp1 status/control register
+ *
+ * We base the decision which registers to restore from the signal stack
+ * frame on the current content of c0_status, not on the content of the
+ * stack frame which might have been changed by the user.
+ */
+LEAF(mips1_restore_fp_context)
+ mfc0 t0,CP0_STATUS
+ sll t0,t0,2
+ bgez t0,1f
+ nop # delay slot
+ bgez t0,1f
+ LW (t0,SC_FPC_CSR(a0)) # delay slot
+ /*
+	 * Restore the 32 floating point registers only
+	 * when the FPU is enabled in the cp0 status register.
+ */
+ LWC1 ($f0,(SC_FPREGS+0)(a0))
+ LWC1 ($f1,(SC_FPREGS+8)(a0))
+ LWC1 ($f2,(SC_FPREGS+16)(a0))
+ LWC1 ($f3,(SC_FPREGS+24)(a0))
+ LWC1 ($f4,(SC_FPREGS+32)(a0))
+ LWC1 ($f5,(SC_FPREGS+40)(a0))
+ LWC1 ($f6,(SC_FPREGS+48)(a0))
+ LWC1 ($f7,(SC_FPREGS+56)(a0))
+ LWC1 ($f8,(SC_FPREGS+64)(a0))
+ LWC1 ($f9,(SC_FPREGS+72)(a0))
+ LWC1 ($f10,(SC_FPREGS+80)(a0))
+ LWC1 ($f11,(SC_FPREGS+88)(a0))
+ LWC1 ($f12,(SC_FPREGS+96)(a0))
+ LWC1 ($f13,(SC_FPREGS+104)(a0))
+ LWC1 ($f14,(SC_FPREGS+112)(a0))
+ LWC1 ($f15,(SC_FPREGS+120)(a0))
+ LWC1 ($f16,(SC_FPREGS+128)(a0))
+ LWC1 ($f17,(SC_FPREGS+136)(a0))
+ LWC1 ($f18,(SC_FPREGS+144)(a0))
+ LWC1 ($f19,(SC_FPREGS+152)(a0))
+ LWC1 ($f20,(SC_FPREGS+160)(a0))
+ LWC1 ($f21,(SC_FPREGS+168)(a0))
+ LWC1 ($f22,(SC_FPREGS+176)(a0))
+ LWC1 ($f23,(SC_FPREGS+184)(a0))
+ LWC1 ($f24,(SC_FPREGS+192)(a0))
+ LWC1 ($f25,(SC_FPREGS+200)(a0))
+ LWC1 ($f26,(SC_FPREGS+208)(a0))
+ LWC1 ($f27,(SC_FPREGS+216)(a0))
+ LWC1 ($f28,(SC_FPREGS+224)(a0))
+ LWC1 ($f29,(SC_FPREGS+232)(a0))
+ LWC1 ($f30,(SC_FPREGS+240)(a0))
+ LWC1 ($f31,(SC_FPREGS+248)(a0))
+ jr ra
+ .set nomacro
+ ctc1 t0,fcr31 # delay slot
+ .set macro
+
+1: jr ra
+ .set nomacro
+ nop # delay slot
+ .set macro
+ END(mips1_restore_fp_context)
diff --git a/arch/mips/mips1/memcpy.S b/arch/mips/mips1/memcpy.S
new file mode 100644
index 000000000..9685fa8df
--- /dev/null
+++ b/arch/mips/mips1/memcpy.S
@@ -0,0 +1,206 @@
+/*
+ * arch/mips/mips1/memcpy.S
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996 by Ralf Baechle
+ *
+ * Less stupid memcpy/user_copy implementation for 32 bit MIPS CPUs.
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+
+#define BLOCK_SIZE 16
+
+#define EX(addr,handler) \
+ .section __ex_table,"a"; \
+ PTR addr, handler; \
+ .text
+#define UEX(addr,handler) \
+ EX(addr,handler); \
+ EX(addr+4,handler)
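+
+/*
+ * UEX() registers two fixups because "usw" is an assembler macro that
+ * expands to a swl/swr pair, i.e. two instructions that can each fault.
+ * The effect of one unaligned store, modeled in C (byte stores stand in
+ * for swl/swr; big-endian shown, illustration only):
+ *
+ *	void store_unaligned(unsigned char *p, unsigned int w)
+ *	{
+ *		p[0] = w >> 24;
+ *		p[1] = w >> 16;
+ *		p[2] = w >> 8;
+ *		p[3] = w;
+ *	}
+ */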
+
+ .set noreorder
+ .set noat
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Bad. We can't fix the alignment for both address parts.
+ * Align the source address and copy slowly ...
+ */
+not_even_the_same_alignment:
+ LONG_SUBU v1,zero,a1
+	andi	v1,3
+ sltu t0,v0,v1
+ MOVN(v1,v0,t0)
+ beqz v1,align4 # -> finished
+ LONG_ADDU v1,a0 # delay slot
+1: lb $1,(a1)
+ EX(1b, fault)
+ LONG_ADDIU a1,1
+2: sb $1,(a0)
+ EX(2b, fault)
+ LONG_ADDIU a0,1
+ bne a0,v1,1b
+ LONG_SUBU v0,1 # delay slot
+
+/*
+ * Ok. We've fixed the alignment of the copy src for this case.
+ * Now let's copy in the usual BLOCK_SIZE byte blocks using unaligned
+ * stores.
+ * XXX We should align the destination address instead; then a fault in
+ * __copy_user never leaves an only partially modified destination word
+ * to deal with.
+ */
+ ori v1,v0,BLOCK_SIZE-1
+ xori v1,BLOCK_SIZE-1
+ beqz v1,copy_left_over
+ nop # delay slot
+ LONG_SUBU v0,v1
+ LONG_ADDU v1,a0
+
+1: lw t0,(a1) # Can cause tlb fault
+ EX(1b, fault)
+2: lw t1,4(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: lw t2,8(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: lw t3,12(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: usw t0,(a0) # Can cause tlb faults
+ UEX(2b, fault)
+2: usw t1,4(a0) # Can cause tlb faults
+ UEX(2b, fault_plus_4)
+2: usw t2,8(a0) # Can cause tlb faults
+ UEX(2b, fault_plus_8)
+2: usw t3,12(a0) # Can cause tlb faults
+ UEX(2b, fault_plus_12)
+ LONG_ADDIU a0,BLOCK_SIZE
+ bne a0,v1,1b
+ LONG_ADDIU a1,BLOCK_SIZE # delay slot
+9:
+ b copy_left_over # < BLOCK_SIZE bytes left
+ nop # delay slot
+
+/* ---------------------------------------------------------------------- */
+
+not_w_aligned:
+/*
+ * Ok, src or destination are not word (4-byte) aligned.
+ * Try to fix that. Do both addresses at least have the same alignment?
+ */
+ xor t0,a0,a1
+ andi t0,3
+ bnez t0,not_even_the_same_alignment
+ nop # delay slot
+
+/*
+ * Ok, we can fix the alignment for both operands and go back to the
+ * fast path. We have to copy at least one byte, on average 3 bytes
+ * bytewise.
+ */
+ LONG_SUBU v1,zero,a0
+ andi v1,3
+ sltu t0,v0,v1
+ MOVN(v1,v0,t0)
+ beqz v1,align4 # -> finished
+ LONG_ADDU v1,a0 # delay slot
+1: lb $1,(a1)
+ EX(1b, fault)
+ LONG_ADDIU a1,1
+2: sb $1,(a0)
+ EX(2b, fault)
+ LONG_ADDIU a0,1
+ bne a0,v1,1b
+ LONG_SUBU v0,1 # delay slot
+ b align4
+ nop # delay slot
+
+/* ---------------------------------------------------------------------- */
+
+LEAF(__copy_user)
+ or t1,a0,a1
+ andi t1,3
+ bnez t1,not_w_aligned
+ move v0,a2 # delay slot
+
+align4:
+ ori v1,v0,BLOCK_SIZE-1
+ xori v1,BLOCK_SIZE-1
+ beqz v1,copy_left_over
+ nop # delay slot
+ LONG_SUBU v0,v1
+ LONG_ADDU v1,a0
+
+1: lw t0,(a1) # Can cause tlb fault
+ EX(1b, fault)
+2: lw t1,4(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: lw t2,8(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: lw t3,12(a1) # Can cause tlb fault
+ EX(2b, fault)
+2: sw t0,(a0) # Can cause tlb fault
+ EX(2b, fault)
+2: sw t1,4(a0) # Can cause tlb fault
+ EX(2b, fault_plus_4)
+2: sw t2,8(a0) # Can cause tlb fault
+ EX(2b, fault_plus_8)
+2: sw t3,12(a0) # Can cause tlb fault
+ EX(2b, fault_plus_12)
+ LONG_ADDIU a0,BLOCK_SIZE
+ bne a0,v1,1b
+ LONG_ADDIU a1,BLOCK_SIZE # delay slot
+9:
+
+/*
+ * XXX Tune me ...
+ */
+copy_left_over:
+ beqz v0,3f
+ nop # delay slot
+1: lb $1,(a1)
+ EX(1b, fault)
+ LONG_ADDIU a1,1
+2: sb $1,(a0)
+ EX(2b, fault)
+ LONG_SUBU v0,1
+ bnez v0,1b
+ LONG_ADDIU a0,1
+3: jr ra
+ nop # delay slot
+
+ END(__copy_user)
+ .set at
+ .set reorder
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * Access fault. The number of bytes not copied is in v0. If the access
+ * fault happened in an unrolled loop we have to correct v0 accordingly,
+ * then return.
+ */
+
+fault: jr ra
+fault_plus_4: LONG_ADDIU v0,4
+ jr ra
+fault_plus_8: LONG_ADDIU v0,8
+ jr ra
+fault_plus_12: LONG_ADDIU v0,12
+ jr ra
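+
+/*
+ * In C terms the contract implemented here is:
+ *
+ *	// returns the number of bytes NOT copied; 0 means full success
+ *	unsigned long __copy_user(void *to, const void *from, unsigned long n);
+ *
+ * so a (hypothetical) caller checks it like
+ *
+ *	if (__copy_user(dst, src, len))
+ *		return -EFAULT;		// partial copy only
+ */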
+
+/* ---------------------------------------------------------------------- */
+
+/*
+ * For now we use __copy_user for __memcpy, too. This is efficient (one
+ * instruction penalty) and smaller, but adds unwanted error checking we don't
+ * need. Hopefully this doesn't hide any bugs. The memcpy() wrapper in
+ * <asm/string.h> takes care of the return value in a way GCC can optimize.
+ */
+ .globl __memcpy
+__memcpy = __copy_user
diff --git a/arch/mips/mips1/memset.S b/arch/mips/mips1/memset.S
new file mode 100644
index 000000000..5cfb5d9a7
--- /dev/null
+++ b/arch/mips/mips1/memset.S
@@ -0,0 +1,27 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 by Ralf Baechle
+ *
+ * Generic memset for all MIPS CPUs.
+ * This is time critical. Hear it crying "optimize me" ...
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+LEAF(__generic_memset_b)
+__generic_memset_dw = __generic_memset_b
+ .set noreorder
+ beqz a2,2f
+ LONG_ADDU a3,a0,a2
+ .set reorder
+ LONG_SUBU a3,1
+1: sb a1,(a0)
+ .set noreorder
+ bne a0,a3,1b
+ LONG_ADDIU a0,1
+ .set reorder
+2: jr ra
+ END(__generic_memset_b)
diff --git a/arch/mips/mips1/pagetables.c b/arch/mips/mips1/pagetables.c
new file mode 100644
index 000000000..22419d1c9
--- /dev/null
+++ b/arch/mips/mips1/pagetables.c
@@ -0,0 +1,86 @@
+/*
+ * 32 bit MIPS specific page handling.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 Ralf Baechle
+ */
+#include <linux/mm.h>
+#include <asm/cache.h>
+#include <asm/mipsconfig.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+void (*pgd_init)(unsigned long page);
+
+/*
+ * Initialize new page directory with pointers to invalid ptes
+ */
+void mips1_pgd_init(unsigned long page)
+{
+ unsigned long dummy1, dummy2;
+
+ /*
+ * The plain and boring version for the R3000. No cache flushing
+ * stuff is needed since the R3000 has physical caches.
+ */
+ __asm__ __volatile__(
+ ".set\tnoreorder\n"
+ "1:\tsw\t%2,(%0)\n\t"
+ "sw\t%2,4(%0)\n\t"
+ "sw\t%2,8(%0)\n\t"
+ "sw\t%2,12(%0)\n\t"
+ "sw\t%2,16(%0)\n\t"
+ "sw\t%2,20(%0)\n\t"
+ "sw\t%2,24(%0)\n\t"
+ "sw\t%2,28(%0)\n\t"
+ "subu\t%1,1\n\t"
+ "bnez\t%1,1b\n\t"
+ "addiu\t%0,32\n\t"
+ ".set\treorder"
+ :"=r" (dummy1),
+ "=r" (dummy2)
+ :"r" (((unsigned long) invalid_pte_table /* - PAGE_OFFSET */ ) |
+ _PAGE_TABLE),
+ "0" (page),
+ "1" (PAGE_SIZE/(sizeof(pmd_t)*8)));
+}
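+
+/*
+ * Plain C equivalent of the unrolled inline assembly above, for
+ * reference (same stores, without the 8-way unrolling):
+ *
+ *	void pgd_init_c(unsigned long page)
+ *	{
+ *		unsigned long *p = (unsigned long *) page;
+ *		unsigned long entry = ((unsigned long) invalid_pte_table) |
+ *		                      _PAGE_TABLE;
+ *		int i;
+ *
+ *		for (i = 0; i < PAGE_SIZE / sizeof(pmd_t); i++)
+ *			p[i] = entry;
+ *	}
+ */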
+
+void (*clear_page)(unsigned long page);
+
+/*
+ * To do: cache magic ...
+ */
+void mips1_clear_page(unsigned long page)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "addiu\t$1,%0,%2\n"
+ "1:\tsw\t$0,(%0)\n\t"
+ "sw\t$0,4(%0)\n\t"
+ "sw\t$0,8(%0)\n\t"
+ "sw\t$0,12(%0)\n\t"
+ "addiu\t%0,32\n\t"
+ "sw\t$0,-16(%0)\n\t"
+ "sw\t$0,-12(%0)\n\t"
+ "sw\t$0,-8(%0)\n\t"
+ "bne\t$1,%0,1b\n\t"
+ "sw\t$0,-4(%0)\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=r" (page)
+ :"0" (page),
+ "I" (PAGE_SIZE)
+ :"$1","memory");
+}
+
+void (*copy_page)(unsigned long to, unsigned long from);
+
+void mips1_copy_page(unsigned long to, unsigned long from)
+{
+ memcpy((void *) to,
+ (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE);
+}
diff --git a/arch/mips/mips1/r3000.S b/arch/mips/mips1/r3000.S
new file mode 100644
index 000000000..25529d9a0
--- /dev/null
+++ b/arch/mips/mips1/r3000.S
@@ -0,0 +1,1125 @@
+/*
+ * arch/mips/kernel/r3000.S
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics, 1996 Paul M. Antoine
+ * Written by Ralf Baechle and Andreas Busse
+ * Modified for R3000 by Paul M. Antoine
+ *
+ * Additional R3000 support by Didier Frick <dfrick@dial.eunet.ch>
+ * for ACN S.A, Copyright (C) 1996 by ACN S.A
+ *
+ * This file contains most of the R3000/R3000A specific routines, which would
+ * probably work on the R2000 (if anyone's interested!).
+ *
+ * This code is evil magic. Read appendix F (coprocessor 0 hazards) of
+ * all R3000/MIPS manuals and remember that MIPS stands for "Microprocessor
+ * without Interlocked Pipeline Stages" before you even think about changing
+ * this code!
+ *
+ * Then remember that some bugs here are due to my not having completely
+ * converted the R4xx0 code to R3000 and that the R4xx0 CPU's are more
+ * forgiving than the R3000/A!! All that, and the fact that I'm not up to
+ * 'guru' level on R3000 - PMA.
+ * (Paul, I replaced all occurrences of TLBMAPHI with %HI(TLBMAP) -- Ralf)
+ */
+#include <linux/config.h>
+
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsconfig.h>
+#include <asm/mipsregs.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+#ifdef __SMP__
+#error "Fix this for SMP!"
+#else
+#define current current_set
+#endif
+
+/*
+
+FIXME:
+ - First of all, this really screams for a light version of SAVE_ALL
+ and RESTORE_ALL, saving and restoring only the context actually
+ needed in this case. I'm afraid it's necessary to save some context
+ on the stack because on the R3000 tlb exceptions can nest in some
+ cases where they wouldn't on the R4000.
+
+ - The TLB handling code should be completely rewritten for the R3000
+ because too many things are different from the R4000.
+ For instance, the CP0_CONTEXT register has a different format
+ and cannot be reused with the current setup.
+ I really had to do a fast hack to get it to work, but no time to do
+ it cleanly for now, sorry.
+ We also introduced a tlb_softindex variable to point to the next
+ * TLB entry to write. This variable is incremented every time we add a
+ new entry to the TLB. We did this because we felt that using the
+ CP0_RANDOM register could be unsafe in some cases (like trashing
+ the TLB entry for the handler's return address in user space).
+ It's very possible that we are wrong on this one, but we had so
+ much trouble with this TLB thing that we chose the safe side.
+*/
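+
+/*
+ * The tlb_softIndex update used by the handlers below, as a C sketch
+ * (the index field sits in bits 8 and up of CP0_INDEX; entries 0-7 are
+ * treated as wired):
+ *
+ *	unsigned long tlb_softindex_next(unsigned long idx)
+ *	{
+ *		idx += 1 << 8;			// advance one TLB slot
+ *		if ((idx & (63 << 8)) == 0)	// wrapped past entry 63
+ *			idx = 8 << 8;		// skip the wired entries
+ *		return idx;
+ *	}
+ */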
+
+#define CONF_DEBUG_TLB
+#undef CONFIG_TLB_SHUTDOWN
+#undef TLB_LOG
+
+MODE_ALIAS = 0x00e0 # cachable
+
+ .text
+ .set mips1
+ .set noreorder
+
+ .align 5
+ NESTED(handle_tlbl, FR_SIZE, sp)
+ .set noat
+ /*
+ * Check whether this is a refill or an invalid exception
+ */
+ mfc0 k0,CP0_BADVADDR
+ nop
+ mfc0 k1,CP0_ENTRYHI
+ ori k0,0xfff # clear ASID...
+ xori k0,0xfff # in BadVAddr
+ andi k1,0xfc0 # get current ASID
+ or k0,k1 # make new entryhi
+ mfc0 k1,CP0_ENTRYHI
+ nop
+ mtc0 k0,CP0_ENTRYHI
+ nop # for pipeline
+ tlbp
+ nop # for pipeline
+ mfc0 k0,CP0_INDEX
+ nop
+ mtc0 k1,CP0_ENTRYHI # delay slot
+ bgez k0,invalid_tlbl # bad addr in c0_badvaddr
+ nop
+
+
+ mfc0 k0,CP0_BADVADDR
+ lui k1,0xe000
+ subu k0,k0,k1
+ bgez k0,1f
+ nop
+ j real_utlb
+ nop
+
+1:
+
+
+#ifdef CONF_DEBUG_TLB
+ /*
+ * OK, this is a double fault. Let's see whether this is
+ * due to an invalid entry in the page_table.
+ */
+
+ lw k0, tlbl_lock
+ nop
+ bnez k0,1f
+ li k1,1
+ la k0, tlbl_lock
+ sw k1,(k0)
+
+
+ mfc0 k0,CP0_BADVADDR
+ lui k1,58368
+ srl k0,12 # get PFN?
+ sll k0,2
+ addu k0,k1
+ lw k1,(k0)
+ nop
+ andi k1,(_PAGE_PRESENT|_PAGE_ACCESSED)
+ bnez k1,reload_pgd_entries
+ nop # delay slot
+
+1:
+ SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp)
+
+ PRINT("Double fault caused by invalid entries in pgd:\n")
+ mfc0 a1,CP0_BADVADDR
+ nop
+ PRINT("Double fault address : %08lx\n")
+ mfc0 a1,CP0_EPC
+ nop
+ PRINT("c0_epc : %08lx\n")
+ jal show_regs
+ move a0,sp
+ jal dump_tlb_nonwired
+ nop
+ mfc0 a0,CP0_BADVADDR
+ jal dump_list_current
+ nop
+ .set noat
+ STI
+ .set at
+ PANIC("Corrupted pagedir")
+ .set noat
+
+reload_pgd_entries:
+#endif /* CONF_DEBUG_TLB */
+
+ /*
+ * Load missing pair of entries from the pgd and return.
+ */
+
+ mfc0 k0,CP0_BADVADDR
+ nop
+ lui k1,58368
+
+ srl k0,12
+ sll k0,2
+ addu k0,k1
+ lw k0,(k0)
+ nop
+ mtc0 k0,CP0_ENTRYLO0
+
+ la k0, tlb_softIndex
+ lw k1,(k0)
+ nop
+ mtc0 k1,CP0_INDEX
+ nop
+ addu k1,(1<<8)
+ andi k0,k1,(63<<8)
+ bnez k0, 1f
+ nop
+ li k1,(8<<8)
+1:
+ la k0, tlb_softIndex
+ sw k1,(k0)
+
+
+ nop
+ nop
+ nop # for pipeline
+ tlbwi
+ nop # for pipeline
+ nop
+ nop
+
+
+#ifdef CONF_DEBUG_TLB
+ la k0, tlbl_lock
+ sw zero,(k0)
+#endif
+ mfc0 k0,CP0_EPC
+ nop
+ jr k0
+ rfe
+ nop
+
+
+ /*
+ * Handle invalid exception
+ *
+ * There are two possible causes for an invalid (tlbl)
+ * exception:
+ * 1) pages with present bit set but the valid bit clear
+	 * 2) nonexistent pages
+ * Case one needs fast handling, therefore don't save
+ * registers yet.
+ *
+ * k0 contains c0_index.
+ */
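+	/*
+	 * The fast path for case 1, sketched in C (pte_of() and
+	 * write_tlb_entry() are illustrative stand-ins for the lookup
+	 * and tlbwi sequence below):
+	 *
+	 *	unsigned long *pte = pte_of(badvaddr);
+	 *
+	 *	if ((*pte & (_PAGE_PRESENT|_PAGE_READ)) ==
+	 *	    (_PAGE_PRESENT|_PAGE_READ)) {
+	 *		*pte |= _PAGE_VALID | _PAGE_ACCESSED;	// case 1
+	 *		write_tlb_entry(*pte);
+	 *	} else {
+	 *		do_page_fault(regs, 0, badvaddr);	// case 2
+	 *	}
+	 */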
+invalid_tlbl:
+ SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp)
+
+#ifdef TLB_LOG
+ PRINT ("tlbl: invalid\n");
+ nop
+#endif
+ /*
+ * Test present bit in entry
+ */
+ lw s0,FR_BADVADDR(sp)
+ nop
+ srl s0,12
+ sll s0,2
+ lui k1,58368
+ addu s0,k1
+
+ lw k1,(s0)
+ nop
+ andi k1,(_PAGE_PRESENT|_PAGE_READ)
+ xori k1,(_PAGE_PRESENT|_PAGE_READ)
+ bnez k1,nopage_tlbl
+ nop
+ /*
+ * Present and read bits are set -> set valid and accessed bits
+ */
+ lw k1,(s0) # delay slot
+ nop
+ ori k1,(_PAGE_VALID|_PAGE_ACCESSED)
+ sw k1,(s0)
+
+ mtc0 k1,CP0_ENTRYLO0
+ nop
+ tlbwi
+ nop
+ nop
+
+ j return
+ nop
+
+ /*
+ * Page doesn't exist. Lots of work which is less important
+ * for speed needs to be done, so hand it all over to the
+ * kernel memory management routines.
+ */
+nopage_tlbl:
+/* SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp) */
+#ifdef TLB_LOG
+ PRINT ("nopage_tlbl\n");
+ nop
+#endif
+#ifdef CONFIG_TLB_SHUTDOWN
+ mfc0 t0,CP0_INDEX
+ sll t0,4
+ la t1,KSEG1
+ or t0,t1
+ mtc0 t0,CP0_ENTRYHI
+ mtc0 zero,CP0_ENTRYLO0
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+#endif
+
+ lw a2,FR_BADVADDR(sp)
+ li t1,-1 # not a sys call
+ sw t1,FR_ORIG_REG2(sp)
+ nop
+ STI
+ .set at
+ /*
+ * a0 (struct pt_regs *) regs
+ * a1 (unsigned long) 0 for read access
+ * a2 (unsigned long) faulting virtual address
+ */
+ move a0,sp
+ jal do_page_fault
+ li a1,0 # delay slot
+ j ret_from_sys_call
+ nop # delay slot
+ END(handle_tlbl)
+
+ .text
+ .align 5
+ NESTED(handle_tlbs, FR_SIZE, sp)
+ .set noat
+ /*
+	 * It is impossible that this is a nested reload exception.
+	 * Therefore this must be an invalid exception.
+ * Two possible cases:
+ * 1) Page exists but not dirty.
+ * 2) Page doesn't exist yet. Hand over to the kernel.
+ *
+ * Test whether present bit in entry is set
+ */
+ /* used to be dmfc0 */
+
+#ifdef CONF_DEBUG_TLB
+
+ la k0,tlbs_lock
+ lw k1,(k0)
+ nop
+ beqz k1,3f
+ nop
+ .set noat
+ SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp)
+ .set at
+ PRINT("Nested tlbs exception:\n")
+ mfc0 a1,CP0_BADVADDR
+ nop
+ PRINT("Virtual address : %08lx\n")
+ mfc0 a1,CP0_EPC
+ nop
+ PRINT("c0_epc : %08lx\n")
+ jal show_regs
+ move a0,sp
+ jal dump_tlb_nonwired
+ nop
+ mfc0 a0,CP0_BADVADDR
+ jal dump_list_current
+ nop
+ .set noat
+ STI
+ .set at
+ PANIC("Nested tlbs exception")
+
+3:
+ li k1,1
+ sw k1,(k0)
+
+#endif
+ .set noat
+ SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp)
+ .set at
+
+ mfc0 s0,CP0_BADVADDR
+
+ lui k1,58368
+ srl s0,12
+ sll s0,2
+ addu s0,k1
+ nop
+ lw k1,(s0) # may cause nested xcpt.
+ nop
+ move k0,s0
+
+ lw k1,FR_ENTRYHI(sp)
+ nop
+ mtc0 k1,CP0_ENTRYHI
+ nop
+ nop
+ tlbp # find faulting entry
+ nop
+ lw k1,(k0)
+ nop
+ andi k1,(_PAGE_PRESENT|_PAGE_WRITE)
+ xori k1,(_PAGE_PRESENT|_PAGE_WRITE)
+ bnez k1,nopage_tlbs
+ nop
+ /*
+ * Present and writable bits set: set accessed and dirty bits.
+ */
+ lw k1,(k0) # delay slot
+ nop
+ ori k1,k1,(_PAGE_ACCESSED|_PAGE_MODIFIED| \
+ _PAGE_VALID|_PAGE_DIRTY)
+ sw k1,(k0)
+ /*
+ * Now reload the entry into the TLB
+ */
+ mtc0 k1,CP0_ENTRYLO0
+ nop
+ nop
+ nop # for pipeline
+ tlbwi
+ nop # for pipeline
+#ifdef CONF_DEBUG_TLB
+ la k0,tlbs_lock
+ li k1,0
+ sw k1,(k0)
+#endif
+ j return
+ nop
+
+ /*
+ * Page doesn't exist. Lots of work which is less important
+ * for speed needs to be done, so hand it all over to the
+ * kernel memory management routines.
+ */
+nowrite_mod:
+nopage_tlbs:
+
+#ifdef CONFIG_TLB_SHUTDOWN
+ /*
+ * Remove entry so we don't need to care later
+ */
+ mfc0 k0,CP0_INDEX
+ nop
+#ifdef CONF_DEBUG_TLB
+ bgez k0,2f
+ nop
+ /*
+ * We got a tlbs exception but found no matching entry in
+ * the tlb. This should never happen. Paranoia makes us
+ * check it, though.
+ */
+ .set noat
+/* SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp) */
+ jal show_regs
+ move a0,sp
+ .set at
+ la a1,FR_BADVADDR(sp)
+ lw a1,(a1)
+ nop
+ PRINT("c0_badvaddr == %08lx\n")
+ nop
+ mfc0 a1,CP0_INDEX
+ nop
+ PRINT("c0_index == %08x\n")
+ nop
+ la a1,FR_ENTRYHI(sp)
+ lw a1,(a1)
+ nop
+ PRINT("c0_entryhi == %08x\n")
+ nop
+ jal dump_tlb_nonwired
+ nop
+ la a0,FR_BADVADDR(sp)
+ lw a0,(a0)
+ jal dump_list_current
+ nop
+
+ .set noat
+ STI
+ .set at
+ PANIC("Tlbs or tlbm exception with no matching entry in tlb")
+1: j 1b
+ nop
+2:
+#endif /* CONF_DEBUG_TLB */
+ lui k1,0xa000
+ sll k0,4
+ or k0,k1
+ xor k0,k1
+ or k0,k1 # make it a KSEG1 address
+ mtc0 k0,CP0_ENTRYHI
+ nop
+ mtc0 zero,CP0_ENTRYLO0
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+#endif /* CONFIG_TLB_SHUTDOWN */
+
+#ifdef CONF_DEBUG_TLB
+ la k0,tlbs_lock
+ li k1,0
+ sw k1,(k0)
+#endif
+ .set noat
+/* SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp) */
+ lw a2,FR_BADVADDR(sp)
+ li t1,-1
+ sw t1,FR_ORIG_REG2(sp) # not a sys call
+ nop
+ STI
+ .set at
+ /*
+ * a0 (struct pt_regs *) regs
+ * a1 (unsigned long) 1 for write access
+ * a2 (unsigned long) faulting virtual address
+ */
+ move a0,sp
+ jal do_page_fault
+ li a1,1 # delay slot
+ j ret_from_sys_call
+ nop # delay slot
+ END(handle_tlbs)
+
+ .align 5
+ NESTED(handle_mod, FR_SIZE, sp)
+ .set noat
+ /*
+ * Two possible cases:
+ * 1) Page is writable but not dirty -> set dirty and return
+ * 2) Page is not writable -> call C handler
+ */
+ /* used to be dmfc0 */
+
+ SAVE_ALL
+ REG_S sp,FR_ORIG_REG2(sp)
+
+ mfc0 s0,CP0_BADVADDR
+ nop
+
+ srl s0,12
+ sll s0,2
+ lui k1,58368
+ addu s0,k1
+ lw k1,(s0)
+ nop
+ move k0,s0
+ nop
+
+ lw k1,FR_ENTRYHI(sp)
+ nop
+ mtc0 k1,CP0_ENTRYHI
+ nop
+ tlbp
+ nop
+ lw k1,(k0)
+ nop
+ andi k1,_PAGE_WRITE
+ beqz k1,nowrite_mod
+ nop
+ /*
+ * Present and writable bits set: set accessed and dirty bits.
+ */
+ lw k1,(k0) # delay slot
+ nop
+ ori k1,(_PAGE_ACCESSED|_PAGE_DIRTY)
+ sw k1,(k0)
+ /*
+ * Now reload the entry into the tlb
+ */
+ lw k0,(k0)
+ nop
+ mtc0 k0,CP0_ENTRYLO0
+ nop
+ nop # for pipeline
+ nop
+ tlbwi
+ nop # for pipeline
+ j return
+ nop
+ END(handle_mod)
+ .set at
+
+ .set reorder
+ LEAF(tlbflush)
+
+ .set noreorder
+
+ mfc0 t3,CP0_STATUS # disable interrupts...
+ nop
+ ori t4,t3,1
+ xori t4,1
+ mtc0 t4,CP0_STATUS
+	lw	t1,mips_tlb_entries	/* mips_tlb_entries is set */
+ /* by bi_EarlySnarf() */
+ mfc0 t0,CP0_ENTRYHI
+ nop
+ mtc0 zero,CP0_ENTRYLO0
+ sll t1,t1,8
+ li t2,KSEG1
+ li t5,(7<<8) /* R3000 has 8 wired entries */
+1:
+ subu t1,(1<<8)
+ beq t1,t5,2f /* preserve wired entries */
+
+ sll t6,t1,4
+ addu t6,t2
+ mtc0 t6,CP0_ENTRYHI
+ nop
+ mtc0 t1,CP0_INDEX
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ b 1b
+ nop
+2:
+
+ mtc0 t0,CP0_ENTRYHI
+ nop
+ mtc0 t3,CP0_STATUS
+ nop
+ jr ra
+ nop
+ END(tlbflush)
+
+/*
+ * Flush a single entry from the TLB
+ *
+ * Parameters: a0 - unsigned long address
+ */
+ .set noreorder
+ LEAF(tlbflush_page)
+ /*
+	 * Step 1: Wipe out old TLB information. Not sure if
+	 * we really need this step; call it paranoia ...
+ * In order to do that we need to disable interrupts.
+ */
+ mfc0 t0,CP0_STATUS # interrupts off
+ nop
+ ori t1,t0,1
+ xori t1,1
+ mtc0 t1,CP0_STATUS
+ li t3,TLBMAP # then wait 3 cycles
+ ori t1,a0,0xfff # mask off low 12 bits
+ xori t1,0xfff
+ mfc0 t2,CP0_ENTRYHI # copy ASID into address
+ nop
+ andi t2,0xfc0 # ASID in bits 11-6
+ or t2,t1
+ mtc0 t2,CP0_ENTRYHI
+/* FIXME:
+ shouldn't we save ENTRYHI before trashing it ?
+*/
+
+ srl t4,a0,12 # wait again three cycles
+ sll t4,t4,PTRLOG
+ mtc0 zero,CP0_ENTRYLO0
+ nop
+ tlbp # now query the TLB
+ addu t3,t4 # wait another three cycles
+ ori t3,0xffff
+ xori t3,0xffff
+ mfc0 t1,CP0_INDEX
+ nop
+ blez t1,1f # No old entry?
+ nop # delay slot
+ li t5, KSEG1
+ sll t1,4
+ addu t5,t1
+ mtc0 t5,CP0_ENTRYHI
+ nop
+ nop
+ tlbwi
+ /*
+	 * But there still might be an entry for the pgd ...
+ */
+1: mtc0 t3,CP0_ENTRYHI
+ nop # wait 3 cycles
+ nop
+ nop
+ tlbp # TLB lookup
+ nop
+ nop
+ mfc0 t1,CP0_INDEX # wait 3 cycles
+ nop
+ blez t1,1f # No old entry?
+ nop
+ li t5, KSEG1
+ sll t1,4
+ addu t5,t1
+ mtc0 t5,CP0_ENTRYHI
+ nop
+ nop
+ tlbwi # gotcha ...
+ nop
+ nop
+ nop
+
+1:
+ mtc0 t0,CP0_STATUS
+ nop
+ jr ra
+ nop
+
+ END(tlbflush_page)
+
+ .set noreorder
+ LEAF(tlbload)
+ /*
+ address in a0
+ pte in a1
+ */
+
+ mfc0 t1,CP0_STATUS
+ nop
+ ori t0,t1,1
+ xori t0,1
+ mtc0 t0,CP0_STATUS
+ nop
+ mfc0 t0,CP0_ENTRYHI
+ nop
+ ori a0,0xfff
+ xori a0,0xfff
+ andi t2,t0,0xfc0
+ or a0,t2
+ mtc0 a0,CP0_ENTRYHI
+ nop
+ nop
+ mtc0 a1,CP0_ENTRYLO0
+
+ la t2, tlb_softIndex
+ lw t3,(t2)
+ nop
+ mtc0 t3, CP0_INDEX
+ nop
+ addu t3,(1<<8)
+ andi t2,t3,(63<<8)
+ bnez t2, 1f
+ nop
+ li t3,(8<<8)
+1:
+ la t2, tlb_softIndex
+ sw t3,(t2)
+
+
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+ mtc0 t0,CP0_ENTRYHI
+ nop
+ mtc0 t1,CP0_STATUS
+
+ jr ra
+ nop
+
+ END(tlbload)
+
+
+
+/*
+ * Code necessary to switch tasks on a Linux/MIPS machine.
+ * FIXME: We don't need to disable interrupts anymore.
+ */
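+/*
+ * Overall shape of resume, as a C sketch (the two-argument form and all
+ * helper names are illustrative; the real entry takes the new task in a0
+ * and the tss offset in a1):
+ *
+ *	void resume_c(struct task_struct *old, struct task_struct *new)
+ *	{
+ *		save_callee_saved(&old->tss);	// s0-s7, gp, sp, fp, ra
+ *		if (fpu_enabled(old))
+ *			save_fpu(&old->tss);
+ *		current = new;
+ *		load_root_pointer(new->tss.pg_dir);	// wired TLB_ROOT entry
+ *		tlbflush();
+ *		if (fpu_enabled(new))
+ *			restore_fpu(&new->tss);
+ *		restore_callee_saved(&new->tss);
+ *	}
+ */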
+ .align 5
+ LEAF(resume)
+ /*
+ * Current task's task_struct
+ */
+ lui t5,%hi(current)
+ lw t0,%lo(current)(t5)
+
+ /*
+ * Save status register
+ */
+ mfc0 t1,CP0_STATUS
+ addu t0,a1 # Add tss offset
+ sw t1,TOFF_CP0_STATUS(t0)
+
+/*
+ li t2,ST0_CU0
+ and t2,t1
+ beqz t2,1f
+ nop
+ sw sp,TOFF_KSP(t0)
+1:
+*/
+ /*
+ * Disable interrupts
+ */
+#ifndef __R3000__
+ ori t2,t1,0x1f
+ xori t2,0x1e
+#else
+ ori t2,t1,1
+ xori t2,1
+#endif
+ mtc0 t2,CP0_STATUS
+
+ /*
+ * Save non-scratch registers
+ * All other registers have been saved on the kernel stack
+ */
+ sw s0,TOFF_REG16(t0)
+ sw s1,TOFF_REG17(t0)
+ sw s2,TOFF_REG18(t0)
+ sw s3,TOFF_REG19(t0)
+ sw s4,TOFF_REG20(t0)
+ sw s5,TOFF_REG21(t0)
+ sw s6,TOFF_REG22(t0)
+ sw s7,TOFF_REG23(t0)
+ sw gp,TOFF_REG28(t0)
+ sw sp,TOFF_REG29(t0)
+ sw fp,TOFF_REG30(t0)
+
+ /*
+ * Save floating point state
+ */
+ sll t2,t1,2
+ bgez t2,2f
+ sw ra,TOFF_REG31(t0) # delay slot
+ sll t2,t1,5
+ bgez t2,1f
+ swc1 $f0,(TOFF_FPU+0)(t0) # delay slot
+ /*
+ * Store the 16 odd double precision registers
+ */
+ swc1 $f1,(TOFF_FPU+8)(t0)
+ swc1 $f3,(TOFF_FPU+24)(t0)
+ swc1 $f5,(TOFF_FPU+40)(t0)
+ swc1 $f7,(TOFF_FPU+56)(t0)
+ swc1 $f9,(TOFF_FPU+72)(t0)
+ swc1 $f11,(TOFF_FPU+88)(t0)
+ swc1 $f13,(TOFF_FPU+104)(t0)
+ swc1 $f15,(TOFF_FPU+120)(t0)
+ swc1 $f17,(TOFF_FPU+136)(t0)
+ swc1 $f19,(TOFF_FPU+152)(t0)
+ swc1 $f21,(TOFF_FPU+168)(t0)
+ swc1 $f23,(TOFF_FPU+184)(t0)
+ swc1 $f25,(TOFF_FPU+200)(t0)
+ swc1 $f27,(TOFF_FPU+216)(t0)
+ swc1 $f29,(TOFF_FPU+232)(t0)
+ swc1 $f31,(TOFF_FPU+248)(t0)
+
+ /*
+ * Store the 16 even double precision registers
+ */
+1: cfc1 t1,fcr31
+ swc1 $f2,(TOFF_FPU+16)(t0)
+ swc1 $f4,(TOFF_FPU+32)(t0)
+ swc1 $f6,(TOFF_FPU+48)(t0)
+ swc1 $f8,(TOFF_FPU+64)(t0)
+ swc1 $f10,(TOFF_FPU+80)(t0)
+ swc1 $f12,(TOFF_FPU+96)(t0)
+ swc1 $f14,(TOFF_FPU+112)(t0)
+ swc1 $f16,(TOFF_FPU+128)(t0)
+ swc1 $f18,(TOFF_FPU+144)(t0)
+ swc1 $f20,(TOFF_FPU+160)(t0)
+ swc1 $f22,(TOFF_FPU+176)(t0)
+ swc1 $f24,(TOFF_FPU+192)(t0)
+ swc1 $f26,(TOFF_FPU+208)(t0)
+ swc1 $f28,(TOFF_FPU+224)(t0)
+ swc1 $f30,(TOFF_FPU+240)(t0)
+ sw t1,(TOFF_FPU+256)(t0)
+
+ /*
+ * Switch current task
+ */
+2: sw a0,%lo(current)(t5)
+ addu a0,a1 # Add tss offset
+
+ /*
+ * Switch address space
+ */
+
+ /*
+ * (Choose new ASID for process)
+ * This isn't really required, but would speed up
+ * context switching.
+ */
+
+ /*
+ * Switch the root pointer
+ */
+ lw t0,TOFF_PG_DIR(a0) # get PFN
+ li t1,TLB_ROOT
+ mtc0 t1,CP0_ENTRYHI
+ nop
+ mtc0 zero,CP0_INDEX
+ ori t0,MODE_ALIAS # want cachable, dirty, valid
+ mtc0 t0,CP0_ENTRYLO0
+ nop
+ nop
+ nop
+ tlbwi # delay slot
+ nop
+
+ /*
+ * Flush tlb
+ * (probably not needed, doesn't clobber a0-a3)
+ */
+ jal tlbflush
+ nop
+
+ lw a2,TOFF_CP0_STATUS(a0)
+ nop
+
+ /*
+ * Restore fpu state:
+ * - cp0 status register bits
+ * - fp gp registers
+ * - cp1 status/control register
+ */
+ ori t1,a2,1 # pipeline magic
+ xori t1,1
+ mtc0 t1,CP0_STATUS
+ sll t0,a2,2
+ bgez t0,2f
+ sll t0,a2,5 # delay slot
+ bgez t0,1f
+ nop
+ lwc1 $f0,(TOFF_FPU+0)(a0) # delay slot
+ /*
+ * Restore the 16 odd double precision registers only
+ * when enabled in the cp0 status register.
+ */
+ lwc1 $f1,(TOFF_FPU+8)(a0)
+ lwc1 $f3,(TOFF_FPU+24)(a0)
+ lwc1 $f5,(TOFF_FPU+40)(a0)
+ lwc1 $f7,(TOFF_FPU+56)(a0)
+ lwc1 $f9,(TOFF_FPU+72)(a0)
+ lwc1 $f11,(TOFF_FPU+88)(a0)
+ lwc1 $f13,(TOFF_FPU+104)(a0)
+ lwc1 $f15,(TOFF_FPU+120)(a0)
+ lwc1 $f17,(TOFF_FPU+136)(a0)
+ lwc1 $f19,(TOFF_FPU+152)(a0)
+ lwc1 $f21,(TOFF_FPU+168)(a0)
+ lwc1 $f23,(TOFF_FPU+184)(a0)
+ lwc1 $f25,(TOFF_FPU+200)(a0)
+ lwc1 $f27,(TOFF_FPU+216)(a0)
+ lwc1 $f29,(TOFF_FPU+232)(a0)
+ lwc1 $f31,(TOFF_FPU+248)(a0)
+
+ /*
+ * Restore the 16 even double precision registers
+ * when cp1 was enabled in the cp0 status register.
+ */
+1: lw t0,(TOFF_FPU+256)(a0)
+ lwc1 $f2,(TOFF_FPU+16)(a0)
+ lwc1 $f4,(TOFF_FPU+32)(a0)
+ lwc1 $f6,(TOFF_FPU+48)(a0)
+ lwc1 $f8,(TOFF_FPU+64)(a0)
+ lwc1 $f10,(TOFF_FPU+80)(a0)
+ lwc1 $f12,(TOFF_FPU+96)(a0)
+ lwc1 $f14,(TOFF_FPU+112)(a0)
+ lwc1 $f16,(TOFF_FPU+128)(a0)
+ lwc1 $f18,(TOFF_FPU+144)(a0)
+ lwc1 $f20,(TOFF_FPU+160)(a0)
+ lwc1 $f22,(TOFF_FPU+176)(a0)
+ lwc1 $f24,(TOFF_FPU+192)(a0)
+ lwc1 $f26,(TOFF_FPU+208)(a0)
+ lwc1 $f28,(TOFF_FPU+224)(a0)
+ lwc1 $f30,(TOFF_FPU+240)(a0)
+ ctc1 t0,fcr31
+
+ /*
+ * Restore non-scratch registers
+ */
+2: lw s0,TOFF_REG16(a0)
+ lw s1,TOFF_REG17(a0)
+ lw s2,TOFF_REG18(a0)
+ lw s3,TOFF_REG19(a0)
+ lw s4,TOFF_REG20(a0)
+ lw s5,TOFF_REG21(a0)
+ lw s6,TOFF_REG22(a0)
+ lw s7,TOFF_REG23(a0)
+ lw gp,TOFF_REG28(a0)
+ lw sp,TOFF_REG29(a0)
+ lw fp,TOFF_REG30(a0)
+ lw ra,TOFF_REG31(a0)
+
+ /*
+ * Restore status register
+ */
+ lw t0,TOFF_KSP(a0)
+ nop
+ sw t0,kernelsp
+
+ mtc0 a2,CP0_STATUS # delay slot
+ jr ra
+ nop
+ END(resume)
+
+ /*
+ * Load a new root pointer into the tlb
+ */
+ .set noreorder
+ LEAF(load_pgd)
+ /*
+ * Switch the root pointer
+ */
+ mfc0 t0,CP0_STATUS
+ nop
+ ori t1,t0,1
+ xori t1,1
+ mtc0 t1,CP0_STATUS
+
+ ori a0,MODE_ALIAS
+ li t1,TLB_ROOT
+ mtc0 t1,CP0_ENTRYHI
+ nop
+ mtc0 zero,CP0_INDEX
+ nop
+ mtc0 a0,CP0_ENTRYLO0
+ nop
+ nop
+ nop
+ tlbwi
+ nop
+ nop
+ mtc0 t0,CP0_STATUS
+ nop
+ jr ra
+ nop
+ END(load_pgd)
+
+/*
+ * Some bits in the config register
+ */
+#define CONFIG_DB (1<<4)
+#define CONFIG_IB (1<<5)
+
+/*
+ * Flush instruction/data caches - FIXME: Don't know how to do this on R[236]000!
+ * (Actually most of this flushing stuff isn't needed for the R2000/R3000/R6000
+ * since these CPUs have physically indexed caches, unlike the R4000 and
+ * better, which have virtually indexed caches.)
+ *
+ * Parameters: a0 - starting address to flush
+ * a1 - size of area to be flushed
+ * a2 - which caches to be flushed
+ *
+ * FIXME: - ignores parameters in a0/a1
+ * - doesn't know about second level caches
+
+ */
+ .set noreorder
+ LEAF(mips1_cacheflush)
+
+done: j cache_flush
+ nop
+ END(mips1_cacheflush)
+
+/*
+ * Invalidate virtual addresses. - FIXME: Don't know how on R[236]000 yet!
+ * (Flushing is relativly expensive; it isn't required at all if a
+ * particular machines' chipset keeps the external cache in a state that is
+ * consistent with memory -- Ralf)
+ *
+ * Parameters: a0 - starting address to flush
+ * a1 - size of area to be flushed
+ *
+ * FIXME: - ignores parameters in a0/a1
+ * - doesn't know about second level caches
+ */
+ .set noreorder
+ LEAF(fd_cacheflush)
+ jr ra
+ nop
+ END(fd_cacheflush)
+
+/*
+ * do_syscalls calls the function in a1 with up to 7 arguments. If more
+ * than four arguments are requested, the additional arguments will
+ * be copied from the user stack pointed to by a0->reg29.
+ *
+ * a0 (struct pt_regs *) pointer to user registers
+ * a1 (syscall_t) pointer to syscall to do
+ * a2 (int) number of arguments to syscall
+ */
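+/*
+ * C sketch of the dispatch (syscall_arg() is an illustrative stand-in
+ * for the pt_regs/user-stack argument fetches below):
+ *
+ *	typedef int (*syscall_t)(int, int, int, int, int, int, int);
+ *
+ *	int do_syscalls_c(struct pt_regs *regs, syscall_t fn, int nargs)
+ *	{
+ *		int a[7] = { 0 };
+ *		int i;
+ *
+ *		for (i = 0; i < nargs; i++)
+ *			a[i] = syscall_arg(regs, i);	// args 5-7: user stack
+ *		return fn(a[0], a[1], a[2], a[3], a[4], a[5], a[6]);
+ *	}
+ *
+ * The assembly jumps into the middle of the load ladder via the "dst"
+ * table, so only the loads actually needed are executed.
+ */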
+ .set reorder
+ .text
+NESTED(do_syscalls, 32, sp)
+ subu sp,32
+ sw ra,28(sp)
+ sll a2,a2,PTRLOG
+ lw t1,dst(a2)
+ move t2,a1
+ lw t0,FR_REG29(a0) # get old user stack pointer
+ jalr t1
+
+7: lw t1,24(t0) # parameter #7 from usp
+ sw t1,24(sp)
+6: lw t1,20(t0) # parameter #6 from usp
+ sw t1,20(sp)
+5: lw t1,16(t0) # parameter #5 from usp
+ sw t1,16(sp)
+4: lw a3,FR_REG7(a0) # 4 args
+3: lw a2,FR_REG6(a0) # 3 args
+2: lw a1,FR_REG5(a0) # 2 args
+1: lw a0,FR_REG4(a0) # delay slot
+ jalr t2 # 1 args
+ lw ra,28(sp)
+ addiu sp,32
+ jr ra
+0: jalr t2 # 0 args, just pass a0
+ lw ra,28(sp)
+ addiu sp,32
+ jr ra
+ END(do_syscalls)
+
+ .rdata
+ .align PTRLOG
+dst: PTR 0b, 1b, 2b, 3b, 4b, 5b, 6b, 7b
+
+ .section __ex_table,"a"
+ PTR 7b,bad_stack
+ PTR 6b,bad_stack
+ PTR 5b,bad_stack
+
+ .data
+
+ EXPORT(tlbl_lock)
+ .word 0
+
+tlbs_lock:
+ .word 0
+
+ EXPORT(tlb_softIndex)
+ .word 0
diff --git a/arch/mips/mips1/showregs.c b/arch/mips/mips1/showregs.c
new file mode 100644
index 000000000..0be6db80e
--- /dev/null
+++ b/arch/mips/mips1/showregs.c
@@ -0,0 +1,32 @@
+/*
+ * linux/arch/mips/mips1/showregs.c
+ *
+ * Copyright (C) 1995, 1996 Ralf Baechle, Paul M. Antoine.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+
+void show_regs(struct pt_regs * regs)
+{
+ /*
+ * Saved main processor registers
+ */
+ printk("$0 : %08x %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ 0, regs->regs[1], regs->regs[2], regs->regs[3],
+ regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
+ printk("$8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11],
+ regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
+ printk("$16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19],
+ regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
+ printk("$24: %08lx %08lx %08lx %08lx %08lx %08lx\n",
+ regs->regs[24], regs->regs[25], regs->regs[28], regs->regs[29],
+ regs->regs[30], regs->regs[31]);
+
+ /*
+ * Saved cp0 registers
+ */
+ printk("epc : %08lx\nStatus: %08x\nCause : %08x\nBadVAdddr : %08x\n",
+ regs->cp0_epc, regs->cp0_status, regs->cp0_cause,regs->cp0_badvaddr);
+}