author    Ralf Baechle <ralf@linux-mips.org>	1999-09-28 22:25:29 +0000
committer Ralf Baechle <ralf@linux-mips.org>	1999-09-28 22:25:29 +0000
commit    0ae8dceaebe3659ee0c3352c08125f403e77ebca (patch)
tree      5085c389f09da78182b899d19fe1068b619a69dd /arch/mips64/kernel
parent    273767781288c35c9d679e908672b9996cda4c34 (diff)
Merge with 2.3.10.
Diffstat (limited to 'arch/mips64/kernel')
-rw-r--r--  arch/mips64/kernel/Makefile       |   2
-rw-r--r--  arch/mips64/kernel/entry.S        |  97
-rw-r--r--  arch/mips64/kernel/mips64_ksyms.c |   5
-rw-r--r--  arch/mips64/kernel/proc.c         |  71
-rw-r--r--  arch/mips64/kernel/process.c      | 167
-rw-r--r--  arch/mips64/kernel/r4k_fpu.S      | 149
-rw-r--r--  arch/mips64/kernel/setup.c        |   4
-rw-r--r--  arch/mips64/kernel/signal.c       | 654
-rw-r--r--  arch/mips64/kernel/traps.c        | 562
9 files changed, 1658 insertions(+), 53 deletions(-)
diff --git a/arch/mips64/kernel/Makefile b/arch/mips64/kernel/Makefile
index 0fada481d..f61a7139b 100644
--- a/arch/mips64/kernel/Makefile
+++ b/arch/mips64/kernel/Makefile
@@ -12,7 +12,7 @@
all: kernel.o head.o init_task.o
O_TARGET := kernel.o
-O_OBJS := entry.o setup.o
+O_OBJS := entry.o proc.o r4k_fpu.o setup.o signal.o traps.o
OX_OBJS := mips64_ksyms.o
clean:
diff --git a/arch/mips64/kernel/entry.S b/arch/mips64/kernel/entry.S
index dc8a6d4bc..297c41840 100644
--- a/arch/mips64/kernel/entry.S
+++ b/arch/mips64/kernel/entry.S
@@ -18,9 +18,9 @@
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/processor.h>
-#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/unistd.h>
@@ -102,7 +102,7 @@ LEAF(spurious_interrupt)
cfc1 a1, fcr31
li a2, ~(0x3f << 13)
and a2, a1
- ctc a2, fcr31
+ ctc1 a2, fcr31
STI
.endm
@@ -112,48 +112,51 @@ LEAF(spurious_interrupt)
KMODE
.endm
-#define __BUILD_silent(exception)
-
-#define fmt "Got %s at %08lx.\n"
-
-#define __BUILD_verbose(exception) \
- la a1,8f; \
- TEXT (#exception); \
- ld a2, PT_EPC(sp); \
- PRINT(fmt)
-#define __BUILD_count(exception) \
- .set reorder; \
- lw t0,exception_count_##exception; \
- addiu t0, 1; \
- sw t0,exception_count_##exception; \
- .set noreorder; \
- .data; \
-EXPORT(exception_count_##exception); \
- .word 0; \
- .previous;
-#define BUILD_HANDLER(exception,handler,clear,verbose) \
- .align 5; \
- NESTED(handle_##exception, PT_SIZE, sp); \
- .set noat; \
- SAVE_ALL; \
- __BUILD_clear_##clear exception; \
- .set at; \
- __BUILD_##verbose(exception); \
- jal do_##handler; \
- move a0,sp; \
- j ret_from_sys_call; \
- nop; \
- END(handle_##exception)
-
- BUILD_HANDLER(adel,ade,ade,silent) /* #4 */
- BUILD_HANDLER(ades,ade,ade,silent) /* #5 */
- BUILD_HANDLER(ibe,ibe,cli,verbose) /* #6 */
- BUILD_HANDLER(dbe,dbe,cli,silent) /* #7 */
- BUILD_HANDLER(bp,bp,sti,silent) /* #9 */
- BUILD_HANDLER(ri,ri,sti,silent) /* #10 */
- BUILD_HANDLER(cpu,cpu,sti,silent) /* #11 */
- BUILD_HANDLER(ov,ov,sti,silent) /* #12 */
- BUILD_HANDLER(tr,tr,sti,silent) /* #13 */
- BUILD_HANDLER(fpe,fpe,fpe,silent) /* #15 */
- BUILD_HANDLER(watch,watch,sti,verbose) /* #23 */
- BUILD_HANDLER(reserved,reserved,sti,verbose) /* others */
+ .macro __BUILD_silent exception
+ .endm
+
+ /* Gas tries to parse the PRINT argument as a string containing
+    string escapes and emits bogus warnings if it thinks it has
+    recognized an unknown escape code.  So make the arguments
+    start with an n and gas will believe \n is ok ... */
+ .macro __BUILD_verbose nexception
+ ld a1, PT_EPC(sp)
+ PRINT("Got \nexception at %016lx")
+ .endm
+
+ .macro __BUILD_count exception
+ .set reorder
+ ld t0,exception_count_\exception
+ daddiu t0, 1
+ sd t0,exception_count_\exception
+ .set noreorder
+ .comm exception_count_\exception, 8, 8
+ .endm
+
+ .macro BUILD_HANDLER exception handler clear verbose
+ .align 5
+ NESTED(handle_\exception, PT_SIZE, sp)
+ .set noat
+ SAVE_ALL
+ __BUILD_clear_\clear
+ .set at
+ __BUILD_\verbose \exception
+ jal do_\handler
+ move a0,sp
+ j ret_from_sys_call
+ nop
+ END(handle_\exception)
+ .endm
+
+ BUILD_HANDLER adel ade ade silent /* #4 */
+ BUILD_HANDLER ades ade ade silent /* #5 */
+ BUILD_HANDLER ibe ibe cli verbose /* #6 */
+ BUILD_HANDLER dbe dbe cli silent /* #7 */
+ BUILD_HANDLER bp bp sti silent /* #9 */
+ BUILD_HANDLER ri ri sti silent /* #10 */
+ BUILD_HANDLER cpu cpu sti silent /* #11 */
+ BUILD_HANDLER ov ov sti silent /* #12 */
+ BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER fpe fpe fpe silent /* #15 */
+ BUILD_HANDLER watch watch sti verbose /* #23 */
+ BUILD_HANDLER reserved reserved sti verbose /* others */
diff --git a/arch/mips64/kernel/mips64_ksyms.c b/arch/mips64/kernel/mips64_ksyms.c
index fc422788e..355f34115 100644
--- a/arch/mips64/kernel/mips64_ksyms.c
+++ b/arch/mips64/kernel/mips64_ksyms.c
@@ -86,11 +86,6 @@ EXPORT_SYMBOL(dma_cache_inv);
EXPORT_SYMBOL(invalid_pte_table);
/*
- * Base address of ports for Intel style I/O.
- */
-EXPORT_SYMBOL(mips_io_port_base);
-
-/*
* Kernel hacking ...
*/
#include <asm/branch.h>
diff --git a/arch/mips64/kernel/proc.c b/arch/mips64/kernel/proc.c
new file mode 100644
index 000000000..6fba1b756
--- /dev/null
+++ b/arch/mips64/kernel/proc.c
@@ -0,0 +1,71 @@
+/* $Id: proc.c,v 1.1 1999/09/27 16:01:37 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1999 Ralf Baechle
+ *
+ * XXX Rewrite this mess.
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/watch.h>
+
+unsigned long unaligned_instructions;
+unsigned int vced_count, vcei_count;
+
+/*
+ * BUFFER is PAGE_SIZE bytes long.
+ *
+ * Currently /proc/cpuinfo is being abused to print data about the
+ * number of data/instruction cache flushes.
+ */
+int get_cpuinfo(char *buffer)
+{
+ char fmt [64];
+ size_t len;
+
+ len = sprintf(buffer, "cpu\t\t\t: MIPS\n");
+#if 0
+ len += sprintf(buffer + len, "cpu model\t\t: %s V%d.%d\n",
+ cpu_name[mips_cputype <= CPU_LAST ?
+ mips_cputype :
+ CPU_UNKNOWN],
+ (version >> 4) & 0x0f,
+ version & 0x0f);
+ len += sprintf(buffer + len, "system type\t\t: %s %s\n",
+ mach_group_names[mips_machgroup],
+ mach_group_to_name[mips_machgroup][mips_machtype]);
+#endif
+ len += sprintf(buffer + len, "BogoMIPS\t\t: %lu.%02lu\n",
+ (loops_per_sec + 2500) / 500000,
+ ((loops_per_sec + 2500) / 5000) % 100);
+#if defined (__MIPSEB__)
+ len += sprintf(buffer + len, "byteorder\t\t: big endian\n");
+#endif
+#if defined (__MIPSEL__)
+ len += sprintf(buffer + len, "byteorder\t\t: little endian\n");
+#endif
+ len += sprintf(buffer + len, "unaligned accesses\t: %lu\n",
+ unaligned_instructions);
+ len += sprintf(buffer + len, "wait instruction\t: %s\n",
+ wait_available ? "yes" : "no");
+ len += sprintf(buffer + len, "microsecond timers\t: %s\n",
+ cyclecounter_available ? "yes" : "no");
+ len += sprintf(buffer + len, "extra interrupt vector\t: %s\n",
+ dedicated_iv_available ? "yes" : "no");
+ len += sprintf(buffer + len, "hardware watchpoint\t: %s\n",
+ watch_available ? "yes" : "no");
+
+ sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
+ vce_available ? "%d" : "not available");
+ len += sprintf(buffer + len, fmt, 'D', vced_count);
+ len += sprintf(buffer + len, fmt, 'I', vcei_count);
+
+ return len;
+}
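
[The BogoMIPS figures printed here follow the usual delay-loop convention: one BogoMIPS corresponds to 500000 delay-loop iterations per second (two instructions per iteration), and the +2500 rounds the second decimal digit. A user-space sketch of the same arithmetic, with a made-up loops_per_sec value:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical calibration result; the kernel measures
		   loops_per_sec at boot */
		unsigned long loops_per_sec = 74448896;

		printf("BogoMIPS\t\t: %lu.%02lu\n",
		       (loops_per_sec + 2500) / 500000,		/* integer part */
		       ((loops_per_sec + 2500) / 5000) % 100);	/* two decimals */
		return 0;	/* prints "BogoMIPS : 148.90" */
	}
]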
diff --git a/arch/mips64/kernel/process.c b/arch/mips64/kernel/process.c
new file mode 100644
index 000000000..8002c4f64
--- /dev/null
+++ b/arch/mips64/kernel/process.c
@@ -0,0 +1,167 @@
+/* $Id: process.c,v 1.1 1999/09/27 16:01:37 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999 by Ralf Baechle and others.
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/mman.h>
+#include <linux/sys.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+
+#include <asm/bootinfo.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/stackframe.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/elf.h>
+
+struct task_struct *last_task_used_math = NULL;
+
+asmlinkage void ret_from_sys_call(void);
+
+void exit_thread(void)
+{
+ /* Forget lazy fpu state */
+ if (last_task_used_math == current) {
+ set_cp0_status(ST0_CU1, ST0_CU1);
+ __asm__ __volatile__("cfc1\t$0,$31");
+ last_task_used_math = NULL;
+ }
+}
+
+void flush_thread(void)
+{
+ /* Forget lazy fpu state */
+ if (last_task_used_math == current) {
+ set_cp0_status(ST0_CU1, ST0_CU1);
+ __asm__ __volatile__("cfc1\t$0,$31");
+ last_task_used_math = NULL;
+ }
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ long childksp;
+
+ childksp = (unsigned long)p + KERNEL_STACK_SIZE - 32;
+
+ if (last_task_used_math == current) {
+ set_cp0_status(ST0_CU1, ST0_CU1);
+ r4xx0_save_fp(p);
+ }
+ /* set up new TSS. */
+ childregs = (struct pt_regs *) childksp - 1;
+ *childregs = *regs;
+ childregs->regs[7] = 0; /* Clear error flag */
+ if(current->personality == PER_LINUX) {
+ childregs->regs[2] = 0; /* Child gets zero as return value */
+ regs->regs[2] = p->pid;
+ } else {
+ /* Under IRIX things are a little different. */
+ childregs->regs[2] = 0;
+ childregs->regs[3] = 1;
+ regs->regs[2] = p->pid;
+ regs->regs[3] = 0;
+ }
+ if (childregs->cp0_status & ST0_CU0) {
+ childregs->regs[28] = (unsigned long) p;
+ childregs->regs[29] = childksp;
+ p->tss.current_ds = KERNEL_DS;
+ } else {
+ childregs->regs[29] = usp;
+ p->tss.current_ds = USER_DS;
+ }
+ p->tss.reg29 = (unsigned long) childregs;
+ p->tss.reg31 = (unsigned long) ret_from_sys_call;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ p->tss.cp0_status = read_32bit_cp0_register(CP0_STATUS) &
+ ~(ST0_CU3|ST0_CU2|ST0_CU1|ST0_KSU);
+ childregs->cp0_status &= ~(ST0_CU3|ST0_CU2|ST0_CU1);
+ p->mm->context = 0;
+
+ return 0;
+}
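
[Stripping ST0_CU1 here is one half of the lazy-FPU scheme; the other half is the coprocessor-unusable trap handled by do_cpu() in traps.c further down. A rough, self-contained C model of the life cycle — the task struct, helper names, and single-owner variable are simplifications of last_task_used_math and the r4xx0_* helpers:

	#define ST0_CU1 (1UL << 29)	/* FPU-usable bit in c0_status */

	struct task { unsigned long cp0_status; int used_math; };
	static struct task *fpu_owner;	/* models last_task_used_math */

	static void save_fpu(struct task *t) { (void) t; /* sdc1 sweep */ }
	static void init_fpu(void) { /* clear FP regs and fcr31 */ }

	/* fork: CU1 stripped, so the child's first FP insn traps */
	static void on_fork(struct task *child)
	{
		child->cp0_status &= ~ST0_CU1;
	}

	/* CpU trap: grant the FPU, migrate ownership (cf. do_cpu()) */
	static void on_cpu_trap(struct task *t)
	{
		t->cp0_status |= ST0_CU1;
		if (fpu_owner == t)
			return;			/* still owner: nothing to do */
		if (fpu_owner)
			save_fpu(fpu_owner);	/* save only on handoff */
		if (!t->used_math) {
			init_fpu();		/* first-time FPU user */
			t->used_math = 1;
		}
		fpu_owner = t;
	}
]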
+
+/* Fill in the fpu structure for a core dump.. */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
+{
+ /* We actually store the FPU info in the task->tss
+ * area.
+ */
+ if(regs->cp0_status & ST0_CU1) {
+ memcpy(r, &current->tss.fpu, sizeof(current->tss.fpu));
+ return 1;
+ }
+ return 0; /* Task didn't use the fpu at all. */
+}
+
+/* Fill in the user structure for a core dump.. */
+void dump_thread(struct pt_regs *regs, struct user *dump)
+{
+ dump->magic = CMAGIC;
+ dump->start_code = current->mm->start_code;
+ dump->start_data = current->mm->start_data;
+ dump->start_stack = regs->regs[29] & ~(PAGE_SIZE - 1);
+ dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
+ dump->u_dsize = (current->mm->brk + (PAGE_SIZE - 1) - dump->start_data) >> PAGE_SHIFT;
+ dump->u_ssize =
+ (current->mm->start_stack - dump->start_stack + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ memcpy(&dump->regs[0], regs, sizeof(struct pt_regs));
+ memcpy(&dump->regs[EF_SIZE/4], &current->tss.fpu, sizeof(current->tss.fpu));
+}
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ long retval;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ "move\t$6, $sp\n\t"
+ "move\t$4, %5\n\t"
+ "li\t$2, %1\n\t"
+ "syscall\n\t"
+ "beq\t$6, $sp, 1f\n\t"
+ " dsubu\t$sp, 32\n\t"
+ "jalr\t%4\n\t"
+ " move\t$4, %3\n\t"
+ "move\t$4, $2\n\t"
+ "li\t$2, %2\n\t"
+ "syscall\n"
+ "1:\tdaddiu\t$sp, 32\n\t"
+ "move\t%0, $2\n\t"
+ ".set\treorder"
+ :"=r" (retval)
+ :"i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
+ "r" (flags | CLONE_VM)
+
+ /* The called subroutine might have destroyed any of the
+ * at, result, argument or temporary registers ... */
+ :"$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8",
+ "$9","$10","$11","$12","$13","$14","$15","$24","$25");
+
+ return retval;
+}
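
[At the C level the inline assembly above amounts to clone-then-call-then-exit; it is written in assembly because the child resumes on a different stack, so none of the parent's C spill slots may be touched. A rough equivalent, with clone_syscall()/exit_syscall() as stand-ins for the raw __NR_clone/__NR_exit syscalls the asm issues:

	#define CLONE_VM 0x00000100	/* share the address space */

	extern long clone_syscall(unsigned long flags);	/* stand-in */
	extern void exit_syscall(int code);		/* stand-in */

	int kernel_thread_sketch(int (*fn)(void *), void *arg,
	                         unsigned long flags)
	{
		long pid = clone_syscall(flags | CLONE_VM);

		if (pid == 0)			/* child (the asm detects this
						   by comparing $sp instead) */
			exit_syscall(fn(arg));	/* never returns */
		return pid;			/* parent */
	}
]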
diff --git a/arch/mips64/kernel/r4k_fpu.S b/arch/mips64/kernel/r4k_fpu.S
new file mode 100644
index 000000000..609b9fd0a
--- /dev/null
+++ b/arch/mips64/kernel/r4k_fpu.S
@@ -0,0 +1,149 @@
+/* $Id: r4k_fpu.S,v 1.1 1999/09/27 16:01:38 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Save/restore floating point context for signal handlers.
+ *
+ * Copyright (C) 1996, 1998, 1999 by Ralf Baechle
+ *
+ * Multi-arch abstraction and asm macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/errno.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+ .macro EX insn, reg, src
+ .set push
+ .set nomacro
+.ex\@: \insn \reg, \src
+ .set pop
+ .section __ex_table,"a"
+ PTR .ex\@, fault
+ .previous
+ .endm
+
+ .set noreorder
+ /* Save floating point context */
+LEAF(save_fp_context)
+ mfc0 t1,CP0_STATUS
+ sll t2,t1,5
+
+ bgez t2,1f
+ cfc1 t1,fcr31
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, SC_FPREGS+8(a0)
+ EX sdc1 $f3, SC_FPREGS+24(a0)
+ EX sdc1 $f5, SC_FPREGS+40(a0)
+ EX sdc1 $f7, SC_FPREGS+56(a0)
+ EX sdc1 $f9, SC_FPREGS+72(a0)
+ EX sdc1 $f11, SC_FPREGS+88(a0)
+ EX sdc1 $f13, SC_FPREGS+104(a0)
+ EX sdc1 $f15, SC_FPREGS+120(a0)
+ EX sdc1 $f17, SC_FPREGS+136(a0)
+ EX sdc1 $f19, SC_FPREGS+152(a0)
+ EX sdc1 $f21, SC_FPREGS+168(a0)
+ EX sdc1 $f23, SC_FPREGS+184(a0)
+ EX sdc1 $f25, SC_FPREGS+200(a0)
+ EX sdc1 $f27, SC_FPREGS+216(a0)
+ EX sdc1 $f29, SC_FPREGS+232(a0)
+ EX sdc1 $f31, SC_FPREGS+248(a0)
+
+ /* Store the 16 even double precision registers */
+1:
+ EX sdc1 $f0, SC_FPREGS+0(a0)
+ EX sdc1 $f2, SC_FPREGS+16(a0)
+ EX sdc1 $f4, SC_FPREGS+32(a0)
+ EX sdc1 $f6, SC_FPREGS+48(a0)
+ EX sdc1 $f8, SC_FPREGS+64(a0)
+ EX sdc1 $f10, SC_FPREGS+80(a0)
+ EX sdc1 $f12, SC_FPREGS+96(a0)
+ EX sdc1 $f14, SC_FPREGS+112(a0)
+ EX sdc1 $f16, SC_FPREGS+128(a0)
+ EX sdc1 $f18, SC_FPREGS+144(a0)
+ EX sdc1 $f20, SC_FPREGS+160(a0)
+ EX sdc1 $f22, SC_FPREGS+176(a0)
+ EX sdc1 $f24, SC_FPREGS+192(a0)
+ EX sdc1 $f26, SC_FPREGS+208(a0)
+ EX sdc1 $f28, SC_FPREGS+224(a0)
+ EX sdc1 $f30, SC_FPREGS+240(a0)
+ EX sw t1, SC_FPC_CSR(a0)
+ cfc1 t0,$0 # implementation/version
+ EX sw t0,SC_FPC_EIR(a0)
+
+ jr ra
+ li v0, 0 # success
+ END(save_fp_context)
+
+/*
+ * Restore FPU state:
+ * - fp gp registers
+ * - cp1 status/control register
+ *
+ * We base the decision which registers to restore from the signal stack
+ * frame on the current content of c0_status, not on the content of the
+ * stack frame which might have been changed by the user.
+ */
+LEAF(restore_fp_context)
+ mfc0 t1, CP0_STATUS
+ sll t0,t1,5
+ bgez t0,1f
+ EX lw t0, SC_FPC_CSR(a0)
+
+ /* Restore the 16 odd double precision registers only
+ * when enabled in the cp0 status register.
+ */
+ EX ldc1 $f1, SC_FPREGS+8(a0)
+ EX ldc1 $f3, SC_FPREGS+24(a0)
+ EX ldc1 $f5, SC_FPREGS+40(a0)
+ EX ldc1 $f7, SC_FPREGS+56(a0)
+ EX ldc1 $f9, SC_FPREGS+72(a0)
+ EX ldc1 $f11, SC_FPREGS+88(a0)
+ EX ldc1 $f13, SC_FPREGS+104(a0)
+ EX ldc1 $f15, SC_FPREGS+120(a0)
+ EX ldc1 $f17, SC_FPREGS+136(a0)
+ EX ldc1 $f19, SC_FPREGS+152(a0)
+ EX ldc1 $f21, SC_FPREGS+168(a0)
+ EX ldc1 $f23, SC_FPREGS+184(a0)
+ EX ldc1 $f25, SC_FPREGS+200(a0)
+ EX ldc1 $f27, SC_FPREGS+216(a0)
+ EX ldc1 $f29, SC_FPREGS+232(a0)
+ EX ldc1 $f31, SC_FPREGS+248(a0)
+
+ /*
+ * Restore the 16 even double precision registers
+ * when cp1 was enabled in the cp0 status register.
+ */
+1: EX ldc1 $f0, SC_FPREGS+0(a0)
+ EX ldc1 $f2, SC_FPREGS+16(a0)
+ EX ldc1 $f4, SC_FPREGS+32(a0)
+ EX ldc1 $f6, SC_FPREGS+48(a0)
+ EX ldc1 $f8, SC_FPREGS+64(a0)
+ EX ldc1 $f10, SC_FPREGS+80(a0)
+ EX ldc1 $f12, SC_FPREGS+96(a0)
+ EX ldc1 $f14, SC_FPREGS+112(a0)
+ EX ldc1 $f16, SC_FPREGS+128(a0)
+ EX ldc1 $f18, SC_FPREGS+144(a0)
+ EX ldc1 $f20, SC_FPREGS+160(a0)
+ EX ldc1 $f22, SC_FPREGS+176(a0)
+ EX ldc1 $f24, SC_FPREGS+192(a0)
+ EX ldc1 $f26, SC_FPREGS+208(a0)
+ EX ldc1 $f28, SC_FPREGS+224(a0)
+ EX ldc1 $f30, SC_FPREGS+240(a0)
+ ctc1 t0,fcr31
+ jr ra
+ li v0, 0 # success
+ END(restore_fp_context)
+
+ .type fault, @function
+ .ent fault
+fault: jr ra
+ li v0, -EFAULT # delay slot, as in the returns above
+ .end fault
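
[Every EX use above plants a {faulting-instruction, fixup} pair in the __ex_table section, each with fault as the fixup. A sketch of the lookup the fault path is assumed to perform — the table layout follows the classic Linux __ex_table convention, and the addresses are made up:

	struct exception_table_entry {
		unsigned long insn;	/* address of the EX'd ldc1/sdc1 */
		unsigned long fixup;	/* resume address; here: fault */
	};

	/* toy table standing in for the __ex_table section */
	static const struct exception_table_entry ex_table[] = {
		{ 0xffffffff801234a0UL, 0xffffffff80125000UL },
		{ 0xffffffff801234a8UL, 0xffffffff80125000UL },
	};

	/* On a bad user pointer the trap handler scans the table; on a
	   hit it rewrites EPC so the access "returns" through fault,
	   i.e. save/restore_fp_context yields -EFAULT in v0. */
	static unsigned long search_ex_table(unsigned long epc)
	{
		unsigned long i;

		for (i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
			if (ex_table[i].insn == epc)
				return ex_table[i].fixup;
		return 0;	/* no fixup: genuine kernel fault */
	}
]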
diff --git a/arch/mips64/kernel/setup.c b/arch/mips64/kernel/setup.c
index dc5b4e902..f7904551e 100644
--- a/arch/mips64/kernel/setup.c
+++ b/arch/mips64/kernel/setup.c
@@ -37,6 +37,10 @@
struct mips_cpuinfo boot_cpu_data;
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
/*
* Not all of the MIPS CPUs have the "wait" instruction available. This
* is set to true if it is available. The wait instruction stops the
diff --git a/arch/mips64/kernel/signal.c b/arch/mips64/kernel/signal.c
new file mode 100644
index 000000000..e7950553d
--- /dev/null
+++ b/arch/mips64/kernel/signal.c
@@ -0,0 +1,654 @@
+/* $Id: signal.c,v 1.1 1999/09/27 16:01:38 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 1999 Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+
+#include <asm/asm.h>
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+#include <asm/stackframe.h>
+#include <asm/uaccess.h>
+#include <asm/ucontext.h>
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+extern asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
+ int options, unsigned long *ru);
+extern asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern asmlinkage int (*save_fp_context)(struct sigcontext *sc);
+extern asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage inline int
+sys_sigsuspend(struct pt_regs regs)
+{
+ sigset_t *uset, saveset, newset;
+
+ save_static(&regs);
+ uset = (sigset_t *) regs.regs[4];
+ if (copy_from_user(&newset, uset, sizeof(sigset_t)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.regs[2] = EINTR;
+ regs.regs[7] = 1;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, &regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_rt_sigsuspend(struct pt_regs regs)
+{
+ sigset_t *unewset, saveset, newset;
+ size_t sigsetsize;
+
+ save_static(&regs);
+
+ /* XXX Don't preclude handling different sized sigset_t's. */
+ sigsetsize = regs.regs[5];
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ unewset = (sigset_t *) regs.regs[4];
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ spin_lock_irq(&current->sigmask_lock);
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ regs.regs[2] = EINTR;
+ regs.regs[7] = 1;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, &regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct sigaction *act, struct sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ int err = 0;
+
+ if (act) {
+ old_sigset_t mask;
+
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)))
+ return -EFAULT;
+ err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ err |= __get_user(mask, &act->sa_mask.sig[0]);
+ err |= __get_user(new_ka.sa.sa_restorer, &act->sa_restorer);
+ if (err)
+ return -EFAULT;
+
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
+ return -EFAULT;
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
+ err |= __put_user(0, &oact->sa_mask.sig[1]);
+ err |= __put_user(0, &oact->sa_mask.sig[2]);
+ err |= __put_user(0, &oact->sa_mask.sig[3]);
+ err |= __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer);
+ if (err)
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(struct pt_regs regs)
+{
+ const stack_t *uss = (const stack_t *) regs.regs[4];
+ stack_t *uoss = (stack_t *) regs.regs[5];
+ unsigned long usp = regs.regs[29];
+
+ return do_sigaltstack(uss, uoss, usp);
+}
+
+asmlinkage int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ int owned_fp;
+ int err = 0;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+
+#define restore_gp_reg(i) do { \
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]); \
+} while(0)
+ restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
+ restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
+ restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
+ restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
+ restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
+ restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
+ restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
+ restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
+ restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
+ restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
+ restore_gp_reg(31);
+#undef restore_gp_reg
+
+ err |= __get_user(owned_fp, &sc->sc_ownedfp);
+ if (owned_fp) {
+ err |= restore_fp_context(sc);
+ last_task_used_math = current;
+ }
+
+ return err;
+}
+
+struct sigframe {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_code[2]; /* signal trampoline */
+ struct sigcontext sf_sc;
+ sigset_t sf_mask;
+};
+
+struct rt_sigframe {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_code[2]; /* signal trampoline */
+ struct siginfo rs_info;
+ struct ucontext rs_uc;
+};
+
+asmlinkage void
+sys_sigreturn(struct pt_regs regs)
+{
+ struct sigframe *frame;
+ sigset_t blocked;
+
+ frame = (struct sigframe *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
+ goto badframe;
+
+ sigdelsetmask(&blocked, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = blocked;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->sf_sc))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tret_from_sys_call"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
+
+asmlinkage void
+sys_rt_sigreturn(struct pt_regs regs)
+{
+ struct rt_sigframe *frame;
+ sigset_t set;
+ stack_t st;
+
+ frame = (struct rt_sigframe *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sigmask_lock);
+ current->blocked = set;
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext))
+ goto badframe;
+
+ if (__copy_from_user(&st, &frame->rs_uc.uc_stack, sizeof(st)))
+ goto badframe;
+ /* It is more difficult to avoid calling this function than to
+ call it and ignore errors. */
+ do_sigaltstack(&st, NULL, regs.regs[29]);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tret_from_sys_call"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
+
+static int inline
+setup_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
+{
+ int owned_fp;
+ int err = 0;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __put_user(regs->cp0_status, &sc->sc_status);
+
+#define save_gp_reg(i) do { \
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]); \
+} while(0)
+ __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
+ save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
+ save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
+ save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
+ save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
+ save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
+ save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
+ save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
+ save_gp_reg(31);
+#undef save_gp_reg
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ err |= __put_user(regs->cp0_cause, &sc->sc_cause);
+ err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);
+
+ owned_fp = (current == last_task_used_math);
+ err |= __put_user(owned_fp, &sc->sc_ownedfp);
+
+ if (current->used_math) { /* fp is active. */
+ set_cp0_status(ST0_CU1, ST0_CU1);
+ err |= save_fp_context(sc);
+ last_task_used_math = NULL;
+ regs->cp0_status &= ~ST0_CU1;
+ current->used_math = 0;
+ }
+
+ return err;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->regs[29];
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && ! on_sig_stack(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ return (void *)((sp - frame_size) & ALMASK);
+}
+
+static void inline
+setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set)
+{
+ struct sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub already
+ in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER)
+ regs->regs[31] = (unsigned long) ka->sa.sa_restorer;
+ else {
+ /*
+ * Set up the return code ...
+ *
+ * li v0, __NR_sigreturn
+ * syscall
+ */
+ err |= __put_user(0x24020000 + __NR_sigreturn,
+ frame->sf_code + 0);
+ err |= __put_user(0x0000000c ,
+ frame->sf_code + 1);
+ flush_cache_sigtramp((unsigned long) frame->sf_code);
+ }
+
+ err |= setup_sigcontext(regs, &frame->sf_sc);
+ err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
+ if (err)
+ goto give_sigsegv;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = signr;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) frame->sf_code;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=0x%p pc=0x%p ra=0x%p\n",
+ current->comm, current->pid, frame, regs->cp0_epc, frame->sf_code);
+#endif
+ return;
+
+give_sigsegv:
+ if (signr == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
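
[The two magic words stored in sf_code are a hand-assembled trampoline: 0x24020000 is the I-type template for addiu $v0, $zero, imm (what li v0, imm assembles to) and 0x0000000c is syscall. A sketch of the encoding; the syscall number used is illustrative only:

	#include <stdio.h>

	/* MIPS I-type: opcode(6) rs(5) rt(5) immediate(16) */
	static unsigned int mips_addiu(int rt, int rs, unsigned short imm)
	{
		return (0x09u << 26) | (rs << 21) | (rt << 16) | imm;
	}

	int main(void)
	{
		unsigned int nr = 4119;	/* stand-in for __NR_sigreturn */

		/* li v0, nr  ==  addiu $2, $0, nr  ==  0x24020000 + nr */
		printf("%08x\n", mips_addiu(2, 0, nr));
		printf("%08x\n", 0x0000000cu);	/* syscall */
		return 0;
	}
]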
+
+static void inline
+setup_rt_frame(struct k_sigaction * ka, struct pt_regs *regs,
+ int signr, sigset_t *set, siginfo_t *info)
+{
+ struct rt_sigframe *frame;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. If provided, use a stub already
+ in userspace. */
+ if (ka->sa.sa_flags & SA_RESTORER)
+ regs->regs[31] = (unsigned long) ka->sa.sa_restorer;
+ else {
+ /*
+ * Set up the return code ...
+ *
+ * li v0, __NR_rt_sigreturn
+ * syscall
+ */
+ err |= __put_user(0x24020000 + __NR_rt_sigreturn,
+ frame->rs_code + 0);
+ err |= __put_user(0x0000000c ,
+ frame->rs_code + 1);
+ flush_cache_sigtramp((unsigned long) frame->rs_code);
+ }
+
+ /* Create siginfo. */
+ err |= __copy_to_user(&frame->rs_info, info, sizeof(*info));
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->rs_uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(regs->regs[29]),
+ &frame->rs_uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size,
+ &frame->rs_uc.uc_stack.ss_size);
+ err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[ 4] = signr;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) frame->rs_code;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;
+
+#if DEBUG_SIG
+ printk("SIG deliver (%s:%d): sp=0x%p pc=0x%p ra=0x%p\n",
+ current->comm, current->pid, frame, regs->cp0_epc, frame->rs_code);
+#endif
+ return;
+
+give_sigsegv:
+ if (signr == SIGSEGV)
+ ka->sa.sa_handler = SIG_DFL;
+ force_sig(SIGSEGV, current);
+}
+
+static inline void
+handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
+{
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(ka, regs, sig, oldset, info);
+ else
+ setup_frame(ka, regs, sig, oldset);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+ if (!(ka->sa.sa_flags & SA_NODEFER)) {
+ spin_lock_irq(&current->sigmask_lock);
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending(current);
+ spin_unlock_irq(&current->sigmask_lock);
+ }
+}
+
+static inline void
+syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
+{
+ switch(regs->regs[0]) {
+ case ERESTARTNOHAND:
+ regs->regs[2] = EINTR;
+ break;
+ case ERESTARTSYS:
+ if(!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ /* fallthrough */
+ case ERESTARTNOINTR: /* Userland will reload $v0. */
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 8;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+}
+
+extern int do_irix_signal(sigset_t *oldset, struct pt_regs *regs);
+
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ struct k_sigaction *ka;
+ siginfo_t info;
+
+#ifdef CONFIG_BINFMT_IRIX
+ if (current->personality != PER_LINUX)
+ return do_irix_signal(oldset, regs);
+#endif
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ for (;;) {
+ unsigned long signr;
+
+ spin_lock_irq(&current->sigmask_lock);
+ signr = dequeue_signal(&current->blocked, &info);
+ spin_unlock_irq(&current->sigmask_lock);
+
+ if (!signr)
+ break;
+
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ /* Let the debugger run. */
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current, SIGCHLD);
+ schedule();
+
+ /* We're back. Did the debugger cancel the sig? */
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+
+ /* The debugger continued. Ignore SIGSTOP. */
+ if (signr == SIGSTOP)
+ continue;
+
+ /* Update the siginfo structure. Is this good? */
+ if (signr != info.si_signo) {
+ info.si_signo = signr;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = current->p_pptr->pid;
+ info.si_uid = current->p_pptr->uid;
+ }
+
+ /* If the (new) signal is now blocked, requeue it. */
+ if (sigismember(&current->blocked, signr)) {
+ send_sig_info(signr, &info, current);
+ continue;
+ }
+ }
+
+ ka = &current->sig->action[signr-1];
+ if (ka->sa.sa_handler == SIG_IGN) {
+ if (signr != SIGCHLD)
+ continue;
+ /* Check for SIGCHLD: it's special. */
+ while (sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+ /* nothing */;
+ continue;
+ }
+
+ if (ka->sa.sa_handler == SIG_DFL) {
+ int exit_code = signr;
+
+ /* Init gets no signals it doesn't want. */
+ if (current->pid == 1)
+ continue;
+
+ switch (signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+ /* FALLTHRU */
+
+ case SIGSTOP:
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if (!(current->p_pptr->sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
+ notify_parent(current, SIGCHLD);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ case SIGBUS:
+ lock_kernel();
+ if (current->binfmt
+ && current->binfmt->core_dump
+ && current->binfmt->core_dump(signr, regs))
+ exit_code |= 0x80;
+ unlock_kernel();
+ /* FALLTHRU */
+
+ default:
+ lock_kernel();
+ sigaddset(&current->signal, signr);
+ recalc_sigpending(current);
+ current->flags |= PF_SIGNALED;
+ do_exit(exit_code);
+ /* NOTREACHED */
+ }
+ }
+
+ if (regs->regs[0])
+ syscall_restart(regs, ka);
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /*
+ * Whose code doesn't conform to the restartable syscall convention
+ * dies here!!! The li instruction, a single machine instruction,
+ * must be directly followed by the syscall instruction.
+ */
+ if (regs->regs[0]) {
+ if (regs->regs[2] == ERESTARTNOHAND ||
+ regs->regs[2] == ERESTARTSYS ||
+ regs->regs[2] == ERESTARTNOINTR) {
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 8;
+ }
+ }
+ return 0;
+}
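
[Backing cp0_epc up by 8 re-executes both the instruction that loads the syscall number into $v0 and the syscall itself; the convention in the comment above exists precisely so those two instructions are adjacent. A hypothetical user-side stub obeying it — names and the choice of __NR_write are illustrative, and the $a3 error-flag handling is omitted:

	#include <asm/unistd.h>

	static long my_write(int fd, const void *buf, unsigned long n)
	{
		register long v0 asm("$2");
		register long a0 asm("$4") = fd;
		register long a1 asm("$5") = (long) buf;
		register long a2 asm("$6") = n;

		asm volatile(
			"li\t$2, %1\n\t"	/* cp0_epc - 8 lands here */
			"syscall"		/* cp0_epc - 4 lands here */
			: "=r" (v0)
			: "i" (__NR_write), "r" (a0), "r" (a1), "r" (a2)
			: "$3", "$7", "memory");
		return v0;
	}
]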
diff --git a/arch/mips64/kernel/traps.c b/arch/mips64/kernel/traps.c
new file mode 100644
index 000000000..ea700e5ef
--- /dev/null
+++ b/arch/mips64/kernel/traps.c
@@ -0,0 +1,562 @@
+/* $Id: traps.c,v 1.1 1999/09/27 16:01:38 ralf Exp $
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999 by Ralf Baechle
+ * Copyright (C) 1995, 1996 Paul M. Antoine
+ * Copyright (C) 1998 Ulf Carlsson
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/branch.h>
+#include <asm/cachectl.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/watch.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+static inline void console_verbose(void)
+{
+ extern int console_loglevel;
+ console_loglevel = 15;
+}
+
+extern asmlinkage void r4k_handle_mod(void);
+extern asmlinkage void r4k_handle_tlbl(void);
+extern asmlinkage void r4k_handle_tlbs(void);
+extern asmlinkage void handle_adel(void);
+extern asmlinkage void handle_ades(void);
+extern asmlinkage void handle_ibe(void);
+extern asmlinkage void handle_dbe(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_cpu(void);
+extern asmlinkage void handle_ov(void);
+extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_reserved(void);
+
+static char *cpu_names[] = CPU_NAMES;
+
+char watch_available = 0;
+char dedicated_iv_available = 0;
+char vce_available = 0;
+
+void (*ibe_board_handler)(struct pt_regs *regs);
+void (*dbe_board_handler)(struct pt_regs *regs);
+
+int kstack_depth_to_print = 24;
+
+/*
+ * This constant is used when searching for possible module text segments.
+ * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
+ */
+#define MODULE_RANGE (8*1024*1024)
+
+/*
+ * This routine abuses get_user()/put_user() to reference pointers
+ * with at least a bit of error checking ...
+ */
+void show_stack(unsigned int *sp)
+{
+ int i;
+ unsigned int *stack;
+
+ stack = sp;
+ i = 0;
+
+ printk("Stack:");
+ while ((unsigned long) stack & (PAGE_SIZE - 1)) {
+ unsigned long stackdata;
+
+ if (__get_user(stackdata, stack++)) {
+ printk(" (Bad stack address)");
+ break;
+ }
+
+ printk(" %08lx", stackdata);
+
+ if (++i > 40) {
+ printk(" ...");
+ break;
+ }
+
+ if (i % 8 == 0)
+ printk("\n ");
+ }
+}
+
+void show_trace(unsigned int *sp)
+{
+ int i;
+ unsigned int *stack;
+ unsigned long kernel_start, kernel_end;
+ unsigned long module_start, module_end;
+ extern char _stext, _etext;
+
+ stack = sp;
+ i = 0;
+
+ kernel_start = (unsigned long) &_stext;
+ kernel_end = (unsigned long) &_etext;
+ module_start = VMALLOC_START;
+ module_end = module_start + MODULE_RANGE;
+
+ printk("\nCall Trace:");
+
+ while ((unsigned long) stack & (PAGE_SIZE -1)) {
+ unsigned long addr;
+
+ if (__get_user(addr, stack++)) {
+ printk(" (Bad stack address)\n");
+ break;
+ }
+
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+
+ if ((addr >= kernel_start && addr < kernel_end) ||
+ (addr >= module_start && addr < module_end)) {
+
+ printk(" [<%08lx>]", addr);
+ if (++i > 40) {
+ printk(" ...");
+ break;
+ }
+ }
+ }
+}
+
+void show_code(unsigned int *pc)
+{
+ long i;
+
+ printk("\nCode:");
+
+ for(i = -3 ; i < 6 ; i++) {
+ unsigned long insn;
+ if (__get_user(insn, pc + i)) {
+ printk(" (Bad address in epc)\n");
+ break;
+ }
+ printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>'));
+ }
+}
+
+void die(const char * str, struct pt_regs * regs, unsigned long err)
+{
+ if (user_mode(regs)) /* Just return if in user mode. */
+ return;
+
+ console_verbose();
+ printk("%s: %04lx\n", str, err & 0xffff);
+ show_regs(regs);
+ printk("Process %s (pid: %ld, stackpage=%08lx)\n",
+ current->comm, current->pid, (unsigned long) current);
+ show_stack((unsigned int *) regs->regs[29]);
+ show_trace((unsigned int *) regs->regs[29]);
+ show_code((unsigned int *) regs->cp0_epc);
+ printk("\n");
+ do_exit(SIGSEGV);
+}
+
+void die_if_kernel(const char * str, struct pt_regs * regs, unsigned long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+static void default_be_board_handler(struct pt_regs *regs)
+{
+ /*
+ * Assume it would be too dangerous to continue ...
+ */
+ force_sig(SIGBUS, current);
+}
+
+void do_ibe(struct pt_regs *regs)
+{
+show_regs(regs); while(1);
+ ibe_board_handler(regs);
+}
+
+void do_dbe(struct pt_regs *regs)
+{
+show_regs(regs); while(1);
+ dbe_board_handler(regs);
+}
+
+void do_ov(struct pt_regs *regs)
+{
+ if (compute_return_epc(regs))
+ return;
+ force_sig(SIGFPE, current);
+}
+
+#ifdef CONFIG_MIPS_FPE_MODULE
+static void (*fpe_handler)(struct pt_regs *regs, unsigned int fcr31);
+
+/*
+ * Register_fpe/unregister_fpe are for debugging purposes only. To make
+ * this hack work a bit better there is no error checking.
+ */
+int register_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
+{
+ fpe_handler = handler;
+ return 0;
+}
+
+int unregister_fpe(void (*handler)(struct pt_regs *regs, unsigned int fcr31))
+{
+ fpe_handler = NULL;
+ return 0;
+}
+#endif
+
+/*
+ * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
+ */
+void do_fpe(struct pt_regs *regs, unsigned long fcr31)
+{
+ unsigned long pc;
+ unsigned int insn;
+
+#ifdef CONFIG_MIPS_FPE_MODULE
+ if (fpe_handler != NULL) {
+ fpe_handler(regs, fcr31);
+ return;
+ }
+#endif
+ lock_kernel();
+ if (fcr31 & 0x20000) {
+ /* Retry instruction with flush to zero ... */
+ if (!(fcr31 & (1<<24))) {
+ printk("Setting flush to zero for %s.\n",
+ current->comm);
+ fcr31 &= ~0x20000;
+ fcr31 |= (1<<24);
+ __asm__ __volatile__(
+ "ctc1\t%0,$31"
+ : /* No outputs */
+ : "r" (fcr31));
+ goto out;
+ }
+ pc = regs->cp0_epc + ((regs->cp0_cause & CAUSEF_BD) ? 4 : 0);
+ if (get_user(insn, (unsigned int *)pc)) {
+ /* XXX Can this happen? */
+ force_sig(SIGSEGV, current);
+ }
+
+ printk(KERN_DEBUG "Unimplemented exception for insn %08x at 0x%08lx in %s.\n",
+ insn, regs->cp0_epc, current->comm);
+ simfp(insn);
+ }
+
+ if (compute_return_epc(regs))
+ goto out;
+ //force_sig(SIGFPE, current);
+ printk(KERN_DEBUG "Should send SIGFPE to %s\n", current->comm);
+
+out:
+ unlock_kernel();
+}
+
+static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
+{
+ unsigned int *epc;
+
+ epc = (unsigned int *) (unsigned long) regs->cp0_epc;
+ if (regs->cp0_cause & CAUSEF_BD)
+ epc += 1; /* faulting insn is in the branch delay slot, one word on */
+
+ if (verify_area(VERIFY_READ, epc, 4)) {
+ force_sig(SIGSEGV, current);
+ return 1;
+ }
+ *opcode = *epc;
+
+ return 0;
+}
+
+void do_bp(struct pt_regs *regs)
+{
+ unsigned int opcode, bcode;
+
+ /*
+ * There is an ancient bug in MIPS assemblers: the break code is
+ * placed starting at bit 16 instead of bit 6 of the opcode.
+ * Gas is bug-compatible ...
+ */
+ if (get_insn_opcode(regs, &opcode))
+ return;
+ bcode = ((opcode >> 16) & ((1 << 20) - 1));
+
+ /*
+ * (A short test says that IRIX 5.3 sends SIGTRAP for all break
+ * insns, even for break codes that indicate arithmetic failures.
+ * Weird ...)
+ */
+ force_sig(SIGTRAP, current);
+}
+
+void do_tr(struct pt_regs *regs)
+{
+ unsigned int opcode, bcode;
+
+ if (get_insn_opcode(regs, &opcode))
+ return;
+ bcode = ((opcode >> 6) & ((1 << 20) - 1));
+
+ /*
+ * (A short test says that IRIX 5.3 sends SIGTRAP for all break
+ * insns, even for break codes that indicate arithmetic failures.
+ * Weird ...)
+ */
+ force_sig(SIGTRAP, current);
+}
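
[A worked example of the two decodings: the architected break/trap code field occupies bits 25..6, while gas (bug-compatibly) puts the break code at bits 25..16, which is why do_bp() shifts by 16 and do_tr() shifts by 6:

	#include <stdio.h>

	int main(void)
	{
		/* "break 7" as gas emits it: code at bits 25..16,
		   function field 0x0d (BREAK) in bits 5..0 */
		unsigned int opcode = (7u << 16) | 0x0d;	/* 0x0007000d */

		printf("gas-style code:   %u\n",
		       (opcode >> 16) & ((1u << 20) - 1));	/* 7 */
		printf("architected code: %u\n",
		       (opcode >> 6) & ((1u << 20) - 1));	/* 7168 */
		return 0;
	}
]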
+
+void do_ri(struct pt_regs *regs)
+{
+ lock_kernel();
+ printk("[%s:%ld] Illegal instruction at %08lx ra=%08lx\n",
+ current->comm, current->pid, regs->cp0_epc, regs->regs[31]);
+ unlock_kernel();
+ if (compute_return_epc(regs))
+ return;
+ force_sig(SIGILL, current);
+}
+
+void do_cpu(struct pt_regs *regs)
+{
+ u32 cpid;
+
+ cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
+ if (cpid != 1)
+ goto bad_cid;
+
+ regs->cp0_status |= ST0_CU1;
+ if (last_task_used_math == current)
+ return;
+
+ if (current->used_math) { /* Using the FPU again. */
+ r4xx0_lazy_fpu_switch(last_task_used_math);
+ } else { /* First time FPU user. */
+
+ r4xx0_init_fpu();
+ current->used_math = 1;
+ }
+ last_task_used_math = current;
+ return;
+
+bad_cid:
+ force_sig(SIGILL, current);
+}
+
+void do_watch(struct pt_regs *regs)
+{
+ /*
+ * We use the watch exception where available to detect stack
+ * overflows.
+ */
+ show_regs(regs);
+ panic("Caught WATCH exception - probably caused by stack overflow.");
+}
+
+void do_reserved(struct pt_regs *regs)
+{
+ /*
+ * Game over - no way to handle this if it ever occurs.
+ * Most probably caused by a new unknown cpu type or
+ * after another deadly hard/software error.
+ */
+ panic("Caught reserved exception - should not happen.");
+}
+
+static inline void watch_init(unsigned long cputype)
+{
+ switch(cputype) {
+ case CPU_R10000:
+ case CPU_R4000MC:
+ case CPU_R4400MC:
+ case CPU_R4000SC:
+ case CPU_R4400SC:
+ case CPU_R4000PC:
+ case CPU_R4400PC:
+ case CPU_R4200:
+ case CPU_R4300:
+ set_except_vector(23, handle_watch);
+ watch_available = 1;
+ break;
+ }
+}
+
+/*
+ * Some MIPS CPUs have a dedicated interrupt vector which reduces the
+ * interrupt processing overhead. Use it where available.
+ * FIXME: more CPUs than just the Nevada have this feature.
+ */
+static inline void setup_dedicated_int(void)
+{
+ extern void except_vec4(void);
+ switch(mips_cputype) {
+ case CPU_NEVADA:
+ memcpy((void *)(KSEG0 + 0x200), except_vec4, 8);
+ set_cp0_cause(CAUSEF_IV, CAUSEF_IV);
+ dedicated_iv_available = 1;
+ }
+}
+
+unsigned long exception_handlers[32];
+
+/*
+ * As a side effect of the way this is implemented we're limited
+ * to interrupt handlers in the address range from
+ * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
+ */
+void set_except_vector(int n, void *addr)
+{
+ unsigned long handler = (unsigned long) addr;
+ exception_handlers[n] = handler;
+ if (n == 0 && dedicated_iv_available) {
+ *(volatile u32 *)(KSEG0+0x200) = 0x08000000 |
+ (0x03ffffff & (handler >> 2));
+ flush_icache_range(KSEG0+0x200, KSEG0 + 0x204);
+ }
+}
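
[The word written at KSEG0+0x200 is a hand-assembled j handler: a J-type instruction carries a 26-bit word index and inherits the upper bits from the delay-slot PC, which is what limits reachable handlers to a 256 MB window above KSEG0, as the comment before set_except_vector notes. A sketch of the encoding; the 64-bit KSEG0 base and handler address are illustrative:

	#include <stdio.h>

	#define KSEG0 0xffffffff80000000UL

	/* j target: opcode 000010 | target[27:2] */
	static unsigned int mips_j(unsigned long target)
	{
		return 0x08000000u |
		       (0x03ffffffu & (unsigned int) (target >> 2));
	}

	int main(void)
	{
		unsigned long handler = KSEG0 + 0x1234;	/* made up */

		printf("insn at KSEG0+0x200: %08x\n", mips_j(handler));
		return 0;	/* prints 0800048d */
	}
]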
+
+asmlinkage void (*save_fp_context)(struct sigcontext *sc);
+extern asmlinkage void r4k_save_fp_context(struct sigcontext *sc);
+
+asmlinkage void (*restore_fp_context)(struct sigcontext *sc);
+extern asmlinkage void r4k_restore_fp_context(struct sigcontext *sc);
+
+extern asmlinkage void *r4xx0_resume(void *last, void *next);
+
+void __init trap_init(void)
+{
+ extern char except_vec0_nevada, except_vec0_r4000;
+ extern char except_vec0_r4600, except_vec0_r2300;
+ extern char except_vec1_generic, except_vec2_generic;
+ extern char except_vec3_generic, except_vec3_r4000;
+ unsigned long i;
+
+ /* Copy the generic exception handler code to its final destination. */
+ memcpy((void *)(KSEG0 + 0x80), &except_vec1_generic, 0x80);
+ memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
+ memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic, 0x80);
+
+ /*
+ * Setup default vectors
+ */
+ for(i = 0; i <= 31; i++)
+ set_except_vector(i, handle_reserved);
+
+ /*
+ * Only some CPUs have the watch exceptions or a dedicated
+ * interrupt vector.
+ */
+ watch_init(mips_cputype);
+ setup_dedicated_int();
+
+ /*
+ * Handling the following exceptions depends mostly on the cpu type
+ */
+ switch(mips_cputype) {
+ case CPU_R10000:
+ /*
+ * The R10000 is in most aspects similar to the R4400. It
+ * should get some special optimizations.
+ */
+ write_32bit_cp0_register(CP0_FRAMEMASK, 0);
+ set_cp0_status(ST0_XX, ST0_XX);
+ /*
+ * The R10k might even work for Linux/MIPS - but we're paranoid
+ * and refuse to run until this is tested on real silicon
+ */
+ panic("CPU too expensive - making holiday in the ANDES!");
+ break;
+ case CPU_R4000MC:
+ case CPU_R4400MC:
+ case CPU_R4000SC:
+ case CPU_R4400SC:
+ vce_available = 1;
+ /* Fall through ... */
+ case CPU_R4000PC:
+ case CPU_R4400PC:
+ case CPU_R4200:
+ case CPU_R4300:
+ case CPU_R4600:
+ case CPU_R5000:
+ case CPU_NEVADA:
+ if(mips_cputype == CPU_NEVADA) {
+ memcpy((void *)KSEG0, &except_vec0_nevada, 0x80);
+ } else if (mips_cputype == CPU_R4600)
+ memcpy((void *)KSEG0, &except_vec0_r4600, 0x80);
+ else
+ memcpy((void *)KSEG0, &except_vec0_r4000, 0x80);
+
+ /* Cache error vector */
+ memcpy((void *)(KSEG0 + 0x100), (void *) KSEG0, 0x80);
+
+ if (vce_available) {
+ memcpy((void *)(KSEG0 + 0x180), &except_vec3_r4000,
+ 0x180);
+ } else {
+ memcpy((void *)(KSEG0 + 0x180), &except_vec3_generic,
+ 0x100);
+ }
+
+ save_fp_context = r4k_save_fp_context;
+ restore_fp_context = r4k_restore_fp_context;
+ resume = r4xx0_resume;
+ set_except_vector(1, r4k_handle_mod);
+ set_except_vector(2, r4k_handle_tlbl);
+ set_except_vector(3, r4k_handle_tlbs);
+ set_except_vector(4, handle_adel);
+ set_except_vector(5, handle_ades);
+
+ /*
+ * The following two are signaled by onboard hardware and
+ * should get board specific handlers to get maximum
+ * available information.
+ */
+ set_except_vector(6, handle_ibe);
+ set_except_vector(7, handle_dbe);
+
+ set_except_vector(8, handle_sys);
+ set_except_vector(9, handle_bp);
+ set_except_vector(10, handle_ri);
+ set_except_vector(11, handle_cpu);
+ set_except_vector(12, handle_ov);
+ set_except_vector(13, handle_tr);
+ set_except_vector(15, handle_fpe);
+ break;
+
+ case CPU_R8000:
+ panic("unsupported CPU type %s.\n", cpu_names[mips_cputype]);
+ break;
+
+ case CPU_UNKNOWN:
+ default:
+ panic("Unknown CPU type");
+ }
+ flush_icache_range(KSEG0, KSEG0 + 0x200);
+}