author     Ralf Baechle <ralf@linux-mips.org>   1997-09-12 23:25:15 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1997-09-12 23:25:15 +0000
commit     0c9824af05b775b18bff274f3a07d174c718bae1 (patch)
tree       4890f863496f44ef27897017d7c1ac3feadba770 /arch
parent     e8ad72aaaa65930e821f72c4b568219f8392ba7b (diff)
- Set caching mode for KSEG0 to cached-noncoherent for all machines.
  On some MIPS boxes the firmware doesn't do that for us.
- We still had two unprotected loads in sys_sigreturn(2).  Use
  __get_user().
- Handle QED-style L1 caches != 16kb per cache correctly.
- Protect the cacheflush instructions for the signal trampoline just
  like the loads in __get_user().  Otherwise the following code will
  result in a nice "Can not handle kernel paging request" message:

      #include <signal.h>

      static void hurz(void) { }

      main()
      {
          signal(SIGSEGV, hurz);
          /* Chainsaw the stack pointer ... */
          asm("move $29,%0" : : "r"(0x70000000));
          *(int *) 8 = 0;
      }

  With the fix applied I still get the error message.  The cause of
  this problem is that gas produces an __ex_table section with wrong
  contents.  Oh well, how good that I have a nice working post 2.8.1
  binutils version at home in good ol' Germany ...
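
The "protected" load/flush idiom mentioned above relies on the kernel's
exception tables: the possibly-faulting instruction gets an __ex_table
entry pointing at a continuation label, so a bad user address is fixed
up instead of triggering the "Can not handle kernel paging request"
oops.  A minimal sketch of the idea follows; the macro name, label
layout and constraints are illustrative only, not the exact definitions
from uaccess.h or the MIPS cache headers:

      /*
       * Sketch of an exception-table protected cacheop, in the style
       * of __get_user().  If the cache instruction at label 1: faults
       * on an unmapped user address, the fault handler finds the
       * __ex_table entry (fault address 1b -> fixup 2b) and simply
       * continues at 2: instead of oopsing.  'op' would be one of the
       * Hit_* cacheop encodings from the MIPS cache headers.
       */
      #define protected_cache_op(op, addr)                      \
              __asm__ __volatile__(                             \
              "1:\tcache\t%0, (%1)\n"                           \
              "2:\n\t"                                          \
              ".section\t__ex_table,\"a\"\n\t"                  \
              ".word\t1b, 2b\n\t"                               \
              ".previous"                                       \
              :                                                 \
              : "i" (op), "r" (addr))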
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/kernel/signal.c  | 44
-rw-r--r--  arch/mips/mm/r4xx0.c       | 25
2 files changed, 34 insertions(+), 35 deletions(-)
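
The r4xx0.c half of the diff replaces a hard-coded way-select bit with
one derived from the probed cache size (the "QED-style L1 caches !=
16kb" item above).  The arithmetic, assuming two-way set-associative
primary caches as on the R4600 and QED parts this file drives:

      /*
       * Indexed cacheops select one way at a time; on a two-way cache
       * the bit that flips to the other way is the size of one way,
       * i.e. half of the total cache size.  The old constant only
       * matched 16kb caches:
       *
       *   16kb: 16384 >> 1 == 0x2000   (the old  #define waybit 0x2000)
       *    8kb:  8192 >> 1 == 0x1000   (wrong with the old constant)
       *   32kb: 32768 >> 1 == 0x4000   (wrong with the old constant)
       */
      unsigned long waybit(unsigned long cache_size)
      {
              return cache_size >> 1;   /* == icache_waybit / dcache_waybit */
      }

blast_dcache32_page_indexed(page ^ dcache_waybit) in the hunk below
then hits the same index in the other way of the cache.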
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index b2e76e8a7..4ff1a98b4 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -4,7 +4,7 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, 1995, 1996 Ralf Baechle
*
- * $Id: signal.c,v 1.4 1997/08/06 19:15:07 miguel Exp $
+ * $Id: signal.c,v 1.5 1997/09/12 01:30:24 ralf Exp $
*/
#include <linux/config.h>
#include <linux/sched.h>
@@ -64,6 +64,8 @@ asmlinkage int sys_sigsuspend(struct pt_regs *regs)
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
struct sigcontext *context;
+ unsigned long blocked;
+ long long reg;
int i;
context = (struct sigcontext *)(long) regs->regs[29];
@@ -71,36 +73,26 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
(regs->regs[29] & (SZREG - 1)))
goto badframe;
- current->blocked = context->sc_sigset & _BLOCKABLE; /* XXX */
- regs->cp0_epc = context->sc_pc; /* XXX */
+ __get_user(blocked, &context->sc_sigset);
+ current->blocked = blocked & _BLOCKABLE;
+ __get_user(regs->cp0_epc, &context->sc_pc);
-/*
- * Disabled because we only use the lower 32 bit of the registers.
- */
-#if 0
/*
- * We only allow user processes in 64bit mode (n32, 64 bit ABI) to
- * restore the upper half of registers.
+ * Restore all integer registers.
*/
- if (read_32bit_cp0_register(CP0_STATUS) & ST0_UX) {
- for(i = 31;i >= 0;i--)
- __get_user(regs->regs[i], &context->sc_regs[i]);
- __get_user(regs->hi, &context->sc_mdhi);
- __get_user(regs->lo, &context->sc_mdlo);
- } else
-#endif
- {
- long long reg;
- for(i = 31;i >= 0;i--) {
- __get_user(reg, &context->sc_regs[i]);
- regs->regs[i] = (int) reg;
- }
- __get_user(reg, &context->sc_mdhi);
- regs->hi = (int) reg;
- __get_user(reg, &context->sc_mdlo);
- regs->lo = (int) reg;
+ for(i = 31;i >= 0;i--) {
+ __get_user(reg, &context->sc_regs[i]);
+ regs->regs[i] = (int) reg;
}
+ __get_user(reg, &context->sc_mdhi);
+ regs->hi = (int) reg;
+ __get_user(reg, &context->sc_mdlo);
+ regs->lo = (int) reg;
+ /*
+ * FP depends on what FPU in what mode we have. Best done in
+ * Assembler ...
+ */
restore_fp_context(context);
/*
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index 2586a961c..7ac80eb46 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: r4xx0.c,v 1.6 1997/08/06 19:15:10 miguel Exp $
+ * $Id: r4xx0.c,v 1.7 1997/09/12 01:30:27 ralf Exp $
*/
#include <linux/config.h>
@@ -43,7 +43,8 @@ static scache_size, sc_lsize; /* Again, in bytes */
* this is the bit which selects the way in the cache for the
* indexed cachops.
*/
-#define waybit 0x2000
+#define icache_waybit (icache_size >> 1)
+#define dcache_waybit (dcache_size >> 1)
/*
* Zero an entire page. We have three flavours of the routine available.
@@ -1589,10 +1590,10 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache32_page_indexed(page);
- blast_dcache32_page_indexed(page ^ waybit);
+ blast_dcache32_page_indexed(page ^ dcache_waybit);
if(text) {
blast_icache32_page_indexed(page);
- blast_icache32_page_indexed(page ^ waybit);
+ blast_icache32_page_indexed(page ^ icache_waybit);
}
}
out:
@@ -1826,15 +1827,19 @@ static void r4k_flush_page_to_ram_d32i32_r4600(unsigned long page)
}
}
+/*
+ * While we're protected against bad userland addresses we don't care
+ * very much about what happens in that case. Usually a segmentation
+ * fault will dump the process later on anyway ...
+ */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- /* XXX protect like uaccess.h loads/stores */
addr &= ~(dc_lsize - 1);
__asm__ __volatile__("nop;nop;nop;nop");
- flush_dcache_line(addr);
- flush_dcache_line(addr + dc_lsize);
- flush_icache_line(addr);
- flush_icache_line(addr + dc_lsize);
+ protected_writeback_dcache_line(addr);
+ protected_writeback_dcache_line(addr + dc_lsize);
+ protected_flush_icache_line(addr);
+ protected_flush_icache_line(addr + dc_lsize);
}
#undef DEBUG_TLB
@@ -2541,6 +2546,8 @@ void ld_mmu_r4xx0(void)
printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));
+ set_cp0_config(CONFIG_CM_CMASK, CONFIG_CM_CACHABLE_NONCOHERENT);
+
probe_icache(cfg);
probe_dcache(cfg);
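
The set_cp0_config() call added above implements the first item of the
commit message: forcing KSEG0 to cached-noncoherent regardless of what
the firmware left in the CP0 Config register.  A rough sketch of what
such a read-modify-write amounts to, assuming the usual R4x00 Config
layout (K0 field in bits 2:0, encoding 3 = cachable noncoherent) and a
hypothetical helper name:

      static inline void kseg0_cachable_noncoherent(void)
      {
              unsigned long config;

              /* Read CP0 Config (register $16). */
              __asm__ __volatile__("mfc0\t%0, $16" : "=r" (config));
              /* K0 field = 3: cachable, noncoherent. */
              config = (config & ~7UL) | 3UL;
              __asm__ __volatile__("mtc0\t%0, $16" : : "r" (config));
              /* mtc0 hazard barrier (a few nops) omitted for brevity. */
      }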