Diffstat (limited to 'arch/ppc/mm/fault.c')
-rw-r--r--  arch/ppc/mm/fault.c | 216
1 file changed, 62 insertions(+), 154 deletions(-)
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index ff7e0b9a8..7104d6bbb 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Ported to PPC by Gary Thomas
+ * Modified by Cort Dougan (cort@cs.nmt.edu)
*/
#include <linux/config.h>
@@ -22,164 +23,71 @@
extern void die_if_kernel(char *, struct pt_regs *, long);
extern void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+void new_page_fault(unsigned long address, unsigned long code, unsigned long text,
+                    struct pt_regs *regs);
+
#undef SHOW_FAULTS
#undef NOISY_INSTRFAULT
#undef NOISY_DATAFAULT
+unsigned int probingmem = 0;
#define NEWMM 1
-void
-DataAccessException(struct pt_regs *regs)
+void new_page_fault(unsigned long address, unsigned long ppc_code,
+                    unsigned long text, struct pt_regs *regs)
{
- pgd_t *dir;
- pmd_t *pmd;
- pte_t *pte;
- int tries, mode = 0;
-
-#ifdef NOISY_DATAFAULT
- printk("Data fault on %x\n",regs->dar);
-#endif
-
- if (user_mode(regs)) mode |= 0x04;
- if (regs->dsisr & 0x02000000) mode |= 0x02; /* Load/store */
- if (regs->dsisr & 0x08000000) mode |= 0x01; /* Protection violation */
- if (mode & 0x01)
- {
-#ifdef NOISY_DATAFAULT
- printk("Write Protect fault\n ");
-#endif
- do_page_fault(regs, regs->dar, mode);
-#ifdef NOISY_DATAFAULT
- printk("Write Protect fault handled\n");
-#endif
- return;
- }
-
-
- for (tries = 0; tries < 1; tries++)
- {
- dir = pgd_offset(current->mm, regs->dar & PAGE_MASK);
- if (dir)
- {
- pmd = pmd_offset(dir, regs->dar & PAGE_MASK);
- if (pmd && pmd_present(*pmd))
- {
- pte = pte_offset(pmd, regs->dar & PAGE_MASK);
- if (pte && pte_present(*pte))
- {
-#if NOISY_DATAFAULT
- printk("Page mapped - PTE: %x[%x]\n", pte, *(long *)pte);
-#endif
- MMU_hash_page(&current->tss, regs->dar & PAGE_MASK, pte);
- /*MMU_hash_page2(current->mm, regs->dar & PAGE_MASK, pte);*/
- return;
- }
- }
- } else
- {
-#if NOISY_DATAFAULT
- printk("No PGD\n");
-#endif
- }
- do_page_fault(regs, regs->dar, mode);
- }
-}
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
-void
-InstructionAccessException(struct pt_regs *regs)
-{
+ int intel_code = 0;
pgd_t *dir;
pmd_t *pmd;
pte_t *pte;
- int tries, mode = 0;
-#if NOISY_INSTRFAULT
- printk("Instr fault on %x\n",regs->dar);
-#endif
-
-#ifdef NEWMM
- if (!user_mode(regs))
+ /*
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
+ if (user_mode(regs)) intel_code |= 0x04;
+ if (!text && (ppc_code & 0x02000000)) intel_code |= 0x02; /* Load/store */
+ if (!text && (ppc_code & 0x08000000))
{
-
- panic("InstructionAcessException in kernel mode. PC %x addr %x",
- regs->nip, regs->dar);
+ intel_code |= 0x01; /* prot viol */
+ goto do_page;
}
-#endif
- if (user_mode(regs)) mode |= 0x04;
- if (regs->dsisr & 0x02000000) mode |= 0x02; /* Load/store */
- if (regs->dsisr & 0x08000000) mode |= 0x01; /* Protection violation */
- if (mode & 0x01)
+ dir = pgd_offset(mm, address & PAGE_MASK);
+ if (dir)
{
- do_page_fault(regs, regs->dar, mode);
- return;
- }
- for (tries = 0; tries < 1; tries++)
- {
- dir = pgd_offset(current->mm, regs->dar & PAGE_MASK);
- if (dir)
+ pmd = pmd_offset(dir, address & PAGE_MASK);
+ if (pmd && pmd_present(*pmd))
{
- pmd = pmd_offset(dir, regs->dar & PAGE_MASK);
- if (pmd && pmd_present(*pmd))
- {
- pte = pte_offset(pmd, regs->dar & PAGE_MASK);
- if (pte && pte_present(*pte))
- {
-
- MMU_hash_page(&current->tss, regs->dar & PAGE_MASK, pte);
- /* MMU_hash_page2(current->mm, regs->dar & PAGE_MASK, pte);*/
- return;
- }
- }
- } else
+ pte = pte_offset(pmd, address & PAGE_MASK);
+ if (pte && pte_present(*pte))
{
+ MMU_hash_page(&current->tss, address & PAGE_MASK, pte);
+ return;
}
- do_page_fault(regs, regs->dar, mode);
+ }
}
-}
-
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * The error_code parameter just the same as in the i386 version:
- *
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- */
-void do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code)
-{
- struct vm_area_struct * vma;
- unsigned long page;
-
- vma = find_vma(current, address);
+do_page:
+ down(&mm->mmap_sem);
+ vma = find_vma(current->mm, address);
if (!vma)
- {
goto bad_area;
- }
-
- if (vma->vm_start <= address){
+ if (vma->vm_start <= address)
goto good_area;
- }
if (!(vma->vm_flags & VM_GROWSDOWN))
- {
goto bad_area;
- }
- if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
- {
+ if (expand_stack(vma, address))
goto bad_area;
- }
- vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
-
- vma->vm_start = (address & PAGE_MASK);
good_area:
/* a write */
- if (error_code & 2) {
+ if (intel_code & 2) {
if (!(vma->vm_flags & VM_WRITE))
{
goto bad_area;
@@ -187,32 +95,49 @@ good_area:
/* a read */
} else {
/* protection fault */
- if (error_code & 1)
+ if (intel_code & 1)
{
+ printk("prot fault\n");
goto bad_area;
}
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
{
+ printk("no read or exec\n");
goto bad_area;
}
}
- handle_mm_fault(vma, address, error_code & 2);
- flush_page(address); /* Flush & Invalidate cache - note: address is OK now */
+ handle_mm_fault(vma, address, intel_code & 2);
+ up(&mm->mmap_sem); flush_page(address); /* Flush & Invalidate cache - note: address is OK now */
return;
bad_area:
+ up(&mm->mmap_sem);
+
+ /* Did we have an exception handler installed? */
+ if(current->tss.excount != 0) {
+ if(user_mode(regs)) {
+ printk("Exception signalled from user mode!\n");
+ } else {
+#if 0
+ printk("Exception from kernel mode. pc %x expc %x count %d\n",
+ regs->nip,current->tss.expc,current->tss.excount);
+#endif
+ current->tss.excount = 0;
+ regs->gpr[3] = -EFAULT;
+ regs->nip = current->tss.expc;
+ return;
+ }
+ }
+
if (user_mode(regs))
{
-/* printk("Bad User Area: Addr %x PC %x Task %x pid %d %s\n",
- address,regs->nip, current,current->pid,current->comm);*/
- send_sig(SIGSEGV, current, 1);
+ force_sig(SIGSEGV, current);
return;
}
panic("KERNEL access of bad area PC %x address %x vm_flags %x\n",
regs->nip,address,vma->vm_flags);
}
-
va_to_phys(unsigned long address)
{
pgd_t *dir;
@@ -240,25 +165,8 @@ va_to_phys(unsigned long address)
return (0);
}
-
-/*
- * See if an address should be valid in the current context.
- */
-valid_addr(unsigned long addr)
+inline void
+update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t _pte)
{
- struct vm_area_struct * vma;
- for (vma = current->mm->mmap ; ; vma = vma->vm_next)
- {
- if (!vma)
- {
- return (0);
- }
- if (vma->vm_end > addr)
- break;
- }
- if (vma->vm_start <= addr)
- {
- return (1);
- }
- return (0);
+ MMU_hash_page(&current->tss, address & PAGE_MASK, (pte *)&_pte);
}
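
Note (not part of the patch): the new new_page_fault() above folds the PowerPC DSISR bits into the i386-style error code described in the bit-layout comment (bit 0 = protection fault, bit 1 = write, bit 2 = user mode). The stand-alone sketch below shows just that translation, assuming the same mask values the patch tests (0x02000000 for a store, 0x08000000 for a protection violation); the helper name and the sample DSISR values are hypothetical and chosen only for illustration.

/*
 * Illustration only, not kernel code: mirrors the bit mapping done by
 * new_page_fault() in this patch.
 */
#include <stdio.h>

static unsigned long intel_code_from_dsisr(unsigned long dsisr,
                                           int is_text_fault,
                                           int from_user_mode)
{
        unsigned long code = 0;

        if (from_user_mode)
                code |= 0x04;                   /* bit 2: user mode */
        if (!is_text_fault && (dsisr & 0x02000000))
                code |= 0x02;                   /* bit 1: store (write) */
        if (!is_text_fault && (dsisr & 0x08000000))
                code |= 0x01;                   /* bit 0: protection violation */
        return code;
}

int main(void)
{
        /* A user-mode store hitting a write-protected page -> 0x7. */
        printf("%#lx\n", intel_code_from_dsisr(0x0a000000, 0, 1));
        /* A kernel-mode load from an unmapped page -> 0x0. */
        printf("%#lx\n", intel_code_from_dsisr(0x40000000, 0, 0));
        return 0;
}

The two sample calls print 0x7 and 0x0, i.e. the values the handler would pass on for a user write to a protected page and for a kernel read miss, which is what then drives the find_vma()/handle_mm_fault() path added by the patch.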