summary refs log tree commit diff stats
path: root/arch/i386/mm
diff options
context:
space:
mode:
author	Ralf Baechle <ralf@linux-mips.org>	1999-01-03 17:49:53 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	1999-01-03 17:49:53 +0000
commit	eb7a5bf93aaa4be1d7c6181100ab7639e74d67f7 (patch)
tree	5746fea1605ff013be9b78a1556aaad7615d664a /arch/i386/mm
parent	80ea5b1e15398277650e1197957053b5a71c08bc (diff)
Merge with Linux 2.1.131 plus some more MIPS goodies.
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--	arch/i386/mm/fault.c	43
-rw-r--r--	arch/i386/mm/init.c	9
-rw-r--r--	arch/i386/mm/ioremap.c	15
3 files changed, 55 insertions, 12 deletions
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 358ff5033..5a1f363bd 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -103,8 +103,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
tsk = current;
mm = tsk->mm;
- if (in_interrupt())
- die("page fault from irq handler",regs,error_code);
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || mm == &init_mm)
+ goto no_context;
down(&mm->mmap_sem);
@@ -119,7 +124,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
/*
* accessing the stack below %esp is always a bug.
* The "+ 32" is there due to some instructions (like
- * pusha) doing pre-decrement on the stack and that
+ * pusha) doing post-decrement on the stack and that
* doesn't show up until later..
*/
if (address + 32 < regs->esp)
@@ -151,7 +156,14 @@ good_area:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- handle_mm_fault(tsk, vma, address, write);
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ if (!handle_mm_fault(tsk, vma, address, write))
+ goto do_sigbus;
/*
* Did it hit the DOS screen memory VA from vm86 mode?
@@ -194,6 +206,7 @@ bad_area:
}
}
+no_context:
/* Are we prepared to handle this kernel fault? */
if ((fixup = search_exception_table(regs->eip)) != 0) {
regs->eip = fixup;
@@ -235,8 +248,26 @@ bad_area:
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
- lock_kernel();
die("Oops", regs, error_code);
do_exit(SIGKILL);
- unlock_kernel();
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+do_sigbus:
+ up(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->tss.cr2 = address;
+ tsk->tss.error_code = error_code;
+ tsk->tss.trap_no = 14;
+ force_sig(SIGBUS, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index aed7ecc55..693072b1a 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -293,11 +293,18 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
* extended bios data area.
*
* there is a real-mode segmented pointer pointing to the
- * 4K EBDA area at 0x40E, calculate and scan it here:
+ * 4K EBDA area at 0x40E, calculate and scan it here.
+ *
+ * NOTE! There are Linux loaders that will corrupt the EBDA
+ * area, and as such this kind of SMP config may be less
+ * trustworthy, simply because the SMP table may have been
+ * stomped on during early boot.
*/
address = *(unsigned short *)phys_to_virt(0x40E);
address<<=4;
smp_scan_config(address, 0x1000);
+ if (smp_found_config)
+ printk(KERN_WARNING "WARNING: MP table in the EBDA can be UNSAFE, contact linux-smp@vger.rutgers.edu if you experience SMP problems!\n");
}
#endif
start_mem = PAGE_ALIGN(start_mem);
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 740f4551f..28250b0bd 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -84,11 +84,16 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
*/
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
void * addr;
struct vm_struct * area;
+ unsigned long offset;
/*
* Don't remap the low PCI/ISA area, it's always mapped..
@@ -105,9 +110,9 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
/*
* Mappings have to be page-aligned
*/
- if (phys_addr & ~PAGE_MASK)
- return NULL;
- size = PAGE_ALIGN(size);
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
/*
* Don't allow mappings that wrap..
@@ -126,11 +131,11 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
vfree(addr);
return NULL;
}
- return addr;
+ return (void *) (offset + (char *)addr);
}
void iounmap(void *addr)
{
if (addr > high_memory)
- return vfree(addr);
+ return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}