author    Ralf Baechle <ralf@linux-mips.org>  2001-04-05 04:55:58 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2001-04-05 04:55:58 +0000
commit    74a9f2e1b4d3ab45a9f72cb5b556c9f521524ab3 (patch)
tree      7c4cdb103ab1b388c9852a88bd6fb1e73eba0b5c /arch/mips/mm
parent    ee6374c8b0d333c08061c6a97bc77090d7461225 (diff)
Merge with Linux 2.4.3.
Note that mingetty no longer works with a serial console; you have to switch to another getty such as getty_ps. This commit also includes a fix for a setitimer bug that prevented getty_ps from working on older kernels.
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/extable.c |   2
-rw-r--r--  arch/mips/mm/fault.c   |  10
-rw-r--r--  arch/mips/mm/init.c    | 110
-rw-r--r--  arch/mips/mm/umap.c    |   8
4 files changed, 13 insertions(+), 117 deletions(-)
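Most of the hunks below track two interface changes that arrived with 2.4.3: mm->mmap_sem is now a read/write semaphore (fault paths take it with down_read()/up_read(), while paths that modify mappings use down_write()/up_write()), and the page-table allocators pmd_alloc()/pte_alloc() take the struct mm_struct * as their first argument. As a rough orientation aid, here is a minimal sketch of the fault-path locking discipline the diff converts to; handle_fault_sketch() and its labels are illustrative names, not part of this commit.

/*
 * Sketch only: mirrors the down_read()/up_read() pattern that do_page_fault()
 * adopts in this merge.  handle_fault_sketch() is a hypothetical helper.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static void handle_fault_sketch(struct mm_struct *mm, unsigned long address)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);	/* readers may fault concurrently */
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	/* ... access checks and handle_mm_fault() would go here ... */
	up_read(&mm->mmap_sem);		/* drop the lock on the success path */
	return;

bad_area:
	up_read(&mm->mmap_sem);		/* every exit path must release mmap_sem */
	/* ... SIGSEGV delivery or exception-table fixup would follow ... */
}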
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
index e563e8267..4c2cf3b43 100644
--- a/arch/mips/mm/extable.c
+++ b/arch/mips/mm/extable.c
@@ -49,7 +49,7 @@ search_exception_table(unsigned long addr)
spin_lock_irqsave(&modlist_lock, flags);
for (mp = module_list; mp != NULL; mp = mp->next) {
- if (mp->ex_table_start == NULL)
+ if (mp->ex_table_start == NULL || !(mp->flags&(MOD_RUNNING|MOD_INITIALIZING)))
continue;
ret = search_one_table(mp->ex_table_start,
mp->ex_table_end - 1, addr);
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ed3d2a2ac..6820df7c5 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
printk("[%s:%d:%08lx:%ld:%08lx]\n", current->comm, current->pid,
address, write, regs->cp0_epc);
#endif
- down(&mm->mmap_sem);
+ down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -115,7 +115,7 @@ good_area:
goto out_of_memory;
}
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
return;
/*
@@ -123,7 +123,7 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
@@ -177,14 +177,14 @@ no_context:
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
printk("VM: killing process %s\n", tsk->comm);
if (user_mode(regs))
do_exit(SIGKILL);
goto no_context;
do_sigbus:
- up(&mm->mmap_sem);
+ up_read(&mm->mmap_sem);
/*
* Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7fa5c9582..8d6c9a9f2 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -5,15 +5,10 @@
*
* Copyright (C) 1994 - 2000 by Ralf Baechle
* Copyright (C) 2000 Silicon Graphics, Inc.
- */
-/**************************************************************************
- * 9 Nov, 2000.
- * Use mips_cpu structure.
*
- * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- *************************************************************************/
-
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/signal.h>
@@ -52,63 +47,6 @@ static unsigned long totalram_pages;
extern void prom_free_prom_memory(void);
-void __bad_pte_kernel(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
-}
-
-void __bad_pte(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
-}
-
-pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *page;
-
- page = (pte_t *) __get_free_page(GFP_USER);
- if (pmd_none(*pmd)) {
- if (page) {
- clear_page(page);
- pmd_val(*pmd) = (unsigned long)page;
- return page + offset;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long)page);
- if (pmd_bad(*pmd)) {
- __bad_pte_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
-{
- pte_t *page;
-
- page = (pte_t *) __get_free_page(GFP_KERNEL);
- if (pmd_none(*pmd)) {
- if (page) {
- clear_page(page);
- pmd_val(*pmd) = (unsigned long)page;
- return page + offset;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- free_page((unsigned long)page);
- if (pmd_bad(*pmd)) {
- __bad_pte(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + offset;
-}
-
-
asmlinkage int sys_cacheflush(void *addr, int bytes, int cache)
{
/* This should flush more selectivly ... */
@@ -170,48 +108,6 @@ int do_check_pgt_cache(int low, int high)
return freed;
}
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-pte_t * __bad_pagetable(void)
-{
- extern char empty_bad_page_table[PAGE_SIZE];
- unsigned long page, dummy1, dummy2;
-
- page = (unsigned long) empty_bad_page_table;
- __asm__ __volatile__(
- ".set\tnoreorder\n"
- "1:\tsw\t%2,(%0)\n\t"
- "subu\t%1,1\n\t"
- "bnez\t%1,1b\n\t"
- "addiu\t%0,4\n\t"
- ".set\treorder"
- :"=r" (dummy1), "=r" (dummy2)
- :"r" (pte_val(BAD_PAGE)), "0" (page), "1" (PAGE_SIZE/4)
- :"$1");
-
- return (pte_t *)page;
-}
-
-pte_t __bad_page(void)
-{
- extern char empty_bad_page[PAGE_SIZE];
- unsigned long page = (unsigned long) empty_bad_page;
-
- clear_page((void *)page);
- return pte_mkdirty(mk_pte_phys(__pa(page), PAGE_SHARED));
-}
-
void show_mem(void)
{
int i, free = 0, total = 0, reserved = 0;
diff --git a/arch/mips/mm/umap.c b/arch/mips/mm/umap.c
index c952004c9..100cb148d 100644
--- a/arch/mips/mm/umap.c
+++ b/arch/mips/mm/umap.c
@@ -93,7 +93,7 @@ remove_mapping (struct task_struct *task, unsigned long start, unsigned long end
unsigned long beg = start;
pgd_t *dir;
- down (&task->mm->mmap_sem);
+ down_write (&task->mm->mmap_sem);
dir = pgd_offset (task->mm, start);
flush_cache_range (task->mm, beg, end);
while (start < end){
@@ -102,7 +102,7 @@ remove_mapping (struct task_struct *task, unsigned long start, unsigned long end
dir++;
}
flush_tlb_range (task->mm, beg, end);
- up (&task->mm->mmap_sem);
+ up_write (&task->mm->mmap_sem);
}
EXPORT_SYMBOL(remove_mapping);
@@ -181,7 +181,7 @@ vmap_pmd_range (pmd_t *pmd, unsigned long address, unsigned long size, unsigned
end = PGDIR_SIZE;
vaddr -= address;
do {
- pte_t * pte = pte_alloc(pmd, address);
+ pte_t * pte = pte_alloc(current->mm, pmd, address);
if (!pte)
return -ENOMEM;
vmap_pte_range(pte, address, end - address, address + vaddr);
@@ -203,7 +203,7 @@ vmap_page_range (unsigned long from, unsigned long size, unsigned long vaddr)
dir = pgd_offset(current->mm, from);
flush_cache_range(current->mm, beg, end);
while (from < end) {
- pmd_t *pmd = pmd_alloc(dir, from);
+ pmd_t *pmd = pmd_alloc(current->mm, dir, from);
error = -ENOMEM;
if (!pmd)
break;
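For the same reason, the umap.c hunks above now pass current->mm into pmd_alloc() and pte_alloc(). A minimal sketch of the new calling convention follows; build_one_mapping() is a hypothetical helper, and the locking (mmap_sem / page_table_lock) that callers are expected to hold is omitted for brevity.

/*
 * Sketch only: the 2.4.3 calling convention for pmd_alloc()/pte_alloc(),
 * as used by vmap_page_range()/vmap_pmd_range() above.
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int build_one_mapping(unsigned long address)
{
	pgd_t *dir = pgd_offset(current->mm, address);
	pmd_t *pmd = pmd_alloc(current->mm, dir, address);	/* mm is now the first argument */
	pte_t *pte;

	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc(current->mm, pmd, address);		/* likewise for pte_alloc() */
	if (!pte)
		return -ENOMEM;
	/* ... set_pte() against the target page would follow ... */
	return 0;
}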