summary | refs | log | tree | commit | diff | stats
path: root/arch/mips/mm
diff options
context:
space:
mode:
author: Ralf Baechle <ralf@linux-mips.org> — 1997-01-07 02:33:00 +0000
committer: <ralf@linux-mips.org> — 1997-01-07 02:33:00 +0000
commit: beb116954b9b7f3bb56412b2494b562f02b864b1 (patch)
tree: 120e997879884e1b9d93b265221b939d2ef1ade1 /arch/mips/mm
parent: 908d4681a1dc3792ecafbe64265783a86c4cccb6 (diff)
Import of Linux/MIPS 2.1.14
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--arch/mips/mm/Makefile27
-rw-r--r--arch/mips/mm/extable.c57
-rw-r--r--arch/mips/mm/fault.c73
-rw-r--r--arch/mips/mm/init.c221
-rw-r--r--arch/mips/mm/stack.c26
5 files changed, 221 insertions, 183 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 6ff21fafd..6f69a0ee6 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -1,5 +1,5 @@
#
-# Makefile for the linux mips-specific parts of the memory manager.
+# Makefile for the Linux/MIPS-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
@@ -7,26 +7,7 @@
#
# Note 2! The CFLAGS definition is now in the main makefile...
-.c.o:
- $(CC) $(CFLAGS) -c $<
-.s.o:
- $(AS) -o $*.o $<
-.c.s:
- $(CC) $(CFLAGS) -S $<
+O_TARGET := mm.o
+O_OBJS := extable.o init.o fault.o stack.o
-OBJS = fault.o init.o
-
-mm.o: $(OBJS)
- $(LD) -r -o mm.o $(OBJS)
-
-modules:
-
-dep:
- $(CPP) -M *.c > .depend
-
-#
-# include a dependency file if one exists
-#
-ifeq (.depend,$(wildcard .depend))
-include .depend
-endif
+include $(TOPDIR)/Rules.make
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
new file mode 100644
index 000000000..c46568ab1
--- /dev/null
+++ b/arch/mips/mm/extable.c
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/mips/mm/extable.c
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+static inline unsigned
+search_one_table(const struct exception_table_entry *first,
+ const struct exception_table_entry *last,
+ unsigned long value)
+{
+ while (first <= last) {
+ const struct exception_table_entry *mid;
+ long diff;
+
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0)
+ return mid->nextinsn;
+ else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ return 0;
+}
+
+unsigned long search_exception_table(unsigned long addr)
+{
+ unsigned ret;
+#ifdef CONFIG_MODULES
+ struct module *mp;
+#endif
+
+ /* Search the kernel's table first. */
+ ret = search_one_table(__start___ex_table,
+ __stop___ex_table-1, addr);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_MODULES
+ for (mp = module_list; mp != NULL; mp = mp->next) {
+ if (mp->exceptinfo.start != NULL) {
+ ret = search_one_table(mp->exceptinfo.start,
+ mp->exceptinfo.stop-1, addr);
+ if (ret)
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 9256025d9..f1462eff9 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -1,10 +1,8 @@
/*
* arch/mips/mm/fault.c
*
- * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
- * Ported to MIPS by Ralf Baechle
+ * Copyright (C) 1995, 1996 by Ralf Baechle
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/head.h>
@@ -16,45 +14,47 @@
#include <linux/mman.h>
#include <linux/mm.h>
+#include <asm/cache.h>
#include <asm/system.h>
-#include <asm/segment.h>
+#include <asm/uaccess.h>
#include <asm/pgtable.h>
extern void die_if_kernel(char *, struct pt_regs *, long);
/*
+ * Macro for exception fixup code to access integer registers.
+ */
+#define dpf_reg(r) (regs->regs[r])
+
+/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void
-do_page_fault(struct pt_regs *regs, unsigned long writeaccess)
+do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
+ unsigned long address)
{
struct vm_area_struct * vma;
- unsigned long address;
-
- /* get the address */
- __asm__(".set\tmips3\n\t"
- "dmfc0\t%0,$8\n\t"
- ".set\tmips0"
- : "=r" (address));
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ unsigned long fixup;
#if 0
- printk("do_page_fault() #1: %s %08lx (epc == %08lx)\n",
+ printk("do_page_fault() #1: %s %08lx (epc == %08lx, ra == %08lx)\n",
writeaccess ? "writeaccess to" : "readaccess from",
- address, regs->cp0_epc);
+ address, regs->cp0_epc, regs->regs[31]);
#endif
- vma = find_vma(current, address);
+ down(&mm->mmap_sem);
+ vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
- if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur)
+ if (expand_stack(vma, address))
goto bad_area;
- vma->vm_offset -= vma->vm_start - (address & PAGE_MASK);
- vma->vm_start = (address & PAGE_MASK);
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
@@ -68,6 +68,8 @@ good_area:
goto bad_area;
}
handle_mm_fault(vma, address, writeaccess);
+ up(&mm->mmap_sem);
+
return;
/*
@@ -75,18 +77,43 @@ good_area:
* Fix it, but check if it's kernel or user first..
*/
bad_area:
+ up(&mm->mmap_sem);
+ /* Did we have an exception handler installed? */
+
+ fixup = search_exception_table(regs->cp0_epc);
+ if (fixup) {
+ long new_epc;
+ new_epc = fixup_exception(dpf_reg, fixup, regs->cp0_epc);
+ printk("Taking exception at %lx (%lx)\n",
+ regs->cp0_epc, new_epc);
+ regs->cp0_epc = new_epc;
+ return;
+ }
if (user_mode(regs)) {
- current->tss.cp0_badvaddr = address;
- current->tss.error_code = writeaccess;
- send_sig(SIGSEGV, current, 1);
+ tsk->tss.cp0_badvaddr = address;
+ tsk->tss.error_code = writeaccess;
+#if 1
+ printk("do_page_fault() #2: sending SIGSEGV to %s for illegal %s\n"
+ "%08lx (epc == %08lx, ra == %08lx)\n",
+ tsk->comm,
+ writeaccess ? "writeaccess to" : "readaccess from",
+ address,
+ (unsigned long) regs->cp0_epc,
+ (unsigned long) regs->regs[31]);
+#endif
+ force_sig(SIGSEGV, tsk);
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
- printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx",
- address);
+ printk(KERN_ALERT "Unable to handle kernel paging request at virtual "
+#ifdef __mips64
+ "address %08lx, epc == %08Lx\n", address, regs->cp0_epc);
+#else
+ "address %08lx, epc == %016lx\n", address, regs->cp0_epc);
+#endif
die_if_kernel("Oops", regs, writeaccess);
do_exit(SIGKILL);
}
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 37912e2d0..149349102 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -15,26 +15,29 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/swap.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
-#include <asm/cachectl.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/dma.h>
+#include <asm/jazzdma.h>
#include <asm/vector.h>
#include <asm/system.h>
-#include <asm/segment.h>
+#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/page.h>
extern void deskstation_tyne_dma_init(void);
-extern void scsi_mem_init(unsigned long);
-extern void sound_mem_init(void);
-extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
-extern char empty_zero_page[PAGE_SIZE];
-
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
+ * for a process dying in kernel mode, possibly leaving an inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
@@ -48,9 +51,18 @@ pte_t * __bad_pagetable(void)
extern char empty_bad_page_table[PAGE_SIZE];
unsigned long page;
unsigned long dummy1, dummy2;
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
+ unsigned long dummy3;
+#endif
- page = ((unsigned long)empty_bad_page_table) + (PT_OFFSET - PAGE_OFFSET);
-#ifdef __R4000__
+ page = (unsigned long) empty_bad_page_table;
+ page = page_to_ptp(page);
+ /*
+ * As long as we only save the low 32 bit of the 64 bit wide
+ * R4000 registers on interrupt we cannot use 64 bit memory accesses
+ * to the main memory.
+ */
+#if (_MIPS_ISA == _MIPS_ISA_MIPS3) || (_MIPS_ISA == _MIPS_ISA_MIPS4)
/*
* Use 64bit code even for Linux/MIPS 32bit on R4000
*/
@@ -69,11 +81,12 @@ pte_t * __bad_pagetable(void)
".set\tat\n"
".set\treorder"
:"=r" (dummy1),
- "=r" (dummy2)
- :"r" (pte_val(BAD_PAGE)),
- "0" (page),
- "1" (PAGE_SIZE/8));
-#else
+ "=r" (dummy2),
+ "=r" (dummy3)
+ :"0" (page),
+ "1" (PAGE_SIZE/8),
+ "2" (pte_val(BAD_PAGE)));
+#else /* (_MIPS_ISA == _MIPS_ISA_MIPS1) || (_MIPS_ISA == _MIPS_ISA_MIPS2) */
__asm__ __volatile__(
".set\tnoreorder\n"
"1:\tsw\t%2,(%0)\n\t"
@@ -91,128 +104,37 @@ pte_t * __bad_pagetable(void)
return (pte_t *)page;
}
-static inline void
-__zeropage(unsigned long page)
-{
- unsigned long dummy1, dummy2;
-
-#ifdef __R4000__
- /*
- * Use 64bit code even for Linux/MIPS 32bit on R4000
- */
- __asm__ __volatile__(
- ".set\tnoreorder\n"
- ".set\tnoat\n\t"
- ".set\tmips3\n"
- "1:\tsd\t$0,(%0)\n\t"
- "subu\t%1,1\n\t"
- "bnez\t%1,1b\n\t"
- "addiu\t%0,8\n\t"
- ".set\tmips0\n\t"
- ".set\tat\n"
- ".set\treorder"
- :"=r" (dummy1),
- "=r" (dummy2)
- :"0" (page),
- "1" (PAGE_SIZE/8));
-#else
- __asm__ __volatile__(
- ".set\tnoreorder\n"
- "1:\tsw\t$0,(%0)\n\t"
- "subu\t%1,1\n\t"
- "bnez\t%1,1b\n\t"
- "addiu\t%0,4\n\t"
- ".set\treorder"
- :"=r" (dummy1),
- "=r" (dummy2)
- :"0" (page),
- "1" (PAGE_SIZE/4));
-#endif
-}
-
-static inline void
-zeropage(unsigned long page)
-{
- sys_cacheflush((void *)page, PAGE_SIZE, BCACHE);
- sync_mem();
- __zeropage(page + (PT_OFFSET - PAGE_OFFSET));
-}
-
pte_t __bad_page(void)
{
extern char empty_bad_page[PAGE_SIZE];
unsigned long page = (unsigned long)empty_bad_page;
- zeropage(page);
+ clear_page(page_to_ptp(page));
+ cacheflush(page, PAGE_SIZE, CF_DCACHE|CF_VIRTUAL);
return pte_mkdirty(mk_pte(page, PAGE_SHARED));
}
-unsigned long __zero_page(void)
-{
- unsigned long page = (unsigned long) empty_zero_page;
-
- zeropage(page);
- return page;
-}
-
-/*
- * This is horribly inefficient ...
- */
-void __copy_page(unsigned long from, unsigned long to)
-{
- /*
- * Now copy page from uncached KSEG1 to KSEG0. The copy destination
- * is in KSEG0 so that we keep stupid L2 caches happy.
- */
- if(from == (unsigned long) empty_zero_page)
- {
- /*
- * The page copied most is the COW empty_zero_page. Since we
- * know it's contents we can avoid the writeback reading of
- * the page. Speeds up the standard case alot.
- */
- __zeropage(to);
- }
- else
- {
- /*
- * Force writeback of old page to memory. We don't know the
- * virtual address, so we have to flush the entire cache ...
- */
- sys_cacheflush(0, ~0, DCACHE);
- sync_mem();
- memcpy((void *) to,
- (void *) (from + (PT_OFFSET - PAGE_OFFSET)), PAGE_SIZE);
- }
- /*
- * Now writeback the page again if colour has changed.
- * Actually this does a Hit_Writeback, but due to an artifact in
- * the R4xx0 implementation this should be slightly faster.
- * Then sweep chipset controlled secondary caches and the ICACHE.
- */
- if (page_colour(from) != page_colour(to))
- sys_cacheflush(0, ~0, DCACHE);
- sys_cacheflush(0, ~0, ICACHE);
-}
-
void show_mem(void)
{
- int i,free = 0,total = 0;
+ int i, free = 0, total = 0, reserved = 0;
int shared = 0;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
- i = (high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+ i = max_mapnr;
while (i-- > 0) {
total++;
- if (!mem_map[i])
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (!mem_map[i].count)
free++;
else
- shared += mem_map[i]-1;
+ shared += mem_map[i].count-1;
}
printk("%d pages of RAM\n", total);
printk("%d free pages\n", free);
+ printk("%d reserved pages\n", reserved);
printk("%d pages shared\n", shared);
show_buffers();
#ifdef CONFIG_NET
@@ -224,6 +146,7 @@ extern unsigned long free_area_init(unsigned long, unsigned long);
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
+ mips_cache_init();
pgd_init((unsigned long)swapper_pg_dir - (PT_OFFSET - PAGE_OFFSET));
return free_area_init(start_mem, end_mem);
}
@@ -235,41 +158,65 @@ void mem_init(unsigned long start_mem, unsigned long end_mem)
unsigned long tmp;
extern int _etext;
+#ifdef CONFIG_MIPS_JAZZ
+ if (mips_machgroup == MACH_GROUP_JAZZ)
+ start_mem = vdma_init(start_mem, end_mem);
+#endif
+
end_mem &= PAGE_MASK;
- high_memory = end_mem;
+ max_mapnr = MAP_NR(end_mem);
+ high_memory = (void *)end_mem;
+
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
/* mark usable pages in the mem_map[] */
start_mem = PAGE_ALIGN(start_mem);
- tmp = start_mem;
- while (tmp < high_memory) {
- mem_map[MAP_NR(tmp)] = 0;
- tmp += PAGE_SIZE;
- }
+ for(tmp = MAP_NR(start_mem);tmp < max_mapnr;tmp++)
+ clear_bit(PG_reserved, &mem_map[tmp].flags);
+
+ /*
+ * For rPC44 we've reserved some memory too much. Free the memory
+ * from PAGE_SIZE to PAGE_OFFSET + 0xa0000 again. We don't free the
+ * lowest page where the exception handlers will reside.
+ */
+ if (mips_machgroup == MACH_GROUP_ARC &&
+ mips_machtype == MACH_DESKSTATION_RPC44)
+ for(tmp = MAP_NR(PAGE_OFFSET + PAGE_SIZE);
+ tmp < MAP_NR(PAGE_OFFSET + 0xa000); tmp++)
+ clear_bit(PG_reserved, &mem_map[tmp].flags);
+
#ifdef CONFIG_DESKSTATION_TYNE
- deskstation_tyne_dma_init();
-#endif
-#ifdef CONFIG_SCSI
- scsi_mem_init(high_memory);
+ if (mips_machtype == MACH_DESKSTATION_TYNE)
+ deskstation_tyne_dma_init();
#endif
-#ifdef CONFIG_SOUND
- sound_mem_init();
-#endif
- for (tmp = PAGE_OFFSET ; tmp < high_memory ; tmp += PAGE_SIZE) {
- if (mem_map[MAP_NR(tmp)]) {
+ for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
+ /*
+ * This is only for PC-style DMA. The onboard DMA
+ * of Jazz and Tyne machines is completly different and
+ * not handled via a flag in mem_map_t.
+ */
+ if (tmp >= MAX_DMA_ADDRESS)
+ clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+ if (PageReserved(mem_map+MAP_NR(tmp))) {
if (tmp < (unsigned long) &_etext)
codepages++;
else if (tmp < start_mem)
datapages++;
continue;
}
- mem_map[MAP_NR(tmp)] = 1;
- free_page(tmp);
+ mem_map[MAP_NR(tmp)].count = 1;
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start || (tmp < initrd_start || tmp >=
+ initrd_end))
+#endif
+ free_page(tmp);
}
tmp = nr_free_pages << PAGE_SHIFT;
printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
tmp >> 10,
- (high_memory - PAGE_OFFSET) >> 10,
+ max_mapnr << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10));
@@ -280,18 +227,18 @@ void si_meminfo(struct sysinfo *val)
{
int i;
- i = high_memory >> PAGE_SHIFT;
+ i = MAP_NR(high_memory);
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
- if (mem_map[i] & MAP_PAGE_RESERVED)
+ if (PageReserved(mem_map+i))
continue;
val->totalram++;
- if (!mem_map[i])
+ if (!mem_map[i].count)
continue;
- val->sharedram += mem_map[i]-1;
+ val->sharedram += mem_map[i].count-1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
diff --git a/arch/mips/mm/stack.c b/arch/mips/mm/stack.c
new file mode 100644
index 000000000..3a4dccd26
--- /dev/null
+++ b/arch/mips/mm/stack.c
@@ -0,0 +1,26 @@
+/*
+ * Kernel stack allocation/deallocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996 by Ralf Baechle
+ *
+ * (This is _bad_ if the free page pool is fragmented ...)
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+extern unsigned long alloc_kernel_stack(void)
+{
+ unsigned long stack;
+ stack = __get_free_pages(GFP_KERNEL, 1, 0);
+
+ return stack;
+}
+
+extern void free_kernel_stack(unsigned long stack)
+{
+ free_pages(stack, 1);
+}