author    Ralf Baechle <ralf@linux-mips.org>    2000-01-27 01:05:20 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-01-27 01:05:20 +0000
commit    546db14ee74118296f425f3b91634fb767d67290 (patch)
tree      22b613a3da8d4bf663eec5e155af01b87fdf9094 /arch/mips
parent    1e25e41c4f5474e14452094492dbc169b800e4c8 (diff)
Merge with Linux 2.3.23. The new bootmem stuff has broken various
platforms. At this time I've only verified that IP22 support compiles and IP27 actually works.
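
The bulk of this merge is the switch from the old memory_start/memory_end bookkeeping to the 2.3.x bootmem allocator (init_bootmem, free_bootmem, alloc_bootmem_low_pages), as seen in arc/memory.c, jazz/jazzdma.c and mm/init.c below. The following is a minimal illustrative sketch of that calling pattern, not code from this commit; the helper name and the freed range are made up for the example.

/*
 * Illustrative sketch only -- not part of this commit.  It mirrors the
 * bootmem calling pattern the patch introduces in prom_meminit() and
 * vdma_init(): register the memory above the kernel image with the boot
 * allocator, release the firmware-reported free ranges, then satisfy
 * early allocations from it.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <asm/page.h>

extern char _end;	/* linker symbol: end of the kernel image */

static void __init example_bootmem_setup(unsigned long max_pfn)
{
	unsigned long start_pfn;
	void *pgtbl;

	/* First whole page frame above the kernel image (physical). */
	start_pfn = (((unsigned long) &_end - PAGE_OFFSET) + PAGE_SIZE - 1)
							>> PAGE_SHIFT;

	/* All pages start out reserved; the bitmap covers start_pfn..max_pfn. */
	init_bootmem(start_pfn, max_pfn);

	/* The platform then frees every range its firmware reports as
	   usable RAM; the 8 MB at physical 8 MB here is a made-up example. */
	free_bootmem(8 * 1024 * 1024, 8 * 1024 * 1024);

	/* Early users (e.g. the Jazz DMA page tables) allocate page-aligned
	   low memory straight from the boot allocator. */
	pgtbl = alloc_bootmem_low_pages(32 * 1024);
	if (!pgtbl)
		BUG();
}
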
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/arc/memory.c        214
-rw-r--r--  arch/mips/jazz/jazzdma.c       50
-rw-r--r--  arch/mips/jazz/setup.c         10
-rw-r--r--  arch/mips/kernel/setup.c       26
-rw-r--r--  arch/mips/ld.script.big        20
-rw-r--r--  arch/mips/ld.script.little     20
-rw-r--r--  arch/mips/lib/memcpy.S          4
-rw-r--r--  arch/mips/mm/andes.c           84
-rw-r--r--  arch/mips/mm/init.c           138
-rw-r--r--  arch/mips/mm/loadmmu.c          8
-rw-r--r--  arch/mips/mm/r2300.c           14
-rw-r--r--  arch/mips/mm/r4xx0.c          143
-rw-r--r--  arch/mips/mm/r6000.c          102
-rw-r--r--  arch/mips/mm/tfp.c              4
-rw-r--r--  arch/mips/mm/umap.c            14
-rw-r--r--  arch/mips/sgi/kernel/setup.c   11
-rw-r--r--  arch/mips/sni/setup.c           7
17 files changed, 491 insertions(+), 378 deletions(-)
diff --git a/arch/mips/arc/memory.c b/arch/mips/arc/memory.c
index 851a16e9d..a3b02dc67 100644
--- a/arch/mips/arc/memory.c
+++ b/arch/mips/arc/memory.c
@@ -4,13 +4,14 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: memory.c,v 1.7 1999/12/04 03:58:59 ralf Exp $
+ * $Id: memory.c,v 1.8 2000/01/17 23:32:46 ralf Exp $
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/config.h>
@@ -21,6 +22,8 @@
#undef DEBUG
+extern char _end;
+
struct linux_mdesc * __init prom_getmdesc(struct linux_mdesc *curr)
{
return romvec->get_mdesc(curr);
@@ -53,69 +56,87 @@ static char *arc_mtypes[8] = {
static struct prom_pmemblock prom_pblocks[PROM_MAX_PMEMBLOCKS];
-struct prom_pmemblock * __init prom_getpblock_array(void)
-{
- return &prom_pblocks[0];
-}
-
#define MEMTYPE_DONTUSE 0
#define MEMTYPE_PROM 1
#define MEMTYPE_FREE 2
-static int __init prom_memtype_classify (union linux_memtypes type)
+static inline int memtype_classify_arcs (union linux_memtypes type)
{
- if (prom_flags & PROM_FLAG_ARCS) {
switch (type.arcs) {
- case arcs_free:
- case arcs_fcontig:
- return MEMTYPE_FREE;
- case arcs_atmp:
- case arcs_aperm:
- return MEMTYPE_PROM;
- default:
- return MEMTYPE_DONTUSE;
+ case arcs_fcontig:
+ case arcs_free:
+ return MEMTYPE_FREE;
+ case arcs_atmp:
+ return MEMTYPE_PROM;
+ case arcs_eblock:
+ case arcs_rvpage:
+ case arcs_bmem:
+ case arcs_prog:
+ case arcs_aperm:
+ return MEMTYPE_DONTUSE;
+ default:
+ BUG();
}
- } else {
+ while(1); /* Nuke warning. */
+}
+
+static inline int memtype_classify_arc (union linux_memtypes type)
+{
switch (type.arc) {
- case arc_free:
- case arc_fcontig:
- return MEMTYPE_FREE;
- case arc_rvpage:
- case arc_atmp:
- case arc_aperm:
- return MEMTYPE_PROM;
- default:
- return MEMTYPE_DONTUSE;
+ case arc_free:
+ case arc_fcontig:
+ return MEMTYPE_FREE;
+ case arc_atmp:
+ return MEMTYPE_PROM;
+ case arc_eblock:
+ case arc_rvpage:
+ case arc_bmem:
+ case arc_prog:
+ case arc_aperm:
+ return MEMTYPE_DONTUSE;
+ default:
+ BUG();
}
- }
+ while(1); /* Nuke warning. */
+}
+
+static int __init prom_memtype_classify (union linux_memtypes type)
+{
+ if (prom_flags & PROM_FLAG_ARCS) /* SGI is ``different'' ... */
+		return memtype_classify_arcs(type);
+
+ return memtype_classify_arc(type);
}
-static void __init prom_setup_memupper(void)
+static unsigned long __init find_max_low_pfn(void)
{
struct prom_pmemblock *p, *highest;
- for(p = prom_getpblock_array(), highest = 0; p->size != 0; p++) {
- if(p->base == 0xdeadbeef)
- prom_printf("WHEEE, bogus pmemblock\n");
- if(!highest || p->base > highest->base)
+ for (p = prom_pblocks, highest = 0; p->size != 0; p++) {
+ if (!highest || p->base > highest->base)
highest = p;
}
- mips_memory_upper = highest->base + highest->size;
#ifdef DEBUG
- prom_printf("prom_setup_memupper: mips_memory_upper = %08lx\n",
- mips_memory_upper);
+	prom_printf("find_max_low_pfn: mips_memory_upper = %08lx\n", highest->base + highest->size);
#endif
+ return (highest->base + highest->size) >> PAGE_SHIFT;
}
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
+
void __init prom_meminit(void)
{
+ unsigned long start_pfn;
struct linux_mdesc *p;
int totram;
int i = 0;
- p = prom_getmdesc(PROM_NULL_MDESC);
#ifdef DEBUG
prom_printf("ARCS MEMORY DESCRIPTOR dump:\n");
+ p = prom_getmdesc(PROM_NULL_MDESC);
while(p) {
prom_printf("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n",
i, p, p->base, p->pages, mtypes(p->type));
@@ -123,87 +144,72 @@ void __init prom_meminit(void)
i++;
}
#endif
- p = prom_getmdesc(PROM_NULL_MDESC);
+
totram = 0;
+ p = prom_getmdesc(PROM_NULL_MDESC);
i = 0;
- while(p) {
- prom_pblocks[i].type = prom_memtype_classify (p->type);
- prom_pblocks[i].base = ((p->base<<PAGE_SHIFT) + 0x80000000);
- prom_pblocks[i].size = p->pages << PAGE_SHIFT;
- switch (prom_pblocks[i].type) {
- case MEMTYPE_FREE:
- totram += prom_pblocks[i].size;
+ while (p) {
+ prom_pblocks[i].type = prom_memtype_classify(p->type);
+ prom_pblocks[i].base = p->base << PAGE_SHIFT;
+ prom_pblocks[i].size = p->pages << PAGE_SHIFT;
+
+ switch (prom_pblocks[i].type) {
+ case MEMTYPE_FREE:
+ totram += prom_pblocks[i].size;
#ifdef DEBUG
- prom_printf("free_chunk[%d]: base=%08lx size=%d\n",
- i, prom_pblocks[i].base,
- prom_pblocks[i].size);
+ prom_printf("free_chunk[%d]: base=%08lx size=%d\n",
+ i, prom_pblocks[i].base,
+ prom_pblocks[i].size);
#endif
- i++;
- break;
- case MEMTYPE_PROM:
+ i++;
+ break;
+ case MEMTYPE_PROM:
#ifdef DEBUG
- prom_printf("prom_chunk[%d]: base=%08lx size=%d\n",
- i, prom_pblocks[i].base,
- prom_pblocks[i].size);
+ prom_printf("prom_chunk[%d]: base=%08lx size=%d\n",
+ i, prom_pblocks[i].base,
+ prom_pblocks[i].size);
#endif
- i++;
- break;
- default:
- break;
- }
- p = prom_getmdesc(p);
+ i++;
+ break;
+ default:
+ break;
+ }
+ p = prom_getmdesc(p);
}
- prom_pblocks[i].base = 0xdeadbeef;
- prom_pblocks[i].size = 0; /* indicates last elem. of array */
- printk("PROMLIB: Total free ram %d bytes (%dK,%dMB)\n",
- totram, (totram/1024), (totram/1024/1024));
+ prom_pblocks[i].size = 0;
/* Setup upper physical memory bound. */
- prom_setup_memupper();
-}
+ max_low_pfn = find_max_low_pfn();
-/* Called from mem_init() to fixup the mem_map page settings. */
-void __init prom_fixup_mem_map(unsigned long start, unsigned long end)
-{
- struct prom_pmemblock *p;
- int i, nents;
-
- /* Determine number of pblockarray entries. */
- p = prom_getpblock_array();
- for(i = 0; p[i].size; i++)
- ;
- nents = i;
-restart:
- while(start < end) {
- for(i = 0; i < nents; i++) {
- if((p[i].type == MEMTYPE_FREE) &&
- (start >= (p[i].base)) &&
- (start < (p[i].base + p[i].size))) {
- start = p[i].base + p[i].size;
- start &= PAGE_MASK;
- goto restart;
- }
- }
- set_bit(PG_reserved, &mem_map[MAP_NR(start)].flags);
- start += PAGE_SIZE;
- }
+ start_pfn = PFN_UP((unsigned long)&_end - PAGE_OFFSET);
+ init_bootmem(start_pfn, max_low_pfn);
+
+ for (i = 0; prom_pblocks[i].size; i++)
+ if (prom_pblocks[i].type == MEMTYPE_FREE)
+ free_bootmem(prom_pblocks[i].base, prom_pblocks[i].size);
+
+ printk("PROMLIB: Total free ram %d bytes (%dK,%dMB)\n",
+ totram, (totram/1024), (totram/1024/1024));
}
void __init prom_free_prom_memory (void)
{
- struct prom_pmemblock *p;
- unsigned long addr;
- unsigned long num_pages = 0;
-
- for(p = prom_getpblock_array(); p->size != 0; p++) {
- if (p->type == MEMTYPE_PROM) {
- for (addr = p->base; addr < p->base + p->size; addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
- atomic_set(&mem_map[MAP_NR(addr)].count, 1);
- free_page(addr);
- num_pages++;
- }
+ struct prom_pmemblock *p;
+ unsigned long freed = 0;
+ unsigned long addr;
+
+ for (p = prom_pblocks; p->size != 0; p++) {
+ if (p->type != MEMTYPE_PROM)
+ continue;
+
+ addr = PAGE_OFFSET + p->base;
+	while (addr < PAGE_OFFSET + p->base + p->size) {
+ ClearPageReserved(mem_map + MAP_NR(addr));
+ set_page_count(mem_map + MAP_NR(addr), 1);
+ free_page(addr);
+ addr += PAGE_SIZE;
+ freed += PAGE_SIZE;
+ }
}
- }
- printk ("Freeing prom memory: %dk freed\n",num_pages << (PAGE_SHIFT - 10));
+ printk("Freeing prom memory: %ldk freed\n", freed >> 10);
}
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index bb8007bf1..0990db6a0 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -10,8 +10,10 @@
* and return the more usual NULL pointer as logical address.
*/
#include <linux/kernel.h>
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/bootmem.h>
#include <asm/mipsregs.h>
#include <asm/jazz.h>
#include <asm/io.h>
@@ -26,7 +28,6 @@
#define CONF_DEBUG_VDMA 0
static unsigned long vdma_pagetable_start = 0;
-static unsigned long vdma_pagetable_end = 0;
/*
* Debug stuff
@@ -58,30 +59,31 @@ static inline void vdma_pgtbl_init(void)
/*
* Initialize the Jazz R4030 dma controller
*/
-unsigned long vdma_init(unsigned long memory_start, unsigned long memory_end)
+void __init vdma_init(void)
{
- /*
- * Allocate 32k of memory for DMA page tables.
- * This needs to be page aligned and should be
- * uncached to avoid cache flushing after every
- * update.
- */
- vdma_pagetable_start = KSEG1ADDR((memory_start + 4095) & ~4095);
- vdma_pagetable_end = vdma_pagetable_start + VDMA_PGTBL_SIZE;
- flush_cache_all();
-
- /*
- * Clear the R4030 translation table
- */
- vdma_pgtbl_init();
-
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,PHYSADDR(vdma_pagetable_start));
- r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM,VDMA_PGTBL_SIZE);
- r4030_write_reg32(JAZZ_R4030_TRSTBL_INV,0);
-
- printk("VDMA: R4030 DMA pagetables initialized.\n");
-
- return KSEG0ADDR(vdma_pagetable_end);
+ /*
+ * Allocate 32k of memory for DMA page tables. This needs to be page
+ * aligned and should be uncached to avoid cache flushing after every
+ * update.
+ */
+ vdma_pagetable_start = alloc_bootmem_low_pages(VDMA_PGTBL_SIZE);
+ if (!vdma_pagetable_start)
+ BUG();
+ dma_cache_wback_inv(vdma_pagetable_start, VDMA_PGTBL_SIZE);
+ vdma_pagetable_start = KSEG1ADDR(vdma_pagetable_start);
+
+ /*
+ * Clear the R4030 translation table
+ */
+ vdma_pgtbl_init();
+
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,PHYSADDR(vdma_pagetable_start));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
+
+ printk("VDMA: R4030 DMA pagetables initialized.\n");
}
/*
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 4b405f405..a16ce7b69 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.23 1999/10/07 07:31:14 raiko Exp $
+/* $Id: setup.c,v 1.24 1999/10/09 00:00:58 ralf Exp $
*
* Setup pointers to hardware-dependent routines.
*
@@ -23,6 +23,7 @@
#include <asm/keyboard.h>
#include <asm/irq.h>
#include <asm/jazz.h>
+#include <asm/jazzdma.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/io.h>
@@ -79,6 +80,11 @@ static void __init jazz_irq_setup(void)
i8259_setup_irq(2, &irq2);
}
+int __init page_is_ram(unsigned long pagenr)
+{
+ return 1;
+}
+
void __init jazz_setup(void)
{
add_wired_entry (0x02000017, 0x03c00017, 0xe0000000, PM_64K);
@@ -125,4 +131,6 @@ void __init jazz_setup(void)
rtc_ops = &jazz_rtc_ops;
kbd_ops = &jazz_kbd_ops;
+
+ vdma_init();
}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c1c556e1c..43cf5ad74 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.20 1999/10/09 00:00:58 ralf Exp $
+/* $Id: setup.c,v 1.21 2000/01/26 00:07:44 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -97,13 +97,6 @@ unsigned long mips_machgroup = MACH_GROUP_UNKNOWN;
unsigned char aux_device_present;
extern int _end;
-extern char empty_zero_page[PAGE_SIZE];
-
-/*
- * This is set up by the setup-routine at boot-time
- */
-#define PARAM empty_zero_page
-
static char command_line[CL_SIZE] = { 0, };
char saved_command_line[CL_SIZE];
extern char arcs_cmdline[CL_SIZE];
@@ -131,10 +124,8 @@ static void __init default_irq_setup(void)
panic("Unknown machtype in init_IRQ");
}
-void __init setup_arch(char **cmdline_p,
- unsigned long * memory_start_p, unsigned long * memory_end_p)
+void __init setup_arch(char **cmdline_p)
{
- unsigned long memory_end;
#ifdef CONFIG_BLK_DEV_INITRD
unsigned long tmp;
unsigned long *initrd_header;
@@ -204,25 +195,14 @@ void __init setup_arch(char **cmdline_p,
panic("Unsupported architecture");
}
- memory_end = mips_memory_upper;
- /*
- * Due to prefetching and similar mechanism the CPU sometimes
- * generates addresses beyond the end of memory. We leave the size
- * of one cache line at the end of memory unused to make shure we
- * don't catch this type of bus errors.
- */
- memory_end -= 128;
- memory_end &= PAGE_MASK;
-
strncpy (command_line, arcs_cmdline, CL_SIZE);
memcpy(saved_command_line, command_line, CL_SIZE);
saved_command_line[CL_SIZE-1] = '\0';
*cmdline_p = command_line;
- *memory_start_p = (unsigned long) &_end;
- *memory_end_p = memory_end;
#ifdef CONFIG_BLK_DEV_INITRD
+#error "Fixme, I'm broken."
tmp = (((unsigned long)&_end + PAGE_SIZE-1) & PAGE_MASK) - 8;
if (tmp < (unsigned long)&_end)
tmp += PAGE_SIZE;
diff --git a/arch/mips/ld.script.big b/arch/mips/ld.script.big
index 57cc8dba6..68ac48528 100644
--- a/arch/mips/ld.script.big
+++ b/arch/mips/ld.script.big
@@ -5,26 +5,6 @@ SECTIONS
{
/* Read-only sections, merged into text segment: */
. = 0x80000000;
- .rel.text : { *(.rel.text) }
- .rela.text : { *(.rela.text) }
- .rel.data : { *(.rel.data) }
- .rela.data : { *(.rela.data) }
- .rel.rodata : { *(.rel.rodata) }
- .rela.rodata : { *(.rela.rodata) }
- .rel.got : { *(.rel.got) }
- .rela.got : { *(.rela.got) }
- .rel.ctors : { *(.rel.ctors) }
- .rela.ctors : { *(.rela.ctors) }
- .rel.dtors : { *(.rel.dtors) }
- .rela.dtors : { *(.rela.dtors) }
- .rel.init : { *(.rel.init) }
- .rela.init : { *(.rela.init) }
- .rel.fini : { *(.rel.fini) }
- .rela.fini : { *(.rela.fini) }
- .rel.bss : { *(.rel.bss) }
- .rela.bss : { *(.rela.bss) }
- .rel.plt : { *(.rel.plt) }
- .rela.plt : { *(.rela.plt) }
.init : { *(.init) } =0
.text :
{
diff --git a/arch/mips/ld.script.little b/arch/mips/ld.script.little
index 1a396ce08..5ee17215d 100644
--- a/arch/mips/ld.script.little
+++ b/arch/mips/ld.script.little
@@ -5,26 +5,6 @@ SECTIONS
{
/* Read-only sections, merged into text segment: */
. = 0x80000000;
- .rel.text : { *(.rel.text) }
- .rela.text : { *(.rela.text) }
- .rel.data : { *(.rel.data) }
- .rela.data : { *(.rela.data) }
- .rel.rodata : { *(.rel.rodata) }
- .rela.rodata : { *(.rela.rodata) }
- .rel.got : { *(.rel.got) }
- .rela.got : { *(.rela.got) }
- .rel.ctors : { *(.rel.ctors) }
- .rela.ctors : { *(.rela.ctors) }
- .rel.dtors : { *(.rel.dtors) }
- .rela.dtors : { *(.rela.dtors) }
- .rel.init : { *(.rel.init) }
- .rela.init : { *(.rela.init) }
- .rel.fini : { *(.rel.fini) }
- .rela.fini : { *(.rela.fini) }
- .rel.bss : { *(.rel.bss) }
- .rela.bss : { *(.rela.bss) }
- .rel.plt : { *(.rel.plt) }
- .rela.plt : { *(.rela.plt) }
.init : { *(.init) } =0
.text :
{
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 907a471a6..4850b09ce 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * $Id: memcpy.S,v 1.2 1998/05/04 09:12:53 ralf Exp $
+ * $Id: memcpy.S,v 1.3 1998/07/10 01:14:49 ralf Exp $
*
* Unified implementation of memcpy, memmove and the __copy_user backend.
* For __rmemcpy and memmove an exception is always a kernel bug, therefore
@@ -688,8 +688,8 @@ ru_end_bytes:
jr ra
move a2, zero
- END(__rmemcpy)
#endif /* Horror fix */
+ END(__rmemcpy)
l_fixup: # clear the rest of the buffer
lw t0, THREAD_BUADDR($28)
diff --git a/arch/mips/mm/andes.c b/arch/mips/mm/andes.c
index 05a722cf3..3230106b8 100644
--- a/arch/mips/mm/andes.c
+++ b/arch/mips/mm/andes.c
@@ -1,4 +1,4 @@
-/* $Id: andes.c,v 1.7 1999/08/09 19:43:16 harald Exp $
+/* $Id: andes.c,v 1.8 1999/10/09 00:00:58 ralf Exp $
*
* andes.c: MMU and cache operations for the R10000 (ANDES).
*
@@ -14,6 +14,83 @@
#include <asm/sgialib.h>
#include <asm/mmu_context.h>
+/* page functions */
+void andes_clear_page(void * page)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "addiu\t$1,%0,%2\n"
+ "1:\tsw\t$0,(%0)\n\t"
+ "sw\t$0,4(%0)\n\t"
+ "sw\t$0,8(%0)\n\t"
+ "sw\t$0,12(%0)\n\t"
+ "addiu\t%0,32\n\t"
+ "sw\t$0,-16(%0)\n\t"
+ "sw\t$0,-12(%0)\n\t"
+ "sw\t$0,-8(%0)\n\t"
+ "bne\t$1,%0,1b\n\t"
+ "sw\t$0,-4(%0)\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=r" (page)
+ :"0" (page),
+ "I" (PAGE_SIZE)
+ :"$1","memory");
+}
+
+static void andes_copy_page(void * to, void * from)
+{
+ unsigned long dummy1, dummy2;
+ unsigned long reg1, reg2, reg3, reg4;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "addiu\t$1,%0,%8\n"
+ "1:\tlw\t%2,(%1)\n\t"
+ "lw\t%3,4(%1)\n\t"
+ "lw\t%4,8(%1)\n\t"
+ "lw\t%5,12(%1)\n\t"
+ "sw\t%2,(%0)\n\t"
+ "sw\t%3,4(%0)\n\t"
+ "sw\t%4,8(%0)\n\t"
+ "sw\t%5,12(%0)\n\t"
+ "lw\t%2,16(%1)\n\t"
+ "lw\t%3,20(%1)\n\t"
+ "lw\t%4,24(%1)\n\t"
+ "lw\t%5,28(%1)\n\t"
+ "sw\t%2,16(%0)\n\t"
+ "sw\t%3,20(%0)\n\t"
+ "sw\t%4,24(%0)\n\t"
+ "sw\t%5,28(%0)\n\t"
+ "addiu\t%0,64\n\t"
+ "addiu\t%1,64\n\t"
+ "lw\t%2,-32(%1)\n\t"
+ "lw\t%3,-28(%1)\n\t"
+ "lw\t%4,-24(%1)\n\t"
+ "lw\t%5,-20(%1)\n\t"
+ "sw\t%2,-32(%0)\n\t"
+ "sw\t%3,-28(%0)\n\t"
+ "sw\t%4,-24(%0)\n\t"
+ "sw\t%5,-20(%0)\n\t"
+ "lw\t%2,-16(%1)\n\t"
+ "lw\t%3,-12(%1)\n\t"
+ "lw\t%4,-8(%1)\n\t"
+ "lw\t%5,-4(%1)\n\t"
+ "sw\t%2,-16(%0)\n\t"
+ "sw\t%3,-12(%0)\n\t"
+ "sw\t%4,-8(%0)\n\t"
+ "bne\t$1,%0,1b\n\t"
+ "sw\t%5,-4(%0)\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=r" (dummy1), "=r" (dummy2),
+ "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
+ :"0" (to), "1" (from),
+ "I" (PAGE_SIZE));
+}
+
/* Cache operations. XXX Write these dave... */
static inline void andes_flush_cache_all(void)
{
@@ -38,7 +115,7 @@ static void andes_flush_cache_page(struct vm_area_struct *vma,
/* XXX */
}
-static void andes_flush_page_to_ram(unsigned long page)
+static void andes_flush_page_to_ram(struct page * page)
{
/* XXX */
}
@@ -86,6 +163,9 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
void __init ld_mmu_andes(void)
{
+ clear_page = andes_clear_page;
+ copy_page = andes_copy_page;
+
flush_cache_all = andes_flush_cache_all;
flush_cache_mm = andes_flush_cache_mm;
flush_cache_range = andes_flush_cache_range;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index bf77a7998..cf74a6dd5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -1,10 +1,11 @@
-/* $Id: init.c,v 1.19 1999/10/09 00:00:58 ralf Exp $
+/* $Id: init.c,v 1.20 2000/01/26 00:07:44 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994 - 1998 by Ralf Baechle
+ * Copyright (C) 1994 - 2000 by Ralf Baechle
+ * Copyright (C) 2000 Silicon Graphics, Inc.
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -18,6 +19,7 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#ifdef CONFIG_BLK_DEV_INITRD
@@ -35,7 +37,7 @@
#endif
#include <asm/mmu_context.h>
-static unsigned long totalram = 0;
+static unsigned long totalram_pages = 0;
extern void show_net_buffers(void);
extern void prom_fixup_mem_map(unsigned long start, unsigned long end);
@@ -45,13 +47,13 @@ extern void prom_free_prom_memory(void);
void __bad_pte_kernel(pmd_t *pmd)
{
printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
- pmd_val(*pmd) = BAD_PAGETABLE;
+ pmd_set(pmd, BAD_PAGETABLE);
}
void __bad_pte(pmd_t *pmd)
{
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- pmd_val(*pmd) = BAD_PAGETABLE;
+ pmd_set(pmd, BAD_PAGETABLE);
}
pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
@@ -61,11 +63,11 @@ pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
page = (pte_t *) __get_free_page(GFP_USER);
if (pmd_none(*pmd)) {
if (page) {
- clear_page((unsigned long)page);
+ clear_page(page);
pmd_val(*pmd) = (unsigned long)page;
return page + offset;
}
- pmd_val(*pmd) = BAD_PAGETABLE;
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
free_page((unsigned long)page);
@@ -83,11 +85,11 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
page = (pte_t *) __get_free_page(GFP_KERNEL);
if (pmd_none(*pmd)) {
if (page) {
- clear_page((unsigned long)page);
+ clear_page(page);
pmd_val(*pmd) = (unsigned long)page;
return page + offset;
}
- pmd_val(*pmd) = BAD_PAGETABLE;
+ pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
free_page((unsigned long)page);
@@ -135,7 +137,7 @@ static inline unsigned long setup_zero_pages(void)
panic("Oh boy, that early out of memory?");
pg = MAP_NR(empty_zero_page);
- while(pg < MAP_NR(empty_zero_page) + (1 << order)) {
+ while (pg < MAP_NR(empty_zero_page) + (1 << order)) {
set_bit(PG_reserved, &mem_map[pg].flags);
set_page_count(mem_map + pg, 0);
pg++;
@@ -145,7 +147,7 @@ static inline unsigned long setup_zero_pages(void)
zero_page_mask = (size - 1) & PAGE_MASK;
memset((void *)empty_zero_page, 0, size);
- return size;
+ return 1UL << order;
}
int do_check_pgt_cache(int low, int high)
@@ -201,10 +203,10 @@ pte_t * __bad_pagetable(void)
pte_t __bad_page(void)
{
extern char empty_bad_page[PAGE_SIZE];
- unsigned long page = (unsigned long)empty_bad_page;
+ unsigned long page = (unsigned long) empty_bad_page;
- clear_page(page);
- return pte_mkdirty(mk_pte(page, PAGE_SHARED));
+ clear_page((void *)page);
+ return pte_mkdirty(mk_pte_phys(__pa(page), PAGE_SHARED));
}
void show_mem(void)
@@ -233,85 +235,61 @@ void show_mem(void)
printk("%d pages swap cached\n",cached);
printk("%ld pages in page table cache\n",pgtable_cache_size);
printk("%d free pages\n", free);
+ show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
#endif
}
-extern unsigned long free_area_init(unsigned long, unsigned long);
+/* References to section boundaries */
-unsigned long __init paging_init(unsigned long start_mem, unsigned long end_mem)
+extern char _ftext, _etext, _fdata, _edata;
+extern char __init_begin, __init_end;
+
+void __init paging_init(void)
{
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
pgd_init((unsigned long)swapper_pg_dir + PAGE_SIZE / 2);
- return free_area_init(start_mem, end_mem);
+	free_area_init(max_low_pfn);
}
-void __init mem_init(unsigned long start_mem, unsigned long end_mem)
+extern int page_is_ram(unsigned long pagenr);
+
+void __init mem_init(void)
{
- int codepages = 0;
- int datapages = 0;
+ unsigned long codesize, reservedpages, datasize, initsize;
unsigned long tmp;
- extern int _etext, _ftext;
-
-#ifdef CONFIG_MIPS_JAZZ
- if (mips_machgroup == MACH_GROUP_JAZZ)
- start_mem = vdma_init(start_mem, end_mem);
-#endif
-
- end_mem &= PAGE_MASK;
- max_mapnr = MAP_NR(end_mem);
- high_memory = (void *)end_mem;
- num_physpages = 0;
-
- /* mark usable pages in the mem_map[] */
- start_mem = PAGE_ALIGN(start_mem);
- for(tmp = MAP_NR(start_mem);tmp < max_mapnr;tmp++)
- clear_bit(PG_reserved, &mem_map[tmp].flags);
+ max_mapnr = num_physpages = max_low_pfn;
+ high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
- prom_fixup_mem_map(start_mem, (unsigned long)high_memory);
+ totalram_pages += free_all_bootmem();
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
- for (tmp = PAGE_OFFSET; tmp < end_mem; tmp += PAGE_SIZE) {
+ reservedpages = 0;
+ for (tmp = 0; tmp < max_low_pfn; tmp++)
/*
- * This is only for PC-style DMA. The onboard DMA
- * of Jazz and Tyne machines is completely different and
- * not handled via a flag in mem_map_t.
+	 * Only count reserved RAM pages
*/
- if (tmp >= MAX_DMA_ADDRESS)
- clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
- if (PageReserved(mem_map+MAP_NR(tmp))) {
- if ((tmp < (unsigned long) &_etext) &&
- (tmp >= (unsigned long) &_ftext))
- codepages++;
- else if ((tmp < start_mem) &&
- (tmp > (unsigned long) &_etext))
- datapages++;
- continue;
- }
- num_physpages++;
- set_page_count(mem_map + MAP_NR(tmp), 1);
- totalram += PAGE_SIZE;
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_start || (tmp < initrd_start || tmp >=
- initrd_end))
-#endif
- free_page(tmp);
- }
- tmp = nr_free_pages << PAGE_SHIFT;
-
- /* Setup zeroed pages. */
- tmp -= setup_zero_pages();
-
- printk("Memory: %luk/%luk available (%dk kernel code, %dk data)\n",
- tmp >> 10,
- max_mapnr << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10));
+ if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
+ reservedpages++;
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_ftext;
+ datasize = (unsigned long) &_edata - (unsigned long) &_fdata;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
+ "%ldk data, %ldk init)\n",
+ (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10);
}
extern char __init_begin, __init_end;
+extern void prom_free_prom_memory(void);
void free_initmem(void)
{
@@ -319,12 +297,13 @@ void free_initmem(void)
prom_free_prom_memory ();
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+ addr = (unsigned long) &__init_begin;
+ while (addr < (unsigned long) &__init_end) {
+ ClearPageReserved(mem_map + MAP_NR(addr));
set_page_count(mem_map + MAP_NR(addr), 1);
free_page(addr);
- totalram += PAGE_SIZE;
+ totalram_pages++;
+ addr += PAGE_SIZE;
}
printk("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
@@ -332,10 +311,13 @@ void free_initmem(void)
void si_meminfo(struct sysinfo *val)
{
- val->totalram = totalram;
+ val->totalram = totalram_pages;
val->sharedram = 0;
- val->freeram = nr_free_pages << PAGE_SHIFT;
- val->bufferram = atomic_read(&buffermem);
+ val->freeram = nr_free_pages;
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = 0;
+ val->freehigh = 0;
+ val->mem_unit = PAGE_SIZE;
return;
}
diff --git a/arch/mips/mm/loadmmu.c b/arch/mips/mm/loadmmu.c
index ef7720527..cf4816889 100644
--- a/arch/mips/mm/loadmmu.c
+++ b/arch/mips/mm/loadmmu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
*
- * $Id: loadmmu.c,v 1.12 1999/09/18 20:48:03 harald Exp $
+ * $Id: loadmmu.c,v 1.13 1999/10/09 00:00:58 ralf Exp $
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -17,8 +17,8 @@
#include <asm/sgialib.h>
/* memory functions */
-void (*clear_page)(unsigned long page);
-void (*copy_page)(unsigned long to, unsigned long from);
+void (*clear_page)(void * page);
+void (*copy_page)(void * to, void * from);
/* Cache operations. */
void (*flush_cache_all)(void);
@@ -27,7 +27,7 @@ void (*flush_cache_range)(struct mm_struct *mm, unsigned long start,
unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
void (*flush_cache_sigtramp)(unsigned long addr);
-void (*flush_page_to_ram)(unsigned long page);
+void (*flush_page_to_ram)(struct page * page);
/* DMA cache operations. */
void (*dma_cache_wback_inv)(unsigned long start, unsigned long size);
diff --git a/arch/mips/mm/r2300.c b/arch/mips/mm/r2300.c
index 03e1a16d1..b0e72cd79 100644
--- a/arch/mips/mm/r2300.c
+++ b/arch/mips/mm/r2300.c
@@ -7,7 +7,7 @@
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
*
- * $Id: r2300.c,v 1.11 1999/10/09 00:00:58 ralf Exp $
+ * $Id: r2300.c,v 1.12 1999/10/12 17:33:49 harald Exp $
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -45,7 +45,7 @@ static struct cache_space {
#define NTLB_ENTRIES 64 /* Fixed on all R23000 variants... */
/* page functions */
-void r2300_clear_page(unsigned long page)
+void r2300_clear_page(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -69,7 +69,7 @@ void r2300_clear_page(unsigned long page)
:"$1","memory");
}
-static void r2300_copy_page(unsigned long to, unsigned long from)
+static void r2300_copy_page(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -200,8 +200,8 @@ static inline unsigned long get_phys_page (unsigned long page,
pte = *page_table;
if (!pte_present(pte))
return 0;
- return pte_page(pte);
- }
+ return pte_val(pte) & PAGE_MASK;
+ }
}
}
@@ -348,12 +348,12 @@ static void r2300_flush_cache_page(struct vm_area_struct *vma,
}
}
-static void r2300_flush_page_to_ram(unsigned long page)
+static void r2300_flush_page_to_ram(struct page * page)
{
/*
* We need to flush both i- & d- caches :-(
*/
- unsigned long phys_page = get_phys_page(page, NULL);
+ unsigned long phys_page = get_phys_page(page_address(page), NULL);
#ifdef DEBUG_CACHE
printk("cram[%08lx]", page);
#endif
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index 6005bca95..b5c76c801 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -1,4 +1,4 @@
-/* $Id: r4xx0.c,v 1.25 1999/10/09 00:00:58 ralf Exp $
+/* $Id: r4xx0.c,v 1.26 1999/10/21 00:23:04 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -78,7 +78,7 @@ struct bcache_ops *bcops = &no_sc_ops;
* versions of R4000 and R4400.
*/
-static void r4k_clear_page_d16(unsigned long page)
+static void r4k_clear_page_d16(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -109,7 +109,7 @@ static void r4k_clear_page_d16(unsigned long page)
:"$1","memory");
}
-static void r4k_clear_page_d32(unsigned long page)
+static void r4k_clear_page_d32(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -166,7 +166,7 @@ static void r4k_clear_page_d32(unsigned long page)
* nop
* cache Hit_Writeback_Invalidate_D
*/
-static void r4k_clear_page_r4600_v1(unsigned long page)
+static void r4k_clear_page_r4600_v1(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -205,7 +205,7 @@ static void r4k_clear_page_r4600_v1(unsigned long page)
/*
* And this one is for the R4600 V2.0
*/
-static void r4k_clear_page_r4600_v2(unsigned long page)
+static void r4k_clear_page_r4600_v2(void * page)
{
unsigned int flags;
@@ -248,7 +248,7 @@ static void r4k_clear_page_r4600_v2(unsigned long page)
* this the kernel crashed shortly after mounting the root filesystem. CPU
* bug? Weirdo cache instruction semantics?
*/
-static void r4k_clear_page_s16(unsigned long page)
+static void r4k_clear_page_s16(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -279,7 +279,7 @@ static void r4k_clear_page_s16(unsigned long page)
:"$1","memory");
}
-static void r4k_clear_page_s32(unsigned long page)
+static void r4k_clear_page_s32(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -308,7 +308,7 @@ static void r4k_clear_page_s32(unsigned long page)
:"$1","memory");
}
-static void r4k_clear_page_s64(unsigned long page)
+static void r4k_clear_page_s64(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -336,7 +336,7 @@ static void r4k_clear_page_s64(unsigned long page)
:"$1","memory");
}
-static void r4k_clear_page_s128(unsigned long page)
+static void r4k_clear_page_s128(void * page)
{
__asm__ __volatile__(
".set\tnoreorder\n\t"
@@ -378,7 +378,7 @@ static void r4k_clear_page_s128(unsigned long page)
* virtual address where the copy will be accessed.
*/
-static void r4k_copy_page_d16(unsigned long to, unsigned long from)
+static void r4k_copy_page_d16(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -437,7 +437,7 @@ static void r4k_copy_page_d16(unsigned long to, unsigned long from)
"i" (Create_Dirty_Excl_D));
}
-static void r4k_copy_page_d32(unsigned long to, unsigned long from)
+static void r4k_copy_page_d32(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -497,7 +497,7 @@ static void r4k_copy_page_d32(unsigned long to, unsigned long from)
/*
* Again a special version for the R4600 V1.x
*/
-static void r4k_copy_page_r4600_v1(unsigned long to, unsigned long from)
+static void r4k_copy_page_r4600_v1(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -562,7 +562,7 @@ static void r4k_copy_page_r4600_v1(unsigned long to, unsigned long from)
"i" (Create_Dirty_Excl_D));
}
-static void r4k_copy_page_r4600_v2(unsigned long to, unsigned long from)
+static void r4k_copy_page_r4600_v2(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -633,7 +633,7 @@ static void r4k_copy_page_r4600_v2(unsigned long to, unsigned long from)
/*
* These are for R4000SC / R4400MC
*/
-static void r4k_copy_page_s16(unsigned long to, unsigned long from)
+static void r4k_copy_page_s16(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -692,7 +692,7 @@ static void r4k_copy_page_s16(unsigned long to, unsigned long from)
"i" (Create_Dirty_Excl_SD));
}
-static void r4k_copy_page_s32(unsigned long to, unsigned long from)
+static void r4k_copy_page_s32(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -749,7 +749,7 @@ static void r4k_copy_page_s32(unsigned long to, unsigned long from)
"i" (Create_Dirty_Excl_SD));
}
-static void r4k_copy_page_s64(unsigned long to, unsigned long from)
+static void r4k_copy_page_s64(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -805,7 +805,7 @@ static void r4k_copy_page_s64(unsigned long to, unsigned long from)
"i" (Create_Dirty_Excl_SD));
}
-static void r4k_copy_page_s128(unsigned long to, unsigned long from)
+static void r4k_copy_page_s128(void * to, void * from)
{
unsigned long dummy1, dummy2;
unsigned long reg1, reg2, reg3, reg4;
@@ -1739,7 +1739,7 @@ static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
* If ownes no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
- if(mm->context == 0)
+ if (mm->context == 0)
return;
#ifdef DEBUG_CACHE
@@ -1962,110 +1962,119 @@ out:
* flush.
* 3) In KSEG1, no flush necessary.
*/
-static void r4k_flush_page_to_ram_s16d16i16(unsigned long page)
+static void r4k_flush_page_to_ram_s16d16i16(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache16_page(page);
+ blast_scache16_page(addr);
}
}
-static void r4k_flush_page_to_ram_s32d16i16(unsigned long page)
+static void r4k_flush_page_to_ram_s32d16i16(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache32_page(page);
+ blast_scache32_page(addr);
}
}
-static void r4k_flush_page_to_ram_s64d16i16(unsigned long page)
+static void r4k_flush_page_to_ram_s64d16i16(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache64_page(page);
+ blast_scache64_page(addr);
}
}
-static void r4k_flush_page_to_ram_s128d16i16(unsigned long page)
+static void r4k_flush_page_to_ram_s128d16i16(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache128_page(page);
+ blast_scache128_page(addr);
}
}
-static void r4k_flush_page_to_ram_s32d32i32(unsigned long page)
+static void r4k_flush_page_to_ram_s32d32i32(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache32_page(page);
+ blast_scache32_page(addr);
}
}
-static void r4k_flush_page_to_ram_s64d32i32(unsigned long page)
+static void r4k_flush_page_to_ram_s64d32i32(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache64_page(page);
+ blast_scache64_page(addr);
}
}
-static void r4k_flush_page_to_ram_s128d32i32(unsigned long page)
+static void r4k_flush_page_to_ram_s128d32i32(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- blast_scache128_page(page);
+ blast_scache128_page(addr);
}
}
-static void r4k_flush_page_to_ram_d16i16(unsigned long page)
+static void r4k_flush_page_to_ram_d16i16(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
unsigned long flags;
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- save_and_cli(flags);
- blast_dcache16_page(page);
- restore_flags(flags);
+ __save_and_cli(flags);
+ blast_dcache16_page(addr);
+ __restore_flags(flags);
}
}
-static void r4k_flush_page_to_ram_d32i32(unsigned long page)
+static void r4k_flush_page_to_ram_d32i32(struct page * page)
{
- page &= PAGE_MASK;
- if((page >= KSEG0 && page < KSEG1) || (page >= KSEG2)) {
+ unsigned long addr = page_address(page) & PAGE_MASK;
+
+ if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
unsigned long flags;
#ifdef DEBUG_CACHE
- printk("cram[%08lx]", page);
+ printk("cram[%08lx]", addr);
#endif
- save_and_cli(flags);
- blast_dcache32_page(page);
- restore_flags(flags);
+ __save_and_cli(flags);
+ blast_dcache32_page(addr);
+ __restore_flags(flags);
}
}
diff --git a/arch/mips/mm/r6000.c b/arch/mips/mm/r6000.c
index 9baf83b27..90728f89d 100644
--- a/arch/mips/mm/r6000.c
+++ b/arch/mips/mm/r6000.c
@@ -1,4 +1,4 @@
-/* $Id: r6000.c,v 1.7 1999/08/09 19:43:16 harald Exp $
+/* $Id: r6000.c,v 1.8 1999/10/09 00:00:58 ralf Exp $
*
* r6000.c: MMU and cache routines for the R6000 processors.
*
@@ -16,7 +16,84 @@
#include <asm/sgialib.h>
#include <asm/mmu_context.h>
-__asm__(".set mips3"); /* because we know... */
+__asm__(".set mips2"); /* because we know... */
+
+/* page functions */
+void r6000_clear_page(void * page)
+{
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "addiu\t$1,%0,%2\n"
+ "1:\tsw\t$0,(%0)\n\t"
+ "sw\t$0,4(%0)\n\t"
+ "sw\t$0,8(%0)\n\t"
+ "sw\t$0,12(%0)\n\t"
+ "addiu\t%0,32\n\t"
+ "sw\t$0,-16(%0)\n\t"
+ "sw\t$0,-12(%0)\n\t"
+ "sw\t$0,-8(%0)\n\t"
+ "bne\t$1,%0,1b\n\t"
+ "sw\t$0,-4(%0)\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=r" (page)
+ :"0" (page),
+ "I" (PAGE_SIZE)
+ :"$1","memory");
+}
+
+static void r6000_copy_page(void * to, void * from)
+{
+ unsigned long dummy1, dummy2;
+ unsigned long reg1, reg2, reg3, reg4;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "addiu\t$1,%0,%8\n"
+ "1:\tlw\t%2,(%1)\n\t"
+ "lw\t%3,4(%1)\n\t"
+ "lw\t%4,8(%1)\n\t"
+ "lw\t%5,12(%1)\n\t"
+ "sw\t%2,(%0)\n\t"
+ "sw\t%3,4(%0)\n\t"
+ "sw\t%4,8(%0)\n\t"
+ "sw\t%5,12(%0)\n\t"
+ "lw\t%2,16(%1)\n\t"
+ "lw\t%3,20(%1)\n\t"
+ "lw\t%4,24(%1)\n\t"
+ "lw\t%5,28(%1)\n\t"
+ "sw\t%2,16(%0)\n\t"
+ "sw\t%3,20(%0)\n\t"
+ "sw\t%4,24(%0)\n\t"
+ "sw\t%5,28(%0)\n\t"
+ "addiu\t%0,64\n\t"
+ "addiu\t%1,64\n\t"
+ "lw\t%2,-32(%1)\n\t"
+ "lw\t%3,-28(%1)\n\t"
+ "lw\t%4,-24(%1)\n\t"
+ "lw\t%5,-20(%1)\n\t"
+ "sw\t%2,-32(%0)\n\t"
+ "sw\t%3,-28(%0)\n\t"
+ "sw\t%4,-24(%0)\n\t"
+ "sw\t%5,-20(%0)\n\t"
+ "lw\t%2,-16(%1)\n\t"
+ "lw\t%3,-12(%1)\n\t"
+ "lw\t%4,-8(%1)\n\t"
+ "lw\t%5,-4(%1)\n\t"
+ "sw\t%2,-16(%0)\n\t"
+ "sw\t%3,-12(%0)\n\t"
+ "sw\t%4,-8(%0)\n\t"
+ "bne\t$1,%0,1b\n\t"
+ "sw\t%5,-4(%0)\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ :"=r" (dummy1), "=r" (dummy2),
+ "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
+ :"0" (to), "1" (from),
+ "I" (PAGE_SIZE));
+}
/* Cache operations. XXX Write these dave... */
static inline void r6000_flush_cache_all(void)
@@ -42,7 +119,7 @@ static void r6000_flush_cache_page(struct vm_area_struct *vma,
/* XXX */
}
-static void r6000_flush_page_to_ram(unsigned long page)
+static void r6000_flush_page_to_ram(struct page * page)
{
/* XXX */
}
@@ -83,22 +160,15 @@ void pgd_init(unsigned long page)
unsigned long dummy1, dummy2;
/*
- * This version is optimized for the R6000. We generate dirty lines
- * in the datacache, overwrite these lines with zeros and then flush
- * the cache. Sounds horribly complicated but is just a trick to
- * avoid unnecessary loads of from memory and uncached stores which
- * are very expensive. Not tested yet as the R6000 is a rare CPU only
- * available in SGI machines and I don't have one.
+	 * The plain and boring version for the R6000. No cache flushing
+	 * stuff is implemented since the R6000 has physical caches.
*/
__asm__ __volatile__(
".set\tnoreorder\n"
- "1:\t"
- "cache\t%5,(%0)\n\t"
- "sw\t%2,(%0)\n\t"
+ "1:\tsw\t%2,(%0)\n\t"
"sw\t%2,4(%0)\n\t"
"sw\t%2,8(%0)\n\t"
"sw\t%2,12(%0)\n\t"
- "cache\t%5,16(%0)\n\t"
"sw\t%2,16(%0)\n\t"
"sw\t%2,20(%0)\n\t"
"sw\t%2,24(%0)\n\t"
@@ -111,8 +181,7 @@ void pgd_init(unsigned long page)
"=r" (dummy2)
:"r" ((unsigned long) invalid_pte_table),
"0" (page),
- "1" (USER_PTRS_PER_PGD/8),
- "i" (Create_Dirty_Excl_D));
+ "1" (PAGE_SIZE/(sizeof(pmd_t)*8)));
}
void update_mmu_cache(struct vm_area_struct * vma,
@@ -166,6 +235,9 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
void __init ld_mmu_r6000(void)
{
+ clear_page = r6000_clear_page;
+ copy_page = r6000_copy_page;
+
flush_cache_all = r6000_flush_cache_all;
flush_cache_mm = r6000_flush_cache_mm;
flush_cache_range = r6000_flush_cache_range;
diff --git a/arch/mips/mm/tfp.c b/arch/mips/mm/tfp.c
index c0afcd21f..875a93927 100644
--- a/arch/mips/mm/tfp.c
+++ b/arch/mips/mm/tfp.c
@@ -1,4 +1,4 @@
-/* $Id: tfp.c,v 1.7 1999/08/09 19:43:17 harald Exp $
+/* $Id: tfp.c,v 1.8 1999/10/09 00:00:58 ralf Exp $
*
* tfp.c: MMU and cache routines specific to the r8000 (TFP).
*
@@ -42,7 +42,7 @@ static void tfp_flush_cache_page(struct vm_area_struct *vma,
/* XXX */
}
-static void tfp_flush_page_to_ram(unsigned long page)
+static void tfp_flush_page_to_ram(struct page * page)
{
/* XXX */
}
diff --git a/arch/mips/mm/umap.c b/arch/mips/mm/umap.c
index 7ce0d8e5a..73b5d643d 100644
--- a/arch/mips/mm/umap.c
+++ b/arch/mips/mm/umap.c
@@ -114,16 +114,16 @@ void *vmalloc_uncached (unsigned long size)
static inline void free_pte(pte_t page)
{
if (pte_present(page)) {
- unsigned long addr = pte_page(page);
- if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
+ unsigned long nr = pte_pagenr(page);
+ if (nr >= max_mapnr || PageReserved(mem_map+nr))
return;
- free_page(addr);
+ __free_page(pte_page(page));
if (current->mm->rss <= 0)
return;
current->mm->rss--;
return;
}
- swap_free(pte_val(page));
+ swap_free(page);
}
static inline void forget_pte(pte_t page)
@@ -152,15 +152,15 @@ vmap_pte_range (pte_t *pte, unsigned long address, unsigned long size, unsigned
end = PMD_SIZE;
do {
pte_t oldpage = *pte;
- unsigned long page;
+ struct page * page;
pte_clear(pte);
vdir = pgd_offset_k (vaddr);
vpmd = pmd_offset (vdir, vaddr);
vpte = pte_offset (vpmd, vaddr);
page = pte_page (*vpte);
-
- set_pte(pte, mk_pte_phys(page, PAGE_USERIO));
+
+ set_pte(pte, mk_pte(page, PAGE_USERIO));
forget_pte(oldpage);
address += PAGE_SIZE;
vaddr += PAGE_SIZE;
diff --git a/arch/mips/sgi/kernel/setup.c b/arch/mips/sgi/kernel/setup.c
index e49ade621..49df754f9 100644
--- a/arch/mips/sgi/kernel/setup.c
+++ b/arch/mips/sgi/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.27 1999/10/21 00:23:05 ralf Exp $
+/* $Id: setup.c,v 1.28 1999/12/08 11:35:38 ralf Exp $
*
* setup.c: SGI specific setup, including init of the feature struct.
*
@@ -128,6 +128,15 @@ static void __init sgi_irq_setup(void)
#endif
}
+int __init page_is_ram(unsigned long pagenr)
+{
+ if (pagenr < MAP_NR(PAGE_OFFSET + 0x2000UL))
+ return 1;
+ if (pagenr > MAP_NR(PAGE_OFFSET + 0x08002000))
+ return 1;
+ return 0;
+}
+
void __init sgi_setup(void)
{
#ifdef CONFIG_SERIAL_CONSOLE
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 5bcd016ee..767e2fc11 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.12 1999/10/09 00:00:59 ralf Exp $
+/* $Id: setup.c,v 1.13 1999/12/04 03:59:00 ralf Exp $
*
* Setup pointers to hardware-dependent routines.
*
@@ -103,6 +103,11 @@ static inline void sni_pcimt_detect(void)
printk("%s.\n", boardtype);
}
+int __init page_is_ram(unsigned long pagenr)
+{
+ return 1;
+}
+
void __init sni_rm200_pci_setup(void)
{
tag *atag;