author     Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
commit     dcec8a13bf565e47942a1751a9cec21bec5648fe (patch)
tree       548b69625b18cc2e88c3e68d0923be546c9ebb03 /arch/sparc/mm
parent     2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4 (diff)
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile      |   16
-rw-r--r--  arch/sparc/mm/btfixup.c     |  334
-rw-r--r--  arch/sparc/mm/fault.c       |    4
-rw-r--r--  arch/sparc/mm/hypersparc.S  |    9
-rw-r--r--  arch/sparc/mm/init.c        |  114
-rw-r--r--  arch/sparc/mm/io-unit.c     |  152
-rw-r--r--  arch/sparc/mm/iommu.c       |   45
-rw-r--r--  arch/sparc/mm/loadmmu.c     |  136
-rw-r--r--  arch/sparc/mm/nosrmmu.c     |   50
-rw-r--r--  arch/sparc/mm/nosun4c.c     |   77
-rw-r--r--  arch/sparc/mm/srmmu.c       | 1623
-rw-r--r--  arch/sparc/mm/sun4c.c       |  629
-rw-r--r--  arch/sparc/mm/turbosparc.S  |    4
-rw-r--r--  arch/sparc/mm/viking.S      |   78

14 files changed, 1980 insertions, 1291 deletions
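
The centerpiece of this diff, for arch/sparc/mm, is the new boot-time fixup pass
(btfixup.c below): instead of dispatching every MMU operation through a function
pointer, call sites are recorded in a fixup table and rewritten once at boot into
direct calls, NOPs, or inlined immediates, so a single kernel image can serve both
sun4c and srmmu machines. A minimal sketch of the core idea follows; the record
layout and names here are illustrative only, not the kernel's actual btfixup table
format (the full format is what btfixup() below walks):

    /* Hypothetical sketch: patch each recorded call site with a direct
     * SPARC CALL.  The real btfixup() additionally validates the fixup
     * table, rewrites SETHI/OR immediates, and optimizes delay slots. */
    struct fixup_record {
            unsigned int *insn;     /* address of the call site to patch */
            unsigned long target;   /* routine chosen for this machine   */
    };

    static void patch_calls(struct fixup_record *r, int n)
    {
            while (n--) {
                    /* CALL = op 01 | signed 30-bit word displacement */
                    long disp = ((long)r->target - (long)r->insn) >> 2;
                    *r->insn = 0x40000000U | ((unsigned int)disp & 0x3fffffff);
                    r++;
            }
            /* caches must then be flushed, as btfixup() does via
             * flush_cache_all(), before the patched code runs */
    }

This corresponds to the BTFIXUPCALL_NORM case in btfixup() below, which emits
0x40000000 | ((p[1] - (unsigned)addr) >> 2) at each call site.
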
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index bae5d323a..18eeb1f52 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.27 1997/11/07 15:01:27 jj Exp $
+# $Id: Makefile,v 1.30 1998/03/09 14:03:53 jj Exp $
 # Makefile for the linux Sparc-specific parts of the memory manager.
 #
 # Note! Dependencies are done automagically by 'make dep', which also
@@ -8,9 +8,17 @@
 # Note 2! The CFLAGS definition is now in the main makefile...
 
 O_TARGET := mm.o
-O_OBJS   := fault.o init.o sun4c.o srmmu.o hypersparc.o viking.o \
-	    tsunami.o loadmmu.o generic.o asyncd.o extable.o \
-	    turbosparc.o iommu.o io-unit.o
+O_OBJS   := fault.o init.o loadmmu.o generic.o asyncd.o extable.o btfixup.o
+ifeq ($(CONFIG_SUN4),y)
+O_OBJS += nosrmmu.o
+else
+O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o turbosparc.o
+endif
+ifdef SMP
+O_OBJS += nosun4c.o
+else
+O_OBJS += sun4c.o
+endif
 
 include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
new file mode 100644
index 000000000..e61ccc158
--- /dev/null
+++ b/arch/sparc/mm/btfixup.c
@@ -0,0 +1,334 @@
+/* $Id: btfixup.c,v 1.7 1998/03/09 14:03:56 jj Exp $
+ * btfixup.c: Boot time code fixup and relocator, so that
+ * we can get rid of most indirect calls to achieve single
+ * image sun4c and srmmu kernel.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/btfixup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+
+#define BTFIXUP_OPTIMIZE_NOP
+#define BTFIXUP_OPTIMIZE_OTHER
+
+extern char *srmmu_name;
+static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
+#ifdef CONFIG_SUN4
+static char str_sun4c[] __initdata = "sun4\n";
+#else
+static char str_sun4c[] __initdata = "sun4c\n";
+#endif
+static char str_srmmu[] __initdata = "srmmu[%s]/";
+static char str_iommu[] __initdata = "iommu\n";
+static char str_iounit[] __initdata = "io-unit\n";
+
+static int visited __initdata = 0;
+extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
+extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
+static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
+static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
+static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
+static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
+static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
+static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
+static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
+static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
+static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
+static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
+static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
+static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
+static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
+
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+__initfunc(static void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value))
+{
+	if (!fmangled)
+		*addr = value;
+	else {
+		unsigned int *q = (unsigned int *)q1;
+		if (*addr == 0x01000000) {
+			/* Noped */
+			*q = value;
+		} else if (addr[-1] == *q) {
+			/* Moved */
+			addr[-1] = value;
+			*q = value;
+		} else {
+			prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
+			prom_halt();
+		}
+	}
+}
+#else
+static __inline__ void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
+{
+	*addr = value;
+}
+#endif
+
+__initfunc(void btfixup(void))
+{
+	unsigned int *p, *q;
+	int type, count;
+	unsigned insn;
+	unsigned *addr;
+	int fmangled = 0;
+	void (*flush_cacheall)(void);
+
+	if (!visited) {
+		visited++;
+		printk(version);
+		if (ARCH_SUN4C_SUN4)
+			printk(str_sun4c);
+		else {
+			printk(str_srmmu, srmmu_name);
+			if (sparc_cpu_model == sun4d)
+				printk(str_iounit);
+			else
+				printk(str_iommu);
+		}
+	}
+	for (p = ___btfixup_start; p < ___btfixup_end; ) {
+		count = p[2];
+		q = p + 3;
+		switch (type = *(unsigned char *)p) {
+		case 'f':
+			count = p[3];
+			q = p + 4;
+			if (((p[0] & 1) || p[1])
+			    && ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
+				prom_printf(wrong_f, p, p[1]);
+				prom_halt();
+			}
+			break;
+		case 'b':
+			if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
+				prom_printf(wrong_b, p, p[1]);
+				prom_halt();
+			}
+			break;
+		case 's':
+			if (p[1] + 0x1000 >= 0x2000) {
+				prom_printf(wrong_s, p, p[1]);
+				prom_halt();
+			}
+			break;
+		case 'h':
+			if (p[1] & 0x3ff) {
+				prom_printf(wrong_h, p, p[1]);
+				prom_halt();
+			}
+			break;
+		case 'a':
+			if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
+				prom_printf(wrong_a, p, p[1]);
+				prom_halt();
+			}
+			break;
+		}
+		if (p[0] & 1) {
+			p[0] &= ~1;
+			while (count) {
+				fmangled = 0;
+				addr = (unsigned *)*q;
+				if (addr < _stext || addr >= _end) {
+					prom_printf(wrong, type, p);
+					prom_halt();
+				}
+				insn = *addr;
+#ifdef BTFIXUP_OPTIMIZE_OTHER
+				if (type != 'f' && q[1]) {
+					insn = *(unsigned int *)q[1];
+					if (!insn || insn == 1)
+						insn = *addr;
+					else
+						fmangled = 1;
+				}
+#endif
+				switch (type) {
+				case 'f':	/* CALL */
+					if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
+						*addr = p[1];
+						break;
+					} else if (!q[1]) {
+						if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
+							*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
+						} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
+							*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
+						} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
+			bad_f:
+							prom_printf(insn_f, p, addr, insn, addr[1]);
+							prom_halt();
+						}
+					} else if (q[1] != 1)
+						addr[1] = q[1];
+					if (p[2] == BTFIXUPCALL_NORM) {
+			norm_f:
+						*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
+						q[1] = 0;
+						break;
+					}
+#ifndef BTFIXUP_OPTIMIZE_NOP
+					goto norm_f;
+#else
+					if (!(addr[1] & 0x80000000)) {
+						if ((addr[1] & 0xc1c00000) != 0x01000000)	/* !SETHI */
+							goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
+					} else {
+						if ((addr[1] & 0x01800000) == 0x01800000) {
+							if ((addr[1] & 0x01f80000) == 0x01e80000) {
+								/* RESTORE */
+								goto norm_f; /* It is dangerous to patch that */
+							}
+							goto bad_f;
+						}
+						if ((addr[1] & 0xffffe003) == 0x9e03e000) {
+							/* ADD %O7, XX, %o7 */
+							int displac = (addr[1] << 19);
+
+							displac = (displac >> 21) + 2;
+							*addr = (0x10800000) + (displac & 0x3fffff);
+							q[1] = addr[1];
+							addr[1] = p[2];
+							break;
+						}
+						if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
+							goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
+						if ((addr[1] & 0x3e000000) == 0x1e000000)
+							goto norm_f; /* rd is %o7. We'd better take care. */
+					}
+					if (p[2] == BTFIXUPCALL_NOP) {
+						*addr = 0x01000000;
+						q[1] = 1;
+						break;
+					}
+#ifndef BTFIXUP_OPTIMIZE_OTHER
+					goto norm_f;
+#else
+					if (addr[1] == 0x01000000) {	/* NOP in the delay slot */
+						q[1] = addr[1];
+						*addr = p[2];
+						break;
+					}
+					if ((addr[1] & 0xc0000000) != 0xc0000000) {
+						/* Not a memory operation */
+						if ((addr[1] & 0x30000000) == 0x10000000) {
+							/* Ok, non-memory op with rd %oX */
+							if ((addr[1] & 0x3e000000) == 0x1c000000)
+								goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
+							if ((addr[1] & 0x3e000000) > 0x12000000 ||
+							    ((addr[1] & 0x3e000000) == 0x12000000 &&
+							     p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
+							    ((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
+								/* Nobody uses the result. We can nop it out. */
+								*addr = p[2];
+								q[1] = addr[1];
+								addr[1] = 0x01000000;
+								break;
+							}
+							if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
+								/* MOV %reg, %Ox */
+								if ((addr[1] & 0x3e000000) == 0x10000000 &&
+								    (p[2] & 0x7c000) == 0x20000) {
+									/* Ok, it is call xx; mov reg, %o0 and call optimizes
+									   to doing something on %o0. Patch the patch. */
+									*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
+									q[1] = addr[1];
+									addr[1] = 0x01000000;
+									break;
+								}
+								if ((addr[1] & 0x3e000000) == 0x12000000 &&
+								    p[2] == BTFIXUPCALL_STO1O0) {
+									*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
+									q[1] = addr[1];
+									addr[1] = 0x01000000;
+									break;
+								}
+							}
+						}
+					}
+					*addr = addr[1];
+					q[1] = addr[1];
+					addr[1] = p[2];
+					break;
+#endif /* BTFIXUP_OPTIMIZE_OTHER */
+#endif /* BTFIXUP_OPTIMIZE_NOP */
+				case 'b':	/* BLACKBOX */
+					/* Has to be sethi i, xx */
+					if ((insn & 0xc1c00000) != 0x01000000) {
+						prom_printf(insn_b, p, addr, insn);
+						prom_halt();
+					} else {
+						void (*do_fixup)(unsigned *);
+
+						do_fixup = (void (*)(unsigned *))p[1];
+						do_fixup(addr);
+					}
+					break;
+				case 's':	/* SIMM13 */
+					/* Has to be or %g0, i, xx */
+					if ((insn & 0xc1ffe000) != 0x80102000) {
+						prom_printf(insn_s, p, addr, insn);
+						prom_halt();
+					}
+					set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
+					break;
+				case 'h':	/* SETHI */
+					/* Has to be sethi i, xx */
+					if ((insn & 0xc1c00000) != 0x01000000) {
+						prom_printf(insn_h, p, addr, insn);
+						prom_halt();
+					}
+					set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+					break;
+				case 'a':	/* HALF */
+					/* Has to be sethi i, xx or or %g0, i, xx */
+					if ((insn & 0xc1c00000) != 0x01000000 &&
+					    (insn & 0xc1ffe000) != 0x80102000) {
+						prom_printf(insn_a, p, addr, insn);
+						prom_halt();
+					}
+					if (p[1] & 0x3ff)
+						set_addr(addr, q[1], fmangled,
+							 (insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
+					else
+						set_addr(addr, q[1], fmangled,
+							 (insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
+					break;
+				case 'i':	/* INT */
+					if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
+						set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
+					else if ((insn & 0x80002000) == 0x80002000 &&
+						 (insn & 0x01800000) != 0x01800000) /* %LO */
+						set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
+					else {
+						prom_printf(insn_i, p, addr, insn);
+						prom_halt();
+					}
+					break;
+				}
+				count -= 2;
+				q += 2;
+			}
+		} else
+			p = q + count;
+	}
+#ifdef __SMP__
+	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
+#else
+	flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
+#endif
+	if (!flush_cacheall) {
+		prom_printf(fca_und);
+		prom_halt();
+	}
+	(*flush_cacheall)();
+}
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 0d6490860..274b9eebf 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.92 1997/05/15 21:14:21 davem Exp $
+/* $Id: fault.c,v 1.93 1998/03/25 10:43:16 jj Exp $
  * fault.c:  Page fault handlers for the Sparc.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -271,7 +271,7 @@ bad_area:
 #endif
 	tsk->tss.sig_address = address;
 	tsk->tss.sig_desc = SUBSIG_NOMAPPING;
-	send_sig(SIGSEGV, tsk, 1);
+	force_sig(SIGSEGV, tsk);
 	goto out;
 }
 unhandled_fault (address, tsk, regs);
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 2c27bfdab..dcf3fd990 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -1,4 +1,4 @@
-/* $Id: hypersparc.S,v 1.12 1997/11/27 15:42:30 jj Exp $
+/* $Id: hypersparc.S,v 1.13 1998/02/13 15:35:09 jj Exp $
  * hypersparc.S: High speed Hypersparc mmu/cache operations.
  *
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -301,14 +301,13 @@ hypersparc_flush_tlb_range:
 	cmp	%o3, -1
 	be	hypersparc_flush_tlb_range_out
 #endif
-	srl	%o1, SRMMU_PGDIR_SHIFT, %o1
+	 sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
 	sta	%o3, [%g1] ASI_M_MMUREGS
-	sll	%o1, SRMMU_PGDIR_SHIFT, %o1
-	sethi	%hi(1 << SRMMU_PGDIR_SHIFT), %o4
+	and	%o1, %o4, %o1
 	add	%o1, 0x200, %o1
 	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
 1:
-	add	%o1, %o4, %o1
+	sub	%o1, %o4, %o1
 	cmp	%o1, %o2
 	blu,a	1b
 	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index aa85666c6..db6559214 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,8 +1,9 @@
-/* $Id: init.c,v 1.50 1998/01/10 18:19:42 ecd Exp $
+/* $Id: init.c,v 1.59 1998/03/27 06:59:57 davem Exp $
  * linux/arch/sparc/mm/init.c
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
  * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/config.h>
@@ -30,11 +31,18 @@
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
 
+/* Turn this off if you suspect some place in some physical memory hole
+   might get into page tables (something would be broken very much). */
+
+#define FREE_UNUSED_MEM_MAP
+
 extern void show_net_buffers(void);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
 unsigned long sparc_unmapped_base;
 
+struct pgtable_cache_struct pgt_quicklists;
+
 /* References to section boundaries */
 
 extern char __init_begin, __init_end, etext;
@@ -65,26 +73,38 @@ pte_t __bad_page(void)
 
 void show_mem(void)
 {
-	int i,free = 0,total = 0,reserved = 0;
-	int shared = 0;
+	int free = 0,total = 0,reserved = 0;
+	int shared = 0, cached = 0;
+	struct page *page, *end;
 
 	printk("\nMem-info:\n");
 	show_free_areas();
 	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	while (i-- > 0) {
+	for (page = mem_map, end = mem_map + max_mapnr;
+	     page < end; page++) {
+		if (PageSkip(page)) {
+			if (page->next_hash < page)
+				break;
+			page = page->next_hash;
+		}
 		total++;
-		if (PageReserved(mem_map + i))
+		if (PageReserved(page))
 			reserved++;
-		else if (!atomic_read(&mem_map[i].count))
+		else if (PageSwapCache(page))
+			cached++;
+		else if (!atomic_read(&page->count))
 			free++;
 		else
-			shared += atomic_read(&mem_map[i].count) - 1;
+			shared += atomic_read(&page->count) - 1;
 	}
 	printk("%d pages of RAM\n",total);
 	printk("%d free pages\n",free);
 	printk("%d reserved pages\n",reserved);
 	printk("%d pages shared\n",shared);
+	printk("%d pages swap cached\n",cached);
+	printk("%ld page tables cached\n",pgtable_cache_size);
+	if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
+		printk("%ld page dirs cached\n", pgd_cache_size);
 	show_buffers();
 #ifdef CONFIG_NET
 	show_net_buffers();
@@ -128,19 +148,23 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
 	switch(sparc_cpu_model) {
 	case sun4c:
 	case sun4e:
+	case sun4:
 		start_mem = sun4c_paging_init(start_mem, end_mem);
 		sparc_unmapped_base = 0xe0000000;
+		BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
 		break;
 	case sun4m:
 	case sun4d:
 		start_mem = srmmu_paging_init(start_mem, end_mem);
 		sparc_unmapped_base = 0x50000000;
+		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
 		break;
 	case ap1000:
 #if CONFIG_AP1000
 		start_mem = apmmu_paging_init(start_mem, end_mem);
 		sparc_unmapped_base = 0x50000000;
+		BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
 		break;
 #endif
@@ -168,6 +192,7 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
 	protection_map[13] = PAGE_READONLY;
 	protection_map[14] = PAGE_SHARED;
 	protection_map[15] = PAGE_SHARED;
+	btfixup();
 	return device_scan(start_mem);
 }
@@ -175,7 +200,7 @@ struct cache_palias *sparc_aliases;
 
 extern void srmmu_frob_mem_map(unsigned long);
 
-int physmem_mapped_contig = 1;
+int physmem_mapped_contig __initdata = 1;
 
 __initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
 {
@@ -210,7 +235,8 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 	int codepages = 0;
 	int datapages = 0;
 	int initpages = 0;
-	unsigned long tmp2, addr;
+	unsigned long addr;
+	struct page *page, *end;
 
 	/* Saves us work later. */
 	memset((void *) ZERO_PAGE, 0, PAGE_SIZE);
@@ -220,33 +246,60 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 	high_memory = (void *) end_mem;
 
 	start_mem = PAGE_ALIGN(start_mem);
-	num_physpages = (start_mem - KERNBASE) >> PAGE_SHIFT;
+	num_physpages = 0;
 
 	addr = KERNBASE;
 	while(addr < start_mem) {
 #ifdef CONFIG_BLK_DEV_INITRD
-		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end) {
+		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
 			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
-			num_physpages--;
-		} else
+		else
 #endif
 			mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
 		addr += PAGE_SIZE;
 	}
 
 	taint_real_pages(start_mem, end_mem);
+
+#ifdef FREE_UNUSED_MEM_MAP
+	end = mem_map + max_mapnr;
+	for (page = mem_map; page < end; page++) {
+		if (PageSkip(page)) {
+			unsigned long low, high;
+
+			low = PAGE_ALIGN((unsigned long)(page+1));
+			if (page->next_hash < page)
+				high = ((unsigned long)end) & PAGE_MASK;
+			else
+				high = ((unsigned long)page->next_hash) & PAGE_MASK;
+			while (low < high) {
+				mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved);
+				low += PAGE_SIZE;
+			}
+		}
+	}
+#endif
+
 	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+		if (PageSkip(mem_map + MAP_NR(addr))) {
+			unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;
+
+			next = (next << PAGE_SHIFT) + PAGE_OFFSET;
+			if (next < addr || next >= end_mem)
+				break;
+			addr = next;
+		}
+		num_physpages++;
 		if(PageReserved(mem_map + MAP_NR(addr))) {
 			if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
 				codepages++;
-			else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
-				initpages++;
-			else if((addr < start_mem) && (addr >= KERNBASE))
+			else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
+				initpages++;
+			else if((addr < start_mem) && (addr >= KERNBASE))
 				datapages++;
 			continue;
 		}
 		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
-		num_physpages++;
 #ifdef CONFIG_BLK_DEV_INITRD
 		if (!initrd_start ||
 		    (addr < initrd_start || addr >= initrd_end))
@@ -254,14 +307,12 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 			free_page(addr);
 	}
 
-	tmp2 = nr_free_pages << PAGE_SHIFT;
-
-	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
-	       tmp2 >> 10,
+	printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
+	       nr_free_pages << (PAGE_SHIFT-10),
 	       codepages << (PAGE_SHIFT-10),
 	       datapages << (PAGE_SHIFT-10),
 	       initpages << (PAGE_SHIFT-10),
-	       PAGE_OFFSET, end_mem);
+	       (unsigned long)PAGE_OFFSET, end_mem);
 
 	freepages.min = nr_free_pages >> 7;
 	if(freepages.min < 16)
@@ -284,20 +335,25 @@ void free_initmem (void)
 
 void si_meminfo(struct sysinfo *val)
 {
-	int i;
+	struct page *page, *end;
 
-	i = MAP_NR(high_memory);
 	val->totalram = 0;
 	val->sharedram = 0;
 	val->freeram = nr_free_pages << PAGE_SHIFT;
 	val->bufferram = buffermem;
-	while (i-- > 0)  {
-		if (PageReserved(mem_map + i))
+	for (page = mem_map, end = mem_map + max_mapnr;
+	     page < end; page++) {
+		if (PageSkip(page)) {
+			if (page->next_hash < page)
+				break;
+			page = page->next_hash;
+		}
+		if (PageReserved(page))
 			continue;
 		val->totalram++;
-		if (!atomic_read(&mem_map[i].count))
+		if (!atomic_read(&page->count))
 			continue;
-		val->sharedram += atomic_read(&mem_map[i].count) - 1;
+		val->sharedram += atomic_read(&page->count) - 1;
 	}
 	val->totalram <<= PAGE_SHIFT;
 	val->sharedram <<= PAGE_SHIFT;
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 519c124c9..d293fc71c 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -1,7 +1,7 @@
-/* $Id: io-unit.c,v 1.5 1997/12/22 16:09:26 jj Exp $
+/* $Id: io-unit.c,v 1.10 1998/03/03 12:31:14 jj Exp $
  * io-unit.c:  IO-UNIT specific routines for memory management.
  *
- * Copyright (C) 1997 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/config.h>
@@ -13,28 +13,41 @@
 #include <asm/io.h>
 #include <asm/io-unit.h>
 #include <asm/mxcc.h>
+#include <asm/spinlock.h>
+#include <asm/bitops.h>
+
+/* #define IOUNIT_DEBUG */
+#ifdef IOUNIT_DEBUG
+#define IOD(x) printk(x)
+#else
+#define IOD(x) do { } while (0)
+#endif
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
 #define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
-#define MKIOPTE(phys) ((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
 
-unsigned long sun4d_dma_base;
-unsigned long sun4d_dma_vbase;
-unsigned long sun4d_dma_size;
 __initfunc(unsigned long
 iounit_init(int sbi_node, int io_node, unsigned long memory_start,
 	    unsigned long memory_end, struct linux_sbus *sbus))
 {
 	iopte_t *xpt, *xptend;
-	unsigned long paddr;
 	struct iounit_struct *iounit;
 	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
 
 	memory_start = LONG_ALIGN(memory_start);
 	iounit = (struct iounit_struct *)memory_start;
-	memory_start += sizeof(struct iounit_struct);
+	memory_start = LONG_ALIGN(memory_start + sizeof(struct iounit_struct));
 
+	memset(iounit, 0, sizeof(*iounit));
+	iounit->limit[0] = IOUNIT_BMAP1_START;
+	iounit->limit[1] = IOUNIT_BMAP2_START;
+	iounit->limit[2] = IOUNIT_BMAPM_START;
+	iounit->limit[3] = IOUNIT_BMAPM_END;
+	iounit->rotor[1] = IOUNIT_BMAP2_START;
+	iounit->rotor[2] = IOUNIT_BMAPM_START;
+
 	prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
 			 sizeof(iommu_promregs));
 	prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
@@ -46,11 +59,6 @@ iounit_init(int sbi_node, int io_node, unsigned long memory_start,
 	sbus->iommu = (struct iommu_struct *)iounit;
 	iounit->page_table = xpt;
 
-	/* Initialize new table. */
-	paddr = IOUNIT_DMA_BASE - sun4d_dma_base;
-	for (xptend = xpt + (sun4d_dma_size >> PAGE_SHIFT);
-	     xpt < xptend; paddr++)
-		*xpt++ = MKIOPTE(paddr);
 	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
 	     xpt < xptend;)
 		*xpt++ = 0;
@@ -58,36 +66,108 @@ iounit_init(int sbi_node, int io_node, unsigned long memory_start,
 	return memory_start;
 }
 
+/* One has to hold iounit->lock to call this */
+static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+{
+	int i, j, k, npages;
+	unsigned long rotor, scan, limit;
+	iopte_t iopte;
+
+	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+	/* A tiny bit of magic ingredience :) */
+	switch (npages) {
+	case 1: i = 0x0231; break;
+	case 2: i = 0x0132; break;
+	default: i = 0x0213; break;
+	}
+
+	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+
+next:	j = (i & 15);
+	rotor = iounit->rotor[j - 1];
+	limit = iounit->limit[j];
+	scan = rotor;
+nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
+	if (scan + npages > limit) {
+		if (limit != rotor) {
+			limit = rotor;
+			scan = iounit->limit[j - 1];
+			goto nexti;
+		}
+		i >>= 4;
+		if (!(i & 15))
+			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+		goto next;
+	}
+	for (k = 1, scan++; k < npages; k++)
+		if (test_bit(scan++, iounit->bmap))
+			goto nexti;
+	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
+	scan -= npages;
+	iopte = MKIOPTE(mmu_v2p(vaddr & PAGE_MASK));
+	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
+		set_bit(scan, iounit->bmap);
+		iounit->page_table[scan] = iopte;
+	}
+	IOD(("%08lx\n", vaddr));
+	return vaddr;
+}
+
 static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
 {
-	/* Viking MXCC is IO coherent, just need to translate the address to DMA handle */
-#ifdef IOUNIT_DEBUG
-	if ((((unsigned long) vaddr) & PAGE_MASK) < sun4d_dma_vaddr ||
-	    (((unsigned long) vaddr) & PAGE_MASK) + len > sun4d_dma_vbase + sun4d_dma_size)
-		panic("Using non-DMA memory for iounit_get_scsi_one");
-#endif
-	return (__u32)(sun4d_dma_base + mmu_v2p((long)vaddr));
+	unsigned long ret, flags;
+	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+	spin_lock_irqsave(&iounit->lock, flags);
+	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+	spin_unlock_irqrestore(&iounit->lock, flags);
+	return ret;
 }
 
 static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
 {
-	/* Viking MXCC is IO coherent, just need to translate the address to DMA handle */
+	unsigned long flags;
+	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
+	spin_lock_irqsave(&iounit->lock, flags);
 	for (; sz >= 0; sz--) {
-#ifdef IOUNIT_DEBUG
-		unsigned long page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
-		if (page < sun4d_dma_vbase || page + sg[sz].len > sun4d_dma_vbase + sun4d_dma_size)
-			panic("Using non-DMA memory for iounit_get_scsi_sgl");
-#endif
-		sg[sz].dvma_addr = (__u32) (sun4d_dma_base + mmu_v2p((long)sg[sz].addr));;
+		sg[sz].dvma_addr = iounit_get_area(iounit, (unsigned long)sg[sz].addr, sg[sz].len);
 	}
+	spin_unlock_irqrestore(&iounit->lock, flags);
 }
 
 static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
 {
+	unsigned long flags;
+	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+	spin_lock_irqsave(&iounit->lock, flags);
+	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+	for (len += vaddr; vaddr < len; vaddr++)
+		clear_bit(vaddr, iounit->bmap);
+	spin_unlock_irqrestore(&iounit->lock, flags);
 }
 
 static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
 {
+	unsigned long flags;
+	unsigned long vaddr, len;
+	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
+
+	spin_lock_irqsave(&iounit->lock, flags);
+	for (; sz >= 0; sz--) {
+		len = ((sg[sz].dvma_addr & ~PAGE_MASK) + sg[sz].len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+		vaddr = (sg[sz].dvma_addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+		for (len += vaddr; vaddr < len; vaddr++)
+			clear_bit(vaddr, iounit->bmap);
+	}
+	spin_unlock_irqrestore(&iounit->lock, flags);
 }
 
 #ifdef CONFIG_SBUS
@@ -135,24 +215,26 @@ static void iounit_map_dma_area(unsigned long addr, int len)
 
 static char *iounit_lockarea(char *vaddr, unsigned long len)
 {
+/* FIXME: Write this */
 	return vaddr;
 }
 
 static void iounit_unlockarea(char *vaddr, unsigned long len)
 {
+/* FIXME: Write this */
 }
 
 __initfunc(void ld_mmu_iounit(void))
 {
-	mmu_lockarea = iounit_lockarea;
-	mmu_unlockarea = iounit_unlockarea;
+	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
+	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
 
-	mmu_get_scsi_one = iounit_get_scsi_one;
-	mmu_get_scsi_sgl = iounit_get_scsi_sgl;
-	mmu_release_scsi_one = iounit_release_scsi_one;
-	mmu_release_scsi_sgl = iounit_release_scsi_sgl;
+	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
+	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
 
 #ifdef CONFIG_SBUS
-	mmu_map_dma_area = iounit_map_dma_area;
+	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
 #endif
 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 301946326..e46216233 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -1,10 +1,10 @@
-/* $Id: iommu.c,v 1.4 1997/11/21 17:31:31 jj Exp $
+/* $Id: iommu.c,v 1.7 1998/02/22 10:32:26 ecd Exp $
  * iommu.c:  IOMMU specific routines for memory management.
  *
  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
- * Copyright (C) 1997 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/config.h>
@@ -18,8 +18,10 @@
 
 /* srmmu.c */
 extern int viking_mxcc_present;
-extern void (*flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 extern int flush_page_for_dma_global;
+static int viking_flush = 0;
 /* viking.S */
 extern void viking_flush_page(unsigned long page);
 extern void viking_mxcc_flush_page(unsigned long page);
@@ -113,7 +115,7 @@ iommu_init(int iommund, unsigned long memory_start,
 			viking_mxcc_flush_page(start);
 			start += PAGE_SIZE;
 		}
-	} else if(flush_page_for_dma == viking_flush_page) {
+	} else if (viking_flush) {
 		unsigned long start = (unsigned long) iommu->page_table;
 		unsigned long end = (start + ptsize);
 		while(start < end) {
@@ -199,7 +201,7 @@ static void iommu_map_dma_area(unsigned long addr, int len)
 	pgprot_t dvma_prot;
 	struct iommu_struct *iommu = SBus_chain->iommu;
 	iopte_t *iopte = iommu->page_table;
-	iopte_t *iopte_first = iopte;
+	iopte_t *first;
 
 	if(viking_mxcc_present)
 		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
@@ -207,6 +209,7 @@ static void iommu_map_dma_area(unsigned long addr, int len)
 		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
 
 	iopte += ((addr - iommu->start) >> PAGE_SHIFT);
+	first = iopte;
 	end = PAGE_ALIGN((addr + len));
 	while(addr < end) {
 		page = get_free_page(GFP_KERNEL);
@@ -223,21 +226,20 @@ static void iommu_map_dma_area(unsigned long addr, int len)
 			ptep = pte_offset(pmdp, addr);
 
 			set_pte(ptep, pte_val(mk_pte(page, dvma_prot)));
-
 			iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page));
 		}
 		addr += PAGE_SIZE;
 	}
 	flush_cache_all();
 	if(viking_mxcc_present) {
-		unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+		unsigned long start = ((unsigned long) first) & PAGE_MASK;
 		unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
 		while(start < end) {
 			viking_mxcc_flush_page(start);
 			start += PAGE_SIZE;
 		}
-	} else if(flush_page_for_dma == viking_flush_page) {
-		unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
+	} else if(viking_flush) {
+		unsigned long start = ((unsigned long) first) & PAGE_MASK;
 		unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
 		while(start < end) {
 			viking_flush_page(start);
@@ -260,25 +262,26 @@ static void iommu_unlockarea(char *vaddr, unsigned long len)
 
 __initfunc(void ld_mmu_iommu(void))
 {
-	mmu_lockarea = iommu_lockarea;
-	mmu_unlockarea = iommu_unlockarea;
+	viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
+	BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
+	BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
 
-	if (!flush_page_for_dma) {
+	if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
 		/* IO coherent chip */
-		mmu_get_scsi_one = iommu_get_scsi_one_noflush;
-		mmu_get_scsi_sgl = iommu_get_scsi_sgl_noflush;
+		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
+		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
 	} else if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		mmu_get_scsi_one = iommu_get_scsi_one_gflush;
-		mmu_get_scsi_sgl = iommu_get_scsi_sgl_gflush;
+		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
 	} else {
-		mmu_get_scsi_one = iommu_get_scsi_one_pflush;
-		mmu_get_scsi_sgl = iommu_get_scsi_sgl_pflush;
+		BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
+		BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
 	}
-	mmu_release_scsi_one = iommu_release_scsi_one;
-	mmu_release_scsi_sgl = iommu_release_scsi_sgl;
+	BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NOP);
+	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NOP);
 
 #ifdef CONFIG_SBUS
-	mmu_map_dma_area = iommu_map_dma_area;
+	BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
 #endif
 }
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
index 10eebecce..b38eea6d8 100644
--- a/arch/sparc/mm/loadmmu.c
+++ b/arch/sparc/mm/loadmmu.c
@@ -1,9 +1,10 @@
-/* $Id: loadmmu.c,v 1.46 1997/04/10 05:12:51 davem Exp $
+/* $Id: loadmmu.c,v 1.50 1998/02/05 14:19:02 jj Exp $
  * loadmmu.c:  This code loads up all the mm function pointers once the
  *             machine type has been determined.  It also sets the static
  *             mmu values such as PAGE_NONE, etc.
  *
  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
+ * Copyright (C) 1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/kernel.h>
@@ -16,6 +17,7 @@
 #include <asm/pgtable.h>
 #include <asm/a.out.h>
 #include <asm/mmu_context.h>
+#include <asm/oplib.h>
 
 unsigned long page_offset = 0xf0000000;
 unsigned long stack_top = 0xf0000000 - PAGE_SIZE;
@@ -24,132 +26,8 @@ struct ctx_list *ctx_list_pool;
 struct ctx_list ctx_free;
 struct ctx_list ctx_used;
 
-unsigned long (*alloc_kernel_stack)(struct task_struct *tsk);
-void (*free_kernel_stack)(unsigned long stack);
-struct task_struct *(*alloc_task_struct)(void);
-void (*free_task_struct)(struct task_struct *tsk);
-
-void (*quick_kernel_fault)(unsigned long);
-
-void (*init_new_context)(struct mm_struct *mm);
-void (*destroy_context)(struct mm_struct *mm);
-
-/* translate between physical and virtual addresses */
-unsigned long (*mmu_v2p)(unsigned long);
-unsigned long (*mmu_p2v)(unsigned long);
-
-char *(*mmu_lockarea)(char *, unsigned long);
-void (*mmu_unlockarea)(char *, unsigned long);
-
-__u32 (*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
-void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
-void (*mmu_release_scsi_one)(__u32, unsigned long, struct linux_sbus *sbus);
-void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
-
-void (*mmu_map_dma_area)(unsigned long addr, int len);
-
-void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
-
-#ifdef __SMP__
-void (*local_flush_cache_all)(void);
-void (*local_flush_cache_mm)(struct mm_struct *);
-void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
-				unsigned long end);
-void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
-
-void (*local_flush_tlb_all)(void);
-void (*local_flush_tlb_mm)(struct mm_struct *);
-void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
-			      unsigned long end);
-void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
-void (*local_flush_page_to_ram)(unsigned long address);
-void (*local_flush_sig_insns)(struct mm_struct *mm, unsigned long insn_addr);
-#endif
-
-void (*flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *);
-void (*flush_cache_range)(struct mm_struct *, unsigned long start,
-			  unsigned long end);
-void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
-
-void (*flush_tlb_all)(void);
-void (*flush_tlb_mm)(struct mm_struct *);
-void (*flush_tlb_range)(struct mm_struct *, unsigned long start,
-			unsigned long end);
-void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
-
-void (*flush_page_to_ram)(unsigned long page);
-
-void (*flush_sig_insns)(struct mm_struct *mm, unsigned long insn_addr);
-
-void (*set_pte)(pte_t *pteptr, pte_t pteval);
-
-unsigned int pmd_shift, pmd_size, pmd_mask;
-unsigned int (*pmd_align)(unsigned int);
-unsigned int pgdir_shift, pgdir_size, pgdir_mask;
-unsigned int (*pgdir_align)(unsigned int);
-unsigned int ptrs_per_pte, ptrs_per_pmd, ptrs_per_pgd;
 unsigned int pg_iobits;
-
-pgprot_t page_none, page_shared, page_copy, page_readonly, page_kernel;
-
-unsigned long (*pte_page)(pte_t);
-unsigned long (*pmd_page)(pmd_t);
-unsigned long (*pgd_page)(pgd_t);
-
-void (*sparc_update_rootmmu_dir)(struct task_struct *, pgd_t *pgdir);
-unsigned long (*(vmalloc_start))(void);
-void (*switch_to_context)(struct task_struct *tsk);
-
-int (*pte_none)(pte_t);
-int (*pte_present)(pte_t);
-void (*pte_clear)(pte_t *);
-
-int (*pmd_none)(pmd_t);
-int (*pmd_bad)(pmd_t);
-int (*pmd_present)(pmd_t);
-void (*pmd_clear)(pmd_t *);
-
-int (*pgd_none)(pgd_t);
-int (*pgd_bad)(pgd_t);
-int (*pgd_present)(pgd_t);
-void (*pgd_clear)(pgd_t *);
-
-pte_t (*mk_pte)(unsigned long, pgprot_t);
-pte_t (*mk_pte_phys)(unsigned long, pgprot_t);
-pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
-void (*pgd_set)(pgd_t *, pmd_t *);
-pte_t (*pte_modify)(pte_t, pgprot_t);
-pgd_t * (*pgd_offset)(struct mm_struct *, unsigned long);
-pmd_t * (*pmd_offset)(pgd_t *, unsigned long);
-pte_t * (*pte_offset)(pmd_t *, unsigned long);
-void (*pte_free_kernel)(pte_t *);
-pte_t * (*pte_alloc_kernel)(pmd_t *, unsigned long);
-
-void (*pmd_free_kernel)(pmd_t *);
-pmd_t * (*pmd_alloc_kernel)(pgd_t *, unsigned long);
-void (*pte_free)(pte_t *);
-pte_t * (*pte_alloc)(pmd_t *, unsigned long);
-
-void (*pmd_free)(pmd_t *);
-pmd_t * (*pmd_alloc)(pgd_t *, unsigned long);
-void (*pgd_free)(pgd_t *);
-
-pgd_t * (*pgd_alloc)(void);
-
-int (*pte_write)(pte_t);
-int (*pte_dirty)(pte_t);
-int (*pte_young)(pte_t);
-
-pte_t (*pte_wrprotect)(pte_t);
-pte_t (*pte_mkclean)(pte_t);
-pte_t (*pte_mkold)(pte_t);
-pte_t (*pte_mkwrite)(pte_t);
-pte_t (*pte_mkdirty)(pte_t);
-pte_t (*pte_mkyoung)(pte_t);
-
-char *(*mmu_info)(void);
-
 extern void ld_mmu_sun4c(void);
 extern void ld_mmu_srmmu(void);
 
@@ -157,6 +35,7 @@ __initfunc(void load_mmu(void))
 {
 	switch(sparc_cpu_model) {
 	case sun4c:
+	case sun4:
 		ld_mmu_sun4c();
 		break;
 	case sun4m:
@@ -169,9 +48,8 @@ __initfunc(void load_mmu(void))
 		break;
 #endif
 	default:
-		printk("load_mmu:MMU support not available for this architecture\n");
-		printk("load_mmu:sparc_cpu_model = %d\n", (int) sparc_cpu_model);
-		printk("load_mmu:Halting...\n");
-		panic("load_mmu()");
+		prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
+		prom_halt();
 	}
+	btfixup();
 }
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
new file mode 100644
index 000000000..f82599f42
--- /dev/null
+++ b/arch/sparc/mm/nosrmmu.c
@@ -0,0 +1,50 @@
+/* $Id: nosrmmu.c,v 1.1 1998/03/09 14:04:15 jj Exp $
+ * nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
+ *            so that it does not need srmmu and avoid ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/mbus.h>
+
+static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";
+
+enum mbus_module srmmu_modtype;
+
+__initfunc(static void should_not_happen(void))
+{
+	prom_printf(shouldnothappen);
+	prom_halt();
+}
+
+__initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
+{
+	should_not_happen();
+}
+
+__initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem))
+{
+	should_not_happen();
+	return 0;
+}
+
+__initfunc(void ld_mmu_srmmu(void))
+{
+	should_not_happen();
+}
+
+void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+__initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *mem_end_p))
+{
+	return 0;
+}
diff --git a/arch/sparc/mm/nosun4c.c b/arch/sparc/mm/nosun4c.c
new file mode 100644
index 000000000..7da883a31
--- /dev/null
+++ b/arch/sparc/mm/nosun4c.c
@@ -0,0 +1,77 @@
+/* $Id: nosun4c.c,v 1.1 1998/03/09 14:04:16 jj Exp $
+ * nosun4c.c: This file is a bunch of dummies for SMP compiles,
+ *            so that it does not need sun4c and avoid ifdefs.
+ *
+ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/pgtable.h>
+
+static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
+
+/* Dummies */
+struct sun4c_mmu_ring {
+	unsigned long xxx1[3];
+	unsigned char xxx2[2];
+	int xxx3;
+};
+struct sun4c_mmu_ring sun4c_kernel_ring;
+struct sun4c_mmu_ring sun4c_kfree_ring;
+unsigned long sun4c_kernel_faults;
+unsigned long *sun4c_memerr_reg;
+
+__initfunc(static void should_not_happen(void))
+{
+	prom_printf(shouldnothappen);
+	prom_halt();
+}
+
+__initfunc(unsigned long sun4c_paging_init(unsigned long start_mem, unsigned long end_mem))
+{
+	should_not_happen();
+	return 0;
+}
+
+__initfunc(void ld_mmu_sun4c(void))
+{
+	should_not_happen();
+}
+
+void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+}
+
+void sun4c_unmapioaddr(unsigned long virt_addr)
+{
+}
+
+void sun4c_complete_all_stores(void)
+{
+}
+
+pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+	return NULL;
+}
+
+pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
+{
+	return NULL;
+}
+
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+__initfunc(void sun4c_probe_vac(void))
+{
+	should_not_happen();
+}
+
+__initfunc(void sun4c_probe_memerr_reg(void))
+{
+	should_not_happen();
+}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index b16e3cc1e..f9794125d 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,10 +1,10 @@
-/* $Id: srmmu.c,v 1.156 1997/11/28 14:23:42 jj Exp $
+/* $Id: srmmu.c,v 1.170 1998/03/09 14:04:01 jj Exp $
  * srmmu.c:  SRMMU specific routines for memory management.
  *
  * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
  * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
  * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
- * Copyright (C) 1997 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  */
 
 #include <linux/config.h>
@@ -30,6 +30,7 @@
 #include <asm/a.out.h>
 #include <asm/mmu_context.h>
 #include <asm/io-unit.h>
+#include <asm/spinlock.h>
 
 /* Now the cpu specific definitions. */
 #include <asm/viking.h>
@@ -39,6 +40,11 @@
 #include <asm/swift.h>
 #include <asm/turbosparc.h>
 
+#include <asm/btfixup.h>
+
+/* #define DEBUG_MAP_KERNEL */
+/* #define PAGESKIP_DEBUG */
+
 enum mbus_module srmmu_modtype;
 unsigned int hwbug_bitmask;
 int vac_cache_size;
@@ -47,10 +53,6 @@ int vac_badbits;
 
 extern unsigned long sparc_iobase_vaddr;
 
-extern unsigned long sun4d_dma_base;
-extern unsigned long sun4d_dma_size;
-extern unsigned long sun4d_dma_vbase;
-
 #ifdef __SMP__
 #define FLUSH_BEGIN(mm)
 #define FLUSH_END
@@ -60,16 +62,24 @@ extern unsigned long sun4d_dma_vbase;
 #endif
 
 static int phys_mem_contig;
-long page_contig_offset;
+BTFIXUPDEF_SETHI(page_contig_offset)
+
+BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
+BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
 
-static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
-static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
+#define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
+#define pmd_set(pmdp,ptep)  BTFIXUP_CALL(pmd_set)(pmdp,ptep)
 
-void (*flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
+BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)
+
+#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 int flush_page_for_dma_global = 1;
-static void (*flush_chunk)(unsigned long chunk);
+#define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
 #ifdef __SMP__
-static void (*local_flush_page_for_dma)(unsigned long page);
+BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
+
+#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
 #endif
 
 static struct srmmu_stats {
@@ -79,7 +89,7 @@ static struct srmmu_stats {
 	int invmm;
 } module_stats;
 
-static char *srmmu_name;
+char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
 ctxd_t *srmmu_context_table;
@@ -96,8 +106,8 @@ static struct srmmu_trans {
 #define SRMMU_HASHSZ	256
 
 /* Not static, viking.S uses it. */
-struct srmmu_trans *srmmu_v2p_hash[SRMMU_HASHSZ];
-static struct srmmu_trans *srmmu_p2v_hash[SRMMU_HASHSZ];
+unsigned long srmmu_v2p_hash[SRMMU_HASHSZ];
+static unsigned long srmmu_p2v_hash[SRMMU_HASHSZ];
 
 #define srmmu_ahashfn(addr)	((addr) >> 24)
 
@@ -111,20 +121,17 @@ int viking_mxcc_present = 0;
  */
 static inline unsigned long srmmu_v2p(unsigned long vaddr)
 {
-	struct srmmu_trans *tp = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];
-
-	if(tp)
-		return (vaddr - tp->vbase + tp->pbase);
-	else
-		return 0xffffffffUL;
+	unsigned long off = srmmu_v2p_hash[srmmu_ahashfn(vaddr)];
+
+	return (vaddr + off);
 }
 
 static inline unsigned long srmmu_p2v(unsigned long paddr)
 {
-	struct srmmu_trans *tp = srmmu_p2v_hash[srmmu_ahashfn(paddr)];
-
-	if(tp)
-		return (paddr - tp->pbase + tp->vbase);
+	unsigned long off = srmmu_p2v_hash[srmmu_ahashfn(paddr)];
+
+	if (off != 0xffffffffUL)
+		return (paddr - off);
 	else
 		return 0xffffffffUL;
 }
@@ -132,16 +139,47 @@ static inline unsigned long srmmu_p2v(unsigned long paddr)
 /* Physical memory on most SS1000/SC2000 can be contiguous, so we handle that case
  * as a special case to make things faster.
  */
+/* FIXME: gcc is stupid here and generates very very bad code in this
+ * heavily used routine. So we help it a bit. */
 static inline unsigned long srmmu_c_v2p(unsigned long vaddr)
 {
+#if KERNBASE != 0xf0000000
 	if (vaddr >= KERNBASE) return vaddr - KERNBASE;
-	return (vaddr - page_contig_offset);
+	return vaddr - BTFIXUP_SETHI(page_contig_offset);
+#else
+	register unsigned long kernbase;
+
+	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
+	return vaddr - ((vaddr >= kernbase) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
+#endif
 }
 
static inline unsigned long srmmu_c_p2v(unsigned long paddr)
 {
+#if KERNBASE != 0xf0000000
 	if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE;
-	return (paddr + page_contig_offset);
+	return (paddr + BTFIXUP_SETHI(page_contig_offset));
+#else
+	register unsigned long kernbase;
+	register unsigned long limit;
+
+	__asm__ ("sethi %%hi(0x0d000000), %0" : "=r"(limit));
+	__asm__ ("sethi %%hi(0xf0000000), %0" : "=r"(kernbase));
+
+	return paddr + ((paddr < limit) ? kernbase : BTFIXUP_SETHI(page_contig_offset));
+#endif
+}
+
+/* On boxes where there is no lots_of_ram, KERNBASE is mapped to PA<0> and highest
+   PA is below 0x0d000000, we can optimize even more :) */
+static inline unsigned long srmmu_s_v2p(unsigned long vaddr)
+{
+	return vaddr - PAGE_OFFSET;
+}
+
+static inline unsigned long srmmu_s_p2v(unsigned long paddr)
+{
+	return paddr + PAGE_OFFSET;
 }
 
 /* In general all page table modifications should use the V8 atomic
@@ -157,19 +195,43 @@ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
 /* Functions really use this, not srmmu_swap directly. */
 #define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
 
+#ifdef PAGESKIP_DEBUG
+#define PGSKIP_DEBUG(from,to) prom_printf("PG_skip %ld->%ld\n", (long)(from), (long)(to)); printk("PG_skip %ld->%ld\n", (long)(from), (long)(to))
+#else
+#define PGSKIP_DEBUG(from,to) do { } while (0)
+#endif
+
 __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
 {
-	unsigned long bank_start, bank_end;
+	unsigned long bank_start, bank_end = 0;
 	unsigned long addr;
 	int i;
 
 	/* First, mark all pages as invalid. */
 	for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
 		mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+
+	/* Next, pg[0-3] is sun4c cruft, so we can free it... */
+	mem_map[MAP_NR(pg0)].flags &= ~(1<<PG_reserved);
+	mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
+	mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
+	mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);
 
 	start_mem = PAGE_ALIGN(start_mem);
 	for(i = 0; srmmu_map[i].size; i++) {
 		bank_start = srmmu_map[i].vbase;
+
+		if (i && bank_start - bank_end > 2 * PAGE_SIZE) {
+			mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+			mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
+			PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
+			if (bank_end > KERNBASE && bank_start < KERNBASE) {
+				mem_map[0].flags |= (1<<PG_skip);
+				mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
+				PGSKIP_DEBUG(0, MAP_NR(bank_start));
+			}
+		}
+
 		bank_end = bank_start + srmmu_map[i].size;
 		while(bank_start < bank_end) {
 			if((bank_start >= KERNBASE) &&
@@ -180,23 +242,28 @@ __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
 			mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
 			bank_start += PAGE_SIZE;
 		}
+
+		if (bank_end == 0xfd000000)
+			bank_end = PAGE_OFFSET;
 	}
-	if (sparc_cpu_model == sun4d) {
-		for (addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
-			if (addr < sun4d_dma_vbase || addr >= sun4d_dma_vbase + sun4d_dma_size)
-				clear_bit(PG_DMA, &mem_map[MAP_NR(addr)].flags);
+
+	if (bank_end < KERNBASE) {
+		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+		mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
+		PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
+	} else if (MAP_NR(bank_end) < max_mapnr) {
+		mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+		if (mem_map[0].flags & (1 << PG_skip)) {
+			mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
+			PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
+		} else {
+			mem_map[MAP_NR(bank_end)].next_hash = mem_map;
+			PGSKIP_DEBUG(MAP_NR(bank_end), 0);
+		}
+	}
 }
 
 /* The very generic SRMMU page table operations. */
-static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
-static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
-
-static unsigned long srmmu_vmalloc_start(void)
-{
-	return SRMMU_VMALLOC_START;
-}
-
 static inline int srmmu_device_memory(unsigned long x)
 {
 	return ((x & 0xF0000000) != 0);
@@ -220,44 +287,53 @@ static unsigned long srmmu_c_pmd_page(pmd_t pmd)
 static unsigned long srmmu_c_pte_page(pte_t pte)
 { return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
 
-static int srmmu_pte_none(pte_t pte)
+static unsigned long srmmu_s_pgd_page(pgd_t pgd)
+{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_s_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+
+static unsigned long srmmu_s_pmd_page(pmd_t pmd)
+{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_s_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+
+static unsigned long srmmu_s_pte_page(pte_t pte)
+{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_s_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+
+static inline int srmmu_pte_none(pte_t pte)
 { return !(pte_val(pte) & 0xFFFFFFF); }
 
-static int srmmu_pte_present(pte_t pte)
+static inline int srmmu_pte_present(pte_t pte)
 { return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
 
-static void srmmu_pte_clear(pte_t *ptep)	{ set_pte(ptep, __pte(0)); }
+static inline void srmmu_pte_clear(pte_t *ptep)	{ set_pte(ptep, __pte(0)); }
 
-static int srmmu_pmd_none(pmd_t pmd)
+static inline int srmmu_pmd_none(pmd_t pmd)
 { return !(pmd_val(pmd) & 0xFFFFFFF); }
 
-static int srmmu_pmd_bad(pmd_t pmd)
+static inline int srmmu_pmd_bad(pmd_t pmd)
 { return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 
-static int srmmu_pmd_present(pmd_t pmd)
+static inline int srmmu_pmd_present(pmd_t pmd)
 { return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 
-static void srmmu_pmd_clear(pmd_t *pmdp)	{ set_pte((pte_t *)pmdp, __pte(0)); }
+static inline void srmmu_pmd_clear(pmd_t *pmdp)	{ set_pte((pte_t *)pmdp, __pte(0)); }
 
-static int srmmu_pgd_none(pgd_t pgd)
+static inline int srmmu_pgd_none(pgd_t pgd)
 { return !(pgd_val(pgd) & 0xFFFFFFF); }
 
-static int srmmu_pgd_bad(pgd_t pgd)
+static inline int srmmu_pgd_bad(pgd_t pgd)
 { return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
 
-static int srmmu_pgd_present(pgd_t pgd)
+static inline int srmmu_pgd_present(pgd_t pgd)
 { return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
 
-static void srmmu_pgd_clear(pgd_t * pgdp)	{ set_pte((pte_t *)pgdp, __pte(0)); }
+static inline void srmmu_pgd_clear(pgd_t * pgdp)	{ set_pte((pte_t *)pgdp, __pte(0)); }
 
-static int srmmu_pte_write(pte_t pte)		{ return pte_val(pte) & SRMMU_WRITE; }
-static int srmmu_pte_dirty(pte_t pte)		{ return pte_val(pte) & SRMMU_DIRTY; }
-static int srmmu_pte_young(pte_t pte)		{ return pte_val(pte) & SRMMU_REF; }
+static inline int srmmu_pte_write(pte_t pte)		{ return pte_val(pte) & SRMMU_WRITE; }
+static inline int srmmu_pte_dirty(pte_t pte)		{ return pte_val(pte) & SRMMU_DIRTY; }
+static inline int srmmu_pte_young(pte_t pte)		{ return pte_val(pte) & SRMMU_REF; }
 
-static pte_t srmmu_pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
-static pte_t srmmu_pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
-static pte_t srmmu_pte_mkold(pte_t pte)		{ return __pte(pte_val(pte) & ~SRMMU_REF);}
-static pte_t srmmu_pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_WRITE);}
-static pte_t srmmu_pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
-static pte_t srmmu_pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_REF);}
+static inline pte_t srmmu_pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
+static inline pte_t srmmu_pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
+static inline pte_t srmmu_pte_mkold(pte_t pte)		{ return __pte(pte_val(pte) & ~SRMMU_REF);}
+static inline pte_t srmmu_pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_WRITE);}
+static inline pte_t srmmu_pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
+static inline pte_t srmmu_pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | SRMMU_REF);}
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -269,6 +345,9 @@ static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
 static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot)
 { return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); }
 
+static pte_t srmmu_s_mk_pte(unsigned long page, pgprot_t pgprot)
+{ return __pte(((srmmu_s_v2p(page)) >> 4) | pgprot_val(pgprot)); }
+
 static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
 { return __pte(((page) >> 4) | pgprot_val(pgprot)); }
 
@@ -307,41 +386,64 @@ static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep)
 	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4)));
 }
 
-static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
+static void srmmu_s_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
+{
+	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pgdp) >> 4)));
+}
+
+static void srmmu_s_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) pmdp) >> 4)));
+}
+
+static void srmmu_s_pmd_set(pmd_t * pmdp, pte_t * ptep)
+{
+	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_s_v2p((unsigned long) ptep) >> 4)));
+}
+
+static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
 
 /* to find an entry in a top-level page table... */
-static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 {
-	return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
+	return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
 }
 
 /* Find an entry in the second-level page table.. */
-static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
 {
 	return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 }
 
 /* Find an entry in the third-level page table.. */
-static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
 {
 	return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 }
 
-/* Find an entry in the second-level page table.. */
-static pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address)
 {
 	return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
 }
 
-/* Find an entry in the third-level page table.. */
-static pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
+static inline pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address)
 {
 	return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
 }
 
+static inline pmd_t *srmmu_s_pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) srmmu_s_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
+}
+
+static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
+{
+	return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+}
+
 /* This must update the context table entry for this process. */
 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
 {
@@ -352,334 +454,146 @@ static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
 	}
 }
 
-static inline void srmmu_putpage(unsigned long page)
-{
-	free_page(page);
-}
-
-#define LC_HIGH_WATER	128
-#define BC_HIGH_WATER	32
-
-static unsigned long *lcnks = 0;
-static unsigned long *bcnks = 0;
-static int lcwater = 0;
-static int bcwater = 0;
-static int chunk_pages = 0;
-static int clct_pages = 0;
-
-#define RELAX_JIFFIES	16
-
-static int lcjiffies;
-static int bcjiffies;
-
-struct chunk {
-	struct chunk *next;
-	struct chunk *prev;
-	struct chunk *npage;
-	struct chunk *ppage;
-	int count;
-};
-
-static int garbage_calls = 0;
-
-#define OTHER_PAGE(p,q)	(((unsigned long)(p) ^ (unsigned long)(q)) & PAGE_MASK)
-
-static int garbage_collect(unsigned long **cnks, int n, int cpp)
+static inline pte_t *srmmu_get_pte_fast(void)
 {
-	struct chunk *root = (struct chunk *)*cnks;
-	struct chunk *p, *q, *curr, *next;
-	int water = n;
-
-	next = root->next;
-	curr = root->prev = root->next = root->npage = root->ppage = root;
-	root->count = 1;
-
-	garbage_calls++;
-
-	while (--n) {
-		p = next;
-		next = next->next;
-
-		if (OTHER_PAGE(p, curr)) {
-
-			q = curr->npage;
-			while (q != curr) {
-				if (!OTHER_PAGE(p, q))
-					break;
-				q = q->npage;
-			}
-
-			if (q == curr) {
-
-				(p->npage = curr->npage)->ppage = p;
-				curr->npage = p;
-				p->ppage = curr;
-
-				p->next = p->prev = p;
-				p->count = 1;
-
-				curr = p;
-
-				continue;
-			}
-			curr = q;
-		}
-
-		(p->next = curr->next)->prev = p;
-		curr->next = p;
-		p->prev = curr;
-
-		if (++curr->count == cpp) {
-
-			q = curr->npage;
-			if (curr == q) {
-
-				srmmu_putpage((unsigned long)curr & PAGE_MASK);
-				water -= cpp;
-
-				clct_pages++;
-				chunk_pages--;
-
-				if (--n) {
-					p = next;
-					next = next->next;
-
-					curr = root->prev =
-						root->next = root->npage =
-						root->ppage = root = p;
-					root->count = 1;
-
-					continue;
-				}
-				return 0;
-			}
-
-			if (curr == root)
-				root = q;
-
-			curr->ppage->npage = q;
-			q->ppage = curr->ppage;
-
-			srmmu_putpage((unsigned long)curr & PAGE_MASK);
-			water -= cpp;
-
-			clct_pages++;
-			chunk_pages--;
-
-			curr = q;
-		}
-	}
-
-	p = root;
-	while (p->npage != root) {
-		p->prev->next = p->npage;
-		p = p->npage;
+	struct page *ret;
+
+	spin_lock(&pte_spinlock);
+	if ((ret = (struct page *)pte_quicklist) != NULL) {
+		unsigned int mask = (unsigned int)ret->pprev_hash;
+		unsigned int tmp, off;
+
+		if (mask & 0xff)
+			for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
+		else
+			for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
+		(unsigned int)ret->pprev_hash = mask & ~tmp;
+		if (!(mask & ~tmp))
+			pte_quicklist = (unsigned long *)ret->next_hash;
+		ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off);
+		pgtable_cache_size--;
 	}
-
-	*cnks = (unsigned long *)root;
- return water; + spin_unlock(&pte_spinlock); + return (pte_t *)ret; } -static unsigned long *get_small_chunk(void) +static inline pte_t *srmmu_get_pte_slow(void) { - unsigned long *rval; - unsigned long flags; - - save_and_cli(flags); - if(lcwater) { - lcwater--; - rval = lcnks; - lcnks = (unsigned long *) *rval; - } else { - rval = (unsigned long *) __get_free_page(GFP_KERNEL); - - if(!rval) { - restore_flags(flags); - return 0; - } - chunk_pages++; - - lcnks = (rval + 64); - - /* Cache stomping, I know... */ - *(rval + 64) = (unsigned long) (rval + 128); - *(rval + 128) = (unsigned long) (rval + 192); - *(rval + 192) = (unsigned long) (rval + 256); - *(rval + 256) = (unsigned long) (rval + 320); - *(rval + 320) = (unsigned long) (rval + 384); - *(rval + 384) = (unsigned long) (rval + 448); - *(rval + 448) = (unsigned long) (rval + 512); - *(rval + 512) = (unsigned long) (rval + 576); - *(rval + 576) = (unsigned long) (rval + 640); - *(rval + 640) = (unsigned long) (rval + 704); - *(rval + 704) = (unsigned long) (rval + 768); - *(rval + 768) = (unsigned long) (rval + 832); - *(rval + 832) = (unsigned long) (rval + 896); - *(rval + 896) = (unsigned long) (rval + 960); - *(rval + 960) = 0; - lcwater = 15; + pte_t *ret; + struct page *page; + + ret = (pte_t *)get_free_page(GFP_KERNEL); + if (ret) { + page = mem_map + MAP_NR(ret); + flush_chunk((unsigned long)ret); + (unsigned int)page->pprev_hash = 0xfffe; + spin_lock(&pte_spinlock); + (unsigned long *)page->next_hash = pte_quicklist; + pte_quicklist = (unsigned long *)page; + pgtable_cache_size += 15; } - lcjiffies = jiffies; - restore_flags(flags); - memset(rval, 0, 256); - flush_chunk((unsigned long)rval); - return rval; -} - -static inline void free_small_chunk(unsigned long *it) -{ - unsigned long flags; - - save_and_cli(flags); - *it = (unsigned long) lcnks; - lcnks = it; - lcwater++; - - if ((lcwater > LC_HIGH_WATER) && - (jiffies > lcjiffies + RELAX_JIFFIES)) - lcwater = garbage_collect(&lcnks, lcwater, 16); - - restore_flags(flags); + return ret; } -static unsigned long *get_big_chunk(void) +static inline pgd_t *srmmu_get_pgd_fast(void) { - unsigned long *rval; - unsigned long flags; - - save_and_cli(flags); - if(bcwater) { - bcwater--; - rval = bcnks; - bcnks = (unsigned long *) *rval; - } else { - rval = (unsigned long *) __get_free_page(GFP_KERNEL); - - if(!rval) { - restore_flags(flags); - return 0; - } - chunk_pages++; - - bcnks = (rval + 256); + struct page *ret; - /* Cache stomping, I know... 
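
srmmu_get_pte_fast and srmmu_get_pte_slow replace the old chunk allocator with a quicklist of whole pages: one 4 KB page holds sixteen 256-byte SRMMU page tables, and the free slots are tracked as a 16-bit mask stashed in the page struct's pprev_hash field (the split loop above is just a byte-at-a-time scan for the lowest set bit). A condensed model of the same scheme, without the kernel's struct page or pte_spinlock:

#include <stdlib.h>
#include <strings.h>                 /* ffs() */

/* One 4 KB page carved into sixteen 256-byte tables; free slots are a
 * 16-bit mask. The kernel keeps the mask in page->pprev_hash instead. */
struct ptpage {
    unsigned int free_mask;          /* bit n set => slot n is free */
    struct ptpage *next;             /* quicklist chaining */
    char mem[16 * 256];
};

static void *pt_alloc(struct ptpage **quicklist)
{
    struct ptpage *pg = *quicklist;
    int slot;

    if (!pg)
        return NULL;                 /* caller falls back to the slow path */
    /* Invariant: a listed page always has at least one free slot. */
    slot = ffs(pg->free_mask) - 1;
    pg->free_mask &= ~(1u << slot);
    if (!pg->free_mask)
        *quicklist = pg->next;       /* fully handed out: unlink the page */
    return pg->mem + (slot << 8);
}

The slow path in the patch does the page-granularity work once (get_free_page, flush_chunk) and seeds the mask with 0xfffe: slot 0 is the table being returned, the other fifteen go on the quicklist.
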
*/ - *(rval + 256) = (unsigned long) (rval + 512); - *(rval + 512) = (unsigned long) (rval + 768); - *(rval + 768) = 0; - bcwater = 3; + spin_lock(&pgd_spinlock); + if ((ret = (struct page *)pgd_quicklist) != NULL) { + unsigned int mask = (unsigned int)ret->pprev_hash; + unsigned int tmp, off; + + for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024); + (unsigned int)ret->pprev_hash = mask & ~tmp; + if (!(mask & ~tmp)) + pgd_quicklist = (unsigned long *)ret->next_hash; + ret = (struct page *)(PAGE_OFFSET + (ret->map_nr << PAGE_SHIFT) + off); + pgd_cache_size--; } - bcjiffies = jiffies; - restore_flags(flags); - memset(rval, 0, 1024); - flush_chunk((unsigned long)rval); - return rval; + spin_unlock(&pgd_spinlock); + return (pte_t *)ret; } -static inline void free_big_chunk(unsigned long *it) +static inline pgd_t *srmmu_get_pgd_slow(void) { - unsigned long flags; - - save_and_cli(flags); - *it = (unsigned long) bcnks; - bcnks = it; - bcwater++; - - if ((bcwater > BC_HIGH_WATER) && - (jiffies > bcjiffies + RELAX_JIFFIES)) - bcwater = garbage_collect(&bcnks, bcwater, 4); - - restore_flags(flags); + pgd_t *ret; + struct page *page; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL); + if (ret) { + pgd_t *init = pgd_offset(&init_mm, 0); + memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + page = mem_map + MAP_NR(ret); + flush_chunk((unsigned long)ret); + (unsigned int)page->pprev_hash = 0xe; + spin_lock(&pgd_spinlock); + (unsigned long *)page->next_hash = pgd_quicklist; + pgd_quicklist = (unsigned long *)page; + pgd_cache_size += 3; + spin_unlock(&pgd_spinlock); + } + return ret; } -#define NEW_PGD() (pgd_t *) get_big_chunk() -#define NEW_PMD() (pmd_t *) get_small_chunk() -#define NEW_PTE() (pte_t *) get_small_chunk() -#define FREE_PGD(chunk) free_big_chunk((unsigned long *)(chunk)) -#define FREE_PMD(chunk) free_small_chunk((unsigned long *)(chunk)) -#define FREE_PTE(chunk) free_small_chunk((unsigned long *)(chunk)) - -/* - * Allocate and free page tables. The xxx_kernel() versions are - * used to allocate a kernel page table - this turns on ASN bits - * if any, and marks the page tables reserved. 
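
srmmu_get_pgd_slow packs page directories the same way: an SRMMU pgd is 256 entries of 4 bytes, i.e. 1 KB, so four fit in one page. Each of the four is formatted up front (user part zeroed, kernel part copied from init_mm), and the mask is seeded with 0xe, marking quadrants 1-3 free while quadrant 0 is the pgd being returned. The unrolled memset/memcpy pairs are equivalent to this loop (sketch, same identifiers as the patch):

/* Loop form of the four-quadrant pgd initialization above (sketch). */
int i;
pgd_t *init = pgd_offset(&init_mm, 0);

for (i = 0; i < 4; i++) {
    memset(ret + i * PTRS_PER_PGD, 0,
           USER_PTRS_PER_PGD * sizeof(pgd_t));
    memcpy(ret + i * PTRS_PER_PGD + USER_PTRS_PER_PGD,
           init + USER_PTRS_PER_PGD,
           (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
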
- */ -static void srmmu_pte_free_kernel(pte_t *pte) +static void srmmu_free_pte_slow(pte_t *pte) { - FREE_PTE(pte); } -static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address) +static void srmmu_free_pgd_slow(pgd_t *pgd) { - address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1); - if(srmmu_pmd_none(*pmd)) { - pte_t *page = NEW_PTE(); - if(srmmu_pmd_none(*pmd)) { - if(page) { - pmd_set(pmd, page); - return page + address; - } - pmd_set(pmd, BAD_PAGETABLE); - return NULL; - } - FREE_PTE(page); - } - if(srmmu_pmd_bad(*pmd)) { - printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); - pmd_set(pmd, BAD_PAGETABLE); - return NULL; - } - return (pte_t *) pmd_page(*pmd) + address; } -static void srmmu_pmd_free_kernel(pmd_t *pmd) +static inline void srmmu_pte_free(pte_t *pte) { - FREE_PMD(pmd); -} + struct page *page = mem_map + MAP_NR(pte); -static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address) -{ - address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1); - if(srmmu_pgd_none(*pgd)) { - pmd_t *page; - page = NEW_PMD(); - if(srmmu_pgd_none(*pgd)) { - if(page) { - pgd_set(pgd, page); - return page + address; - } - pgd_set(pgd, (pmd_t *) BAD_PAGETABLE); - return NULL; - } - FREE_PMD(page); + spin_lock(&pte_spinlock); + if (!page->pprev_hash) { + (unsigned long *)page->next_hash = pte_quicklist; + pte_quicklist = (unsigned long *)page; } - if(srmmu_pgd_bad(*pgd)) { - printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd)); - pgd_set(pgd, (pmd_t *) BAD_PAGETABLE); - return NULL; - } - return (pmd_t *) pgd_page(*pgd) + address; -} - -static void srmmu_pte_free(pte_t *pte) -{ - FREE_PTE(pte); + (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15)); + pgtable_cache_size++; + spin_unlock(&pte_spinlock); } static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address) { address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1); if(srmmu_pmd_none(*pmd)) { - pte_t *page = NEW_PTE(); + pte_t *page = srmmu_get_pte_fast(); + + if (page) { + pmd_set(pmd, page); + return page + address; + } + page = srmmu_get_pte_slow(); if(srmmu_pmd_none(*pmd)) { if(page) { + spin_unlock(&pte_spinlock); pmd_set(pmd, page); return page + address; } pmd_set(pmd, BAD_PAGETABLE); return NULL; } - FREE_PTE(page); + if (page) { + (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff; + pgtable_cache_size++; + spin_unlock(&pte_spinlock); + } } if(srmmu_pmd_bad(*pmd)) { printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); @@ -692,23 +606,34 @@ static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address) /* Real three-level page tables on SRMMU. 
*/ static void srmmu_pmd_free(pmd_t * pmd) { - FREE_PMD(pmd); + return srmmu_pte_free((pte_t *)pmd); } static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address) { address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1); if(srmmu_pgd_none(*pgd)) { - pmd_t *page = NEW_PMD(); + pmd_t *page = (pmd_t *)srmmu_get_pte_fast(); + + if (page) { + pgd_set(pgd, page); + return page + address; + } + page = (pmd_t *)srmmu_get_pte_slow(); if(srmmu_pgd_none(*pgd)) { if(page) { + spin_unlock(&pte_spinlock); pgd_set(pgd, page); return page + address; } pgd_set(pgd, (pmd_t *) BAD_PAGETABLE); return NULL; } - FREE_PMD(page); + if (page) { + (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff; + pgtable_cache_size++; + spin_unlock(&pte_spinlock); + } } if(srmmu_pgd_bad(*pgd)) { printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd)); @@ -720,12 +645,58 @@ static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address) static void srmmu_pgd_free(pgd_t *pgd) { - FREE_PGD(pgd); + struct page *page = mem_map + MAP_NR(pgd); + + spin_lock(&pgd_spinlock); + if (!page->pprev_hash) { + (unsigned long *)page->next_hash = pgd_quicklist; + pgd_quicklist = (unsigned long *)page; + } + (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3)); + pgd_cache_size++; + spin_unlock(&pgd_spinlock); } static pgd_t *srmmu_pgd_alloc(void) { - return NEW_PGD(); + pgd_t *ret; + + ret = srmmu_get_pgd_fast(); + if (ret) return ret; + return srmmu_get_pgd_slow(); +} + + +static void srmmu_set_pgdir(unsigned long address, pgd_t entry) +{ + struct task_struct * p; + struct page *page; + + read_lock(&tasklist_lock); + for_each_task(p) { + if (!p->mm) + continue; + *pgd_offset(p->mm,address) = entry; + } + read_unlock(&tasklist_lock); + spin_lock(&pgd_spinlock); + address >>= SRMMU_PGDIR_SHIFT; + for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) { + pgd_t *pgd = (pgd_t *)(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + unsigned int mask = (unsigned int)page->pprev_hash; + + if (mask & 1) + pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry; + if (mask & 2) + pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry; + if (mask & 4) + pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry; + if (mask & 8) + pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry; + if (mask) + flush_chunk((unsigned long)pgd); + } + spin_unlock(&pgd_spinlock); } static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval) @@ -926,6 +897,19 @@ extern void tsunami_flush_tlb_mm(struct mm_struct *mm); extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end); extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +/* Workaround, until we find what's going on with Swift. When low on memory, it sometimes + * loops in fault/handle_mm_fault incl. flush_tlb_page to find out it is already in page tables/ + * fault again on the same instruction. I really don't understand it, have checked it and contexts + * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj + */ +static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) +{ + static unsigned long last; + + if (last == address) viking_hwprobe(address); + last = address; +} + /* Swift flushes. It has the recommended SRMMU specification flushing * facilities, so we can do things in a more fine grained fashion than we * could on the tsunami. Let's watch out for HARDWARE BUGS... 
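
srmmu_set_pgdir keeps one invariant: a kernel mapping installed in the top-level table must appear in every pgd in the system, both the live ones (walked via the tasklist) and the pre-formatted quadrants still sitting on the pgd quicklist, where the free-slot mask says which of the four 1 KB quadrants on each cached page need the new entry. Roughly, reusing the ptpage model from the allocation sketch:

/* Sketch of the invariant, with a simplified mm list and pgd cache;
 * 'idx' is the pgd slot of the new kernel mapping. */
struct smm { unsigned int *pgd; struct smm *next; };

static void set_kernel_slot(struct smm *mms, struct ptpage *pgd_cache,
                            unsigned int idx, unsigned int entry)
{
    struct smm *m;
    struct ptpage *pg;

    for (m = mms; m; m = m->next)
        m->pgd[idx] = entry;                    /* every live address space */

    for (pg = pgd_cache; pg; pg = pg->next) {   /* every cached quadrant */
        unsigned int *base = (unsigned int *)pg->mem;
        int q;
        for (q = 0; q < 4; q++)
            if (pg->free_mask & (1u << q))
                base[q * 256 + idx] = entry;    /* 256 entries per pgd */
    }
}
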
@@ -1191,12 +1175,10 @@ static void cypress_flush_chunk(unsigned long chunk) cypress_flush_page_to_ram(chunk); } -#if NOTUSED /* Cypress is also IO cache coherent. */ static void cypress_flush_page_for_dma(unsigned long page) { } -#endif /* Cypress has unified L2 VIPT, from which both instructions and data * are stored. It does not have an onboard icache of any sort, therefore @@ -1282,9 +1264,8 @@ extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); extern void viking_flush_page(unsigned long page); extern void viking_mxcc_flush_page(unsigned long page); extern void viking_flush_chunk(unsigned long chunk); -extern void viking_c_flush_page(unsigned long page); -extern void viking_c_mxcc_flush_page(unsigned long page); extern void viking_c_flush_chunk(unsigned long chunk); +extern void viking_s_flush_chunk(unsigned long chunk); extern void viking_mxcc_flush_chunk(unsigned long chunk); extern void viking_flush_tlb_all(void); extern void viking_flush_tlb_mm(struct mm_struct *mm); @@ -1481,7 +1462,7 @@ static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned * looking at the prom's page table directly which is what most * other OS's do. Yuck... this is much better. */ -void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end) +__initfunc(void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)) { pgd_t *pgdp; pmd_t *pmdp; @@ -1539,21 +1520,79 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end) } } -/* #define DEBUG_MAP_KERNEL */ - #ifdef DEBUG_MAP_KERNEL #define MKTRACE(foo) prom_printf foo #else #define MKTRACE(foo) #endif -static int lots_of_ram = 0; -static int large_pte_optimize = 1; +static int lots_of_ram __initdata = 0; +static int srmmu_low_pa __initdata = 0; +static unsigned long end_of_phys_memory __initdata = 0; + +__initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *end_mem_p)) +{ + unsigned int sum = 0; + unsigned long last = 0xff000000; + long first, cur; + unsigned long pa; + unsigned long total = 0; + int i; + + pa = srmmu_hwprobe(KERNBASE + PAGE_SIZE); + pa = (pa & SRMMU_PTE_PMASK) << 4; + if (!sp_banks[0].base_addr && pa == PAGE_SIZE) { + for(i = 0; sp_banks[i].num_bytes != 0; i++) { + if (sp_banks[i].base_addr + sp_banks[i].num_bytes > 0x0d000000) + break; + } + if (!sp_banks[i].num_bytes) { + srmmu_low_pa = 1; + end_of_phys_memory = SRMMU_PGDIR_ALIGN(sp_banks[i-1].base_addr + sp_banks[i-1].num_bytes); + *end_mem_p = KERNBASE + end_of_phys_memory; + if (sp_banks[0].num_bytes >= (6 * 1024 * 1024) || end_of_phys_memory <= 0x06000000) { + /* Make sure there will be enough memory for the whole mem_map (even if sparse) */ + return; + } + } + } + for(i = 0; sp_banks[i].num_bytes != 0; i++) { + pa = sp_banks[i].base_addr; + first = (pa & (~SRMMU_PGDIR_MASK)); + cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE); + if (cur < 0) cur = 0; + if (!first || last != (pa & SRMMU_PGDIR_MASK)) + total += SRMMU_PGDIR_SIZE; + sum += sp_banks[i].num_bytes; + if (memory_size) { + if (sum > memory_size) { + sp_banks[i].num_bytes -= + (sum - memory_size); + cur = (sp_banks[i].num_bytes + first - SRMMU_PGDIR_SIZE); + if (cur < 0) cur = 0; + total += SRMMU_PGDIR_ALIGN(cur); + sum = memory_size; + sp_banks[++i].base_addr = 0xdeadbeef; + sp_banks[i].num_bytes = 0; + break; + } + } + total += SRMMU_PGDIR_ALIGN(cur); + last = (sp_banks[i].base_addr + sp_banks[i].num_bytes - 1) & SRMMU_PGDIR_MASK; + } + if (total <= 0x0d000000) + *end_mem_p = KERNBASE + total; + else { + 
*end_mem_p = 0xfd000000; + lots_of_ram = 1; + } + end_of_phys_memory = total; +} #define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID) /* Create a third-level SRMMU 16MB page mapping. */ -static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base) +__initfunc(static void do_large_mapping(unsigned long vaddr, unsigned long phys_base)) { pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr); unsigned long big_pte; @@ -1563,47 +1602,6 @@ static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base *pgdp = __pgd(big_pte); } -/* Create second-level SRMMU 256K medium sized page mappings. */ -static inline void do_medium_mapping(unsigned long vaddr, unsigned long vend, - unsigned long phys_base) -{ - pgd_t *pgdp; - pmd_t *pmdp; - unsigned long medium_pte; - - MKTRACE(("dmm[v<%08lx,%08lx>-->p<%08lx>]", vaddr, vend, phys_base)); - while(vaddr < vend) { - pgdp = srmmu_pgd_offset(init_task.mm, vaddr); - pmdp = srmmu_early_pmd_offset(pgdp, vaddr); - medium_pte = KERNEL_PTE(phys_base >> 4); - *pmdp = __pmd(medium_pte); - phys_base += SRMMU_PMD_SIZE; - vaddr += SRMMU_PMD_SIZE; - } -} - -/* Create a normal set of SRMMU page mappings for the virtual range - * START to END, using physical pages beginning at PHYS_BASE. - */ -static inline void do_small_mapping(unsigned long start, unsigned long end, - unsigned long phys_base) -{ - pgd_t *pgdp; - pmd_t *pmdp; - pte_t *ptep; - - MKTRACE(("dsm[v<%08lx,%08lx>-->p<%08lx>]", start, end, phys_base)); - while(start < end) { - pgdp = srmmu_pgd_offset(init_task.mm, start); - pmdp = srmmu_early_pmd_offset(pgdp, start); - ptep = srmmu_early_pte_offset(pmdp, start); - - *ptep = __pte(KERNEL_PTE(phys_base >> 4)); - phys_base += PAGE_SIZE; - start += PAGE_SIZE; - } -} - /* Look in the sp_bank for the given physical page, return the * index number the entry was found in, or -1 for not found. */ @@ -1625,7 +1623,7 @@ static inline int find_in_spbanks(unsigned long phys_page) * array of char's, each member indicating if that spbank is mapped * yet or not. */ -static inline int find_free_spbank(char *taken_vector) +__initfunc(static int find_free_spbank(char *taken_vector)) { int entry; @@ -1635,78 +1633,28 @@ static inline int find_free_spbank(char *taken_vector) return entry; } -/* Same as above, but with a given bank size limit BLIMIT. */ -static inline int find_free_spbank_limited(char *taken_vector, unsigned long limit) -{ - int entry; - - for(entry = 0; sp_banks[entry].num_bytes; entry++) - if(!taken_vector[entry] && - (sp_banks[entry].num_bytes < limit)) - break; - return entry; -} +static unsigned long map_spbank_last_pa __initdata = 0xff000000; /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. - * This routine is expected to update the srmmu_map and try as - * hard as possible to use 16MB level-one SRMMU pte's when at all - * possible to get short termination and faster translations. 
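
With the medium and small mapping paths gone, memory banks are mapped exclusively with level-1 PTEs: a 16 MB translation is just a PTE stored directly in the pgd slot. The ">> 4" in KERNEL_PTE and the "(pte & SRMMU_PTE_PMASK) << 4" in the page helpers above are the same packing, which lets a 36-bit SRMMU physical address share a 32-bit word with the flag byte. A round-trip sketch with a hypothetical physical address and flag byte:

/* Round trip of the >>4 packing (sketch; flag and mask values here
 * are illustrative, not quoted from asm/pgtsrmmu.h). */
unsigned long phys = 0x0c000000UL;               /* example 16 MB aligned PA */
unsigned long pte  = (phys >> 4) | 0x9eUL;       /* flags in the low byte    */
unsigned long back = (pte & 0xffffff00UL) << 4;  /* mask off the flag byte   */
/* back == phys again; a page-aligned PA leaves the low byte free. */
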
*/ -static inline unsigned long map_spbank(unsigned long vbase, int sp_entry) +__initfunc(static unsigned long map_spbank(unsigned long vbase, int sp_entry)) { - unsigned long pstart = sp_banks[sp_entry].base_addr; - unsigned long vstart = vbase; - unsigned long vend = vbase + sp_banks[sp_entry].num_bytes; + unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK); + unsigned long vstart = (vbase & SRMMU_PGDIR_MASK); + unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); static int srmmu_bank = 0; - /* If physically not aligned on 16MB boundry, just shortcut - * right here by mapping them with 4k normal pages, and bumping - * the next virtual address to the next 16MB boundry. You can - * get this with various RAM configurations due to the way in - * which the PROM carves out it's own chunks of memory. - */ - if(pstart & ~SRMMU_PGDIR_MASK) { - do_small_mapping(vstart, vend, pstart); - vstart = SRMMU_PGDIR_ALIGN(vend); - goto finish_up; - } + MKTRACE(("map_spbank %d[v<%08lx>p<%08lx>s<%08lx>]", sp_entry, vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes)); + MKTRACE(("map_spbank2 %d[p%08lx v%08lx-%08lx]", sp_entry, pstart, vstart, vend)); while(vstart < vend) { - unsigned long coverage, next_aligned; - if(vstart & ~SRMMU_PMD_MASK) { - next_aligned = SRMMU_PMD_ALIGN(vstart); - if(next_aligned <= vend) { - coverage = (next_aligned - vstart); - do_small_mapping(vstart, next_aligned, pstart); - } else { - coverage = (vend - vstart); - do_small_mapping(vstart, vend, pstart); - } - } else if(vstart & ~SRMMU_PGDIR_MASK) { - next_aligned = SRMMU_PGDIR_ALIGN(vstart); - if(next_aligned <= vend) { - coverage = (next_aligned - vstart); - do_medium_mapping(vstart, next_aligned, pstart); - } else { - coverage = (vend - vstart); - do_small_mapping(vstart, vend, pstart); - } - } else { - coverage = SRMMU_PGDIR_SIZE; - if(large_pte_optimize || ((vstart+coverage)<=vend)) { - do_large_mapping(vstart, pstart); - } else { - coverage = (vend - vstart); - do_small_mapping(vstart, vend, pstart); - } - } - vstart += coverage; pstart += coverage; + do_large_mapping(vstart, pstart); + vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; } -finish_up: srmmu_map[srmmu_bank].vbase = vbase; srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr; srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes; - MKTRACE(("SRMMUBANK[v<%08lx>p<%08lx>s<%08lx>]", vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes)); srmmu_bank++; + map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE; return vstart; } @@ -1721,10 +1669,10 @@ static inline void memprobe_error(char *msg) * is part of a full bank which is at least 4MB in size and begins at * 0xf0000000 (ie. KERNBASE). */ -static void map_kernel(void) +static inline void map_kernel(void) { unsigned long raw_pte, physpage; - unsigned long vaddr, tally, low_base; + unsigned long vaddr, low_base; char etaken[SPARC_PHYS_BANKS]; int entry; @@ -1735,17 +1683,7 @@ static void map_kernel(void) low_base = KERNBASE; - /* Step 2: Calculate 'lots_of_ram'. */ - tally = 0; - for(entry = 0; sp_banks[entry].num_bytes; entry++) - tally += sp_banks[entry].num_bytes; - if(tally > (0xfd000000 - KERNBASE)) - lots_of_ram = 1; - else - lots_of_ram = 0; - MKTRACE(("tally=%08lx lots_of_ram<%d>\n", tally, lots_of_ram)); - - /* Step 3: Fill in KERNBASE base pgd. Lots of sanity checking here. */ + /* Step 2: Fill in KERNBASE base pgd. Lots of sanity checking here. 
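
The rewritten map_spbank rounds the physical base down and the end up to SRMMU_PGDIR_SIZE (16 MB), and map_spbank_last_pa lets the caller detect that the next bank starts inside the section just mapped, pulling the virtual cursor back one section instead of mapping it twice. The rounding itself (sketch):

/* 16 MB section arithmetic (SRMMU_PGDIR_SIZE == 1 << 24; sketch). */
#define SECT       (1UL << 24)
#define SECT_MASK  (~(SECT - 1))

unsigned long pstart = base_addr & SECT_MASK;                       /* down */
unsigned long vstart = vbase & SECT_MASK;
unsigned long vend   = (vbase + num_bytes + SECT - 1) & SECT_MASK;  /* up   */
/* one level-1 PTE per 16 MB section in [vstart, vend) */
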
*/ raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE); if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE) memprobe_error("Wheee, kernel not mapped at all by boot loader.\n"); @@ -1757,11 +1695,10 @@ static void map_kernel(void) if(entry == -1 || (sp_banks[entry].base_addr != physpage)) memprobe_error("Kernel mapped in non-existant memory.\n"); MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes)); - if(((KERNBASE + (sp_banks[entry].num_bytes)) > 0xfd000000) || - ((KERNBASE + (sp_banks[entry].num_bytes)) < KERNBASE)) { + if (sp_banks[entry].num_bytes > 0x0d000000) { unsigned long orig_base = sp_banks[entry].base_addr; unsigned long orig_len = sp_banks[entry].num_bytes; - unsigned long can_map = (0xfd000000 - KERNBASE); + unsigned long can_map = 0x0d000000; /* Map a partial bank in this case, adjust the base * and the length, but don't mark it used. @@ -1779,7 +1716,7 @@ static void map_kernel(void) vaddr = map_spbank(KERNBASE, entry); etaken[entry] = 1; - /* Step 4: Map what we can above KERNBASE. */ + /* Step 3: Map what we can above KERNBASE. */ MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr)); for(;;) { unsigned long bank_size; @@ -1790,8 +1727,14 @@ static void map_kernel(void) MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size)); if(!bank_size) break; - if(((vaddr + bank_size) > 0xfd000000) || - ((vaddr + bank_size) < KERNBASE)) { + if (srmmu_low_pa) + vaddr = KERNBASE + sp_banks[entry].base_addr; + else if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) { + if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK)) + vaddr -= SRMMU_PGDIR_SIZE; + vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)); + } + if ((vaddr + bank_size - KERNBASE) > 0x0d000000) { unsigned long orig_base = sp_banks[entry].base_addr; unsigned long orig_len = sp_banks[entry].num_bytes; unsigned long can_map = (0xfd000000 - vaddr); @@ -1808,8 +1751,6 @@ static void map_kernel(void) MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map))); break; } - if(!bank_size) - break; /* Ok, we can map this one, do it. */ MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry)); @@ -1823,22 +1764,16 @@ loop_skip: if(!lots_of_ram) goto check_and_return; - /* Step 5: Map the rest (if any) right below KERNBASE. */ + /* Step 4: Map the rest (if any) right below KERNBASE. */ MKTRACE(("map_kernel: doing low mappings... ")); - tally = 0; - for(entry = 0; sp_banks[entry].num_bytes; entry++) { - if(!etaken[entry]) - tally += SRMMU_PGDIR_ALIGN(sp_banks[entry].num_bytes); - } - if(!tally) - memprobe_error("Whee, lots_of_ram yet no low pages to map.\n"); - low_base = (KERNBASE - tally); - MKTRACE(("tally=%08lx low_base=%08lx\n", tally, low_base)); + low_base = (KERNBASE - end_of_phys_memory + 0x0d000000); + MKTRACE(("end_of_phys_memory=%08lx low_base=%08lx\n", end_of_phys_memory, low_base)); /* Ok, now map 'em. 
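
The recurring 0x0d000000 is the size of the direct-map window: KERNBASE is 0xf0000000 and the top of the window is 0xfd000000, so at most 0xfd000000 - 0xf0000000 = 0x0d000000 bytes (208 MB) of RAM can live above KERNBASE; any excess sets lots_of_ram and is mapped just below KERNBASE instead. The arithmetic, as a standalone check:

#include <assert.h>

int main(void)
{
    unsigned long kernbase   = 0xf0000000UL;
    unsigned long window_top = 0xfd000000UL;

    assert(window_top - kernbase == 0x0d000000UL);  /* size of the window */
    assert(0x0d000000UL == 208UL * 1024 * 1024);    /* i.e. 208 MB        */
    return 0;
}
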
*/ MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE)); srmmu_allocate_ptable_skeleton(low_base, KERNBASE); vaddr = low_base; + map_spbank_last_pa = 0xff000000; MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr)); for(;;) { unsigned long bank_size; @@ -1848,19 +1783,22 @@ loop_skip: MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size)); if(!bank_size) break; + if (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)) { + if (map_spbank_last_pa == (sp_banks[entry].base_addr & SRMMU_PGDIR_MASK)) + vaddr -= SRMMU_PGDIR_SIZE; + vaddr += (sp_banks[entry].base_addr & (~SRMMU_PGDIR_MASK)); + } if((vaddr + bank_size) > KERNBASE) memprobe_error("Wheee, kernel low mapping overflow.\n"); MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry)); vaddr = map_spbank(vaddr, entry); etaken[entry] = 1; - tally -= SRMMU_PGDIR_ALIGN(bank_size); - MKTRACE(("Now, vaddr=%08lx tally=%08lx\n", vaddr, tally)); + MKTRACE(("Now, vaddr=%08lx end_of_phys_memory=%08lx\n", vaddr, end_of_phys_memory)); } MKTRACE(("\n")); - if(tally) - memprobe_error("Wheee, did not map all of low mappings.\n"); + check_and_return: - /* Step 6: Sanity check, make sure we did it all. */ + /* Step 5: Sanity check, make sure we did it all. */ MKTRACE(("check_and_return: ")); for(entry = 0; sp_banks[entry].num_bytes; entry++) { MKTRACE(("e[%d]=%d ", entry, etaken[entry])); @@ -1872,6 +1810,10 @@ check_and_return: MKTRACE(("success\n")); init_task.mm->mmap->vm_start = page_offset = low_base; stack_top = page_offset - PAGE_SIZE; + BTFIXUPSET_SETHI(page_offset, low_base); + BTFIXUPSET_SETHI(stack_top, page_offset - PAGE_SIZE); + BTFIXUPSET_SIMM13(user_ptrs_per_pgd, page_offset / SRMMU_PGDIR_SIZE); + #if 1 for(entry = 0; srmmu_map[entry].size; entry++) { printk("[%d]: v[%08lx,%08lx](%lx) p[%08lx]\n", entry, @@ -1884,90 +1826,73 @@ check_and_return: /* Now setup the p2v/v2p hash tables. 
*/ for(entry = 0; entry < SRMMU_HASHSZ; entry++) - srmmu_v2p_hash[entry] = srmmu_p2v_hash[entry] = NULL; + srmmu_v2p_hash[entry] = ((0xff - entry) << 24); + for(entry = 0; entry < SRMMU_HASHSZ; entry++) + srmmu_p2v_hash[entry] = 0xffffffffUL; for(entry = 0; srmmu_map[entry].size; entry++) { unsigned long addr; for(addr = srmmu_map[entry].vbase; addr < (srmmu_map[entry].vbase + srmmu_map[entry].size); addr += (1 << 24)) - srmmu_v2p_hash[srmmu_ahashfn(addr)] = &srmmu_map[entry]; + srmmu_v2p_hash[srmmu_ahashfn(addr)] = + srmmu_map[entry].pbase - srmmu_map[entry].vbase; for(addr = srmmu_map[entry].pbase; addr < (srmmu_map[entry].pbase + srmmu_map[entry].size); addr += (1 << 24)) - srmmu_p2v_hash[srmmu_ahashfn(addr)] = &srmmu_map[entry]; + srmmu_p2v_hash[srmmu_ahashfn(addr)] = + srmmu_map[entry].pbase - srmmu_map[entry].vbase; } - page_contig_offset = page_offset - (0xfd000000 - KERNBASE); - phys_mem_contig = 1; - for(entry = 0; srmmu_map[entry].size; entry++) - if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) { - phys_mem_contig = 0; - break; - } - if (phys_mem_contig) { - printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes\n"); - pte_page = srmmu_c_pte_page; - pmd_page = srmmu_c_pmd_page; - pgd_page = srmmu_c_pgd_page; - mk_pte = srmmu_c_mk_pte; - pte_offset = srmmu_c_pte_offset; - pmd_offset = srmmu_c_pmd_offset; - if (ctxd_set == srmmu_ctxd_set) - ctxd_set = srmmu_c_ctxd_set; - pgd_set = srmmu_c_pgd_set; - pmd_set = srmmu_c_pmd_set; - mmu_v2p = srmmu_c_v2p; - mmu_p2v = srmmu_c_p2v; - if (flush_chunk == viking_flush_chunk) - flush_chunk = viking_c_flush_chunk; - } - - if (sparc_cpu_model == sun4d) { - int i, j = -1; - unsigned long bank_start, bank_end; - - sun4d_dma_vbase = 0; - sun4d_dma_size = IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE; - for (i = 0; srmmu_map[i].size; i++) { - bank_start = srmmu_map[i].vbase; - bank_end = bank_start + srmmu_map[i].size; - if (bank_start <= KERNBASE && bank_end > KERNBASE) - j = i; - else if (srmmu_map[i].size >= sun4d_dma_size) { - sun4d_dma_vbase = srmmu_map[i].vbase; + BTFIXUPSET_SETHI(page_contig_offset, page_offset - (0xfd000000 - KERNBASE)); + if (srmmu_low_pa) + phys_mem_contig = 0; + else { + phys_mem_contig = 1; + for(entry = 0; srmmu_map[entry].size; entry++) + if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) { + phys_mem_contig = 0; break; } - } - if (!sun4d_dma_vbase && j != -1) { - if (srmmu_map[j].size >= sun4d_dma_size + 0x1000000) - sun4d_dma_vbase = srmmu_map[j].vbase + 0x1000000; - else { - sun4d_dma_vbase = srmmu_map[j].vbase; - if (srmmu_map[j].size < sun4d_dma_size) - sun4d_dma_size = srmmu_map[j].size; - } - } - sun4d_dma_base = IOUNIT_DMA_BASE - srmmu_v2p(sun4d_dma_vbase); } + if (phys_mem_contig) { + printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes.\n"); + BTFIXUPSET_CALL(pte_page, srmmu_c_pte_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_page, srmmu_c_pmd_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_page, srmmu_c_pgd_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte, srmmu_c_mk_pte, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_offset, srmmu_c_pte_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_offset, srmmu_c_pmd_offset, BTFIXUPCALL_NORM); + if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set) + BTFIXUPSET_CALL(ctxd_set, srmmu_c_ctxd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_set, srmmu_c_pgd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_set, srmmu_c_pmd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_v2p, srmmu_c_v2p, BTFIXUPCALL_NORM); + 
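
The v2p/p2v hashes used to hold pointers into srmmu_map that the translator had to chase; they now hold the 32-bit delta (pbase - vbase) for whichever bank covers each 16 MB chunk, so a translation is one table load plus one add in either direction. A condensed model:

/* One slot per 16 MB of 32-bit address space; each slot caches the
 * covering bank's (pbase - vbase). Unmapped slots get poison values so
 * a bogus translation lands in an obviously invalid range (sketch). */
static unsigned long v2p_off[256], p2v_off[256];

static unsigned long v2p(unsigned long va)
{
    return va + v2p_off[va >> 24];
}

static unsigned long p2v(unsigned long pa)
{
    return pa - p2v_off[pa >> 24];
}

When the banks turn out physically contiguous (or srmmu_low_pa holds), even this is bypassed and the constant-offset srmmu_c_* or srmmu_s_* variants are patched in below.
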
BTFIXUPSET_CALL(mmu_p2v, srmmu_c_p2v, BTFIXUPCALL_NORM); + if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk) + BTFIXUPSET_CALL(flush_chunk, viking_c_flush_chunk, BTFIXUPCALL_NORM); + } else if (srmmu_low_pa) { + printk ("SRMMU: Compact physical memory. Using strightforward VA<->PA translations.\n"); + BTFIXUPSET_CALL(pte_page, srmmu_s_pte_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_page, srmmu_s_pmd_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_page, srmmu_s_pgd_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte, srmmu_s_mk_pte, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_offset, srmmu_s_pte_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_offset, srmmu_s_pmd_offset, BTFIXUPCALL_NORM); + if (BTFIXUPVAL_CALL(ctxd_set) == (unsigned long)srmmu_ctxd_set) + BTFIXUPSET_CALL(ctxd_set, srmmu_s_ctxd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_set, srmmu_s_pgd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_set, srmmu_s_pmd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_v2p, srmmu_s_v2p, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_p2v, srmmu_s_p2v, BTFIXUPCALL_NORM); + if (BTFIXUPVAL_CALL(flush_chunk) == (unsigned long)viking_flush_chunk) + BTFIXUPSET_CALL(flush_chunk, viking_s_flush_chunk, BTFIXUPCALL_NORM); + } + btfixup(); return; /* SUCCESS! */ } -unsigned long srmmu_endmem_fixup(unsigned long mem_end_now) -{ - unsigned long tally = 0; - int i; - - for(i = 0; sp_banks[i].num_bytes; i++) - tally += SRMMU_PGDIR_ALIGN(sp_banks[i].num_bytes); - if(tally < (0x0d000000UL)) { - return KERNBASE + tally; - } else { - return 0xfd000000UL; - } -} - /* Paging initialization on the Sparc Reference MMU. */ extern unsigned long free_area_init(unsigned long, unsigned long); extern unsigned long sparc_context_init(unsigned long, int); @@ -1975,9 +1900,9 @@ extern unsigned long sparc_context_init(unsigned long, int); extern int physmem_mapped_contig; extern int linux_num_cpus; -void (*poke_srmmu)(void); +void (*poke_srmmu)(void) __initdata = NULL; -unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem) +__initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)) { unsigned long ptables_start; int i, cpunode; @@ -2029,7 +1954,7 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem) start_mem = PAGE_ALIGN(mempool); flush_cache_all(); - if(flush_page_for_dma == viking_flush_page) { + if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) { unsigned long start = ptables_start; unsigned long end = start_mem; @@ -2048,37 +1973,22 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem) return PAGE_ALIGN(start_mem); } -static char srmmuinfo[512]; - -static char *srmmu_mmu_info(void) +static int srmmu_mmu_info(char *buf) { - sprintf(srmmuinfo, "MMU type\t: %s\n" + return sprintf(buf, + "MMU type\t: %s\n" "invall\t\t: %d\n" "invmm\t\t: %d\n" "invrnge\t\t: %d\n" "invpg\t\t: %d\n" "contexts\t: %d\n" -#ifdef USE_CHUNK_ALLOC - "big chunks\t: %d\n" - "little chunks\t: %d\n" - "chunk pages\t: %d\n" - "garbage\t\t: %d\n" - "garbage hits\t: %d\n" -#endif , srmmu_name, module_stats.invall, module_stats.invmm, module_stats.invrnge, module_stats.invpg, num_contexts -#ifdef USE_CHUNK_ALLOC - , bcwater, lcwater, - chunk_pages, - garbage_calls, - clct_pages -#endif - ); - return srmmuinfo; + ); } static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) @@ -2242,7 +2152,7 @@ __initfunc(static void init_vac_layout(void)) (int)vac_cache_size, 
(int)vac_line_size); } -static void poke_hypersparc(void) +__initfunc(static void poke_hypersparc(void)) { volatile unsigned long clear; unsigned long mreg = srmmu_get_mmureg(); @@ -2271,35 +2181,38 @@ __initfunc(static void init_hypersparc(void)) init_vac_layout(); - set_pte = srmmu_set_pte_nocache_hyper; - flush_cache_all = hypersparc_flush_cache_all; - flush_cache_mm = hypersparc_flush_cache_mm; - flush_cache_range = hypersparc_flush_cache_range; - flush_cache_page = hypersparc_flush_cache_page; - - flush_tlb_all = hypersparc_flush_tlb_all; - flush_tlb_mm = hypersparc_flush_tlb_mm; - flush_tlb_range = hypersparc_flush_tlb_range; - flush_tlb_page = hypersparc_flush_tlb_page; - - flush_page_to_ram = hypersparc_flush_page_to_ram; - flush_sig_insns = hypersparc_flush_sig_insns; - flush_page_for_dma = NULL /* hypersparc_flush_page_for_dma */; - - flush_chunk = hypersparc_flush_chunk; /* local flush _only_ */ - - ctxd_set = hypersparc_ctxd_set; - switch_to_context = hypersparc_switch_to_context; - init_new_context = hypersparc_init_new_context; - destroy_context = hypersparc_destroy_context; - update_mmu_cache = srmmu_vac_update_mmu_cache; - sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir; + BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ + + BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM); poke_srmmu = poke_hypersparc; hypersparc_setup_blockops(); } -static void poke_cypress(void) +__initfunc(static void poke_cypress(void)) { unsigned long mreg = srmmu_get_mmureg(); unsigned long faddr, tagval; @@ -2342,25 +2255,28 @@ __initfunc(static void init_cypress_common(void)) { init_vac_layout(); - set_pte = srmmu_set_pte_nocache_cypress; - flush_cache_all = cypress_flush_cache_all; - flush_cache_mm = cypress_flush_cache_mm; - flush_cache_range = cypress_flush_cache_range; - flush_cache_page = cypress_flush_cache_page; + BTFIXUPSET_CALL(set_pte, 
srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM); - flush_tlb_all = cypress_flush_tlb_all; - flush_tlb_mm = cypress_flush_tlb_mm; - flush_tlb_page = cypress_flush_tlb_page; - flush_tlb_range = cypress_flush_tlb_range; + BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM); - flush_chunk = cypress_flush_chunk; /* local flush _only_ */ + BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ - flush_page_to_ram = cypress_flush_page_to_ram; - flush_sig_insns = cypress_flush_sig_insns; - flush_page_for_dma = NULL /* cypress_flush_page_for_dma */; - sparc_update_rootmmu_dir = cypress_update_rootmmu_dir; + BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM); - update_mmu_cache = srmmu_vac_update_mmu_cache; + BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM); poke_srmmu = poke_cypress; } @@ -2388,7 +2304,7 @@ __initfunc(static void init_cypress_605(unsigned long mrev)) init_cypress_common(); } -static void poke_swift(void) +__initfunc(static void poke_swift(void)) { unsigned long mreg = srmmu_get_mmureg(); @@ -2456,21 +2372,23 @@ __initfunc(static void init_swift(void)) break; }; - flush_cache_all = swift_flush_cache_all; - flush_cache_mm = swift_flush_cache_mm; - flush_cache_page = swift_flush_cache_page; - flush_cache_range = swift_flush_cache_range; + BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */ - flush_chunk = swift_flush_chunk; /* local flush _only_ */ + BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM); - flush_tlb_all = swift_flush_tlb_all; - flush_tlb_mm = swift_flush_tlb_mm; - flush_tlb_page = swift_flush_tlb_page; - flush_tlb_range = swift_flush_tlb_range; + BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM); - 
flush_page_to_ram = swift_flush_page_to_ram; - flush_sig_insns = swift_flush_sig_insns; - flush_page_for_dma = swift_flush_page_for_dma; + BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM); /* Are you now convinced that the Swift is one of the * biggest VLSI abortions of all time? Bravo Fujitsu! @@ -2484,8 +2402,9 @@ __initfunc(static void init_swift(void)) /* turbosparc.S */ extern void turbosparc_flush_cache_all(void); extern void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr); +extern void turbosparc_flush_page_for_dma(unsigned long page); -static void poke_turbosparc(void) +__initfunc(static void poke_turbosparc(void)) { unsigned long mreg = srmmu_get_mmureg(); unsigned long ccreg; @@ -2529,31 +2448,31 @@ __initfunc(static void init_turbosparc(void)) srmmu_name = "Fujitsu TurboSparc"; srmmu_modtype = TurboSparc; - flush_cache_all = turbosparc_flush_cache_all; - flush_cache_mm = hypersparc_flush_cache_mm; - flush_cache_page = hypersparc_flush_cache_page; - flush_cache_range = hypersparc_flush_cache_range; + BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM); - flush_tlb_all = hypersparc_flush_tlb_all; - flush_tlb_mm = hypersparc_flush_tlb_mm; - flush_tlb_page = hypersparc_flush_tlb_page; - flush_tlb_range = hypersparc_flush_tlb_range; + BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM); #ifdef TURBOSPARC_WRITEBACK - flush_page_to_ram = hypersparc_flush_page_to_ram; - flush_chunk = hypersparc_flush_chunk; + BTFIXUPSET_CALL(flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); #else - flush_page_to_ram = swift_flush_page_to_ram; - flush_chunk = swift_flush_chunk; + BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); #endif - flush_sig_insns = turbosparc_flush_sig_insns; - flush_page_for_dma = NULL /* turbosparc_flush_page_for_dma */; + BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP); poke_srmmu = poke_turbosparc; } -static void poke_tsunami(void) +__initfunc(static void poke_tsunami(void)) { unsigned long mreg = srmmu_get_mmureg(); @@ -2574,26 +2493,26 @@ __initfunc(static void init_tsunami(void)) srmmu_name = "TI Tsunami"; srmmu_modtype = Tsunami; - flush_cache_all = tsunami_flush_cache_all; - flush_cache_mm = tsunami_flush_cache_mm; - flush_cache_page = tsunami_flush_cache_page; - flush_cache_range = tsunami_flush_cache_range; + BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM); - flush_chunk = tsunami_flush_chunk; /* local flush _only_ */ + 
BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */ - flush_tlb_all = tsunami_flush_tlb_all; - flush_tlb_mm = tsunami_flush_tlb_mm; - flush_tlb_page = tsunami_flush_tlb_page; - flush_tlb_range = tsunami_flush_tlb_range; + BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM); - flush_page_to_ram = tsunami_flush_page_to_ram; - flush_sig_insns = tsunami_flush_sig_insns; - flush_page_for_dma = tsunami_flush_page_for_dma; + BTFIXUPSET_CALL(flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM); poke_srmmu = poke_tsunami; } -static void poke_viking(void) +__initfunc(static void poke_viking(void)) { unsigned long mreg = srmmu_get_mmureg(); static int smp_catch = 0; @@ -2637,13 +2556,14 @@ static void poke_viking(void) #ifdef __SMP__ /* Avoid unnecessary cross calls. */ - flush_cache_all = local_flush_cache_all; - flush_cache_mm = local_flush_cache_mm; - flush_cache_range = local_flush_cache_range; - flush_cache_page = local_flush_cache_page; - flush_page_to_ram = local_flush_page_to_ram; - flush_sig_insns = local_flush_sig_insns; - flush_page_for_dma = local_flush_page_for_dma; + BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); + BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); + BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); + BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); + BTFIXUPCOPY_CALL(flush_page_to_ram, local_flush_page_to_ram); + BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); + BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); + btfixup(); #endif } @@ -2664,10 +2584,13 @@ __initfunc(static void init_viking(void)) msi_set_sync(); - set_pte = srmmu_set_pte_nocache_viking; - sparc_update_rootmmu_dir = viking_update_rootmmu_dir; + BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM); - flush_chunk = viking_flush_chunk; /* local flush _only_ */ + BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */ /* We need this to make sure old viking takes no hits * on it's cache for dma snoops to workaround the @@ -2675,7 +2598,7 @@ __initfunc(static void init_viking(void)) * This is only necessary because of the new way in * which we use the IOMMU. */ - flush_page_for_dma = viking_flush_page; + BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); /* Also, this is so far the only chip which actually uses the page argument to flush_page_for_dma */ flush_page_for_dma_global = 0; @@ -2683,24 +2606,25 @@ __initfunc(static void init_viking(void)) srmmu_name = "TI Viking/MXCC"; viking_mxcc_present = 1; - flush_chunk = viking_mxcc_flush_chunk; /* local flush _only_ */ + BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */ /* MXCC vikings lack the DMA snooping bug. 
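
The BTFIXUPCALL_NOP conversions here are the payoff of the new fixup pass: a flush that a given MMU never needs is no longer a call to an empty function; the call site itself is overwritten with a nop at boot. A rough model of what the relocator writes (illustrative only; the real logic lives in the new btfixup.c):

/* Patch one 32-bit SPARC call site in place (illustrative). */
#define NOP_INSN 0x01000000u     /* "sethi 0, %g0", the canonical nop */

static void patch_call(unsigned int *site, unsigned long target, int nop)
{
    if (nop) {
        *site = NOP_INSN;        /* BTFIXUPCALL_NOP: flush not needed */
    } else {
        /* BTFIXUPCALL_NORM: CALL is op 01 plus a 30-bit word offset. */
        long disp = ((long)target - (long)site) >> 2;
        *site = 0x40000000u | ((unsigned long)disp & 0x3fffffffu);
    }
}
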
*/ - flush_page_for_dma = NULL /* viking_flush_page_for_dma */; + BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); } - flush_cache_all = viking_flush_cache_all; - flush_cache_mm = viking_flush_cache_mm; - flush_cache_page = viking_flush_cache_page; - flush_cache_range = viking_flush_cache_range; + /* flush_cache_* are nops */ + BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NOP); - flush_tlb_all = viking_flush_tlb_all; - flush_tlb_mm = viking_flush_tlb_mm; - flush_tlb_page = viking_flush_tlb_page; - flush_tlb_range = viking_flush_tlb_range; + BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); - flush_page_to_ram = viking_flush_page_to_ram; - flush_sig_insns = viking_flush_sig_insns; + BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); poke_srmmu = poke_viking; } @@ -2798,6 +2722,67 @@ __initfunc(static void get_srmmu_type(void)) srmmu_is_bad(); } +/* Low and high watermarks for page table cache. + The system should try to have pgt_water[0] <= cache elements <= pgt_water[1] + */ +extern int pgt_cache_water[2]; + +void srmmu_check_pgt_cache(void) +{ + struct page *page, *page2; + + if (pgtable_cache_size > pgt_cache_water[0]) { + spin_lock(&pte_spinlock); + for (page2 = NULL, page = (struct page *)pte_quicklist; page;) { + if ((unsigned int)page->pprev_hash == 0xffff) { + if (page2) + page2->next_hash = page->next_hash; + else + (struct page *)pte_quicklist = page->next_hash; + page->next_hash = NULL; + page->pprev_hash = NULL; + pgtable_cache_size -= 16; + free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + if (page2) + page = page2->next_hash; + else + page = (struct page *)pte_quicklist; + if (pgtable_cache_size <= pgt_cache_water[1]) + break; + continue; + } + page2 = page; + page = page->next_hash; + } + spin_unlock(&pte_spinlock); + } + if (pgd_cache_size > pgt_cache_water[0] / 4) { + spin_lock(&pgd_spinlock); + for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) { + if ((unsigned int)page->pprev_hash == 0xf) { + if (page2) + page2->next_hash = page->next_hash; + else + (struct page *)pgd_quicklist = page->next_hash; + page->next_hash = NULL; + page->pprev_hash = NULL; + pgd_cache_size -= 4; + free_page(PAGE_OFFSET + (page->map_nr << PAGE_SHIFT)); + if (page2) + page = page2->next_hash; + else + page = (struct page *)pgd_quicklist; + if (pgd_cache_size <= pgt_cache_water[1] / 4) + break; + continue; + } + page2 = page; + page = page->next_hash; + } + spin_unlock(&pgd_spinlock); + } +} + extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, tsetup_mmu_patchme, rtrap_mmu_patchme; @@ -2810,7 +2795,7 @@ extern unsigned long srmmu_fault; iaddr = &(insn); \ daddr = &(dest); \ *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \ - } while(0); + } while(0); __initfunc(static void patch_window_trap_handlers(void)) { @@ -2829,7 +2814,7 @@ __initfunc(static void patch_window_trap_handlers(void)) /* Local cross-calls. 
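
srmmu_check_pgt_cache is the shrinker for the new quicklists: a mask of 0xffff (pte pages) or 0xf (pgd pages) means every sub-table on the page is free again, and only such pages can be handed back to the page allocator, sixteen or four cache elements at a time, until the cache size is back inside the watermarks. The walk, condensed onto the ptpage model from the allocation sketch (locking omitted):

#include <stdlib.h>

/* Give fully-free pages back until the cache is small enough (sketch;
 * full_mask is 0xffff for pte pages, 0xf for pgd pages). */
static void trim_quicklist(struct ptpage **list, unsigned int full_mask,
                           int per_page, int *cached, int target)
{
    struct ptpage **pp = list;

    while (*pp && *cached > target) {
        struct ptpage *pg = *pp;

        if (pg->free_mask == full_mask) {
            *pp = pg->next;          /* unlink, then return the page */
            free(pg);
            *cached -= per_page;
        } else {
            pp = &pg->next;          /* partially used pages must stay */
        }
    }
}
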
*/ static void smp_flush_page_for_dma(unsigned long page) { - xc1((smpfunc_t) local_flush_page_for_dma, page); + xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); } #endif @@ -2839,98 +2824,107 @@ __initfunc(void ld_mmu_srmmu(void)) { extern void ld_mmu_iommu(void); extern void ld_mmu_iounit(void); + extern void ___xchg32_sun4md(void); /* First the constants */ - pmd_shift = SRMMU_PMD_SHIFT; - pmd_size = SRMMU_PMD_SIZE; - pmd_mask = SRMMU_PMD_MASK; - pgdir_shift = SRMMU_PGDIR_SHIFT; - pgdir_size = SRMMU_PGDIR_SIZE; - pgdir_mask = SRMMU_PGDIR_MASK; - - ptrs_per_pte = SRMMU_PTRS_PER_PTE; - ptrs_per_pmd = SRMMU_PTRS_PER_PMD; - ptrs_per_pgd = SRMMU_PTRS_PER_PGD; - - page_none = SRMMU_PAGE_NONE; - page_shared = SRMMU_PAGE_SHARED; - page_copy = SRMMU_PAGE_COPY; - page_readonly = SRMMU_PAGE_RDONLY; - page_kernel = SRMMU_PAGE_KERNEL; + BTFIXUPSET_SIMM13(pmd_shift, SRMMU_PMD_SHIFT); + BTFIXUPSET_SETHI(pmd_size, SRMMU_PMD_SIZE); + BTFIXUPSET_SETHI(pmd_mask, SRMMU_PMD_MASK); + BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); + BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); + BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); + + BTFIXUPSET_SIMM13(ptrs_per_pte, SRMMU_PTRS_PER_PTE); + BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); + BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); + + BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); + BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED)); + BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); + BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF; - + /* Functions */ - set_pte = srmmu_set_pte_cacheable; - init_new_context = srmmu_init_new_context; - switch_to_context = srmmu_switch_to_context; - pmd_align = srmmu_pmd_align; - pgdir_align = srmmu_pgdir_align; - vmalloc_start = srmmu_vmalloc_start; - - pte_page = srmmu_pte_page; - pmd_page = srmmu_pmd_page; - pgd_page = srmmu_pgd_page; - - sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir; - - pte_none = srmmu_pte_none; - pte_present = srmmu_pte_present; - pte_clear = srmmu_pte_clear; - - pmd_none = srmmu_pmd_none; - pmd_bad = srmmu_pmd_bad; - pmd_present = srmmu_pmd_present; - pmd_clear = srmmu_pmd_clear; - - pgd_none = srmmu_pgd_none; - pgd_bad = srmmu_pgd_bad; - pgd_present = srmmu_pgd_present; - pgd_clear = srmmu_pgd_clear; - - mk_pte = srmmu_mk_pte; - mk_pte_phys = srmmu_mk_pte_phys; - pgd_set = srmmu_pgd_set; - mk_pte_io = srmmu_mk_pte_io; - pte_modify = srmmu_pte_modify; - pgd_offset = srmmu_pgd_offset; - pmd_offset = srmmu_pmd_offset; - pte_offset = srmmu_pte_offset; - pte_free_kernel = srmmu_pte_free_kernel; - pmd_free_kernel = srmmu_pmd_free_kernel; - pte_alloc_kernel = srmmu_pte_alloc_kernel; - pmd_alloc_kernel = srmmu_pmd_alloc_kernel; - pte_free = srmmu_pte_free; - pte_alloc = srmmu_pte_alloc; - pmd_free = srmmu_pmd_free; - pmd_alloc = srmmu_pmd_alloc; - pgd_free = srmmu_pgd_free; - pgd_alloc = srmmu_pgd_alloc; - - pte_write = srmmu_pte_write; - pte_dirty = srmmu_pte_dirty; - pte_young = srmmu_pte_young; - pte_wrprotect = srmmu_pte_wrprotect; - pte_mkclean = srmmu_pte_mkclean; - pte_mkold = srmmu_pte_mkold; - pte_mkwrite = srmmu_pte_mkwrite; - pte_mkdirty = srmmu_pte_mkdirty; - pte_mkyoung = srmmu_pte_mkyoung; - update_mmu_cache = srmmu_update_mmu_cache; - destroy_context = srmmu_destroy_context; +#ifndef __SMP__ + BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); +#endif + BTFIXUPSET_CALL(get_pte_fast, 
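
The constants section shows the other half of the fixup vocabulary: SIMM13, SETHI, HALF and INT pick the SPARC immediate form each value must fit, because the value is patched into the instruction itself rather than loaded from memory. The relevant encodings (sketch):

/* How a 32-bit constant maps onto SPARC immediates (sketch). */
static unsigned int hi22(unsigned int v) { return v >> 10; }     /* sethi */
static unsigned int lo10(unsigned int v) { return v & 0x3ff; }   /* or    */
static int fits_simm13(long v) { return v >= -4096 && v < 4096; }

/* So SRMMU_PGDIR_SHIFT (24) fits a simm13, while SRMMU_PGDIR_SIZE
 * (1 << 24) has zero low bits and a single sethi carries it exactly. */
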
srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0)); + BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0)); + BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1); + BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM); + + BTFIXUPSET_SETHI(none_mask, 0xF0000000); + + BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); + + BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); + + BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); + + BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); - mmu_info = srmmu_mmu_info; - mmu_v2p = srmmu_v2p; - mmu_p2v = srmmu_p2v; + BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); + BTFIXUPSET_CALL(pgd_offset, srmmu_pgd_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_offset, srmmu_pte_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_free_kernel, srmmu_pte_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_free_kernel, srmmu_pmd_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_alloc_kernel, srmmu_pte_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_alloc_kernel, srmmu_pmd_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_alloc, srmmu_pte_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_free, srmmu_pmd_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_alloc, srmmu_pmd_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_free, srmmu_pgd_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_alloc, srmmu_pgd_alloc, BTFIXUPCALL_NORM); + + BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); + BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); + BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); + BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); + BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); + BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); + BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); + BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY)); + BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); + BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_v2p, srmmu_v2p, BTFIXUPCALL_NORM); + 
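A note on the fixup flavors being registered throughout ld_mmu_srmmu(): the third argument of BTFIXUPSET_CALL tells the boot-time patcher how far it may optimize each call site (BTFIXUPCALL_NORM stays a real call, BTFIXUPCALL_NOP deletes the call outright, and kinds such as RETINT(v), ORINT(v) or the SWAP*/ST* variants evidently collapse the whole call into one instruction: a constant return, an or, a swap, a store), while BTFIXUPSET_SIMM13/SETHI/HALF/INT patch constants straight into instruction immediates. Which constant kind is usable follows from SPARC instruction encoding; a small sketch of the two constraints visible above, where the shift counts are registered as SIMM13 and the power-of-two sizes and masks as SETHI operands (illustrative helpers, not kernel code):

/* SPARC arithmetic immediates are 13-bit signed values; sethi can only
 * load the upper 22 bits of a word, so its operand needs the low 10
 * bits clear.
 */
static int fits_simm13(long v)
{
        return v >= -4096 && v <= 4095;         /* 13-bit signed range */
}

static int fits_sethi(unsigned long v)
{
        return (v & 0x3ff) == 0;                /* low 10 bits must be zero */
}

/* e.g. SRMMU_PMD_SHIFT (a small shift count) satisfies fits_simm13(),
 * while SRMMU_PMD_SIZE and SRMMU_PMD_MASK satisfy fits_sethi().
 */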
BTFIXUPSET_CALL(mmu_p2v, srmmu_p2v, BTFIXUPCALL_NORM); /* Task struct and kernel stack allocating/freeing. */ - alloc_task_struct = srmmu_alloc_task_struct; - free_task_struct = srmmu_free_task_struct; + BTFIXUPSET_CALL(alloc_task_struct, srmmu_alloc_task_struct, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM); - quick_kernel_fault = srmmu_quick_kernel_fault; + BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM); /* SRMMU specific. */ - ctxd_set = srmmu_ctxd_set; - pmd_set = srmmu_pmd_set; + BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); get_srmmu_type(); patch_window_trap_handlers(); @@ -2938,33 +2932,38 @@ __initfunc(void ld_mmu_srmmu(void)) #ifdef __SMP__ /* El switcheroo... */ - local_flush_cache_all = flush_cache_all; - local_flush_cache_mm = flush_cache_mm; - local_flush_cache_range = flush_cache_range; - local_flush_cache_page = flush_cache_page; - local_flush_tlb_all = flush_tlb_all; - local_flush_tlb_mm = flush_tlb_mm; - local_flush_tlb_range = flush_tlb_range; - local_flush_tlb_page = flush_tlb_page; - local_flush_page_to_ram = flush_page_to_ram; - local_flush_sig_insns = flush_sig_insns; - local_flush_page_for_dma = flush_page_for_dma; - - flush_cache_all = smp_flush_cache_all; - flush_cache_mm = smp_flush_cache_mm; - flush_cache_range = smp_flush_cache_range; - flush_cache_page = smp_flush_cache_page; - flush_tlb_all = smp_flush_tlb_all; - flush_tlb_mm = smp_flush_tlb_mm; - flush_tlb_range = smp_flush_tlb_range; - flush_tlb_page = smp_flush_tlb_page; - flush_page_to_ram = smp_flush_page_to_ram; - flush_sig_insns = smp_flush_sig_insns; - if (flush_page_for_dma) - flush_page_for_dma = smp_flush_page_for_dma; + BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); + BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); + BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); + BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); + BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); + BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); + BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); + BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); + BTFIXUPCOPY_CALL(local_flush_page_to_ram, flush_page_to_ram); + BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); + BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); + + BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); #endif if (sparc_cpu_model == sun4d) ld_mmu_iounit(); else ld_mmu_iommu(); +#ifdef __SMP__ + if (sparc_cpu_model == sun4d) + sun4d_init_smp(); + else + sun4m_init_smp(); +#endif } diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index c70753fa4..d247e1f2d 
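Stepping back to srmmu_check_pgt_cache() above: SRMMU page tables are smaller than a page, so several are carved out of each one (the 0xffff mask and the "-= 16" suggest sixteen 256-byte pte tables per 4 KB page, the 0xf mask and "-= 4" four 1 KB pgds), with page->pprev_hash reused as a bitmask of free fragments. The routine walks each quicklist once the cache grows past the low watermark and returns only fully-free pages to the allocator, stopping when the cache drops back under the other mark. A minimal, self-contained sketch of that policy (names, the pointer-to-pointer walk, and the watermark values are illustrative, not the kernel's):

#include <stdint.h>
#include <stdlib.h>

#define FRAGS_PER_PAGE  16          /* pte tables carved from one page */
#define ALL_FREE        0xffffu     /* bitmask value meaning "all free" */

struct pt_page {
        struct pt_page *next;       /* stands in for page->next_hash */
        uint16_t free_mask;         /* stands in for page->pprev_hash */
};

static struct pt_page *pt_quicklist;
static int cache_size;              /* counted in tables, hence -= 16 per page */
static int water[2] = { 25, 50 };   /* low/high watermarks (illustrative) */

static void check_pt_cache(void)
{
        struct pt_page **link = &pt_quicklist, *p;

        if (cache_size <= water[0])
                return;                         /* cache is small enough */
        while ((p = *link) != NULL) {
                if (p->free_mask == ALL_FREE) {
                        *link = p->next;        /* unlink the fully-free page */
                        free(p);                /* and hand it back */
                        cache_size -= FRAGS_PER_PAGE;
                        if (cache_size <= water[1])
                                break;
                        continue;
                }
                link = &p->next;                /* partially used pages stay */
        }
}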
100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1,11 +1,14 @@ -/* $Id: sun4c.c,v 1.149 1997/07/20 05:59:38 davem Exp $ +/* $Id: sun4c.c,v 1.163 1998/03/11 04:08:21 tdyas Exp $ * sun4c.c: Doing in software what should be done in hardware. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au) + * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) + * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ +#include <linux/config.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> @@ -22,6 +25,7 @@ #include <asm/oplib.h> #include <asm/openprom.h> #include <asm/mmu_context.h> +#include <asm/sun4paddr.h> /* TODO: Make it such that interrupt handlers cannot dick with * the user segment lists, most of the cli/sti pairs can @@ -59,11 +63,15 @@ extern int num_segmaps, num_contexts; : "g4", "cc"); \ } while(0); -/* That's it, we prom_halt() if the cache size is something other than 65536. +#ifdef CONFIG_SUN4 +#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes +#else +/* That's it, we prom_halt() on sun4c if the cache size is something other than 65536. * So let's save some cycles and just use that everywhere except for that bootup * sanity check. */ -#define SUN4C_VAC_SIZE 65536 +#define SUN4C_VAC_SIZE 65536 +#endif #define SUN4C_KERNEL_BUCKETS 32 @@ -427,22 +435,76 @@ static inline void sun4c_init_clean_mmu(unsigned long kernel_end) sun4c_set_context(savectx); } -void sun4c_probe_vac(void) +__initfunc(void sun4c_probe_vac(void)) { sun4c_disable_vac(); - if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { - /* PROM on SS1 lacks this info, to be super safe we - * hard code it here since this arch is cast in stone. - */ - sun4c_vacinfo.num_bytes = 65536; - sun4c_vacinfo.linesize = 16; + + if (ARCH_SUN4) { + switch(idprom->id_machtype) { + + case (SM_SUN4|SM_4_110): + sun4c_vacinfo.type = NONE; + sun4c_vacinfo.num_bytes = 0; + sun4c_vacinfo.linesize = 0; + sun4c_vacinfo.do_hwflushes = 0; + prom_printf("No VAC. Get some bucks and buy a real computer."); + prom_halt(); + break; + + case (SM_SUN4|SM_4_260): + sun4c_vacinfo.type = WRITE_BACK; + sun4c_vacinfo.num_bytes = 128 * 1024; + sun4c_vacinfo.linesize = 16; + sun4c_vacinfo.do_hwflushes = 0; + break; + + case (SM_SUN4|SM_4_330): + sun4c_vacinfo.type = WRITE_THROUGH; + sun4c_vacinfo.num_bytes = 128 * 1024; + sun4c_vacinfo.linesize = 16; + sun4c_vacinfo.do_hwflushes = 0; + break; + + case (SM_SUN4|SM_4_470): + sun4c_vacinfo.type = WRITE_BACK; + sun4c_vacinfo.num_bytes = 128 * 1024; + sun4c_vacinfo.linesize = 32; + sun4c_vacinfo.do_hwflushes = 0; + break; + + default: + prom_printf("Cannot initialize VAC - weird sun4 model idprom->id_machtype = %d", idprom->id_machtype); + prom_halt(); + } } else { - sun4c_vacinfo.num_bytes = prom_getintdefault(prom_root_node, - "vac-size", 65536); - sun4c_vacinfo.linesize = prom_getintdefault(prom_root_node, - "vac-linesize", 16); + sun4c_vacinfo.type = WRITE_THROUGH; + + if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { + /* PROM on SS1 lacks this info, to be super safe we + * hard code it here since this arch is cast in stone.
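Everything sun4c_probe_vac() records feeds the flush logic in the rest of this file: the VAC is a virtually indexed cache, so two virtual mappings of the same physical page can sit in different cache lines and go stale independently, which is what sun4c_vac_alias_fixup() further down guards against for shared mappings. The aliasing condition itself is simple (an illustrative helper, not taken from the kernel):

/* Two virtual addresses for the same byte can occupy distinct lines of
 * a virtually indexed cache only if they differ within the cache size.
 */
static int vac_alias(unsigned long va1, unsigned long va2,
                     unsigned long vac_size)
{
        return ((va1 ^ va2) & (vac_size - 1)) != 0;
}

/* With the 64 KB sun4c VAC, mappings a multiple of 64 KB apart index
 * the same line and therefore never alias. */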
+ */ + sun4c_vacinfo.num_bytes = 65536; + sun4c_vacinfo.linesize = 16; + } else { + sun4c_vacinfo.num_bytes = + prom_getintdefault(prom_root_node, "vac-size", 65536); + sun4c_vacinfo.linesize = + prom_getintdefault(prom_root_node, "vac-linesize", 16); + } + sun4c_vacinfo.do_hwflushes = + prom_getintdefault(prom_root_node, "vac-hwflush", 0); + + if(sun4c_vacinfo.do_hwflushes == 0) + sun4c_vacinfo.do_hwflushes = + prom_getintdefault(prom_root_node, "vac_hwflush", 0); + + if (sun4c_vacinfo.num_bytes != 65536) { + prom_printf("WEIRD Sun4C VAC cache size, tell davem"); + prom_halt(); + } } + sun4c_vacinfo.num_lines = (sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize); switch(sun4c_vacinfo.linesize) { @@ -458,17 +520,6 @@ void sun4c_probe_vac(void) prom_halt(); }; - sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node, - "vac-hwflush", 0); - if(sun4c_vacinfo.do_hwflushes == 0) - sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node, - "vac_hwflush", 0); - - if(sun4c_vacinfo.num_bytes != 65536) { - prom_printf("WEIRD Sun4C VAC cache size, tell davem"); - prom_halt(); - } - sun4c_flush_all(); sun4c_enable_vac(); } @@ -476,6 +527,7 @@ void sun4c_probe_vac(void) /* Patch instructions for the low level kernel fault handler. */ extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff; extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff; +extern unsigned long invalid_segment_patch1_1ff, invalid_segment_patch2_1ff; extern unsigned long num_context_patch1, num_context_patch1_16; extern unsigned long num_context_patch2, num_context_patch2_16; extern unsigned long vac_linesize_patch, vac_linesize_patch_32; @@ -502,6 +554,12 @@ static void patch_kernel_fault_handler(void) PATCH_INSN(invalid_segment_patch2_ff, invalid_segment_patch2); break; + case 512: + PATCH_INSN(invalid_segment_patch1_1ff, + invalid_segment_patch1); + PATCH_INSN(invalid_segment_patch2_1ff, + invalid_segment_patch2); + break; default: prom_printf("Unhandled number of segmaps: %d\n", num_segmaps); @@ -541,38 +599,80 @@ static void patch_kernel_fault_handler(void) } } -static void sun4c_probe_mmu(void) +__initfunc(static void sun4c_probe_mmu(void)) { - if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || - (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { - /* Hardcode these just to be safe, PROM on SS1 does - * not have this info available in the root node. - */ - num_segmaps = 128; - num_contexts = 8; + if (ARCH_SUN4) { + switch(idprom->id_machtype) { + case (SM_SUN4|SM_4_110): + prom_printf("No support for 4100 yet\n"); + prom_halt(); + num_segmaps = 256; + num_contexts = 8; + break; + + case (SM_SUN4|SM_4_260): + prom_printf("No support for 4200 yet\n"); + prom_halt(); + num_segmaps = 512; + num_contexts = 16; + break; + + case (SM_SUN4|SM_4_330): + num_segmaps = 256; + num_contexts = 16; + break; + + case (SM_SUN4|SM_4_470): + prom_printf("No support for 4400 yet\n"); + prom_halt(); + num_segmaps = 1024; + num_contexts = 64; + break; + default: + prom_printf("Invalid SUN4 model\n"); + prom_halt(); + } } else { - num_segmaps = prom_getintdefault(prom_root_node, "mmu-npmg", 128); - num_contexts = prom_getintdefault(prom_root_node, "mmu-nctx", 0x8); + if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || + (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { + /* Hardcode these just to be safe, PROM on SS1 does + * not have this info available in the root node. 
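For scale, the numbers probed here size the only translation resource a sun4c-style MMU has: a fixed pool of segment maps (pmegs) shared by a fixed number of hardware contexts, which the ring and allocation code later in this file juggles in software. Assuming 4 KB pages and 64 PTEs per segment map (both assumptions of this sketch, not values from the diff), the probed count bounds how much address space can be resident at once:

#define SUN4C_PAGE_SIZE 4096UL      /* assumed page size */
#define PTES_PER_PMEG   64UL        /* assumed PTEs per segment map */

static unsigned long resident_bytes(unsigned long num_segmaps)
{
        return num_segmaps * PTES_PER_PMEG * SUN4C_PAGE_SIZE;
}

/* e.g. an SS1-class sun4c with 128 segmaps can keep 32 MB mapped at
 * any instant, shared among its 8 contexts; everything else has to be
 * faulted back in. */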
+ */ + num_segmaps = 128; + num_contexts = 8; + } else { + num_segmaps = + prom_getintdefault(prom_root_node, "mmu-npmg", 128); + num_contexts = + prom_getintdefault(prom_root_node, "mmu-nctx", 0x8); + } } patch_kernel_fault_handler(); } volatile unsigned long *sun4c_memerr_reg = 0; -void sun4c_probe_memerr_reg(void) +__initfunc(void sun4c_probe_memerr_reg(void)) { int node; struct linux_prom_registers regs[1]; - node = prom_getchild(prom_root_node); - node = prom_searchsiblings(prom_root_node, "memory-error"); - if (!node) - return; - prom_getproperty(node, "reg", (char *)regs, sizeof(regs)); - sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0, - regs[0].reg_size, - "memory parity error", - regs[0].which_io, 0); + if (ARCH_SUN4) { + sun4c_memerr_reg = sparc_alloc_io(SUN4_MEMREG_PHYSADDR, 0, + PAGE_SIZE, + "memory parity error", + 0x0, 0); + } else { + node = prom_getchild(prom_root_node); + node = prom_searchsiblings(prom_root_node, "memory-error"); + if (!node) + return; + prom_getproperty(node, "reg", (char *)regs, sizeof(regs)); + sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0, + regs[0].reg_size, + "memory parity error", + regs[0].which_io, 0); + } } static inline void sun4c_init_ss2_cache_bug(void) @@ -581,6 +681,7 @@ static inline void sun4c_init_ss2_cache_bug(void) if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) || (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) || + (idprom->id_machtype == (SM_SUN4 | SM_4_330)) || (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) { /* Whee.. */ printk("SS2 cache bug detected, uncaching trap table page\n"); @@ -626,13 +727,14 @@ struct sun4c_mmu_entry { unsigned char pseg; unsigned char locked; }; -static struct sun4c_mmu_entry mmu_entry_pool[256]; + +static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS]; __initfunc(static void sun4c_init_mmu_entry_pool(void)) { int i; - for(i=0; i < 256; i++) { + for(i=0; i < SUN4C_MAX_SEGMAPS; i++) { mmu_entry_pool[i].pseg = i; mmu_entry_pool[i].next = 0; mmu_entry_pool[i].prev = 0; @@ -703,7 +805,8 @@ struct sun4c_mmu_ring { struct sun4c_mmu_entry ringhd; int num_entries; }; -static struct sun4c_mmu_ring sun4c_context_ring[16]; /* used user entries */ + +static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */ static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */ struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */ struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */ @@ -711,7 +814,7 @@ struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */ static inline void sun4c_init_rings(unsigned long *mempool) { int i; - for(i=0; i<16; i++) { + for(i=0; i<SUN4C_MAX_CONTEXTS; i++) { sun4c_context_ring[i].ringhd.next = sun4c_context_ring[i].ringhd.prev = &sun4c_context_ring[i].ringhd; @@ -1120,7 +1223,7 @@ static int sun4c_lowbucket_avail; #define BUCKET_PTE(page) \ ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL)) #define BUCKET_PTE_PAGE(pte) \ - (PAGE_OFFSET + (((pte) & 0xffff) << PAGE_SHIFT)) + (PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT)) static inline void get_locked_segment(unsigned long addr) { @@ -1180,12 +1283,18 @@ static inline void garbage_collect(int entry) free_locked_segment(BUCKET_ADDR(entry)); } +#ifdef CONFIG_SUN4 +#define TASK_STRUCT_ORDER 0 +#else +#define TASK_STRUCT_ORDER 1 +#endif + static struct task_struct *sun4c_alloc_task_struct(void) { unsigned long addr, pages; int entry; - pages = __get_free_pages(GFP_KERNEL, 1); + pages = 
__get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER); if(!pages) return (struct task_struct *) 0; @@ -1193,7 +1302,7 @@ static struct task_struct *sun4c_alloc_task_struct(void) if(sun4c_bucket[entry] == BUCKET_EMPTY) break; if(entry == NR_TASKS) { - free_pages(pages, 1); + free_pages(pages, TASK_STRUCT_ORDER); return (struct task_struct *) 0; } if(entry >= sun4c_lowbucket_avail) @@ -1204,8 +1313,9 @@ static struct task_struct *sun4c_alloc_task_struct(void) if(sun4c_get_segmap(addr) == invalid_segment) get_locked_segment(addr); sun4c_put_pte(addr, BUCKET_PTE(pages)); +#ifndef CONFIG_SUN4 sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE)); - +#endif return (struct task_struct *) addr; } @@ -1217,15 +1327,18 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk) /* We are deleting a mapping, so the flush here is mandatory. */ sun4c_flush_page_hw(tsaddr); +#ifndef CONFIG_SUN4 sun4c_flush_page_hw(tsaddr + PAGE_SIZE); - +#endif sun4c_put_pte(tsaddr, 0); +#ifndef CONFIG_SUN4 sun4c_put_pte(tsaddr + PAGE_SIZE, 0); +#endif sun4c_bucket[entry] = BUCKET_EMPTY; if(entry < sun4c_lowbucket_avail) sun4c_lowbucket_avail = entry; - free_pages(pages, 1); + free_pages(pages, TASK_STRUCT_ORDER); garbage_collect(entry); } @@ -1237,15 +1350,18 @@ static void sun4c_free_task_struct_sw(struct task_struct *tsk) /* We are deleting a mapping, so the flush here is mandatory. */ sun4c_flush_page_sw(tsaddr); +#ifndef CONFIG_SUN4 sun4c_flush_page_sw(tsaddr + PAGE_SIZE); - +#endif sun4c_put_pte(tsaddr, 0); +#ifndef CONFIG_SUN4 sun4c_put_pte(tsaddr + PAGE_SIZE, 0); +#endif sun4c_bucket[entry] = BUCKET_EMPTY; if(entry < sun4c_lowbucket_avail) sun4c_lowbucket_avail = entry; - free_pages(pages, 1); + free_pages(pages, TASK_STRUCT_ORDER); garbage_collect(entry); } @@ -1253,9 +1369,8 @@ __initfunc(static void sun4c_init_buckets(void)) { int entry; - if(sizeof(union task_union) != (PAGE_SIZE << 1)) { - prom_printf("task union not 2 pages!\n"); - prom_halt(); + if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) { + prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER); } for(entry = 0; entry < NR_TASKS; entry++) sun4c_bucket[entry] = BUCKET_EMPTY; @@ -1949,12 +2064,17 @@ static void sun4c_set_pte(pte_t *ptep, pte_t pte) *ptep = pte; } +static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp) +{ +} + + void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly) { unsigned long page_entry; - page_entry = ((physaddr >> PAGE_SHIFT) & 0xffff); + page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK); page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT)); if(rdonly) page_entry &= ~_SUN4C_WRITEABLE; @@ -2092,21 +2212,17 @@ static void sun4c_destroy_context_sw(struct mm_struct *mm) } } -#if KGPROF_PROFILING -static char s4cinfo[10240]; -#else -static char s4cinfo[512]; -#endif - -static char *sun4c_mmu_info(void) +static int sun4c_mmu_info(char *buf) { int used_user_entries, i; + int len; used_user_entries = 0; for(i=0; i < num_contexts; i++) used_user_entries += sun4c_context_ring[i].num_entries; - sprintf(s4cinfo, "vacsize\t\t: %d bytes\n" + len = sprintf(buf, + "vacsize\t\t: %d bytes\n" "vachwflush\t: %s\n" "vaclinesize\t: %d bytes\n" "mmuctxs\t\t: %d\n" @@ -2135,29 +2251,31 @@ static char *sun4c_mmu_info(void) #if KGPROF_PROFILING { - char *p = s4cinfo + strlen(s4cinfo); int i,j; - sprintf(p,"kgprof profiling:\n"); p += strlen(p); + len += sprintf(buf + len,"kgprof profiling:\n"); for (i=0;i<KGPROF_SIZE && 
kgprof_counters[i].addr[0];i++) { - sprintf(p,"%5d ",kgprof_counters[i].count); p += strlen(p); + len += sprintf(buf + len,"%5d ",kgprof_counters[i].count); for (j=0;j<KGPROF_DEPTH;j++) { - sprintf(p,"%08x ",kgprof_counters[i].addr[j]); - p += strlen(p); + len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]); } - sprintf(p,"\n"); p += strlen(p); + len += sprintf(buf + len,"\n"); } } #endif - return s4cinfo; + return len; } /* Nothing below here should touch the mmu hardware nor the mmu_entry * data structures. */ +#if 0 /* Not used due to BTFIXUPs */ static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); } +#endif +#if 0 /* Not used due to BTFIXUPs */ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); } +#endif /* First the functions which the mid-level code uses to directly * manipulate the software page tables. Some defines since we are @@ -2170,12 +2288,17 @@ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_AL #define PGD_DIRTY 0x040 #define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY) +#if 0 /* Not used due to BTFIXUPs */ static unsigned long sun4c_vmalloc_start(void) { return SUN4C_VMALLOC_START; } +#endif +#if 0 /* Not used due to BTFIXUPs */ static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); } +#endif + static int sun4c_pte_present(pte_t pte) { return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0); @@ -2204,35 +2327,47 @@ static void sun4c_pgd_clear(pgd_t * pgdp) { } * The following only work if pte_present() is true. * Undefined behaviour if not.. */ +#if 0 /* Not used due to BTFIXUPs */ static int sun4c_pte_write(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_WRITE; } +#endif +#if 0 /* Not used due to BTFIXUPs */ static int sun4c_pte_dirty(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_MODIFIED; } +#endif +#if 0 /* Not used due to BTFIXUPs */ static int sun4c_pte_young(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_ACCESSED; } +#endif +#if 0 /* Not used due to BTFIXUPs */ static pte_t sun4c_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE)); } +#endif +#if 0 /* Not used due to BTFIXUPs */ static pte_t sun4c_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE)); } +#endif +#if 0 /* Not used due to BTFIXUPs */ static pte_t sun4c_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ)); } +#endif static pte_t sun4c_pte_mkwrite(pte_t pte) { @@ -2277,22 +2412,29 @@ static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space) return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot)); } +#if 0 /* Not used due to BTFIXUPs */ static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) | pgprot_val(newprot)); } +#endif static unsigned long sun4c_pte_page(pte_t pte) { - return (PAGE_OFFSET + ((pte_val(pte) & 0xffff) << (PAGE_SHIFT))); + return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT))); } -static unsigned long sun4c_pmd_page(pmd_t pmd) +static inline unsigned long sun4c_pmd_page(pmd_t pmd) { return (pmd_val(pmd) & PAGE_MASK); } +static unsigned long sun4c_pgd_page(pgd_t pgd) +{ + return 0; +} + /* to find an entry in a page-table-directory */ pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address) { @@ -2351,6 +2493,16 @@ static pte_t *sun4c_pte_alloc_kernel(pmd_t 
*pmd, unsigned long address) return (pte_t *) sun4c_pmd_page(*pmd) + address; } +static void sun4c_free_pte_slow(pte_t *pte) +{ + free_page((unsigned long)pte); +} + +static void sun4c_free_pgd_slow(pgd_t *pgd) +{ + free_page((unsigned long)pgd); +} + /* * allocating and freeing a pmd is trivial: the 1-entry pmd is * inside the pgd, so has no extra memory associated with it. @@ -2364,16 +2516,73 @@ static pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address) return (pmd_t *) pgd; } +extern __inline__ pgd_t *sun4c_get_pgd_fast(void) +{ + unsigned long *ret; + + if((ret = pgd_quicklist) != NULL) { + pgd_quicklist = (unsigned long *)(*ret); + ret[0] = ret[1]; + pgtable_cache_size--; + } else { + pgd_t *init; + + ret = (unsigned long *)__get_free_page(GFP_KERNEL); + memset (ret, 0, (KERNBASE / SUN4C_PGDIR_SIZE) * sizeof(pgd_t)); + init = pgd_offset(&init_mm, 0); + memcpy (((pgd_t *)ret) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); + } + return (pgd_t *)ret; +} + +static void sun4c_set_pgdir(unsigned long address, pgd_t entry) +{ + /* Nothing to do */ +} + +extern __inline__ void sun4c_free_pgd_fast(pgd_t *pgd) +{ + *(unsigned long *)pgd = (unsigned long) pgd_quicklist; + pgd_quicklist = (unsigned long *) pgd; + pgtable_cache_size++; +} + +extern __inline__ pte_t *sun4c_get_pte_fast(void) +{ + unsigned long *ret; + + if((ret = (unsigned long *)pte_quicklist) != NULL) { + pte_quicklist = (unsigned long *)(*ret); + ret[0] = ret[1]; + pgtable_cache_size--; + } + return (pte_t *)ret; +} + +extern __inline__ void sun4c_free_pte_fast(pte_t *pte) +{ + *(unsigned long *)pte = (unsigned long) pte_quicklist; + pte_quicklist = (unsigned long *) pte; + pgtable_cache_size++; +} + static void sun4c_pte_free(pte_t *pte) { - free_page((unsigned long) pte); + sun4c_free_pte_fast(pte); } static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address) { address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1); if (sun4c_pmd_none(*pmd)) { - pte_t *page = (pte_t *) get_free_page(GFP_KERNEL); + pte_t *page = (pte_t *) sun4c_get_pte_fast(); + + if (page) { + *pmd = __pmd(PGD_TABLE | (unsigned long) page); + return page + address; + } + page = (pte_t *) get_free_page(GFP_KERNEL); if (sun4c_pmd_none(*pmd)) { if (page) { *pmd = __pmd(PGD_TABLE | (unsigned long) page); @@ -2392,13 +2601,17 @@ static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address) return (pte_t *) sun4c_pmd_page(*pmd) + address; } +static pte_t *sun4c_pte_get(void) +{ + return sun4c_get_pte_fast(); +} + /* * allocating and freeing a pmd is trivial: the 1-entry pmd is * inside the pgd, so has no extra memory associated with it. 
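The *_fast helpers above implement an intrusive freelist: a free page table is chained through its own word 0, and on allocation the link is wiped with ret[0] = ret[1], copying a neighboring entry that is empty in a free table, so the hot path needs no memset (the pgd variant instead copies the kernel entries from init_mm when it falls back to __get_free_page()). A self-contained sketch of the pte flavor, with illustrative names:

#include <stddef.h>

static unsigned long *pte_quicklist;
static int pgtable_cache_size;

static void pte_free_fast(unsigned long *pte)
{
        pte[0] = (unsigned long) pte_quicklist; /* word 0 becomes the link */
        pte_quicklist = pte;
        pgtable_cache_size++;
}

static unsigned long *pte_get_fast(void)
{
        unsigned long *ret = pte_quicklist;

        if (ret != NULL) {
                pte_quicklist = (unsigned long *) ret[0];
                ret[0] = ret[1];        /* wipe the link; entry 1 is empty
                                           in a free table */
                pgtable_cache_size--;
        }
        return ret;                     /* NULL: caller takes the slow path */
}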
*/ static void sun4c_pmd_free(pmd_t * pmd) { - *pmd = __pmd(0); } static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address) @@ -2408,12 +2621,12 @@ static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address) static void sun4c_pgd_free(pgd_t *pgd) { - free_page((unsigned long) pgd); + sun4c_free_pgd_fast(pgd); } static pgd_t *sun4c_pgd_alloc(void) { - return (pgd_t *) get_free_page(GFP_KERNEL); + return sun4c_get_pgd_fast(); } /* There are really two cases of aliases to watch out for, and these @@ -2435,12 +2648,13 @@ static pgd_t *sun4c_pgd_alloc(void) */ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long address, pte_t pte) { - struct dentry *dentry; + struct dentry *dentry = NULL; struct inode *inode = NULL; pgd_t *pgdp; pte_t *ptep; - dentry = vma->vm_dentry; + if (vma->vm_file) + dentry = vma->vm_file->f_dentry; if(dentry) inode = dentry->d_inode; if(inode) { @@ -2556,134 +2770,147 @@ __initfunc(unsigned long sun4c_paging_init(unsigned long start_mem, unsigned lon /* Load up routines and constants for sun4c mmu */ __initfunc(void ld_mmu_sun4c(void)) { + extern void ___xchg32_sun4c(void); + printk("Loading sun4c MMU routines\n"); /* First the constants */ - pmd_shift = SUN4C_PMD_SHIFT; - pmd_size = SUN4C_PMD_SIZE; - pmd_mask = SUN4C_PMD_MASK; - pgdir_shift = SUN4C_PGDIR_SHIFT; - pgdir_size = SUN4C_PGDIR_SIZE; - pgdir_mask = SUN4C_PGDIR_MASK; - - ptrs_per_pte = SUN4C_PTRS_PER_PTE; - ptrs_per_pmd = SUN4C_PTRS_PER_PMD; - ptrs_per_pgd = SUN4C_PTRS_PER_PGD; - - page_none = SUN4C_PAGE_NONE; - page_shared = SUN4C_PAGE_SHARED; - page_copy = SUN4C_PAGE_COPY; - page_readonly = SUN4C_PAGE_READONLY; - page_kernel = SUN4C_PAGE_KERNEL; + BTFIXUPSET_SIMM13(pmd_shift, SUN4C_PMD_SHIFT); + BTFIXUPSET_SETHI(pmd_size, SUN4C_PMD_SIZE); + BTFIXUPSET_SETHI(pmd_mask, SUN4C_PMD_MASK); + BTFIXUPSET_SIMM13(pgdir_shift, SUN4C_PGDIR_SHIFT); + BTFIXUPSET_SETHI(pgdir_size, SUN4C_PGDIR_SIZE); + BTFIXUPSET_SETHI(pgdir_mask, SUN4C_PGDIR_MASK); + + BTFIXUPSET_SIMM13(ptrs_per_pte, SUN4C_PTRS_PER_PTE); + BTFIXUPSET_SIMM13(ptrs_per_pmd, SUN4C_PTRS_PER_PMD); + BTFIXUPSET_SIMM13(ptrs_per_pgd, SUN4C_PTRS_PER_PGD); + BTFIXUPSET_SIMM13(user_ptrs_per_pgd, KERNBASE / SUN4C_PGDIR_SIZE); + + BTFIXUPSET_INT(page_none, pgprot_val(SUN4C_PAGE_NONE)); + BTFIXUPSET_INT(page_shared, pgprot_val(SUN4C_PAGE_SHARED)); + BTFIXUPSET_INT(page_copy, pgprot_val(SUN4C_PAGE_COPY)); + BTFIXUPSET_INT(page_readonly, pgprot_val(SUN4C_PAGE_READONLY)); + BTFIXUPSET_INT(page_kernel, pgprot_val(SUN4C_PAGE_KERNEL)); pg_iobits = _SUN4C_PAGE_PRESENT | _SUN4C_READABLE | _SUN4C_WRITEABLE | _SUN4C_PAGE_IO | _SUN4C_PAGE_NOCACHE; /* Functions */ - flush_cache_all = sun4c_flush_cache_all; +#ifndef __SMP__ + BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM); +#endif + BTFIXUPSET_CALL(get_pte_fast, sun4c_pte_get, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM); if(sun4c_vacinfo.do_hwflushes) { - flush_cache_mm = sun4c_flush_cache_mm_hw; - flush_cache_range = sun4c_flush_cache_range_hw; - flush_cache_page = sun4c_flush_cache_page_hw; - flush_page_to_ram = sun4c_flush_page_to_ram_hw; - flush_tlb_mm = sun4c_flush_tlb_mm_hw; - flush_tlb_range = sun4c_flush_tlb_range_hw; - flush_tlb_page = 
sun4c_flush_tlb_page_hw; - free_task_struct = sun4c_free_task_struct_hw; - switch_to_context = sun4c_switch_to_context_hw; - destroy_context = sun4c_destroy_context_hw; - init_new_context = sun4c_init_new_context_hw; + BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM); } else { - flush_cache_mm = sun4c_flush_cache_mm_sw; - flush_cache_range = sun4c_flush_cache_range_sw; - flush_cache_page = sun4c_flush_cache_page_sw; - flush_page_to_ram = sun4c_flush_page_to_ram_sw; - flush_tlb_mm = sun4c_flush_tlb_mm_sw; - flush_tlb_range = sun4c_flush_tlb_range_sw; - flush_tlb_page = sun4c_flush_tlb_page_sw; - free_task_struct = sun4c_free_task_struct_sw; - switch_to_context = sun4c_switch_to_context_sw; - destroy_context = sun4c_destroy_context_sw; - init_new_context = sun4c_init_new_context_sw; - } - - flush_tlb_all = sun4c_flush_tlb_all; - - flush_sig_insns = sun4c_flush_sig_insns; - - set_pte = sun4c_set_pte; - pmd_align = sun4c_pmd_align; - pgdir_align = sun4c_pgdir_align; - vmalloc_start = sun4c_vmalloc_start; - - pte_page = sun4c_pte_page; - pmd_page = sun4c_pmd_page; - - sparc_update_rootmmu_dir = sun4c_update_rootmmu_dir; - - pte_none = sun4c_pte_none; - pte_present = sun4c_pte_present; - pte_clear = sun4c_pte_clear; - - pmd_none = sun4c_pmd_none; - pmd_bad = sun4c_pmd_bad; - pmd_present = sun4c_pmd_present; - pmd_clear = sun4c_pmd_clear; - - pgd_none = sun4c_pgd_none; - pgd_bad = sun4c_pgd_bad; - pgd_present = sun4c_pgd_present; - pgd_clear = sun4c_pgd_clear; - - mk_pte = sun4c_mk_pte; - mk_pte_phys = sun4c_mk_pte_phys; - mk_pte_io = sun4c_mk_pte_io; - pte_modify = sun4c_pte_modify; - pgd_offset = sun4c_pgd_offset; - pmd_offset = sun4c_pmd_offset; - pte_offset = sun4c_pte_offset; - pte_free_kernel = sun4c_pte_free_kernel; - pmd_free_kernel = sun4c_pmd_free_kernel; - pte_alloc_kernel = sun4c_pte_alloc_kernel; - pmd_alloc_kernel = sun4c_pmd_alloc_kernel; - pte_free = sun4c_pte_free; - pte_alloc = sun4c_pte_alloc; - pmd_free = sun4c_pmd_free; - pmd_alloc = sun4c_pmd_alloc; - pgd_free = sun4c_pgd_free; - pgd_alloc = sun4c_pgd_alloc; - - pte_write = sun4c_pte_write; - pte_dirty = sun4c_pte_dirty; - pte_young = sun4c_pte_young; - pte_wrprotect = sun4c_pte_wrprotect; - pte_mkclean = sun4c_pte_mkclean; - pte_mkold = sun4c_pte_mkold; - pte_mkwrite = sun4c_pte_mkwrite; - pte_mkdirty = sun4c_pte_mkdirty; - pte_mkyoung = sun4c_pte_mkyoung; - update_mmu_cache = sun4c_update_mmu_cache; - - mmu_lockarea = sun4c_lockarea; - mmu_unlockarea = sun4c_unlockarea; - - mmu_get_scsi_one = sun4c_get_scsi_one; - mmu_get_scsi_sgl = sun4c_get_scsi_sgl; - mmu_release_scsi_one = sun4c_release_scsi_one; - mmu_release_scsi_sgl = sun4c_release_scsi_sgl; - - mmu_map_dma_area = sun4c_map_dma_area; - - mmu_v2p 
= sun4c_v2p; - mmu_p2v = sun4c_p2v; + BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_page_to_ram, sun4c_flush_page_to_ram_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_mm, sun4c_flush_tlb_mm_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM); + } + + BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(flush_sig_insns, sun4c_flush_sig_insns, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0); + + BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM); +#if PAGE_SHIFT <= 12 + BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1)); +#else + BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM); +#endif + + BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0); + + BTFIXUPSET_CALL(pmd_bad, sun4c_pmd_bad, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_present, sun4c_pmd_present, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_clear, sun4c_pmd_clear, BTFIXUPCALL_STG0O0); + + BTFIXUPSET_CALL(pgd_none, sun4c_pgd_none, BTFIXUPCALL_RETINT(0)); + BTFIXUPSET_CALL(pgd_bad, sun4c_pgd_bad, BTFIXUPCALL_RETINT(0)); + BTFIXUPSET_CALL(pgd_present, sun4c_pgd_present, BTFIXUPCALL_RETINT(1)); + BTFIXUPSET_CALL(pgd_clear, sun4c_pgd_clear, BTFIXUPCALL_NOP); + + BTFIXUPSET_CALL(mk_pte, sun4c_mk_pte, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte_phys, sun4c_mk_pte_phys, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mk_pte_io, sun4c_mk_pte_io, BTFIXUPCALL_NORM); + + BTFIXUPSET_INT(pte_modify_mask, _SUN4C_PAGE_CHG_MASK); + BTFIXUPSET_CALL(pgd_offset, sun4c_pgd_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_offset, sun4c_pmd_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_offset, sun4c_pte_offset, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_free_kernel, sun4c_pte_free_kernel, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_free_kernel, sun4c_pmd_free_kernel, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(pte_alloc_kernel, sun4c_pte_alloc_kernel, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_alloc_kernel, sun4c_pmd_alloc_kernel, BTFIXUPCALL_RETO0); + BTFIXUPSET_CALL(pte_free, sun4c_pte_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_alloc, sun4c_pte_alloc, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pmd_free, sun4c_pmd_free, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(pmd_alloc, sun4c_pmd_alloc, BTFIXUPCALL_RETO0); + BTFIXUPSET_CALL(pgd_free, sun4c_pgd_free, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pgd_alloc, sun4c_pgd_alloc, BTFIXUPCALL_NORM); + + BTFIXUPSET_HALF(pte_writei, _SUN4C_PAGE_WRITE); + BTFIXUPSET_HALF(pte_dirtyi, _SUN4C_PAGE_MODIFIED); + BTFIXUPSET_HALF(pte_youngi, _SUN4C_PAGE_ACCESSED); + BTFIXUPSET_HALF(pte_wrprotecti, _SUN4C_PAGE_WRITE|_SUN4C_PAGE_SILENT_WRITE); + BTFIXUPSET_HALF(pte_mkcleani, 
_SUN4C_PAGE_MODIFIED|_SUN4C_PAGE_SILENT_WRITE); + BTFIXUPSET_HALF(pte_mkoldi, _SUN4C_PAGE_ACCESSED|_SUN4C_PAGE_SILENT_READ); + BTFIXUPSET_CALL(pte_mkwrite, sun4c_pte_mkwrite, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_mkdirty, sun4c_pte_mkdirty, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(pte_mkyoung, sun4c_pte_mkyoung, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(update_mmu_cache, sun4c_update_mmu_cache, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(mmu_lockarea, sun4c_lockarea, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_unlockarea, sun4c_unlockarea, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(mmu_get_scsi_one, sun4c_get_scsi_one, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_get_scsi_sgl, sun4c_get_scsi_sgl, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_release_scsi_one, sun4c_release_scsi_one, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM); + + BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM); /* Task struct and kernel stack allocating/freeing. */ - alloc_task_struct = sun4c_alloc_task_struct; + BTFIXUPSET_CALL(alloc_task_struct, sun4c_alloc_task_struct, BTFIXUPCALL_NORM); - quick_kernel_fault = sun4c_quick_kernel_fault; - mmu_info = sun4c_mmu_info; + BTFIXUPSET_CALL(quick_kernel_fault, sun4c_quick_kernel_fault, BTFIXUPCALL_NORM); + BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM); /* These should _never_ get called with two level tables. */ - pgd_set = 0; - pgd_page = 0; + BTFIXUPSET_CALL(pgd_set, sun4c_pgd_set, BTFIXUPCALL_NOP); + BTFIXUPSET_CALL(pgd_page, sun4c_pgd_page, BTFIXUPCALL_RETO0); } diff --git a/arch/sparc/mm/turbosparc.S b/arch/sparc/mm/turbosparc.S index 415f09056..df580a85c 100644 --- a/arch/sparc/mm/turbosparc.S +++ b/arch/sparc/mm/turbosparc.S @@ -1,4 +1,4 @@ -/* $Id: turbosparc.S,v 1.2 1998/03/16 08:40:31 ralf Exp $ +/* $Id: turbosparc.S,v 1.3 1998/05/04 12:41:29 ralf Exp $ * turbosparc.S: High speed TurboSparc mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) @@ -27,6 +27,7 @@ .globl turbosparc_flush_cache_all .globl turbosparc_flush_sig_insns + .globl turbosparc_flush_page_for_dma turbosparc_flush_cache_all: WINDOW_FLUSH(%g4, %g5) @@ -42,5 +43,6 @@ turbosparc_flush_cache_all: sta %g0, [%g0] ASI_M_IC_FLCLEAR turbosparc_flush_sig_insns: +turbosparc_flush_page_for_dma: retl nop diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index b05b7b416..c65f72007 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -1,8 +1,8 @@ -/* $Id: viking.S,v 1.6 1997/11/27 15:42:32 jj Exp $ +/* $Id: viking.S,v 1.11 1998/02/20 18:07:50 jj Exp $ * viking.S: High speed Viking cache/mmu operations * * Copyright (C) 1997 Eddie C. 
Dost (ecd@skynet.be) - * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <asm/ptrace.h> @@ -13,6 +13,7 @@ #include <asm/pgtsrmmu.h> #include <asm/viking.h> #include <asm/cprefix.h> +#include <asm/btfixup.h> #define WINDOW_FLUSH(tmp1, tmp2) \ mov 0, tmp1; \ @@ -37,40 +38,33 @@ .globl viking_flush_tlb_all, viking_flush_tlb_mm .globl viking_flush_tlb_range, viking_flush_tlb_page - .globl viking_c_mxcc_flush_page - .globl viking_c_flush_page, viking_c_flush_chunk + .globl viking_c_flush_chunk, viking_s_flush_chunk + +viking_s_flush_chunk: + sethi %hi(KERNBASE), %g2 + ba 2f + sub %o0, %g2, %g3 -viking_c_flush_page: viking_c_flush_chunk: sethi %hi(KERNBASE), %g2 cmp %o0, %g2 bgeu 2f sub %o0, %g2, %g3 - sethi %hi(C_LABEL(page_contig_offset)), %g2 - ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2 + sethi BTFIXUP_SETHI(page_contig_offset), %g2 ba 2f sub %o0, %g2, %g3 viking_flush_page: viking_flush_chunk: sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2 - or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2 srl %o0, 24, %o1 + or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2 sll %o1, 2, %o1 - ld [%g2 + %o1], %g3 - cmp %g3, 0 - bne 1f - and %o0, PAGE_MASK, %o0 - - retl - nop - -1: - ld [%g3], %o1 - sub %o0, %o1, %g2 - ld [%g3 + 4], %o0 - add %g2, %o0, %g3 + and %o0, PAGE_MASK, %o0 + cmp %g3, -1 + be 9f + add %o0, %g3, %g3 2: srl %g3, 12, %g1 ! ppage >> 12 clr %o1 ! set counter, 0 - 127 @@ -124,41 +118,22 @@ viking_flush_chunk: ble 5b clr %o2 - retl +9: retl nop -viking_c_mxcc_flush_page: - sethi %hi(KERNBASE), %g2 - cmp %o0, %g2 - bgeu 2f - sub %o0, %g2, %g3 - sethi %hi(C_LABEL(page_contig_offset)), %g2 - ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2 - ba 2f - sub %o0, %g2, %g3 - viking_mxcc_flush_page: sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2 - or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2 srl %o0, 24, %o1 + or %g2, %lo(C_LABEL(srmmu_v2p_hash)), %g2 sll %o1, 2, %o1 - ld [%g2 + %o1], %g3 - cmp %g3, 0 - bne 1f - and %o0, PAGE_MASK, %o0 - - retl - nop - -1: - ld [%g3], %o1 - sub %o0, %o1, %g2 - ld [%g3 + 4], %o0 - add %g2, %o0, %g3 + and %o0, PAGE_MASK, %o0 + cmp %g3, -1 + be 9f + add %o0, %g3, %g3 2: sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE - mov 0x10, %g2 ! set cacheable bit sethi %hi(MXCC_SRCSTREAM), %o3 ! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM) + mov 0x10, %g2 ! set cacheable bit or %o3, %lo(MXCC_SRCSTREAM), %o2 or %o3, %lo(MXCC_DESSTREAM), %o3 sub %g3, MXCC_STREAM_SIZE, %g3 @@ -169,7 +144,7 @@ viking_mxcc_flush_page: bne 6b sub %g3, MXCC_STREAM_SIZE, %g3 - retl +9: retl nop viking_mxcc_flush_chunk: @@ -212,13 +187,12 @@ viking_flush_tlb_range: cmp %o3, -1 be 2f #endif - srl %o1, SRMMU_PGDIR_SHIFT, %o1 + sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sta %o3, [%g1] ASI_M_MMUREGS - sll %o1, SRMMU_PGDIR_SHIFT, %o1 - sethi %hi(1 << SRMMU_PGDIR_SHIFT), %o4 + and %o1, %o4, %o1 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE -1: add %o1, %o4, %o1 +1: sub %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE |
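One detail of the rewritten viking_flush_tlb_range loop is worth spelling out. The old code materialized the 16 MB step with shift games plus an extra sethi; the new code builds only the alignment mask ~((1 << SRMMU_PGDIR_SHIFT) - 1), which for a shift of 24 is 0xff000000 and so fits a single sethi (its low 10 bits are zero), aligns the start address down with and, and then advances with sub %o1, %o4, %o1: in 32-bit two's complement, subtracting the mask is the same as adding the region size. A quick check of that identity in plain C (assuming the 24-bit pgdir shift):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint32_t mask = ~((1u << 24) - 1);  /* 0xff000000, sethi-able */
        uint32_t addr = 0x12345678;

        addr &= mask;                             /* align down: 0x12000000 */
        assert(addr - mask == addr + (1u << 24)); /* one step == +16 MB */
        return 0;
}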