Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--   arch/sparc/mm/Makefile   |   2
-rw-r--r--   arch/sparc/mm/fault.c    |  13
-rw-r--r--   arch/sparc/mm/init.c     |  40
-rw-r--r--   arch/sparc/mm/iommu.c    |  63
-rw-r--r--   arch/sparc/mm/nosrmmu.c  |  12
-rw-r--r--   arch/sparc/mm/srmmu.c    | 144
-rw-r--r--   arch/sparc/mm/sun4c.c    |  12
-rw-r--r--   arch/sparc/mm/viking.S   | 157
8 files changed, 302 insertions, 141 deletions
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index ecb1943c3..76dbf643a 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.32 1998/08/16 16:02:25 ecd Exp $
+# $Id: Makefile,v 1.33 1999/01/02 16:45:47 davem Exp $
 # Makefile for the linux Sparc-specific parts of the memory manager.
 #
 # Note! Dependencies are done automagically by 'make dep', which also
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 3c8ffbfae..a6110b886 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.96 1998/11/08 11:13:56 davem Exp $
+/* $Id: fault.c,v 1.101 1999/01/04 06:24:52 jj Exp $
  * fault.c: Page fault handlers for the Sparc.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -13,11 +13,13 @@
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/tasks.h>
+#include <linux/kernel.h>
 #include <linux/smp.h>
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/smp_lock.h>
+#include <linux/interrupt.h>
 
 #include <asm/system.h>
 #include <asm/segment.h>
@@ -149,9 +151,7 @@ static void unhandled_fault(unsigned long address, struct task_struct *tsk,
                (unsigned long) tsk->mm->context);
         printk(KERN_ALERT "tsk->mm->pgd = %08lx\n",
                (unsigned long) tsk->mm->pgd);
-        lock_kernel();
         die_if_kernel("Oops", regs);
-        unlock_kernel();
 }
 
 asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
@@ -202,6 +202,13 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         if(text_fault)
                 address = regs->pc;
 
+        /*
+         * If we're in an interrupt or have no user
+         * context, we must not take the fault..
+         */
+        if (in_interrupt() || mm == &init_mm)
+                goto do_kernel_fault;
+
         down(&mm->mmap_sem);
         /* The kernel referencing a bad kernel pointer can lock up
          * a sun4c machine completely, so we must attempt recovery.
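
The do_sparc_fault() hunk above is the load-bearing fault.c change: down(&mm->mmap_sem) may sleep, which is illegal in interrupt context, and a kernel thread running on init_mm has no vmas worth searching, so both cases now branch straight to the kernel-fault path. A minimal standalone sketch of that guard; interrupt_depth and the handle_*_fault() stubs are stand-ins here, not the kernel's real in_interrupt() or fault paths:

    #include <stdio.h>

    struct mm_struct { int dummy; };
    static struct mm_struct init_mm;        /* the "mm" kernel threads borrow */
    static int interrupt_depth;             /* models in_interrupt() */

    static void handle_user_fault(void)   { puts("take mmap_sem, walk vmas"); }
    static void handle_kernel_fault(void) { puts("fixup table / oops path"); }

    static void do_fault(struct mm_struct *mm)
    {
        /* Sleeping on mmap_sem is forbidden in interrupt context, and a
         * kernel thread on init_mm has no user mappings to search, so go
         * straight to the kernel-fault path, as the diff does. */
        if (interrupt_depth > 0 || mm == &init_mm) {
            handle_kernel_fault();
            return;
        }
        handle_user_fault();
    }

    int main(void)
    {
        struct mm_struct user_mm;
        do_fault(&user_mm);        /* normal user fault */
        interrupt_depth = 1;
        do_fault(&user_mm);        /* fault from an interrupt handler */
        interrupt_depth = 0;
        do_fault(&init_mm);        /* fault from a kernel thread */
        return 0;
    }
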
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index 391a4dedb..4652e4fe6 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,4 +1,4 @@
-/* $Id: init.c,v 1.60 1998/09/13 04:30:31 davem Exp $
+/* $Id: init.c,v 1.65 1999/04/09 16:28:03 davem Exp $
  * linux/arch/sparc/mm/init.c
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -37,6 +37,8 @@
 
 extern void show_net_buffers(void);
 
+unsigned long *sparc_valid_addr_bitmap;
+
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
 unsigned long sparc_unmapped_base;
@@ -215,16 +217,20 @@ __initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long e
                                 unsigned long limit = base + sp_banks[tmp2].num_bytes;
 
                                 if((phys_addr >= base) && (phys_addr < limit) &&
-                                   ((phys_addr + PAGE_SIZE) < limit))
+                                   ((phys_addr + PAGE_SIZE) < limit)) {
                                         mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+                                        set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
+                                }
                         }
                 }
         } else {
                 if((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) {
                         srmmu_frob_mem_map(start_mem);
                 } else {
-                        for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE)
+                        for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
                                 mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+                                set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
+                        }
                 }
         }
 }
@@ -234,6 +240,7 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
         int codepages = 0;
         int datapages = 0;
         int initpages = 0;
+        int i;
         unsigned long addr;
         struct page *page, *end;
@@ -243,6 +250,12 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
         end_mem &= PAGE_MASK;
         max_mapnr = MAP_NR(end_mem);
         high_memory = (void *) end_mem;
+
+        sparc_valid_addr_bitmap = (unsigned long *)start_mem;
+        i = max_mapnr >> (8 + 5);
+        i += 1;
+        memset(sparc_valid_addr_bitmap, 0, i << 2);
+        start_mem += i << 2;
 
         start_mem = PAGE_ALIGN(start_mem);
         num_physpages = 0;
@@ -255,6 +268,7 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
                 else
 #endif
                         mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+                set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
                 addr += PAGE_SIZE;
         }
@@ -266,6 +280,9 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
                 if (PageSkip(page)) {
                         unsigned long low, high;
 
+                        /* See srmmu_frob_mem_map() for why this is done. -DaveM */
+                        page++;
+
                         low = PAGE_ALIGN((unsigned long)(page+1));
                         if (page->next_hash < page)
                                 high = ((unsigned long)end) & PAGE_MASK;
@@ -313,11 +330,18 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
                initpages << (PAGE_SHIFT-10),
                (unsigned long)PAGE_OFFSET, end_mem);
 
-        freepages.min = nr_free_pages >> 7;
-        if(freepages.min < 16)
-                freepages.min = 16;
-        freepages.low = freepages.min + (freepages.min >> 1);
-        freepages.high = freepages.min + freepages.min;
+        /* NOTE NOTE NOTE NOTE
+         * Please keep track of things and make sure this
+         * always matches the code in mm/page_alloc.c -DaveM
+         */
+        i = nr_free_pages >> 7;
+        if (i < 48)
+                i = 48;
+        if (i > 256)
+                i = 256;
+        freepages.min = i;
+        freepages.low = i << 1;
+        freepages.high = freepages.low + i;
 }
 
 void free_initmem (void)
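
The new sparc_valid_addr_bitmap is coarse on purpose: set_bit(MAP_NR(addr) >> 8, ...) makes one bit stand for a 256-page chunk, so mem_init() only needs max_mapnr >> (8 + 5) 32-bit words plus one for rounding, carved off start_mem before it is page-aligned. (The freepages hunk in the same file just clamps min between 48 and 256 pages, with low and high at two and three times min.) A sketch of the bitmap arithmetic, assuming sparc32's 4K pages and 32-bit longs; the 64MB figure is only an example:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long end_mem = 64UL << 20;              /* e.g. 64MB of RAM */
        unsigned long max_mapnr = end_mem >> PAGE_SHIFT; /* 16384 pages */

        /* One bit covers 2^8 = 256 pages (MAP_NR(addr) >> 8), and each
         * 32-bit word holds 2^5 bits, hence the >> (8 + 5).  The + 1
         * rounds up so a partial trailing word is still allocated. */
        unsigned long words = (max_mapnr >> (8 + 5)) + 1;

        printf("%lu pages -> %lu bitmap words (%lu bytes)\n",
               max_mapnr, words, words << 2);            /* i << 2 in the diff */
        return 0;
    }
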
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index c7c6bdd5f..a0f92ea79 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -1,4 +1,4 @@
-/* $Id: iommu.c,v 1.9 1998/04/15 14:58:37 jj Exp $
+/* $Id: iommu.c,v 1.10 1999/05/07 17:03:34 jj Exp $
  * iommu.c: IOMMU specific routines for memory management.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -51,8 +51,7 @@ iommu_init(int iommund, struct linux_sbus *sbus))
         unsigned long tmp;
         struct iommu_struct *iommu;
         struct linux_prom_registers iommu_promregs[PROMREG_MAX];
-        int i, j, k, l, m;
-        struct iommu_alloc { unsigned long addr; int next; } *ia;
+        int i;
 
         iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
         prom_getproperty(iommund, "reg", (void *) iommu_promregs,
@@ -97,62 +96,18 @@ iommu_init(int iommund, struct linux_sbus *sbus))
         ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);
 
         /* Stupid alignment constraints give me a headache. 
-           We want to get very large aligned memory area, larger than
-           maximum what get_free_pages gives us (128K): we need
-           256K or 512K or 1M or 2M aligned to its size. */
-        ia = (struct iommu_alloc *) kmalloc (sizeof(struct iommu_alloc) * 128, GFP_ATOMIC);
-        for (i = 0; i < 128; i++) {
-                ia[i].addr = 0;
-                ia[i].next = -1;
-        }
-        k = 0;
-        for (i = 0; i < 128; i++) {
-                ia[i].addr = __get_free_pages(GFP_DMA, 5);
-                if (ia[i].addr <= ia[k].addr) {
-                        if (i) {
-                                ia[i].next = k;
-                                k = i;
-                        }
-                } else {
-                        for (m = k, l = ia[k].next; l != -1; m = l, l = ia[l].next)
-                                if (ia[i].addr <= ia[l].addr) {
-                                        ia[i].next = l;
-                                        ia[m].next = i;
-                                }
-                        if (l == -1)
-                                ia[m].next = i;
-                }
-                for (m = -1, j = 0, l = k; l != -1; l = ia[l].next) {
-                        if (!(ia[l].addr & (ptsize - 1))) {
-                                tmp = ia[l].addr;
-                                m = l;
-                                j = 128 * 1024;
-                        } else if (m != -1) {
-                                if (ia[l].addr != tmp + j)
-                                        m = -1;
-                                else {
-                                        j += 128 * 1024;
-                                        if (j == ptsize) {
-                                                break;
-                                        }
-                                }
-                        }
-                }
-                if (l != -1)
+           We need 256K or 512K or 1M or 2M area aligned to
+           its size and current gfp will fortunately give
+           it to us. */
+        for (i = 6; i < 9; i++)
+                if ((1 << (i + PAGE_SHIFT)) == ptsize)
                         break;
-        }
-        if (i == 128) {
+        tmp = __get_free_pages(GFP_DMA, i);
+        if (!tmp) {
                 prom_printf("Could not allocate iopte of size 0x%08x\n", ptsize);
                 prom_halt();
         }
-        for (l = m, j = 0; j < ptsize; j += 128 * 1024, l = ia[l].next)
-                ia[l].addr = 0;
-        for (l = k; l != -1; l = ia[l].next)
-                if (ia[l].addr)
-                        free_pages(ia[l].addr, 5);
-        kfree (ia);
         iommu->lowest = iommu->page_table = (iopte_t *)tmp;
-
         /* Initialize new table. */
         flush_cache_all();
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
index f82599f42..b87b2bdeb 100644
--- a/arch/sparc/mm/nosrmmu.c
+++ b/arch/sparc/mm/nosrmmu.c
@@ -1,4 +1,4 @@
-/* $Id: nosrmmu.c,v 1.1 1998/03/09 14:04:15 jj Exp $
+/* $Id: nosrmmu.c,v 1.2 1999/03/30 10:17:39 jj Exp $
  * nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
  *            so that it does not need srmmu and avoid ifdefs.
  *
@@ -48,3 +48,13 @@ __initfunc(void srmmu_end_memory(unsigned long memory_size, unsigned long *mem_e
 {
         return 0;
 }
+
+__u32 iounit_map_dma_init(struct linux_sbus *sbus, int size)
+{
+        return 0;
+}
+
+__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct linux_sbus *sbus)
+{
+        return 0;
+}
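
The iommu.c rewrite drops the hand-rolled "allocate 128 order-5 blocks and hunt for an aligned run" scheme: the buddy allocator hands back blocks naturally aligned to their size, so asking for the right order directly yields the self-aligned 256K/512K/1M/2M table the IOMMU wants. A sketch of the order computation, assuming 4K pages; note how the 2M case deliberately falls out of the loop with i == 9:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static int iopte_order(unsigned long ptsize)
    {
        int i;

        /* i = 6..8 match 256K, 512K and 1M; if none matches, the loop
         * terminates with i == 9, which covers the remaining 2M case,
         * exactly as in the diff. */
        for (i = 6; i < 9; i++)
            if ((1UL << (i + PAGE_SHIFT)) == ptsize)
                break;
        return i;
    }

    int main(void)
    {
        unsigned long sizes[] = { 256UL << 10, 512UL << 10,
                                  1UL << 20, 2UL << 20 };
        for (int j = 0; j < 4; j++)
            printf("ptsize %7luK -> order %d\n",
                   sizes[j] >> 10, iopte_order(sizes[j]));
        return 0;
    }
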
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index d94fd4083..5b63aa11a 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,4 +1,4 @@
-/* $Id: srmmu.c,v 1.175 1998/08/28 18:57:31 zaitcev Exp $
+/* $Id: srmmu.c,v 1.187 1999/04/28 17:00:45 davem Exp $
  * srmmu.c: SRMMU specific routines for memory management.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -12,7 +12,9 @@
 #include <linux/mm.h>
 #include <linux/malloc.h>
 #include <linux/vmalloc.h>
+#include <linux/pagemap.h>
 #include <linux/init.h>
+#include <linux/blk.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -216,24 +218,36 @@ __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
         mem_map[MAP_NR(pg1)].flags &= ~(1<<PG_reserved);
         mem_map[MAP_NR(pg2)].flags &= ~(1<<PG_reserved);
         mem_map[MAP_NR(pg3)].flags &= ~(1<<PG_reserved);
-        
+
         start_mem = PAGE_ALIGN(start_mem);
         for(i = 0; srmmu_map[i].size; i++) {
                 bank_start = srmmu_map[i].vbase;
 
-                if (i && bank_start - bank_end > 2 * PAGE_SIZE) {
+                /* Making a one or two pages PG_skip holes
+                 * is not necessary. We add one more because
+                 * we must set the PG_skip flag on the first
+                 * two mem_map[] entries for the hole. Go and
+                 * see the mm/filemap.c:shrink_mmap() loop for
+                 * details. -DaveM
+                 */
+                if (i && bank_start - bank_end > 3 * PAGE_SIZE) {
                         mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
                         mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(bank_start);
+                        mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
+                        mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(bank_start);
                         PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(bank_start));
                         if (bank_end > KERNBASE && bank_start < KERNBASE) {
                                 mem_map[0].flags |= (1<<PG_skip);
                                 mem_map[0].next_hash = mem_map + MAP_NR(bank_start);
+                                mem_map[1].flags |= (1<<PG_skip);
+                                mem_map[1].next_hash = mem_map + MAP_NR(bank_start);
                                 PGSKIP_DEBUG(0, MAP_NR(bank_start));
                         }
                 }
 
                 bank_end = bank_start + srmmu_map[i].size;
                 while(bank_start < bank_end) {
+                        set_bit(MAP_NR(bank_start) >> 8, sparc_valid_addr_bitmap);
                         if((bank_start >= KERNBASE) &&
                            (bank_start < start_mem)) {
                                 bank_start += PAGE_SIZE;
@@ -250,14 +264,19 @@ __initfunc(void srmmu_frob_mem_map(unsigned long start_mem))
         if (bank_end < KERNBASE) {
                 mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
                 mem_map[MAP_NR(bank_end)].next_hash = mem_map + MAP_NR(KERNBASE);
+                mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
+                mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map + MAP_NR(KERNBASE);
                 PGSKIP_DEBUG(MAP_NR(bank_end), MAP_NR(KERNBASE));
         } else if (MAP_NR(bank_end) < max_mapnr) {
                 mem_map[MAP_NR(bank_end)].flags |= (1<<PG_skip);
+                mem_map[MAP_NR(bank_end)+1UL].flags |= (1<<PG_skip);
                 if (mem_map[0].flags & (1 << PG_skip)) {
                         mem_map[MAP_NR(bank_end)].next_hash = mem_map[0].next_hash;
+                        mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map[0].next_hash;
                         PGSKIP_DEBUG(MAP_NR(bank_end), mem_map[0].next_hash - mem_map);
                 } else {
                         mem_map[MAP_NR(bank_end)].next_hash = mem_map;
+                        mem_map[MAP_NR(bank_end)+1UL].next_hash = mem_map;
                         PGSKIP_DEBUG(MAP_NR(bank_end), 0);
                 }
         }
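
srmmu_frob_mem_map() now refuses to create holes of three pages or fewer and marks the first two mem_map[] entries of every hole with PG_skip, since (per the -DaveM comment) the shrink_mmap() clock hand can step past the first entry before testing the flag. A toy model of the two-entry convention, with deliberately simplified fields (next is an index standing in for next_hash):

    #include <stdio.h>

    #define NPAGES  16
    #define PG_skip 1

    struct page { int flags; int next; };

    int main(void)
    {
        struct page map[NPAGES] = { { 0, 0 } };
        int hole_start = 5, hole_end = 12, i;

        /* Mark the first TWO entries of the hole, as the diff does. */
        map[hole_start].flags     |= PG_skip;
        map[hole_start].next       = hole_end;
        map[hole_start + 1].flags |= PG_skip;
        map[hole_start + 1].next   = hole_end;

        /* Whichever of the two leading entries a scanner that advances
         * one or two entries per step lands on, it escapes in one hop. */
        for (i = hole_start; i <= hole_start + 1; i++)
            if (map[i].flags & PG_skip)
                printf("hand at %d -> jumps to %d\n", i, map[i].next);
        return 0;
    }
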
@@ -447,7 +466,8 @@ static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
 /* This must update the context table entry for this process. */
 static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
 {
-        if(tsk->mm->context != NO_CONTEXT) {
+        if(tsk->mm->context != NO_CONTEXT &&
+           tsk->mm->pgd != pgdp) {
                 flush_cache_mm(tsk->mm);
                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
                 flush_tlb_mm(tsk->mm);
@@ -800,9 +820,7 @@ static void srmmu_switch_to_context(struct task_struct *tsk)
 {
         if(tsk->mm->context == NO_CONTEXT) {
                 alloc_context(tsk->mm);
-                flush_cache_mm(tsk->mm);
                 ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
-                flush_tlb_mm(tsk->mm);
         }
         srmmu_set_context(tsk->mm->context);
 }
@@ -1273,6 +1291,12 @@
 extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                                    unsigned long end);
 extern void viking_flush_tlb_page(struct vm_area_struct *vma,
                                   unsigned long page);
+extern void sun4dsmp_flush_tlb_all(void);
+extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
+extern void sun4dsmp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+                                     unsigned long end);
+extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
+                                    unsigned long page);
 
 /* hypersparc.S */
 extern void hypersparc_flush_cache_all(void);
@@ -1311,7 +1335,8 @@ static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
         if(pgdp != swapper_pg_dir)
                 hypersparc_flush_page_to_ram(page);
 
-        if(tsk->mm->context != NO_CONTEXT) {
+        if(tsk->mm->context != NO_CONTEXT &&
+           tsk->mm->pgd != pgdp) {
                 flush_cache_mm(tsk->mm);
                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
                 flush_tlb_mm(tsk->mm);
@@ -1320,11 +1345,13 @@ static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
 
 static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
 {
-        viking_flush_page((unsigned long)pgdp);
-        if(tsk->mm->context != NO_CONTEXT) {
-                flush_cache_mm(current->mm);
+        if(pgdp != swapper_pg_dir)
+                flush_chunk((unsigned long)pgdp);
+        if(tsk->mm->context != NO_CONTEXT &&
+           tsk->mm->pgd != pgdp) {
+                flush_cache_mm(tsk->mm);
                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
-                flush_tlb_mm(current->mm);
+                flush_tlb_mm(tsk->mm);
         }
 }
 
@@ -1334,6 +1361,9 @@ static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
         unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
         unsigned long line;
 
+        if(pgdp == swapper_pg_dir)
+                goto skip_flush;
+
         a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
         page &= PAGE_MASK;
         line = (page + PAGE_SIZE) - 0x100;
@@ -1354,11 +1384,12 @@ static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
                              "r" (a), "r" (b), "r" (c), "r" (d),
                              "r" (e), "r" (f), "r" (g));
         } while(line != page);
-
-        if(tsk->mm->context != NO_CONTEXT) {
-                flush_cache_mm(current->mm);
+skip_flush:
+        if(tsk->mm->context != NO_CONTEXT &&
+           tsk->mm->pgd != pgdp) {
+                flush_cache_mm(tsk->mm);
                 ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
-                flush_tlb_mm(current->mm);
+                flush_tlb_mm(tsk->mm);
         }
 }
 
@@ -1386,9 +1417,10 @@ static void hypersparc_init_new_context(struct mm_struct *mm)
         srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
         hypersparc_flush_page_to_ram((unsigned long)ctxp);
 
-        hyper_flush_whole_icache();
-        if(mm == current->mm)
+        if(mm == current->mm) {
+                hyper_flush_whole_icache();
                 srmmu_set_context(mm->context);
+        }
 }
 
 static unsigned long mempool;
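
All three *_update_rootmmu_dir() variants above gain the same `tsk->mm->pgd != pgdp` test: reloading the context table with the pgd it already holds would only buy a pointless cache and TLB flush. A standalone sketch of the idea; the types are stand-ins, and unlike the kernel (where the SET_PAGE_DIR caller owns mm->pgd) this sketch updates mm->pgd itself:

    #include <stdio.h>

    #define NO_CONTEXT -1

    struct mm_struct { int context; void *pgd; };

    static void *context_table[8];
    static int flushes;

    static void update_rootmmu_dir(struct mm_struct *mm, void *pgdp)
    {
        /* Only pay for the flushes when the root pointer really changes. */
        if (mm->context != NO_CONTEXT && mm->pgd != pgdp) {
            flushes++;                          /* flush_cache_mm()... */
            context_table[mm->context] = pgdp;  /* ctxd_set() */
            flushes++;                          /* ...flush_tlb_mm() */
            mm->pgd = pgdp;                     /* caller's job in the kernel */
        }
    }

    int main(void)
    {
        char pgd_a, pgd_b;
        struct mm_struct mm = { 3, &pgd_a };
        context_table[3] = &pgd_a;

        update_rootmmu_dir(&mm, &pgd_a);   /* same pgd: nothing to do */
        update_rootmmu_dir(&mm, &pgd_b);   /* real switch: two flushes */
        printf("flush operations: %d\n", flushes);
        return 0;
    }
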
@@ -1917,12 +1949,13 @@ __initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned lon
                 /* Find the number of contexts on the srmmu. */
                 cpunode = prom_getchild(prom_root_node);
                 num_contexts = 0;
-                while((cpunode = prom_getsibling(cpunode)) != 0) {
+                while(cpunode != 0) {
                         prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
                         if(!strcmp(node_str, "cpu")) {
                                 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
                                 break;
                         }
+                        cpunode = prom_getsibling(cpunode);
                 }
         }
@@ -1969,6 +2002,18 @@ __initfunc(unsigned long srmmu_paging_init(unsigned long start_mem, unsigned lon
         start_mem = sparc_context_init(start_mem, num_contexts);
         start_mem = free_area_init(start_mem, end_mem);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+        /* If initial ramdisk was specified with physical address,
+           translate it here, as the p2v translation in srmmu
+           is not straightforward. */
+        if (initrd_start && initrd_start < KERNBASE) {
+                initrd_start = srmmu_p2v(initrd_start);
+                initrd_end = srmmu_p2v(initrd_end);
+                if (initrd_end <= initrd_start)
+                        initrd_start = 0;
+        }
+#endif
 
         return PAGE_ALIGN(start_mem);
 }
@@ -1998,6 +2043,11 @@ static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
 static void srmmu_destroy_context(struct mm_struct *mm)
 {
         if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+                /* XXX This could be drastically improved.
+                 * XXX We are only called from __exit_mm and it just did
+                 * XXX cache/tlb mm flush and right after this will (re-)
+                 * XXX SET_PAGE_DIR to swapper_pg_dir. -DaveM
+                 */
                 flush_cache_mm(mm);
                 ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
                 flush_tlb_mm(mm);
@@ -2028,8 +2078,11 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
         offset = (address & PAGE_MASK) - vma->vm_start;
         vmaring = inode->i_mmap;
         do {
-                vaddr = vmaring->vm_start + offset;
+                /* Do not mistake ourselves as another mapping. */
+                if(vmaring == vma)
+                        continue;
 
+                vaddr = vmaring->vm_start + offset;
                 if ((vaddr ^ address) & vac_badbits) {
                         alias_found++;
                         start = vmaring->vm_start;
@@ -2042,7 +2095,7 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
                         if(!ptep) goto next;
 
                         if((pte_val(*ptep) & SRMMU_ET_MASK) == SRMMU_VALID) {
-#if 1
+#if 0
                                 printk("Fixing USER/USER alias [%ld:%08lx]\n",
                                        vmaring->vm_mm->context, start);
 #endif
@@ -2057,11 +2110,12 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
                 }
         } while ((vmaring = vmaring->vm_next_share) != NULL);
 
-        if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
+        if(alias_found && ((pte_val(pte) & SRMMU_CACHE) != 0)) {
                 pgdp = srmmu_pgd_offset(vma->vm_mm, address);
-                ptep = srmmu_pte_offset((pmd_t *) pgdp, address);
+                pmdp = srmmu_pmd_offset(pgdp, address);
+                ptep = srmmu_pte_offset(pmdp, address);
                 flush_cache_page(vma, address);
-                *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
+                set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
                 flush_tlb_page(vma, address);
         }
 done:
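
The VAC hunks above skip the current vma when walking inode->i_mmap and test `(vaddr ^ address) & vac_badbits`: two virtual mappings of one physical page are only dangerous in a virtually-indexed cache when they differ in the cache-index bits above the page offset. On SRMMU the cure is now clearing the SRMMU_CACHE bit through set_pte() rather than setting a sun4c flag. A sketch of the alias test, where vac_badbits = 0x7000 models a 32K direct-mapped VAC with 4K pages (an assumed configuration, not a value from the diff):

    #include <stdio.h>

    /* roughly: (cache_size - 1) & ~(page_size - 1) */
    static unsigned long vac_badbits = 0x7000;

    static int vac_alias(unsigned long va1, unsigned long va2)
    {
        /* Nonzero means the two mappings index different cache lines
         * for the same data, so one copy could go stale. */
        return ((va1 ^ va2) & vac_badbits) != 0;
    }

    int main(void)
    {
        unsigned long a = 0x51000, b = 0x79000, c = 0x52000;

        printf("0x%lx vs 0x%lx: %s\n", a, b,
               vac_alias(a, b) ? "bad alias, uncache" : "harmless");
        printf("0x%lx vs 0x%lx: %s\n", a, c,
               vac_alias(a, c) ? "bad alias, uncache" : "harmless");
        return 0;
    }
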
@@ -2652,15 +2706,8 @@ __initfunc(static void init_viking(void))
         /* Ahhh, the viking. SRMMU VLSI abortion number two... */
         if(mreg & VIKING_MMODE) {
-                unsigned long bpreg;
-
                 srmmu_name = "TI Viking";
                 viking_mxcc_present = 0;
-
-                bpreg = viking_get_bpreg();
-                bpreg &= ~(VIKING_ACTION_MIX);
-                viking_set_bpreg(bpreg);
-
                 msi_set_sync();
 
                 BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
@@ -2691,16 +2738,25 @@ __initfunc(static void init_viking(void))
                 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
         }
 
-        /* flush_cache_* are nops */
-        BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NOP);
+        BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
+        BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
 
-        BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
+#ifdef __SMP__
+        if (sparc_cpu_model == sun4d) {
+                BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
+        } else
+#endif
+        {
+                BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
+        }
 
         BTFIXUPSET_CALL(flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
         BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
@@ -3027,10 +3083,12 @@ __initfunc(void ld_mmu_srmmu(void))
         BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+        if (sparc_cpu_model != sun4d) {
+                BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
+                BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+        }
         BTFIXUPSET_CALL(flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
         BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
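
The init_viking() and ld_mmu_srmmu() hunks cooperate on sun4d SMP: the self-locking sun4dsmp_* assembly routines are installed directly, and the generic smp_* cross-call wrappers are then skipped for the TLB operations. BTFIXUP patches the call sites in place at boot; plain function pointers model that dispatch below. This is a rough model only (assumed names, and the real selection happens in two separate __initfuncs):

    #include <stdio.h>

    enum model { sun4m, sun4d };

    static void viking_flush_tlb_all(void)   { puts("local viking flush"); }
    static void sun4dsmp_flush_tlb_all(void) { puts("spinlocked sun4d flush"); }
    static void smp_flush_tlb_all(void)      { puts("cross-call wrapper"); }

    static void (*flush_tlb_all)(void);

    static void init_tlb_ops(enum model m, int smp)
    {
        if (smp && m == sun4d)                       /* init_viking() hunk */
            flush_tlb_all = sun4dsmp_flush_tlb_all;
        else
            flush_tlb_all = viking_flush_tlb_all;

        /* ld_mmu_srmmu() hunk: don't wrap sun4d's ops in cross-calls,
         * the assembly versions already serialize on their own lock. */
        if (smp && m != sun4d)
            flush_tlb_all = smp_flush_tlb_all;
    }

    int main(void)
    {
        init_tlb_ops(sun4d, 1); flush_tlb_all();
        init_tlb_ops(sun4m, 1); flush_tlb_all();
        return 0;
    }
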
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index fa8105d57..d6387d473 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1,4 +1,4 @@
-/* $Id: sun4c.c,v 1.171 1998/09/21 05:05:41 jj Exp $
+/* $Id: sun4c.c,v 1.173 1999/01/17 02:20:37 davem Exp $
  * sun4c.c: Doing in software what should be done in hardware.
  *
  * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -2688,6 +2688,10 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
                 unsigned long vaddr = vmaring->vm_start + offset;
                 unsigned long start;
 
+                /* Do not mistake ourselves as another mapping. */
+                if(vmaring == vma)
+                        continue;
+
                 if (S4CVAC_BADALIAS(vaddr, address)) {
                         alias_found++;
                         start = vmaring->vm_start;
@@ -2699,8 +2703,8 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
 
                         if(pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
                                 flush_cache_page(vmaring, start);
-                                pte_val(*ptep) = (pte_val(*ptep) |
-                                                  _SUN4C_PAGE_NOCACHE);
+                                *ptep = __pte(pte_val(*ptep) |
+                                              _SUN4C_PAGE_NOCACHE);
                                 flush_tlb_page(vmaring, start);
                         }
 next:
@@ -2712,7 +2716,7 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
         if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
                 pgdp = sun4c_pgd_offset(vma->vm_mm, address);
                 ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
-                pte_val(*ptep) = (pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
+                *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
                 pte = pte_val(*ptep);
         }
 }
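
Both sun4c.c hunks replace stores through pte_val(*ptep) used as an lvalue with whole-pte assignments built by __pte(). A sketch of why that matters once pte_t becomes a wrapper struct; the struct definition and the flag value below are illustrative, not sun4c's actual layout:

    #include <stdio.h>

    typedef struct { unsigned long pte; } pte_t;   /* strict-typechecking pte */
    #define pte_val(x)  ((x).pte)
    #define __pte(x)    ((pte_t) { (x) })

    #define _SUN4C_PAGE_NOCACHE 0x10000000UL       /* illustrative value */

    int main(void)
    {
        pte_t pte = __pte(0x00012345UL);

        /* The idiom the diff installs: read with pte_val(), build the new
         * value with __pte(), store the whole pte_t.  With the wrapper
         * struct, "pte_val(pte) = ..." would not be a portable lvalue. */
        pte = __pte(pte_val(pte) | _SUN4C_PAGE_NOCACHE);

        printf("pte = %08lx\n", pte_val(pte));
        return 0;
    }
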
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index c65f72007..2ab87121f 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -1,8 +1,9 @@
-/* $Id: viking.S,v 1.11 1998/02/20 18:07:50 jj Exp $
+/* $Id: viking.S,v 1.13 1999/03/24 11:42:32 davem Exp $
  * viking.S: High speed Viking cache/mmu operations
  *
  * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997,1998,1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 Pavel Semerad (semerad@ss1000.ms.mff.cuni.cz)
  */
 
 #include <asm/ptrace.h>
@@ -15,16 +16,12 @@
 #include <asm/cprefix.h>
 #include <asm/btfixup.h>
 
-#define WINDOW_FLUSH(tmp1, tmp2)                                        \
-        mov     0, tmp1;                                                \
-98:     ld      [%g6 + AOFF_task_tss + AOFF_thread_uwinmask], tmp2;     \
-        orcc    %g0, tmp2, %g0;                                         \
-        add     tmp1, 1, tmp1;                                          \
-        bne     98b;                                                    \
-         save   %sp, -64, %sp;                                          \
-99:     subcc   tmp1, 1, tmp1;                                          \
-        bne     99b;                                                    \
-         restore %g0, %g0, %g0;
+#ifdef __SMP__
+        .data
+        .align  4
+sun4dsmp_flush_tlb_spin:
+        .word   0
+#endif
 
         .text
         .align  4
@@ -70,7 +67,7 @@ viking_flush_chunk:
         clr     %o1                     ! set counter, 0 - 127
         sethi   %hi(KERNBASE + PAGE_SIZE - 0x80000000), %o3
         sethi   %hi(0x80000000), %o4
-        sethi   %hi(VIKING_PTAG_VALID | VIKING_PTAG_DIRTY), %o5
+        sethi   %hi(VIKING_PTAG_VALID), %o5
         sethi   %hi(2*PAGE_SIZE), %o0
         sethi   %hi(PAGE_SIZE), %g7
         clr     %o2                     ! block counter, 0 - 3
@@ -83,15 +80,12 @@ viking_flush_chunk:
         or      %g5, %g4, %g5
         ldda    [%g5] ASI_M_DATAC_TAG, %g2
         cmp     %g3, %g1                ! ptag == ppage?
-        bne,a   7f
-         inc    %o2
-
-        and     %g2, %o5, %g3           ! ptag VALID and DIRTY?
-        cmp     %g3, %o5
-        bne,a   7f
+        bne     7f
          inc    %o2
 
-        add     %g4, %o3, %g2           ! (KERNBASE + PAGE_SIZE) | (set << 5)
+        andcc   %g2, %o5, %g0           ! ptag VALID?
+        be      7f
+         add    %g4, %o3, %g2           ! (KERNBASE + PAGE_SIZE) | (set << 5)
         ld      [%g2], %g3
         ld      [%g2 + %g7], %g3
         add     %g2, %o0, %g2
@@ -102,18 +96,15 @@ viking_flush_chunk:
         ld      [%g2 + %g7], %g3
         add     %g2, %o0, %g2
         ld      [%g2], %g3
-        ld      [%g2 + %g7], %g3
-        b       8f
-         inc    %o1
+        ld      [%g2 + %g7], %g3
 
 7:      cmp     %o2, 3
         ble     6b
          sll    %o2, 26, %g5            ! block << 26
-        inc     %o1
-8:
+8:      inc     %o1
         cmp     %o1, 0x7f
         ble     5b
          clr    %o2
@@ -151,10 +142,33 @@ viking_mxcc_flush_chunk:
         retl
          nop
 
-viking_flush_cache_all:
+#define WINDOW_FLUSH(tmp1, tmp2)                                        \
+        mov     0, tmp1;                                                \
+98:     ld      [%g6 + AOFF_task_tss + AOFF_thread_uwinmask], tmp2;     \
+        orcc    %g0, tmp2, %g0;                                         \
+        add     tmp1, 1, tmp1;                                          \
+        bne     98b;                                                    \
+         save   %sp, -64, %sp;                                          \
+99:     subcc   tmp1, 1, tmp1;                                          \
+        bne     99b;                                                    \
+         restore %g0, %g0, %g0;
+
+viking_flush_cache_page:
+#ifndef __SMP__
+        ld      [%o0 + 0x0], %o0        /* XXX vma->vm_mm, GROSS XXX */
+#endif
 viking_flush_cache_mm:
 viking_flush_cache_range:
-viking_flush_cache_page:
+#ifndef __SMP__
+        ld      [%o0 + AOFF_mm_context], %g1
+        cmp     %g1, -1
+        bne     viking_flush_cache_all
+         nop
+        b,a     viking_flush_cache_out
+#endif
+viking_flush_cache_all:
+        WINDOW_FLUSH(%g4, %g5)
+viking_flush_cache_out:
         retl
          nop
 
@@ -176,8 +190,10 @@ viking_flush_tlb_mm:
         sta     %g0, [%g2] ASI_M_FLUSH_PROBE
         retl
          sta    %g5, [%g1] ASI_M_MMUREGS
+#ifndef __SMP__
 1:      retl
          nop
+#endif
 
 viking_flush_tlb_range:
         mov     SRMMU_CTX_REG, %g1
@@ -198,8 +214,10 @@ viking_flush_tlb_range:
          sta    %g0, [%o1] ASI_M_FLUSH_PROBE
         retl
          sta    %g5, [%g1] ASI_M_MMUREGS
+#ifndef __SMP__
 2:      retl
          nop
+#endif
 
 viking_flush_tlb_page:
         ld      [%o0 + 0x00], %o0       /* XXX vma->vm_mm GROSS XXX */
@@ -215,11 +233,96 @@ viking_flush_tlb_page:
         sta     %g0, [%o1] ASI_M_FLUSH_PROBE
         retl
          sta    %g5, [%g1] ASI_M_MMUREGS
+#ifndef __SMP__
 1:      retl
          nop
+#endif
 
 viking_flush_page_to_ram:
 viking_flush_page_for_dma:
 viking_flush_sig_insns:
         retl
          nop
+
+#ifdef __SMP__
+        .globl  sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
+        .globl  sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
+sun4dsmp_flush_tlb_all:
+        sethi   %hi(sun4dsmp_flush_tlb_spin), %g3
+1:      ldstub  [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        tst     %g5
+        bne     2f
+         mov    0x400, %g1
+        sta     %g0, [%g1] ASI_M_FLUSH_PROBE
+        retl
+         stb    %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:      tst     %g5
+        bne,a   2b
+         ldub   [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        b,a     1b
+
+sun4dsmp_flush_tlb_mm:
+        sethi   %hi(sun4dsmp_flush_tlb_spin), %g3
+1:      ldstub  [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        tst     %g5
+        bne     2f
+         mov    SRMMU_CTX_REG, %g1
+        ld      [%o0 + AOFF_mm_context], %o1
+        lda     [%g1] ASI_M_MMUREGS, %g5
+        mov     0x300, %g2
+        sta     %o1, [%g1] ASI_M_MMUREGS
+        sta     %g0, [%g2] ASI_M_FLUSH_PROBE
+        sta     %g5, [%g1] ASI_M_MMUREGS
+        retl
+         stb    %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:      tst     %g5
+        bne,a   2b
+         ldub   [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        b,a     1b
+
+sun4dsmp_flush_tlb_range:
+        sethi   %hi(sun4dsmp_flush_tlb_spin), %g3
+1:      ldstub  [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        tst     %g5
+        bne     3f
+         mov    SRMMU_CTX_REG, %g1
+        ld      [%o0 + AOFF_mm_context], %o3
+        lda     [%g1] ASI_M_MMUREGS, %g5
+        sethi   %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+        sta     %o3, [%g1] ASI_M_MMUREGS
+        and     %o1, %o4, %o1
+        add     %o1, 0x200, %o1
+        sta     %g0, [%o1] ASI_M_FLUSH_PROBE
+2:      sub     %o1, %o4, %o1
+        cmp     %o1, %o2
+        blu,a   2b
+         sta    %g0, [%o1] ASI_M_FLUSH_PROBE
+        sta     %g5, [%g1] ASI_M_MMUREGS
+        retl
+         stb    %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+3:      tst     %g5
+        bne,a   3b
+         ldub   [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        b,a     1b
+
+sun4dsmp_flush_tlb_page:
+        sethi   %hi(sun4dsmp_flush_tlb_spin), %g3
+1:      ldstub  [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        tst     %g5
+        bne     2f
+         mov    SRMMU_CTX_REG, %g1
+        ld      [%o0 + 0x00], %o0       /* XXX vma->vm_mm GROSS XXX */
+        ld      [%o0 + AOFF_mm_context], %o3
+        lda     [%g1] ASI_M_MMUREGS, %g5
+        and     %o1, PAGE_MASK, %o1
+        sta     %o3, [%g1] ASI_M_MMUREGS
+        sta     %g0, [%o1] ASI_M_FLUSH_PROBE
+        sta     %g5, [%g1] ASI_M_MMUREGS
+        retl
+         stb    %g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:      tst     %g5
+        bne,a   2b
+         ldub   [%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+        b,a     1b
+        nop
+#endif
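
All four new sun4dsmp_* routines bracket their flush probe with the same byte spinlock: ldstub atomically fetches the lock byte and stores 0xff, so a zero return means the caller now owns the lock, and waiters spin on ordinary loads before retrying the atomic. A C rendering of that protocol, with gcc/clang __atomic builtins standing in for the sparc ldstub/stb instructions:

    #include <stdio.h>

    static unsigned char tlb_spin;   /* sun4dsmp_flush_tlb_spin */

    static void lock(void)
    {
        for (;;) {
            /* ldstub: atomically read the old byte and store all-ones. */
            if (__atomic_exchange_n(&tlb_spin, 0xff, __ATOMIC_ACQUIRE) == 0)
                return;              /* was free, now held by us */
            /* label 2 in the assembly: spin with plain loads until the
             * byte reads zero again (the bne,a 2b / ldub loop). */
            while (__atomic_load_n(&tlb_spin, __ATOMIC_RELAXED) != 0)
                ;
        }
    }

    static void unlock(void)
    {
        /* stb %g0: a single zero-byte store releases the lock. */
        __atomic_store_n(&tlb_spin, 0, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        lock();
        puts("flush probe issued under lock");
        unlock();
        return 0;
    }
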