| author | Ralf Baechle <ralf@linux-mips.org> | 1998-03-17 22:05:47 +0000 |
|---|---|---|
| committer | Ralf Baechle <ralf@linux-mips.org> | 1998-03-17 22:05:47 +0000 |
| commit | 27cfca1ec98e91261b1a5355d10a8996464b63af | |
| tree | 8e895a53e372fa682b4c0a585b9377d67ed70d0e /arch/sparc/mm | |
| parent | 6a76fb7214c477ccf6582bd79c5b4ccc4f9c41b1 | |
Look Ma' what I found on my harddisk ...
o New faster syscalls for 2.1.x, too
o Upgrade to 2.1.89.
Don't try to run this. It's flaky as hell. But feel free to debug ...
Diffstat (limited to 'arch/sparc/mm')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/sparc/mm/Makefile | 4 |
| -rw-r--r-- | arch/sparc/mm/asyncd.c | 9 |
| -rw-r--r-- | arch/sparc/mm/hypersparc.S | 14 |
| -rw-r--r-- | arch/sparc/mm/init.c | 7 |
| -rw-r--r-- | arch/sparc/mm/io-unit.c | 158 |
| -rw-r--r-- | arch/sparc/mm/iommu.c | 284 |
| -rw-r--r-- | arch/sparc/mm/srmmu.c | 506 |
| -rw-r--r-- | arch/sparc/mm/sun4c.c | 2 |
| -rw-r--r-- | arch/sparc/mm/turbosparc.S | 4 |
| -rw-r--r-- | arch/sparc/mm/viking.S | 81 |

10 files changed, 701 insertions, 368 deletions
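Both of the new translation layers added in this patch (io-unit.c for sun4d, iommu.c for sun4m) build their DVMA page tables out of 32-bit IOPTEs via a `MKIOPTE` macro that packs a right-shifted physical address together with cache/write/valid permission bits. The sketch below models that bit layout standalone; the mask values are assumptions patterned after the SPARC headers (`asm/iommu.h`), not quoted from this patch, and only the `>> 4` shift and the `IOPERM` composition come directly from the code that follows.

```c
/* Minimal sketch of the IOPTE construction used by io-unit.c/iommu.c
 * below.  The field masks are assumed values modeled on asm/iommu.h;
 * the real constants live in the kernel headers. */
#include <stdio.h>

typedef unsigned int iopte_t;

#define IOPTE_PAGE   0x07ffff00u  /* assumed: physical page frame field */
#define IOPTE_CACHE  0x00000080u  /* assumed: cacheable bit */
#define IOPTE_WRITE  0x00000004u  /* assumed: writable bit */
#define IOPTE_VALID  0x00000002u  /* assumed: valid bit */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
/* As in the patch: the physical address is shifted right by 4, so a
 * 36-bit SRMMU physical address fits a 32-bit entry with flag bits. */
#define MKIOPTE(phys) ((((phys) >> 4) & IOPTE_PAGE) | IOPERM)

int main(void)
{
    unsigned long phys = 0x12345000ul;  /* page-aligned physical address */
    iopte_t pte = MKIOPTE(phys);
    printf("phys %#lx -> iopte %#x\n", phys, pte);
    return 0;
}
```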
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 84cd1d2e2..bae5d323a 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -1,4 +1,4 @@ -# $Id: Makefile,v 1.26 1997/06/24 15:48:06 jj Exp $ +# $Id: Makefile,v 1.27 1997/11/07 15:01:27 jj Exp $ # Makefile for the linux Sparc-specific parts of the memory manager. # # Note! Dependencies are done automagically by 'make dep', which also @@ -10,7 +10,7 @@ O_TARGET := mm.o O_OBJS := fault.o init.o sun4c.o srmmu.o hypersparc.o viking.o \ tsunami.o loadmmu.o generic.o asyncd.o extable.o \ - turbosparc.o + turbosparc.o iommu.o io-unit.o include $(TOPDIR)/Rules.make diff --git a/arch/sparc/mm/asyncd.c b/arch/sparc/mm/asyncd.c index 46635db97..d9b17deed 100644 --- a/arch/sparc/mm/asyncd.c +++ b/arch/sparc/mm/asyncd.c @@ -1,4 +1,4 @@ -/* $Id: asyncd.c,v 1.10 1997/05/15 21:14:24 davem Exp $ +/* $Id: asyncd.c,v 1.11 1997/12/14 23:24:34 ecd Exp $ * The asyncd kernel daemon. This handles paging on behalf of * processes that receive page faults due to remote (async) memory * accesses. @@ -239,7 +239,8 @@ int asyncd(void *unused) current->session = 1; current->pgrp = 1; sprintf(current->comm, "asyncd"); - current->blocked = ~0UL; /* block all signals */ + sigfillset(¤t->blocked); /* block all signals */ + recalc_sigpending(current); /* Give asyncd a realtime priority. */ current->policy = SCHED_FIFO; @@ -259,7 +260,9 @@ int asyncd(void *unused) save_flags(flags); cli(); while (!async_queue) { - current->signal = 0; + spin_lock_irq(¤t->sigmask_lock); + flush_signals(current); + spin_unlock_irq(¤t->sigmask_lock); interruptible_sleep_on(&asyncd_wait); } diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S index e6ba7b235..2c27bfdab 100644 --- a/arch/sparc/mm/hypersparc.S +++ b/arch/sparc/mm/hypersparc.S @@ -1,4 +1,4 @@ -/* $Id: hypersparc.S,v 1.10 1997/05/27 19:29:58 jj Exp $ +/* $Id: hypersparc.S,v 1.12 1997/11/27 15:42:30 jj Exp $ * hypersparc.S: High speed Hypersparc mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) @@ -135,8 +135,7 @@ hypersparc_flush_cache_range: mov SRMMU_CTX_REG, %g7 lda [%g7] ASI_M_MMUREGS, %o3 sta %o0, [%g7] ASI_M_MMUREGS - sethi %hi(PAGE_SIZE), %g7 /* XXX ick, stupid stalls... */ - sub %o2, %g7, %o0 + add %o2, -PAGE_SIZE, %o0 1: or %o0, 0x400, %g7 lda [%g7] ASI_M_FLUSH_PROBE, %g7 @@ -157,10 +156,9 @@ hypersparc_flush_cache_range: bne 2b sta %g0, [%o2 + %g5] ASI_M_FLUSH_PAGE 3: - sethi %hi(PAGE_SIZE), %g7 cmp %o2, %o1 bne 1b - sub %o2, %g7, %o0 + add %o2, -PAGE_SIZE, %o0 mov SRMMU_FAULT_STATUS, %g5 lda [%g5] ASI_M_MMUREGS, %g0 mov SRMMU_CTX_REG, %g7 @@ -191,10 +189,9 @@ hypersparc_flush_cache_page: or %o1, 0x400, %o5 lda [%o5] ASI_M_FLUSH_PROBE, %g1 orcc %g0, %g1, %g0 - sethi %hi(PAGE_SIZE), %g7 be 2f add %o4, %o4, %o5 - add %o1, %g7, %o1 + sub %o1, -PAGE_SIZE, %o1 add %o4, %o5, %g1 add %o4, %g1, %g2 add %o4, %g2, %g3 @@ -242,9 +239,8 @@ hypersparc_flush_chunk: orcc %g5, 0, %g0 be 2f add %o4, %g1, %g2 - sethi %hi(PAGE_SIZE), %g5 add %o4, %g2, %g3 - add %o0, %g5, %o0 + sub %o0, -PAGE_SIZE, %o0 add %o4, %g3, %g4 add %o4, %g4, %g5 add %o4, %g5, %g7 diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c index 3dd0e470f..868fc2162 100644 --- a/arch/sparc/mm/init.c +++ b/arch/sparc/mm/init.c @@ -1,4 +1,4 @@ -/* $Id: init.c,v 1.49 1997/04/17 21:49:31 jj Exp $ +/* $Id: init.c,v 1.50 1998/01/10 18:19:42 ecd Exp $ * linux/arch/sparc/mm/init.c * * Copyright (C) 1995 David S. 
Miller (davem@caip.rutgers.edu) @@ -227,9 +227,10 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem)) addr = KERNBASE; while(addr < start_mem) { #ifdef CONFIG_BLK_DEV_INITRD - if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end) + if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end) { mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved); - else + num_physpages--; + } else #endif mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved); addr += PAGE_SIZE; diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c new file mode 100644 index 000000000..519c124c9 --- /dev/null +++ b/arch/sparc/mm/io-unit.c @@ -0,0 +1,158 @@ +/* $Id: io-unit.c,v 1.5 1997/12/22 16:09:26 jj Exp $ + * io-unit.c: IO-UNIT specific routines for memory management. + * + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/malloc.h> +#include <asm/pgtable.h> +#include <asm/sbus.h> +#include <asm/io.h> +#include <asm/io-unit.h> +#include <asm/mxcc.h> + +#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) + +#define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID) +#define MKIOPTE(phys) ((((phys)>>4) & IOUPTE_PAGE) | IOPERM) + +unsigned long sun4d_dma_base; +unsigned long sun4d_dma_vbase; +unsigned long sun4d_dma_size; +__initfunc(unsigned long +iounit_init(int sbi_node, int io_node, unsigned long memory_start, + unsigned long memory_end, struct linux_sbus *sbus)) +{ + iopte_t *xpt, *xptend; + unsigned long paddr; + struct iounit_struct *iounit; + struct linux_prom_registers iommu_promregs[PROMREG_MAX]; + + memory_start = LONG_ALIGN(memory_start); + iounit = (struct iounit_struct *)memory_start; + memory_start += sizeof(struct iounit_struct); + + prom_getproperty(sbi_node, "reg", (void *) iommu_promregs, + sizeof(iommu_promregs)); + prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3); + xpt = (iopte_t *) + sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16), + "XPT", iommu_promregs[2].which_io, 0x0); + if(!xpt) panic("Cannot map External Page Table."); + + sbus->iommu = (struct iommu_struct *)iounit; + iounit->page_table = xpt; + + /* Initialize new table. 
*/ + paddr = IOUNIT_DMA_BASE - sun4d_dma_base; + for (xptend = xpt + (sun4d_dma_size >> PAGE_SHIFT); + xpt < xptend; paddr++) + *xpt++ = MKIOPTE(paddr); + for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t); + xpt < xptend;) + *xpt++ = 0; + + return memory_start; +} + +static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus) +{ + /* Viking MXCC is IO coherent, just need to translate the address to DMA handle */ +#ifdef IOUNIT_DEBUG + if ((((unsigned long) vaddr) & PAGE_MASK) < sun4d_dma_vaddr || + (((unsigned long) vaddr) & PAGE_MASK) + len > sun4d_dma_vbase + sun4d_dma_size) + panic("Using non-DMA memory for iounit_get_scsi_one"); +#endif + return (__u32)(sun4d_dma_base + mmu_v2p((long)vaddr)); +} + +static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ + /* Viking MXCC is IO coherent, just need to translate the address to DMA handle */ + for (; sz >= 0; sz--) { +#ifdef IOUNIT_DEBUG + unsigned long page = ((unsigned long) sg[sz].addr) & PAGE_MASK; + if (page < sun4d_dma_vbase || page + sg[sz].len > sun4d_dma_vbase + sun4d_dma_size) + panic("Using non-DMA memory for iounit_get_scsi_sgl"); +#endif + sg[sz].dvma_addr = (__u32) (sun4d_dma_base + mmu_v2p((long)sg[sz].addr));; + } +} + +static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus) +{ +} + +static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ +} + +#ifdef CONFIG_SBUS +static void iounit_map_dma_area(unsigned long addr, int len) +{ + unsigned long page, end; + pgprot_t dvma_prot; + iopte_t *iopte; + struct linux_sbus *sbus; + + dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); + end = PAGE_ALIGN((addr + len)); + while(addr < end) { + page = get_free_page(GFP_KERNEL); + if(!page) { + prom_printf("alloc_dvma: Cannot get a dvma page\n"); + prom_halt(); + } else { + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + long i; + + pgdp = pgd_offset(init_task.mm, addr); + pmdp = pmd_offset(pgdp, addr); + ptep = pte_offset(pmdp, addr); + + set_pte(ptep, pte_val(mk_pte(page, dvma_prot))); + + i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT); + + for_each_sbus(sbus) { + struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu; + + iopte = (iopte_t *)(iounit->page_table + i); + *iopte = __iopte(MKIOPTE(mmu_v2p(page))); + } + } + addr += PAGE_SIZE; + } + flush_cache_all(); + flush_tlb_all(); +} +#endif + +static char *iounit_lockarea(char *vaddr, unsigned long len) +{ + return vaddr; +} + +static void iounit_unlockarea(char *vaddr, unsigned long len) +{ +} + +__initfunc(void ld_mmu_iounit(void)) +{ + mmu_lockarea = iounit_lockarea; + mmu_unlockarea = iounit_unlockarea; + + mmu_get_scsi_one = iounit_get_scsi_one; + mmu_get_scsi_sgl = iounit_get_scsi_sgl; + mmu_release_scsi_one = iounit_release_scsi_one; + mmu_release_scsi_sgl = iounit_release_scsi_sgl; + +#ifdef CONFIG_SBUS + mmu_map_dma_area = iounit_map_dma_area; +#endif +} diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c new file mode 100644 index 000000000..301946326 --- /dev/null +++ b/arch/sparc/mm/iommu.c @@ -0,0 +1,284 @@ +/* $Id: iommu.c,v 1.4 1997/11/21 17:31:31 jj Exp $ + * iommu.c: IOMMU specific routines for memory management. + * + * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru) + * Copyright (C) 1996 Eddie C. 
Dost (ecd@skynet.be) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) + */ + +#include <linux/config.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/malloc.h> +#include <asm/pgtable.h> +#include <asm/sbus.h> +#include <asm/io.h> +#include <asm/mxcc.h> + +/* srmmu.c */ +extern int viking_mxcc_present; +extern void (*flush_page_for_dma)(unsigned long page); +extern int flush_page_for_dma_global; +/* viking.S */ +extern void viking_flush_page(unsigned long page); +extern void viking_mxcc_flush_page(unsigned long page); + +#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) + +#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID) +#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ) + +static inline void iommu_map_dvma_pages_for_iommu(struct iommu_struct *iommu, + unsigned long kern_end) +{ + unsigned long first = page_offset; + unsigned long last = kern_end; + iopte_t *iopte = iommu->page_table; + + iopte += ((first - iommu->start) >> PAGE_SHIFT); + while(first <= last) { + *iopte++ = __iopte(MKIOPTE(mmu_v2p(first))); + first += PAGE_SIZE; + } +} + +__initfunc(unsigned long +iommu_init(int iommund, unsigned long memory_start, + unsigned long memory_end, struct linux_sbus *sbus)) +{ + unsigned int impl, vers, ptsize; + unsigned long tmp; + struct iommu_struct *iommu; + struct linux_prom_registers iommu_promregs[PROMREG_MAX]; + + memory_start = LONG_ALIGN(memory_start); + iommu = (struct iommu_struct *) memory_start; + memory_start += sizeof(struct iommu_struct); + prom_getproperty(iommund, "reg", (void *) iommu_promregs, + sizeof(iommu_promregs)); + iommu->regs = (struct iommu_regs *) + sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3), + "IOMMU registers", iommu_promregs[0].which_io, 0x0); + if(!iommu->regs) + panic("Cannot map IOMMU registers."); + impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28; + vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24; + tmp = iommu->regs->control; + tmp &= ~(IOMMU_CTRL_RNGE); + switch(page_offset & 0xf0000000) { + case 0xf0000000: + tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB); + iommu->plow = iommu->start = 0xf0000000; + break; + case 0xe0000000: + tmp |= (IOMMU_RNGE_512MB | IOMMU_CTRL_ENAB); + iommu->plow = iommu->start = 0xe0000000; + break; + case 0xd0000000: + case 0xc0000000: + tmp |= (IOMMU_RNGE_1GB | IOMMU_CTRL_ENAB); + iommu->plow = iommu->start = 0xc0000000; + break; + case 0xb0000000: + case 0xa0000000: + case 0x90000000: + case 0x80000000: + tmp |= (IOMMU_RNGE_2GB | IOMMU_CTRL_ENAB); + iommu->plow = iommu->start = 0x80000000; + break; + } + iommu->regs->control = tmp; + iommu_invalidate(iommu->regs); + iommu->end = 0xffffffff; + + /* Allocate IOMMU page table */ + ptsize = iommu->end - iommu->start + 1; + ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t); + + /* Stupid alignment constraints give me a headache. */ + memory_start = PAGE_ALIGN(memory_start); + memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1)); + iommu->lowest = iommu->page_table = (iopte_t *) memory_start; + memory_start += ptsize; + + /* Initialize new table. 
*/ + flush_cache_all(); + memset(iommu->page_table, 0, ptsize); + iommu_map_dvma_pages_for_iommu(iommu, memory_end); + if(viking_mxcc_present) { + unsigned long start = (unsigned long) iommu->page_table; + unsigned long end = (start + ptsize); + while(start < end) { + viking_mxcc_flush_page(start); + start += PAGE_SIZE; + } + } else if(flush_page_for_dma == viking_flush_page) { + unsigned long start = (unsigned long) iommu->page_table; + unsigned long end = (start + ptsize); + while(start < end) { + viking_flush_page(start); + start += PAGE_SIZE; + } + } + flush_tlb_all(); + iommu->regs->base = mmu_v2p((unsigned long) iommu->page_table) >> 4; + iommu_invalidate(iommu->regs); + + sbus->iommu = iommu; + printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n", + impl, vers, iommu->page_table, ptsize); + return memory_start; +} + +static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +{ + return (__u32)vaddr; +} + +static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +{ + flush_page_for_dma(0); + return (__u32)vaddr; +} + +static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct linux_sbus *sbus) +{ + unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; + + while(page < ((unsigned long)(vaddr + len))) { + flush_page_for_dma(page); + page += PAGE_SIZE; + } + return (__u32)vaddr; +} + +static void iommu_get_scsi_sgl_noflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ + for (; sz >= 0; sz--) + sg[sz].dvma_addr = (__u32) (sg[sz].addr); +} + +static void iommu_get_scsi_sgl_gflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ + flush_page_for_dma(0); + for (; sz >= 0; sz--) + sg[sz].dvma_addr = (__u32) (sg[sz].addr); +} + +static void iommu_get_scsi_sgl_pflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ + unsigned long page, oldpage = 0; + + while(sz >= 0) { + page = ((unsigned long) sg[sz].addr) & PAGE_MASK; + if (oldpage == page) + page += PAGE_SIZE; /* We flushed that page already */ + while(page < (unsigned long)(sg[sz].addr + sg[sz].len)) { + flush_page_for_dma(page); + page += PAGE_SIZE; + } + sg[sz].dvma_addr = (__u32) (sg[sz].addr); + sz--; + oldpage = page - PAGE_SIZE; + } +} + +static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus) +{ +} + +static void iommu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) +{ +} + +#ifdef CONFIG_SBUS +static void iommu_map_dma_area(unsigned long addr, int len) +{ + unsigned long page, end; + pgprot_t dvma_prot; + struct iommu_struct *iommu = SBus_chain->iommu; + iopte_t *iopte = iommu->page_table; + iopte_t *iopte_first = iopte; + + if(viking_mxcc_present) + dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); + else + dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV); + + iopte += ((addr - iommu->start) >> PAGE_SHIFT); + end = PAGE_ALIGN((addr + len)); + while(addr < end) { + page = get_free_page(GFP_KERNEL); + if(!page) { + prom_printf("alloc_dvma: Cannot get a dvma page\n"); + prom_halt(); + } else { + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + + pgdp = pgd_offset(init_task.mm, addr); + pmdp = pmd_offset(pgdp, addr); + ptep = pte_offset(pmdp, addr); + + set_pte(ptep, pte_val(mk_pte(page, dvma_prot))); + + iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page)); + } + addr += PAGE_SIZE; + } + flush_cache_all(); + if(viking_mxcc_present) { + unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK; + unsigned long end = 
PAGE_ALIGN(((unsigned long) iopte)); + while(start < end) { + viking_mxcc_flush_page(start); + start += PAGE_SIZE; + } + } else if(flush_page_for_dma == viking_flush_page) { + unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK; + unsigned long end = PAGE_ALIGN(((unsigned long) iopte)); + while(start < end) { + viking_flush_page(start); + start += PAGE_SIZE; + } + } + flush_tlb_all(); + iommu_invalidate(iommu->regs); +} +#endif + +static char *iommu_lockarea(char *vaddr, unsigned long len) +{ + return vaddr; +} + +static void iommu_unlockarea(char *vaddr, unsigned long len) +{ +} + +__initfunc(void ld_mmu_iommu(void)) +{ + mmu_lockarea = iommu_lockarea; + mmu_unlockarea = iommu_unlockarea; + + if (!flush_page_for_dma) { + /* IO coherent chip */ + mmu_get_scsi_one = iommu_get_scsi_one_noflush; + mmu_get_scsi_sgl = iommu_get_scsi_sgl_noflush; + } else if (flush_page_for_dma_global) { + /* flush_page_for_dma flushes everything, no matter of what page is it */ + mmu_get_scsi_one = iommu_get_scsi_one_gflush; + mmu_get_scsi_sgl = iommu_get_scsi_sgl_gflush; + } else { + mmu_get_scsi_one = iommu_get_scsi_one_pflush; + mmu_get_scsi_sgl = iommu_get_scsi_sgl_pflush; + } + mmu_release_scsi_one = iommu_release_scsi_one; + mmu_release_scsi_sgl = iommu_release_scsi_sgl; + +#ifdef CONFIG_SBUS + mmu_map_dma_area = iommu_map_dma_area; +#endif +} diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 8c3358a64..b16e3cc1e 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -1,4 +1,4 @@ -/* $Id: srmmu.c,v 1.151 1997/08/28 11:10:54 jj Exp $ +/* $Id: srmmu.c,v 1.156 1997/11/28 14:23:42 jj Exp $ * srmmu.c: SRMMU specific routines for memory management. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) @@ -25,11 +25,11 @@ #include <asm/cache.h> #include <asm/oplib.h> #include <asm/sbus.h> -#include <asm/iommu.h> #include <asm/asi.h> #include <asm/msi.h> #include <asm/a.out.h> #include <asm/mmu_context.h> +#include <asm/io-unit.h> /* Now the cpu specific definitions. */ #include <asm/viking.h> @@ -47,6 +47,10 @@ int vac_badbits; extern unsigned long sparc_iobase_vaddr; +extern unsigned long sun4d_dma_base; +extern unsigned long sun4d_dma_size; +extern unsigned long sun4d_dma_vbase; + #ifdef __SMP__ #define FLUSH_BEGIN(mm) #define FLUSH_END @@ -55,10 +59,14 @@ extern unsigned long sparc_iobase_vaddr; #define FLUSH_END } #endif +static int phys_mem_contig; +long page_contig_offset; + static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp); static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep); -static void (*flush_page_for_dma)(unsigned long page); +void (*flush_page_for_dma)(unsigned long page); +int flush_page_for_dma_global = 1; static void (*flush_chunk)(unsigned long chunk); #ifdef __SMP__ static void (*local_flush_page_for_dma)(unsigned long page); @@ -93,37 +101,13 @@ static struct srmmu_trans *srmmu_p2v_hash[SRMMU_HASHSZ]; #define srmmu_ahashfn(addr) ((addr) >> 24) -static int viking_mxcc_present = 0; - -void srmmu_frob_mem_map(unsigned long start_mem) -{ - unsigned long bank_start, bank_end; - unsigned long addr; - int i; - - /* First, mark all pages as invalid. 
*/ - for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE) - mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved); - - start_mem = PAGE_ALIGN(start_mem); - for(i = 0; srmmu_map[i].size; i++) { - bank_start = srmmu_map[i].vbase; - bank_end = bank_start + srmmu_map[i].size; - while(bank_start < bank_end) { - if((bank_start >= KERNBASE) && - (bank_start < start_mem)) { - bank_start += PAGE_SIZE; - continue; - } - mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved); - bank_start += PAGE_SIZE; - } - } -} +int viking_mxcc_present = 0; /* Physical memory can be _very_ non-contiguous on the sun4m, especially * the SS10/20 class machines and with the latest openprom revisions. * So we have to do a quick lookup. + * We use the same for SS1000/SC2000 as a fall back, when phys memory is + * non-contiguous. */ static inline unsigned long srmmu_v2p(unsigned long vaddr) { @@ -145,6 +129,21 @@ static inline unsigned long srmmu_p2v(unsigned long paddr) return 0xffffffffUL; } +/* Physical memory on most SS1000/SC2000 can be contiguous, so we handle that case + * as a special case to make things faster. + */ +static inline unsigned long srmmu_c_v2p(unsigned long vaddr) +{ + if (vaddr >= KERNBASE) return vaddr - KERNBASE; + return (vaddr - page_contig_offset); +} + +static inline unsigned long srmmu_c_p2v(unsigned long paddr) +{ + if (paddr < (0xfd000000 - KERNBASE)) return paddr + KERNBASE; + return (paddr + page_contig_offset); +} + /* In general all page table modifications should use the V8 atomic * swap instruction. This insures the mmu and the cpu are in sync * with respect to ref/mod bits in the page tables. @@ -158,6 +157,37 @@ static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value) /* Functions really use this, not srmmu_swap directly. */ #define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry)) +__initfunc(void srmmu_frob_mem_map(unsigned long start_mem)) +{ + unsigned long bank_start, bank_end; + unsigned long addr; + int i; + + /* First, mark all pages as invalid. */ + for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE) + mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved); + + start_mem = PAGE_ALIGN(start_mem); + for(i = 0; srmmu_map[i].size; i++) { + bank_start = srmmu_map[i].vbase; + bank_end = bank_start + srmmu_map[i].size; + while(bank_start < bank_end) { + if((bank_start >= KERNBASE) && + (bank_start < start_mem)) { + bank_start += PAGE_SIZE; + continue; + } + mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved); + bank_start += PAGE_SIZE; + } + } + if (sparc_cpu_model == sun4d) { + for (addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE) + if (addr < sun4d_dma_vbase || addr >= sun4d_dma_vbase + sun4d_dma_size) + clear_bit(PG_DMA, &mem_map[MAP_NR(addr)].flags); + } +} + /* The very generic SRMMU page table operations. 
*/ static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); } static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); } @@ -181,6 +211,15 @@ static unsigned long srmmu_pmd_page(pmd_t pmd) static unsigned long srmmu_pte_page(pte_t pte) { return srmmu_device_memory(pte_val(pte))?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); } +static unsigned long srmmu_c_pgd_page(pgd_t pgd) +{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_c_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); } + +static unsigned long srmmu_c_pmd_page(pmd_t pmd) +{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_c_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); } + +static unsigned long srmmu_c_pte_page(pte_t pte) +{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_c_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); } + static int srmmu_pte_none(pte_t pte) { return !(pte_val(pte) & 0xFFFFFFF); } static int srmmu_pte_present(pte_t pte) @@ -227,6 +266,9 @@ static pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMM static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot) { return __pte(((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); } +static pte_t srmmu_c_mk_pte(unsigned long page, pgprot_t pgprot) +{ return __pte(((srmmu_c_v2p(page)) >> 4) | pgprot_val(pgprot)); } + static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot) { return __pte(((page) >> 4) | pgprot_val(pgprot)); } @@ -250,6 +292,21 @@ static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep) set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4))); } +static void srmmu_c_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp) +{ + set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pgdp) >> 4))); +} + +static void srmmu_c_pgd_set(pgd_t * pgdp, pmd_t * pmdp) +{ + set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) pmdp) >> 4))); +} + +static void srmmu_c_pmd_set(pmd_t * pmdp, pte_t * ptep) +{ + set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_c_v2p((unsigned long) ptep) >> 4))); +} + static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot) { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); @@ -273,6 +330,18 @@ static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address) return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); } +/* Find an entry in the second-level page table.. */ +static pmd_t *srmmu_c_pmd_offset(pgd_t * dir, unsigned long address) +{ + return (pmd_t *) srmmu_c_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)); +} + +/* Find an entry in the third-level page table.. */ +static pte_t *srmmu_c_pte_offset(pmd_t * dir, unsigned long address) +{ + return (pte_t *) srmmu_c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); +} + /* This must update the context table entry for this process. */ static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp) { @@ -560,7 +629,7 @@ static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address) pmd_set(pmd, BAD_PAGETABLE); return NULL; } - return (pte_t *) srmmu_pmd_page(*pmd) + address; + return (pte_t *) pmd_page(*pmd) + address; } static void srmmu_pmd_free_kernel(pmd_t *pmd) @@ -617,7 +686,7 @@ static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address) pmd_set(pmd, BAD_PAGETABLE); return NULL; } - return ((pte_t *) srmmu_pmd_page(*pmd)) + address; + return ((pte_t *) pmd_page(*pmd)) + address; } /* Real three-level page tables on SRMMU. 
*/ @@ -646,7 +715,7 @@ static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address) pgd_set(pgd, (pmd_t *) BAD_PAGETABLE); return NULL; } - return (pmd_t *) srmmu_pgd_page(*pgd) + address; + return (pmd_t *) pgd_page(*pgd) + address; } static void srmmu_pgd_free(pgd_t *pgd) @@ -789,8 +858,8 @@ void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_ty physaddr &= PAGE_MASK; pgdp = srmmu_pgd_offset(init_task.mm, virt_addr); - pmdp = srmmu_pmd_offset(pgdp, virt_addr); - ptep = srmmu_pte_offset(pmdp, virt_addr); + pmdp = pmd_offset(pgdp, virt_addr); + ptep = pte_offset(pmdp, virt_addr); tmp = (physaddr >> 4) | SRMMU_ET_PTE; /* I need to test whether this is consistent over all @@ -814,23 +883,14 @@ void srmmu_unmapioaddr(unsigned long virt_addr) pte_t *ptep; pgdp = srmmu_pgd_offset(init_task.mm, virt_addr); - pmdp = srmmu_pmd_offset(pgdp, virt_addr); - ptep = srmmu_pte_offset(pmdp, virt_addr); + pmdp = pmd_offset(pgdp, virt_addr); + ptep = pte_offset(pmdp, virt_addr); /* No need to flush uncacheable page. */ - set_pte(ptep, srmmu_mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)); + set_pte(ptep, mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)); flush_tlb_all(); } -static char *srmmu_lockarea(char *vaddr, unsigned long len) -{ - return vaddr; -} - -static void srmmu_unlockarea(char *vaddr, unsigned long len) -{ -} - /* This is used in many routines below. */ #define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask)) @@ -844,7 +904,7 @@ static void srmmu_unlockarea(char *vaddr, unsigned long len) */ struct task_struct *srmmu_alloc_task_struct(void) { - return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1, 0); + return (struct task_struct *) __get_free_pages(GFP_KERNEL, 1); } static void srmmu_free_task_struct(struct task_struct *tsk) @@ -1131,10 +1191,12 @@ static void cypress_flush_chunk(unsigned long chunk) cypress_flush_page_to_ram(chunk); } +#if NOTUSED /* Cypress is also IO cache coherent. */ static void cypress_flush_page_for_dma(unsigned long page) { } +#endif /* Cypress has unified L2 VIPT, from which both instructions and data * are stored. It does not have an onboard icache of any sort, therefore @@ -1220,6 +1282,9 @@ extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr); extern void viking_flush_page(unsigned long page); extern void viking_mxcc_flush_page(unsigned long page); extern void viking_flush_chunk(unsigned long chunk); +extern void viking_c_flush_page(unsigned long page); +extern void viking_c_mxcc_flush_page(unsigned long page); +extern void viking_c_flush_chunk(unsigned long chunk); extern void viking_mxcc_flush_chunk(unsigned long chunk); extern void viking_flush_tlb_all(void); extern void viking_flush_tlb_mm(struct mm_struct *mm); @@ -1345,184 +1410,6 @@ static void hypersparc_init_new_context(struct mm_struct *mm) srmmu_set_context(mm->context); } -/* IOMMU things go here. 
*/ - -#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) - -#define IOPERM (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID) -#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ) - -static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu, - unsigned long kern_end) -{ - unsigned long first = page_offset; - unsigned long last = kern_end; - iopte_t *iopte = iommu->page_table; - - iopte += ((first - iommu->start) >> PAGE_SHIFT); - while(first <= last) { - *iopte++ = __iopte(MKIOPTE(srmmu_v2p(first))); - first += PAGE_SIZE; - } -} - -unsigned long iommu_init(int iommund, unsigned long memory_start, - unsigned long memory_end, struct linux_sbus *sbus) -{ - unsigned int impl, vers, ptsize; - unsigned long tmp; - struct iommu_struct *iommu; - struct linux_prom_registers iommu_promregs[PROMREG_MAX]; - - memory_start = LONG_ALIGN(memory_start); - iommu = (struct iommu_struct *) memory_start; - memory_start += sizeof(struct iommu_struct); - prom_getproperty(iommund, "reg", (void *) iommu_promregs, - sizeof(iommu_promregs)); - iommu->regs = (struct iommu_regs *) - sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3), - "IOMMU registers", iommu_promregs[0].which_io, 0x0); - if(!iommu->regs) - panic("Cannot map IOMMU registers."); - impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28; - vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24; - tmp = iommu->regs->control; - tmp &= ~(IOMMU_CTRL_RNGE); - switch(page_offset & 0xf0000000) { - case 0xf0000000: - tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB); - iommu->plow = iommu->start = 0xf0000000; - break; - case 0xe0000000: - tmp |= (IOMMU_RNGE_512MB | IOMMU_CTRL_ENAB); - iommu->plow = iommu->start = 0xe0000000; - break; - case 0xd0000000: - case 0xc0000000: - tmp |= (IOMMU_RNGE_1GB | IOMMU_CTRL_ENAB); - iommu->plow = iommu->start = 0xc0000000; - break; - case 0xb0000000: - case 0xa0000000: - case 0x90000000: - case 0x80000000: - tmp |= (IOMMU_RNGE_2GB | IOMMU_CTRL_ENAB); - iommu->plow = iommu->start = 0x80000000; - break; - } - iommu->regs->control = tmp; - iommu_invalidate(iommu->regs); - iommu->end = 0xffffffff; - - /* Allocate IOMMU page table */ - ptsize = iommu->end - iommu->start + 1; - ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t); - - /* Stupid alignment constraints give me a headache. */ - memory_start = PAGE_ALIGN(memory_start); - memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1)); - iommu->lowest = iommu->page_table = (iopte_t *) memory_start; - memory_start += ptsize; - - /* Initialize new table. 
*/ - flush_cache_all(); - memset(iommu->page_table, 0, ptsize); - srmmu_map_dvma_pages_for_iommu(iommu, memory_end); - if(viking_mxcc_present) { - unsigned long start = (unsigned long) iommu->page_table; - unsigned long end = (start + ptsize); - while(start < end) { - viking_mxcc_flush_page(start); - start += PAGE_SIZE; - } - } else if(flush_page_for_dma == viking_flush_page) { - unsigned long start = (unsigned long) iommu->page_table; - unsigned long end = (start + ptsize); - while(start < end) { - viking_flush_page(start); - start += PAGE_SIZE; - } - } - flush_tlb_all(); - iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4; - iommu_invalidate(iommu->regs); - - sbus->iommu = iommu; - printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n", - impl, vers, iommu->page_table, ptsize); - return memory_start; -} - -void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus) -{ - u32 *iommu; - struct linux_prom_registers iommu_promregs[PROMREG_MAX]; - - prom_getproperty(sbi_node, "reg", (void *) iommu_promregs, - sizeof(iommu_promregs)); - iommu = (u32 *) - sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16), - "XPT", iommu_promregs[2].which_io, 0x0); - if(!iommu) - panic("Cannot map External Page Table."); - - /* Initialize new table. */ - flush_cache_all(); - memset(iommu, 0, 16 * PAGE_SIZE); - if(viking_mxcc_present) { - unsigned long start = (unsigned long) iommu; - unsigned long end = (start + 16 * PAGE_SIZE); - while(start < end) { - viking_mxcc_flush_page(start); - start += PAGE_SIZE; - } - } else if(flush_page_for_dma == viking_flush_page) { - unsigned long start = (unsigned long) iommu; - unsigned long end = (start + 16 * PAGE_SIZE); - while(start < end) { - viking_flush_page(start); - start += PAGE_SIZE; - } - } - flush_tlb_all(); - - sbus->iommu = (struct iommu_struct *)iommu; -} - -static __u32 srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus) -{ - unsigned long page = ((unsigned long) vaddr) & PAGE_MASK; - - while(page < ((unsigned long)(vaddr + len))) { - flush_page_for_dma(page); - page += PAGE_SIZE; - } - return (__u32)vaddr; -} - -static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) -{ - unsigned long page; - - while(sz >= 0) { - page = ((unsigned long) sg[sz].addr) & PAGE_MASK; - while(page < (unsigned long)(sg[sz].addr + sg[sz].len)) { - flush_page_for_dma(page); - page += PAGE_SIZE; - } - sg[sz].dvma_addr = (__u32) (sg[sz].addr); - sz--; - } -} - -static void srmmu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus) -{ -} - -static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus) -{ -} - static unsigned long mempool; /* NOTE: All of this startup code assumes the low 16mb (approx.) 
of @@ -1652,63 +1539,6 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end) } } -#ifdef CONFIG_SBUS -static void srmmu_map_dma_area(unsigned long addr, int len) -{ - unsigned long page, end; - pgprot_t dvma_prot; - struct iommu_struct *iommu = SBus_chain->iommu; - iopte_t *iopte = iommu->page_table; - iopte_t *iopte_first = iopte; - - if(viking_mxcc_present) - dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV); - else - dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV); - - iopte += ((addr - iommu->start) >> PAGE_SHIFT); - end = PAGE_ALIGN((addr + len)); - while(addr < end) { - page = get_free_page(GFP_KERNEL); - if(!page) { - prom_printf("alloc_dvma: Cannot get a dvma page\n"); - prom_halt(); - } else { - pgd_t *pgdp; - pmd_t *pmdp; - pte_t *ptep; - - pgdp = srmmu_pgd_offset(init_task.mm, addr); - pmdp = srmmu_pmd_offset(pgdp, addr); - ptep = srmmu_pte_offset(pmdp, addr); - - set_pte(ptep, pte_val(srmmu_mk_pte(page, dvma_prot))); - - iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(page)); - } - addr += PAGE_SIZE; - } - flush_cache_all(); - if(viking_mxcc_present) { - unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK; - unsigned long end = PAGE_ALIGN(((unsigned long) iopte)); - while(start < end) { - viking_mxcc_flush_page(start); - start += PAGE_SIZE; - } - } else if(flush_page_for_dma == viking_flush_page) { - unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK; - unsigned long end = PAGE_ALIGN(((unsigned long) iopte)); - while(start < end) { - viking_flush_page(start); - start += PAGE_SIZE; - } - } - flush_tlb_all(); - iommu_invalidate(iommu->regs); -} -#endif - /* #define DEBUG_MAP_KERNEL */ #ifdef DEBUG_MAP_KERNEL @@ -2068,6 +1898,59 @@ check_and_return: srmmu_p2v_hash[srmmu_ahashfn(addr)] = &srmmu_map[entry]; } + page_contig_offset = page_offset - (0xfd000000 - KERNBASE); + phys_mem_contig = 1; + for(entry = 0; srmmu_map[entry].size; entry++) + if (srmmu_map[entry].pbase != srmmu_c_v2p (srmmu_map[entry].vbase)) { + phys_mem_contig = 0; + break; + } + if (phys_mem_contig) { + printk ("SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes\n"); + pte_page = srmmu_c_pte_page; + pmd_page = srmmu_c_pmd_page; + pgd_page = srmmu_c_pgd_page; + mk_pte = srmmu_c_mk_pte; + pte_offset = srmmu_c_pte_offset; + pmd_offset = srmmu_c_pmd_offset; + if (ctxd_set == srmmu_ctxd_set) + ctxd_set = srmmu_c_ctxd_set; + pgd_set = srmmu_c_pgd_set; + pmd_set = srmmu_c_pmd_set; + mmu_v2p = srmmu_c_v2p; + mmu_p2v = srmmu_c_p2v; + if (flush_chunk == viking_flush_chunk) + flush_chunk = viking_c_flush_chunk; + } + + if (sparc_cpu_model == sun4d) { + int i, j = -1; + unsigned long bank_start, bank_end; + + sun4d_dma_vbase = 0; + sun4d_dma_size = IOUNIT_DMA_SIZE - IOUNIT_DVMA_SIZE; + for (i = 0; srmmu_map[i].size; i++) { + bank_start = srmmu_map[i].vbase; + bank_end = bank_start + srmmu_map[i].size; + if (bank_start <= KERNBASE && bank_end > KERNBASE) + j = i; + else if (srmmu_map[i].size >= sun4d_dma_size) { + sun4d_dma_vbase = srmmu_map[i].vbase; + break; + } + } + if (!sun4d_dma_vbase && j != -1) { + if (srmmu_map[j].size >= sun4d_dma_size + 0x1000000) + sun4d_dma_vbase = srmmu_map[j].vbase + 0x1000000; + else { + sun4d_dma_vbase = srmmu_map[j].vbase; + if (srmmu_map[j].size < sun4d_dma_size) + sun4d_dma_size = srmmu_map[j].size; + } + } + sun4d_dma_base = IOUNIT_DMA_BASE - srmmu_v2p(sun4d_dma_vbase); + } + return; /* SUCCESS! 
*/ } @@ -2104,7 +1987,7 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem) physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */ if (sparc_cpu_model == sun4d) - num_contexts = 65536; /* We now it is Viking */ + num_contexts = 65536; /* We know it is Viking */ else { /* Find the number of contexts on the srmmu. */ cpunode = prom_getchild(prom_root_node); @@ -2218,8 +2101,8 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma, { if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) { struct vm_area_struct *vmaring; - struct dentry *dentry; - struct inode *inode = NULL; + struct file *file; + struct inode *inode; unsigned long flags, offset, vaddr, start; int alias_found = 0; pgd_t *pgdp; @@ -2228,11 +2111,10 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma, save_and_cli(flags); - dentry = vma->vm_dentry; - if(dentry) - inode = dentry->d_inode; - if (!inode) + file = vma->vm_file; + if (!file) goto done; + inode = file->f_dentry->d_inode; offset = (address & PAGE_MASK) - vma->vm_start; vmaring = inode->i_mmap; do { @@ -2381,11 +2263,6 @@ static void poke_hypersparc(void) hyper_flush_whole_icache(); clear = srmmu_get_faddr(); clear = srmmu_get_fstatus(); - -#ifdef __SMP__ - /* Avoid unnecessary cross calls. */ - flush_page_for_dma = local_flush_page_for_dma; -#endif } __initfunc(static void init_hypersparc(void)) @@ -2407,7 +2284,7 @@ __initfunc(static void init_hypersparc(void)) flush_page_to_ram = hypersparc_flush_page_to_ram; flush_sig_insns = hypersparc_flush_sig_insns; - flush_page_for_dma = hypersparc_flush_page_for_dma; + flush_page_for_dma = NULL /* hypersparc_flush_page_for_dma */; flush_chunk = hypersparc_flush_chunk; /* local flush _only_ */ @@ -2480,7 +2357,7 @@ __initfunc(static void init_cypress_common(void)) flush_page_to_ram = cypress_flush_page_to_ram; flush_sig_insns = cypress_flush_sig_insns; - flush_page_for_dma = cypress_flush_page_for_dma; + flush_page_for_dma = NULL /* cypress_flush_page_for_dma */; sparc_update_rootmmu_dir = cypress_update_rootmmu_dir; update_mmu_cache = srmmu_vac_update_mmu_cache; @@ -2671,7 +2548,7 @@ __initfunc(static void init_turbosparc(void)) #endif flush_sig_insns = turbosparc_flush_sig_insns; - flush_page_for_dma = hypersparc_flush_page_for_dma; + flush_page_for_dma = NULL /* turbosparc_flush_page_for_dma */; poke_srmmu = poke_turbosparc; } @@ -2761,6 +2638,9 @@ static void poke_viking(void) #ifdef __SMP__ /* Avoid unnecessary cross calls. */ flush_cache_all = local_flush_cache_all; + flush_cache_mm = local_flush_cache_mm; + flush_cache_range = local_flush_cache_range; + flush_cache_page = local_flush_cache_page; flush_page_to_ram = local_flush_page_to_ram; flush_sig_insns = local_flush_sig_insns; flush_page_for_dma = local_flush_page_for_dma; @@ -2796,6 +2676,9 @@ __initfunc(static void init_viking(void)) * which we use the IOMMU. */ flush_page_for_dma = viking_flush_page; + /* Also, this is so far the only chip which actually uses + the page argument to flush_page_for_dma */ + flush_page_for_dma_global = 0; } else { srmmu_name = "TI Viking/MXCC"; viking_mxcc_present = 1; @@ -2803,7 +2686,7 @@ __initfunc(static void init_viking(void)) flush_chunk = viking_mxcc_flush_chunk; /* local flush _only_ */ /* MXCC vikings lack the DMA snooping bug. 
*/ - flush_page_for_dma = viking_flush_page_for_dma; + flush_page_for_dma = NULL /* viking_flush_page_for_dma */; } flush_cache_all = viking_flush_cache_all; @@ -2951,9 +2834,12 @@ static void smp_flush_page_for_dma(unsigned long page) #endif -/* Load up routines and constants for sun4m mmu */ +/* Load up routines and constants for sun4m and sun4d mmu */ __initfunc(void ld_mmu_srmmu(void)) { + extern void ld_mmu_iommu(void); + extern void ld_mmu_iounit(void); + /* First the constants */ pmd_shift = SRMMU_PMD_SHIFT; pmd_size = SRMMU_PMD_SIZE; @@ -3031,18 +2917,7 @@ __initfunc(void ld_mmu_srmmu(void)) pte_mkyoung = srmmu_pte_mkyoung; update_mmu_cache = srmmu_update_mmu_cache; destroy_context = srmmu_destroy_context; - mmu_lockarea = srmmu_lockarea; - mmu_unlockarea = srmmu_unlockarea; - - mmu_get_scsi_one = srmmu_get_scsi_one; - mmu_get_scsi_sgl = srmmu_get_scsi_sgl; - mmu_release_scsi_one = srmmu_release_scsi_one; - mmu_release_scsi_sgl = srmmu_release_scsi_sgl; - -#ifdef CONFIG_SBUS - mmu_map_dma_area = srmmu_map_dma_area; -#endif - + mmu_info = srmmu_mmu_info; mmu_v2p = srmmu_v2p; mmu_p2v = srmmu_p2v; @@ -3085,6 +2960,11 @@ __initfunc(void ld_mmu_srmmu(void)) flush_tlb_page = smp_flush_tlb_page; flush_page_to_ram = smp_flush_page_to_ram; flush_sig_insns = smp_flush_sig_insns; - flush_page_for_dma = smp_flush_page_for_dma; + if (flush_page_for_dma) + flush_page_for_dma = smp_flush_page_for_dma; #endif + if (sparc_cpu_model == sun4d) + ld_mmu_iounit(); + else + ld_mmu_iommu(); } diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c index 7ffca1033..c70753fa4 100644 --- a/arch/sparc/mm/sun4c.c +++ b/arch/sparc/mm/sun4c.c @@ -1185,7 +1185,7 @@ static struct task_struct *sun4c_alloc_task_struct(void) unsigned long addr, pages; int entry; - pages = __get_free_pages(GFP_KERNEL, 1, 0); + pages = __get_free_pages(GFP_KERNEL, 1); if(!pages) return (struct task_struct *) 0; diff --git a/arch/sparc/mm/turbosparc.S b/arch/sparc/mm/turbosparc.S index 5660d4f84..415f09056 100644 --- a/arch/sparc/mm/turbosparc.S +++ b/arch/sparc/mm/turbosparc.S @@ -1,5 +1,5 @@ -/* $Id: turbosparc.S,v 1.1 1997/07/18 06:26:22 ralf Exp $ - * turbosparc.S: High speed Hypersparc mmu/cache operations. +/* $Id: turbosparc.S,v 1.2 1998/03/16 08:40:31 ralf Exp $ + * turbosparc.S: High speed TurboSparc mmu/cache operations. * * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index 19d426ec7..b05b7b416 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -1,7 +1,8 @@ -/* $Id: viking.S,v 1.3 1997/05/04 10:02:14 ecd Exp $ +/* $Id: viking.S,v 1.6 1997/11/27 15:42:32 jj Exp $ * viking.S: High speed Viking cache/mmu operations * * Copyright (C) 1997 Eddie C. 
Dost (ecd@skynet.be) + * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <asm/ptrace.h> @@ -36,6 +37,20 @@ .globl viking_flush_tlb_all, viking_flush_tlb_mm .globl viking_flush_tlb_range, viking_flush_tlb_page + .globl viking_c_mxcc_flush_page + .globl viking_c_flush_page, viking_c_flush_chunk + +viking_c_flush_page: +viking_c_flush_chunk: + sethi %hi(KERNBASE), %g2 + cmp %o0, %g2 + bgeu 2f + sub %o0, %g2, %g3 + sethi %hi(C_LABEL(page_contig_offset)), %g2 + ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2 + ba 2f + sub %o0, %g2, %g3 + viking_flush_page: viking_flush_chunk: sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2 @@ -56,13 +71,14 @@ viking_flush_chunk: sub %o0, %o1, %g2 ld [%g3 + 4], %o0 add %g2, %o0, %g3 - srl %g3, 12, %g1 ! ppage >> 12 +2: srl %g3, 12, %g1 ! ppage >> 12 clr %o1 ! set counter, 0 - 127 sethi %hi(KERNBASE + PAGE_SIZE - 0x80000000), %o3 sethi %hi(0x80000000), %o4 sethi %hi(VIKING_PTAG_VALID | VIKING_PTAG_DIRTY), %o5 - sethi %hi(PAGE_SIZE), %o0 + sethi %hi(2*PAGE_SIZE), %o0 + sethi %hi(PAGE_SIZE), %g7 clr %o2 ! block counter, 0 - 3 5: sll %o1, 5, %g4 @@ -83,20 +99,16 @@ viking_flush_chunk: add %g4, %o3, %g2 ! (KERNBASE + PAGE_SIZE) | (set << 5) ld [%g2], %g3 + ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 + ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 + ld [%g2 + %g7], %g3 add %g2, %o0, %g2 ld [%g2], %g3 - add %g2, %o0, %g2 - ld [%g2], %g3 - add %g2, %o0, %g2 - ld [%g2], %g3 - add %g2, %o0, %g2 - ld [%g2], %g3 - add %g2, %o0, %g2 - ld [%g2], %g3 + ld [%g2 + %g7], %g3 b 8f inc %o1 @@ -115,6 +127,15 @@ viking_flush_chunk: retl nop +viking_c_mxcc_flush_page: + sethi %hi(KERNBASE), %g2 + cmp %o0, %g2 + bgeu 2f + sub %o0, %g2, %g3 + sethi %hi(C_LABEL(page_contig_offset)), %g2 + ld [%g2 + %lo(C_LABEL(page_contig_offset))], %g2 + ba 2f + sub %o0, %g2, %g3 viking_mxcc_flush_page: sethi %hi(C_LABEL(srmmu_v2p_hash)), %g2 @@ -134,17 +155,12 @@ viking_mxcc_flush_page: ld [%g3], %o1 sub %o0, %o1, %g2 ld [%g3 + 4], %o0 - sethi %hi(PAGE_SIZE), %g4 add %g2, %o0, %g3 - add %g3, %g4, %g3 ! ppage + PAGE_SIZE - +2: sub %g3, -PAGE_SIZE, %g3 ! ppage + PAGE_SIZE mov 0x10, %g2 ! set cacheable bit - sethi %hi(MXCC_SRCSTREAM), %o2 - or %o2, %lo(MXCC_SRCSTREAM), %o2 - sethi %hi(MXCC_DESSTREAM), %o3 + sethi %hi(MXCC_SRCSTREAM), %o3 ! 
assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESTSTREAM) + or %o3, %lo(MXCC_SRCSTREAM), %o2 or %o3, %lo(MXCC_DESSTREAM), %o3 - -5: sub %g3, MXCC_STREAM_SIZE, %g3 6: stda %g2, [%o2] ASI_M_MXCC @@ -168,7 +184,6 @@ viking_flush_cache_page: nop viking_flush_tlb_all: - WINDOW_FLUSH(%g4, %g5) mov 0x400, %g1 retl sta %g0, [%g1] ASI_M_FLUSH_PROBE @@ -179,16 +194,15 @@ viking_flush_tlb_mm: lda [%g1] ASI_M_MMUREGS, %g5 #ifndef __SMP__ cmp %o1, -1 - be viking_flush_tlb_mm_out + be 1f #endif - WINDOW_FLUSH(%g2, %g3) - mov 0x300, %g2 sta %o1, [%g1] ASI_M_MMUREGS sta %g0, [%g2] ASI_M_FLUSH_PROBE -viking_flush_tlb_mm_out: retl sta %g5, [%g1] ASI_M_MMUREGS +1: retl + nop viking_flush_tlb_range: mov SRMMU_CTX_REG, %g1 @@ -196,42 +210,39 @@ viking_flush_tlb_range: lda [%g1] ASI_M_MMUREGS, %g5 #ifndef __SMP__ cmp %o3, -1 - be viking_flush_tlb_range_out + be 2f #endif - WINDOW_FLUSH(%g2, %g3) - srl %o1, SRMMU_PGDIR_SHIFT, %o1 sta %o3, [%g1] ASI_M_MMUREGS sll %o1, SRMMU_PGDIR_SHIFT, %o1 sethi %hi(1 << SRMMU_PGDIR_SHIFT), %o4 add %o1, 0x200, %o1 sta %g0, [%o1] ASI_M_FLUSH_PROBE -1: - add %o1, %o4, %o1 +1: add %o1, %o4, %o1 cmp %o1, %o2 blu,a 1b sta %g0, [%o1] ASI_M_FLUSH_PROBE -viking_flush_tlb_range_out: retl sta %g5, [%g1] ASI_M_MMUREGS +2: retl + nop viking_flush_tlb_page: ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 - and %o1, PAGE_MASK, %o1 lda [%g1] ASI_M_MMUREGS, %g5 #ifndef __SMP__ cmp %o3, -1 - be viking_flush_tlb_page_out + be 1f #endif - WINDOW_FLUSH(%g2, %g3) - + and %o1, PAGE_MASK, %o1 sta %o3, [%g1] ASI_M_MMUREGS sta %g0, [%o1] ASI_M_FLUSH_PROBE -viking_flush_tlb_page_out: retl sta %g5, [%g1] ASI_M_MMUREGS +1: retl + nop viking_flush_page_to_ram: viking_flush_page_for_dma: |
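For context on the `srmmu_c_*` fast path this patch introduces: on sun4m, physical memory can be badly non-contiguous, so VA<->PA translation normally goes through the `srmmu_v2p_hash`/`srmmu_p2v_hash` tables. The patch detects at boot whether every bank in `srmmu_map[]` agrees with a constant-offset mapping (`srmmu_map[entry].pbase != srmmu_c_v2p(vbase)`) and, if so, swaps in the offset-based translators and the matching `viking_c_*` flush entry points above, which subtract `KERNBASE` or `page_contig_offset` instead of walking the hash. The following is a user-space model of the two strategies, with assumed example constants and a made-up two-bank table standing in for the PROM-provided map; none of these values are the kernel's.

```c
/* User-space model of the VA->PA strategies in srmmu.c.  All constants
 * and the bank table are assumed example values, not the kernel's. */
#include <stdio.h>

#define KERNBASE 0xf0000000ul            /* assumed kernel virtual base */

struct srmmu_trans { unsigned long vbase, pbase, size; };

/* Assumed two-bank layout standing in for the PROM-provided map. */
static struct srmmu_trans srmmu_map[] = {
    { 0xf0000000ul, 0x00000000ul, 0x01000000ul },
    { 0xf1000000ul, 0x08000000ul, 0x01000000ul },
    { 0, 0, 0 }
};

/* General case: scan the bank table (the kernel hashes this lookup). */
static unsigned long v2p_banks(unsigned long vaddr)
{
    for (int i = 0; srmmu_map[i].size; i++)
        if (vaddr >= srmmu_map[i].vbase &&
            vaddr < srmmu_map[i].vbase + srmmu_map[i].size)
            return vaddr - srmmu_map[i].vbase + srmmu_map[i].pbase;
    return ~0ul;  /* not mapped */
}

/* Contiguous fast path (as in srmmu_c_v2p): a constant offset suffices. */
static unsigned long page_contig_offset = 0xf0000000ul;  /* assumed */

static unsigned long v2p_contig(unsigned long vaddr)
{
    if (vaddr >= KERNBASE)
        return vaddr - KERNBASE;
    return vaddr - page_contig_offset;
}

int main(void)
{
    unsigned long va = 0xf0002000ul;
    printf("banks:  %#lx -> %#lx\n", va, v2p_banks(va));
    printf("contig: %#lx -> %#lx\n", va, v2p_contig(va));
    return 0;
}
```

In the patch, `srmmu_paging_init` only installs the fast path when every bank passes the check, printing "SRMMU: Physical memory is contiguous, bypassing VA<->PA hashes"; otherwise the hashed lookup stays in place.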