Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r--  arch/sparc/mm/srmmu.c | 1615
1 file changed, 598 insertions(+), 1017 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 7d9b653df..9d3afdbdf 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,4 +1,4 @@
-/* $Id: srmmu.c,v 1.103 1996/10/31 06:28:35 davem Exp $
+/* $Id: srmmu.c,v 1.136 1997/04/20 14:11:51 ecd Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -10,6 +10,8 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -25,6 +27,8 @@
#include <asm/iommu.h>
#include <asm/asi.h>
#include <asm/msi.h>
+#include <asm/a.out.h>
+#include <asm/mmu_context.h>
/* Now the cpu specific definitions. */
#include <asm/viking.h>
@@ -42,25 +46,20 @@ int vac_badbits;
extern unsigned long sparc_iobase_vaddr;
#ifdef __SMP__
-extern void smp_capture(void);
-extern void smp_release(void);
+#define FLUSH_BEGIN(mm)
+#define FLUSH_END
#else
-#define smp_capture()
-#define smp_release()
-#endif /* !(__SMP__) */
-
-/* #define USE_CHUNK_ALLOC 1 */
+#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_END }
+#endif
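To make the new macros concrete, here is how one of the converted routines reads after UP preprocessing (a sketch, not part of the patch; on SMP both macros expand to nothing, so the flush body always runs):

	static void tsunami_flush_cache_mm(struct mm_struct *mm)
	{
		if ((mm)->context != NO_CONTEXT) {	/* FLUSH_BEGIN(mm) */
			flush_user_windows();
			tsunami_flush_icache();
			tsunami_flush_dcache();
		}					/* FLUSH_END */
	}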
static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
static void (*flush_page_for_dma)(unsigned long page);
-static void (*flush_cache_page_to_uncache)(unsigned long page);
-static void (*flush_tlb_page_for_cbit)(unsigned long page);
+static void (*flush_chunk)(unsigned long chunk);
#ifdef __SMP__
static void (*local_flush_page_for_dma)(unsigned long page);
-static void (*local_flush_cache_page_to_uncache)(unsigned long page);
-static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
#endif
static struct srmmu_stats {
@@ -75,7 +74,10 @@ static char *srmmu_name;
ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;
-static struct srmmu_trans {
+/* Don't change the layout of this struct without also changing the
+ * code that accesses it in arch/sparc/mm/viking.S.
+ */
+struct srmmu_trans {
unsigned long vbase;
unsigned long pbase;
unsigned long size;
@@ -144,22 +146,12 @@ static inline unsigned long srmmu_p2v(unsigned long paddr)
*/
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
-#if MEM_BUS_SPACE
- /* the AP1000 has its memory on bus 8, not 0 like suns do */
- if (!(value&KERNBASE))
- value |= MEM_BUS_SPACE<<28;
- if (value == MEM_BUS_SPACE<<28) value = 0;
-#endif
- __asm__ __volatile__("swap [%2], %0\n\t" :
- "=&r" (value) :
- "0" (value), "r" (addr));
+ __asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
return value;
}
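The SPARC swap instruction exchanges a register with a memory word in a single atomic bus transaction; a non-atomic C sketch of the same effect:

	unsigned long old = *addr;	/* both steps happen in one  */
	*addr = value;			/* indivisible bus cycle     */
	return old;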
/* Functions really use this, not srmmu_swap directly. */
-#define srmmu_set_entry(ptr, newentry) \
- srmmu_swap((unsigned long *) (ptr), (newentry))
-
+#define srmmu_set_entry(ptr, newentry) srmmu_swap((unsigned long *) (ptr), (newentry))
/* The very generic SRMMU page table operations. */
static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
@@ -170,27 +162,29 @@ static unsigned long srmmu_vmalloc_start(void)
return SRMMU_VMALLOC_START;
}
+static inline int srmmu_device_memory(unsigned long x)
+{
+ return ((x & 0xF0000000) != 0);
+}
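The test works because SRMMU table entries store the 36-bit physical address shifted right by 4, so entry bits 28-31 carry physical address bits 32-35: the I/O bus space selector, which is zero for ordinary RAM. A sketch of the layout (pte case):

	/* entry = (paddr >> 4) | control bits
	 *   bits 31..28 : paddr bits 35..32 (bus/IO space, 0 for RAM)
	 *   bits 27..8  : paddr bits 31..12 (page frame number)
	 *   bits  7..0  : C, M, R, ACC, ET control fields
	 */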
+
static unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+{ return srmmu_device_memory(pgd_val(pgd))?~0:srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pmd_page(pmd_t pmd)
-{ return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
-
-static inline int srmmu_device_memory(pte_t pte)
-{
- return (pte_val(pte)>>28) != MEM_BUS_SPACE;
-}
+{ return srmmu_device_memory(pmd_val(pmd))?~0:srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pte_page(pte_t pte)
-{ return srmmu_device_memory(pte)?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+{ return srmmu_device_memory(pte_val(pte))?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
-static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
+static int srmmu_pte_none(pte_t pte)
+{ return !(pte_val(pte) & 0xFFFFFFF); }
static int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
-static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
+static int srmmu_pmd_none(pmd_t pmd)
+{ return !(pmd_val(pmd) & 0xFFFFFFF); }
static int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
@@ -199,7 +193,9 @@ static int srmmu_pmd_present(pmd_t pmd)
static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
-static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
+static int srmmu_pgd_none(pgd_t pgd)
+{ return !(pgd_val(pgd) & 0xFFFFFFF); }
+
static int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
@@ -212,28 +208,26 @@ static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRIT
static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
-static pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
-static pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
-static pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~SRMMU_REF; return pte; }
-static pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) |= SRMMU_WRITE; return pte; }
-static pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= SRMMU_DIRTY; return pte; }
-static pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= SRMMU_REF; return pte; }
+static pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
+static pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
+static pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);}
+static pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);}
+static pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);}
+static pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
+{ return __pte(((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot)); }
static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
-{ pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
+{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
- pte_t pte;
- pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
- return pte;
+ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
@@ -253,8 +247,7 @@ static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{
- pte_val(pte) = (pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot);
- return pte;
+ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
}
/* to find an entry in a top-level page table... */
@@ -279,68 +272,17 @@ static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
if(tsk->mm->context != NO_CONTEXT) {
- flush_cache_mm(current->mm);
+ flush_cache_mm(tsk->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
- flush_tlb_mm(current->mm);
+ flush_tlb_mm(tsk->mm);
}
}
-static inline void srmmu_uncache_page(unsigned long addr)
-{
- pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
- pmd_t *pmdp;
- pte_t *ptep;
-
- if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- ptep = (pte_t *) pgdp;
- } else {
- pmdp = srmmu_pmd_offset(pgdp, addr);
- if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- ptep = (pte_t *) pmdp;
- } else {
- ptep = srmmu_pte_offset(pmdp, addr);
- }
- }
-
- flush_cache_page_to_uncache(addr);
- set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
- flush_tlb_page_for_cbit(addr);
-}
-
-static inline void srmmu_recache_page(unsigned long addr)
-{
- pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
- pmd_t *pmdp;
- pte_t *ptep;
-
- if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- ptep = (pte_t *) pgdp;
- } else {
- pmdp = srmmu_pmd_offset(pgdp, addr);
- if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
- ptep = (pte_t *) pmdp;
- } else {
- ptep = srmmu_pte_offset(pmdp, addr);
- }
- }
- set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
- flush_tlb_page_for_cbit(addr);
-}
-
-static inline unsigned long srmmu_getpage(void)
-{
- unsigned long page = get_free_page(GFP_KERNEL);
-
- return page;
-}
-
static inline void srmmu_putpage(unsigned long page)
{
free_page(page);
}
-#ifdef USE_CHUNK_ALLOC
-
#define LC_HIGH_WATER 128
#define BC_HIGH_WATER 32
@@ -368,7 +310,7 @@ static int garbage_calls = 0;
#define OTHER_PAGE(p,q) (((unsigned long)(p) ^ (unsigned long)(q)) & PAGE_MASK)
-static inline int garbage_collect(unsigned long **cnks, int n, int cpp)
+static int garbage_collect(unsigned long **cnks, int n, int cpp)
{
struct chunk *root = (struct chunk *)*cnks;
struct chunk *p, *q, *curr, *next;
@@ -464,8 +406,7 @@ static inline int garbage_collect(unsigned long **cnks, int n, int cpp)
return water;
}
-
-static inline unsigned long *get_small_chunk(void)
+static unsigned long *get_small_chunk(void)
{
unsigned long *rval;
unsigned long flags;
@@ -507,6 +448,7 @@ static inline unsigned long *get_small_chunk(void)
lcjiffies = jiffies;
restore_flags(flags);
memset(rval, 0, 256);
+ flush_chunk((unsigned long)rval);
return rval;
}
@@ -526,7 +468,7 @@ static inline void free_small_chunk(unsigned long *it)
restore_flags(flags);
}
-static inline unsigned long *get_big_chunk(void)
+static unsigned long *get_big_chunk(void)
{
unsigned long *rval;
unsigned long flags;
@@ -556,6 +498,7 @@ static inline unsigned long *get_big_chunk(void)
bcjiffies = jiffies;
restore_flags(flags);
memset(rval, 0, 1024);
+ flush_chunk((unsigned long)rval);
return rval;
}
@@ -582,18 +525,6 @@ static inline void free_big_chunk(unsigned long *it)
#define FREE_PMD(chunk) free_small_chunk((unsigned long *)(chunk))
#define FREE_PTE(chunk) free_small_chunk((unsigned long *)(chunk))
-#else
-
-/* The easy versions. */
-#define NEW_PGD() (pgd_t *) srmmu_getpage()
-#define NEW_PMD() (pmd_t *) srmmu_getpage()
-#define NEW_PTE() (pte_t *) srmmu_getpage()
-#define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
-#define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
-#define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
-
-#endif
-
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
@@ -730,12 +661,23 @@ static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
{
- unsigned long flags;
+ unsigned long page = ((unsigned long)ptep) & PAGE_MASK;
- save_and_cli(flags);
srmmu_set_entry(ptep, pte_val(pteval));
- hyper_flush_cache_page(((unsigned long)ptep) & PAGE_MASK);
- restore_flags(flags);
+ __asm__ __volatile__("
+ lda [%0] %2, %%g4
+ orcc %%g4, 0x0, %%g0
+ be 2f
+ sethi %%hi(%7), %%g5
+1: subcc %%g5, %6, %%g5 ! hyper_flush_cache_page
+ bne 1b
+ sta %%g0, [%1 + %%g5] %3
+ lda [%4] %5, %%g0
+2:" : /* no outputs */
+ : "r" (page | 0x400), "r" (page), "i" (ASI_M_FLUSH_PROBE),
+ "i" (ASI_M_FLUSH_PAGE), "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS),
+ "r" (vac_line_size), "i" (PAGE_SIZE)
+ : "g4", "g5", "cc");
}
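Read step by step, the HyperSparc sequence above does the following (an annotation of the assembly, not new behavior):

	/* 1. lda [page|0x400] ASI_M_FLUSH_PROBE probes the MMU for a
	 *    translation of this page (0x400 selects an all-level probe);
	 * 2. if the probe returns zero the page is unmapped and the branch
	 *    to 2: skips the flush entirely;
	 * 3. otherwise the 1: loop stores to [page + off] ASI_M_FLUSH_PAGE
	 *    for every vac_line_size offset, flushing each cache line in
	 *    the page;
	 * 4. finally lda [SRMMU_FAULT_STATUS] ASI_M_MMUREGS clears any
	 *    fault status the probe may have latched.
	 */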
static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
@@ -766,16 +708,15 @@ static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
} while(line != page);
}
-static void srmmu_set_pte_nocache_nomxccvik(pte_t *ptep, pte_t pteval)
+static void srmmu_set_pte_nocache_viking(pte_t *ptep, pte_t pteval)
{
- unsigned long paddr = srmmu_v2p(((unsigned long)ptep));
unsigned long vaddr;
int set;
int i;
- set = (paddr >> 5) & 0x7f;
+ set = ((unsigned long)ptep >> 5) & 0x7f;
vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
- srmmu_set_entry(ptep, pteval);
+ srmmu_set_entry(ptep, pte_val(pteval));
for (i = 0; i < 8; i++) {
__asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
vaddr += PAGE_SIZE;
@@ -795,18 +736,10 @@ static void srmmu_quick_kernel_fault(unsigned long address)
#endif
}
-static inline void alloc_context(struct task_struct *tsk)
+static inline void alloc_context(struct mm_struct *mm)
{
- struct mm_struct *mm = tsk->mm;
struct ctx_list *ctxp;
-#if CONFIG_AP1000
- if (tsk->taskid >= MPP_TASK_BASE) {
- mm->context = MPP_CONTEXT_BASE + (tsk->taskid - MPP_TASK_BASE);
- return;
- }
-#endif
-
ctxp = ctx_free.next;
if(ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
@@ -833,11 +766,6 @@ static inline void free_context(int context)
{
struct ctx_list *ctx_old;
-#if CONFIG_AP1000
- if (context >= MPP_CONTEXT_BASE)
- return; /* nothing to do! */
-#endif
-
ctx_old = ctx_list_pool + context;
remove_from_ctx_list(ctx_old);
add_to_free_ctxlist(ctx_old);
@@ -847,14 +775,26 @@ static inline void free_context(int context)
static void srmmu_switch_to_context(struct task_struct *tsk)
{
if(tsk->mm->context == NO_CONTEXT) {
- alloc_context(tsk);
- flush_cache_mm(current->mm);
+ alloc_context(tsk->mm);
+ flush_cache_mm(tsk->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
- flush_tlb_mm(current->mm);
+ flush_tlb_mm(tsk->mm);
}
srmmu_set_context(tsk->mm->context);
}
+static void srmmu_init_new_context(struct mm_struct *mm)
+{
+ alloc_context(mm);
+
+ flush_cache_mm(mm);
+ ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+ flush_tlb_mm(mm);
+
+ if(mm == current->mm)
+ srmmu_set_context(mm->context);
+}
+
/* Low level IO area allocation on the SRMMU. */
void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
@@ -879,7 +819,7 @@ void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_ty
else
tmp |= SRMMU_PRIV;
flush_page_to_ram(virt_addr);
- set_pte(ptep, tmp);
+ set_pte(ptep, __pte(tmp));
flush_tlb_all();
}
@@ -894,7 +834,7 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
ptep = srmmu_pte_offset(pmdp, virt_addr);
/* No need to flush uncacheable page. */
- set_pte(ptep, pte_val(srmmu_mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)));
+ set_pte(ptep, srmmu_mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
flush_tlb_all();
}
@@ -907,6 +847,9 @@ static void srmmu_unlockarea(char *vaddr, unsigned long len)
{
}
+/* This is used in many routines below. */
+#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
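This is the classic null-pointer offsetof idiom; with offsetof() available it is equivalent to (sketch):

	#define UWINMASK_OFFSET offsetof(struct task_struct, tss.uwinmask)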
+
/* On the SRMMU we do not have the problems with limited tlb entries
* for mapping kernel pages, so we just take things from the free page
* pool. As a side effect we are putting a little too much pressure
@@ -922,7 +865,12 @@ struct task_struct *srmmu_alloc_task_struct(void)
unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
{
- return __get_free_pages(GFP_KERNEL, 1, 0);
+ unsigned long kstk = __get_free_pages(GFP_KERNEL, 1, 0);
+
+ if(!kstk)
+ kstk = (unsigned long) vmalloc(PAGE_SIZE << 1);
+
+ return kstk;
}
static void srmmu_free_task_struct(struct task_struct *tsk)
@@ -932,7 +880,10 @@ static void srmmu_free_task_struct(struct task_struct *tsk)
static void srmmu_free_kernel_stack(unsigned long stack)
{
- free_pages(stack, 1);
+ if(stack < VMALLOC_START)
+ free_pages(stack, 1);
+ else
+ vfree((char *)stack);
}
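The address check relies on the two allocators drawing from disjoint ranges, an invariant worth stating explicitly:

	/* __get_free_pages() returns linear-mapping addresses, all below
	 * VMALLOC_START; vmalloc() returns addresses at or above it. The
	 * stack address alone therefore identifies which allocator owns it.
	 */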
/* Tsunami flushes. Its page level tlb invalidation is not very
@@ -948,47 +899,29 @@ static void tsunami_flush_cache_all(void)
static void tsunami_flush_cache_mm(struct mm_struct *mm)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- tsunami_flush_icache();
- tsunami_flush_dcache();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+ FLUSH_END
}
static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- tsunami_flush_icache();
- tsunami_flush_dcache();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ tsunami_flush_icache();
+ tsunami_flush_dcache();
+ FLUSH_END
}
static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
-#ifndef __SMP__
- struct mm_struct *mm = vma->vm_mm;
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- tsunami_flush_icache();
- tsunami_flush_dcache();
-#ifndef __SMP__
- }
-#endif
-}
-
-static void tsunami_flush_cache_page_to_uncache(unsigned long page)
-{
+ FLUSH_BEGIN(vma->vm_mm)
+ flush_user_windows();
+ tsunami_flush_icache();
tsunami_flush_dcache();
+ FLUSH_END
}
/* Tsunami does not have a Copy-back style virtual cache. */
@@ -1003,62 +936,57 @@ static void tsunami_flush_page_for_dma(unsigned long page)
tsunami_flush_dcache();
}
+/* Tsunami has Harvard-style split I/D caches which do not snoop each other,
+ * so we have to flush on-stack sig insns. Only the icache need be flushed
+ * since the Tsunami has a write-through data cache.
+ */
+static void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+ tsunami_flush_icache();
+}
+
+static void tsunami_flush_chunk(unsigned long chunk)
+{
+}
+
static void tsunami_flush_tlb_all(void)
{
- module_stats.invall++;
srmmu_flush_whole_tlb();
+ module_stats.invall++;
}
static void tsunami_flush_tlb_mm(struct mm_struct *mm)
{
+ FLUSH_BEGIN(mm)
+ srmmu_flush_whole_tlb();
module_stats.invmm++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- srmmu_flush_whole_tlb();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_END
}
static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
+ FLUSH_BEGIN(mm)
+ srmmu_flush_whole_tlb();
module_stats.invrnge++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- srmmu_flush_whole_tlb();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_END
}
static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- int octx;
struct mm_struct *mm = vma->vm_mm;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- unsigned long flags;
-
- save_and_cli(flags);
- octx = srmmu_get_context();
-
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_page(page);
- srmmu_set_context(octx);
- restore_flags(flags);
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ __asm__ __volatile__("
+ lda [%0] %3, %%g5
+ sta %1, [%0] %3
+ sta %%g0, [%2] %4
+ sta %%g5, [%0] %3"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
+ "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
+ : "g5");
module_stats.invpg++;
-}
-
-static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
-{
- srmmu_flush_tlb_page(page);
+ FLUSH_END
}
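The inline assembly is the borrowed-context probe-flush pattern this patch uses repeatedly; in rough C, using the helpers the removed code called:

	unsigned long octx = srmmu_get_context();  /* lda [CTX_REG]       */
	srmmu_set_context(mm->context);            /* sta  [CTX_REG]      */
	/* sta %g0, [page & PAGE_MASK] ASI_M_FLUSH_PROBE flushes that
	 * page's TLB entry in the target context */
	srmmu_set_context(octx);                   /* restore old context */

Unlike the removed C version, no save_and_cli()/restore_flags() pair is needed; the save/set/flush/restore dance is done in a handful of instructions.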
/* Swift flushes. It has the recommended SRMMU specification flushing
@@ -1074,41 +1002,28 @@ static void swift_flush_cache_all(void)
static void swift_flush_cache_mm(struct mm_struct *mm)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- swift_idflash_clear();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ swift_idflash_clear();
+ FLUSH_END
}
static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- swift_idflash_clear();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ swift_idflash_clear();
+ FLUSH_END
}
static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
-#ifndef __SMP__
- struct mm_struct *mm = vma->vm_mm;
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- if(vma->vm_flags & VM_EXEC)
- swift_flush_icache();
- swift_flush_dcache();
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(vma->vm_mm)
+ flush_user_windows();
+ if(vma->vm_flags & VM_EXEC)
+ swift_flush_icache();
+ swift_flush_dcache();
+ FLUSH_END
}
/* Not copy-back on swift. */
@@ -1122,48 +1037,47 @@ static void swift_flush_page_for_dma(unsigned long page)
swift_flush_dcache();
}
-static void swift_flush_cache_page_to_uncache(unsigned long page)
+/* Again, Swift has a non-snooping split I/D cache just like Tsunami,
+ * so we have to punt the icache for on-stack signal insns. Only the
+ * icache need be flushed since the dcache is write-through.
+ */
+static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+ swift_flush_icache();
+}
+
+static void swift_flush_chunk(unsigned long chunk)
{
- swift_flush_dcache();
}
static void swift_flush_tlb_all(void)
{
- module_stats.invall++;
srmmu_flush_whole_tlb();
+ module_stats.invall++;
}
static void swift_flush_tlb_mm(struct mm_struct *mm)
{
+ FLUSH_BEGIN(mm)
+ srmmu_flush_whole_tlb();
module_stats.invmm++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT)
-#endif
- srmmu_flush_whole_tlb();
+ FLUSH_END
}
static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
+ FLUSH_BEGIN(mm)
+ srmmu_flush_whole_tlb();
module_stats.invrnge++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT)
-#endif
- srmmu_flush_whole_tlb();
+ FLUSH_END
}
static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
-#ifndef __SMP__
- struct mm_struct *mm = vma->vm_mm;
- if(mm->context != NO_CONTEXT)
-#endif
- srmmu_flush_whole_tlb();
- module_stats.invpg++;
-}
-
-static void swift_flush_tlb_page_for_cbit(unsigned long page)
-{
+ FLUSH_BEGIN(vma->vm_mm)
srmmu_flush_whole_tlb();
+ module_stats.invpg++;
+ FLUSH_END
}
/* The following are all MBUS based SRMMU modules, and therefore could
@@ -1172,212 +1086,6 @@ static void swift_flush_tlb_page_for_cbit(unsigned long page)
* with respect to cache coherency.
*/
-/* Viking flushes. For Sun's mainline MBUS processor it is pretty much
- * a crappy mmu. The on-chip I&D caches only have full flushes, no fine
- * grained cache invalidations. It only has these "flash clear" things
- * just like the MicroSparcI. Added to this many revs of the chip are
- * teaming with hardware buggery. Someday maybe we'll do direct
- * diagnostic tag accesses for page level flushes as those should
- * be painless and will increase performance due to the frequency of
- * page level flushes. This is a must to _really_ flush the caches,
- * crazy hardware ;-)
- */
-
-static void viking_flush_cache_all(void)
-{
-}
-
-static void viking_flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
-}
-
-static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
-}
-
-/* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
-static void viking_flush_page_to_ram(unsigned long page)
-{
-}
-
-static void viking_mxcc_flush_page(unsigned long page)
-{
- unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
- unsigned long paddr0, paddr1;
-
- if (ppage == 0xffffffffUL)
- return;
-
- paddr0 = 0x10; /* Set cacheable bit. */
- paddr1 = ppage;
-
- /* Read the page's data through the stream registers,
- * and write it back to memory. This will issue
- * coherent write invalidates to all other caches, thus
- * should also be sufficient in an MP system.
- */
- __asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
- "or %%g0, %1, %%g3\n"
- "1:\n\t"
- "stda %%g2, [%2] %5\n\t"
- "stda %%g2, [%3] %5\n\t"
- "add %%g3, %4, %%g3\n\t"
- "btst 0xfff, %%g3\n\t"
- "bne 1b\n\t"
- "nop\n\t" : :
- "r" (paddr0), "r" (paddr1),
- "r" (MXCC_SRCSTREAM),
- "r" (MXCC_DESSTREAM),
- "r" (MXCC_STREAM_SIZE),
- "i" (ASI_M_MXCC) : "g2", "g3");
-
- /* This was handcoded after a look at the gcc output from
- *
- * do {
- * mxcc_set_stream_src(paddr);
- * mxcc_set_stream_dst(paddr);
- * paddr[1] += MXCC_STREAM_SIZE;
- * } while (paddr[1] & ~PAGE_MASK);
- */
-}
-
-static void viking_no_mxcc_flush_page(unsigned long page)
-{
- unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
- int set, block;
- unsigned long ptag[2];
- unsigned long vaddr;
- int i;
-
- if (ppage == 0xffffffffUL)
- return;
- ppage >>= 12;
-
- for (set = 0; set < 128; set++) {
- for (block = 0; block < 4; block++) {
-
- viking_get_dcache_ptag(set, block, ptag);
-
- if (ptag[1] != ppage)
- continue;
- if (!(ptag[0] & VIKING_PTAG_VALID))
- continue;
- if (!(ptag[0] & VIKING_PTAG_DIRTY))
- continue;
-
- /* There was a great cache from TI
- * with comfort as much as vi,
- * 4 pages to flush,
- * 4 pages, no rush,
- * since anything else makes him die.
- */
- vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
- for (i = 0; i < 8; i++) {
- __asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
- "r" (vaddr) : "g2");
- vaddr += PAGE_SIZE;
- }
-
- /* Continue with next set. */
- break;
- }
- }
-}
-
-/* Viking is IO cache coherent, but really only on MXCC. */
-static void viking_flush_page_for_dma(unsigned long page)
-{
-}
-
-static void viking_flush_tlb_all(void)
-{
- module_stats.invall++;
- flush_user_windows();
- srmmu_flush_whole_tlb();
-}
-
-static void viking_flush_tlb_mm(struct mm_struct *mm)
-{
- int octx;
- module_stats.invmm++;
-
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_ctx();
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
-}
-
-static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- int octx;
- module_stats.invrnge++;
-
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- if((start - end) < SRMMU_PMD_SIZE) {
- start &= PAGE_MASK;
- while(start < end) {
- srmmu_flush_tlb_page(start);
- start += PAGE_SIZE;
- }
- } else if((start - end) < SRMMU_PGDIR_SIZE) {
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- srmmu_flush_tlb_segment(start);
- start += SRMMU_PMD_SIZE;
- }
- } else {
- start &= SRMMU_PGDIR_MASK;
- while(start < end) {
- srmmu_flush_tlb_region(start);
- start += SRMMU_PGDIR_SIZE;
- }
- }
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
-}
-
-static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- int octx;
- struct mm_struct *mm = vma->vm_mm;
-
- module_stats.invpg++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_page(page);
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
-}
-
-static void viking_flush_tlb_page_for_cbit(unsigned long page)
-{
- srmmu_flush_tlb_page(page);
-}
-
/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
@@ -1399,19 +1107,57 @@ static void cypress_flush_cache_all(void)
static void cypress_flush_cache_mm(struct mm_struct *mm)
{
+ register unsigned long a, b, c, d, e, f, g;
unsigned long flags, faddr;
int octx;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- register unsigned long a, b, c, d, e, f, g;
- flush_user_windows();
- save_and_cli(flags);
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- faddr = (0x10000 - 0x100);
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ save_and_cli(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ faddr = (0x10000 - 0x100);
+ goto inside;
+ do {
+ faddr -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (faddr), "i" (ASI_M_FLUSH_CTX),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(faddr);
+ srmmu_set_context(octx);
+ restore_flags(flags);
+ FLUSH_END
+}
+
+static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long flags, faddr;
+ int octx;
+
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ save_and_cli(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ faddr = (start + (0x10000 - 0x100));
goto inside;
do {
faddr -= 0x100;
@@ -1424,99 +1170,55 @@ static void cypress_flush_cache_mm(struct mm_struct *mm)
"sta %%g0, [%0 + %6] %1\n\t"
"sta %%g0, [%0 + %7] %1\n\t"
"sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (faddr), "i" (ASI_M_FLUSH_CTX),
+ "r" (faddr),
+ "i" (ASI_M_FLUSH_SEG),
"r" (a), "r" (b), "r" (c), "r" (d),
"r" (e), "r" (f), "r" (g));
- } while(faddr);
- srmmu_set_context(octx);
- restore_flags(flags);
-#ifndef __SMP__
- }
-#endif
-}
-
-static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- unsigned long flags, faddr;
- int octx;
-
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- register unsigned long a, b, c, d, e, f, g;
- flush_user_windows();
- save_and_cli(flags);
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- faddr = (start + (0x10000 - 0x100));
- goto inside;
- do {
- faddr -= 0x100;
- inside:
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
- "sta %%g0, [%0 + %2] %1\n\t"
- "sta %%g0, [%0 + %3] %1\n\t"
- "sta %%g0, [%0 + %4] %1\n\t"
- "sta %%g0, [%0 + %5] %1\n\t"
- "sta %%g0, [%0 + %6] %1\n\t"
- "sta %%g0, [%0 + %7] %1\n\t"
- "sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (faddr),
- "i" (ASI_M_FLUSH_SEG),
- "r" (a), "r" (b), "r" (c), "r" (d),
- "r" (e), "r" (f), "r" (g));
- } while (faddr != start);
- start += SRMMU_PMD_SIZE;
- }
- srmmu_set_context(octx);
- restore_flags(flags);
-#ifndef __SMP__
+ } while (faddr != start);
+ start += SRMMU_PMD_SIZE;
}
-#endif
+ srmmu_set_context(octx);
+ restore_flags(flags);
+ FLUSH_END
}
static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
+ register unsigned long a, b, c, d, e, f, g;
struct mm_struct *mm = vma->vm_mm;
unsigned long flags, line;
int octx;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- register unsigned long a, b, c, d, e, f, g;
- flush_user_windows();
- save_and_cli(flags);
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- page &= PAGE_MASK;
- line = (page + PAGE_SIZE) - 0x100;
- goto inside;
- do {
- line -= 0x100;
- inside:
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
- "sta %%g0, [%0 + %2] %1\n\t"
- "sta %%g0, [%0 + %3] %1\n\t"
- "sta %%g0, [%0 + %4] %1\n\t"
- "sta %%g0, [%0 + %5] %1\n\t"
- "sta %%g0, [%0 + %6] %1\n\t"
- "sta %%g0, [%0 + %7] %1\n\t"
- "sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (line),
- "i" (ASI_M_FLUSH_PAGE),
- "r" (a), "r" (b), "r" (c), "r" (d),
- "r" (e), "r" (f), "r" (g));
- } while(line != page);
- srmmu_set_context(octx);
- restore_flags(flags);
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ flush_user_windows();
+ save_and_cli(flags);
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ a = 0x20; b = 0x40; c = 0x60;
+ d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
+ srmmu_set_context(octx);
+ restore_flags(flags);
+ FLUSH_END
}
/* Cypress is copy-back, at least that is how we configure it. */
@@ -1547,314 +1249,177 @@ static void cypress_flush_page_to_ram(unsigned long page)
} while(line != page);
}
+static void cypress_flush_chunk(unsigned long chunk)
+{
+ cypress_flush_page_to_ram(chunk);
+}
+
/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}
-static void cypress_flush_page_to_uncache(unsigned long page)
+/* Cypress has a unified L2 VIPT cache in which both instructions and data
+ * are stored. It does not have an onboard icache of any sort, therefore
+ * no flush is necessary.
+ */
+static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
- register unsigned long a, b, c, d, e, f, g;
- unsigned long line;
-
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- page &= PAGE_MASK;
- line = (page + PAGE_SIZE) - 0x100;
- goto inside;
- do {
- line -= 0x100;
- inside:
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
- "sta %%g0, [%0 + %2] %1\n\t"
- "sta %%g0, [%0 + %3] %1\n\t"
- "sta %%g0, [%0 + %4] %1\n\t"
- "sta %%g0, [%0 + %5] %1\n\t"
- "sta %%g0, [%0 + %6] %1\n\t"
- "sta %%g0, [%0 + %7] %1\n\t"
- "sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (line),
- "i" (ASI_M_FLUSH_PAGE),
- "r" (a), "r" (b), "r" (c), "r" (d),
- "r" (e), "r" (f), "r" (g));
- } while(line != page);
}
static void cypress_flush_tlb_all(void)
{
- module_stats.invall++;
srmmu_flush_whole_tlb();
+ module_stats.invall++;
}
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
- int octx;
-
+ FLUSH_BEGIN(mm)
+ __asm__ __volatile__("
+ lda [%0] %3, %%g5
+ sta %2, [%0] %3
+ sta %%g0, [%1] %4
+ sta %%g5, [%0] %3"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
+ "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
+ : "g5");
module_stats.invmm++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_ctx();
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
+ FLUSH_END
}
static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
- int octx;
- module_stats.invrnge++;
+ unsigned long size;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- if((start - end) < SRMMU_PMD_SIZE) {
- start &= PAGE_MASK;
- while(start < end) {
- srmmu_flush_tlb_page(start);
- start += PAGE_SIZE;
- }
- } else if((start - end) < SRMMU_PGDIR_SIZE) {
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- srmmu_flush_tlb_segment(start);
- start += SRMMU_PMD_SIZE;
- }
- } else {
- start &= SRMMU_PGDIR_MASK;
- while(start < end) {
- srmmu_flush_tlb_region(start);
- start += SRMMU_PGDIR_SIZE;
- }
- }
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
+ FLUSH_BEGIN(mm)
+ start &= SRMMU_PGDIR_MASK;
+ size = SRMMU_PGDIR_ALIGN(end) - start;
+ __asm__ __volatile__("
+ lda [%0] %5, %%g5
+ sta %1, [%0] %5
+ 1: subcc %3, %4, %3
+ bne 1b
+ sta %%g0, [%2 + %3] %6
+ sta %%g5, [%0] %5"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
+ "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
+ "i" (ASI_M_FLUSH_PROBE)
+ : "g5", "cc");
+ module_stats.invrnge++;
+ FLUSH_END
}
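Unrolled, the Cypress range flush amounts to (an annotation of the assembly above):

	/* octx = CTX_REG;  CTX_REG = mm->context;
	 * for (off = size; off != 0; off -= SRMMU_PGDIR_SIZE)
	 *         sta %g0, [(start|0x200) + off - SRMMU_PGDIR_SIZE]
	 *                  ASI_M_FLUSH_PROBE;
	 * CTX_REG = octx;
	 *
	 * The 0x200 in the probe address selects region-level matching, so
	 * each store knocks out the TLB entries for a whole pgdir-sized
	 * (16MB) region, rather than picking a granularity in C the way
	 * the removed version did.
	 */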
static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- int octx;
struct mm_struct *mm = vma->vm_mm;
+ FLUSH_BEGIN(mm)
+ __asm__ __volatile__("
+ lda [%0] %3, %%g5
+ sta %1, [%0] %3
+ sta %%g0, [%2] %4
+ sta %%g5, [%0] %3"
+ : /* no outputs */
+ : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
+ "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
+ : "g5");
module_stats.invpg++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_page(page);
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
-}
+ FLUSH_END
+}
+
+/* viking.S */
+extern void viking_flush_cache_all(void);
+extern void viking_flush_cache_mm(struct mm_struct *mm);
+extern void viking_flush_cache_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+extern void viking_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long page);
+extern void viking_flush_page_to_ram(unsigned long page);
+extern void viking_flush_page_for_dma(unsigned long page);
+extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
+extern void viking_flush_page(unsigned long page);
+extern void viking_mxcc_flush_page(unsigned long page);
+extern void viking_flush_chunk(unsigned long chunk);
+extern void viking_mxcc_flush_chunk(unsigned long chunk);
+extern void viking_flush_tlb_all(void);
+extern void viking_flush_tlb_mm(struct mm_struct *mm);
+extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+extern void viking_flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long page);
+
+/* hypersparc.S */
+extern void hypersparc_flush_cache_all(void);
+extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
+extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void hypersparc_flush_page_to_ram(unsigned long page);
+extern void hypersparc_flush_chunk(unsigned long chunk);
+extern void hypersparc_flush_page_for_dma(unsigned long page);
+extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void hypersparc_flush_tlb_all(void);
+extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
+extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
+extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-static void cypress_flush_tlb_page_for_cbit(unsigned long page)
-{
- srmmu_flush_tlb_page(page);
-}
-
-/* Hypersparc flushes. Very nice chip... */
-static void hypersparc_flush_cache_all(void)
+static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
- flush_user_windows();
- hyper_flush_unconditional_combined();
hyper_flush_whole_icache();
+ set_pte((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4))));
}
-static void hypersparc_flush_cache_mm(struct mm_struct *mm)
-{
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- hyper_flush_cache_user();
- hyper_flush_whole_icache();
-#ifndef __SMP__
- }
-#endif
-}
-
-/* Boy was my older implementation inefficient... */
-static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- volatile unsigned long clear;
- int octx;
-
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- flush_user_windows();
- octx = srmmu_get_context();
- start &= PAGE_MASK;
- srmmu_set_context(mm->context);
- while(start < end) {
- if(srmmu_hwprobe(start))
- hyper_flush_cache_page(start);
- start += PAGE_SIZE;
- }
- clear = srmmu_get_fstatus();
- srmmu_set_context(octx);
- hyper_flush_whole_icache();
-#ifndef __SMP__
- }
-#endif
-}
-
-/* HyperSparc requires a valid mapping where we are about to flush
- * in order to check for a physical tag match during the flush.
- */
-static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
- struct mm_struct *mm = vma->vm_mm;
- volatile unsigned long clear;
- int octx;
-
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
- octx = srmmu_get_context();
- flush_user_windows();
- srmmu_set_context(mm->context);
- hyper_flush_whole_icache();
- if(!srmmu_hwprobe(page))
- goto no_mapping;
- hyper_flush_cache_page(page);
- no_mapping:
- clear = srmmu_get_fstatus();
- srmmu_set_context(octx);
-#ifndef __SMP__
- }
-#endif
-}
-
-/* HyperSparc is copy-back. */
-static void hypersparc_flush_page_to_ram(unsigned long page)
-{
- volatile unsigned long clear;
-
- if(srmmu_hwprobe(page))
- hyper_flush_cache_page(page);
- clear = srmmu_get_fstatus();
-}
-
-/* HyperSparc is IO cache coherent. */
-static void hypersparc_flush_page_for_dma(unsigned long page)
-{
-}
-
-static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
-{
- volatile unsigned long clear;
-
- if(srmmu_hwprobe(page))
- hyper_flush_cache_page(page);
- clear = srmmu_get_fstatus();
-}
-
-static void hypersparc_flush_tlb_all(void)
-{
- module_stats.invall++;
- srmmu_flush_whole_tlb();
-}
-
-static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
-{
- int octx;
-
- module_stats.invmm++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
-
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_ctx();
- srmmu_set_context(octx);
-
-#ifndef __SMP__
- }
-#endif
-}
-
-static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
- int octx;
-
- module_stats.invrnge++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
+ unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- if((start - end) < SRMMU_PMD_SIZE) {
- start &= PAGE_MASK;
- while(start < end) {
- srmmu_flush_tlb_page(start);
- start += PAGE_SIZE;
- }
- } else if((start - end) < SRMMU_PGDIR_SIZE) {
- start &= SRMMU_PMD_MASK;
- while(start < end) {
- srmmu_flush_tlb_segment(start);
- start += SRMMU_PMD_SIZE;
- }
- } else {
- start &= SRMMU_PGDIR_MASK;
- while(start < end) {
- srmmu_flush_tlb_region(start);
- start += SRMMU_PGDIR_SIZE;
- }
- }
- srmmu_set_context(octx);
-
-#ifndef __SMP__
+ hypersparc_flush_page_to_ram(page);
+ if(tsk->mm->context != NO_CONTEXT) {
+ flush_cache_mm(tsk->mm);
+ ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+ flush_tlb_mm(tsk->mm);
}
-#endif
}
-static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
- struct mm_struct *mm = vma->vm_mm;
- int octx;
-
- module_stats.invpg++;
-#ifndef __SMP__
- if(mm->context != NO_CONTEXT) {
-#endif
-
- octx = srmmu_get_context();
- srmmu_set_context(mm->context);
- srmmu_flush_tlb_page(page);
- srmmu_set_context(octx);
-
-#ifndef __SMP__
+ viking_flush_page((unsigned long)pgdp);
+ if(tsk->mm->context != NO_CONTEXT) {
+ flush_cache_mm(current->mm);
+ ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+ flush_tlb_mm(current->mm);
}
-#endif
}
-static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
+static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
- srmmu_flush_tlb_page(page);
-}
+ register unsigned long a, b, c, d, e, f, g;
+ unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
+ unsigned long line;
-static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{
- hyper_flush_whole_icache();
- set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
-}
+ a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+ page &= PAGE_MASK;
+ line = (page + PAGE_SIZE) - 0x100;
+ goto inside;
+ do {
+ line -= 0x100;
+ inside:
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+ "sta %%g0, [%0 + %2] %1\n\t"
+ "sta %%g0, [%0 + %3] %1\n\t"
+ "sta %%g0, [%0 + %4] %1\n\t"
+ "sta %%g0, [%0 + %5] %1\n\t"
+ "sta %%g0, [%0 + %6] %1\n\t"
+ "sta %%g0, [%0 + %7] %1\n\t"
+ "sta %%g0, [%0 + %8] %1\n\t" : :
+ "r" (line),
+ "i" (ASI_M_FLUSH_PAGE),
+ "r" (a), "r" (b), "r" (c), "r" (d),
+ "r" (e), "r" (f), "r" (g));
+ } while(line != page);
-static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
-{
if(tsk->mm->context != NO_CONTEXT) {
flush_cache_mm(current->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
@@ -1866,14 +1431,28 @@ static void hypersparc_switch_to_context(struct task_struct *tsk)
{
hyper_flush_whole_icache();
if(tsk->mm->context == NO_CONTEXT) {
- alloc_context(tsk);
- flush_cache_mm(current->mm);
+ alloc_context(tsk->mm);
+ flush_cache_mm(tsk->mm);
ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
- flush_tlb_mm(current->mm);
+ flush_tlb_mm(tsk->mm);
}
srmmu_set_context(tsk->mm->context);
}
+static void hypersparc_init_new_context(struct mm_struct *mm)
+{
+ hyper_flush_whole_icache();
+
+ alloc_context(mm);
+
+ flush_cache_mm(mm);
+ ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+ flush_tlb_mm(mm);
+
+ if(mm == current->mm)
+ srmmu_set_context(mm->context);
+}
+
/* IOMMU things go here. */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
@@ -1890,7 +1469,7 @@ static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu,
iopte += ((first - iommu->start) >> PAGE_SHIFT);
while(first <= last) {
- iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
+ *iopte++ = __iopte(MKIOPTE(srmmu_v2p(first)));
first += PAGE_SIZE;
}
}
@@ -1955,6 +1534,8 @@ unsigned long iommu_init(int iommund, unsigned long memory_start,
/* Initialize new table. */
flush_cache_all();
+ memset(iommu->page_table, 0, ptsize);
+ srmmu_map_dvma_pages_for_iommu(iommu, memory_end);
if(viking_mxcc_present) {
unsigned long start = (unsigned long) iommu->page_table;
unsigned long end = (start + ptsize);
@@ -1962,16 +1543,14 @@ unsigned long iommu_init(int iommund, unsigned long memory_start,
viking_mxcc_flush_page(start);
start += PAGE_SIZE;
}
- } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ } else if(flush_page_for_dma == viking_flush_page) {
unsigned long start = (unsigned long) iommu->page_table;
unsigned long end = (start + ptsize);
while(start < end) {
- viking_no_mxcc_flush_page(start);
+ viking_flush_page(start);
start += PAGE_SIZE;
}
}
- memset(iommu->page_table, 0, ptsize);
- srmmu_map_dvma_pages_for_iommu(iommu, memory_end);
flush_tlb_all();
iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
iommu_invalidate(iommu->regs);
@@ -1997,6 +1576,7 @@ void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus)
/* Initialize new table. */
flush_cache_all();
+ memset(iommu, 0, 16 * PAGE_SIZE);
if(viking_mxcc_present) {
unsigned long start = (unsigned long) iommu;
unsigned long end = (start + 16 * PAGE_SIZE);
@@ -2004,21 +1584,20 @@ void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus)
viking_mxcc_flush_page(start);
start += PAGE_SIZE;
}
- } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ } else if(flush_page_for_dma == viking_flush_page) {
unsigned long start = (unsigned long) iommu;
unsigned long end = (start + 16 * PAGE_SIZE);
while(start < end) {
- viking_no_mxcc_flush_page(start);
+ viking_flush_page(start);
start += PAGE_SIZE;
}
}
- memset(iommu, 0, 16 * PAGE_SIZE);
flush_tlb_all();
sbus->iommu = (struct iommu_struct *)iommu;
}
-static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static __u32 srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
@@ -2026,7 +1605,7 @@ static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbu
flush_page_for_dma(page);
page += PAGE_SIZE;
}
- return vaddr;
+ return (__u32)vaddr;
}
static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
@@ -2039,12 +1618,12 @@ static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus
flush_page_for_dma(page);
page += PAGE_SIZE;
}
- sg[sz].dvma_addr = (char *) (sg[sz].addr);
+ sg[sz].dvma_addr = (__u32) (sg[sz].addr);
sz--;
}
}
-static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static void srmmu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
{
}
@@ -2070,12 +1649,12 @@ static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
- set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
+ set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4))));
}
static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
- set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
+ set_pte((pte_t *)pmdp, __pte((SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4))));
}
static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
@@ -2157,7 +1736,7 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
pgdp = srmmu_pgd_offset(init_task.mm, start);
if(what == 2) {
- pgd_val(*pgdp) = prompte;
+ *pgdp = __pgd(prompte);
start += SRMMU_PGDIR_SIZE;
continue;
}
@@ -2167,7 +1746,7 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
}
pmdp = srmmu_early_pmd_offset(pgdp, start);
if(what == 1) {
- pmd_val(*pmdp) = prompte;
+ *pmdp = __pmd(prompte);
start += SRMMU_PMD_SIZE;
continue;
}
@@ -2176,11 +1755,12 @@ void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
srmmu_early_pmd_set(pmdp, ptep);
}
ptep = srmmu_early_pte_offset(pmdp, start);
- pte_val(*ptep) = prompte;
+ *ptep = __pte(prompte);
start += PAGE_SIZE;
}
}
+#ifdef CONFIG_SBUS
static void srmmu_map_dma_area(unsigned long addr, int len)
{
unsigned long page, end;
@@ -2224,17 +1804,18 @@ static void srmmu_map_dma_area(unsigned long addr, int len)
viking_mxcc_flush_page(start);
start += PAGE_SIZE;
}
- } else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ } else if(flush_page_for_dma == viking_flush_page) {
unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
while(start < end) {
- viking_no_mxcc_flush_page(start);
+ viking_flush_page(start);
start += PAGE_SIZE;
}
}
flush_tlb_all();
iommu_invalidate(iommu->regs);
}
+#endif
/* #define DEBUG_MAP_KERNEL */
@@ -2257,7 +1838,7 @@ static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base
MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
big_pte = KERNEL_PTE(phys_base >> 4);
- pgd_val(*pgdp) = big_pte;
+ *pgdp = __pgd(big_pte);
}
/* Create second-level SRMMU 256K medium sized page mappings. */
@@ -2273,7 +1854,7 @@ static inline void do_medium_mapping(unsigned long vaddr, unsigned long vend,
pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
pmdp = srmmu_early_pmd_offset(pgdp, vaddr);
medium_pte = KERNEL_PTE(phys_base >> 4);
- pmd_val(*pmdp) = medium_pte;
+ *pmdp = __pmd(medium_pte);
phys_base += SRMMU_PMD_SIZE;
vaddr += SRMMU_PMD_SIZE;
}
@@ -2295,7 +1876,7 @@ static inline void do_small_mapping(unsigned long start, unsigned long end,
pmdp = srmmu_early_pmd_offset(pgdp, start);
ptep = srmmu_early_pte_offset(pmdp, start);
- pte_val(*ptep) = KERNEL_PTE(phys_base >> 4);
+ *ptep = __pte(KERNEL_PTE(phys_base >> 4));
phys_base += PAGE_SIZE;
start += PAGE_SIZE;
}
@@ -2436,7 +2017,7 @@ static void map_kernel(void)
tally = 0;
for(entry = 0; sp_banks[entry].num_bytes; entry++)
tally += sp_banks[entry].num_bytes;
- if(tally >= (0xfd000000 - KERNBASE))
+ if(tally > (0xfd000000 - KERNBASE))
lots_of_ram = 1;
else
lots_of_ram = 0;
@@ -2487,7 +2068,7 @@ static void map_kernel(void)
MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
if(!bank_size)
break;
- if(((vaddr + bank_size) >= 0xfd000000) ||
+ if(((vaddr + bank_size) > 0xfd000000) ||
((vaddr + bank_size) < KERNBASE)) {
unsigned long orig_base = sp_banks[entry].base_addr;
unsigned long orig_len = sp_banks[entry].num_bytes;
@@ -2568,6 +2149,7 @@ check_and_return:
}
MKTRACE(("success\n"));
init_task.mm->mmap->vm_start = page_offset = low_base;
+ stack_top = page_offset - PAGE_SIZE;
return; /* SUCCESS! */
}
@@ -2603,9 +2185,6 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
-#if CONFIG_AP1000
- num_contexts = AP_NUM_CONTEXTS;
-#else
/* Find the number of contexts on the srmmu. */
cpunode = prom_getchild(prom_root_node);
num_contexts = 0;
@@ -2616,7 +2195,7 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
break;
}
}
-#endif
+
if(!num_contexts) {
prom_printf("Something wrong, can't find cpu node in paging_init.\n");
prom_halt();
@@ -2635,22 +2214,9 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
#endif
mempool = PAGE_ALIGN(mempool);
-#if CONFIG_AP1000
- ap_inherit_mappings();
-#else
srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
-#endif
map_kernel();
-#if CONFIG_AP1000
- /* the MSC wants this aligned on a 16k boundary */
- srmmu_context_table =
- sparc_init_alloc(&mempool,
- num_contexts*sizeof(ctxd_t)<0x4000?
- 0x4000:
- num_contexts*sizeof(ctxd_t));
-#else
srmmu_context_table = sparc_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
-#endif
srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
for(i = 0; i < num_contexts; i++)
ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
@@ -2658,12 +2224,12 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
start_mem = PAGE_ALIGN(mempool);
flush_cache_all();
- if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+ if(flush_page_for_dma == viking_flush_page) {
unsigned long start = ptables_start;
unsigned long end = start_mem;
while(start < end) {
- viking_no_mxcc_flush_page(start);
+ viking_flush_page(start);
start += PAGE_SIZE;
}
}
@@ -2671,13 +2237,7 @@ unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
flush_tlb_all();
poke_srmmu();
-#if CONFIG_AP1000
- /* on the AP we don't put the top few contexts into the free
- context list as these are reserved for parallel tasks */
- start_mem = sparc_context_init(start_mem, MPP_CONTEXT_BASE);
-#else
start_mem = sparc_context_init(start_mem, num_contexts);
-#endif
start_mem = free_area_init(start_mem, end_mem);
return PAGE_ALIGN(start_mem);
@@ -2720,10 +2280,8 @@ static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
{
}
-static void srmmu_exit_hook(void)
+static void srmmu_destroy_context(struct mm_struct *mm)
{
- struct mm_struct *mm = current->mm;
-
if(mm->context != NO_CONTEXT && mm->count == 1) {
flush_cache_mm(mm);
ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
@@ -2733,66 +2291,69 @@ static void srmmu_exit_hook(void)
}
}
-static void srmmu_flush_hook(void)
-{
- if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- alloc_context(current);
- flush_cache_mm(current->mm);
- ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
- flush_tlb_mm(current->mm);
- srmmu_set_context(current->mm->context);
- }
-}
-
static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
-#if 0
- struct inode *inode;
- struct vm_area_struct *vmaring;
- unsigned long offset, vaddr;
- unsigned long start;
- pgd_t *pgdp;
- pmd_t *pmdp;
- pte_t *ptep;
-
- if (!(vma->vm_flags & VM_WRITE) ||
- !(vma->vm_flags & VM_SHARED))
- return;
-
- inode = vma->vm_inode;
- if (!inode)
- return;
-
- offset = (address & PAGE_MASK) - vma->vm_start;
- vmaring = inode->i_mmap;
- do {
- vaddr = vmaring->vm_start + offset;
+ if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) {
+ struct vm_area_struct *vmaring;
+ struct inode *inode;
+ unsigned long flags, offset, vaddr, start;
+ int alias_found = 0;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
- if ((vaddr ^ address) & vac_badbits) {
- start = vma->vm_start;
- while (start < vma->vm_end) {
- pgdp = srmmu_pgd_offset(vma->vm_mm, start);
- pmdp = srmmu_pmd_offset(pgdp, start);
- ptep = srmmu_pte_offset(pmdp, start);
-
- flush_cache_page_to_uncache(start);
- set_pte(ptep, __pte((pte_val(*ptep) &
- ~SRMMU_CACHE)));
- flush_tlb_page_for_cbit(start);
+ save_and_cli(flags);
- start += PAGE_SIZE;
+ inode = vma->vm_inode;
+ if (!inode)
+ goto done;
+ offset = (address & PAGE_MASK) - vma->vm_start;
+ vmaring = inode->i_mmap;
+ do {
+ vaddr = vmaring->vm_start + offset;
+
+ if ((vaddr ^ address) & vac_badbits) {
+ alias_found++;
+ start = vmaring->vm_start;
+ while (start < vmaring->vm_end) {
+ pgdp = srmmu_pgd_offset(vmaring->vm_mm, start);
+ if(!pgdp) goto next;
+ pmdp = srmmu_pmd_offset(pgdp, start);
+ if(!pmdp) goto next;
+ ptep = srmmu_pte_offset(pmdp, start);
+ if(!ptep) goto next;
+
+ if((pte_val(*ptep) & SRMMU_ET_MASK) == SRMMU_VALID) {
+#if 1
+ printk("Fixing USER/USER alias [%ld:%08lx]\n",
+ vmaring->vm_mm->context, start);
+#endif
+ flush_cache_page(vmaring, start);
+ set_pte(ptep, __pte((pte_val(*ptep) &
+ ~SRMMU_CACHE)));
+ flush_tlb_page(vmaring, start);
+ }
+ next:
+ start += PAGE_SIZE;
+ }
}
- return;
+ } while ((vmaring = vmaring->vm_next_share) != inode->i_mmap);
+
+ if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
+ pgdp = srmmu_pgd_offset(vma->vm_mm, address);
+ ptep = srmmu_pte_offset((pmd_t *) pgdp, address);
+ flush_cache_page(vma, address);
+ *ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
+ flush_tlb_page(vma, address);
}
- } while ((vmaring = vmaring->vm_next_share) != inode->i_mmap);
-#endif
+ done:
+ restore_flags(flags);
+ }
}
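
The rewritten routine replaces the dead #if 0 block with live alias detection: two shared, writable mappings of the same page are dangerous on a virtually-indexed cache whenever their virtual addresses differ in the cache index bits, which is exactly the (vaddr ^ address) & vac_badbits test above. A stand-alone sketch of that predicate, assuming a 256 KB direct-mapped VAC and 4 KB pages (values chosen purely for illustration):

    #include <stdio.h>

    #define DEMO_PAGE_MASK   (~0xFFFUL)            /* 4 KB pages */
    #define DEMO_VAC_SIZE    (256 * 1024UL)        /* assumed cache size */
    #define DEMO_VAC_BADBITS ((DEMO_VAC_SIZE - 1) & DEMO_PAGE_MASK)

    /* Two virtual mappings of one physical page can land in
     * different cache lines (an illegal alias) iff they differ
     * in the index bits above the page offset. */
    static int vac_alias(unsigned long va1, unsigned long va2)
    {
            return ((va1 ^ va2) & DEMO_VAC_BADBITS) != 0;
    }

    int main(void)
    {
            printf("badbits = 0x%lx\n", DEMO_VAC_BADBITS);  /* 0x3f000 */
            /* different index bits: alias, must be made uncacheable */
            printf("%d\n", vac_alias(0x10000, 0x30000));    /* 1 */
            /* exactly 256 KB apart: same cache index, harmless */
            printf("%d\n", vac_alias(0x10000, 0x50000));    /* 0 */
            return 0;
    }
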
-static void hypersparc_exit_hook(void)
+static void hypersparc_destroy_context(struct mm_struct *mm)
{
- struct mm_struct *mm = current->mm;
-
if(mm->context != NO_CONTEXT && mm->count == 1) {
/* HyperSparc is copy-back, any data for this
* process in a modified cache line is stale
@@ -2807,52 +2368,65 @@ static void hypersparc_exit_hook(void)
}
}
-static void hypersparc_flush_hook(void)
-{
- if(current->tss.flags & SPARC_FLAG_KTHREAD) {
- alloc_context(current);
- flush_cache_mm(current->mm);
- ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
- flush_tlb_mm(current->mm);
- srmmu_set_context(current->mm->context);
- }
-}
-
/* Init various srmmu chip types. */
-static void srmmu_is_bad(void)
+__initfunc(static void srmmu_is_bad(void))
{
prom_printf("Could not determine SRMMU chip type.\n");
prom_halt();
}
-static void init_vac_layout(void)
+__initfunc(static void init_vac_layout(void))
{
int nd, cache_lines;
char node_str[128];
+#ifdef __SMP__
+ int cpu = 0;
+ unsigned long max_size = 0;
+ unsigned long min_line_size = 0x10000000;
+#endif
nd = prom_getchild(prom_root_node);
while((nd = prom_getsibling(nd)) != 0) {
prom_getstring(nd, "device_type", node_str, sizeof(node_str));
- if(!strcmp(node_str, "cpu"))
+ if(!strcmp(node_str, "cpu")) {
+ vac_line_size = prom_getint(nd, "cache-line-size");
+ if (vac_line_size == -1) {
+ prom_printf("can't determine cache-line-size, "
+ "halting.\n");
+ prom_halt();
+ }
+ cache_lines = prom_getint(nd, "cache-nlines");
+ if (cache_lines == -1) {
+ prom_printf("can't determine cache-nlines, halting.\n");
+ prom_halt();
+ }
+
+ vac_cache_size = cache_lines * vac_line_size;
+ vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
+#ifdef __SMP__
+ if(vac_cache_size > max_size)
+ max_size = vac_cache_size;
+ if(vac_line_size < min_line_size)
+ min_line_size = vac_line_size;
+ cpu++;
+ if(cpu == smp_num_cpus)
+ break;
+#else
break;
+#endif
+ }
}
if(nd == 0) {
prom_printf("No CPU nodes found, halting.\n");
prom_halt();
}
-
- vac_line_size = prom_getint(nd, "cache-line-size");
- if (vac_line_size == -1) {
- prom_printf("can't determine cache-line-size, halting.\n");
- prom_halt();
- }
- cache_lines = prom_getint(nd, "cache-nlines");
- if (cache_lines == -1) {
- prom_printf("can't determine cache-nlines, halting.\n");
- prom_halt();
- }
- vac_cache_size = cache_lines * vac_line_size;
+#ifdef __SMP__
+ vac_cache_size = max_size;
+ vac_line_size = min_line_size;
vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
+#endif
+ printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
+ (int)vac_cache_size, (int)vac_line_size);
}
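
On SMP the probe now walks every cpu node and keeps the most conservative pair: the largest cache size (so alias avoidance covers the biggest VAC in the system) and the smallest line size (so a line-stepping flush loop never skips a line on any CPU). A sketch of that reduction, with made-up per-CPU probe results standing in for the PROM properties:

    #include <stdio.h>

    struct demo_cpu { unsigned long cache_size, line_size; };

    int main(void)
    {
            /* invented probe results for a mixed-module machine */
            struct demo_cpu cpus[] = {
                    { 128 * 1024, 32 },
                    { 256 * 1024, 64 },
            };
            unsigned long max_size = 0, min_line = ~0UL;
            int i;

            for (i = 0; i < 2; i++) {
                    if (cpus[i].cache_size > max_size)
                            max_size = cpus[i].cache_size;
                    if (cpus[i].line_size < min_line)
                            min_line = cpus[i].line_size;
            }
            /* 256 KB cache, 32-byte lines: safe on both modules */
            printf("VAC size %lu, line size %lu\n", max_size, min_line);
            return 0;
    }
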
static void poke_hypersparc(void)
@@ -2867,7 +2441,10 @@ static void poke_hypersparc(void)
mreg |= (HYPERSPARC_CMODE);
srmmu_set_mmureg(mreg);
+
+#if 0 /* I think this is bad news... -DaveM */
hyper_clear_all_tags();
+#endif
put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
hyper_flush_whole_icache();
@@ -2875,7 +2452,7 @@ static void poke_hypersparc(void)
clear = srmmu_get_fstatus();
}
-static void init_hypersparc(void)
+__initfunc(static void init_hypersparc(void))
{
srmmu_name = "ROSS HyperSparc";
@@ -2893,14 +2470,15 @@ static void init_hypersparc(void)
flush_tlb_page = hypersparc_flush_tlb_page;
flush_page_to_ram = hypersparc_flush_page_to_ram;
+ flush_sig_insns = hypersparc_flush_sig_insns;
flush_page_for_dma = hypersparc_flush_page_for_dma;
- flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
- flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
+
+ flush_chunk = hypersparc_flush_chunk; /* local flush _only_ */
ctxd_set = hypersparc_ctxd_set;
switch_to_context = hypersparc_switch_to_context;
- mmu_exit_hook = hypersparc_exit_hook;
- mmu_flush_hook = hypersparc_flush_hook;
+ init_new_context = hypersparc_init_new_context;
+ destroy_context = hypersparc_destroy_context;
update_mmu_cache = srmmu_vac_update_mmu_cache;
sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
poke_srmmu = poke_hypersparc;
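
Each init_* routine fills the same set of global function pointers, so the rest of the kernel calls flush_cache_mm() and friends without knowing which SRMMU module is present; this hunk just re-points two of the slots (init_new_context/destroy_context replace the old exit/flush hooks). A minimal sketch of the ops-table-by-assignment pattern, with invented demo functions:

    #include <stdio.h>

    /* Global "ops" slots, dispatched through at run time. */
    static void (*flush_cache_mm_p)(int ctx);
    static void (*destroy_context_p)(int ctx);

    static void hyper_flush(int ctx)   { printf("hypersparc flush ctx %d\n", ctx); }
    static void hyper_destroy(int ctx) { printf("hypersparc destroy ctx %d\n", ctx); }

    /* One init routine per chip type; each fills every slot. */
    static void demo_init_hypersparc(void)
    {
            flush_cache_mm_p  = hyper_flush;
            destroy_context_p = hyper_destroy;
    }

    int main(void)
    {
            demo_init_hypersparc();   /* chosen once, at probe time */
            flush_cache_mm_p(1);
            destroy_context_p(1);
            return 0;
    }
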
@@ -2909,17 +2487,32 @@ static void init_hypersparc(void)
static void poke_cypress(void)
{
unsigned long mreg = srmmu_get_mmureg();
- unsigned long faddr;
+ unsigned long faddr, tagval;
+ volatile unsigned long cypress_sucks;
volatile unsigned long clear;
clear = srmmu_get_faddr();
clear = srmmu_get_fstatus();
- for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
- __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
- "sta %%g0, [%0] %2\n\t" : :
- "r" (faddr), "r" (0x40000),
- "i" (ASI_M_DATAC_TAG));
+ if (!(mreg & CYPRESS_CENABLE)) {
+ for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
+ __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
+ "sta %%g0, [%0] %2\n\t" : :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+ }
+ } else {
+ for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
+ __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
+ "=r" (tagval) :
+ "r" (faddr), "r" (0x40000),
+ "i" (ASI_M_DATAC_TAG));
+
+ /* If modified and valid, kick it. */
+ if((tagval & 0x60) == 0x60)
+ cypress_sucks = *(unsigned long *)
+ (0xf0020000 + faddr);
+ }
}
/* And one more, for our good neighbor, Mr. Broken Cypress. */
@@ -2930,7 +2523,7 @@ static void poke_cypress(void)
srmmu_set_mmureg(mreg);
}
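
When the Cypress cache is already enabled, blindly zeroing tags would throw away dirty data, so the new path reads each tag and, for lines that are both valid and modified (the two bits tested by (tagval & 0x60) == 0x60, per the "If modified and valid, kick it" comment), forces a write-back by reading through a conflicting address. A tiny sketch of the tag test; the bit pattern comes straight from the patch, everything else is illustrative:

    #include <stdio.h>

    #define TAG_VALID_AND_DIRTY 0x60   /* bits tested in poke_cypress() */

    static int must_write_back(unsigned long tagval)
    {
            /* Only lines holding modified, valid data need eviction. */
            return (tagval & TAG_VALID_AND_DIRTY) == TAG_VALID_AND_DIRTY;
    }

    int main(void)
    {
            printf("%d\n", must_write_back(0x60)); /* 1: valid + modified */
            printf("%d\n", must_write_back(0x40)); /* 0: clean            */
            printf("%d\n", must_write_back(0x00)); /* 0: invalid          */
            return 0;
    }
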
-static void init_cypress_common(void)
+__initfunc(static void init_cypress_common(void))
{
init_vac_layout();
@@ -2945,23 +2538,25 @@ static void init_cypress_common(void)
flush_tlb_page = cypress_flush_tlb_page;
flush_tlb_range = cypress_flush_tlb_range;
+ flush_chunk = cypress_flush_chunk; /* local flush _only_ */
+
flush_page_to_ram = cypress_flush_page_to_ram;
+ flush_sig_insns = cypress_flush_sig_insns;
flush_page_for_dma = cypress_flush_page_for_dma;
- flush_cache_page_to_uncache = cypress_flush_page_to_uncache;
- flush_tlb_page_for_cbit = cypress_flush_tlb_page_for_cbit;
+ sparc_update_rootmmu_dir = cypress_update_rootmmu_dir;
update_mmu_cache = srmmu_vac_update_mmu_cache;
poke_srmmu = poke_cypress;
}
-static void init_cypress_604(void)
+__initfunc(static void init_cypress_604(void))
{
srmmu_name = "ROSS Cypress-604(UP)";
srmmu_modtype = Cypress;
init_cypress_common();
}
-static void init_cypress_605(unsigned long mrev)
+__initfunc(static void init_cypress_605(unsigned long mrev))
{
srmmu_name = "ROSS Cypress-605(MP)";
if(mrev == 0xe) {
@@ -2999,7 +2594,7 @@ static void poke_swift(void)
}
#define SWIFT_MASKID_ADDR 0x10003018
-static void init_swift(void)
+__initfunc(static void init_swift(void))
{
unsigned long swift_rev;
@@ -3051,15 +2646,16 @@ static void init_swift(void)
flush_cache_page = swift_flush_cache_page;
flush_cache_range = swift_flush_cache_range;
+ flush_chunk = swift_flush_chunk; /* local flush _only_ */
+
flush_tlb_all = swift_flush_tlb_all;
flush_tlb_mm = swift_flush_tlb_mm;
flush_tlb_page = swift_flush_tlb_page;
flush_tlb_range = swift_flush_tlb_range;
flush_page_to_ram = swift_flush_page_to_ram;
+ flush_sig_insns = swift_flush_sig_insns;
flush_page_for_dma = swift_flush_page_for_dma;
- flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
- flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
/* Are you now convinced that the Swift is one of the
* biggest VLSI abortions of all time? Bravo Fujitsu!
@@ -3081,7 +2677,7 @@ static void poke_tsunami(void)
srmmu_set_mmureg(mreg);
}
-static void init_tsunami(void)
+__initfunc(static void init_tsunami(void))
{
/* Tsunami's pretty sane, Sun and TI actually got it
* somewhat right this time. Fujitsu should have
@@ -3096,15 +2692,16 @@ static void init_tsunami(void)
flush_cache_page = tsunami_flush_cache_page;
flush_cache_range = tsunami_flush_cache_range;
+ flush_chunk = tsunami_flush_chunk; /* local flush _only_ */
+
flush_tlb_all = tsunami_flush_tlb_all;
flush_tlb_mm = tsunami_flush_tlb_mm;
flush_tlb_page = tsunami_flush_tlb_page;
flush_tlb_range = tsunami_flush_tlb_range;
flush_page_to_ram = tsunami_flush_page_to_ram;
+ flush_sig_insns = tsunami_flush_sig_insns;
flush_page_for_dma = tsunami_flush_page_for_dma;
- flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
- flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
poke_srmmu = poke_tsunami;
}
@@ -3149,35 +2746,27 @@ static void poke_viking(void)
mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
mreg |= VIKING_SBENABLE;
mreg &= ~(VIKING_ACENABLE);
-#if CONFIG_AP1000
- mreg &= ~(VIKING_SBENABLE);
-#endif
srmmu_set_mmureg(mreg);
-
#ifdef __SMP__
/* Avoid unnecessary cross calls. */
flush_cache_all = local_flush_cache_all;
flush_page_to_ram = local_flush_page_to_ram;
+ flush_sig_insns = local_flush_sig_insns;
flush_page_for_dma = local_flush_page_for_dma;
- if (viking_mxcc_present) {
- flush_cache_page_to_uncache = local_flush_cache_page_to_uncache;
- }
#endif
}
-static void init_viking(void)
+__initfunc(static void init_viking(void))
{
unsigned long mreg = srmmu_get_mmureg();
/* Ahhh, the viking. SRMMU VLSI abortion number two... */
-
if(mreg & VIKING_MMODE) {
unsigned long bpreg;
srmmu_name = "TI Viking";
viking_mxcc_present = 0;
- set_pte = srmmu_set_pte_nocache_nomxccvik;
bpreg = viking_get_bpreg();
bpreg &= ~(VIKING_ACTION_MIX);
@@ -3185,7 +2774,10 @@ static void init_viking(void)
msi_set_sync();
- flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
+ set_pte = srmmu_set_pte_nocache_viking;
+ sparc_update_rootmmu_dir = viking_update_rootmmu_dir;
+
+ flush_chunk = viking_flush_chunk; /* local flush _only_ */
/* We need this to make sure old viking takes no hits
 * on its cache for dma snoops to work around the
@@ -3193,11 +2785,12 @@ static void init_viking(void)
* This is only necessary because of the new way in
* which we use the IOMMU.
*/
- flush_page_for_dma = viking_no_mxcc_flush_page;
+ flush_page_for_dma = viking_flush_page;
} else {
srmmu_name = "TI Viking/MXCC";
viking_mxcc_present = 1;
- flush_cache_page_to_uncache = viking_mxcc_flush_page;
+
+ flush_chunk = viking_mxcc_flush_chunk; /* local flush _only_ */
/* MXCC vikings lack the DMA snooping bug. */
flush_page_for_dma = viking_flush_page_for_dma;
@@ -3214,13 +2807,13 @@ static void init_viking(void)
flush_tlb_range = viking_flush_tlb_range;
flush_page_to_ram = viking_flush_page_to_ram;
- flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
+ flush_sig_insns = viking_flush_sig_insns;
poke_srmmu = poke_viking;
}
/* Probe for the srmmu chip version. */
-static void get_srmmu_type(void)
+__initfunc(static void get_srmmu_type(void))
{
unsigned long mreg, psr;
unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
@@ -3242,9 +2835,12 @@ static void get_srmmu_type(void)
init_hypersparc();
break;
case 0:
+ case 2:
/* Uniprocessor Cypress */
init_cypress_604();
break;
+ case 10:
+ case 11:
case 12:
/* _REALLY OLD_ Cypress MP chips... */
case 13:
@@ -3254,7 +2850,8 @@ static void get_srmmu_type(void)
init_cypress_605(mod_rev);
break;
default:
- srmmu_is_bad();
+ /* Some other Cypress revision, assume a 605. */
+ init_cypress_605(mod_rev);
break;
};
return;
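
The case values matched here are the module revision field of the SRMMU control register; the widened Cypress handling (revisions 0 and 2 as a 604, 10 through 13 as a 605, and anything else now defaulting to a 605 instead of panicking) makes the probe tolerant of revisions that did not exist when the table was written. A hedged sketch of the field extraction the switch operates on, assuming the conventional implementation/version layout in the top byte:

    #include <stdio.h>

    /* Assumed layout: bits 31-28 implementation, 27-24 version. */
    static unsigned long mod_type(unsigned long mmu_ctrl)
    {
            return (mmu_ctrl >> 28) & 0xf;
    }

    static unsigned long mod_rev(unsigned long mmu_ctrl)
    {
            return (mmu_ctrl >> 24) & 0xf;
    }

    int main(void)
    {
            unsigned long mreg = 0x1a000000; /* invented register value */
            printf("impl %lu rev %lu\n", mod_type(mreg), mod_rev(mreg));
            /* rev 10 now falls into the Cypress-605 cases above */
            return 0;
    }
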
@@ -3290,10 +2887,6 @@ extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
tsetup_srmmu_stackchk, srmmu_rett_stackchk;
-#ifdef __SMP__
-extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
-#endif
-
extern unsigned long srmmu_fault;
#define PATCH_BRANCH(insn, dest) do { \
@@ -3302,7 +2895,7 @@ extern unsigned long srmmu_fault;
*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
} while(0);
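
PATCH_BRANCH rewrites an instruction slot in the trap table with a "branch always" aimed at the SRMMU handler, so the generic trap vectors dispatch into SRMMU-specific code with no indirection at trap time. A sketch of how such a branch word can be computed; the 0x10800000 opcode and 22-bit word displacement follow the classic SPARC "ba" encoding, but treat the details as illustrative rather than the kernel's SPARC_BRANCH macro verbatim:

    #include <stdio.h>

    /* SPARC "ba <disp22>": displacement is in words, relative to
     * the branch instruction itself, truncated to 22 bits. */
    static unsigned int sparc_ba(unsigned long dest, unsigned long insn_addr)
    {
            long disp_words = ((long)dest - (long)insn_addr) >> 2;
            return 0x10800000u | ((unsigned int)disp_words & 0x3fffff);
    }

    int main(void)
    {
            /* patch a slot at 0x4000 to jump to a handler at 0x5000 */
            printf("insn = 0x%08x\n", sparc_ba(0x5000, 0x4000));
            return 0;
    }
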
-static void patch_window_trap_handlers(void)
+__initfunc(static void patch_window_trap_handlers(void))
{
unsigned long *iaddr, *daddr;
@@ -3310,9 +2903,6 @@ static void patch_window_trap_handlers(void)
PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
-#ifdef __SMP__
- PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
-#endif
PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
@@ -3325,19 +2915,10 @@ static void smp_flush_page_for_dma(unsigned long page)
xc1((smpfunc_t) local_flush_page_for_dma, page);
}
-static void smp_flush_cache_page_to_uncache(unsigned long page)
-{
- xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
-}
-
-static void smp_flush_tlb_page_for_cbit(unsigned long page)
-{
- xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
-}
#endif
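
The two deleted cross-call wrappers had the same shape as the survivor above: an smp_ front end hands the operation to every processor via xc1(), and the local_ variant does the real work on each. A minimal sketch of that broadcast pattern, with a loop standing in for the real cross-call primitive (demo_xc1 and DEMO_NCPUS are invented names):

    #include <stdio.h>

    #define DEMO_NCPUS 4

    typedef void (*smpfunc_t)(unsigned long);

    static void local_flush_page_for_dma(unsigned long page)
    {
            printf("cpu flush of page 0x%lx\n", page);
    }

    /* Stand-in for xc1(): run func(arg) on every processor.
     * The real primitive raises hardware cross-calls; a loop
     * models the effect. */
    static void demo_xc1(smpfunc_t func, unsigned long arg)
    {
            int cpu;
            for (cpu = 0; cpu < DEMO_NCPUS; cpu++)
                    func(arg);
    }

    static void smp_flush_page_for_dma(unsigned long page)
    {
            demo_xc1(local_flush_page_for_dma, page);
    }

    int main(void)
    {
            smp_flush_page_for_dma(0x2000);
            return 0;
    }
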
/* Load up routines and constants for sun4m mmu */
-void ld_mmu_srmmu(void)
+__initfunc(void ld_mmu_srmmu(void))
{
/* First the constants */
pmd_shift = SRMMU_PMD_SHIFT;
@@ -3360,6 +2941,7 @@ void ld_mmu_srmmu(void)
/* Functions */
set_pte = srmmu_set_pte_cacheable;
+ init_new_context = srmmu_init_new_context;
switch_to_context = srmmu_switch_to_context;
pmd_align = srmmu_pmd_align;
pgdir_align = srmmu_pgdir_align;
@@ -3414,8 +2996,7 @@ void ld_mmu_srmmu(void)
pte_mkdirty = srmmu_pte_mkdirty;
pte_mkyoung = srmmu_pte_mkyoung;
update_mmu_cache = srmmu_update_mmu_cache;
- mmu_exit_hook = srmmu_exit_hook;
- mmu_flush_hook = srmmu_flush_hook;
+ destroy_context = srmmu_destroy_context;
mmu_lockarea = srmmu_lockarea;
mmu_unlockarea = srmmu_unlockarea;
@@ -3424,7 +3005,9 @@ void ld_mmu_srmmu(void)
mmu_release_scsi_one = srmmu_release_scsi_one;
mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
+#ifdef CONFIG_SBUS
mmu_map_dma_area = srmmu_map_dma_area;
+#endif
mmu_info = srmmu_mmu_info;
mmu_v2p = srmmu_v2p;
@@ -3457,9 +3040,8 @@ void ld_mmu_srmmu(void)
local_flush_tlb_range = flush_tlb_range;
local_flush_tlb_page = flush_tlb_page;
local_flush_page_to_ram = flush_page_to_ram;
+ local_flush_sig_insns = flush_sig_insns;
local_flush_page_for_dma = flush_page_for_dma;
- local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
- local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
flush_cache_all = smp_flush_cache_all;
flush_cache_mm = smp_flush_cache_mm;
@@ -3470,8 +3052,7 @@ void ld_mmu_srmmu(void)
flush_tlb_range = smp_flush_tlb_range;
flush_tlb_page = smp_flush_tlb_page;
flush_page_to_ram = smp_flush_page_to_ram;
+ flush_sig_insns = smp_flush_sig_insns;
flush_page_for_dma = smp_flush_page_for_dma;
- flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
- flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
#endif
}
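
The #ifdef __SMP__ tail shows the general trick: the chip-specific pointer chosen earlier is first saved into a local_ slot, then the public slot is re-pointed at an smp_ wrapper that cross-calls the local one. Uniprocessor kernels pay nothing, and SMP kernels get broadcast semantics without the chip code knowing. A compact sketch of that save-then-wrap step (chip_flush and the xc1 comment are illustrative):

    #include <stdio.h>

    static void (*flush_page_to_ram)(unsigned long);
    static void (*local_flush_page_to_ram)(unsigned long);

    static void chip_flush(unsigned long page)
    {
            printf("chip flush 0x%lx\n", page);
    }

    /* Wrapper the public pointer gets on SMP: do the local work,
     * then (in the real kernel) cross-call the local version on
     * the other processors. */
    static void smp_flush(unsigned long page)
    {
            local_flush_page_to_ram(page);
            /* xc1(local_flush_page_to_ram, page) would go here */
    }

    int main(void)
    {
            flush_page_to_ram = chip_flush;               /* probe result    */
            local_flush_page_to_ram = flush_page_to_ram;  /* save it         */
            flush_page_to_ram = smp_flush;                /* install wrapper */
            flush_page_to_ram(0x1000);
            return 0;
    }
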