summary refs log tree commit diff stats
path: root/arch/sparc64/mm
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1999-01-04 16:03:48 +0000
committerRalf Baechle <ralf@linux-mips.org>1999-01-04 16:03:48 +0000
commit78c388aed2b7184182c08428db1de6c872d815f5 (patch)
tree4b2003b1b4ceb241a17faa995da8dd1004bb8e45 /arch/sparc64/mm
parenteb7a5bf93aaa4be1d7c6181100ab7639e74d67f7 (diff)
Merge with Linux 2.1.131 and more MIPS goodies.
(Did I mention that CVS is buggy ...)
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--arch/sparc64/mm/asyncd.c2
-rw-r--r--arch/sparc64/mm/fault.c77
-rw-r--r--arch/sparc64/mm/generic.c11
-rw-r--r--arch/sparc64/mm/init.c191
-rw-r--r--arch/sparc64/mm/ultra.S77
5 files changed, 224 insertions, 134 deletions
diff --git a/arch/sparc64/mm/asyncd.c b/arch/sparc64/mm/asyncd.c
index 6dfaca524..a0d1c8144 100644
--- a/arch/sparc64/mm/asyncd.c
+++ b/arch/sparc64/mm/asyncd.c
@@ -1,4 +1,4 @@
-/* $Id: asyncd.c,v 1.4 1998/05/24 02:53:58 davem Exp $
+/* $Id: asyncd.c,v 1.5 1998/09/13 04:30:33 davem Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 21389e397..737872fb2 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.21 1998/03/25 10:43:20 jj Exp $
+/* $Id: fault.c,v 1.26 1998/11/08 11:14:03 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -100,49 +100,38 @@ void unhandled_fault(unsigned long address, struct task_struct *tsk,
(unsigned long) tsk->mm->context);
printk(KERN_ALERT "tsk->mm->pgd = %016lx\n",
(unsigned long) tsk->mm->pgd);
+ lock_kernel();
die_if_kernel("Oops", regs);
-}
-
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
- unsigned long address)
-{
- unsigned long g2;
- int i;
- unsigned insn;
- struct pt_regs regs;
-
- i = search_exception_table (ret_pc, &g2);
- switch (i) {
- /* load & store will be handled by fixup */
- case 3: return 3;
- /* store will be handled by fixup, load will bump out */
- /* for _to_ macros */
- case 1: insn = *(unsigned *)pc; if ((insn >> 21) & 1) return 1; break;
- /* load will be handled by fixup, store will bump out */
- /* for _from_ macros */
- case 2: insn = *(unsigned *)pc;
- if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) return 2;
- break;
- default: break;
- }
- memset (&regs, 0, sizeof (regs));
- regs.tpc = pc;
- regs.tnpc = pc + 4;
- /* FIXME: Should set up regs->tstate? */
- unhandled_fault (address, current, &regs);
- /* Not reached */
- return 0;
+ unlock_kernel();
}
/* #define DEBUG_EXCEPTIONS */
+/* #define DEBUG_LOCKUPS */
asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, int write)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+#ifdef DEBUG_LOCKUPS
+ static unsigned long lastaddr, lastpc;
+ static int lastwrite, lockcnt;
+#endif
- lock_kernel();
down(&mm->mmap_sem);
+#ifdef DEBUG_LOCKUPS
+ if (regs->tpc == lastpc && address == lastaddr && write == lastwrite) {
+ lockcnt++;
+ if (lockcnt == 100000) {
+ printk("do_sparc64_fault: possible fault loop for %016lx %s\n", address, write ? "write" : "read");
+ show_regs(regs);
+ }
+ } else {
+ lastpc = regs->tpc;
+ lastaddr = address;
+ lastwrite = write;
+ lockcnt = 0;
+ }
+#endif
vma = find_vma(mm, address);
if(!vma)
goto bad_area;
@@ -165,9 +154,11 @@ good_area:
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- handle_mm_fault(current, vma, address, write);
+ current->mm->segments = (void *) (address & PAGE_SIZE);
+ if (!handle_mm_fault(current, vma, address, write))
+ goto do_sigbus;
up(&mm->mmap_sem);
- goto out;
+ return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
@@ -175,6 +166,7 @@ good_area:
bad_area:
up(&mm->mmap_sem);
+do_kernel_fault:
{
unsigned long g2 = regs->u_regs[UREG_G2];
@@ -204,16 +196,23 @@ bad_area:
regs->tpc = fixup;
regs->tnpc = regs->tpc + 4;
regs->u_regs[UREG_G2] = g2;
- goto out;
+ return;
}
} else {
current->tss.sig_address = address;
current->tss.sig_desc = SUBSIG_NOMAPPING;
force_sig(SIGSEGV, current);
- goto out;
+ return;
}
unhandled_fault (address, current, regs);
}
-out:
- unlock_kernel();
+ return;
+
+do_sigbus:
+ up(&mm->mmap_sem);
+ current->tss.sig_address = address;
+ current->tss.sig_desc = SUBSIG_MISCERROR;
+ force_sig(SIGBUS, current);
+ if (regs->tstate & TSTATE_PRIV)
+ goto do_kernel_fault;
}
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 730e8cb32..0b869a2f2 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -1,4 +1,4 @@
-/* $Id: generic.c,v 1.2 1997/07/01 09:11:42 jj Exp $
+/* $Id: generic.c,v 1.3 1998/10/27 23:28:07 davem Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
@@ -41,10 +41,11 @@ static inline void forget_pte(pte_t page)
unsigned long addr = pte_page(page);
if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
return;
- free_page(addr);
- if (current->mm->rss <= 0)
- return;
- current->mm->rss--;
+ /*
+ * free_page() used to be able to clear swap cache
+ * entries. We may now have to do it manually.
+ */
+ free_page_and_swap_cache(addr);
return;
}
swap_free(pte_val(page));
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 035b023fc..236c866d6 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1,4 +1,4 @@
-/* $Id: init.c,v 1.93 1998/08/04 20:49:25 davem Exp $
+/* $Id: init.c,v 1.103 1998/10/20 03:09:12 jj Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -37,14 +37,14 @@ struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
/* Ugly, but necessary... -DaveM */
unsigned long phys_base;
-unsigned long tlb_context_cache = CTX_FIRST_VERSION;
+/* get_new_mmu_context() uses "cache + 1". */
+unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
/* References to section boundaries */
extern char __init_begin, __init_end, etext, __bss_start;
int do_check_pgt_cache(int low, int high)
{
- struct page *page, *page2;
int freed = 0;
if(pgtable_cache_size > high) {
@@ -59,6 +59,7 @@ int do_check_pgt_cache(int low, int high)
}
#ifndef __SMP__
if (pgd_cache_size > high / 4) {
+ struct page *page, *page2;
for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
if ((unsigned long)page->pprev_hash == 3) {
if (page2)
@@ -536,7 +537,7 @@ static inline void inherit_prom_mappings(void)
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool,
PMD_TABLE_SIZE);
- clear_page(pmdp);
+ memset(pmdp, 0, PAGE_SIZE);
pgd_set(pgdp, pmdp);
}
pmdp = pmd_offset(pgdp, vaddr);
@@ -565,9 +566,9 @@ static void __flush_nucleus_vptes(void)
unsigned long prom_reserved_base = 0xfffffffc00000000UL;
int i;
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate\n\t"
- "flushw"
+ __asm__ __volatile__("flushw\n\t"
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
@@ -590,12 +591,17 @@ static void __flush_nucleus_vptes(void)
}
static int prom_ditlb_set = 0;
-int prom_itlb_ent, prom_dtlb_ent;
-unsigned long prom_itlb_tag, prom_itlb_data;
-unsigned long prom_dtlb_tag, prom_dtlb_data;
+struct prom_tlb_entry {
+ int tlb_ent;
+ unsigned long tlb_tag;
+ unsigned long tlb_data;
+};
+struct prom_tlb_entry prom_itlb[8], prom_dtlb[8];
void prom_world(int enter)
{
+ int i;
+
if (!prom_ditlb_set)
return;
if (enter) {
@@ -603,29 +609,44 @@ void prom_world(int enter)
__flush_nucleus_vptes();
/* Install PROM world. */
- __asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_dtlb_tag), "r" (TLB_TAG_ACCESS),
+ for (i = 0; i < 8; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
"i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(62, prom_dtlb_data);
- membar("#Sync");
- __asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_itlb_tag), "r" (TLB_TAG_ACCESS),
+ membar("#Sync");
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ membar("#Sync");
+ }
+
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
"i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(62, prom_itlb_data);
- membar("#Sync");
+ membar("#Sync");
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ membar("#Sync");
+ }
+ }
} else {
- __asm__ __volatile__("stxa %%g0, [%0] %1"
+ for (i = 0; i < 8; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(62, 0x0UL);
- membar("#Sync");
- __asm__ __volatile__("stxa %%g0, [%0] %1"
+ membar("#Sync");
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
+ membar("#Sync");
+ }
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(62, 0x0UL);
- membar("#Sync");
+ membar("#Sync");
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
+ membar("#Sync");
+ }
+ }
}
}
@@ -639,8 +660,8 @@ void inherit_locked_prom_mappings(int save_p)
* it (conveniently) fails to mention any of these in the
* translations property. The only ones that matter are
* the locked PROM tlb entries, so we impose the following
- * irrecovable rule on the PROM, it is allowed 1 locked
- * entry in the ITLB and 1 in the DTLB.
+ * irrecoverable rule on the PROM, it is allowed 8 locked
+ * entries in the ITLB and 8 in the DTLB.
*
* Supposedly the upper 16GB of the address space is
* reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
@@ -649,17 +670,23 @@ void inherit_locked_prom_mappings(int save_p)
* systems to coordinate mmu mappings is also COMPLETELY
* UNDOCUMENTED!!!!!! Thanks S(t)un!
*/
+ if (save_p) {
+ for(i = 0; i < 8; i++) {
+ prom_dtlb[i].tlb_ent = -1;
+ prom_itlb[i].tlb_ent = -1;
+ }
+ }
for(i = 0; i < 63; i++) {
unsigned long data;
data = spitfire_get_dtlb_data(i);
- if(!dtlb_seen && (data & _PAGE_L)) {
+ if(data & _PAGE_L) {
unsigned long tag = spitfire_get_dtlb_tag(i);
if(save_p) {
- prom_dtlb_ent = i;
- prom_dtlb_tag = tag;
- prom_dtlb_data = data;
+ prom_dtlb[dtlb_seen].tlb_ent = i;
+ prom_dtlb[dtlb_seen].tlb_tag = tag;
+ prom_dtlb[dtlb_seen].tlb_data = data;
}
__asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
@@ -667,18 +694,22 @@ void inherit_locked_prom_mappings(int save_p)
spitfire_put_dtlb_data(i, 0x0UL);
membar("#Sync");
- dtlb_seen = 1;
- if(itlb_seen)
+ dtlb_seen++;
+ if(dtlb_seen > 7)
break;
}
+ }
+ for(i = 0; i < 63; i++) {
+ unsigned long data;
+
data = spitfire_get_itlb_data(i);
- if(!itlb_seen && (data & _PAGE_L)) {
+ if(data & _PAGE_L) {
unsigned long tag = spitfire_get_itlb_tag(i);
if(save_p) {
- prom_itlb_ent = i;
- prom_itlb_tag = tag;
- prom_itlb_data = data;
+ prom_itlb[itlb_seen].tlb_ent = i;
+ prom_itlb[itlb_seen].tlb_tag = tag;
+ prom_itlb[itlb_seen].tlb_data = data;
}
__asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
@@ -686,15 +717,8 @@ void inherit_locked_prom_mappings(int save_p)
spitfire_put_itlb_data(i, 0x0UL);
membar("#Sync");
- /* Re-install it. */
- __asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (tag), "r" (TLB_TAG_ACCESS),
- "i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(62, data);
- membar("#Sync");
- itlb_seen = 1;
- if(dtlb_seen)
+ itlb_seen++;
+ if(itlb_seen > 7)
break;
}
}
@@ -705,19 +729,41 @@ void inherit_locked_prom_mappings(int save_p)
/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
- __asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_dtlb_tag), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU));
- membar("#Sync");
- spitfire_put_dtlb_data(prom_dtlb_ent, prom_dtlb_data);
- membar("#Sync");
+ int i;
- __asm__ __volatile__("stxa %0, [%1] %2"
- : : "r" (prom_itlb_tag), "r" (TLB_TAG_ACCESS),
- "i" (ASI_IMMU));
- membar("#Sync");
- spitfire_put_itlb_data(prom_itlb_ent, prom_itlb_data);
- membar("#Sync");
+ for (i = 0; i < 8; i++) {
+ if (prom_dtlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
+ prom_dtlb[i].tlb_data);
+ membar("#Sync");
+ }
+
+ if (prom_itlb[i].tlb_ent != -1) {
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_itlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
+ membar("#Sync");
+ spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
+ prom_itlb[i].tlb_data);
+ membar("#Sync");
+ }
+ }
+}
+
+void __flush_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned long va;
+ int n = 0;
+
+ for (va = start; va < end; va += 32) {
+ spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+ if (++n >= 512)
+ break;
+ }
}
void __flush_cache_all(void)
@@ -735,9 +781,9 @@ void __flush_tlb_all(void)
unsigned long pstate;
int i;
- __asm__ __volatile__("rdpr %%pstate, %0\n\t"
- "wrpr %0, %1, %%pstate\n\t"
- "flushw"
+ __asm__ __volatile__("flushw\n\t"
+ "rdpr %%pstate, %0\n\t"
+ "wrpr %0, %1, %%pstate"
: "=r" (pstate)
: "i" (PSTATE_IE));
for(i = 0; i < 64; i++) {
@@ -831,7 +877,7 @@ pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
pmd = (pmd_t *) __get_free_page(GFP_DMA|GFP_KERNEL);
if(pmd) {
- clear_page(pmd);
+ memset(pmd, 0, PAGE_SIZE);
pgd_set(pgd, pmd);
return pmd + offset;
}
@@ -844,7 +890,7 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
pte = (pte_t *) __get_free_page(GFP_DMA|GFP_KERNEL);
if(pte) {
- clear_page(pte);
+ memset(pte, 0, PAGE_SIZE);
pmd_set(pmd, pte);
return pte + offset;
}
@@ -862,13 +908,13 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end))
pgdp = pgd_offset(init_task.mm, start);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool, PAGE_SIZE);
- clear_page(pmdp);
+ memset(pmdp, 0, PAGE_SIZE);
pgd_set(pgdp, pmdp);
}
pmdp = pmd_offset(pgdp, start);
if (pmd_none(*pmdp)) {
ptep = sparc_init_alloc(&mempool, PAGE_SIZE);
- clear_page(ptep);
+ memset(ptep, 0, PAGE_SIZE);
pmd_set(pmdp, ptep);
}
start = (start + PMD_SIZE) & PMD_MASK;
@@ -913,7 +959,6 @@ void sparc_ultra_unmapioaddr(unsigned long virt_addr)
pte_clear(ptep);
}
-#ifdef NOTUSED
void sparc_ultra_dump_itlb(void)
{
int slot;
@@ -933,17 +978,17 @@ void sparc_ultra_dump_dtlb(void)
{
int slot;
- prom_printf ("Contents of dtlb: ");
+ printk ("Contents of dtlb: ");
for (slot = 0; slot < 14; slot++) printk (" ");
- prom_printf ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
+ printk ("%2x:%016lx,%016lx\n", 0, spitfire_get_dtlb_tag(0),
+ spitfire_get_dtlb_data(0));
for (slot = 1; slot < 64; slot+=3) {
- prom_printf ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
+ printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
slot+2, spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
}
}
-#endif
/* paging_init() sets up the page tables */
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 683f4bcb1..4362a15b4 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -1,4 +1,4 @@
-/* $Id: ultra.S,v 1.24 1998/05/22 11:02:56 davem Exp $
+/* $Id: ultra.S,v 1.31 1998/11/07 06:39:21 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -56,38 +56,43 @@ __flush_tlb_range_page_by_page:
__flush_tlb_range_constant_time: /* %o0=ctx, %o1=start, %o3=end */
/*IC5*/ rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
+ mov TLB_TAG_ACCESS, %g3
mov (62 << 3), %g2
1: ldxa [%g2] ASI_ITLB_TAG_READ, %o4
and %o4, 0x3ff, %o5
cmp %o5, %o0
bne,pt %icc, 2f
- andn %o4, 0x3ff, %o4
-/*IC6*/ cmp %o4, %o1
+/*IC6*/ andn %o4, 0x3ff, %o4
+ cmp %o4, %o1
blu,pt %xcc, 2f
cmp %o4, %o3
blu,pn %xcc, 4f
2: ldxa [%g2] ASI_DTLB_TAG_READ, %o4
and %o4, 0x3ff, %o5
cmp %o5, %o0
- andn %o4, 0x3ff, %o4
-/*IC7*/ bne,pt %icc, 3f
+/*IC7*/ andn %o4, 0x3ff, %o4
+ bne,pt %icc, 3f
cmp %o4, %o1
blu,pt %xcc, 3f
cmp %o4, %o3
blu,pn %xcc, 5f
nop
3: brnz,pt %g2, 1b
- sub %g2, (1 << 3), %g2
-/*IC8*/ retl
+/*IC8*/ sub %g2, (1 << 3), %g2
+ retl
wrpr %g1, 0x0, %pstate
-4: stxa %g0, [%g2] ASI_ITLB_DATA_ACCESS
+4: stxa %g0, [%g3] ASI_IMMU
+ stxa %g0, [%g2] ASI_ITLB_DATA_ACCESS
ba,pt %xcc, 2b
flush %g6
-5: stxa %g0, [%g2] ASI_DTLB_DATA_ACCESS
+5: stxa %g0, [%g3] ASI_DMMU
+/*IC9*/ stxa %g0, [%g2] ASI_DTLB_DATA_ACCESS
ba,pt %xcc, 3b
flush %g6
+
+ .align 32
__flush_tlb_mm_slow:
-/*IC9*/ rdpr %pstate, %g1
+/*IC10*/rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
stxa %o0, [%o1] ASI_DMMU
stxa %g0, [%g3] ASI_DMMU_DEMAP
@@ -95,21 +100,25 @@ __flush_tlb_mm_slow:
flush %g6
stxa %g2, [%o1] ASI_DMMU
flush %g6
-/*IC10*/retl
+/*IC11*/retl
wrpr %g1, 0, %pstate
+
+ .align 32
__flush_tlb_page_slow:
- rdpr %pstate, %g1
+/*IC12*/rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
stxa %o0, [%o2] ASI_DMMU
stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP
flush %g6
-/*IC11*/stxa %g2, [%o2] ASI_DMMU
+ stxa %g2, [%o2] ASI_DMMU
flush %g6
- retl
+/*IC13*/retl
wrpr %g1, 0, %pstate
+
+ .align 32
__flush_tlb_range_pbp_slow:
- rdpr %pstate, %g1
+/*IC13*/rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
stxa %o0, [%o2] ASI_DMMU
2: stxa %g0, [%g5 + %o5] ASI_DMMU_DEMAP
@@ -117,11 +126,47 @@ __flush_tlb_range_pbp_slow:
brnz,pt %o5, 2b
sub %o5, %o4, %o5
flush %g6
-/*IC13*/stxa %g2, [%o2] ASI_DMMU
+/*IC14*/stxa %g2, [%o2] ASI_DMMU
flush %g6
retl
wrpr %g1, 0x0, %pstate
+ .align 32
+ .globl flush_icache_page
+flush_icache_page: /* %o0 = phys_page */
+ sethi %hi(1 << 13), %o2 ! IC_set bit
+ mov 1, %g1
+	srlx	%o0, 5, %o0	! phys-addr comparator
+ clr %o1 ! IC_addr
+ sllx %g1, 36, %g1
+ sub %g1, 1, %g2
+ andn %g2, 0xff, %g2 ! IC_tag mask
+ nop
+
+1: ldda [%o1] ASI_IC_TAG, %o4
+ and %o5, %g2, %o5
+ cmp %o5, %o0
+ be,pn %xcc, iflush1
+ nop
+2: ldda [%o1 + %o2] ASI_IC_TAG, %o4
+ and %o5, %g2, %o5
+ cmp %o5, %o0
+
+ be,pn %xcc, iflush2
+ nop
+3: add %o1, 0x20, %o1
+ cmp %o1, %o2
+ bne,pt %xcc, 1b
+ nop
+ retl
+ nop
+iflush1:stxa %g0, [%o1] ASI_IC_TAG
+ ba,pt %xcc, 2b
+ flush %g6
+iflush2:stxa %g0, [%o1 + %o2] ASI_IC_TAG
+ ba,pt %xcc, 3b
+ flush %g6
+
#ifdef __SMP__
/* These are all called by the slaves of a cross call, at
* trap level 1, with interrupts fully disabled.