Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--  include/asm-sparc64/mmu_context.h | 66 ++++++++++++++++++++++++++++++++++++++----------------------------
1 file changed, 38 insertions(+), 28 deletions(-)
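
For orientation before the diff: the new macros assume a version-tagged context allocator. The low bits of mm->context name a hardware context number tracked in mmu_context_bmap, while the high bits carry a version that advances whenever the number space wraps; the CTX_VERSION_MASK comparisons in destroy_context() and get_mmu_context() test exactly that. The standalone C sketch below illustrates the scheme. The 10-bit width matches the 0x3ff masks in the diff, but the allocator body and every *_sketch name are reconstructions for illustration, not code from this patch.

    #include <stdio.h>
    #include <string.h>

    #define CTX_NR_BITS       10                    /* matches the 0x3ff hw-context masks */
    #define CTX_NR_MASK       ((1UL << CTX_NR_BITS) - 1)
    #define CTX_VERSION_MASK  (~0UL << CTX_NR_BITS)
    #define BITS_PER_LONG     (8 * sizeof(unsigned long))

    static unsigned long ctx_bmap_sketch[(1UL << CTX_NR_BITS) / BITS_PER_LONG];
    static unsigned long ctx_cache_sketch = (1UL << CTX_NR_BITS) + 1;  /* "first version" */

    static int test_ctx_sketch(unsigned long nr)
    {
            return (ctx_bmap_sketch[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    static void set_ctx_sketch(unsigned long nr)
    {
            ctx_bmap_sketch[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    /* Hand out the next free context number; on wrap, start a new version
     * (the real kernel would also flush every TLB at that point). */
    static unsigned long alloc_context_sketch(void)
    {
            unsigned long version = ctx_cache_sketch & CTX_VERSION_MASK;
            unsigned long nr = (ctx_cache_sketch + 1) & CTX_NR_MASK;

            while (nr != 0 && test_ctx_sketch(nr))
                    nr = (nr + 1) & CTX_NR_MASK;

            if (nr == 0) {                           /* number space exhausted */
                    version += 1UL << CTX_NR_BITS;
                    memset(ctx_bmap_sketch, 0, sizeof(ctx_bmap_sketch));
                    nr = 1;                          /* 0 stays NO_CONTEXT */
            }
            set_ctx_sketch(nr);
            return ctx_cache_sketch = version | nr;
    }

    int main(void)
    {
            unsigned long ctx = alloc_context_sketch();
            printf("context %#lx = version %#lx + hw context %lu\n",
                   ctx, ctx & CTX_VERSION_MASK, ctx & CTX_NR_MASK);
            return 0;
    }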
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 44209a120..1d8a9f7ee 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.20 1997/09/18 10:42:08 rth Exp $ */
+/* $Id: mmu_context.h,v 1.26 1998/07/31 10:42:38 jj Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -6,70 +6,80 @@
#include <asm/system.h>
#include <asm/spitfire.h>
+#include <asm/spinlock.h>
#define NO_CONTEXT 0
#ifndef __ASSEMBLY__
extern unsigned long tlb_context_cache;
+extern spinlock_t scheduler_lock;
+extern unsigned long mmu_context_bmap[];
-#define CTX_VERSION_SHIFT PAGE_SHIFT
+#define CTX_VERSION_SHIFT (PAGE_SHIFT - 3)
#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
-extern void get_new_mmu_context(struct mm_struct *mm, unsigned long *ctx);
+extern void get_new_mmu_context(struct mm_struct *mm);
/* Initialize/destroy the context related info for a new mm_struct
* instance.
*/
#define init_new_context(mm) ((mm)->context = NO_CONTEXT)
-#define destroy_context(mm) ((mm)->context = NO_CONTEXT)
-
-#ifdef __SMP__
-#define LOCAL_FLUSH_PENDING(cpu) \
- ((cpu_data[(cpu)].last_tlbversion_seen ^ tlb_context_cache) & CTX_VERSION_MASK)
-#define DO_LOCAL_FLUSH(cpu) do { __flush_tlb_all(); \
- cpu_data[cpu].last_tlbversion_seen = \
- tlb_context_cache & CTX_VERSION_MASK; \
- } while(0)
-#else
-#define LOCAL_FLUSH_PENDING(cpu) 0
-#define DO_LOCAL_FLUSH(cpu) do { __flush_tlb_all(); } while(0)
-#endif
-
-extern void __flush_tlb_all(void);
+#define destroy_context(mm) do { \
+ if ((mm)->context != NO_CONTEXT) { \
+ spin_lock(&scheduler_lock); \
+ if (!(((mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK)) \
+ clear_bit((mm)->context & ~(CTX_VERSION_MASK), \
+ mmu_context_bmap); \
+ spin_unlock(&scheduler_lock); \
+ (mm)->context = NO_CONTEXT; \
+ } \
+} while (0)
extern __inline__ void get_mmu_context(struct task_struct *tsk)
{
register unsigned long paddr asm("o5");
+ register unsigned long pgd_cache asm("o4");
struct mm_struct *mm = tsk->mm;
flushw_user();
- if(LOCAL_FLUSH_PENDING(current->processor))
- DO_LOCAL_FLUSH(current->processor);
if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
!(tsk->flags & PF_EXITING)) {
unsigned long ctx = tlb_context_cache;
if((mm->context ^ ctx) & CTX_VERSION_MASK)
- get_new_mmu_context(mm, &tlb_context_cache);
-
+ get_new_mmu_context(mm);
+ if(!(mm->cpu_vm_mask & (1UL<<smp_processor_id()))) {
+ spitfire_set_secondary_context(mm->context & 0x3ff);
+ __asm__ __volatile__("flush %g6");
+ spitfire_flush_dtlb_secondary_context();
+ spitfire_flush_itlb_secondary_context();
+ __asm__ __volatile__("flush %g6");
+ }
/* Don't worry, set_fs() will restore it... */
/* Sigh, damned include loops... just poke seg directly. */
tsk->tss.ctx = (tsk->tss.current_ds.seg ?
- (mm->context & 0x1fff) : 0);
+ (mm->context & 0x3ff) : 0);
} else
tsk->tss.ctx = 0;
spitfire_set_secondary_context(tsk->tss.ctx);
__asm__ __volatile__("flush %g6");
paddr = __pa(mm->pgd);
+ if(tsk->tss.flags & SPARC_FLAG_32BIT)
+ pgd_cache = (unsigned long) mm->pgd[0];
+ else
+ pgd_cache = 0;
__asm__ __volatile__("
- rdpr %%pstate, %%o4
- wrpr %%o4, %1, %%pstate
+ rdpr %%pstate, %%o3
+ wrpr %%o3, %2, %%pstate
+ mov %4, %%g4
mov %0, %%g7
- wrpr %%o4, 0x0, %%pstate
+ stxa %1, [%%g4] %3
+ wrpr %%o3, 0x0, %%pstate
" : /* no outputs */
- : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE)
- : "o4");
+ : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_MG|PSTATE_IE),
+ "i" (ASI_DMMU), "i" (TSB_REG)
+ : "o3");
}
/*
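
The larger change sits in get_mmu_context() itself: besides reloading %g7 with the physical address of the pgd, it now primes TSB_REG (via the ASI_DMMU store) with pgd[0] for 32-bit tasks, and for an mm that has not yet run on this CPU with its current context (the cpu_vm_mask test) it flushes the I/D-TLB entries tagged with that secondary context. The following compilable sketch shows that decision flow only; the privileged register accesses are replaced by invented *_stub helpers, and the cpu_vm_mask and current_ds bookkeeping of the real code is simplified.

    #include <stdio.h>

    #define CTX_HW_MASK       0x3ffUL            /* hardware context field, as in the new code */
    #define CTX_VERSION_MASK  (~0UL << 10)

    struct mm_sketch {
            unsigned long context;                /* version | hw context number */
            unsigned long cpu_vm_mask;            /* CPUs this mm has run on with this context */
            unsigned long pgd_phys;
    };

    static unsigned long tlb_context_cache_sketch = (1UL << 10) + 1;

    /* Stand-ins for the privileged spitfire_*() accessors and the %g7/TSB_REG pokes. */
    static void set_secondary_context_stub(unsigned long hw) { printf("secondary ctx <- %#lx\n", hw); }
    static void flush_secondary_tlb_stub(void)               { printf("flush I/D-TLB (secondary ctx)\n"); }
    static void load_mmu_globals_stub(unsigned long pgd_pa, unsigned long pgd0)
    {
            printf("%%g7 <- %#lx, TSB_REG <- %#lx (MG globals, IE off)\n", pgd_pa, pgd0);
    }

    static void get_new_mmu_context_stub(struct mm_sketch *mm)
    {
            /* see the allocator sketch near the top of the diff */
            mm->context = (tlb_context_cache_sketch & CTX_VERSION_MASK) | 1;
            mm->cpu_vm_mask = 0;
    }

    static void get_mmu_context_sketch(struct mm_sketch *mm, int cpu,
                                       int kernel_thread, int task_is_32bit)
    {
            unsigned long hw_ctx = 0;             /* kernel threads keep context 0 */

            if (!kernel_thread) {
                    if ((mm->context ^ tlb_context_cache_sketch) & CTX_VERSION_MASK)
                            get_new_mmu_context_stub(mm);   /* stale version */

                    if (!(mm->cpu_vm_mask & (1UL << cpu))) {
                            /* First run on this CPU with this context number:
                             * evict whatever its previous owner left in the TLB. */
                            set_secondary_context_stub(mm->context & CTX_HW_MASK);
                            flush_secondary_tlb_stub();
                            mm->cpu_vm_mask |= 1UL << cpu;
                    }
                    hw_ctx = mm->context & CTX_HW_MASK;
            }
            set_secondary_context_stub(hw_ctx);
            /* 32-bit tasks also get pgd[0] cached in TSB_REG for the TLB miss handler. */
            load_mmu_globals_stub(mm->pgd_phys, task_is_32bit ? 0x1000UL /* fake pgd[0] */ : 0);
    }

    int main(void)
    {
            struct mm_sketch mm = { .context = 0, .cpu_vm_mask = 0, .pgd_phys = 0x123000 };
            get_mmu_context_sketch(&mm, 0, 0, 1);
            return 0;
    }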