path: root/include/asm-sparc64/mmu_context.h
author    Ralf Baechle <ralf@linux-mips.org>  1999-01-04 16:03:48 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1999-01-04 16:03:48 +0000
commit    78c388aed2b7184182c08428db1de6c872d815f5
tree      4b2003b1b4ceb241a17faa995da8dd1004bb8e45
parent    eb7a5bf93aaa4be1d7c6181100ab7639e74d67f7
Merge with Linux 2.1.131 and more MIPS goodies.
(Did I mention that CVS is buggy ...)
Diffstat (limited to 'include/asm-sparc64/mmu_context.h')
-rw-r--r--  include/asm-sparc64/mmu_context.h | 70 +++++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 53 insertions(+), 17 deletions(-)
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 1d8a9f7ee..7fa368638 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.26 1998/07/31 10:42:38 jj Exp $ */
+/* $Id: mmu_context.h,v 1.32 1998/10/13 14:03:52 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -25,25 +25,47 @@ extern void get_new_mmu_context(struct mm_struct *mm);
/* Initialize/destroy the context related info for a new mm_struct
* instance.
*/
-#define init_new_context(mm) ((mm)->context = NO_CONTEXT)
-#define destroy_context(mm) do { \
- if ((mm)->context != NO_CONTEXT) { \
+#define init_new_context(__mm) ((__mm)->context = NO_CONTEXT)
+
+/* Kernel threads like rpciod and nfsd drop their mm and then use
+ * init_mm.  When this happens we must make sure that tsk->tss.ctx
+ * is updated as well.  Otherwise we have disasters relating to
+ * set_fs/get_fs usage later on.
+ *
+ * Also we can only clear the mmu_context_bmap bit when this is
+ * the final reference to the address space.
+ */
+#define destroy_context(__mm) do { \
+ if ((__mm)->context != NO_CONTEXT && \
+ atomic_read(&(__mm)->count) == 1) { \
spin_lock(&scheduler_lock); \
- if (!(((mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK)) \
- clear_bit((mm)->context & ~(CTX_VERSION_MASK), \
+ if (!(((__mm)->context ^ tlb_context_cache) & CTX_VERSION_MASK))\
+ clear_bit((__mm)->context & ~(CTX_VERSION_MASK), \
mmu_context_bmap); \
spin_unlock(&scheduler_lock); \
- (mm)->context = NO_CONTEXT; \
+ (__mm)->context = NO_CONTEXT; \
+ if(current->mm == (__mm)) { \
+ current->tss.ctx = 0; \
+ spitfire_set_secondary_context(0); \
+ __asm__ __volatile__("flush %g6"); \
+ } \
} \
} while (0)
-extern __inline__ void get_mmu_context(struct task_struct *tsk)
+/* This routine must be called with interrupts off, which is
+ * necessary to guarantee that the relationship between
+ * current->tss.ctx and the CPU's secondary context register is
+ * maintained when traps can happen.
+ *
+ * Also the caller must flush the current set of user windows
+ * to the stack (if necessary) before we get here.
+ */
+extern __inline__ void __get_mmu_context(struct task_struct *tsk)
{
register unsigned long paddr asm("o5");
register unsigned long pgd_cache asm("o4");
struct mm_struct *mm = tsk->mm;
- flushw_user();
if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) &&
!(tsk->flags & PF_EXITING)) {
unsigned long ctx = tlb_context_cache;
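
Editor's note: the key change in this hunk is that destroy_context() now
recycles a context number only on the final reference to the address space
(atomic_read(&(__mm)->count) == 1), and also resets current->tss.ctx and the
secondary context register when the mm being destroyed is the one currently
installed. Below is a minimal user-space sketch of the guard logic; mock_mm,
the bitmap helpers, and the CTX_VERSION_MASK value are hypothetical
stand-ins for illustration, not the kernel API.

#include <stdio.h>

#define NO_CONTEXT        (~0UL)
#define CTX_VERSION_MASK  0xffffffffffe00000UL  /* assumed value, illustration only */

/* Hypothetical stand-in for struct mm_struct. */
struct mock_mm {
	unsigned long context;
	int count;                        /* models atomic_read(&mm->count) */
};

static unsigned long tlb_context_cache;
static unsigned long mmu_context_bmap[64];        /* one bit per context number */

static void clear_ctx_bit(unsigned long nr)
{
	mmu_context_bmap[nr / 64] &= ~(1UL << (nr % 64));
}

/* Mirrors the guarded shape of the new destroy_context(): only the final
 * reference may recycle the context number, and only a context from the
 * current TLB version generation still owns a bitmap bit. */
static void mock_destroy_context(struct mock_mm *mm)
{
	if (mm->context != NO_CONTEXT && mm->count == 1) {
		if (!((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK))
			clear_ctx_bit(mm->context & ~CTX_VERSION_MASK);
		mm->context = NO_CONTEXT;
		/* In the kernel this is also the point where tss.ctx and
		 * the secondary context register get reset to context 0. */
	}
}

int main(void)
{
	struct mock_mm mm = { .context = 5, .count = 2 };

	mmu_context_bmap[0] = 1UL << 5;
	mock_destroy_context(&mm);        /* count == 2: bit must survive */
	printf("bmap after first call:  %#lx\n", mmu_context_bmap[0]);

	mm.count = 1;
	mock_destroy_context(&mm);        /* final reference: bit is cleared */
	printf("bmap after second call: %#lx\n", mmu_context_bmap[0]);
	return 0;
}
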
@@ -65,28 +87,42 @@ extern __inline__ void get_mmu_context(struct task_struct *tsk)
spitfire_set_secondary_context(tsk->tss.ctx);
__asm__ __volatile__("flush %g6");
paddr = __pa(mm->pgd);
- if(tsk->tss.flags & SPARC_FLAG_32BIT)
+ if((tsk->tss.flags & (SPARC_FLAG_32BIT|SPARC_FLAG_KTHREAD)) ==
+ (SPARC_FLAG_32BIT))
pgd_cache = (unsigned long) mm->pgd[0];
else
pgd_cache = 0;
__asm__ __volatile__("
- rdpr %%pstate, %%o3
- wrpr %%o3, %2, %%pstate
+ rdpr %%pstate, %%o2
+ andn %%o2, %2, %%o3
+ wrpr %%o3, %5, %%pstate
mov %4, %%g4
mov %0, %%g7
stxa %1, [%%g4] %3
- wrpr %%o3, 0x0, %%pstate
+ wrpr %%o2, 0x0, %%pstate
" : /* no outputs */
- : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_MG|PSTATE_IE),
- "i" (ASI_DMMU), "i" (TSB_REG)
- : "o3");
+ : "r" (paddr), "r" (pgd_cache), "i" (PSTATE_IE),
+ "i" (ASI_DMMU), "i" (TSB_REG), "i" (PSTATE_MG)
+ : "o2", "o3");
}
+/* Now we define this as a do-nothing macro, because the only
+ * generic user right now is the scheduler, and we handle all
+ * the atomicity issues by having switch_to() call the above
+ * function itself.
+ */
+#define get_mmu_context(x) do { } while(0)
+
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
-#define activate_context(tsk) get_mmu_context(tsk)
+#define activate_context(__tsk) \
+do { flushw_user(); \
+ spin_lock(&scheduler_lock); \
+ __get_mmu_context(__tsk); \
+ spin_unlock(&scheduler_lock); \
+} while(0)
#endif /* !(__ASSEMBLY__) */
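
Editor's note: two things change in this final hunk. First, the inline
assembly now keeps an untouched copy of %pstate in %o2 and clears PSTATE_IE
with andn before XOR-ing PSTATE_MG in, instead of XOR-toggling both bits at
once; restoring the saved copy leaves interrupts exactly as they were, and
toggling rather than clearing IE could have re-enabled interrupts inside the
critical sequence if they were already off on entry. Second,
get_mmu_context() becomes a do-nothing macro because switch_to() now calls
__get_mmu_context() directly, and activate_context() supplies the
environment the function requires: user windows flushed first, then the
install performed under scheduler_lock. A user-space sketch of that
flush-then-lock-then-install pattern follows, using a pthread mutex and
hypothetical mock_* names in place of the kernel primitives.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins: in the kernel the lock is scheduler_lock and the
 * protected step is __get_mmu_context(), which must update tss.ctx and the
 * secondary context register as one unit. */
static pthread_mutex_t scheduler_lock = PTHREAD_MUTEX_INITIALIZER;

struct mock_task {
	unsigned long ctx;                /* models tsk->tss.ctx */
};

static unsigned long secondary_context;  /* models the CPU register */

static void mock_flushw_user(void)
{
	/* Placeholder: on sparc64 this spills user register windows to the
	 * stack and may fault, so it must run BEFORE the lock is taken
	 * (before interrupts go off, in the kernel). */
}

static void mock_get_mmu_context(struct mock_task *tsk)
{
	/* The task's view and the "register" must change together; that is
	 * the atomicity the critical section provides. */
	secondary_context = tsk->ctx;
}

/* Mirrors the shape of the new activate_context(): window flush outside
 * the critical section, context install inside it. */
static void mock_activate_context(struct mock_task *tsk)
{
	mock_flushw_user();
	pthread_mutex_lock(&scheduler_lock);
	mock_get_mmu_context(tsk);
	pthread_mutex_unlock(&scheduler_lock);
}

int main(void)
{
	struct mock_task tsk = { .ctx = 42 };

	mock_activate_context(&tsk);
	printf("secondary context now %lu\n", secondary_context);
	return 0;
}
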