author    Kanoj Sarcar <kanoj@engr.sgi.com>  2000-04-22 18:31:40 +0000
committer Kanoj Sarcar <kanoj@engr.sgi.com>  2000-04-22 18:31:40 +0000
commit    dc91613ed65a1549d9ca21bf489c3f55f34f2dd6 (patch)
tree      f9880ba42e57d38c5c12da697e36bfe2b413889a /include/asm-mips64/mmu_context.h
parent    e107a626e4dd3fbcac5e5e63cafd3d7c1470dfe4 (diff)
Per cpu tlbpid (asid) management for SMP. The asid cache is now per
cpu, and stays in cpu_data[].
Diffstat (limited to 'include/asm-mips64/mmu_context.h')
-rw-r--r--  include/asm-mips64/mmu_context.h | 56
1 file changed, 45 insertions(+), 11 deletions(-)
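
Before the diff itself, a minimal user-space sketch of the scheme this patch introduces: each cpu keeps its own asid cache, get_new_cpu_mmu_context() hands out tlbpids per cpu, and the switch path compares version bits to spot a stale one. NR_CPUS_SKETCH, struct mm_sketch and the ASID_VERSION_MASK value below are illustrative assumptions for the sketch, not the kernel's own definitions.

/*
 * Sketch only: models per-cpu asid allocation and the version check.
 */
#include <stdio.h>

#define ASID_INC            0x1
#define ASID_MASK           0xff
/* assumption: the version lives in the bits above the 8-bit hardware asid */
#define ASID_VERSION_MASK   (~(unsigned long)ASID_MASK)
#define ASID_FIRST_VERSION  ((unsigned long)(~ASID_VERSION_MASK) + 1)
#define NR_CPUS_SKETCH      2

/* stand-in for cpu_data[cpu].asid_cache: one generation counter per cpu */
static unsigned long asid_cache[NR_CPUS_SKETCH];

/* stand-in for the per-cpu array that mm->context points at on SMP */
struct mm_sketch {
    unsigned long context[NR_CPUS_SKETCH];
};

static void flush_tlb_all_sketch(void)
{
    /* the real code flushes the TLB before recycling asids */
}

/* mirrors get_new_cpu_mmu_context(): hand out the next asid on one cpu */
static void get_new_cpu_mmu_context(struct mm_sketch *mm, unsigned long cpu)
{
    unsigned long asid = asid_cache[cpu];

    if (!((asid += ASID_INC) & ASID_MASK)) {    /* asids exhausted on this cpu */
        flush_tlb_all_sketch();                 /* start a new asid cycle */
        if (!asid)                              /* counter wrapped to zero */
            asid = ASID_FIRST_VERSION;          /* fix version if needed */
    }
    mm->context[cpu] = asid_cache[cpu] = asid;
}

/* mirrors the switch_mm() test: is this mm's asid from an older cycle? */
static int asid_is_stale(struct mm_sketch *mm, unsigned long cpu)
{
    return ((mm->context[cpu] ^ asid_cache[cpu]) & ASID_VERSION_MASK) != 0;
}

int main(void)
{
    struct mm_sketch a = { { 0 } }, b = { { 0 } };
    int i;

    for (i = 0; i < NR_CPUS_SKETCH; i++)
        asid_cache[i] = ASID_FIRST_VERSION;

    get_new_cpu_mmu_context(&a, 0);     /* a gets the first asid on cpu 0 */

    for (i = 0; i < 0x100; i++)         /* other mms exhaust cpu 0's asids */
        get_new_cpu_mmu_context(&b, 0);

    /* cpu 0 moved to a newer cycle, so a must take a fresh asid there */
    printf("a stale on cpu0: %d, b stale on cpu0: %d\n",
           asid_is_stale(&a, 0), asid_is_stale(&b, 0));
    return 0;
}

Because each cpu advances its generation counter independently, an mm's tlbpid has to be tracked per cpu as well, which is why mm->context becomes a per-cpu array under CONFIG_SMP in the diff below.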
diff --git a/include/asm-mips64/mmu_context.h b/include/asm-mips64/mmu_context.h
index 4592f685f..30ee6a9d4 100644
--- a/include/asm-mips64/mmu_context.h
+++ b/include/asm-mips64/mmu_context.h
@@ -12,10 +12,17 @@
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H
+#include <linux/config.h>
+#include <linux/slab.h>
#include <asm/pgalloc.h>
+#include <asm/processor.h>
-/* Fuck. The f-word is here so you can grep for it :-) */
-extern unsigned long asid_cache;
+#ifndef CONFIG_SMP
+#define CPU_CONTEXT(cpu, mm) (mm)->context
+#else
+#define CPU_CONTEXT(cpu, mm) (*((unsigned long *)((mm)->context) + cpu))
+#endif
+#define ASID_CACHE(cpu) cpu_data[cpu].asid_cache
#define ASID_INC 0x1
#define ASID_MASK 0xff
@@ -32,16 +39,25 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk,
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
extern inline void
-get_new_mmu_context(struct mm_struct *mm)
+get_new_cpu_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
- unsigned long asid = asid_cache;
+ unsigned long asid = ASID_CACHE(cpu);
if (! ((asid += ASID_INC) & ASID_MASK) ) {
flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION;
}
- mm->context = asid_cache = asid;
+ CPU_CONTEXT(cpu, mm) = ASID_CACHE(cpu) = asid;
+}
+
+extern inline void
+get_new_mmu_context(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ memset((void *)mm->context, 0, smp_num_cpus * sizeof(unsigned long));
+#endif
+ get_new_cpu_mmu_context(mm, smp_processor_id());
}
/*
@@ -51,17 +67,32 @@ get_new_mmu_context(struct mm_struct *mm)
extern inline void
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+#ifndef CONFIG_SMP
mm->context = 0;
+#else
+ /* this allocation can be moved to copy_thread */
+ mm->context = (unsigned long)kmalloc(smp_num_cpus *
+ sizeof(unsigned long), GFP_KERNEL);
+ /*
+ * Init the "context" values so that a tlbpid allocation
+ * happens on the first switch.
+ */
+ if (mm->context)
+ memset((void *)mm->context, 0, smp_num_cpus *
+ sizeof(unsigned long));
+ else
+ printk("Warning: init_new_context failed\n");
+#endif
}
extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk, unsigned cpu)
{
/* Check if our ASID is of an older version and thus invalid */
- if ((next->context ^ asid_cache) & ASID_VERSION_MASK)
- get_new_mmu_context(next);
+ if ((CPU_CONTEXT(cpu, next) ^ ASID_CACHE(cpu)) & ASID_VERSION_MASK)
+ get_new_cpu_mmu_context(next, cpu);
- set_entryhi(next->context);
+ set_entryhi(CPU_CONTEXT(cpu, next));
}
/*
@@ -70,7 +101,10 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
extern inline void destroy_context(struct mm_struct *mm)
{
- /* Nothing to do. */
+#ifdef CONFIG_SMP
+ if (mm->context)
+ kfree((void *)mm->context);
+#endif
}
/*
@@ -81,9 +115,9 @@ extern inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/* Unconditionally get a new ASID. */
- get_new_mmu_context(next);
+ get_new_cpu_mmu_context(next, smp_processor_id());
- set_entryhi(next->context);
+ set_entryhi(CPU_CONTEXT(smp_processor_id(), next));
}
#endif /* _ASM_MMU_CONTEXT_H */
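
For completeness, a hedged user-space sketch of the per-cpu context-array lifecycle that the CONFIG_SMP branches of init_new_context() and destroy_context() add above; malloc/free stand in for kmalloc/kfree, a fixed num_cpus stands in for smp_num_cpus, and all names are illustrative.

/*
 * Sketch only: the per-cpu context-array lifecycle added for CONFIG_SMP.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int num_cpus = 4;

struct mm_ctx_sketch {
    unsigned long context;  /* on SMP this holds a pointer to a per-cpu asid array */
};

/* mirrors the SMP branch of init_new_context() */
static int init_new_context_sketch(struct mm_ctx_sketch *mm)
{
    mm->context = (unsigned long)malloc(num_cpus * sizeof(unsigned long));
    if (!mm->context) {
        fprintf(stderr, "Warning: init_new_context failed\n");
        return -1;
    }
    /* zero every slot so the first switch on any cpu allocates a tlbpid */
    memset((void *)mm->context, 0, num_cpus * sizeof(unsigned long));
    return 0;
}

/* mirrors the SMP branch of destroy_context() */
static void destroy_context_sketch(struct mm_ctx_sketch *mm)
{
    if (mm->context)
        free((void *)mm->context);
    mm->context = 0;
}

int main(void)
{
    struct mm_ctx_sketch mm = { 0 };

    if (init_new_context_sketch(&mm) == 0) {
        unsigned long *ctx = (unsigned long *)mm.context;
        printf("cpu0 slot before first switch: %#lx\n", ctx[0]);  /* prints 0 */
        destroy_context_sketch(&mm);
    }
    return 0;
}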