author    Kanoj Sarcar <kanoj@engr.sgi.com>  2000-07-30 01:04:19 +0000
committer Kanoj Sarcar <kanoj@engr.sgi.com>  2000-07-30 01:04:19 +0000
commit    fa112d88a44ee84f7ca58a942c95d10a1a82d80f (patch)
tree      07aa3eeaa1d8891587c01c6014954770b7de7201 /arch
parent    51659931cea4391fe3fe09adc5faa3abf81ac092 (diff)
Smarter and more scalable tlb flushing routines. No need to interrupt
other cpus when flushing tlbs for single threaded mm's. Rather, just
make sure the mm will have to do a new context allocation if it runs
on the other cpus.
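
The "new context allocation" happens on the switch_mm path: each cpu tags TLB entries with a per-mm, per-cpu ASID, and switch_mm hands out a fresh one whenever the stored context fails its version check. A rough sketch of that side of the mechanism (CPU_CONTEXT() is the macro the patch uses; ASID_CACHE(), ASID_VERSION_MASK and get_new_cpu_mmu_context() are assumed names for the mips64 mmu_context helpers, shown for illustration only, not taken from this patch):

/* Illustrative sketch, not part of this patch. */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk, unsigned cpu)
{
        /*
         * A context of 0 (or one from an older ASID generation) fails
         * this check, so the mm gets a fresh context before it runs.
         */
        if ((CPU_CONTEXT(cpu, next) ^ ASID_CACHE(cpu)) & ASID_VERSION_MASK)
                get_new_cpu_mmu_context(next, cpu);

        /* ... the new ASID is then loaded into the EntryHi register ... */
}

Zeroing CPU_CONTEXT(i, mm) on the other cpus therefore guarantees the mm cannot be entered there under its old ASID, which is why no interprocessor interrupt is needed for a single threaded mm.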
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips64/kernel/smp.c  |  55
1 file changed, 43 insertions(+), 12 deletions(-)
diff --git a/arch/mips64/kernel/smp.c b/arch/mips64/kernel/smp.c
index c36337564..c72d46276 100644
--- a/arch/mips64/kernel/smp.c
+++ b/arch/mips64/kernel/smp.c
@@ -205,9 +205,26 @@ static void flush_tlb_mm_ipi(void *mm)
         _flush_tlb_mm((struct mm_struct *)mm);
 }
+/*
+ * The following tlb flush calls are invoked when old translations are
+ * being torn down, or pte attributes are changing. For single threaded
+ * address spaces, a new context is obtained on the current cpu, and tlb
+ * context on other cpus are invalidated to force a new context allocation
+ * at switch_mm time, should the mm ever be used on other cpus. For
+ * multithreaded address spaces, intercpu interrupts have to be sent.
+ * Kanoj 07/00.
+ */
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+        if (atomic_read(&mm->mm_users) != 1) {
+                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
+        } else {
+                int i;
+                for (i = 0; i < smp_num_cpus; i++)
+                        if (smp_processor_id() != i)
+                                CPU_CONTEXT(i, mm) = 0;
+        }
         _flush_tlb_mm(mm);
 }
@@ -227,12 +244,19 @@ static void flush_tlb_range_ipi(void *info)
 void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 {
-        struct flush_tlb_data fd;
-
-        fd.mm = mm;
-        fd.addr1 = start;
-        fd.addr2 = end;
-        smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+        if (atomic_read(&mm->mm_users) != 1) {
+                struct flush_tlb_data fd;
+
+                fd.mm = mm;
+                fd.addr1 = start;
+                fd.addr2 = end;
+                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
+        } else {
+                int i;
+                for (i = 0; i < smp_num_cpus; i++)
+                        if (smp_processor_id() != i)
+                                CPU_CONTEXT(i, mm) = 0;
+        }
         _flush_tlb_range(mm, start, end);
 }
@@ -245,11 +269,18 @@ static void flush_tlb_page_ipi(void *info)
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
-        struct flush_tlb_data fd;
-
-        fd.vma = vma;
-        fd.addr1 = page;
-        smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+        if (atomic_read(&vma->vm_mm->mm_users) != 1) {
+                struct flush_tlb_data fd;
+
+                fd.vma = vma;
+                fd.addr1 = page;
+                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
+        } else {
+                int i;
+                for (i = 0; i < smp_num_cpus; i++)
+                        if (smp_processor_id() != i)
+                                CPU_CONTEXT(i, vma->vm_mm) = 0;
+        }
         _flush_tlb_page(vma, page);
 }
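
The comment block added in the first hunk describes the scheme in prose. The following self-contained toy model (plain user-space C, illustration only; toy_mm, toy_switch_mm, toy_flush_tlb_mm, asid_cache and NCPUS are made-up names, not kernel code) shows how clearing the per-cpu context on a flush forces a fresh allocation the next time the mm runs on that cpu:

/* Toy model of the lazy per-cpu context scheme. Illustration only. */
#include <stdio.h>

#define NCPUS 4

struct toy_mm {
        unsigned long context[NCPUS];           /* 0 means "must reallocate" */
};

static unsigned long asid_cache[NCPUS];         /* last context handed out per cpu */

static void toy_switch_mm(struct toy_mm *mm, int cpu)
{
        if (mm->context[cpu] == 0)              /* never used here, or flushed */
                mm->context[cpu] = ++asid_cache[cpu];
        printf("cpu%d: mm runs with context %lu\n", cpu, mm->context[cpu]);
}

/* The single threaded fast path from the patch: no IPIs, just make sure
 * every other cpu reallocates before it can run this mm again. */
static void toy_flush_tlb_mm(struct toy_mm *mm, int this_cpu)
{
        int i;

        for (i = 0; i < NCPUS; i++)
                if (i != this_cpu)
                        mm->context[i] = 0;
}

int main(void)
{
        struct toy_mm mm = { { 0 } };

        toy_switch_mm(&mm, 0);          /* context 1 on cpu0 */
        toy_switch_mm(&mm, 1);          /* context 1 on cpu1 */
        toy_flush_tlb_mm(&mm, 0);       /* flush while running on cpu0 */
        toy_switch_mm(&mm, 1);          /* cpu1 is forced to take context 2 */
        return 0;
}

Compiled and run, this prints context 1 for the first uses on cpu0 and cpu1, and context 2 when cpu1 switches the mm back in after the flush: the lazy equivalent of having flushed cpu1's TLB for that mm.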