author		Ralf Baechle <ralf@linux-mips.org>	2000-07-31 03:39:50 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-07-31 03:39:50 +0000
commit		17d285e537c498cf18ce79a52ca553ea964f389b
tree		3905f77168a7a6d92c68bbd47b5ddae2664fe203 /arch/mips/mm
parent		ce36512304b7cb5e1a911829b249e403261263ac
Shave off 50% of lat_mmap.  Our cache routines were plain stupid.
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--	arch/mips/mm/andes.c	|   7
-rw-r--r--	arch/mips/mm/loadmmu.c	|   2
-rw-r--r--	arch/mips/mm/r2300.c	|  21
-rw-r--r--	arch/mips/mm/r4xx0.c	| 116
4 files changed, 84 insertions, 62 deletions
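The speedup comes from splitting instruction-cache maintenance out of the generic flush path: the r4xx0.c hunks below delete the VM_EXEC-conditional icache blasts from every flush_cache_page() variant, and a new _flush_icache_page hook takes over that work, touching the icache only for executable mappings.  A minimal sketch of the resulting call pattern, assuming a hypothetical generic caller (only the two hook pointers are from this commit):

	/*
	 * Sketch, not part of the commit: a stand-in for the generic
	 * fault path.  _flush_page_to_ram and _flush_icache_page are the
	 * real hooks from loadmmu.c; fault_path_sketch() is hypothetical.
	 */
	extern void (*_flush_page_to_ram)(struct page *page);
	extern void (*_flush_icache_page)(struct vm_area_struct *vma,
	                                  struct page *page, unsigned long addr);

	static void fault_path_sketch(struct vm_area_struct *vma,
	                              struct page *page, unsigned long addr)
	{
		_flush_page_to_ram(page);            /* dcache work, as before */
		_flush_icache_page(vma, page, addr); /* per-CPU hooks bail out
		                                        unless VM_EXEC is set */
	}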
diff --git a/arch/mips/mm/andes.c b/arch/mips/mm/andes.c
index 631d6f002..5e5cb4768 100644
--- a/arch/mips/mm/andes.c
+++ b/arch/mips/mm/andes.c
@@ -119,6 +119,12 @@ static void andes_flush_page_to_ram(struct page * page)
/* XXX */
}
+static void andes_flush_icache_page(struct vm_area_struct *vma,
+ struct page *page, unsigned long addr)
+{
+ /* XXX */
+}
+
static void andes_flush_cache_sigtramp(unsigned long page)
{
/* XXX */
@@ -171,6 +177,7 @@ void __init ld_mmu_andes(void)
_flush_cache_page = andes_flush_cache_page;
_flush_cache_sigtramp = andes_flush_cache_sigtramp;
_flush_page_to_ram = andes_flush_page_to_ram;
+ _flush_icache_page = andes_flush_icache_page;
flush_cache_all();
flush_tlb_all();
diff --git a/arch/mips/mm/loadmmu.c b/arch/mips/mm/loadmmu.c
index 6a1ad94ae..69a5e2de5 100644
--- a/arch/mips/mm/loadmmu.c
+++ b/arch/mips/mm/loadmmu.c
@@ -28,6 +28,8 @@ void (*_flush_cache_range)(struct mm_struct *mm, unsigned long start,
void (*_flush_cache_page)(struct vm_area_struct *vma, unsigned long page);
void (*_flush_cache_sigtramp)(unsigned long addr);
void (*_flush_page_to_ram)(struct page * page);
+void (*_flush_icache_page)(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr);
/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
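loadmmu.c is only the dispatch table: one function pointer per cache operation, filled in at boot by the CPU family's ld_mmu_*() routine.  The rest of the kernel would reach the new hook through a thin wrapper; a hedged sketch, since the wrapper is outside this diff and its name is an assumption:

	/* Assumed wrapper; only the _flush_icache_page pointer is from the
	 * hunk above, the macro itself is not shown in this diff. */
	#define flush_icache_page(vma, page, addr) \
		_flush_icache_page(vma, page, addr)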
diff --git a/arch/mips/mm/r2300.c b/arch/mips/mm/r2300.c
index 690e3698f..51bf703cd 100644
--- a/arch/mips/mm/r2300.c
+++ b/arch/mips/mm/r2300.c
@@ -360,6 +360,26 @@ static void r3k_flush_page_to_ram(struct page * page)
*/
}
+static void r3k_flush_icache_page(struct vm_area_struct *vma,
+				   struct page *page, unsigned long address)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long physpage;
+
+	if (mm->context == 0)
+		return;
+
+	if (!(vma->vm_flags & VM_EXEC))
+		return;
+
+#ifdef DEBUG_CACHE
+	printk("cpage[%d,%08lx]", (int)mm->context, address);
+#endif
+
+	if ((physpage = get_phys_page(address, vma->vm_mm)))
+		r3k_flush_icache_range(physpage, PAGE_SIZE);
+}
+
static void r3k_flush_cache_sigtramp(unsigned long addr)
{
unsigned long flags;
@@ -651,6 +671,7 @@ void __init ld_mmu_r2300(void)
_flush_cache_page = r3k_flush_cache_page;
_flush_cache_sigtramp = r3k_flush_cache_sigtramp;
_flush_page_to_ram = r3k_flush_page_to_ram;
+ _flush_icache_page = r3k_flush_icache_page;
_dma_cache_wback_inv = r3k_dma_cache_wback_inv;
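Note that r3k_flush_icache_page() works on physical addresses: it translates the faulting address with get_phys_page() and flushes one page worth of icache at the result.  get_phys_page() itself is outside this diff; a hedged reconstruction of what such a helper would do, modeled on the pgd/pmd/pte walks visible in the r4xx0.c hunks below:

	/* Hypothetical reconstruction of get_phys_page(); only the walk
	 * pattern is grounded in this diff.  Returns the physical address,
	 * or 0 if the PTE is not valid. */
	static unsigned long get_phys_page_sketch(unsigned long addr,
	                                          struct mm_struct *mm)
	{
		pgd_t *pgdp = pgd_offset(mm, addr);
		pmd_t *pmdp = pmd_offset(pgdp, addr);
		pte_t *ptep = pte_offset(pmdp, addr);

		if (!(pte_val(*ptep) & _PAGE_VALID))
			return 0;

		return pte_val(*ptep) & PAGE_MASK;
	}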
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index 0e851688a..0446ec7b8 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -1390,7 +1390,6 @@ static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1415,20 +1414,19 @@ static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache16_page_indexed(page);
} else
blast_scache16_page(page);
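A note on the indexed-flush trick above: "hit"-type cache ops take a virtual address, so the CPU performs a TLB translation for each one, and for a foreign ASID the kernel may not hold a usable mapping.  "Index"-type ops only decode the low address bits that select a cache line, so rebasing those bits into KSEG0 (unmapped kernel space, always translatable) yields a safe alias that lands on the same lines.  A standalone illustration of the arithmetic, with an assumed cache size:

	#include <stdio.h>

	#define KSEG0       0x80000000UL  /* MIPS unmapped, cached segment */
	#define SCACHE_SIZE 0x100000UL    /* assumption: 1 MB, direct-mapped */

	/* Same computation as "page = (KSEG0 + (page & (scache_size - 1)))"
	 * in the hunk above: keep the line-index bits, rebase into KSEG0. */
	static unsigned long index_alias(unsigned long vaddr)
	{
		return KSEG0 + (vaddr & (SCACHE_SIZE - 1));
	}

	int main(void)
	{
		unsigned long user_page = 0x2ab3c000UL;  /* arbitrary example */
		printf("indexed flush via %#lx\n", index_alias(user_page));
		return 0;
	}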
@@ -1444,7 +1442,6 @@ static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1468,20 +1465,19 @@ static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache32_page_indexed(page);
} else
blast_scache32_page(page);
@@ -1497,7 +1493,6 @@ static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1521,7 +1516,6 @@ static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1529,13 +1523,12 @@ static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
* in that case, which doesn't overly flush the cache too much.
*/
if (mm->context != current->active_mm->context) {
- /* Do indexed flush, too much work to get the (possible)
+ /*
+ * Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache64_page_indexed(page);
} else
blast_scache64_page(page);
@@ -1551,7 +1544,6 @@ static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1576,8 +1568,8 @@ static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
- /* Doing flushes for another ASID than the current one is
+ /*
+ * Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
* for every cache flush operation. So we do indexed flushes
* in that case, which doesn't overly flush the cache too much.
@@ -1589,8 +1581,6 @@ static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
blast_scache128_page_indexed(page);
} else
blast_scache128_page(page);
@@ -1606,7 +1596,6 @@ static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1631,7 +1620,6 @@ static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1645,8 +1633,6 @@ static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache32_page_indexed(page);
} else
blast_scache32_page(page);
@@ -1662,7 +1648,6 @@ static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1687,7 +1672,6 @@ static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1701,8 +1685,6 @@ static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache64_page_indexed(page);
} else
blast_scache64_page(page);
@@ -1718,7 +1700,6 @@ static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1736,13 +1717,13 @@ static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
pmdp = pmd_offset(pgdp, page);
ptep = pte_offset(pmdp, page);
- /* If the page isn't marked valid, the page cannot possibly be
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1755,8 +1736,6 @@ static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (scache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
blast_scache128_page_indexed(page);
} else
blast_scache128_page(page);
@@ -1772,7 +1751,6 @@ static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1790,13 +1768,13 @@ static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
pmdp = pmd_offset(pgdp, page);
ptep = pte_offset(pmdp, page);
- /* If the page isn't marked valid, the page cannot possibly be
+ /*
+ * If the page isn't marked valid, the page cannot possibly be
* in the cache.
*/
if (!(pte_val(*ptep) & _PAGE_VALID))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1805,16 +1783,12 @@ static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
*/
if (mm == current->active_mm) {
blast_dcache16_page(page);
- if(text)
- blast_icache16_page(page);
} else {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
*/
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache16_page_indexed(page);
- if(text)
- blast_icache16_page_indexed(page);
}
out:
restore_flags(flags);
@@ -1828,7 +1802,6 @@ static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1853,7 +1826,6 @@ static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_PRESENT))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1862,8 +1834,6 @@ static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
blast_dcache32_page(page);
- if(text)
- blast_icache32_page(page);
} else {
/*
* Do indexed flush, too much work to get the (possible)
@@ -1871,8 +1841,6 @@ static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
*/
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache32_page_indexed(page);
- if(text)
- blast_icache32_page_indexed(page);
}
out:
restore_flags(flags);
@@ -1886,7 +1854,6 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- int text;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -1911,7 +1878,6 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
if (!(pte_val(*ptep) & _PAGE_PRESENT))
goto out;
- text = (vma->vm_flags & VM_EXEC);
/*
* Doing flushes for another ASID than the current one is
* too difficult since stupid R4k caches do a TLB translation
@@ -1920,8 +1886,6 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
*/
if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
blast_dcache32_page(page);
- if(text)
- blast_icache32_page(page);
} else {
/* Do indexed flush, too much work to get the (possible)
* tlb refills to work correctly.
@@ -1929,10 +1893,6 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
page = (KSEG0 + (page & (dcache_size - 1)));
blast_dcache32_page_indexed(page);
blast_dcache32_page_indexed(page ^ dcache_waybit);
- if(text) {
- blast_icache32_page_indexed(page);
- blast_icache32_page_indexed(page ^ icache_waybit);
- }
}
out:
restore_flags(flags);
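The r4600 variant differs from plain d32i32 because the R4600 primary caches are two-way set associative and an indexed op touches only one way, selected by a single address bit.  Hence every indexed flush is issued twice, once with the way bit toggled.  A minimal restatement (blast_dcache32_page_indexed and dcache_waybit are from the hunk above; the helper is illustrative):

	static void flush_both_ways_sketch(unsigned long page)
	{
		blast_dcache32_page_indexed(page);                  /* way 0 */
		blast_dcache32_page_indexed(page ^ dcache_waybit);  /* way 1 */
	}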
@@ -1986,6 +1946,35 @@ static void r4k_flush_page_to_ram_d32_r4600(struct page *page)
__restore_flags(flags);
}
+static void
+r4k_flush_icache_page_s(struct vm_area_struct *vma, struct page *page,
+ unsigned long address)
+{
+ /*
+ * We did an scache flush therefore PI is already clean.
+ */
+}
+
+static void
+r4k_flush_icache_page_i16(struct vm_area_struct *vma, struct page *page,
+ unsigned long address)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ blast_icache16_page(address);
+}
+
+static void
+r4k_flush_icache_page_i32(struct vm_area_struct *vma, struct page *page,
+ unsigned long address)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ blast_icache32_page(address);
+}
+
/*
* Writeback and invalidate the primary cache dcache before DMA.
*
@@ -2289,12 +2278,6 @@ void pgd_init(unsigned long page)
}
}
-#ifdef DEBUG_TLBUPDATE
-static unsigned long ehi_debug[NTLB_ENTRIES];
-static unsigned long el0_debug[NTLB_ENTRIES];
-static unsigned long el1_debug[NTLB_ENTRIES];
-#endif
-
/* We will need multiple versions of update_mmu_cache(), one that just
* updates the TLB with the new pte(s), and another which also checks
* for the R4k "end of page" hardware bug and does the needy.
@@ -2579,6 +2562,16 @@ static void __init setup_noscache_funcs(void)
_flush_cache_page = r4k_flush_cache_page_d32i32;
break;
}
+
+	switch(ic_lsize) {
+	case 16:
+		_flush_icache_page = r4k_flush_icache_page_i16;
+		break;
+	case 32:
+		_flush_icache_page = r4k_flush_icache_page_i32;
+		break;
+	}
+
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_pc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_pc;
@@ -2660,6 +2651,7 @@ static void __init setup_scache_funcs(void)
_copy_page = r4k_copy_page_s128;
break;
}
+ _flush_icache_page = r4k_flush_icache_page_s;
_dma_cache_wback_inv = r4k_dma_cache_wback_inv_sc;
_dma_cache_wback = r4k_dma_cache_wback;
_dma_cache_inv = r4k_dma_cache_inv_sc;
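Taken together, the two setup paths cover both configurations: with a secondary cache the icache hook is the no-op r4k_flush_icache_page_s, because the scache flush already left the primary icache clean (as its comment says); without one, an i16 or i32 primary flush is installed according to ic_lsize.  A hedged sketch of the boot-time selection, which happens in ld_mmu_r4xx0() outside this diff (the scache_size test is an assumption about the probe result):

	void __init ld_mmu_selection_sketch(void)
	{
		if (scache_size)
			setup_scache_funcs();    /* installs r4k_flush_icache_page_s */
		else
			setup_noscache_funcs();  /* installs _i16 or _i32 by ic_lsize */
	}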