Diffstat (limited to 'arch/mips/mm/r4xx0.c')
-rw-r--r-- | arch/mips/mm/r4xx0.c | 22
1 files changed, 11 insertions, 11 deletions
diff --git a/arch/mips/mm/r4xx0.c b/arch/mips/mm/r4xx0.c
index e1c3251f8..68be4e799 100644
--- a/arch/mips/mm/r4xx0.c
+++ b/arch/mips/mm/r4xx0.c
@@ -1,4 +1,4 @@
-/* $Id: r4xx0.c,v 1.23 1999/08/09 19:43:16 harald Exp $
+/* $Id: r4xx0.c,v 1.24 1999/09/28 22:25:48 ralf Exp $
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
@@ -1926,7 +1926,7 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
 	 * If the page isn't marked valid, the page cannot possibly be
 	 * in the cache.
 	 */
-	if(!(pte_val(*ptep) & _PAGE_PRESENT))
+	if (!(pte_val(*ptep) & _PAGE_PRESENT))
 		goto out;
 
 	text = (vma->vm_flags & VM_EXEC);
@@ -1936,7 +1936,7 @@ static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
 	 * for every cache flush operation. So we do indexed flushes
 	 * in that case, which doesn't overly flush the cache too much.
	 */
-	if((mm == current->mm) && (pte_val(*ptep) & _PAGE_VALID)) {
+	if((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
 		blast_dcache32_page(page);
 		if(text)
 			blast_icache32_page(page);
@@ -2394,7 +2394,7 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	pte_t *ptep;
 	int idx, pid;
 
-	pid = (get_entryhi() & 0xff);
+	pid = get_entryhi() & 0xff;
 
 #ifdef DEBUG_TLB
 	if((pid != (vma->vm_mm->context & 0xff)) || (vma->vm_mm->context == 0)) {
@@ -2516,7 +2516,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
 }
 
 /* Detect and size the various r4k caches. */
-__initfunc(static void probe_icache(unsigned long config))
+static void __init probe_icache(unsigned long config)
 {
 	icache_size = 1 << (12 + ((config >> 9) & 7));
 	ic_lsize = 16 << ((config >> 5) & 1);
@@ -2525,7 +2525,7 @@ __initfunc(static void probe_icache(unsigned long config))
 	       icache_size >> 10, ic_lsize);
 }
 
-__initfunc(static void probe_dcache(unsigned long config))
+static void __init probe_dcache(unsigned long config)
 {
 	dcache_size = 1 << (12 + ((config >> 6) & 7));
 	dc_lsize = 16 << ((config >> 4) & 1);
@@ -2540,7 +2540,7 @@ __initfunc(static void probe_dcache(unsigned long config))
  * the cache sizing loop that executes in KSEG1 space or else
 * you will crash and burn badly. You have been warned.
 */
-__initfunc(static int probe_scache(unsigned long config))
+static int __init probe_scache(unsigned long config)
 {
 	extern unsigned long stext;
 	unsigned long flags, addr, begin, end, pow2;
@@ -2624,7 +2624,7 @@ __initfunc(static int probe_scache(unsigned long config))
 	return 1;
 }
 
-__initfunc(static void setup_noscache_funcs(void))
+static void __init setup_noscache_funcs(void)
 {
 	unsigned int prid;
 
@@ -2662,7 +2662,7 @@ __initfunc(static void setup_noscache_funcs(void))
 	dma_cache_inv = r4k_dma_cache_inv_pc;
 }
 
-__initfunc(static void setup_scache_funcs(void))
+static void __init setup_scache_funcs(void)
 {
 	switch(sc_lsize) {
 	case 16:
@@ -2748,7 +2748,7 @@ __initfunc(static void setup_scache_funcs(void))
 
 typedef int (*probe_func_t)(unsigned long);
 
-__initfunc(static inline void setup_scache(unsigned int config))
+static inline void __init setup_scache(unsigned int config)
 {
 	probe_func_t probe_scache_kseg1;
 	int sc_present = 0;
@@ -2765,7 +2765,7 @@ __initfunc(static inline void setup_scache(unsigned int config))
 	setup_noscache_funcs();
 }
 
-__initfunc(void ld_mmu_r4xx0(void))
+void __init ld_mmu_r4xx0(void)
 {
 	unsigned long config = read_32bit_cp0_register(CP0_CONFIG);
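
Most of this patch is a mechanical conversion from the old __initfunc() wrapper macro to the __init attribute marker. Below is a minimal, self-contained sketch of the pattern; the stand-in __init definition, the section name and the example function name are illustrative only, not copied from this kernel tree.

/*
 * Stand-in for the kernel's __init marker: it places the function in a
 * dedicated section so init-only code can be discarded after boot.
 * The exact section name used by the kernel headers may differ; this
 * definition exists only so the sketch compiles on its own.
 */
#define __init __attribute__((__section__(".text.init")))

/* Old style: the whole declaration is wrapped in a macro, e.g.
 *   __initfunc(static void probe_icache(unsigned long config))
 */

/* New style, as applied throughout this patch: */
static void __init probe_icache_example(unsigned long config)
{
	/* I-cache size is encoded in bits 11:9 of the CP0 config register,
	 * the line size in bit 5, matching the expressions in the diff. */
	unsigned long icache_size = 1UL << (12 + ((config >> 9) & 7));
	unsigned long ic_lsize = 16 << ((config >> 5) & 1);

	(void)icache_size;
	(void)ic_lsize;
}

int main(void)
{
	probe_icache_example(0);
	return 0;
}

The one behavioural change, current->mm becoming current->active_mm in the page-flush path, is consistent with the lazy-TLB rework in the 2.3 series: a kernel thread may have a NULL mm, while active_mm always names the address space currently loaded on the CPU, which is what the hit-versus-indexed flush decision actually depends on.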