From e3f4987b05b99df724313e4906971e0c2392e9d5 Mon Sep 17 00:00:00 2001
From: Ulf Carlsson <ulfc@engr.sgi.com>
Date: Thu, 25 May 2000 19:33:16 +0000
Subject: First cut of TLB handlers in assembler. I'm not using the context
 register as it should be used, but let's look into that later.

If there is a problem with the code it will crash right after freeing
unused kernel memory. I have tested this code on both UP and SMP, though.
---
 arch/mips64/kernel/r4k_tlb.S     | 217 +++++++++++++++++++++++++++++++++++++++
 arch/mips64/kernel/traps.c       |  20 ++--
 include/asm-mips64/mmu_context.h |  10 +-
 3 files changed, 235 insertions(+), 12 deletions(-)
 create mode 100644 arch/mips64/kernel/r4k_tlb.S

diff --git a/arch/mips64/kernel/r4k_tlb.S b/arch/mips64/kernel/r4k_tlb.S
new file mode 100644
index 000000000..a8ea18523
--- /dev/null
+++ b/arch/mips64/kernel/r4k_tlb.S
@@ -0,0 +1,217 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Written by Ulf Carlsson (ulfc@engr.sgi.com)
+ */
+#include <linux/config.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/pgtable.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+	.data
+	.comm	pgd_current, NR_CPUS * 8, 8
+
+	/*
+	 * After this macro runs we have a pointer to the pte of the address
+	 * that caused the fault in PTR.
+	 */
+	.macro	LOAD_PTE, ptr, tmp
+#ifdef CONFIG_SMP
+	mfc0	\tmp, CP0_CONTEXT
+	dla	\ptr, pgd_current
+	dsrl	\tmp, 23
+	daddu	\ptr, \tmp
+#else
+	dla	\ptr, pgd_current
+#endif
+	dmfc0	\tmp, CP0_BADVADDR
+	ld	\ptr, (\ptr)
+	dsrl	\tmp, 28		# get pgd offset
+	andi	\tmp, 0x1ff8
+	daddu	\ptr, \tmp		# add in pgd offset
+	dmfc0	\tmp, CP0_BADVADDR
+	ld	\ptr, (\ptr)		# get pmd pointer
+	dsrl	\tmp, 18		# get pmd offset
+	andi	\tmp, 0x1ff8
+	daddu	\ptr, \tmp		# add in pmd offset
+	dmfc0	\tmp, CP0_BADVADDR
+	ld	\ptr, (\ptr)		# get pte pointer
+	dsrl	\tmp, 9
+	andi	\tmp, 0xff8		# get pte offset
+	daddu	\ptr, \tmp
+	.endm
+
+	.macro	LOAD_PTE2, ptr, tmp
+#ifdef CONFIG_SMP
+	mfc0	\tmp, CP0_CONTEXT
+	dla	\ptr, pgd_current
+	dsrl	\tmp, 23
+	daddu	\ptr, \tmp
+#else
+	dla	\ptr, pgd_current
+#endif
+	dmfc0	\tmp, CP0_BADVADDR
+	ld	\ptr, (\ptr)
+	dsrl	\tmp, 28		# get pgd offset
+	andi	\tmp, 0x1ff8
+	daddu	\ptr, \tmp		# add in pgd offset
+	dmfc0	\tmp, CP0_BADVADDR
+	ld	\ptr, (\ptr)		# get pmd pointer
+	dsrl	\tmp, 18		# get pmd offset
+	andi	\tmp, 0x1ff8
+	daddu	\ptr, \tmp		# add in pmd offset
+	dmfc0	\tmp, CP0_XCONTEXT
+	ld	\ptr, (\ptr)		# get pte pointer
+	andi	\tmp, 0xff0		# get pte offset
+	daddu	\ptr, \tmp
+	.endm
+
+	/*
+	 * Place the even/odd pte pair held in PTE0 and PTE1 into ENTRYLO0
+	 * and ENTRYLO1.
+	 */
+	.macro	PTE_RELOAD, pte0, pte1
+	dsrl	\pte0, 6		# convert to entrylo0
+	dmtc0	\pte0, CP0_ENTRYLO0	# load it
+	dsrl	\pte1, 6		# convert to entrylo1
+	dmtc0	\pte1, CP0_ENTRYLO1	# load it
+	.endm
+
+	.macro	DO_FAULT, write
+	SAVE_ALL
+	dmfc0	a2, CP0_BADVADDR
+	STI
+	.set	at
+	move	a0, sp
+	jal	do_page_fault
+	 li	a1, \write
+	j	ret_from_sys_call
+	 nop
+	.set	noat
+	.endm
+
+	/* Check if PTE is present, if not jump to LABEL. */
+	.macro	PTE_PRESENT, pte, ptr, label
+	andi	\pte, (_PAGE_PRESENT | _PAGE_READ)
+	xori	\pte, (_PAGE_PRESENT | _PAGE_READ)
+	bnez	\pte, \label
+	 nop
+	ld	\pte, (\ptr)
+	.endm
+
+	/* Mark PTE as valid, and save in PTR. */
+	.macro	PTE_MAKEVALID, pte, ptr
+	ori	\pte, (_PAGE_VALID | _PAGE_ACCESSED)
+	sd	\pte, (\ptr)
+	.endm
+
+	/* Check if PTE is writable, if not jump to LABEL. */
+	.macro	PTE_WRITEABLE, pte, ptr, label
+	andi	\pte, (_PAGE_PRESENT | _PAGE_WRITE)
+	xori	\pte, (_PAGE_PRESENT | _PAGE_WRITE)
+	bnez	\pte, \label
+	 nop
+	ld	\pte, (\ptr)
+	.endm
+
+	/* Mark PTE as writable, and save in PTR. */
+	.macro	PTE_MAKEWRITE, pte, ptr
+	ori	\pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | \
+		       _PAGE_DIRTY)
+	sd	\pte, (\ptr)
+	.endm
+
+	.text
+	.set	noreorder
+	.set	mips3
+
+	.align	5
+FEXPORT(except_vec0)
+	.set	noat
+1:	b	1b
+	 nop
+
+	/* TLB refill handler for the R10000.
+	 * Attention: We may only use 32 instructions.
+	 */
+	.align	5
+FEXPORT(except_vec1_r10k)
+	.set	noat
+	LOAD_PTE2 k1 k0
+	ld	k0, 0(k1)		# get even pte
+	ld	k1, 8(k1)		# get odd pte
+	PTE_RELOAD k0 k1
+	nop
+	tlbwr
+	eret
+
+	.align	5
+FEXPORT(handle_tlbl)
+	.set	noat
+	LOAD_PTE k1 k0
+	nop
+	tlbp
+	ld	k0, 0(k1)
+	PTE_PRESENT k0 k1 nopage_tlbl
+	PTE_MAKEVALID k0 k1
+	ori	k1, 0xf
+	xori	k1, 0xf
+	ld	k0, 0(k1)
+	ld	k1, 8(k1)
+	PTE_RELOAD k0 k1
+	nop
+	tlbwi
+	nop
+	eret
+
+nopage_tlbl:
+	DO_FAULT 0
+
+	.align	5
+FEXPORT(handle_tlbs)
+	.set	noat
+	LOAD_PTE k1 k0
+	nop
+	tlbp
+	ld	k0, 0(k1)
+	PTE_WRITEABLE k0 k1 nopage_tlbs
+	PTE_MAKEWRITE k0 k1
+	ori	k1, 0xf
+	xori	k1, 0xf
+	ld	k0, 0(k1)
+	ld	k1, 8(k1)
+	PTE_RELOAD k0 k1
+	nop
+	tlbwi
+	eret
+
+nopage_tlbs:
+	DO_FAULT 1
+
+	.align	5
+FEXPORT(handle_mod)
+	.set	noat
+	LOAD_PTE k1 k0
+	nop
+	tlbp
+	ld	k0, 0(k1)
+	PTE_WRITEABLE k0 k1 nowrite_mod
+	PTE_MAKEWRITE k0 k1
+	ori	k1, 0xf
+	xori	k1, 0xf
+	ld	k0, 0(k1)
+	ld	k1, 8(k1)
+	PTE_RELOAD k0 k1
+	nop
+	tlbwi
+	eret
+
+nowrite_mod:
+	DO_FAULT 1
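
The three-level walk that LOAD_PTE encodes is easier to check in C. The sketch
below is a user-space restatement of the same address arithmetic, not code from
the patch; the helper name load_pte_walk is hypothetical, and the shift/mask
constants are lifted straight from the assembler above (each level's index
arrives pre-scaled to an 8-byte table offset). The handlers then clear the low
four bits of the returned pointer (the ori/xori 0xf pair) so it addresses an
even/odd pte pair, and PTE_RELOAD shifts each pte right by six bits into
EntryLo format.

	#include <stdint.h>

	/* Per-CPU pgd base, as maintained by switch_mm() further down. */
	extern unsigned long pgd_current[];

	/* Hypothetical C mirror of LOAD_PTE: return the address of the pte
	 * slot covering a faulting virtual address.  Each level adds a
	 * pre-scaled 8-byte offset, then loads the next level's base. */
	static uint64_t *load_pte_walk(unsigned long badvaddr, int cpu)
	{
		unsigned long p = pgd_current[cpu];	/* pgd base */

		/* dsrl 28; andi 0x1ff8 -> va bits 31..40, times 8 (pgd) */
		p = *(unsigned long *)(p + ((badvaddr >> 28) & 0x1ff8));
		/* dsrl 18; andi 0x1ff8 -> va bits 21..30, times 8 (pmd) */
		p = *(unsigned long *)(p + ((badvaddr >> 18) & 0x1ff8));
		/* dsrl 9; andi 0xff8 -> va bits 12..20, times 8 (pte) */
		return (uint64_t *)(p + ((badvaddr >> 9) & 0xff8));
	}
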
diff --git a/arch/mips64/kernel/traps.c b/arch/mips64/kernel/traps.c
index fa58017cf..d9213757f 100644
--- a/arch/mips64/kernel/traps.c
+++ b/arch/mips64/kernel/traps.c
@@ -41,9 +41,9 @@ static inline void console_verbose(void)
 	console_loglevel = 15;
 }
 
-extern asmlinkage void __xtlb_mod_debug(void);
-extern asmlinkage void __xtlb_tlbl_debug(void);
-extern asmlinkage void __xtlb_tlbs_debug(void);
+extern asmlinkage void handle_mod(void);
+extern asmlinkage void handle_tlbl(void);
+extern asmlinkage void handle_tlbs(void);
 extern asmlinkage void handle_adel(void);
 extern asmlinkage void handle_ades(void);
 extern asmlinkage void handle_ibe(void);
@@ -471,8 +471,8 @@ static inline void go_64(void)
 
 void __init trap_init(void)
 {
-	extern char __tlb_refill_debug_tramp;
-	extern char __xtlb_refill_debug_tramp;
+	extern char except_vec0;
+	extern char except_vec1_r10k;
 	extern char except_vec2_generic;
 	extern char except_vec3_generic, except_vec3_r4000;
 	extern void bus_error_init(void);
@@ -528,8 +528,8 @@ void __init trap_init(void)
 	case CPU_NEVADA:
 r4k:
 		/* Debug TLB refill handler. */
-		memcpy((void *)KSEG0, &__tlb_refill_debug_tramp, 0x80);
-		memcpy((void *)KSEG0 + 0x080, &__xtlb_refill_debug_tramp, 0x80);
+		memcpy((void *)KSEG0, &except_vec0, 0x80);
+		memcpy((void *)KSEG0 + 0x080, &except_vec1_r10k, 0x80);
 
 		/* Cache error vector */
 		memcpy((void *)(KSEG0 + 0x100), (void *) KSEG0, 0x80);
@@ -542,9 +542,9 @@ r4k:
 			0x100);
 	}
 
-	set_except_vector(1, __xtlb_mod_debug);
-	set_except_vector(2, __xtlb_tlbl_debug);
-	set_except_vector(3, __xtlb_tlbs_debug);
+	set_except_vector(1, handle_mod);
+	set_except_vector(2, handle_tlbl);
+	set_except_vector(3, handle_tlbs);
 	set_except_vector(4, handle_adel);
 	set_except_vector(5, handle_ades);
 
diff --git a/include/asm-mips64/mmu_context.h b/include/asm-mips64/mmu_context.h
index eeb8d9091..7f70157c4 100644
--- a/include/asm-mips64/mmu_context.h
+++ b/include/asm-mips64/mmu_context.h
@@ -17,6 +17,8 @@
 #include
 #include
 
+extern unsigned long pgd_current[];
+
 #ifndef CONFIG_SMP
 #define CPU_CONTEXT(cpu, mm)	(mm)->context
 #else
@@ -85,7 +87,9 @@ extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if ((CPU_CONTEXT(cpu, next) ^ ASID_CACHE(cpu)) & ASID_VERSION_MASK)
 		get_new_cpu_mmu_context(next, cpu);
 
-	set_entryhi(CPU_CONTEXT(cpu, next));
+	set_entryhi(CPU_CONTEXT(cpu, next) & 0xff);
+	set_context((unsigned long) smp_processor_id() << (23 + 3));
+	pgd_current[smp_processor_id()] = next->pgd;
 }
 
 /*
@@ -110,7 +114,9 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	/* Unconditionally get a new ASID. */
 	get_new_cpu_mmu_context(next, smp_processor_id());
 
-	set_entryhi(CPU_CONTEXT(smp_processor_id(), next));
+	set_entryhi(CPU_CONTEXT(smp_processor_id(), next) & 0xff);
+	set_context((unsigned long) smp_processor_id() << (23 + 3));
+	pgd_current[smp_processor_id()] = next->pgd;
 }
 
 #endif /* _ASM_MMU_CONTEXT_H */
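
A note on the commit message's context-register remark: on R4000-class CPUs
the CP0 Context register is architected so that hardware deposits BadVPN2
(bits of the faulting address) in its low bits while software owns the PTEBase
field from bit 23 up; with PTEBase pointing at a linear pte array, a refill
handler can load the pte pair with almost no arithmetic. This patch instead
parks only cpu * 8 in PTEBase, which is why switch_mm() writes
smp_processor_id() << (23 + 3) and LOAD_PTE's SMP path recovers a byte offset
into pgd_current[] with dsrl 23. A small, hypothetical user-space model of
that encoding (the names are illustrative, not the kernel's):

	#include <stdio.h>

	#define PTEBASE_SHIFT 23	/* first software-owned Context bit */

	/* What set_context() is handed in switch_mm()/activate_mm(). */
	static unsigned long context_for_cpu(unsigned long cpu)
	{
		return cpu << (PTEBASE_SHIFT + 3);	/* cpu * 8, in PTEBase */
	}

	/* What LOAD_PTE's SMP path computes: dsrl 23 discards the
	 * hardware-filled BadVPN2 bits below bit 23 and leaves cpu * 8,
	 * the byte offset of this CPU's slot in pgd_current[]. */
	static unsigned long pgd_byte_offset(unsigned long context)
	{
		return context >> PTEBASE_SHIFT;
	}

	int main(void)
	{
		for (unsigned long cpu = 0; cpu < 4; cpu++) {
			unsigned long ctx = context_for_cpu(cpu);
			printf("cpu %lu: context %#lx -> offset %lu\n",
			       cpu, ctx, pgd_byte_offset(ctx));
		}
		return 0;
	}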