diff options
author | Kanoj Sarcar <kanoj@engr.sgi.com> | 2000-06-30 00:48:29 +0000 |
---|---|---|
committer | Kanoj Sarcar <kanoj@engr.sgi.com> | 2000-06-30 00:48:29 +0000 |
commit | 72d92cedc30af23d855fec53b04d9266d88c6671 (patch) | |
tree | a3d02ad3d35bc5939c34c0527daba0793c799ab9 /arch | |
parent | 706a11128cbe9c614436350e5ff807b6d6ac79ea (diff) |
Implement a functional vmalloc(). The vmalloc range address translations
are stashed in an array of page tables, starting from kptbl[]. The fast
tlbmiss handler quickly checks to see if the faulting address is in the
vmalloc range, and if so, it uses the translations in the kptbl to update
the tlbs. Still to do: tlb invalid faults in the vmalloc range needs to
be handled properly.
Diffstat (limited to 'arch')
-rw-r--r-- | arch/mips64/kernel/head.S | 3 | ||||
-rw-r--r-- | arch/mips64/kernel/r4k_tlb.S | 59 | ||||
-rw-r--r-- | arch/mips64/kernel/setup.c | 2 |
3 files changed, 59 insertions, 5 deletions
diff --git a/arch/mips64/kernel/head.S b/arch/mips64/kernel/head.S index 946bd98b3..74d3c34ef 100644 --- a/arch/mips64/kernel/head.S +++ b/arch/mips64/kernel/head.S @@ -171,3 +171,6 @@ NESTED(bootstrap, 16, sp) page invalid_pmd_table, 1 page empty_bad_page_table, 0 page empty_bad_pmd_table, 1 + page kptbl, KPTBL_PAGE_ORDER + .globl ekptbl +ekptbl: diff --git a/arch/mips64/kernel/r4k_tlb.S b/arch/mips64/kernel/r4k_tlb.S index 2baa35ca1..54c4b934b 100644 --- a/arch/mips64/kernel/r4k_tlb.S +++ b/arch/mips64/kernel/r4k_tlb.S @@ -1,4 +1,4 @@ -/* $Id: r4k_tlb.S,v 1.1 2000/05/25 19:33:16 ulfc Exp $ +/* $Id: r4k_tlb.S,v 1.2 2000/06/13 23:48:20 kanoj Exp $ * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive @@ -33,13 +33,14 @@ #endif dmfc0 \tmp, CP0_BADVADDR ld \ptr, (\ptr) - dsrl \tmp, 28 # get pgd offset - andi \tmp, 0x1ff8 + bltz \tmp, kaddr + dsrl \tmp, (PGDIR_SHIFT-3) # get pgd offset in bytes + andi \tmp, ((PTRS_PER_PGD - 1)<<3) daddu \ptr, \tmp # add in pgd offset dmfc0 \tmp, CP0_BADVADDR ld \ptr, (\ptr) # get pmd pointer - dsrl \tmp, 18 # get pmd offset - andi \tmp, 0x1ff8 + dsrl \tmp, (PMD_SHIFT-3) # get pmd offset in bytes + andi \tmp, ((PTRS_PER_PMD - 1)<<3) daddu \ptr, \tmp # add in pmd offset dmfc0 \tmp, CP0_XCONTEXT ld \ptr, (\ptr) # get pte pointer @@ -82,3 +83,51 @@ FEXPORT(except_vec1_r10k) nop tlbwr eret +kaddr: + dla k0, handle_vmalloc_address # MAPPED kernel needs this + jr k0 + nop + + .align 5 +FEXPORT(handle_vmalloc_address) + .set noat + /* + * First, determine that the address is in/above vmalloc range. + */ + dmfc0 k0, CP0_BADVADDR + dli k1, VMALLOC_START + sltu k1, k0, k1 + bne k1, zero, not_vmalloc + dli k1, VMALLOC_START + + /* + * Now find offset into kptbl. 
+ */ + dsubu k0, k0, k1 + dla k1, kptbl + dsrl k0, (PAGE_SHIFT+1) # get vpn2 + dsll k0, 4 # byte offset of pte + daddu k1, k1, k0 + + /* + * Determine that fault address is within vmalloc range. + */ + dla k0, ekptbl + sltu k0, k1, k0 + beq k0, zero, not_vmalloc + + /* + * Load cp0 registers. + */ + ld k0, 0(k1) # get even pte + ld k1, 8(k1) # get odd pte +1: + PTE_RELOAD k0 k1 + nop + tlbwr + eret +not_vmalloc: + daddu k0, zero, zero + daddu k1, zero, zero + j 1b + nop diff --git a/arch/mips64/kernel/setup.c b/arch/mips64/kernel/setup.c index fe4c750d4..93f7f8a78 100644 --- a/arch/mips64/kernel/setup.c +++ b/arch/mips64/kernel/setup.c @@ -35,6 +35,7 @@ #include <asm/io.h> #include <asm/stackframe.h> #include <asm/system.h> +#include <asm/pgalloc.h> #ifdef CONFIG_SGI_IP27 /* XXX Origin garbage has no business in this file */ @@ -154,6 +155,7 @@ void __init setup_arch(char **cmdline_p) unsigned long *initrd_header; #endif + memset((void *)kptbl, 0, PAGE_SIZE << KPTBL_PAGE_ORDER); cpu_probe(); load_mmu(); |