path: root/arch/mips64/kernel/r4k_tlb.S
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
 */
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
#include <asm/stackframe.h>

	.data
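	/* One pointer per cpu to the pgd currently in use (8 bytes each). */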
	.comm	pgd_current, NR_CPUS * 8, 8

	/*
	 * After this macro runs, PTR holds a pointer to the pte of the
	 * address that caused the fault.
	 */

	.macro	LOAD_PTE2, ptr, tmp
#ifdef CONFIG_SMP
	dmfc0	\tmp, CP0_CONTEXT
	dla	\ptr, pgd_current
	dsrl	\tmp, 23			# PTEBase holds smp_processor_id() * 8
	daddu	\ptr, \tmp			# &pgd_current[smp_processor_id()]
#else
	dla	\ptr, pgd_current
#endif
	dmfc0	\tmp, CP0_BADVADDR		# get faulting address
	ld	\ptr, (\ptr)			# get pgd pointer
	bltz	\tmp, kaddr			# kernel address?  handled separately
	 dsrl	\tmp, (PGDIR_SHIFT-3)		# get pgd offset in bytes
	andi	\tmp, ((PTRS_PER_PGD - 1)<<3)
	daddu	\ptr, \tmp			# add in pgd offset
	dmfc0	\tmp, CP0_BADVADDR
	ld	\ptr, (\ptr)			# get pmd pointer
	dsrl	\tmp, (PMD_SHIFT-3)		# get pmd offset in bytes
	andi	\tmp, ((PTRS_PER_PMD - 1)<<3)
	daddu	\ptr, \tmp			# add in pmd offset
	dmfc0	\tmp, CP0_XCONTEXT		# BadVPN2 of the faulting address
	ld	\ptr, (\ptr)			# get pte pointer
	andi	\tmp, 0xff0			# get pte offset
	daddu	\ptr, \tmp
	.endm
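
	/*
	 * Roughly, in C (illustration only; the byte arithmetic mirrors the
	 * code above):
	 *
	 *	pgd = pgd_current[cpu];                  (pgd_current[0] on UP)
	 *	pmd = *(u64 *)(pgd + ((badva >> (PGDIR_SHIFT-3)) &
	 *	                      ((PTRS_PER_PGD-1) << 3)));
	 *	pte = *(u64 *)(pmd + ((badva >> (PMD_SHIFT-3)) &
	 *	                      ((PTRS_PER_PMD-1) << 3)));
	 *	ptr = pte + (xcontext & 0xff0);          (16 bytes per pte pair)
	 *
	 * The last step uses the BadVPN2 field of XCONTEXT: bits 4..11 give
	 * the byte offset of the even/odd pte pair within its pte page.
	 */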

	/*
	 * This converts the even/odd pte pair in PTE0/PTE1 to EntryLo
	 * format and loads them into ENTRYLO0 and ENTRYLO1.
	 */
	.macro	PTE_RELOAD, pte0, pte1
	dsrl	\pte0, 6			# convert to entrylo0
	dmtc0	\pte0, CP0_ENTRYLO0		# load it
	dsrl	\pte1, 6			# convert to entrylo1
	dmtc0	\pte1, CP0_ENTRYLO1		# load it
	.endm
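
	/*
	 * The shift by 6 relies on the pte layout used by this kernel: the
	 * hardware bits (G, V, D, the cache attributes and the PFN) are kept
	 * in the pte shifted left by 6, with the low six bits reserved for
	 * software flags, so one dsrl turns a pte into an EntryLo value.
	 */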

	.text
	.set	noreorder
	.set	mips3
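
	/*
	 * Refill vector for 32-bit address regions.  The 64-bit kernel is
	 * expected to run with KX/SX/UX set, so misses should take the XTLB
	 * refill vector below; if this one is ever reached we just spin.
	 */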

	.align	5
FEXPORT(except_vec0)
	.set	noat
1:	b	1b
	 nop

	/* TLB refill handler for the R10000.
	 * Attention:  We may only use 32 instructions; the refill vector
	 * is only 0x80 bytes long.
	 */

	.align	5
FEXPORT(except_vec1_r10k)
	.set	noat
	LOAD_PTE2 k1 k0
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
	PTE_RELOAD k0 k1
	nop					# cp0 hazard barrier before tlbwr
	tlbwr
	eret
kaddr:
	dla	k0, handle_vmalloc_address	# MAPPED kernel needs this
	jr	k0
	 nop
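
	/*
	 * Refill handler for kernel mapped (vmalloc) addresses.  Their ptes
	 * are not kept in the process page tables but in the flat kptbl
	 * array, one even/odd pair per vpn2, which ends at ekptbl.
	 */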

	.align	5
FEXPORT(handle_vmalloc_address)
	.set	noat
	/*
	 * First, check that the address is not below the start of the
	 * vmalloc range.
	 */
	dmfc0	k0, CP0_BADVADDR
	dli	k1, VMALLOC_START
	sltu	k1, k0, k1
	bne	k1, zero, not_vmalloc
	 dli	k1, VMALLOC_START		# reload, sltu clobbered it (delay slot)

	/*
	 * Now find offset into kptbl.
	 */
	dsubu	k0, k0, k1			# offset from VMALLOC_START
	dla	k1, kptbl
	dsrl	k0, (PAGE_SHIFT+1)		# get vpn2
	dsll	k0, 4				# byte offset of the pte pair
	daddu	k1, k1, k0			# address of the pair in kptbl

	/*
	 * Make sure we have not run past the end of kptbl, i.e. that the
	 * fault address is still within the vmalloc range.
	 */
	dla	k0, ekptbl
	sltu	k0, k1, k0
	beq	k0, zero, not_vmalloc
	 nop					# keep the pte load out of the delay slot

	/*
	 * Load cp0 registers.
	 */
	ld	k0, 0(k1)			# get even pte
	ld	k1, 8(k1)			# get odd pte
1:
	PTE_RELOAD k0 k1
	nop					# cp0 hazard barrier before tlbwr
	tlbwr
	eret
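
	/*
	 * The address is below VMALLOC_START or past the end of kptbl.
	 * Load a zero (invalid) pte pair; the retried access will then take
	 * a TLB invalid exception and end up in the generic fault handler.
	 */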
not_vmalloc:
	daddu	k0, zero, zero			# even pte = 0 (invalid)
	daddu	k1, zero, zero			# odd pte = 0 (invalid)
	j	1b
	 nop