/*
 * linux/include/asm-arm/pgtable.h
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm/arch/memory.h>
#include <asm/proc-fns.h>
#include <asm/system.h>

/*
 * PMD_SHIFT determines the size of the area mapped by a second-level
 * page table entry; PGDIR_SHIFT determines the size mapped by a
 * first-level (pgd) entry.  On ARM the page tables are folded into a
 * two-level setup, so the two are equal.
 */
#define PMD_SHIFT		20
#define PGDIR_SHIFT		20

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
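
/*
 * With PMD_SHIFT == PGDIR_SHIFT == 20, both PMD_SIZE and PGDIR_SIZE
 * work out to 1MB (0x00100000), so each first-level entry maps a 1MB
 * section, and the masks round an address down to that section:
 *
 *	addr & PGDIR_MASK	== 1MB-aligned base of addr's section
 *	addr & ~PGDIR_MASK	== offset of addr within that section
 */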

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)

/*
 * The table below defines the page protection levels that we insert into
 * our Linux page table version.  These get translated into the best that
 * the architecture can provide.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read would be implied
 *  3) Write permission implies read permission
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
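
/*
 * The three digits are the VM_EXEC, VM_WRITE and VM_READ bits of the
 * mapping, __Pxxx for private (copy-on-write) mappings and __Sxxx for
 * shared ones.  For example, a PROT_READ|PROT_WRITE private mapping
 * uses __P011 == PAGE_COPY, so writes fault and get copied, while the
 * equivalent shared mapping uses __S011 == PAGE_SHARED.  Note how the
 * execute bit changes nothing, per note 1) above.
 */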

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

/*
 * Handling bad pmd entries discovered during page table setup.
 */
extern void __handle_bad_pmd(pmd_t *pmd);
extern void __handle_bad_pmd_kernel(pmd_t *pmd);

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(ptep)		set_pte((ptep), __pte(0))
#define pte_pagenr(pte)		((unsigned long)(((pte_val(pte) - PHYS_OFFSET) >> PAGE_SHIFT)))
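
/*
 * pte_pagenr() relies on the pte holding a physical address:
 * subtracting PHYS_OFFSET (the physical address of the start of RAM,
 * from <asm/arch/memory.h>) and shifting by PAGE_SHIFT yields the
 * index of the page in mem_map, which pte_page() below turns into a
 * struct page pointer.
 */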

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_clear(pmdp)		set_pmd(pmdp, __pmd(0))

/*
 * Permanent address of a page.
 */
#define page_address(page)	({ if (!(page)->virtual) BUG(); (page)->virtual; })
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))
#define pte_page(x)		(mem_map + pte_pagenr(x))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page,pgprot)					\
({								\
	pte_t __pte;						\
	pte_val(__pte) = PHYS_OFFSET + 				\
			  (((page) - mem_map) << PAGE_SHIFT) +	\
			   pgprot_val(pgprot);			\
	__pte;							\
})
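
/*
 * Illustrative only: given a pte_t *ptep, installing a freshly
 * allocated page might look like the sketch below (ignoring locking
 * and TLB maintenance, and assuming the PAGE_KERNEL protection from
 * the processor-specific <asm/proc/pgtable.h> included further down):
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (page)
 *		set_pte(ptep, mk_pte(page, PAGE_KERNEL));
 */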

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)		mk_pte(page, __pgprot(0))

/* To find an entry in a page-table-directory. */
#define __pgd_offset(addr)	((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+__pgd_offset(addr))

/* To find an entry in the kernel page-table-directory. */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table. */
#define __pte_offset(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, addr)	((pte_t *)pmd_page(*(dir)) + __pte_offset(addr))
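
/*
 * Putting the three lookups together, a full walk of the page tables
 * for a user address is simply (illustrative only, no locking shown):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset(pmd, addr);
 *
 * Because the pmd is folded into the pgd, pmd_offset() is just a cast
 * and the "walk" only touches two real levels of table.
 */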

#include <asm/proc/pgtable.h>

extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
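
/*
 * pte_modify() is what mprotect()-style code uses to change the
 * protection of an existing pte while keeping the page it points at:
 * the bits covered by _PAGE_CHG_MASK (defined in the processor-specific
 * header) are preserved and only the protection bits are replaced, e.g.
 *
 *	pte = pte_modify(pte, newprot);
 */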

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define update_mmu_cache(vma,address,pte) do { } while (0)

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on machines with 4k pages.
 */
#define SWP_TYPE(x)		(((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x)		((x).val >> 9)
#define SWP_ENTRY(type,offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(swp)	((pte_t) { (swp).val })
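
/*
 * The 32GB figure follows from the layout: bits 8..2 hold the swap
 * type (7 bits) and bits 31..9 the swap offset, leaving 23 bits of
 * offset: 2^23 pages * 4KB per page == 32GB.  The bottom two bits are
 * left clear, so a swapped-out pte never looks like a valid hardware
 * entry.
 */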

#define module_map		vmalloc
#define module_unmap		vfree

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page)		(machine_is_riscpc() && test_bit(PG_skip, &(page)->flags))

#define io_remap_page_range	remap_page_range

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */