/*
 * linux/include/asm-arm/proc-armo/pgtable.h
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * 18-Oct-1997	RMK	Now two-level (32x32)
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H

/*
 * entries per page directory level: the page tables are two-level,
 * so we don't really have any PMD directory.
 */
#define PTRS_PER_PTE		32
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		32
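/*
 * Sizing sketch (assumes the 32kB page size this processor family
 * uses, defined outside this file): two levels of 32 entries each
 * cover 32 x 32 x 32kB = 32MB of address space, which is why no
 * separate PMD level is needed.
 */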

/*
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area to help catch out-of-bounds accesses. ;)
 */
#define VMALLOC_START	  0x01a00000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END	  0x01c00000
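/* i.e. a 2MB window (0x01c00000 - 0x01a00000) reserved for vmalloc. */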

#define _PAGE_TABLE     (0x01)

#define pmd_bad(pmd)		((pmd_val(pmd) & 0xfc000002))
#define set_pmd(pmdp,pmd)	((*(pmdp)) = (pmd))

extern __inline__ pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	pmd_t pmd;

	pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;

	return pmd;
}

/* these are aliases for the above function */
#define mk_user_pmd(ptep)	__mk_pmd(ptep, _PAGE_TABLE)
#define mk_kernel_pmd(ptep)	__mk_pmd(ptep, _PAGE_TABLE)

extern __inline__ unsigned long pmd_page(pmd_t pmd)
{
	return __phys_to_virt(pmd_val(pmd) & ~_PAGE_TABLE);
}
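
/*
 * Usage sketch (illustrative only; pmdp and ptep are assumed to be
 * valid pointers obtained elsewhere): mk_kernel_pmd() and pmd_page()
 * are inverses of one another --
 *
 *	pmd_t pmd = mk_kernel_pmd(ptep);	physical address | _PAGE_TABLE
 *	pte_t *back = (pte_t *)pmd_page(pmd);	back to ptep's virtual address
 *	set_pmd(pmdp, pmd);			install in the page directory
 */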

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))

#define _PAGE_PRESENT	0x01
#define _PAGE_READONLY	0x02
#define _PAGE_NOT_USER	0x04
#define _PAGE_OLD	0x08
#define _PAGE_CLEAN	0x10
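/*
 * Note that all four modifier bits above have inverted sense: a pte
 * with only _PAGE_PRESENT set describes a user-readable, writable,
 * young and dirty page, which is what the pte_*() tests below rely on.
 */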

/*                               -- present --   -- !dirty --  --- !write ---   ---- !user --- */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY | _PAGE_NOT_USER)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_CLEAN                                  )
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY                 )
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY                 )
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT                                | _PAGE_NOT_USER)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_OLD | _PAGE_CLEAN)
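/*
 * _PAGE_CHG_MASK keeps the page frame number together with the
 * (inverted) accessed and dirty bits; presumably it is the mask a
 * pte_modify()-style helper elsewhere uses when only the protection
 * bits of a pte are being changed.
 */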


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)		(pte_val(pte) & _PAGE_PRESENT)
#define pte_read(pte)			(!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_write(pte)			(!(pte_val(pte) & _PAGE_READONLY))
#define pte_exec(pte)			(!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_dirty(pte)			(!(pte_val(pte) & _PAGE_CLEAN))
#define pte_young(pte)			(!(pte_val(pte) & _PAGE_OLD))
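
/*
 * pte_nocache() below is a no-op; these ptes appear to carry no
 * cache-control bit that could usefully be cleared.
 */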

extern inline pte_t pte_nocache(pte_t pte)	{ return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)    { pte_val(pte) |= _PAGE_READONLY;  return pte; }
extern inline pte_t pte_rdprotect(pte_t pte)    { pte_val(pte) |= _PAGE_NOT_USER;  return pte; }
extern inline pte_t pte_exprotect(pte_t pte)    { pte_val(pte) |= _PAGE_NOT_USER;  return pte; }
extern inline pte_t pte_mkclean(pte_t pte)      { pte_val(pte) |= _PAGE_CLEAN;     return pte; }
extern inline pte_t pte_mkold(pte_t pte)        { pte_val(pte) |= _PAGE_OLD;       return pte; }

extern inline pte_t pte_mkwrite(pte_t pte)      { pte_val(pte) &= ~_PAGE_READONLY; return pte; }
extern inline pte_t pte_mkread(pte_t pte)       { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
extern inline pte_t pte_mkexec(pte_t pte)       { pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)      { pte_val(pte) &= ~_PAGE_CLEAN;    return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)      { pte_val(pte) &= ~_PAGE_OLD;      return pte; }
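
/*
 * Usage sketch (illustrative only; ptep is assumed to point at a
 * valid pte slot): the helpers above are chained on a pte value and
 * the result written back with set_pte(), e.g. write-protecting a
 * present, writable page:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte) && pte_write(pte))
 *		set_pte(ptep, pte_wrprotect(pte));
 */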

#define pte_alloc_kernel        pte_alloc

#endif /* __ASM_PROC_PGTABLE_H */