/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * PowerPC version, stolen from the i386 version.
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes of physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;
/*
 * Right now we initialize only a single pte table. It can be extended
 * easily, but subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define PKMAP_BASE	(0xfe000000UL)
#define LAST_PKMAP	1024
#define LAST_PKMAP_MASK	(LAST_PKMAP-1)
#define PKMAP_NR(virt)	(((virt)-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define KMAP_FIX_BEGIN (0xfe400000UL)
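
/*
 * Worked example (a sketch, assuming the usual 4K PAGE_SIZE on 32-bit
 * PowerPC): PKMAP_NR(0xfe001000UL) == 1 and PKMAP_ADDR(1) == 0xfe001000UL,
 * so the pkmap window covers LAST_PKMAP * PAGE_SIZE = 4MB, running from
 * PKMAP_BASE at 0xfe000000 up to (but not including) KMAP_FIX_BEGIN at
 * 0xfe400000, where the per-CPU atomic kmap slots begin.
 */
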
extern unsigned long kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern inline unsigned long kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return (unsigned long) page_address(page);
	return kmap_high(page);
}

extern inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}
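
/*
 * Usage sketch (illustrative only, not part of the original header):
 * callers pair kmap() with kunmap() around a short access to a page
 * that may live in highmem, e.g.
 *
 *	vaddr = kmap(page);
 *	memset((void *) vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * kmap() may sleep while waiting for a free pkmap slot, which is why
 * both helpers BUG() when called from interrupt context.
 */
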
/*
 * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 * give a more generic (and caching) interface.  But kmap_atomic can
 * be used in IRQ contexts, so in some (very limited) cases we need
 * it.
 */
extern inline unsigned long kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	if (page < highmem_start_page)
		return (unsigned long) page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = KMAP_FIX_BEGIN + idx * PAGE_SIZE;
#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte+idx)))
		BUG();
#endif
	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
	flush_hash_page(0, vaddr);

	return vaddr;
}

extern inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
{
#if HIGHMEM_DEBUG
	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < KMAP_FIX_BEGIN)		// FIXME
		return;

	if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
		BUG();

	/*
	 * Force other mappings to Oops if they try to access
	 * this pte without first remapping it.
	 */
	pte_clear(kmap_pte+idx);
	flush_hash_page(0, vaddr);
#endif
}
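
/*
 * Usage sketch (illustrative only, not part of the original header):
 * atomic kmaps are per-CPU, per-type fixmap slots, so the caller must
 * not sleep between the two calls and must pass the same km_type to
 * both, e.g.
 *
 *	vaddr = kmap_atomic(page, KM_USER0);
 *	... access the page through vaddr ...
 *	kunmap_atomic(vaddr, KM_USER0);
 *
 * KM_USER0 here is just one of the slot types from <asm/kmap_types.h>.
 */
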
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */