/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabytes of physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>

/* undef for production */
#define HIGHMEM_DEBUG 1

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;

extern void kmap_init(void) __init;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
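
/*
 * Illustration (an added sketch, not from the original header): PKMAP_NR()
 * and PKMAP_ADDR() translate between a page-aligned virtual address in the
 * pkmap area and its slot index, so for 0 <= nr < LAST_PKMAP:
 *
 *	PKMAP_ADDR(0)			== PKMAP_BASE
 *	PKMAP_ADDR(nr) + PAGE_SIZE	== PKMAP_ADDR(nr + 1)
 *	PKMAP_NR(PKMAP_ADDR(nr))	== nr
 */
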
extern unsigned long kmap_high(struct page *page);
extern void kunmap_high(struct page *page);

extern inline unsigned long kmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return (unsigned long) page_address(page);
	return kmap_high(page);
}

extern inline void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (page < highmem_start_page)
		return;
	kunmap_high(page);
}
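
/*
 * Usage sketch (an illustrative addition, not part of the original header;
 * the function name is made up): a typical kmap()/kunmap() pair around a
 * short-lived access to a possibly-highmem page from process context.
 */
static inline void kmap_example_touch(struct page *page)
{
	char *vaddr = (char *) kmap(page);	/* may sleep waiting for a free pkmap slot */

	vaddr[0] = 0;				/* access the page through the mapping */
	kunmap(page);				/* drop the temporary mapping */
}
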
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
extern inline unsigned long kmap_atomic(struct page *page, enum km_type type)
{
	unsigned long idx;
	unsigned long vaddr;

	if (page < highmem_start_page)
		return (unsigned long) page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#if HIGHMEM_DEBUG
	if (!pte_none(*(kmap_pte+idx)))
		BUG();
#endif
	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));

	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return vaddr;
}
extern inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
{
#if HIGHMEM_DEBUG
	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

#if 0
	if (vaddr < FIXADDR_START) // FIXME
		return;
#endif

	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
		BUG();

	/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(kmap_pte+idx);

	/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif
#endif
}
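
/*
 * Usage sketch (an illustrative addition, not part of the original header;
 * the function name and the KM_USER0 slot choice are assumptions taken from
 * asm/kmap_types.h): a typical kmap_atomic()/kunmap_atomic() pair for a
 * short, non-sleeping access. The km_type slot must match the calling
 * context, since each type gets its own per-CPU fixmap slot.
 */
static inline void kmap_atomic_example_touch(struct page *page)
{
	unsigned long vaddr = kmap_atomic(page, KM_USER0);	/* per-CPU fixmap slot, no sleeping */

	((char *) vaddr)[0] = 0;			/* access the page contents */
	kunmap_atomic(vaddr, KM_USER0);			/* tear down the temporary pte */
}
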
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */