/*
 * linux/include/asm-arm/proc-armo/mm-init.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This contains the code to set up the memory map on an ARM2/ARM250/ARM3
 * machine.  This is both processor- and architecture-specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
#include <asm/arch/memory.h>

int page_nr;

#define setup_processor_functions()

/* size in bytes of one page table (PTRS_PER_PTE entries) */
#define PTE_SIZE	(PTRS_PER_PTE * BYTES_PER_PTR)
/* install the page table at 'ptep' into entry 'index' of swapper_pg_dir */
static inline void setup_swapper_dir (int index, pte_t *ptep)
{
	set_pmd (pmd_offset (swapper_pg_dir + index, 0), mk_pmd (ptep));
}
static inline unsigned long
setup_pagetables(unsigned long start_mem, unsigned long end_mem)
{
	unsigned int i;
	union { unsigned long l; pte_t *pte; } u;

	page_nr = MAP_NR(end_mem);

	/*
	 * Take a PTE_SIZE-aligned page table from start_mem and
	 * map in pages for (0x0000 - 0x8000)
	 */
	u.l = ((start_mem + (PTE_SIZE-1)) & ~(PTE_SIZE-1));
	start_mem = u.l + PTE_SIZE;
	memzero (u.pte, PTE_SIZE);

	/* entry 0: map virtual page 0 read-only to PAGE_OFFSET + 0x78000 */
	u.pte[0] = mk_pte(PAGE_OFFSET + 491520, PAGE_READONLY);

	setup_swapper_dir (0, u.pte);

	/* the remaining page directory entries start out empty */
	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;

	return start_mem;
}
static inline void
mark_usable_memory_areas(unsigned long *start_mem, unsigned long end_mem)
{
	unsigned long smem;

	/*
	 * Mark every page from the end of the kernel image to the end
	 * of memory as usable by clearing its PG_reserved bit.
	 */
	*start_mem = smem = PAGE_ALIGN(*start_mem);
	while (smem < end_mem) {
		clear_bit(PG_reserved, &mem_map[MAP_NR(smem)].flags);
		smem += PAGE_SIZE;
	}
}
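
/*
 * Illustrative sketch only (never compiled): roughly how the helpers in
 * this header are expected to be used by the architecture's boot-time
 * memory setup.  The caller names (paging_init/mem_init) and the
 * free_area_init() call are assumptions about the surrounding kernel
 * code of this era, not something defined by this header.
 */
#if 0
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
	/* build the initial swapper page tables */
	start_mem = setup_pagetables(start_mem, end_mem);
	/* no-op on ARM2/ARM250/ARM3, see the empty macro above */
	setup_processor_functions();

	/* hand the remaining memory over to the page allocator */
	return free_area_init(start_mem, end_mem);
}

void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	/* un-reserve every page above the kernel so it becomes usable */
	mark_usable_memory_areas(&start_mem, end_mem);
	/* ... per-page accounting and free-memory reporting follows ... */
}
#endif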