/*
* Written by Kanoj Sarcar, SGI, Aug 1999
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>
int numnodes = 1; /* Initialized for UMA platforms */
#ifndef CONFIG_DISCONTIGMEM
static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
        unsigned long *zones_size, unsigned long zone_start_paddr,
        unsigned long *zholes_size)
{
        free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
                        zone_start_paddr, zholes_size);
}
#endif /* !CONFIG_DISCONTIGMEM */
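/*
 * Allocate a 2^order block of pages from the given node. Note that
 * gfp_mask is used directly as an index into the node's zonelist array.
 */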
struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
{
        return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
}
#ifdef CONFIG_DISCONTIGMEM
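/* Round x up to the next multiple of sizeof(long). */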
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
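/*
 * Print the free-area statistics for a single node; node_lock keeps
 * concurrent callers from interleaving their per-node output.
 */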
void show_free_areas_node(int nid)
{
        unsigned long flags;

        spin_lock_irqsave(&node_lock, flags);
        printk("Memory information for node %d:\n", nid);
        show_free_areas_core(nid);
        spin_unlock_irqrestore(&node_lock, flags);
}
/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat,
        unsigned long *zones_size, unsigned long zone_start_paddr,
        unsigned long *zholes_size)
{
        int i, size = 0;
        struct page *discard;

        if (mem_map == (mem_map_t *)NULL)
                mem_map = (mem_map_t *)PAGE_OFFSET;

        free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
                        zholes_size);
        pgdat->node_id = nid;
        /*
         * Get space for the valid bitmap: one bit per page, so the page
         * count is rounded up to whole bytes and then to a long boundary.
         */
        for (i = 0; i < MAX_NR_ZONES; i++)
                size += zones_size[i];
        size = LONG_ALIGN((size + 7) >> 3);
        pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(nid, size);
        memset(pgdat->valid_addr_bitmap, 0, size);
}
/*
 * This can be refined. Currently it does a round-robin search across
 * nodes; instead it should do a concentric-circle search, starting
 * from the current node.
 */
struct page * alloc_pages(int gfp_mask, unsigned long order)
{
        struct page *ret = 0;
        int startnode, tnode;
#ifndef CONFIG_NUMA
        unsigned long flags;
        static int nextnid = 0;
#endif

        if (order >= MAX_ORDER)
                return NULL;
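        /*
         * Pick a starting node: on NUMA, the node this CPU lives on;
         * otherwise rotate nextnid (protected by node_lock) so
         * allocations are spread round-robin across nodes.
         */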
#ifdef CONFIG_NUMA
        tnode = numa_node_id();
#else
        spin_lock_irqsave(&node_lock, flags);
        tnode = nextnid;
        nextnid++;
        if (nextnid == numnodes)
                nextnid = 0;
        spin_unlock_irqrestore(&node_lock, flags);
#endif
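        /*
         * Try every node from the starting node to the last one, then
         * wrap around and try the remaining nodes up to (but not
         * including) the starting node.
         */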
        startnode = tnode;
        while (tnode < numnodes) {
                if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
                        return(ret);
        }
        tnode = 0;
        while (tnode != startnode) {
                if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
                        return(ret);
        }
        return(0);
}
#endif /* CONFIG_DISCONTIGMEM */