author     Ralf Baechle <ralf@linux-mips.org>   2000-11-23 02:00:47 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-11-23 02:00:47 +0000
commit     06615f62b17d7de6e12d2f5ec6b88cf30af08413
tree       8766f208847d4876a6db619aebbf54d53b76eb44   /mm/numa.c
parent     fa9bdb574f4febb751848a685d9a9017e04e1d53
Merge with Linux 2.4.0-test10.
Diffstat (limited to 'mm/numa.c')
-rw-r--r--   mm/numa.c   50
1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/mm/numa.c b/mm/numa.c
index 06ad9ec63..47cb72ec6 100644
--- a/mm/numa.c
+++ b/mm/numa.c
@@ -11,11 +11,11 @@
 int numnodes = 1;	/* Initialized for UMA platforms */
 
-#ifndef CONFIG_DISCONTIGMEM
-
 static bootmem_data_t contig_bootmem_data;
 pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
 
+#ifndef CONFIG_DISCONTIGMEM
+
 /*
  * This is meant to be invoked by platforms whose physical memory starts
  * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
@@ -25,7 +25,7 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
 	unsigned long *zones_size, unsigned long zone_start_paddr,
 	unsigned long *zholes_size)
 {
-	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
+	free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
 		zone_start_paddr, zholes_size, pmap);
 }
@@ -33,7 +33,11 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
 
 struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
 {
+#ifdef CONFIG_NUMA
 	return __alloc_pages(NODE_DATA(nid)->node_zonelists + gfp_mask, order);
+#else
+	return alloc_pages(gfp_mask, order);
+#endif
 }
 
 #ifdef CONFIG_DISCONTIGMEM
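With this hunk, alloc_pages_node() keeps its per-node fast path only under
CONFIG_NUMA and otherwise falls back to the generic alloc_pages() round
robin rewritten at the end of this patch. A minimal caller sketch, not part
of the patch itself, assuming the usual 2.4 GFP_KERNEL and __free_pages()
interfaces:

	/* Allocate a single page (order 0) on node 0, then release it.
	 * Under CONFIG_NUMA this indexes node 0's zonelists directly;
	 * otherwise it takes the round-robin alloc_pages() path. */
	struct page *page = alloc_pages_node(0, GFP_KERNEL, 0);
	if (page)
		__free_pages(page, 0);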
@@ -42,13 +46,12 @@ struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
 
 static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
 
-void show_free_areas_node(int nid)
+void show_free_areas_node(pg_data_t *pgdat)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&node_lock, flags);
-	printk("Memory information for node %d:\n", nid);
-	show_free_areas_core(nid);
+	show_free_areas_core(pgdat);
 	spin_unlock_irqrestore(&node_lock, flags);
 }
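show_free_areas_node() now identifies the node by its pg_data_t instead of
a numeric id, and the per-node printk here is dropped. A sketch of how a
caller might dump statistics for every node under the new signature,
assuming the 2.4 pgdat_list/node_next chain that the rewritten
alloc_pages() below also walks:

	/* Walk the discontigmem node list and print the free-area
	 * statistics of each node in turn. */
	pg_data_t *pgdat;

	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
		show_free_areas_node(pgdat);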
@@ -75,10 +78,16 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
 	for (i = 0; i < MAX_NR_ZONES; i++)
 		size += zones_size[i];
 	size = LONG_ALIGN((size + 7) >> 3);
-	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(nid, size);
+	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
 	memset(pgdat->valid_addr_bitmap, 0, size);
 }
 
+static struct page * alloc_pages_pgdat(pg_data_t *pgdat, int gfp_mask,
+	unsigned long order)
+{
+	return __alloc_pages(pgdat->node_zonelists + gfp_mask, order);
+}
+
 /*
  * This can be refined. Currently, tries to do round robin, instead
  * should do concentric circle search, starting from current node.
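The new static helper alloc_pages_pgdat() mirrors alloc_pages_node() but
takes the pg_data_t directly, so the search loop below never has to
translate a node id back into a node descriptor. For orientation, an
abridged sketch of the 2.4-era pg_data_t fields these helpers rely on
(cf. <linux/mmzone.h> of that period; most members are omitted here):

	typedef struct pglist_data {
		zonelist_t node_zonelists[NR_GFPINDEX];	/* one fallback list
							   per gfp_mask index */
		unsigned long *valid_addr_bitmap;	/* set up above */
		struct pglist_data *node_next;		/* chain headed by
							   pgdat_list */
		/* ... */
	} pg_data_t;

Adding gfp_mask to node_zonelists thus selects the zone fallback list that
matches the allocation type, exactly as alloc_pages_node() does above.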
@@ -86,33 +95,34 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
 struct page * alloc_pages(int gfp_mask, unsigned long order)
 {
 	struct page *ret = 0;
-	int startnode, tnode;
+	pg_data_t *start, *temp;
 #ifndef CONFIG_NUMA
 	unsigned long flags;
-	static int nextnid = 0;
+	static pg_data_t *next = 0;
 #endif
 
 	if (order >= MAX_ORDER)
 		return NULL;
 #ifdef CONFIG_NUMA
-	tnode = numa_node_id();
+	temp = NODE_DATA(numa_node_id());
#else
 	spin_lock_irqsave(&node_lock, flags);
-	tnode = nextnid;
-	nextnid++;
-	if (nextnid == numnodes)
-		nextnid = 0;
+	if (!next) next = pgdat_list;
+	temp = next;
+	next = next->node_next;
 	spin_unlock_irqrestore(&node_lock, flags);
 #endif
-	startnode = tnode;
-	while (tnode < numnodes) {
-		if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
+	start = temp;
+	while (temp) {
+		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
 			return(ret);
+		temp = temp->node_next;
 	}
-	tnode = 0;
-	while (tnode != startnode) {
-		if ((ret = alloc_pages_node(tnode++, gfp_mask, order)))
+	temp = pgdat_list;
+	while (temp != start) {
+		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
 			return(ret);
+		temp = temp->node_next;
 	}
 	return(0);
 }
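The rewritten search is still the round robin the comment above describes:
the first loop tries every node from the chosen starting point to the tail
of pgdat_list, and the second wraps around from the list head back up to
(but not including) the starting node. An equivalent single-loop rendering,
as a sketch only (scan_nodes is a hypothetical name, not part of the
patch):

	/* Try each node exactly once, beginning at `start` and wrapping
	 * from the tail of pgdat_list back to its head. */
	static struct page *scan_nodes(pg_data_t *start, int gfp_mask,
		unsigned long order)
	{
		pg_data_t *temp = start;

		do {
			struct page *ret;

			ret = alloc_pages_pgdat(temp, gfp_mask, order);
			if (ret)
				return ret;
			temp = temp->node_next ? temp->node_next : pgdat_list;
		} while (temp != start);

		return NULL;
	}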