-rw-r--r--  arch/ppc64/Kconfig      | 12 ++++++++++++
-rw-r--r--  include/linux/mmzone.h  |  6 ++++++
-rw-r--r--  mm/page_alloc.c         |  2 ++
3 files changed, 20 insertions(+), 0 deletions(-)
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 011b5c0bf1d0..85f8fcf44b6c 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -211,6 +211,18 @@ config ARCH_FLATMEM_ENABLE
source "mm/Kconfig"
+# Some NUMA nodes have memory ranges that span
+# other nodes. Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.
+#
+# This is a relatively temporary hack that should
+# be able to go away when sparsemem is fully in
+# place
+config NODES_SPAN_OTHER_NODES
+ def_bool y
+ depends on NEED_MULTIPLE_NODES
+
config NUMA
bool "NUMA support"
depends on DISCONTIGMEM
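
The Kconfig comment above is the whole story behind the option: on some ppc64
machines a node's [start_pfn, end_pfn) span contains memory that actually
belongs to another node, so "the pfn lies within node X's span" does not imply
"the pfn is on node X". The self-contained sketch below illustrates that
situation; the node layout, the table-driven early_pfn_to_nid() and the main()
driver are hypothetical stand-ins rather than the real ppc64 implementation,
and the loop only mirrors the check added to memmap_init_zone() further down.

/*
 * Illustration only, not part of the patch: a made-up layout where
 * node 0's pfn span contains a block that really belongs to node 1.
 */
#include <stdio.h>

struct node_range {
	unsigned long start_pfn;
	unsigned long end_pfn;		/* exclusive */
	int nid;
};

/* Hypothetical layout: node 0 spans 0x0000-0x3000, but the middle
 * 0x1000-0x2000 block is node 1 memory. */
static const struct node_range ranges[] = {
	{ 0x0000, 0x1000, 0 },
	{ 0x1000, 0x2000, 1 },
	{ 0x2000, 0x3000, 0 },
};

/* Stand-in for the arch-provided early_pfn_to_nid(). */
static int early_pfn_to_nid(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (pfn >= ranges[i].start_pfn && pfn < ranges[i].end_pfn)
			return ranges[i].nid;
	return -1;
}

/* Same shape as the macro added to include/linux/mmzone.h. */
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))

int main(void)
{
	unsigned long start_pfn = 0x0000, end_pfn = 0x3000;	/* node 0's span */
	unsigned long pfn, skipped = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!early_pfn_in_nid(pfn, 0)) {
			skipped++;	/* leave this page for node 1's pass */
			continue;
		}
		/* here memmap_init_zone() would set up the struct page */
	}
	printf("node 0 skipped %lu of %lu pfns in its span\n",
	       skipped, end_pfn - start_pfn);
	return 0;
}

Compiled and run, node 0 skips the 0x1000 pfns that the lookup attributes to
node 1, i.e. the pages that would otherwise be initialized with node 0's links
even though they live on node 1.
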
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 19860d317ec2..746b57e3d370 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -528,6 +528,12 @@ void sparse_init(void);
 #define sparse_init()	do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
+#else
+#define early_pfn_in_nid(pfn, nid)	(1)
+#endif
+
 #ifndef early_pfn_valid
 #define early_pfn_valid(pfn)	(1)
 #endif
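
Expanded by hand, the check that the next hunk adds to memmap_init_zone()
reads as follows; early_pfn_to_nid() is assumed to be supplied by the
architecture (the option is only defined for ppc64 in this patch), and when
the option is off the test is a compile-time constant the optimizer throws
away, so other configurations pay nothing:

	/* CONFIG_NODES_SPAN_OTHER_NODES=y */
	if (!(early_pfn_to_nid(pfn) == (nid)))
		continue;

	/* option not set: always false, dead code after optimization */
	if (!(1))
		continue;
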
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c1b8982a6da..1eb683f9b3af 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1656,6 +1656,8 @@ void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 	for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
 		if (!early_pfn_valid(pfn))
 			continue;
+		if (!early_pfn_in_nid(pfn, nid))
+			continue;
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		set_page_count(page, 0);
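
The new test sits right next to early_pfn_valid(), so a pfn that falls inside
this node's span but is reported to belong to a different node is simply
skipped here; the expectation is that the owning node sets up that struct page
when memmap_init_zone() runs over its own zones, rather than having its node
links written with the wrong nid by this pass.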