#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/*
 * Free memory management - zoned buddy allocator.
 */

#define MAX_ORDER 10	/* buddy lists for orders 0..MAX_ORDER-1 (largest block: 2^9 pages) */

typedef struct free_area_struct {
	struct list_head free_list;	/* free blocks of this order */
	unsigned int * map;		/* buddy bitmap for this order */
} free_area_t;
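
/*
 * Illustrative sketch, not part of the original header: each free_area[]
 * entry tracks free blocks of 2^order pages, for order 0 .. MAX_ORDER-1.
 * The helper name below is hypothetical.
 */
static inline unsigned long buddy_block_pages(unsigned int order)
{
	return 1UL << order;	/* a block at 'order' spans 2^order pages */
}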

struct pglist_data;

typedef struct zone_struct {
	/*
	 * Commonly accessed fields:
	 */
	spinlock_t lock;
	unsigned long offset;			/* index of the zone's first page in mem_map */
	unsigned long free_pages;		/* free pages of all orders in this zone */
	int low_on_memory;			/* set once free_pages drops below pages_low */
	unsigned long pages_low, pages_high;	/* allocation/balancing watermarks */
	struct pglist_data *zone_pgdat;		/* node this zone belongs to */

	/*
	 * free areas of different sizes
	 */
	free_area_t free_area[MAX_ORDER];

	/*
	 * rarely used fields:
	 */
	char * name;
	unsigned long size;
} zone_t;
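
/*
 * Illustrative sketch, not part of the original header: how the watermarks
 * above are typically consulted.  A zone is regarded as short on memory once
 * free_pages drops below pages_low.  The helper name is hypothetical.
 */
static inline int zone_below_low_watermark(zone_t *zone)
{
	return zone->free_pages < zone->pages_low;
}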

#define ZONE_DMA		0
#define ZONE_NORMAL		1
#define ZONE_HIGHMEM		2
#define MAX_NR_ZONES		3

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * Right now a zonelist takes up less than a cacheline. We never
 * modify it apart from boot-up, and only a few indices are used,
 * so despite the zonelist table being relatively big, the cache
 * footprint of this construct is very small.
 */
typedef struct zonelist_struct {
	zone_t * zones [MAX_NR_ZONES+1]; // NULL delimited
	int gfp_mask;
} zonelist_t;
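
/*
 * Illustrative sketch, not part of the original header: walking a zonelist.
 * zones[] is NULL terminated, so the allocator simply tries each zone in
 * turn, from the preferred zone down through the fallbacks.  The helper
 * name is hypothetical.
 */
static inline zone_t *first_zone_with_free_pages(zonelist_t *zonelist,
						 unsigned long nr_pages)
{
	zone_t **z = zonelist->zones;
	zone_t *zone;

	while ((zone = *z++) != NULL)
		if (zone->free_pages >= nr_pages)
			return zone;
	return NULL;
}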

#define NR_GFPINDEX		0x100	/* one zonelist per possible gfp_mask value */

struct bootmem_data;
typedef struct pglist_data {
	zone_t node_zones[MAX_NR_ZONES];
	zonelist_t node_zonelists[NR_GFPINDEX];
	struct page *node_mem_map;		/* this node's portion of mem_map */
	unsigned long *valid_addr_bitmap;	/* bitmap of valid kernel addresses on this node */
	struct bootmem_data *bdata;		/* boot-time page allocator state */
} pg_data_t;
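
/*
 * Illustrative sketch, not part of the original header: summing the free
 * pages over every zone of one node.  The helper name is hypothetical.
 */
static inline unsigned long node_free_pages(pg_data_t *pgdat)
{
	unsigned long free = 0;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		free += pgdat->node_zones[i].free_pages;
	return free;
}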

extern int numnodes;

#define memclass(pgzone, tzone)	(((pgzone)->zone_pgdat == (tzone)->zone_pgdat) \
			&& (((pgzone) - (pgzone)->zone_pgdat->node_zones) <= \
			((tzone) - (pgzone)->zone_pgdat->node_zones)))
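
/*
 * Illustrative note, not part of the original header: memclass(pgzone, tzone)
 * is true when both zones belong to the same node and pgzone's index is not
 * above tzone's, i.e. a ZONE_DMA page satisfies a ZONE_NORMAL class on that
 * node, but not the other way around.  The wrapper below is hypothetical.
 */
static inline int zone_in_memclass(zone_t *pgzone, zone_t *classzone)
{
	return memclass(pgzone, classzone);
}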

#ifndef CONFIG_DISCONTIGMEM

extern pg_data_t contig_page_data;

#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* !CONFIG_DISCONTIGMEM */

#include <asm/mmzone.h>

#endif /* !CONFIG_DISCONTIGMEM */
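
/*
 * Illustrative sketch, not part of the original header: NODE_DATA() resolves
 * to the single contig_page_data on flat-memory systems and to the arch's
 * per-node lookup under CONFIG_DISCONTIGMEM, so zone lookups can be written
 * uniformly.  The helper name is hypothetical.
 */
static inline zone_t *node_zone(int nid, int zone_idx)
{
	return &NODE_DATA(nid)->node_zones[zone_idx];
}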

#define MAP_ALIGN(x)	((((x) % sizeof(mem_map_t)) == 0) ? (x) : ((x) + \
		sizeof(mem_map_t) - ((x) % sizeof(mem_map_t))))
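
/*
 * Worked example, not part of the original header: MAP_ALIGN() rounds x up
 * to the next multiple of sizeof(mem_map_t).  Assuming, purely for
 * illustration, sizeof(mem_map_t) == 64: MAP_ALIGN(100) = 100 + 64 - (100 % 64)
 * = 100 + 64 - 36 = 128, while MAP_ALIGN(128) stays 128.
 */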

#ifdef CONFIG_DISCONTIGMEM

#define LOCAL_MAP_NR(kvaddr) \
	(((unsigned long)(kvaddr)-LOCAL_BASE_ADDR((kvaddr))) >> PAGE_SHIFT)
#define MAP_NR(kaddr)	(LOCAL_MAP_NR((kaddr)) + \
		(((unsigned long)ADDR_TO_MAPBASE((kaddr)) - PAGE_OFFSET) / \
		sizeof(mem_map_t)))
#define kern_addr_valid(addr)	((KVADDR_TO_NID((unsigned long)addr) >= \
	numnodes) ? 0 : (test_bit(LOCAL_MAP_NR((addr)), \
	NODE_DATA(KVADDR_TO_NID((unsigned long)addr))->valid_addr_bitmap)))
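
/*
 * Illustrative note, not part of the original header: under discontiguous
 * memory, MAP_NR() composes a global mem_map index from the page's offset
 * within its node (LOCAL_MAP_NR) plus the index at which that node's
 * mem_map chunk starts, and kern_addr_valid() first range-checks the node
 * before consulting its valid_addr_bitmap.
 */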

#endif /* CONFIG_DISCONTIGMEM */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */