#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/list.h>
/*
* Free memory management - zoned buddy allocator.
*/
#define MAX_ORDER 10
typedef struct free_area_struct {
        struct list_head free_list;
        unsigned int *map;
} free_area_t;
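/*
 * Illustrative note (not in the original header): free_area[order] holds
 * the free blocks of 2^order contiguous pages, so with MAX_ORDER set to
 * 10 the orders run from 0 to 9 and the largest buddy block is 2^9 = 512
 * pages (2MB with 4KB pages). The map bitmap records the state of each
 * pair of buddies at that order.
 */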
struct pglist_data;
typedef struct zone_struct {
        /*
         * Commonly accessed fields:
         */
        spinlock_t lock;
        unsigned long offset;
        unsigned long free_pages;
        char low_on_memory;
        char zone_wake_kswapd;
        unsigned long pages_min, pages_low, pages_high;

        /*
         * free areas of different sizes
         */
        free_area_t free_area[MAX_ORDER];

        /*
         * rarely used fields:
         */
        char *name;
        unsigned long size;

        /*
         * Discontig memory support fields.
         */
        struct pglist_data *zone_pgdat;
        unsigned long zone_start_paddr;
        unsigned long zone_start_mapnr;
        struct page *zone_mem_map;
} zone_t;
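/*
 * Illustrative note (not in the original header): pages_min, pages_low
 * and pages_high are per-zone watermarks that the page allocator compares
 * against free_pages. Roughly speaking, dipping below the upper watermarks
 * sets zone_wake_kswapd so kswapd reclaims pages in the background, while
 * approaching pages_min marks the zone low_on_memory and pushes the
 * allocator into synchronous reclaim.
 */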
#define ZONE_DMA 0
#define ZONE_NORMAL 1
#define ZONE_HIGHMEM 2
#define MAX_NR_ZONES 3
/*
* One allocation request operates on a zonelist. A zonelist
* is a list of zones, the first one is the 'goal' of the
* allocation, the other zones are fallback zones, in decreasing
* priority.
*
* Right now a zonelist takes up less than a cacheline. We never
* modify it apart from boot-up, and only a few indices are used,
* so despite the zonelist table being relatively big, the cache
* footprint of this construct is very small.
*/
typedef struct zonelist_struct {
        zone_t * zones [MAX_NR_ZONES+1]; // NULL delimited
        int gfp_mask;
} zonelist_t;
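/*
 * Illustrative sketch (not in the original header) of how a NULL-delimited
 * zonelist is walked, with the first entry as the preferred zone and the
 * rest as fallbacks; the real __alloc_pages() adds watermark and reclaim
 * logic on top of a loop like this:
 *
 *      zone_t **zp = zonelist->zones, *z;
 *      while ((z = *zp++) != NULL)
 *              if (z->free_pages > z->pages_low)
 *                      return rmqueue(z, order);   // simplified
 */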
#define NR_GFPINDEX 0x100
struct bootmem_data;
typedef struct pglist_data {
        zone_t node_zones[MAX_NR_ZONES];
        zonelist_t node_zonelists[NR_GFPINDEX];
        struct page *node_mem_map;
        unsigned long *valid_addr_bitmap;
        struct bootmem_data *bdata;
        unsigned long node_start_paddr;
        unsigned long node_start_mapnr;
        unsigned long node_size;
        int node_id;
        struct pglist_data *node_next;
} pg_data_t;
extern int numnodes;
extern pg_data_t *pgdat_list;
#define memclass(pgzone, tzone) (((pgzone)->zone_pgdat == (tzone)->zone_pgdat) \
                && (((pgzone) - (pgzone)->zone_pgdat->node_zones) <= \
                    ((tzone) - (pgzone)->zone_pgdat->node_zones)))
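/*
 * Illustrative note (not in the original header): memclass() is true when
 * pgzone lies on the same node as tzone and its zone index is less than or
 * equal to tzone's, i.e. a page in pgzone is usable by an allocation whose
 * fallback class is tzone. A typical reclaim-time check might look like
 * (hypothetical names):
 *
 *      if (memclass(page->zone, classzone))
 *              ;       // freeing this page helps the stalled allocation
 */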
/*
* The following two are not meant for general usage. They are here as
* prototypes for the discontig memory code.
*/
extern void show_free_areas_core(int);
extern void free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
        unsigned long *zones_size, unsigned long paddr, unsigned long *zholes_size);
#ifndef CONFIG_DISCONTIGMEM
extern pg_data_t contig_page_data;
#define NODE_DATA(nid) (&contig_page_data)
#define NODE_MEM_MAP(nid) mem_map
#else /* !CONFIG_DISCONTIGMEM */
#include <asm/mmzone.h>
#endif /* !CONFIG_DISCONTIGMEM */
#define MAP_ALIGN(x) ((((x) % sizeof(mem_map_t)) == 0) ? (x) : ((x) + \
                sizeof(mem_map_t) - ((x) % sizeof(mem_map_t))))
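/*
 * Illustrative note (not in the original header): MAP_ALIGN() rounds x up
 * to the next multiple of sizeof(mem_map_t). If sizeof(mem_map_t) were 64
 * bytes (an assumed value), MAP_ALIGN(100) would give 100 + 64 - (100 % 64)
 * = 128, and MAP_ALIGN(128) would stay 128 since it is already aligned.
 */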
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */