/*
 *  linux/arch/arm/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *
 * Changelog:
 *  26/01/1996	RMK	Cleaned up various areas to make things a little more generic
 *  07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#if PAGE_SIZE == 4096
/* 2K blocks */
#define SMALL_ALLOC_SHIFT	(11)
#define NAME(x)			x##_2k
#elif PAGE_SIZE == 32768 || PAGE_SIZE == 16384
/* 8K blocks */
#define SMALL_ALLOC_SHIFT	(13)
#define NAME(x)			x##_8k
#else
#error Unsupported PAGE_SIZE for the small page allocator
#endif

#define SMALL_ALLOC_SIZE	(1 << SMALL_ALLOC_SHIFT)
#define NR_BLOCKS		(PAGE_SIZE / SMALL_ALLOC_SIZE)
#define BLOCK_MASK		((1 << NR_BLOCKS) - 1)
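
/*
 * The block-usage bitmap for a page is kept in bits 8 and up of
 * page->count: bit (8 + n) set means block n of the page is allocated.
 * Note that SET_USED() and CLEAR_USED() assign through atomic_read(),
 * which relies on it expanding to the bare counter member.
 * SM_PAGE_PTR() returns a pointer to block "block" within "page".
 */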

#define USED(pg)		((atomic_read(&(pg)->count) >> 8) & BLOCK_MASK)
#define SET_USED(pg,off)	(atomic_read(&(pg)->count) |= 256 << (off))
#define CLEAR_USED(pg,off)	(atomic_read(&(pg)->count) &= ~(256 << (off)))
#define ALL_USED		BLOCK_MASK
#define IS_FREE(pg,off)		(!(atomic_read(&(pg)->count) & (256 << (off))))
#define SM_PAGE_PTR(page,block)	((struct free_small_page *)((page) + \
					((block) << SMALL_ALLOC_SHIFT)))

#if NR_BLOCKS != 2 && NR_BLOCKS != 4
#error I only support 2 or 4 blocks per page
#endif

/*
 * Every free block in a page carries the same next/prev pointers,
 * linking pages with free blocks into a doubly-linked list.
 */
struct free_small_page {
	unsigned long next;
	unsigned long prev;
};

/*
 * To handle allocating small pages, we use the main get_free_page routine
 * and split the page up into NR_BLOCKS blocks.  The page is marked in
 * mem_map as reserved, so it can't be freed by free_page.  The count
 * field is used to keep track of which blocks of this page are allocated.
 */
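/*
 * For example, with a 4K page size there are two 2K blocks per page;
 * if block 0 of a page is allocated, bits 9..8 of page->count hold
 * binary 01, so USED(page) == 1 and offsets[1] == 1 selects block 1
 * for the next allocation from that page.
 */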
static unsigned long small_page_ptr;	/* head of the list of pages with free blocks */

/*
 * Map a used-block bitmap to the index of the first free block;
 * a fully used bitmap maps to NR_BLOCKS.
 */
static unsigned char offsets[1<<NR_BLOCKS] = {
	0,	/* 0000 */
	1,	/* 0001 */
	0,	/* 0010 */
	2,	/* 0011 */
#if NR_BLOCKS == 4
	0,	/* 0100 */
	1,	/* 0101 */
	0,	/* 0110 */
	3,	/* 0111 */
	0,	/* 1000 */
	1,	/* 1001 */
	0,	/* 1010 */
	2,	/* 1011 */
	0,	/* 1100 */
	1,	/* 1101 */
	0,	/* 1110 */
	4	/* 1111 */
#endif
};

/* Reset the list links in every block of a newly allocated page. */
static inline void clear_page_links(unsigned long page)
{
	struct free_small_page *fsp;
	int i;

	for (i = 0; i < NR_BLOCKS; i++) {
		fsp = SM_PAGE_PTR(page, i);
		fsp->next = fsp->prev = 0;
	}
}

/* Update the "prev" link stored in each free block of "page". */
static inline void set_page_links_prev(unsigned long page, unsigned long prev)
{
	struct free_small_page *fsp;
	unsigned int mask;
	int i;

	if (!page)
		return;

	mask = USED(&mem_map[MAP_NR(page)]);
	for (i = 0; i < NR_BLOCKS; i++) {
		if (mask & (1 << i))
			continue;
		fsp = SM_PAGE_PTR(page, i);
		fsp->prev = prev;
	}
}

/* Update the "next" link stored in each free block of "page". */
static inline void set_page_links_next(unsigned long page, unsigned long next)
{
	struct free_small_page *fsp;
	unsigned int mask;
	int i;

	if (!page)
		return;

	mask = USED(&mem_map[MAP_NR(page)]);
	for (i = 0; i < NR_BLOCKS; i++) {
		if (mask & (1 << i))
			continue;
		fsp = SM_PAGE_PTR(page, i);
		fsp->next = next;
	}
}

/*
 * Allocate one small block.  Take the first free block of the page at
 * the head of the free list; if that uses up the page, unlink it from
 * the list.  If the list is empty, allocate and reserve a fresh page.
 */
unsigned long NAME(get_page)(int priority)
{
	struct free_small_page *fsp;
	unsigned long new_page;
	unsigned long flags;
	struct page *page;
	int offset;

	save_flags(flags);
	if (!small_page_ptr)
		goto need_new_page;
	cli();
again:
	page = mem_map + MAP_NR(small_page_ptr);
	offset = offsets[USED(page)];
	SET_USED(page, offset);
	new_page = (unsigned long)SM_PAGE_PTR(small_page_ptr, offset);
	if (USED(page) == ALL_USED) {
		/* page now fully used: unlink it from the free list;
		   its links live in the block we just allocated */
		fsp = (struct free_small_page *)new_page;
		set_page_links_prev (fsp->next, 0);
		small_page_ptr = fsp->next;
	}
	restore_flags(flags);
	return new_page;

need_new_page:
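	/*
	 * __get_free_page may block (depending on "priority"), so the
	 * free list may have been refilled by the time it returns.  If
	 * so, give the fresh page back and retry from the list.
	 */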
	new_page = __get_free_page(priority);
	if (!small_page_ptr) {
		if (new_page) {
			set_bit (PG_reserved, &mem_map[MAP_NR(new_page)].flags);
			clear_page_links (new_page);
			cli();
			small_page_ptr = new_page;
			goto again;
		}
		restore_flags(flags);
		return 0;
	}
	free_page(new_page);
	cli();
	goto again;
}
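
/*
 * Typical use, as a sketch only (assuming a 4K PAGE_SIZE, so the NAME()
 * macro expands these to get_page_2k()/free_page_2k(); GFP_KERNEL here
 * stands in for whatever allocation priority the caller needs):
 *
 *	unsigned long block = get_page_2k(GFP_KERNEL);
 *	if (block) {
 *		... use the 2K block ...
 *		free_page_2k(block);
 *	}
 */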

/*
 * Free one small block.  A previously full page rejoins the free list;
 * a page with no blocks left in use is unreserved and handed back to
 * free_page(); otherwise the list links are copied from the old first
 * free block into the block being freed.
 */
void NAME(free_page)(unsigned long spage)
{
	struct free_small_page *ofsp, *cfsp;
	unsigned long flags;
	struct page *page;
	int offset, oldoffset;

	if (!spage)
		goto none;

	/* Work out the block index, and the base address of its page. */
	offset = (spage >> SMALL_ALLOC_SHIFT) & (NR_BLOCKS - 1);
	spage -= offset << SMALL_ALLOC_SHIFT;

	page = mem_map + MAP_NR(spage);
	if (!PageReserved(page) || !USED(page))
		goto non_small;

	if (IS_FREE(page, offset))
		goto free;

	save_flags_cli (flags);
	oldoffset = offsets[USED(page)];
	CLEAR_USED(page, offset);
	ofsp = SM_PAGE_PTR(spage, oldoffset);
	cfsp = SM_PAGE_PTR(spage, offset);

	if (oldoffset == NR_BLOCKS) { /* going from totally used to mostly used */
		cfsp->prev = 0;
		cfsp->next = small_page_ptr;
		set_page_links_prev (small_page_ptr, spage);
		small_page_ptr = spage;
	} else if (!USED(page)) {	/* page is now completely free */
		set_page_links_prev (ofsp->next, ofsp->prev);
		set_page_links_next (ofsp->prev, ofsp->next);
		if (spage == small_page_ptr)
			small_page_ptr = ofsp->next;
		clear_bit (PG_reserved, &page->flags);
		restore_flags(flags);
		free_page (spage);
	} else
		/* move the free-list links into the newly freed block */
		*cfsp = *ofsp;
	restore_flags(flags);
	return;

non_small:
	printk ("Trying to free non-small page from %p\n", __builtin_return_address(0));
	return;
free:
	printk ("Trying to free free small page from %p\n", __builtin_return_address(0));
none:
	return;
}