/*
 *	linux/mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>

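/*
 * Walk the page tables and return a pointer to the pte mapping
 * "addr", or NULL if nothing is mapped there.  A corrupt pgd or pmd
 * entry is reported and cleared rather than trusted.
 */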
static inline pte_t *get_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t * pgd;
	pmd_t * pmd;
	pte_t * pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		goto end;
	if (pgd_bad(*pgd)) {
		printk("move_one_page: bad source pgd (%08lx)\n", pgd_val(*pgd));
		pgd_clear(pgd);
		goto end;
	}

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd))
		goto end;
	if (pmd_bad(*pmd)) {
		printk("move_one_page: bad source pmd (%08lx)\n", pmd_val(*pmd));
		pmd_clear(pmd);
		goto end;
	}

	pte = pte_offset(pmd, addr);
	if (pte_none(*pte))
		pte = NULL;
end:
	return pte;
}

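/*
 * Allocate any missing pmd/pte levels needed to map "addr" at the
 * destination, returning the pte pointer, or NULL if allocation
 * failed.
 */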
static inline pte_t *alloc_one_pte(struct mm_struct *mm, unsigned long addr)
{
	pmd_t * pmd;
	pte_t * pte = NULL;

	pmd = pmd_alloc(pgd_offset(mm, addr), addr);
	if (pmd)
		pte = pte_alloc(pmd, addr);
	return pte;
}

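/*
 * Move a single pte from "src" to "dst".  Returns nonzero only when
 * the source was populated but no destination pte was available, in
 * which case the move so far has to be undone.
 */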
static inline int copy_one_pte(pte_t * src, pte_t * dst)
{
	int error = 0;
	pte_t pte = *src;

	if (!pte_none(pte)) {
		error++;
		if (dst) {
			pte_clear(src);
			set_pte(dst, pte);
			error--;
		}
	}
	return error;
}

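/*
 * Move whatever is mapped at "old_addr" to "new_addr".  Returns
 * nonzero if the destination page table could not be allocated.
 */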
static int move_one_page(struct mm_struct *mm, unsigned long old_addr, unsigned long new_addr)
{
	int error = 0;
	pte_t * src;

	src = get_one_pte(mm, old_addr);
	if (src)
		error = copy_one_pte(src, alloc_one_pte(mm, new_addr));
	return error;
}

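/*
 * Move "len" bytes of ptes from "old_addr" to "new_addr" one page at
 * a time, undoing the whole move if any single page cannot be moved.
 */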
static int move_page_tables(struct mm_struct * mm,
	unsigned long new_addr, unsigned long old_addr, unsigned long len)
{
	unsigned long offset = len;

	flush_cache_range(mm, old_addr, old_addr + len);
	flush_tlb_range(mm, old_addr, old_addr + len);

	/*
	 * This is not the clever way to do this, but we're taking the
	 * easy way out on the assumption that most remappings will be
	 * only a few pages.. This also makes error recovery easier.
	 */
	while (offset) {
		offset -= PAGE_SIZE;
		if (move_one_page(mm, old_addr + offset, new_addr + offset))
			goto oops_we_failed;
	}
	return 0;

	/*
	 * Ok, the move failed because we didn't have enough pages for
	 * the new page table tree. This is unlikely, but we have to
	 * take the possibility into account. In that case we just move
	 * all the pages back (this will work, because we still have
	 * the old page tables)
	 */
oops_we_failed:
	flush_cache_range(mm, new_addr, new_addr + len);
	while ((offset += PAGE_SIZE) < len)
		move_one_page(mm, new_addr + offset, old_addr + offset);
	zap_page_range(mm, new_addr, new_addr + len);
	flush_tlb_range(mm, new_addr, new_addr + len);
	return -1;
}

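/*
 * Move the mapping to a new area: copy the vma descriptor, move the
 * page tables across, insert the new vma and unmap the old range.
 * Returns the new address, or -ENOMEM on failure.
 */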
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len)
{
	struct vm_area_struct * new_vma;

	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (new_vma) {
		unsigned long new_addr = get_unmapped_area(addr, new_len);

		if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
			new_vma->vm_dentry = dget(vma->vm_dentry);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
			merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
			do_munmap(addr, old_len);
			current->mm->total_vm += new_len >> PAGE_SHIFT;
			return new_addr;
		}
		kmem_cache_free(vm_area_cachep, new_vma);
	}
	return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 */
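/*
 * Illustrative userspace sketch (not part of this file): how this
 * syscall is typically reached via the C library's mremap() wrapper.
 * Assumes glibc semantics, where failure is reported as MAP_FAILED:
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */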
asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	lock_kernel();
	if (addr & ~PAGE_MASK)
		goto out;
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 */
	ret = addr;
	if (old_len > new_len) {
		do_munmap(addr+new_len, old_len - new_len);
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	ret = -EFAULT;
	vma = find_vma(current->mm, addr);
	if (!vma || vma->vm_start > addr)
		goto out;
	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto out;
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked = current->mm->locked_vm << PAGE_SHIFT;
		locked += new_len - old_len;
		ret = -EAGAIN;
		if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
			goto out;
	}
	ret = -ENOMEM;
	if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len)
	    > current->rlim[RLIMIT_AS].rlim_cur)
		goto out;

	/* old_len extends exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr &&
	    (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
		unsigned long max_addr = TASK_SIZE;
		if (vma->vm_next)
			max_addr = vma->vm_next->vm_start;
		/* can we just expand the current mapping? */
		if (max_addr - addr >= new_len) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;
			vma->vm_end = addr + new_len;
			current->mm->total_vm += pages;
			if (vma->vm_flags & VM_LOCKED)
				current->mm->locked_vm += pages;
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	if (flags & MREMAP_MAYMOVE)
		ret = move_vma(vma, addr, old_len, new_len);
	else
		ret = -ENOMEM;
out:
	unlock_kernel();
	return ret;
}