author     Ralf Baechle <ralf@linux-mips.org>  1997-04-29 21:13:14 +0000
committer  <ralf@linux-mips.org>               1997-04-29 21:13:14 +0000
commit     19c9bba94152148523ba0f7ef7cffe3d45656b11 (patch)
tree       40b1cb534496a7f1ca0f5c314a523c69f1fee464  /mm/mmap.c
parent     7206675c40394c78a90e74812bbdbf8cf3cca1be (diff)
Import of Linux/MIPS 2.1.36
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  109
1 file changed, 75 insertions(+), 34 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index a264e319b..4a5ed16a3 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -7,13 +7,15 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/errno.h>
#include <linux/mman.h>
#include <linux/string.h>
-#include <linux/malloc.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -41,6 +43,11 @@ pgprot_t protection_map[16] = {
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
+/* SLAB cache for vm_area_struct's. */
+kmem_cache_t *vm_area_cachep;
+
+int sysctl_overcommit_memory;
+
/*
* Check that a process has enough memory to allocate a
* new virtual mapping.
@@ -53,65 +60,78 @@ int vm_enough_memory(long pages)
* fool it, but this should catch most mistakes.
*/
long freepages;
+
+ /* sometimes we want to use more memory than we have. */
+ if (sysctl_overcommit_memory)
+ return 1;
+
freepages = buffermem >> PAGE_SHIFT;
freepages += page_cache_size;
freepages >>= 1;
freepages += nr_free_pages;
freepages += nr_swap_pages;
- freepages -= max_mapnr >> 4;
+ freepages -= num_physpages >> 4;
return freepages > pages;
}
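
The overcommit check above is a plain heuristic: when sysctl_overcommit_memory is set it is skipped entirely; otherwise buffer-cache and page-cache pages are counted at half weight, free RAM and swap are added, and a 1/16 reserve of physical memory is subtracted. A minimal user-space sketch of the same arithmetic, with every counter an illustrative stand-in rather than the kernel's globals:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative stand-ins for the kernel counters used above. */
static long buffermem = 8L << 20;         /* bytes in the buffer cache  */
static long page_cache_size = 2048;       /* pages in the page cache    */
static long nr_free_pages = 1024;         /* free physical pages        */
static long nr_swap_pages = 4096;         /* free swap pages            */
static long num_physpages = 16384;        /* total physical pages       */
static int  sysctl_overcommit_memory = 0; /* nonzero = always allow     */

/* Same shape as vm_enough_memory(): count cached pages at half weight,
 * add free RAM and swap, then keep 1/16 of physical memory in reserve. */
static int vm_enough_memory_sketch(long pages)
{
	long freepages;

	if (sysctl_overcommit_memory)
		return 1;

	freepages = buffermem >> PAGE_SHIFT;
	freepages += page_cache_size;
	freepages >>= 1;
	freepages += nr_free_pages;
	freepages += nr_swap_pages;
	freepages -= num_physpages >> 4;
	return freepages > pages;
}

int main(void)
{
	printf("mapping 100 pages allowed: %d\n", vm_enough_memory_sketch(100));
	return 0;
}
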
asmlinkage unsigned long sys_brk(unsigned long brk)
{
- unsigned long rlim;
+ unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
struct mm_struct *mm = current->mm;
+ lock_kernel();
+ retval = mm->brk;
if (brk < mm->end_code)
- return mm->brk;
+ goto out;
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
- if (oldbrk == newbrk)
- return mm->brk = brk;
+ if (oldbrk == newbrk) {
+ retval = mm->brk = brk;
+ goto out;
+ }
/*
* Always allow shrinking brk
*/
if (brk <= mm->brk) {
- mm->brk = brk;
+ retval = mm->brk = brk;
do_munmap(newbrk, oldbrk-newbrk);
- return brk;
+ goto out;
}
/*
* Check against rlimit and stack..
*/
+ retval = mm->brk;
rlim = current->rlim[RLIMIT_DATA].rlim_cur;
if (rlim >= RLIM_INFINITY)
rlim = ~0;
if (brk - mm->end_code > rlim)
- return mm->brk;
+ goto out;
/*
* Check against existing mmap mappings.
*/
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
- return mm->brk;
+ goto out;
/*
* Check if we have enough memory..
*/
if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
- return mm->brk;
+ goto out;
/*
* Ok, looks good - let it rip.
*/
if(do_mmap(NULL, oldbrk, newbrk-oldbrk,
- PROT_READ|PROT_WRITE|PROT_EXEC,
- MAP_FIXED|MAP_PRIVATE, 0) != oldbrk)
- return mm->brk;
- return mm->brk = brk;
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0) == oldbrk)
+ mm->brk = brk;
+ retval = mm->brk;
+out:
+ unlock_kernel();
+ return retval;
}
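
The sys_brk() rewrite above converts the early returns into a single exit labelled out, so the newly added big kernel lock is always released on the same path it was taken. A small sketch of that single-exit locking shape, using a pthread mutex and hypothetical names in place of lock_kernel()/unlock_kernel():

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical per-process state, standing in for mm->brk. */
static unsigned long current_limit = 4096;

/* Same single-exit shape as the patched sys_brk(): take the lock once,
 * funnel every rejection through "out", release the lock exactly once. */
static unsigned long set_limit_sketch(unsigned long new_limit)
{
	unsigned long retval;

	pthread_mutex_lock(&big_lock);
	retval = current_limit;            /* default: report the old value */

	if (new_limit == 0)
		goto out;                  /* reject: keep the old limit    */
	if (new_limit > (1UL << 30))
		goto out;                  /* reject: over a made-up rlimit */

	current_limit = new_limit;         /* success path                  */
	retval = current_limit;
out:
	pthread_mutex_unlock(&big_lock);
	return retval;
}

int main(void)
{
	unsigned long v = set_limit_sketch(8192);
	printf("limit is now %lu\n", v);
	return 0;
}
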
/*
@@ -215,8 +235,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
if (file && (!file->f_op || !file->f_op->mmap))
return -ENODEV;
- vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),
- GFP_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma)
return -ENOMEM;
@@ -256,7 +275,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
/* Check against address space limit. */
if ((mm->total_vm << PAGE_SHIFT) + len
> current->rlim[RLIMIT_AS].rlim_cur) {
- kfree(vma);
+ kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
@@ -264,7 +283,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) == VM_WRITE) {
if (!(flags & MAP_NORESERVE) &&
!vm_enough_memory(len >> PAGE_SHIFT)) {
- kfree(vma);
+ kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
}
@@ -273,7 +292,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
int error = file->f_op->mmap(file->f_inode, file, vma);
if (error) {
- kfree(vma);
+ kmem_cache_free(vm_area_cachep, vma);
return error;
}
}
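
These hunks switch every vm_area_struct allocation and error-path release from kmalloc()/kfree() to the new dedicated SLAB cache, so a failed mapping returns its object to the same pool it came from. A user-space analogue of that paired alloc/free discipline; the cache type and helpers below are hypothetical stand-ins, not the kernel's slab API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct vm_area_struct. */
struct vm_area_sketch {
	unsigned long vm_start;
	unsigned long vm_end;
};

/* Hypothetical stand-in for a slab cache: one object size per cache. */
struct object_cache {
	size_t obj_size;
};

static void *cache_alloc(struct object_cache *c)
{
	return calloc(1, c->obj_size);     /* zeroed object from the "cache" */
}

static void cache_free(struct object_cache *c, void *obj)
{
	(void)c;                           /* a real cache would recycle obj */
	free(obj);
}

int main(void)
{
	struct object_cache vm_area_cache = { sizeof(struct vm_area_sketch) };

	/* Mirrors the patched pattern: allocate from the cache and, on any
	 * error path, give the object back with cache_free(), not kfree(). */
	struct vm_area_sketch *vma = cache_alloc(&vm_area_cache);
	if (!vma)
		return 1;
	vma->vm_start = 0x1000;
	vma->vm_end   = 0x2000;
	printf("vma [%#lx, %#lx)\n", vma->vm_start, vma->vm_end);
	cache_free(&vm_area_cache, vma);
	return 0;
}
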
@@ -284,7 +303,7 @@ unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
/* merge_segments might have merged our vma, so we can't use it any more */
mm->total_vm += len >> PAGE_SHIFT;
- if (flags & VM_LOCKED) {
+ if ((flags & VM_LOCKED) && !(flags & VM_IO)) {
unsigned long start = addr;
mm->locked_vm += len >> PAGE_SHIFT;
do {
@@ -310,7 +329,7 @@ unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
if (len > TASK_SIZE)
return 0;
if (!addr)
- addr = TASK_SIZE / 3;
+ addr = TASK_UNMAPPED_BASE;
addr = PAGE_ALIGN(addr);
for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
@@ -751,7 +770,7 @@ static void unmap_fixup(struct vm_area_struct *area,
else {
/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
/* Add end mapping -- leave beginning for below */
- mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
+ mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!mpnt)
return;
@@ -767,7 +786,7 @@ static void unmap_fixup(struct vm_area_struct *area,
}
/* construct whatever mapping is needed */
- mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
+ mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!mpnt)
return;
*mpnt = *area;
@@ -782,7 +801,12 @@ static void unmap_fixup(struct vm_area_struct *area,
asmlinkage int sys_munmap(unsigned long addr, size_t len)
{
- return do_munmap(addr, len);
+ int ret;
+
+ lock_kernel();
+ ret = do_munmap(addr, len);
+ unlock_kernel();
+ return ret;
}
/*
@@ -833,7 +857,7 @@ int do_munmap(unsigned long addr, size_t len)
* it will put new vm_area_struct(s) into the address space.
*/
do {
- unsigned long st, end;
+ unsigned long st, end, size;
mpnt = free;
free = free->vm_next;
@@ -843,12 +867,15 @@ int do_munmap(unsigned long addr, size_t len)
st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
end = addr+len;
end = end > mpnt->vm_end ? mpnt->vm_end : end;
+ size = end - st;
if (mpnt->vm_ops && mpnt->vm_ops->unmap)
- mpnt->vm_ops->unmap(mpnt, st, end-st);
- zap_page_range(current->mm, st, end-st);
- unmap_fixup(mpnt, st, end-st);
- kfree(mpnt);
+ mpnt->vm_ops->unmap(mpnt, st, size);
+ flush_cache_range(current->mm, st, end);
+ zap_page_range(current->mm, st, size);
+ flush_tlb_range(current->mm, st, end);
+ unmap_fixup(mpnt, st, size);
+ kmem_cache_free(vm_area_cachep, mpnt);
} while (free);
/* we could zap the page tables here too.. */
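
The do_munmap() hunk brackets the page-table teardown with explicit cache and TLB maintenance: virtually indexed caches are flushed while the mapping still exists, and the TLB is flushed only after the PTEs are gone, which matters on MIPS-style hardware. A sketch of that ordering with stub hooks (all names hypothetical):

#include <stdio.h>

/* Stub hooks, called in the order the patched do_munmap() uses them. */
static void flush_cache_range_stub(unsigned long st, unsigned long end)
{
	printf("flush caches [%#lx, %#lx)\n", st, end);    /* mapping still live */
}

static void zap_page_range_stub(unsigned long st, unsigned long size)
{
	printf("drop PTEs    [%#lx, %#lx)\n", st, st + size);
}

static void flush_tlb_range_stub(unsigned long st, unsigned long end)
{
	printf("flush TLB    [%#lx, %#lx)\n", st, end);    /* PTEs already gone  */
}

int main(void)
{
	unsigned long st = 0x400000, end = 0x404000, size = end - st;

	/* Caches are flushed while the translation still exists; the TLB is
	 * flushed only after the page tables stop referencing the pages. */
	flush_cache_range_stub(st, end);
	zap_page_range_stub(st, size);
	flush_tlb_range_stub(st, end);
	return 0;
}
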
@@ -879,17 +906,21 @@ void exit_mmap(struct mm_struct * mm)
mm->locked_vm = 0;
while (mpnt) {
struct vm_area_struct * next = mpnt->vm_next;
+ unsigned long start = mpnt->vm_start;
+ unsigned long end = mpnt->vm_end;
+ unsigned long size = end - start;
+
if (mpnt->vm_ops) {
if (mpnt->vm_ops->unmap)
- mpnt->vm_ops->unmap(mpnt, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
+ mpnt->vm_ops->unmap(mpnt, start, size);
if (mpnt->vm_ops->close)
mpnt->vm_ops->close(mpnt);
}
remove_shared_vm_struct(mpnt);
- zap_page_range(mm, mpnt->vm_start, mpnt->vm_end-mpnt->vm_start);
+ zap_page_range(mm, start, size);
if (mpnt->vm_inode)
iput(mpnt->vm_inode);
- kfree(mpnt);
+ kmem_cache_free(vm_area_cachep, mpnt);
mpnt = next;
}
}
@@ -1038,9 +1069,19 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
remove_shared_vm_struct(mpnt);
if (mpnt->vm_inode)
mpnt->vm_inode->i_count--;
- kfree_s(mpnt, sizeof(*mpnt));
+ kmem_cache_free(vm_area_cachep, mpnt);
mpnt = prev;
}
no_vma:
up(&mm->mmap_sem);
}
+
+void vma_init(void)
+{
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct),
+ sizeof(long)*8, SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if(!vm_area_cachep)
+ panic("vma_init: Cannot alloc vm_area_struct cache.");
+}
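
The new vma_init() creates the vm_area_struct cache once and treats failure as fatal, since every later mapping operation assumes the cache exists. A user-space analogue of that create-or-panic shape; cache_create() below is a hypothetical stand-in for kmem_cache_create():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a boot-time object cache. */
struct object_cache {
	const char *name;
	size_t obj_size;
};

static struct object_cache *cache_create(const char *name, size_t size)
{
	struct object_cache *c = malloc(sizeof(*c));

	if (c) {
		c->name = name;
		c->obj_size = size;
	}
	return c;
}

static struct object_cache *vm_area_cache;

/* Same shape as vma_init(): build the cache once at startup and treat
 * failure as fatal, because later allocations depend on it existing. */
static void vma_init_sketch(void)
{
	vm_area_cache = cache_create("vm_area_struct", 64 /* illustrative */);
	if (!vm_area_cache) {
		fprintf(stderr, "vma_init: cannot allocate cache\n");
		exit(1);
	}
}

int main(void)
{
	vma_init_sketch();
	printf("cache '%s' ready (%zu-byte objects)\n",
	       vm_area_cache->name, vm_area_cache->obj_size);
	return 0;
}
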