diff options
author | Ralf Baechle <ralf@linux-mips.org> | 2000-04-28 01:09:25 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-04-28 01:09:25 +0000 |
commit | b9ba7aeb165cffecdffb60aec8c3fa8d590d9ca9 (patch) | |
tree | 42d07b0c7246ae2536a702e7c5de9e2732341116 /mm/mmap.c | |
parent | 7406b0a326f2d70ade2671c37d1beef62249db97 (diff) |
Merge with 2.3.99-pre6.
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 50 |
1 files changed, 27 insertions, 23 deletions
@@ -110,7 +110,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk) /* Always allow shrinking brk. */ if (brk <= mm->brk) { - if (!do_munmap(newbrk, oldbrk-newbrk)) + if (!do_munmap(mm, newbrk, oldbrk-newbrk)) goto set_brk; goto out; } @@ -220,8 +220,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon default: return -EINVAL; } - } else if ((flags & MAP_TYPE) != MAP_PRIVATE) - return -EINVAL; + } /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. @@ -269,8 +268,11 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon if (!(file->f_mode & FMODE_WRITE)) vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED); } - } else + } else { vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + if (flags & MAP_SHARED) + vma->vm_flags |= VM_SHARED | VM_MAYSHARE; + } vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f]; vma->vm_ops = NULL; vma->vm_pgoff = pgoff; @@ -279,7 +281,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon /* Clear old maps */ error = -ENOMEM; - if (do_munmap(addr, len)) + if (do_munmap(mm, addr, len)) goto free_vma; /* Check against address space limit. 
*/ @@ -316,6 +318,8 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon atomic_inc(&file->f_dentry->d_inode->i_writecount); if (error) goto unmap_and_free_vma; + } else if (flags & MAP_SHARED) { + error = map_zero_setup(vma); } /* @@ -468,13 +472,13 @@ struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, return NULL; } -struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr) +struct vm_area_struct * find_extend_vma(struct mm_struct * mm, unsigned long addr) { struct vm_area_struct * vma; unsigned long start; addr &= PAGE_MASK; - vma = find_vma(tsk->mm,addr); + vma = find_vma(mm,addr); if (!vma) return NULL; if (vma->vm_start <= addr) @@ -513,8 +517,9 @@ struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long * allocate a new one, and the return indicates whether the old * area was reused. */ -static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area, - unsigned long addr, size_t len, struct vm_area_struct *extra) +static struct vm_area_struct * unmap_fixup(struct mm_struct *mm, + struct vm_area_struct *area, unsigned long addr, size_t len, + struct vm_area_struct *extra) { struct vm_area_struct *mpnt; unsigned long end = addr + len; @@ -536,11 +541,11 @@ static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area, /* Work out to one of the ends. 
*/ if (end == area->vm_end) { area->vm_end = addr; - vmlist_modify_lock(current->mm); + vmlist_modify_lock(mm); } else if (addr == area->vm_start) { area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT; area->vm_start = end; - vmlist_modify_lock(current->mm); + vmlist_modify_lock(mm); } else { /* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */ /* Add end mapping -- leave beginning for below */ @@ -562,12 +567,12 @@ static struct vm_area_struct * unmap_fixup(struct vm_area_struct *area, if (mpnt->vm_ops && mpnt->vm_ops->open) mpnt->vm_ops->open(mpnt); area->vm_end = addr; /* Truncate area */ - vmlist_modify_lock(current->mm); - insert_vm_struct(current->mm, mpnt); + vmlist_modify_lock(mm); + insert_vm_struct(mm, mpnt); } - insert_vm_struct(current->mm, area); - vmlist_modify_unlock(current->mm); + insert_vm_struct(mm, area); + vmlist_modify_unlock(mm); return extra; } @@ -634,9 +639,8 @@ no_mmaps: * work. This now handles partial unmappings. * Jeremy Fitzhardine <jeremy@sw.oz.au> */ -int do_munmap(unsigned long addr, size_t len) +int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) { - struct mm_struct * mm; struct vm_area_struct *mpnt, *prev, **npp, *free, *extra; if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr) @@ -650,7 +654,6 @@ int do_munmap(unsigned long addr, size_t len) * every area affected in some way (by any overlap) is put * on the list. If nothing is put on, nothing is affected. */ - mm = current->mm; mpnt = find_vma_prev(mm, addr, &prev); if (!mpnt) return 0; @@ -713,7 +716,7 @@ int do_munmap(unsigned long addr, size_t len) /* * Fix the mapping, and free the old area if it wasn't reused. 
*/ - extra = unmap_fixup(mpnt, st, size, extra); + extra = unmap_fixup(mm, mpnt, st, size, extra); } /* Release the extra vma struct if it wasn't used */ @@ -728,10 +731,11 @@ int do_munmap(unsigned long addr, size_t len) asmlinkage long sys_munmap(unsigned long addr, size_t len) { int ret; + struct mm_struct *mm = current->mm; - down(&current->mm->mmap_sem); - ret = do_munmap(addr, len); - up(&current->mm->mmap_sem); + down(&mm->mmap_sem); + ret = do_munmap(mm, addr, len); + up(&mm->mmap_sem); return ret; } @@ -763,7 +767,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* * Clear old maps. this also does some error checking for us */ - retval = do_munmap(addr, len); + retval = do_munmap(mm, addr, len); if (retval != 0) return retval; |