author     Ralf Baechle <ralf@linux-mips.org>  2000-06-25 01:20:01 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-06-25 01:20:01 +0000
commit     3797ba0b62debb71af4606910acacc9896a9ae3b
tree       414eea76253c7871bfdf3bd9d1817771eb40917c
parent     2b6c0c580795a4404f72d2a794214dd9e080709d
Merge with Linux 2.4.0-test2.
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c | 31
1 file changed, 15 insertions(+), 16 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 9edabc02e..f5bb2599c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -166,7 +166,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
 {
 	struct mm_struct * mm = current->mm;
 	struct vm_area_struct * vma;
-	int correct_wcount = 0;
 	int error;
 
 	if (file && (!file->f_op || !file->f_op->mmap))
@@ -297,19 +296,32 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
 		goto free_vma;
 
 	if (file) {
+		int correct_wcount = 0;
 		if (vma->vm_flags & VM_DENYWRITE) {
-			error = deny_write_access(file);
-			if (error)
+			if (atomic_read(&file->f_dentry->d_inode->i_writecount) > 0) {
+				error = -ETXTBSY;
 				goto free_vma;
+			}
+			/* f_op->mmap might possibly sleep
+			 * (generic_file_mmap doesn't, but other code
+			 * might). In any case, this takes care of any
+			 * race that this might cause.
+			 */
+			atomic_dec(&file->f_dentry->d_inode->i_writecount);
 			correct_wcount = 1;
 		}
 		vma->vm_file = file;
 		get_file(file);
 		error = file->f_op->mmap(file, vma);
+		/* Fix up the count if necessary, then check for an error */
+		if (correct_wcount)
+			atomic_inc(&file->f_dentry->d_inode->i_writecount);
 		if (error)
 			goto unmap_and_free_vma;
 	} else if (flags & MAP_SHARED) {
 		error = map_zero_setup(vma);
+		if (error)
+			goto free_vma;
 	}
 
 	/*
@@ -320,8 +332,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
 	addr = vma->vm_start; /* can addr have changed?? */
 	vmlist_modify_lock(mm);
 	insert_vm_struct(mm, vma);
-	if (correct_wcount)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
 	merge_segments(mm, vma->vm_start, vma->vm_end);
 	vmlist_modify_unlock(mm);
 
@@ -333,8 +343,6 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
 	return addr;
 
 unmap_and_free_vma:
-	if (correct_wcount)
-		atomic_inc(&file->f_dentry->d_inode->i_writecount);
 	vma->vm_file = NULL;
 	fput(file);
 	/* Undo any partial mapping done by a device driver. */
@@ -686,11 +694,9 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 	 * so release them, and unmap the page range..
 	 * If the one of the segments is only being partially unmapped,
 	 * it will put new vm_area_struct(s) into the address space.
-	 * In that case we have to be careful with VM_DENYWRITE.
 	 */
 	while ((mpnt = free) != NULL) {
 		unsigned long st, end, size;
-		struct file *file = NULL;
 
 		free = free->vm_next;
 
@@ -702,11 +708,6 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 		if (mpnt->vm_ops && mpnt->vm_ops->unmap)
 			mpnt->vm_ops->unmap(mpnt, st, size);
 
-		if (mpnt->vm_flags & VM_DENYWRITE &&
-		    (st != mpnt->vm_start || end != mpnt->vm_end) &&
-		    (file = mpnt->vm_file) != NULL) {
-			atomic_dec(&file->f_dentry->d_inode->i_writecount);
-		}
 		remove_shared_vm_struct(mpnt);
 		mm->map_count--;
 
@@ -718,8 +719,6 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
 		 * Fix the mapping, and free the old area if it wasn't reused.
 		 */
 		extra = unmap_fixup(mm, mpnt, st, size, extra);
-		if (file)
-			atomic_inc(&file->f_dentry->d_inode->i_writecount);
 	}
 
 	/* Release the extra vma struct if it wasn't used */
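
Taken together, these hunks reduce the VM_DENYWRITE handling to a single bracketing of f_op->mmap(): refuse the mapping with -ETXTBSY if the inode already has writers, otherwise dip i_writecount below zero across the (possibly sleeping) mmap callback and restore it immediately afterwards, which is why the later fixups at insert time, on the error path, and in do_munmap() can be dropped. A minimal userspace sketch of that bracketing, using C11 <stdatomic.h> in place of the kernel's atomic_t helpers (fake_inode, fake_mmap_cb and map_denywrite are illustrative names, not kernel symbols), might look like this:

/*
 * Sketch only: models the i_writecount bracketing shown in the hunks
 * above, not the kernel implementation itself.
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_inode {
	atomic_int i_writecount;	/* > 0: open writers, < 0: deny-write users */
};

/* Stand-in for file->f_op->mmap(); in the kernel this may sleep. */
static int fake_mmap_cb(struct fake_inode *inode)
{
	(void)inode;
	return 0;
}

static int map_denywrite(struct fake_inode *inode)
{
	int correct_wcount = 0;
	int error;

	/* An inode that already has writers cannot be mapped deny-write. */
	if (atomic_load(&inode->i_writecount) > 0)
		return -ETXTBSY;

	/* Drop the count before the callback; per the comment in the hunk
	 * above, this covers the race a sleeping mmap implementation could
	 * otherwise open. */
	atomic_fetch_sub(&inode->i_writecount, 1);
	correct_wcount = 1;

	error = fake_mmap_cb(inode);

	/* Fix up the count if necessary, then check for an error. */
	if (correct_wcount)
		atomic_fetch_add(&inode->i_writecount, 1);
	return error;
}

int main(void)
{
	struct fake_inode inode = { 0 };

	printf("no writers:  %d\n", map_denywrite(&inode));	/* 0 */
	atomic_store(&inode.i_writecount, 1);			/* simulate an open writer */
	printf("busy writer: %d\n", map_denywrite(&inode));	/* -ETXTBSY */
	return 0;
}

Because the count is restored right after the callback rather than on the paths the patch deletes, every exit from the file branch leaves i_writecount balanced, and unmap no longer needs to know whether the mapping was VM_DENYWRITE.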