author    Ralf Baechle <ralf@linux-mips.org>    2001-03-09 20:33:35 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2001-03-09 20:33:35 +0000
commit    116674acc97ba75a720329996877077d988443a2 (patch)
tree      6a3f2ff0b612ae2ee8a3f3509370c9e6333a53b3 /mm
parent    71118c319fcae4a138f16e35b4f7e0a6d53ce2ca (diff)
Merge with Linux 2.4.2.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    5
-rw-r--r--  mm/memory.c     3
-rw-r--r--  mm/mmap.c      10
-rw-r--r--  mm/swapfile.c   2
-rw-r--r--  mm/vmalloc.c    2
5 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 4c89ad3e9..156ef6010 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -9,7 +9,7 @@
* most "normal" filesystems (but you don't /have/ to use this:
* the NFS filesystem used to do this differently, for example)
*/
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/locks.h>
@@ -19,7 +19,6 @@
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/swapctl.h>
-#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -397,7 +396,7 @@ int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsig
retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);
/* now wait for locked buffers on pages from both clean and dirty lists */
- retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
+ retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);
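Note on the filemap.c hunks: besides retiring <linux/malloc.h> for <linux/slab.h> (and dropping the now-duplicate slab.h include), the functional fix is in generic_buffer_fdatasync(). The second pass over the mapping's lists is the wait phase, so the dirty_pages call there must use waitfor_one_page() rather than starting writeout again. A sketch of the full write-then-wait sequence as implied by the context lines (the first pass is reconstructed from the function's own comments, not shown in the diff):

	/* writeout pass: start I/O on pages from all three lists */
	retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);

	/* wait pass: wait for I/O completion on all three lists --
	 * the dirty_pages call here is the one the hunk fixes to use
	 * waitfor_one_page() instead of writeout_one_page() */
	retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);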
diff --git a/mm/memory.c b/mm/memory.c
index 7fc8de5eb..242981f72 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -858,6 +858,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
UnlockPage(old_page);
/* FallThrough */
case 1:
+ if (PageReserved(old_page))
+ break;
flush_cache_page(vma, address);
establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
spin_unlock(&mm->page_table_lock);
@@ -1200,6 +1202,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
pgd_t *pgd;
pmd_t *pmd;
+ current->state = TASK_RUNNING;
pgd = pgd_offset(mm, address);
pmd = pmd_alloc(pgd, address);
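Note on the memory.c hunks: in do_wp_page(), a page with count 1 would normally just be made writable in place, but that must never happen for a PageReserved() page (the zero page, driver-owned memory), so the new check breaks out of the switch toward the copy path instead. In handle_mm_fault(), the new current->state = TASK_RUNNING covers a task that faults on user memory after already marking itself asleep. A hedged illustration of that pattern (hypothetical caller, not code from this patch):

	/* A task marks itself asleep, then touches user memory
	 * before actually calling schedule(): */
	current->state = TASK_INTERRUPTIBLE;	/* step 1: prepare to sleep */
	__put_user(0, user_ptr);		/* step 2: may page-fault */
	schedule();				/* step 3: really sleep */

	/* If step 2 faults and the fault has to block (swap-in, I/O),
	 * the fault path reschedules internally; forcing TASK_RUNNING
	 * at the top of handle_mm_fault() keeps the caller's
	 * half-finished sleep state from confusing that inner
	 * scheduling. */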
diff --git a/mm/mmap.c b/mm/mmap.c
index e1faba3c7..ee45f2db8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -12,6 +12,7 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
@@ -63,6 +64,15 @@ int vm_enough_memory(long pages)
free += atomic_read(&page_cache_size);
free += nr_free_pages();
free += nr_swap_pages;
+ /*
+ * The code below doesn't account for free space in the inode
+ * and dentry slab cache, slab cache fragmentation, inodes and
+ * dentries which will become freeable under VM load, etc.
+ * Lets just hope all these (complex) factors balance out...
+ */
+ free += (dentry_stat.nr_unused * sizeof(struct dentry)) >> PAGE_SHIFT;
+ free += (inodes_stat.nr_unused * sizeof(struct inode)) >> PAGE_SHIFT;
+
return free > pages;
}
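Note on the mmap.c hunk: vm_enough_memory() is the overcommit heuristic, and the new lines credit pages reclaimable from unused dentries and inodes toward the free total (hence the new <linux/fs.h> include, for dentry_stat and inodes_stat). A worked example of the arithmetic, with illustrative sizes that are assumptions rather than values from the 2.4.2 headers: take PAGE_SHIFT = 12 (4 KiB pages), sizeof(struct dentry) = 128 bytes, and dentry_stat.nr_unused = 10000:

	(10000 * 128) >> 12  ==  1280000 / 4096  ==  312 pages

so roughly 312 extra pages are counted as free for the overcommit check. As the added comment itself concedes, slab fragmentation means not all of that memory is actually reclaimable; the heuristic just hopes the errors balance out.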
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 57f815638..f02800bc6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -5,7 +5,7 @@
* Swap reorganised 29.12.95, Stephen Tweedie
*/
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 93edab662..ab74d114b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -6,7 +6,7 @@
* SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
*/
-#include <linux/malloc.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
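Note on the swapfile.c and vmalloc.c hunks: like filemap.c, these only retire the <linux/malloc.h> compatibility header in favour of <linux/slab.h>; the allocation API callers use is the same either way. A minimal usage sketch (the buffer size and GFP_KERNEL context are illustrative assumptions):

	#include <linux/slab.h>	/* formerly <linux/malloc.h> */

	void *buf = kmalloc(256, GFP_KERNEL);	/* slab-backed allocation */
	if (buf) {
		/* ... use buf ... */
		kfree(buf);
	}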