path: root/mm/vmscan.c
author    Ralf Baechle <ralf@linux-mips.org>    1998-03-18 17:17:51 +0000
committer Ralf Baechle <ralf@linux-mips.org>    1998-03-18 17:17:51 +0000
commit    f1382dc4850bb459d24a81c6cb0ef93ea7bd4a79 (patch)
tree      225271a3d5dcd4e9dea5ee393556abd754c964b1 /mm/vmscan.c
parent    135b00fc2e90e605ac2a96b20b0ebd93851a3f89 (diff)
o Merge with Linux 2.1.90.
o Divide L1 cache sizes by 1024 before printing, makes the numbers a bit more credible ...
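The cache-size point in the message refers to the MIPS cache-probing code rather than to mm/vmscan.c itself. A rough userspace illustration of that fix (the variable names and sizes here are hypothetical; only the divide-by-1024 comes from the commit message):

#include <stdio.h>

/* Illustrative only: report probed L1 cache sizes in KB rather than
 * raw byte counts.  The real change is in the MIPS cache-probing
 * code; these names and values are made up. */
int main(void)
{
	unsigned long icache_size = 32768;  /* hypothetical probed size, in bytes */
	unsigned long dcache_size = 32768;

	printf("Primary instruction cache %lukb, primary data cache %lukb\n",
	       icache_size / 1024, dcache_size / 1024);
	return 0;
}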
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  |  42
1 file changed, 16 insertions(+), 26 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ebef7a362..5d4188ae5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6,7 +6,7 @@
* Swap reorganised 29.12.95, Stephen Tweedie.
* kswapd added: 7.1.96 sct
* Removed kswapd_ctl limits, and swap out as many pages as needed
- * to bring the system back to free_pages_high: 2.4.97, Rik van Riel.
+ * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
* Version: $Id: vmscan.c,v 1.5 1998/02/23 22:14:28 sct Exp $
*/
@@ -22,6 +22,8 @@
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
@@ -454,11 +456,14 @@ static inline int do_try_to_free_page(int gfp_mask)
stop = 3;
if (gfp_mask & __GFP_WAIT)
stop = 0;
+ if (BUFFER_MEM > buffer_mem.borrow_percent * num_physpages / 100)
+ state = 0;
switch (state) {
do {
case 0:
- if (shrink_mmap(i, gfp_mask))
+ if (BUFFER_MEM > (buffer_mem.min_percent * num_physpages /100) &&
+ shrink_mmap(i, gfp_mask))
return 1;
state = 1;
case 1:
@@ -511,7 +516,6 @@ void kswapd_setup(void)
printk ("Starting kswapd v%.*s\n", i, s);
}
-#define MAX_SWAP_FAIL 3
/*
* The background pageout daemon.
* Started as a kernel thread from the init process.
@@ -542,32 +546,25 @@ int kswapd(void *unused)
while (1) {
int tries;
+ current->state = TASK_INTERRUPTIBLE;
kswapd_awake = 0;
flush_signals(current);
run_task_queue(&tq_disk);
schedule();
- current->state = TASK_INTERRUPTIBLE;
kswapd_awake = 1;
swapstats.wakeups++;
/* Do the background pageout:
* When we've got loads of memory, we try
- * (free_pages_high - nr_free_pages) times to
+ * (freepages.high - nr_free_pages) times to
* free memory. As memory gets tighter, kswapd
* gets more and more agressive. -- Rik.
*/
- tries = free_pages_high - nr_free_pages;
- if (tries < min_free_pages) {
- tries = min_free_pages;
+ tries = freepages.high - nr_free_pages;
+ if (tries < freepages.min) {
+ tries = freepages.min;
}
- else if (nr_free_pages < (free_pages_high + free_pages_low) / 2) {
+ if (nr_free_pages < freepages.high + freepages.low)
tries <<= 1;
- if (nr_free_pages < free_pages_low) {
- tries <<= 1;
- if (nr_free_pages <= min_free_pages) {
- tries <<= 1;
- }
- }
- }
while (tries--) {
int gfp_mask;
@@ -583,14 +580,6 @@ int kswapd(void *unused)
run_task_queue(&tq_disk);
}
-#if 0
- /*
- * Report failure if we couldn't even reach min_free_pages.
- */
- if (nr_free_pages < min_free_pages)
- printk("kswapd: failed, got %d of %d\n",
- nr_free_pages, min_free_pages);
-#endif
}
/* As if we could ever get here - maybe we want to make this killable */
remove_wait_queue(&kswapd_wait, &wait);
@@ -606,9 +595,10 @@ void swap_tick(void)
int want_wakeup = 0, memory_low = 0;
int pages = nr_free_pages + atomic_read(&nr_async_pages);
- if (pages < free_pages_low)
+ if (pages < freepages.low)
memory_low = want_wakeup = 1;
- else if (pages < free_pages_high && jiffies >= next_swap_jiffies)
+ else if ((pages < freepages.high || BUFFER_MEM > (num_physpages * buffer_mem.max_percent / 100))
+ && jiffies >= next_swap_jiffies)
want_wakeup = 1;
if (want_wakeup) {
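Taken together, the patch replaces the old free_pages_high / free_pages_low / min_free_pages globals with the freepages and buffer_mem sysctl structures, collapses kswapd's three nested retry doublings into a single one, and lets buffer-cache pressure (BUFFER_MEM measured against buffer_mem's percent thresholds) both force the cache-shrinking state in do_try_to_free_page() and trigger kswapd wakeups from swap_tick(). Below is a compilable userspace sketch of the two arithmetic heuristics the new code implements; the function and variable names mirror the kernel globals in the diff, but the threshold values and all scaffolding are illustrative, not the 2.1.90 defaults.

#include <stdio.h>

/* Stand-ins for the kernel sysctl structures named in the diff. */
struct freepages_v { int min, low, high; };
struct buffer_mem_v { int min_percent, borrow_percent, max_percent; };

static struct freepages_v freepages = { 48, 96, 144 };  /* example values only */
static struct buffer_mem_v buffer_mem = { 5, 10, 60 };  /* example values only */
static int num_physpages = 4096;                        /* e.g. 16 MB of 4 KB pages */

/* How many pageout attempts kswapd makes per wakeup: the shortfall
 * below freepages.high, clamped to at least freepages.min, and doubled
 * once when free memory is below the combined high+low watermark. */
static int kswapd_tries(int nr_free_pages)
{
	int tries = freepages.high - nr_free_pages;

	if (tries < freepages.min)
		tries = freepages.min;
	if (nr_free_pages < freepages.high + freepages.low)
		tries <<= 1;
	return tries;
}

/* Whether swap_tick() wants to wake kswapd: unconditionally when free
 * pages (counting pages already queued for async writeout) fall below
 * freepages.low, otherwise when free memory is below freepages.high or
 * the buffer cache exceeds its max_percent share of RAM.  (The real
 * kernel additionally rate-limits the second case via next_swap_jiffies.) */
static int want_wakeup(int nr_free_pages, int nr_async_pages, int buffer_pages)
{
	int pages = nr_free_pages + nr_async_pages;

	if (pages < freepages.low)
		return 1;
	return pages < freepages.high ||
	       buffer_pages > num_physpages * buffer_mem.max_percent / 100;
}

int main(void)
{
	printf("tries with 100 pages free: %d\n", kswapd_tries(100));
	printf("wake kswapd (200 free, 0 async, 3000 buffer pages): %d\n",
	       want_wakeup(200, 0, 3000));
	return 0;
}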