author    Ralf Baechle <ralf@linux-mips.org>    1998-04-05 11:23:36 +0000
committer Ralf Baechle <ralf@linux-mips.org>    1998-04-05 11:23:36 +0000
commit    4318fbda2a7ee51caafdc4eb1f8028a3f0605142 (patch)
tree      cddb50a81d7d1a628cc400519162080c6d87868e /mm
parent    36ea5120664550fae6d31f1c6f695e4f8975cb06 (diff)
o Merge with Linux 2.1.91.
o First round of bugfixes for the SC/MC CPUs.
o FPU context switch fixes.
o Lazy context switches.
o Faster syscalls.
o Removed dead code.
o Shitloads of other things I forgot ...
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     |  6
-rw-r--r--  mm/mprotect.c    |  4
-rw-r--r--  mm/page_alloc.c  | 21
-rw-r--r--  mm/swap.c        | 11
-rw-r--r--  mm/vmscan.c      | 93
5 files changed, 84 insertions, 51 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7a4e20e21..0971c63b7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -150,6 +150,10 @@ int shrink_mmap(int priority, int gfp_mask)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
+
+ /* Refuse to swap out all buffer pages */
+ if ((buffermem >> PAGE_SHIFT) * 100 < (buffer_mem.min_percent * num_physpages))
+ goto next;
}
/* We can't throw away shared pages, but we do mark
@@ -167,7 +171,7 @@ int shrink_mmap(int priority, int gfp_mask)
break;
}
age_page(page);
- if (page->age)
+ if (page->age || page_cache_size * 100 < (page_cache.min_percent * num_physpages))
break;
if (PageSwapCache(page)) {
delete_from_swap_cache(page);
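The new floor test in shrink_mmap() keeps the comparison in integer arithmetic: buffermem counts bytes, the shift converts it to pages, and cross-multiplying by 100 avoids a division. A user-space sketch of that test (the standalone globals and the 4 KB PAGE_SHIFT are assumptions for illustration, mirroring the patch's expression):

/*
 * Sketch of the percentage floor test added above; names mirror the
 * kernel's, but the standalone types and values are assumptions.
 */
#include <stdio.h>

#define PAGE_SHIFT 12                     /* 4 KB pages, typical for the era */

static unsigned long buffermem;           /* buffer memory, in bytes */
static unsigned long num_physpages;       /* total physical pages */
static unsigned int  min_percent = 3;     /* buffer_mem.min_percent */

/* Nonzero when buffer pages have fallen below min_percent of RAM. */
static int buffers_below_minimum(void)
{
	/*
	 * Cross-multiplied form of
	 *   (buffermem >> PAGE_SHIFT) / num_physpages < min_percent / 100
	 * so everything stays in integer arithmetic.
	 */
	return (buffermem >> PAGE_SHIFT) * 100 < min_percent * num_physpages;
}

int main(void)
{
	num_physpages = 16384;                /* 64 MB of 4 KB pages */
	buffermem = 400UL << PAGE_SHIFT;      /* 400 buffer pages */
	/* 400/16384 is about 2.4% < 3%, so shrinking is refused. */
	printf("below minimum: %d\n", buffers_below_minimum());
	return 0;
}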
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a34225d83..0c5dac4cd 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -185,12 +185,12 @@ static int mprotect_fixup(struct vm_area_struct * vma,
if (newflags == vma->vm_flags)
return 0;
newprot = protection_map[newflags & 0xf];
- if (start == vma->vm_start)
+ if (start == vma->vm_start) {
if (end == vma->vm_end)
error = mprotect_fixup_all(vma, newflags, newprot);
else
error = mprotect_fixup_start(vma, end, newflags, newprot);
- else if (end == vma->vm_end)
+ } else if (end == vma->vm_end)
error = mprotect_fixup_end(vma, start, newflags, newprot);
else
error = mprotect_fixup_middle(vma, start, end, newflags, newprot);
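The mprotect_fixup() change only adds braces: the unbraced nested if/else parsed as intended, but the explicit block makes the else binding obvious and keeps a later edit from silently rebinding it. A minimal standalone illustration of the dangling-else hazard the braces guard against (not kernel code):

/*
 * Dangling-else illustration: an `else` always binds to the nearest
 * unbraced `if`, which is easy to misread without explicit braces.
 */
#include <stdio.h>

static void classify(int a, int b)
{
	if (a)
		if (b)
			printf("a && b\n");
		else			/* binds to `if (b)`, not `if (a)` */
			printf("a && !b\n");
	else				/* binds to `if (a)` -- easy to misread */
		printf("!a\n");
}

int main(void)
{
	classify(1, 0);			/* prints "a && !b" */
	classify(0, 1);			/* prints "!a" */
	return 0;
}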
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3b1c0e8c..cb181e437 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -118,26 +118,33 @@ static spinlock_t page_alloc_lock;
*
* [previously, there had to be two entries of the highest memory
* order, but this lead to problems on large-memory machines.]
+ *
+ * This will return zero if no list was found, non-zero
+ * if there was memory (the bigger, the better).
*/
-int free_memory_available(void)
+int free_memory_available(int nr)
{
- int i, retval = 0;
+ int retval = 0;
unsigned long flags;
struct free_area_struct * list = NULL;
+ list = free_area + NR_MEM_LISTS;
spin_lock_irqsave(&page_alloc_lock, flags);
/* We fall through the loop if the list contains one
* item. -- thanks to Colin Plumb <colin@nyx.net>
*/
- for (i = 1; i < 4; ++i) {
- list = free_area + NR_MEM_LISTS - i;
+ do {
+ list--;
+ /* Empty list? Bad - we need more memory */
if (list->next == memory_head(list))
break;
+ /* One item on the list? Look further */
if (list->next->next == memory_head(list))
continue;
- retval = 1;
+ /* More than one item? We're ok */
+ retval = nr + 1;
break;
- }
+ } while (--nr >= 0);
spin_unlock_irqrestore(&page_alloc_lock, flags);
return retval;
}
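free_memory_available() now grades free memory instead of answering yes/no: starting from the highest-order free list it treats an empty list as failure, skips past lists holding a single entry, and returns a larger value the higher the order at which it finds two or more entries. A user-space sketch with the per-order list lengths faked as a plain array (an assumption for illustration; the real function walks free_area under page_alloc_lock):

/*
 * User-space sketch of the graded availability check above.
 */
#include <stdio.h>

#define NR_MEM_LISTS 10

static int list_len[NR_MEM_LISTS];	/* items on each order's free list */

static int free_memory_available(int nr)
{
	int order = NR_MEM_LISTS;

	do {
		order--;
		if (list_len[order] == 0)
			return 0;	/* empty list: memory is tight */
		if (list_len[order] == 1)
			continue;	/* single item: look one order lower */
		return nr + 1;		/* two or more: ok, graded by depth */
	} while (--nr >= 0);
	return 0;			/* every list held exactly one item */
}

int main(void)
{
	list_len[9] = 3;			   /* top order well stocked */
	printf("%d\n", free_memory_available(3));  /* prints 4: best case */

	list_len[9] = 1;
	list_len[8] = 0;
	printf("%d\n", free_memory_available(3));  /* prints 0: tight */
	return 0;
}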
@@ -275,7 +282,7 @@ repeat:
spin_lock_irqsave(&page_alloc_lock, flags);
RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
spin_unlock_irqrestore(&page_alloc_lock, flags);
- if ((gfp_mask & __GFP_WAIT) && try_to_free_page(gfp_mask))
+ if ((gfp_mask & __GFP_WAIT) && try_to_free_pages(gfp_mask,SWAP_CLUSTER_MAX))
goto repeat;
nopage:
diff --git a/mm/swap.c b/mm/swap.c
index 0ccf96dc8..957ad0a95 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -67,8 +67,13 @@ swap_control_t swap_control = {
swapstat_t swapstats = {0};
buffer_mem_t buffer_mem = {
- 6, /* minimum percent buffer + cache memory */
- 20, /* borrow percent buffer + cache memory */
- 90 /* maximum percent buffer + cache memory */
+ 3, /* minimum percent buffer */
+ 10, /* borrow percent buffer */
+ 30 /* maximum percent buffer */
};
+buffer_mem_t page_cache = {
+ 10, /* minimum percent page cache */
+ 30, /* borrow percent page cache */
+ 75 /* maximum */
+};
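The single buffer_mem tunable is split into separate limits for the buffer cache and the page cache, each expressed as percentages of physical memory. The rest of the patch reads them consistently: shrink_mmap() refuses to shrink a pool below min_percent, do_try_to_free_page() reclaims preferentially above borrow_percent, and swap_tick() forces a wakeup above max_percent. A sketch of that reading (the pool_state() helper is illustrative, not a kernel function):

/*
 * Illustration of the three per-pool thresholds; the helper and its
 * return strings are assumptions for the sketch.
 */
#include <stdio.h>

typedef struct {
	unsigned int min_percent;	/* never shrink the pool below this */
	unsigned int borrow_percent;	/* above this, reclaim from it first */
	unsigned int max_percent;	/* above this, force a pageout scan */
} buffer_mem_t;

static buffer_mem_t page_cache = { 10, 30, 75 };

/* pages: current pool size in pages; total: num_physpages */
static const char *pool_state(unsigned long pages, unsigned long total,
			      const buffer_mem_t *lim)
{
	if (pages * 100 < lim->min_percent * total)
		return "below min: protected from reclaim";
	if (pages * 100 > lim->max_percent * total)
		return "above max: force a pageout scan";
	if (pages * 100 > lim->borrow_percent * total)
		return "borrowing: shrink this pool first";
	return "normal";
}

int main(void)
{
	unsigned long total = 16384;	/* 64 MB of 4 KB pages */
	/* 8000/16384 is about 49%: between borrow (30%) and max (75%) */
	printf("%s\n", pool_state(8000, total, &page_cache));
	return 0;
}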
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5d4188ae5..0ad129a6b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -31,7 +31,7 @@
/*
* When are we next due for a page scan?
*/
-static int next_swap_jiffies = 0;
+static unsigned long next_swap_jiffies = 0;
/*
* How often do we do a pageout scan during normal conditions?
@@ -44,11 +44,6 @@ int swapout_interval = HZ / 4;
*/
static struct wait_queue * kswapd_wait = NULL;
-/*
- * We avoid doing a reschedule if the pageout daemon is already awake;
- */
-static int kswapd_awake = 0;
-
static void init_swap_timer(void);
/*
@@ -456,14 +451,14 @@ static inline int do_try_to_free_page(int gfp_mask)
stop = 3;
if (gfp_mask & __GFP_WAIT)
stop = 0;
- if (BUFFER_MEM > buffer_mem.borrow_percent * num_physpages / 100)
+ if (((buffermem >> PAGE_SHIFT) * 100 > buffer_mem.borrow_percent * num_physpages)
+ || (page_cache_size * 100 > page_cache.borrow_percent * num_physpages))
state = 0;
switch (state) {
do {
case 0:
- if (BUFFER_MEM > (buffer_mem.min_percent * num_physpages /100) &&
- shrink_mmap(i, gfp_mask))
+ if (shrink_mmap(i, gfp_mask))
return 1;
state = 1;
case 1:
@@ -545,30 +540,41 @@ int kswapd(void *unused)
add_wait_queue(&kswapd_wait, &wait);
while (1) {
int tries;
+ int tried = 0;
current->state = TASK_INTERRUPTIBLE;
- kswapd_awake = 0;
flush_signals(current);
run_task_queue(&tq_disk);
schedule();
- kswapd_awake = 1;
swapstats.wakeups++;
- /* Do the background pageout:
- * When we've got loads of memory, we try
- * (freepages.high - nr_free_pages) times to
- * free memory. As memory gets tighter, kswapd
- * gets more and more agressive. -- Rik.
+
+ /*
+ * Do the background pageout: be
+ * more aggressive if we're really
+ * low on free memory.
+ *
+ * Normally this is called 4 times
+ * a second if we need more memory,
+ * so this has a normal rate of
+ * X*4 pages of memory free'd per
+ * second. That rate goes up when
+ *
+ * - we're really low on memory (we get woken
+ * up a lot more)
+ * - other processes fail to allocate memory,
+ * at which time they try to do their own
+ * freeing.
+ *
+ * A "tries" value of 50 means up to 200 pages
+ * per second (1.6MB/s). This should be a /proc
+ * thing.
*/
- tries = freepages.high - nr_free_pages;
- if (tries < freepages.min) {
- tries = freepages.min;
- }
- if (nr_free_pages < freepages.high + freepages.low)
- tries <<= 1;
+ tries = (50 << 2) >> free_memory_available(3);
+
while (tries--) {
int gfp_mask;
- if (free_memory_available())
+ if (++tried > SWAP_CLUSTER_MAX && free_memory_available(0))
break;
gfp_mask = __GFP_IO;
try_to_free_page(gfp_mask);
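The new tries heuristic replaces the freepages-based one: free_memory_available(3) grades memory from 0 (tight) to 4 (plentiful), and each grade halves the work kswapd attempts per wakeup. A worked example of the shift arithmetic, matching the "up to 200 pages per second" figure in the comment above:

/* Worked example of: tries = (50 << 2) >> free_memory_available(3); */
#include <stdio.h>

int main(void)
{
	for (int avail = 0; avail <= 4; avail++)
		printf("avail=%d -> tries=%d\n", avail, (50 << 2) >> avail);
	/* avail=0 -> 200, 1 -> 100, 2 -> 50, 3 -> 25, 4 -> 12 */
	return 0;
}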
@@ -589,27 +595,38 @@ int kswapd(void *unused)
/*
* The swap_tick function gets called on every clock tick.
*/
-
void swap_tick(void)
{
- int want_wakeup = 0, memory_low = 0;
- int pages = nr_free_pages + atomic_read(&nr_async_pages);
+ unsigned long now, want;
+ int want_wakeup = 0;
- if (pages < freepages.low)
- memory_low = want_wakeup = 1;
- else if ((pages < freepages.high || BUFFER_MEM > (num_physpages * buffer_mem.max_percent / 100))
- && jiffies >= next_swap_jiffies)
- want_wakeup = 1;
+ want = next_swap_jiffies;
+ now = jiffies;
- if (want_wakeup) {
- if (!kswapd_awake) {
+ /*
+ * Examine the memory queues. Mark memory low
+ * if there is nothing available in the three
+ * highest queues.
+ *
+ * Schedule for wakeup if there isn't lots
+ * of free memory.
+ */
+ switch (free_memory_available(3)) {
+ case 0:
+ want = now;
+ /* Fall through */
+ case 1 ... 3:
+ want_wakeup = 1;
+ default:
+ }
+
+ if ((long) (now - want) >= 0) {
+ if (want_wakeup || (num_physpages * buffer_mem.max_percent) < (buffermem >> PAGE_SHIFT) * 100
+ || (num_physpages * page_cache.max_percent < page_cache_size)) {
+ /* Set the next wake-up time */
+ next_swap_jiffies = now + swapout_interval;
wake_up(&kswapd_wait);
- need_resched = 1;
}
- /* Set the next wake-up time */
- next_swap_jiffies = jiffies;
- if (!memory_low)
- next_swap_jiffies += swapout_interval;
}
timer_active |= (1<<SWAP_TIMER);
}