author     Ralf Baechle <ralf@linux-mips.org>       1998-03-18 17:17:51 +0000
committer  Ralf Baechle <ralf@linux-mips.org>       1998-03-18 17:17:51 +0000
commit     f1382dc4850bb459d24a81c6cb0ef93ea7bd4a79 (patch)
tree       225271a3d5dcd4e9dea5ee393556abd754c964b1 /mm
parent     135b00fc2e90e605ac2a96b20b0ebd93851a3f89 (diff)
o Merge with Linux 2.1.90.
o Divide L1 cache sizes by 1024 before printing, makes the numbers a
bit more credible ...
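
The cache-size fix is a plain byte-to-kilobyte conversion at the print site. A minimal standalone sketch of the pattern, with illustrative sizes and message text (the real values come from the MIPS cache probe at boot, which is not part of this diff):

```c
#include <stdio.h>

int main(void)
{
	/* Illustrative sizes in bytes; the real code reads them from the
	 * cache probe at boot. */
	unsigned long icache_size = 32768;
	unsigned long dcache_size = 32768;

	/* Printing the raw byte count next to a "kb" label is what made the
	 * numbers look implausible; shift right by 10 (divide by 1024) first. */
	printf("Primary I-cache %lukb, D-cache %lukb\n",
	       icache_size >> 10, dcache_size >> 10);
	return 0;
}
```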
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c |  6
-rw-r--r--  mm/slab.c       |  6
-rw-r--r--  mm/swap.c       | 71
-rw-r--r--  mm/swap_state.c | 10
-rw-r--r--  mm/vmscan.c     | 42
5 files changed, 47 insertions, 88 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed748bbfb..a3b1c0e8c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -331,9 +331,9 @@ __initfunc(unsigned long free_area_init(unsigned long start_mem, unsigned long e
 	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
 	if (i < 48)
 		i = 48;
-	min_free_pages = i;
-	free_pages_low = i + (i>>1);
-	free_pages_high = i + i;
+	freepages.min = i;
+	freepages.low = i + (i>>1);
+	freepages.high = i + i;
 	mem_map = (mem_map_t *) LONG_ALIGN(start_mem);
 	p = mem_map + MAP_NR(end_mem);
 	start_mem = LONG_ALIGN((unsigned long) p);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1824,7 +1824,9 @@ next:
 	}
 
 	spin_lock_irq(&best_cachep->c_spinlock);
-	if (!best_cachep->c_growing && !(slabp = best_cachep->c_lastp)->s_inuse && slabp != kmem_slab_end(best_cachep)) {
+	while (!best_cachep->c_growing &&
+	       !(slabp = best_cachep->c_lastp)->s_inuse &&
+	       slabp != kmem_slab_end(best_cachep)) {
 		if (gfp_mask & GFP_DMA) {
 			do {
 				if (slabp->s_dma)
@@ -1848,7 +1850,7 @@ good_dma:
 		 */
 		spin_unlock_irq(&best_cachep->c_spinlock);
 		kmem_slab_destroy(best_cachep, slabp);
-		return;
+		spin_lock_irq(&best_cachep->c_spinlock);
 	}
 dma_fail:
 	spin_unlock_irq(&best_cachep->c_spinlock);
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -5,10 +5,12 @@
  */
 
 /*
- * This file should contain most things doing the swapping from/to disk.
+ * This file contains the default values for the opereation of the
+ * Linux VM subsystem. Finetuning documentation can be found in
+ * linux/Documentation/sysctl/vm.txt.
  * Started 18.12.91
- *
 * Swap aging added 23.2.95, Stephen Tweedie.
+ * Buffermem limits added 12.3.98, Rik van Riel.
  */
 
 #include <linux/mm.h>
@@ -33,15 +35,18 @@
 /*
  * We identify three levels of free memory. We never let free mem
- * fall below the min_free_pages except for atomic allocations. We
- * start background swapping if we fall below free_pages_high free
- * pages, and we begin intensive swapping below free_pages_low.
+ * fall below the freepages.min except for atomic allocations. We
+ * start background swapping if we fall below freepages.high free
+ * pages, and we begin intensive swapping below freepages.low.
  *
- * Keep these three variables contiguous for sysctl(2).
+ * These values are there to keep GCC from complaining. Actual
+ * initialization is done in mm/page_alloc.c or arch/sparc(64)/mm/init.c.
  */
-int min_free_pages = 48;
-int free_pages_low = 72;
-int free_pages_high = 96;
+freepages_t freepages = {
+	48,	/* freepages.min */
+	72,	/* freepages.low */
+	96	/* freepages.high */
+};
 
 /* We track the number of pages currently being asynchronously swapped
    out, so that we don't try to swap TOO many pages out at once */
@@ -55,53 +60,15 @@ atomic_t nr_async_pages = ATOMIC_INIT(0);
 
 swap_control_t swap_control = {
 	20, 3, 1, 3,		/* Page aging */
-	10, 2, 2, 4,		/* Buffer aging */
 	32, 4,			/* Aging cluster */
 	8192, 8192,		/* Pageout and bufferout weights */
-	-200,			/* Buffer grace */
-	1, 1,			/* Buffs/pages to free */
-	RCL_ROUND_ROBIN		/* Balancing policy */
 };
 
 swapstat_t swapstats = {0};
 
-/* General swap control */
-
-/* Parse the kernel command line "swap=" option at load time: */
-__initfunc(void swap_setup(char *str, int *ints))
-{
-	int * swap_vars[8] = {
-		&MAX_PAGE_AGE,
-		&PAGE_ADVANCE,
-		&PAGE_DECLINE,
-		&PAGE_INITIAL_AGE,
-		&AGE_CLUSTER_FRACT,
-		&AGE_CLUSTER_MIN,
-		&PAGEOUT_WEIGHT,
-		&BUFFEROUT_WEIGHT
-	};
-	int i;
-
-	for (i=0; i < ints[0] && i < 8; i++) {
-		if (ints[i+1])
-			*(swap_vars[i]) = ints[i+1];
-	}
-}
-
-/* Parse the kernel command line "buff=" option at load time: */
-__initfunc(void buff_setup(char *str, int *ints))
-{
-	int * buff_vars[6] = {
-		&MAX_BUFF_AGE,
-		&BUFF_ADVANCE,
-		&BUFF_DECLINE,
-		&BUFF_INITIAL_AGE,
-		&BUFFEROUT_WEIGHT,
-		&BUFFERMEM_GRACE
-	};
-	int i;
-
-	for (i=0; i < ints[0] && i < 6; i++) {
-		if (ints[i+1])
-			*(buff_vars[i]) = ints[i+1];
-	}
-}
+buffer_mem_t buffer_mem = {
+	6,	/* minimum percent buffer + cache memory */
+	20,	/* borrow percent buffer + cache memory */
+	90	/* maximum percent buffer + cache memory */
+};
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4ebc5c05f..b575877ff 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -64,13 +64,13 @@ int add_to_swap_cache(struct page *page, unsigned long entry)
 #endif
 	if (PageTestandSetSwapCache(page)) {
 		printk("swap_cache: replacing non-empty entry %08lx "
-		       "on page %08lx",
+		       "on page %08lx\n",
 		       page->offset, page_address(page));
 		return 0;
 	}
 	if (page->inode) {
 		printk("swap_cache: replacing page-cached entry "
-		       "on page %08lx", page_address(page));
+		       "on page %08lx\n", page_address(page));
 		return 0;
 	}
 	atomic_inc(&page->count);
@@ -138,18 +138,18 @@ void remove_from_swap_cache(struct page *page)
 {
 	if (!page->inode) {
 		printk ("VM: Removing swap cache page with zero inode hash "
-			"on page %08lx", page_address(page));
+			"on page %08lx\n", page_address(page));
 		return;
 	}
 	if (page->inode != &swapper_inode) {
 		printk ("VM: Removing swap cache page with wrong inode hash "
-			"on page %08lx", page_address(page));
+			"on page %08lx\n", page_address(page));
 	}
 	/*
 	 * This will be a legal case once we have a more mature swap cache.
 	 */
 	if (atomic_read(&page->count) == 1) {
-		printk ("VM: Removing page cache on unshared page %08lx",
+		printk ("VM: Removing page cache on unshared page %08lx\n",
 			page_address(page));
 		return;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ebef7a362..5d4188ae5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6,7 +6,7 @@
  *  Swap reorganised 29.12.95, Stephen Tweedie.
  *  kswapd added: 7.1.96  sct
  *  Removed kswapd_ctl limits, and swap out as many pages as needed
- *  to bring the system back to free_pages_high: 2.4.97, Rik van Riel.
+ *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  *  Version: $Id: vmscan.c,v 1.5 1998/02/23 22:14:28 sct Exp $
  */
 
@@ -22,6 +22,8 @@
 #include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/dcache.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
 
 #include <asm/bitops.h>
 #include <asm/pgtable.h>
@@ -454,11 +456,14 @@ static inline int do_try_to_free_page(int gfp_mask)
 		stop = 3;
 	if (gfp_mask & __GFP_WAIT)
 		stop = 0;
+	if (BUFFER_MEM > buffer_mem.borrow_percent * num_physpages / 100)
+		state = 0;
 	switch (state) {
 		do {
 		case 0:
-			if (shrink_mmap(i, gfp_mask))
+			if (BUFFER_MEM > (buffer_mem.min_percent * num_physpages /100) &&
+			    shrink_mmap(i, gfp_mask))
 				return 1;
 			state = 1;
 		case 1:
@@ -511,7 +516,6 @@ void kswapd_setup(void)
 	printk ("Starting kswapd v%.*s\n", i, s);
 }
 
-#define MAX_SWAP_FAIL 3
 /*
  * The background pageout daemon.
  * Started as a kernel thread from the init process.
@@ -542,32 +546,25 @@ int kswapd(void *unused)
 	while (1) {
 		int tries;
 
+		current->state = TASK_INTERRUPTIBLE;
 		kswapd_awake = 0;
 		flush_signals(current);
 		run_task_queue(&tq_disk);
 		schedule();
-		current->state = TASK_INTERRUPTIBLE;
 		kswapd_awake = 1;
 		swapstats.wakeups++;
 		/* Do the background pageout:
 		 * When we've got loads of memory, we try
-		 * (free_pages_high - nr_free_pages) times to
+		 * (freepages.high - nr_free_pages) times to
 		 * free memory. As memory gets tighter, kswapd
 		 * gets more and more agressive. -- Rik.
 		 */
-		tries = free_pages_high - nr_free_pages;
-		if (tries < min_free_pages) {
-			tries = min_free_pages;
+		tries = freepages.high - nr_free_pages;
+		if (tries < freepages.min) {
+			tries = freepages.min;
 		}
-		else if (nr_free_pages < (free_pages_high + free_pages_low) / 2) {
+		if (nr_free_pages < freepages.high + freepages.low)
 			tries <<= 1;
-			if (nr_free_pages < free_pages_low) {
-				tries <<= 1;
-				if (nr_free_pages <= min_free_pages) {
-					tries <<= 1;
-				}
-			}
-		}
 
 		while (tries--) {
 			int gfp_mask;
@@ -583,14 +580,6 @@
 			}
 			run_task_queue(&tq_disk);
 		}
-#if 0
-		/*
-		 * Report failure if we couldn't even reach min_free_pages.
-		 */
-		if (nr_free_pages < min_free_pages)
-			printk("kswapd: failed, got %d of %d\n",
-				nr_free_pages, min_free_pages);
-#endif
 	}
 	/* As if we could ever get here - maybe we want to make this killable */
 	remove_wait_queue(&kswapd_wait, &wait);
@@ -606,9 +595,10 @@ void swap_tick(void)
 	int	want_wakeup = 0, memory_low = 0;
 	int	pages = nr_free_pages + atomic_read(&nr_async_pages);
 
-	if (pages < free_pages_low)
+	if (pages < freepages.low)
 		memory_low = want_wakeup = 1;
-	else if (pages < free_pages_high && jiffies >= next_swap_jiffies)
+	else if ((pages < freepages.high || BUFFER_MEM > (num_physpages * buffer_mem.max_percent / 100))
+			&& jiffies >= next_swap_jiffies)
 		want_wakeup = 1;
 
 	if (want_wakeup) {
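
For reference, the freepages arithmetic above is easy to sanity-check in isolation. A standalone sketch that mirrors the sizing in free_area_init() and the new kswapd tries calculation (the machine size and free-page count are made-up inputs):

```c
#include <stdio.h>

/* Mirrors the freepages_t introduced in mm/swap.c. */
typedef struct {
	int min, low, high;
} freepages_t;

int main(void)
{
	/* free_area_init(): reserve one page per 128 pages of physical
	 * memory, but never fewer than 48. 8192 pages ~ 32 MB of 4 KB pages. */
	int num_physpages = 8192;	/* made-up input */
	int i = num_physpages >> 7;
	freepages_t freepages;

	if (i < 48)
		i = 48;
	freepages.min  = i;		/* hard floor, atomic allocations excepted */
	freepages.low  = i + (i >> 1);	/* intensive swapping below this */
	freepages.high = i + i;		/* background swapping below this */

	/* kswapd: effort scales with the deficit, doubled when memory is
	 * tight -- the flattened version of the old nested if-ladder. */
	int nr_free_pages = 50;		/* made-up input */
	int tries = freepages.high - nr_free_pages;
	if (tries < freepages.min)
		tries = freepages.min;
	if (nr_free_pages < freepages.high + freepages.low)
		tries <<= 1;

	printf("min=%d low=%d high=%d tries=%d\n",
	       freepages.min, freepages.low, freepages.high, tries);
	return 0;	/* prints: min=64 low=96 high=128 tries=156 */
}
```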
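
The new buffer_mem percentages gate the cache-shrinking decisions in do_try_to_free_page() and the kswapd wakeup in swap_tick(). A small illustration of those three threshold checks, using stand-in numbers for the kernel's BUFFER_MEM and num_physpages counters:

```c
#include <stdio.h>

/* Mirrors the buffer_mem_t introduced in mm/swap.c. */
typedef struct {
	int min_percent, borrow_percent, max_percent;
} buffer_mem_t;

static buffer_mem_t buffer_mem = { 6, 20, 90 };

int main(void)
{
	long num_physpages = 8192;	/* made-up machine size, in pages */
	long buffer_pages  = 2500;	/* stand-in for the kernel's BUFFER_MEM */

	/* do_try_to_free_page(): above borrow_percent, restart at state 0
	 * so the buffer/page cache is shrunk before anything is swapped. */
	if (buffer_pages > buffer_mem.borrow_percent * num_physpages / 100)
		printf("over borrow limit: prefer shrinking the cache\n");

	/* do_try_to_free_page(): shrink_mmap() only runs while the cache
	 * is still above the min_percent floor. */
	if (buffer_pages > buffer_mem.min_percent * num_physpages / 100)
		printf("above the floor: shrink_mmap() may run\n");

	/* swap_tick(): above max_percent, kswapd is woken even when free
	 * memory alone would not trigger it. */
	if (buffer_pages > num_physpages * buffer_mem.max_percent / 100)
		printf("over hard cap: wake kswapd\n");

	return 0;
}
```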