author     Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    1998-05-07 02:55:41 +0000
commit     dcec8a13bf565e47942a1751a9cec21bec5648fe
tree       548b69625b18cc2e88c3e68d0923be546c9ebb03 /mm
parent     2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c    49
-rw-r--r--   mm/page_io.c       34
-rw-r--r--   mm/simp.c           3
-rw-r--r--   mm/swapfile.c      27
-rw-r--r--   mm/vmalloc.c        6
-rw-r--r--   mm/vmscan.c        40
6 files changed, 101 insertions, 58 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb181e437..85eaca9e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -108,17 +108,6 @@ static spinlock_t page_alloc_lock;
  * but this had better return false if any reasonable "get_free_page()"
  * allocation could currently fail..
  *
- * Currently we approve of the following situations:
- * - the highest memory order has two entries
- * - the highest memory order has one free entry and:
- *   - the next-highest memory order has two free entries
- * - the highest memory order has one free entry and:
- *   - the next-highest memory order has one free entry
- *   - the next-next-highest memory order has two free entries
- *
- * [previously, there had to be two entries of the highest memory
- *  order, but this lead to problems on large-memory machines.]
- *
  * This will return zero if no list was found, non-zero
  * if there was memory (the bigger, the better).
  */
@@ -126,7 +115,18 @@ int free_memory_available(int nr)
 {
         int retval = 0;
         unsigned long flags;
-        struct free_area_struct * list = NULL;
+        struct free_area_struct * list;
+
+        /*
+         * If we have more than about 3% to 5% of all memory free,
+         * consider it to be good enough for anything.
+         * It may not be, due to fragmentation, but we
+         * don't want to keep on forever trying to find
+         * free unfragmented memory.
+         * Added low/high water marks to avoid thrashing -- Rik.
+         */
+        if (nr_free_pages > (num_physpages >> 5) + (nr ? 0 : num_physpages >> 6))
+                return nr+1;
 
         list = free_area + NR_MEM_LISTS;
         spin_lock_irqsave(&page_alloc_lock, flags);
@@ -263,10 +263,8 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
          * "maxorder" is the highest order number that we're allowed
          * to empty in order to find a free page..
          */
-        maxorder = order + NR_MEM_LISTS/3;
-        if (gfp_mask & __GFP_MED)
-                maxorder += NR_MEM_LISTS/3;
-        if ((gfp_mask & __GFP_HIGH) || maxorder > NR_MEM_LISTS)
+        maxorder = NR_MEM_LISTS-1;
+        if (gfp_mask & __GFP_HIGH)
                 maxorder = NR_MEM_LISTS;
 
         if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
@@ -278,13 +276,18 @@ unsigned long __get_free_pages(int gfp_mask, unsigned long order)
                 }
         }
 
-repeat:
-        spin_lock_irqsave(&page_alloc_lock, flags);
-        RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
-        spin_unlock_irqrestore(&page_alloc_lock, flags);
-        if ((gfp_mask & __GFP_WAIT) && try_to_free_pages(gfp_mask,SWAP_CLUSTER_MAX))
-                goto repeat;
-
+        for (;;) {
+                spin_lock_irqsave(&page_alloc_lock, flags);
+                RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
+                spin_unlock_irqrestore(&page_alloc_lock, flags);
+                if (!(gfp_mask & __GFP_WAIT))
+                        break;
+                shrink_dcache();
+                if (!try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX))
+                        break;
+                gfp_mask &= ~__GFP_WAIT;        /* go through this only once */
+                maxorder = NR_MEM_LISTS;        /* Allow anything this time */
+        }
 nopage:
         return 0;
 }
diff --git a/mm/page_io.c b/mm/page_io.c
index e02565def..eb436f7b7 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -28,6 +28,8 @@
 #include <asm/bitops.h>
 #include <asm/pgtable.h>
 
+static struct wait_queue * lock_queue = NULL;
+
 /*
  * Reads or writes a swap page.
  * wait=1: start I/O and wait for completion. wait=0: start asynchronous I/O.
@@ -87,6 +89,12 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
                 return;
         }
 
+        /* Make sure we are the only process doing I/O with this swap page. */
+        while (test_and_set_bit(offset,p->swap_lockmap)) {
+                run_task_queue(&tq_disk);
+                sleep_on(&lock_queue);
+        }
+
         if (rw == READ) {
                 clear_bit(PG_uptodate, &page->flags);
                 kstat.pswpin++;
@@ -115,6 +123,7 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
                 if (!wait) {
                         set_bit(PG_free_after, &page->flags);
                         set_bit(PG_decr_after, &page->flags);
+                        set_bit(PG_swap_unlock_after, &page->flags);
                         atomic_inc(&nr_async_pages);
                 }
                 ll_rw_page(rw,p->swap_device,offset,buf);
@@ -173,6 +182,9 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
 
         printk("rw_swap_page: no swap file or device\n");
         atomic_dec(&page->count);
+        if (offset && !test_and_clear_bit(offset,p->swap_lockmap))
+                printk("rw_swap_page: lock already cleared\n");
+        wake_up(&lock_queue);
 #ifdef DEBUG_SWAP
         printk ("DebugVM: %s_swap_page finished on page %p (count %d)\n",
                 (rw == READ) ? "read" : "write",
@@ -180,6 +192,28 @@ void rw_swap_page(int rw, unsigned long entry, char * buf, int wait)
 #endif
 }
 
+/* This is run when asynchronous page I/O has completed. */
+void swap_after_unlock_page (unsigned long entry)
+{
+        unsigned long type, offset;
+        struct swap_info_struct * p;
+
+        type = SWP_TYPE(entry);
+        if (type >= nr_swapfiles) {
+                printk("swap_after_unlock_page: bad swap-device\n");
+                return;
+        }
+        p = &swap_info[type];
+        offset = SWP_OFFSET(entry);
+        if (offset >= p->max) {
+                printk("swap_after_unlock_page: weirdness\n");
+                return;
+        }
+        if (!test_and_clear_bit(offset,p->swap_lockmap))
+                printk("swap_after_unlock_page: lock already cleared\n");
+        wake_up(&lock_queue);
+}
+
 /*
  * Setting up a new swap file needs a simple wrapper just to read the
  * swap signature. SysV shared memory also needs a simple wrapper.
diff --git a/mm/simp.c b/mm/simp.c
--- a/mm/simp.c
+++ b/mm/simp.c
@@ -70,7 +70,8 @@ struct simp {
         /* next cache line */
         struct header * usable_list;
         spinlock_t lock;
-        char fill[sizeof(void*) - sizeof(spinlock_t)];
+        /* This value is negative on Alpha SMP. */
+        /* char fill[sizeof(void*) - sizeof(spinlock_t)]; */
         long real_size;
         long max_elems;
         structor again_ctor;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8608db8d8..f9cd0d47e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -52,6 +52,8 @@ static inline int scan_swap_map(struct swap_info_struct *si)
                         offset = si->cluster_next++;
                         if (si->swap_map[offset])
                                 continue;
+                        if (test_bit(offset, si->swap_lockmap))
+                                continue;
                         si->cluster_nr--;
                         goto got_page;
                 }
@@ -60,6 +62,8 @@ static inline int scan_swap_map(struct swap_info_struct *si)
         for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
                 if (si->swap_map[offset])
                         continue;
+                if (test_bit(offset, si->swap_lockmap))
+                        continue;
                 si->lowest_bit = offset;
 got_page:
                 si->swap_map[offset] = 1;
@@ -424,6 +428,8 @@ asmlinkage int sys_swapoff(const char * specialfile)
         p->swap_device = 0;
         vfree(p->swap_map);
         p->swap_map = NULL;
+        free_page((long) p->swap_lockmap);
+        p->swap_lockmap = NULL;
         p->flags = 0;
         err = 0;
 out:
@@ -483,7 +489,6 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
         int error = -EPERM;
         struct file filp;
         static int least_priority = 0;
-        unsigned char *avail_map = 0;
 
         lock_kernel();
         if (!suser())
@@ -501,6 +506,7 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
         p->swap_file = NULL;
         p->swap_device = 0;
         p->swap_map = NULL;
+        p->swap_lockmap = NULL;
         p->lowest_bit = 0;
         p->highest_bit = 0;
         p->cluster_nr = 0;
@@ -543,24 +549,24 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
                 }
         } else if (!S_ISREG(swap_dentry->d_inode->i_mode))
                 goto bad_swap;
-        avail_map = (unsigned char *) get_free_page(GFP_USER);
-        if (!avail_map) {
+        p->swap_lockmap = (unsigned char *) get_free_page(GFP_USER);
+        if (!p->swap_lockmap) {
                 printk("Unable to start swapping: out of memory :-)\n");
                 error = -ENOMEM;
                 goto bad_swap;
         }
-        rw_swap_page_nocache(READ, SWP_ENTRY(type,0), (char *) avail_map);
-        if (memcmp("SWAP-SPACE",avail_map+PAGE_SIZE-10,10)) {
+        rw_swap_page_nocache(READ, SWP_ENTRY(type,0), (char *) p->swap_lockmap);
+        if (memcmp("SWAP-SPACE",p->swap_lockmap+PAGE_SIZE-10,10)) {
                 printk("Unable to find swap-space signature\n");
                 error = -EINVAL;
                 goto bad_swap;
         }
-        memset(avail_map+PAGE_SIZE-10,0,10);
+        memset(p->swap_lockmap+PAGE_SIZE-10,0,10);
         j = 0;
         p->lowest_bit = 0;
         p->highest_bit = 0;
         for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
-                if (test_bit(i,avail_map)) {
+                if (test_bit(i,p->swap_lockmap)) {
                         if (!p->lowest_bit)
                                 p->lowest_bit = i;
                         p->highest_bit = i;
@@ -579,12 +585,13 @@ asmlinkage int sys_swapon(const char * specialfile, int swap_flags)
                 goto bad_swap;
         }
         for (i = 1 ; i < p->max ; i++) {
-                if (test_bit(i,avail_map))
+                if (test_bit(i,p->swap_lockmap))
                         p->swap_map[i] = 0;
                 else
                         p->swap_map[i] = 0x80;
         }
         p->swap_map[0] = 0x80;
+        memset(p->swap_lockmap,0,PAGE_SIZE);
         p->flags = SWP_WRITEOK;
         p->pages = j;
         nr_swap_pages += j;
@@ -611,15 +618,15 @@ bad_swap:
         if(filp.f_op && filp.f_op->release)
                 filp.f_op->release(filp.f_dentry->d_inode,&filp);
 bad_swap_2:
+        free_page((long) p->swap_lockmap);
         vfree(p->swap_map);
         dput(p->swap_file);
         p->swap_device = 0;
         p->swap_file = NULL;
         p->swap_map = NULL;
+        p->swap_lockmap = NULL;
         p->flags = 0;
 out:
-        if (avail_map)
-                free_page((long) avail_map);
         unlock_kernel();
         return error;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ab6f09e2c..bf29463dd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -213,12 +213,16 @@ void * vmalloc(unsigned long size)
         return vmalloc_prot (size, PAGE_KERNEL);
 }
 
-int vread(char *buf, char *addr, int count)
+long vread(char *buf, char *addr, unsigned long count)
 {
         struct vm_struct **p, *tmp;
         char *vaddr, *buf_start = buf;
         int n;
 
+        /* Don't allow overflow */
+        if ((unsigned long) addr + count < count)
+                count = -(unsigned long) addr;
+
         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                 vaddr = (char *) tmp->addr;
                 while (addr < vaddr) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0ad129a6b..8eaeb23d5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -441,9 +441,6 @@ static inline int do_try_to_free_page(int gfp_mask)
         int i=6;
         int stop;
 
-        /* Let the dcache know we're looking for memory ... */
-        shrink_dcache_memory();
-
         /* Always trim SLAB caches when memory gets low. */
         kmem_cache_reap(gfp_mask);
 
@@ -458,17 +455,17 @@ static inline int do_try_to_free_page(int gfp_mask)
         switch (state) {
                 do {
                 case 0:
+                        state = 1;
                         if (shrink_mmap(i, gfp_mask))
                                 return 1;
-                        state = 1;
                 case 1:
+                        state = 2;
                         if ((gfp_mask & __GFP_IO) && shm_swap(i, gfp_mask))
                                 return 1;
-                        state = 2;
                 default:
+                        state = 0;
                         if (swap_out(i, gfp_mask))
                                 return 1;
-                        state = 0;
                         i--;
                 } while ((i - stop) >= 0);
         }
@@ -547,29 +544,26 @@ int kswapd(void *unused)
                 run_task_queue(&tq_disk);
                 schedule();
                 swapstats.wakeups++;
+
+                /* This will gently shrink the dcache.. */
+                shrink_dcache_memory();
 
                 /*
                  * Do the background pageout: be
                  * more aggressive if we're really
                  * low on free memory.
                  *
-                 * Normally this is called 4 times
-                 * a second if we need more memory,
-                 * so this has a normal rate of
-                 * X*4 pages of memory free'd per
-                 * second. That rate goes up when
-                 *
-                 *  - we're really low on memory (we get woken
-                 *    up a lot more)
-                 *  - other processes fail to allocate memory,
-                 *    at which time they try to do their own
-                 *    freeing.
-                 *
-                 * A "tries" value of 50 means up to 200 pages
-                 * per second (1.6MB/s). This should be a /proc
-                 * thing.
+                 * The number of tries is 512 divided by an
+                 * 'urgency factor'. In practice this will mean
+                 * a value of 512 / 8 = 64 pages at a time,
+                 * giving 64 * 4 (times/sec) * 4k (pagesize) =
+                 * 1 MB/s in lowest-priority background
+                 * paging. This number rises to 8 MB/s when the
+                 * priority is highest (but then we'll be woken
+                 * up more often and the rate will be even higher).
+                 *  -- Should make this sysctl tunable...
                  */
-                tries = (50 << 2) >> free_memory_available(3);
+                tries = (512) >> free_memory_available(3);
                 while (tries--) {
                         int gfp_mask;
 
@@ -622,7 +616,7 @@ void swap_tick(void)
         if ((long) (now - want) >= 0) {
                 if (want_wakeup ||
                     (num_physpages * buffer_mem.max_percent) < (buffermem >> PAGE_SHIFT) * 100
-                    || (num_physpages * page_cache.max_percent < page_cache_size)) {
+                    || (num_physpages * page_cache.max_percent < page_cache_size * 100)) {
                         /* Set the next wake-up time */
                         next_swap_jiffies = now + swapout_interval;
                         wake_up(&kswapd_wait);
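
The rewritten free_memory_available() replaces the old free-list heuristic with a quick watermark test: with nr == 0 the caller needs roughly 3% + 1.5% of all pages free, with nr > 0 about 3% is enough, and only when that test fails does the kernel fall back to scanning the free_area lists. The following is a minimal user-space sketch of just the quick test; the names memory_looks_ok and total_pages are invented for the example, and the fallback list scan is omitted.

    #include <stdio.h>

    /* Sketch of the watermark test added to free_memory_available().
     * Returns 0 ("no memory") or nr+1 ("plenty"), like the kernel function. */
    static int memory_looks_ok(unsigned long free_pages,
                               unsigned long total_pages, int nr)
    {
        /* kernel: nr_free_pages > (num_physpages >> 5) + (nr ? 0 : num_physpages >> 6) */
        unsigned long mark = (total_pages >> 5) + (nr ? 0 : total_pages >> 6);

        return free_pages > mark ? nr + 1 : 0;
    }

    int main(void)
    {
        unsigned long total = 32768;    /* 32768 pages of 4k = 128 MB of RAM */

        printf("%d\n", memory_looks_ok(2000, total, 0));  /* 2000 > 1536 -> 1 */
        printf("%d\n", memory_looks_ok(1200, total, 0));  /* 1200 < 1536 -> 0 */
        printf("%d\n", memory_looks_ok(1200, total, 1));  /* 1200 > 1024 -> 2 */
        return 0;
    }

In the kernel a zero result does not fail the allocation by itself; it only means the slower free-list check (and eventually try_to_free_pages) has to do the work.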
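The page_io.c and swapfile.c changes introduce a per-swap-page lock bitmap (swap_lockmap): rw_swap_page() takes the page's bit before starting I/O and sleeps on lock_queue if it is already set, swap_after_unlock_page() clears it when asynchronous I/O completes, and scan_swap_map() skips locked offsets. The sketch below is a single-threaded user-space model of the bitmap half of that protocol only; test_and_set_bit()/test_and_clear_bit() are re-implemented as plain, non-atomic stand-ins for the kernel's bit helpers, and the sleep/wake-up side is described in comments rather than implemented.

    #include <stdio.h>
    #include <string.h>

    /* Non-atomic stand-ins for the kernel bit helpers, one bit per swap page. */
    static int test_and_set_bit(unsigned long nr, unsigned char *map)
    {
        unsigned char mask = 1u << (nr & 7);
        int old = (map[nr >> 3] & mask) != 0;

        map[nr >> 3] |= mask;
        return old;
    }

    static int test_and_clear_bit(unsigned long nr, unsigned char *map)
    {
        unsigned char mask = 1u << (nr & 7);
        int old = (map[nr >> 3] & mask) != 0;

        map[nr >> 3] &= ~mask;
        return old;
    }

    int main(void)
    {
        unsigned char swap_lockmap[4096];   /* one 4k page covers 32768 swap pages */
        unsigned long offset = 42;

        memset(swap_lockmap, 0, sizeof(swap_lockmap));

        /* rw_swap_page(): take the per-page lock before starting I/O.  The
         * kernel loops, runs tq_disk and sleeps on lock_queue when the bit
         * is already set; here we only report the collision. */
        if (!test_and_set_bit(offset, swap_lockmap))
            printf("offset %lu locked for I/O\n", offset);
        if (test_and_set_bit(offset, swap_lockmap))
            printf("offset %lu busy, caller would sleep\n", offset);

        /* swap_after_unlock_page(): drop the lock when asynchronous I/O
         * completes, then wake_up(&lock_queue). */
        if (test_and_clear_bit(offset, swap_lockmap))
            printf("offset %lu unlocked again\n", offset);
        return 0;
    }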
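The vread() change widens count to unsigned long and clamps it so that addr + count cannot wrap past the top of the address space. A small stand-alone demonstration of that clamp expression; the address and count values are made up for the example.

    #include <stdio.h>

    int main(void)
    {
        unsigned long addr  = ~0ul - 10;   /* 11 bytes below the top of the address space */
        unsigned long count = 100;         /* a read of this size would wrap around */

        /* same shape as the new check in vread():
         *   if ((unsigned long) addr + count < count)
         *       count = -(unsigned long) addr;                  */
        if (addr + count < count)
            count = -addr;                 /* largest count that stays below the wrap point */

        printf("clamped count = %lu\n", count);   /* prints 11 */
        return 0;
    }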
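The new kswapd comment derives its paging rate from tries = (512) >> free_memory_available(3): the better the memory situation, the larger the shift and the fewer pages kswapd attempts per wakeup. The snippet below only reproduces the arithmetic in that comment; the shift values 0 through 3, the 4 wakeups per second and the 4k page size are taken from the comment itself, not from measuring what free_memory_available() actually returns on a given system.

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size   = 4096;  /* 4k pages, as in the comment */
        unsigned long wakeups_sec = 4;     /* kswapd normally woken 4 times per second */
        int shift;

        for (shift = 3; shift >= 0; shift--) {
            unsigned long tries = 512 >> shift;
            unsigned long rate  = tries * wakeups_sec * page_size;

            /* shift 3: 64 pages/wakeup -> 1024 KB/s; shift 0: 512 -> 8192 KB/s */
            printf("shift %d: %lu pages per wakeup, %lu KB/s\n",
                   shift, tries, rate / 1024);
        }
        return 0;
    }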