author		Ralf Baechle <ralf@linux-mips.org>	2000-04-19 04:00:00 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-04-19 04:00:00 +0000
commit		46e045034336a2cc90c1798cd7cc07af744ddfd6
tree		3b9b51fc482e729f663d25333e77fbed9aaa939a /mm
parent		31dc59d503a02e84c4de98826452acaeb56dc15a
Merge with Linux 2.3.99-pre4.
Diffstat (limited to 'mm')

-rw-r--r--	mm/bootmem.c	 2
-rw-r--r--	mm/filemap.c	61
-rw-r--r--	mm/memory.c	 2
-rw-r--r--	mm/numa.c	11
-rw-r--r--	mm/page_alloc.c	29
-rw-r--r--	mm/slab.c	12
-rw-r--r--	mm/swap_state.c	 7
-rw-r--r--	mm/swapfile.c	 4
-rw-r--r--	mm/vmscan.c	24

9 files changed, 81 insertions(+), 71 deletions(-)
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 7a6d9db09..0e11fe9ed 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -24,6 +24,7 @@
  * true for the boot process anyway)
  */
 unsigned long max_low_pfn;
+unsigned long min_low_pfn;
 
 /* return the number of _pages_ that will be allocated for the boot bitmap */
 unsigned long __init bootmem_bootmap_pages (unsigned long pages)
@@ -282,6 +283,7 @@ unsigned long __init free_all_bootmem_node (int nid)
 unsigned long __init init_bootmem (unsigned long start, unsigned long pages)
 {
 	max_low_pfn = pages;
+	min_low_pfn = start;
 	return(init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages));
 }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index bccdc9bd2..533747f96 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -250,6 +250,11 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 		count--;
 
 		dispose = &young;
+
+		/* avoid unscalable SMP locking */
+		if (!page->buffers && page_count(page) > 1)
+			goto dispose_continue;
+
 		if (TryLockPage(page))
 			goto dispose_continue;
 
@@ -260,22 +265,11 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 		   page locked down ;). */
 		spin_unlock(&pagemap_lru_lock);
 
-		/* avoid unscalable SMP locking */
-		if (!page->buffers && page_count(page) > 1)
-			goto unlock_noput_continue;
-
-		/* Take the pagecache_lock spinlock held to avoid
-		   other tasks to notice the page while we are looking at its
-		   page count. If it's a pagecache-page we'll free it
-		   in one atomic transaction after checking its page count. */
-		spin_lock(&pagecache_lock);
-
 		/* avoid freeing the page while it's locked */
 		get_page(page);
 
 		/* Is it a buffer page? */
 		if (page->buffers) {
-			spin_unlock(&pagecache_lock);
 			if (!try_to_free_buffers(page))
 				goto unlock_continue;
 			/* page was locked, inode can't go away under us */
@@ -283,9 +277,14 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 			atomic_dec(&buffermem_pages);
 			goto made_buffer_progress;
 		}
-		spin_lock(&pagecache_lock);
 	}
 
+	/* Take the pagecache_lock spinlock held to avoid
+	   other tasks to notice the page while we are looking at its
+	   page count. If it's a pagecache-page we'll free it
+	   in one atomic transaction after checking its page count. */
+	spin_lock(&pagecache_lock);
+
 	/*
 	 * We can't free pages unless there's just one user
 	 * (count == 2 because we added one ourselves above).
@@ -294,12 +293,6 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 		goto cache_unlock_continue;
 
 	/*
-	 * We did the page aging part.
-	 */
-	if (nr_lru_pages < freepages.min * priority)
-		goto cache_unlock_continue;
-
-	/*
 	 * Is it a page swap page? If so, we want to
 	 * drop it if it is no longer used, even if it
 	 * were to be marked referenced..
@@ -312,8 +305,7 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 
 	/* is it a page-cache page? */
 	if (page->mapping) {
-		if (!pgcache_under_min())
-		{
+		if (!PageDirty(page) && !pgcache_under_min()) {
 			remove_page_from_inode_queue(page);
 			remove_page_from_hash_queue(page);
 			page->mapping = NULL;
@@ -329,21 +321,12 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 cache_unlock_continue:
 	spin_unlock(&pagecache_lock);
 unlock_continue:
+	spin_lock(&pagemap_lru_lock);
 	UnlockPage(page);
 	put_page(page);
-dispose_relock_continue:
-	/* even if the dispose list is local, a truncate_inode_page()
-	   may remove a page from its queue so always
-	   synchronize with the lru lock while accesing the
-	   page->lru field */
-	spin_lock(&pagemap_lru_lock);
 	list_add(page_lru, dispose);
 	continue;
 
-unlock_noput_continue:
-	UnlockPage(page);
-	goto dispose_relock_continue;
-
 dispose_continue:
 	list_add(page_lru, dispose);
 	}
@@ -484,7 +467,7 @@ static inline void __add_to_page_cache(struct page * page,
 	struct page *alias;
 	unsigned long flags;
 
-	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
+	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
 	page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
 	get_page(page);
 	page->index = offset;
@@ -1724,10 +1707,8 @@ static int msync_interval(struct vm_area_struct * vma,
 		error = vma->vm_ops->sync(vma, start, end-start, flags);
 		if (!error && (flags & MS_SYNC)) {
 			struct file * file = vma->vm_file;
-			if (file) {
-				struct dentry * dentry = file->f_dentry;
-				error = file_fsync(file, dentry);
-			}
+			if (file)
+				error = file_fsync(file, file->f_dentry);
 		}
 		return error;
 	}
@@ -2237,9 +2218,9 @@ asmlinkage long sys_mincore(unsigned long start, size_t len,
 
 	down(&current->mm->mmap_sem);
 
-	if (start & ~PAGE_MASK)
+	if (start & ~PAGE_CACHE_MASK)
 		goto out;
-	len = (len + ~PAGE_MASK) & PAGE_MASK;
+	len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
 	end = start + len;
 	if (end < start)
 		goto out;
@@ -2371,8 +2352,7 @@ static inline void remove_suid(struct inode *inode)
 }
 
 /*
- * Write to a file through the page cache. This is mainly for the
- * benefit of NFS and possibly other network-based file systems.
+ * Write to a file through the page cache.
  *
  * We currently put everything into the page cache prior to writing it.
  * This is not a problem when writing full pages. With partial pages,
@@ -2389,8 +2369,7 @@ static inline void remove_suid(struct inode *inode)
 ssize_t
 generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 {
-	struct dentry	*dentry = file->f_dentry;
-	struct inode	*inode = dentry->d_inode;
+	struct inode	*inode = file->f_dentry->d_inode;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long	limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
 	loff_t		pos;
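The filemap.c hunks above reorder shrink_mmap() so that the cheap, lock-free page-count test runs before TryLockPage(), and pagecache_lock is only taken once the buffer-page path is out of the way. A minimal user-space sketch of that check-before-lock ordering; the types and helpers below are illustrative stand-ins, not the kernel's:

#include <stdbool.h>
#include <pthread.h>

struct page { int count; void *buffers; pthread_mutex_t lock; };

static pthread_mutex_t pagecache_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_reclaim(struct page *page)
{
	/* Cheap, lock-free rejection first: a non-buffer page with
	 * extra references cannot be freed, so skip it without taking
	 * any lock at all (previously this was tested after locking). */
	if (!page->buffers && page->count > 1)
		return false;
	if (pthread_mutex_trylock(&page->lock))
		return false;			/* TryLockPage() analogue */
	/* ... buffer handling would run here, without pagecache_lock ... */
	pthread_mutex_lock(&pagecache_lock);	/* only now, for the page-cache check */
	/* ... page-cache removal under the lock ... */
	pthread_mutex_unlock(&pagecache_lock);
	pthread_mutex_unlock(&page->lock);
	return true;
}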
diff --git a/mm/memory.c b/mm/memory.c
index 1bb7433c0..28791baa2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1053,7 +1053,7 @@ static int do_swap_page(struct task_struct * tsk,
 
 	pte = mk_pte(page, vma->vm_page_prot);
 
-	set_bit(PG_swap_entry, &page->flags);
+	SetPageSwapEntry(page);
 
 	/*
 	 * Freeze the "shared"ness of the page, ie page_count + swap_count.
diff --git a/mm/numa.c b/mm/numa.c
--- a/mm/numa.c
+++ b/mm/numa.c
@@ -22,10 +22,11 @@ pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
  * Should be invoked with paramters (0, 0, unsigned long *[], start_paddr).
  */
 void __init free_area_init_node(int nid, pg_data_t *pgdat,
-	unsigned long *zones_size, unsigned long zone_start_paddr)
+	unsigned long *zones_size, unsigned long zone_start_paddr,
+	unsigned long *zholes_size)
 {
 	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
-		zone_start_paddr);
+		zone_start_paddr, zholes_size);
 }
 
 #endif /* !CONFIG_DISCONTIGMEM */
@@ -55,7 +56,8 @@ void show_free_areas_node(int nid)
  * Nodes can be initialized parallely, in no particular order.
  */
 void __init free_area_init_node(int nid, pg_data_t *pgdat,
-	unsigned long *zones_size, unsigned long zone_start_paddr)
+	unsigned long *zones_size, unsigned long zone_start_paddr,
+	unsigned long *zholes_size)
 {
 	int i, size = 0;
 	struct page *discard;
@@ -63,7 +65,8 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat,
 	if (mem_map == (mem_map_t *)NULL)
 		mem_map = (mem_map_t *)PAGE_OFFSET;
 
-	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr);
+	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
+		zholes_size);
 
 	pgdat->node_id = nid;
 
 	/*
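Both flavours of free_area_init_node() now take a zholes_size array describing, per zone, how many pages are lost to holes. A hypothetical caller under the new signature; the zone and hole sizes and START_PADDR are made-up example values, not taken from any real port:

void __init example_paging_init(void)
{
	/* DMA, normal, highmem; the DMA zone spans 4096 pages but
	 * 1024 of them are covered by a hole. */
	unsigned long zones_size[MAX_NR_ZONES]  = { 4096, 126976, 0 };
	unsigned long zholes_size[MAX_NR_ZONES] = { 1024, 0, 0 };

	free_area_init_node(0, NODE_DATA(0), zones_size, START_PADDR,
		zholes_size);
}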
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a780c6a74..07fdaa021 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -100,12 +100,16 @@ void __free_pages_ok (struct page *page, unsigned long order)
 
 	if (page->buffers)
 		BUG();
+	if (page->mapping)
+		BUG();
 	if (page-mem_map >= max_mapnr)
 		BUG();
 	if (PageSwapCache(page))
 		BUG();
 	if (PageLocked(page))
 		BUG();
+	if (PageDecrAfter(page))
+		BUG();
 
 	zone = page->zone;
@@ -487,12 +491,13 @@ static inline void build_zonelists(pg_data_t *pgdat)
  *   - clear the memory bitmaps
  */
 void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
-	unsigned long *zones_size, unsigned long zone_start_paddr)
+	unsigned long *zones_size, unsigned long zone_start_paddr,
+	unsigned long *zholes_size)
 {
 	struct page *p, *lmem_map;
 	unsigned long i, j;
 	unsigned long map_size;
-	unsigned long totalpages, offset;
+	unsigned long totalpages, offset, realtotalpages;
 	unsigned int cumulative = 0;
 
 	pgdat->node_next = pgdat_list;
@@ -503,7 +508,12 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		unsigned long size = zones_size[i];
 		totalpages += size;
 	}
-	printk("On node %d totalpages: %lu\n", nid, totalpages);
+	realtotalpages = totalpages;
+	if (zholes_size)
+		for (i = 0; i < MAX_NR_ZONES; i++)
+			realtotalpages -= zholes_size[i];
+
+	printk("On node %d totalpages: %lu\n", nid, realtotalpages);
 
 	/*
 	 * Select nr of pages we try to keep free for important stuff
@@ -512,7 +522,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	 * This is fairly arbitrary, but based on some behaviour
 	 * analysis.
 	 */
-	i = totalpages >> 7;
+	i = realtotalpages >> 7;
 	if (i < 10)
 		i = 10;
 	if (i > 256)
@@ -553,21 +563,24 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		zone_t *zone = pgdat->node_zones + j;
 		unsigned long mask;
-		unsigned long size;
+		unsigned long size, realsize;
 
-		size = zones_size[j];
+		realsize = size = zones_size[j];
+		if (zholes_size)
+			realsize -= zholes_size[j];
 		printk("zone(%lu): %lu pages.\n", j, size);
 		zone->size = size;
 		zone->name = zone_names[j];
 		zone->lock = SPIN_LOCK_UNLOCKED;
 		zone->zone_pgdat = pgdat;
+		zone->free_pages = 0;
 		if (!size)
 			continue;
 
 		zone->offset = offset;
 		cumulative += size;
-		mask = (size / zone_balance_ratio[j]);
+		mask = (realsize / zone_balance_ratio[j]);
 		if (mask < zone_balance_min[j])
 			mask = zone_balance_min[j];
 		else if (mask > zone_balance_max[j])
@@ -611,7 +624,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size, 0);
+	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size, 0, 0);
 }
 
 static int __init setup_mem_frac(char *str)
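The point of the zholes_size plumbing is that the per-zone free-page watermark is now derived from pages that actually exist. A worked example of the new mask computation, assuming 4 KB pages and a zone_balance_ratio entry of 128 (the clamp bounds are whatever this tree's zone_balance_min/max hold):

	unsigned long size     = 131072;	/* 512 MB zone, in pages      */
	unsigned long hole     = 32768;		/* 128 MB of it is a hole     */
	unsigned long realsize = size - hole;
	unsigned long mask     = realsize / 128;	/* zone_balance_ratio[j] */
	/* mask is 768 instead of the old size/128 = 1024; it is then
	 * clamped to zone_balance_min[j]..zone_balance_max[j] as in the
	 * hunk above. */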
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -132,7 +132,7 @@
  * SLAB_SELFTEST - 1 to perform a few tests, mainly for development.
  */
 
 #define SLAB_MGMT_CHECKS	1
-#define SLAB_DEBUG_SUPPORT	0
+#define SLAB_DEBUG_SUPPORT	1
 #define SLAB_STATS		0
 #define SLAB_SELFTEST		0
@@ -1024,6 +1024,16 @@ static int __kmem_cache_shrink(kmem_cache_t *cachep)
 		slabp = cachep->c_lastp;
 		if (slabp->s_inuse || slabp == kmem_slab_end(cachep))
 			break;
+		/*
+		 * If this slab is the first slab with free objects
+		 * (c_freep), and as we are walking the slab chain
+		 * backwards, it is also the last slab with free
+		 * objects. After unlinking it, there will be no
+		 * slabs with free objects, so point c_freep into the
+		 * cache structure.
+		 */
+		if (cachep->c_freep == slabp)
+			cachep->c_freep = kmem_slab_end(cachep);
 		kmem_slab_unlink(slabp);
 		spin_unlock_irq(&cachep->c_spinlock);
 		kmem_slab_destroy(cachep, slabp);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 44adf8bdd..defe9b463 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -88,6 +88,9 @@ void __delete_from_swap_cache(struct page *page)
  */
 void delete_from_swap_cache_nolock(struct page *page)
 {
+	if (!PageLocked(page))
+		BUG();
+
 	if (block_flushpage(page, 0))
 		lru_cache_del(page);
 
@@ -122,8 +125,8 @@ void free_page_and_swap_cache(struct page *page)
 		}
 		UnlockPage(page);
 	}
-
-	clear_bit(PG_swap_entry, &page->flags);
+
+	ClearPageSwapEntry(page);
 	__free_page(page);
 }
diff --git a/mm/swapfile.c b/mm/swapfile.c
index e8a2a0b2f..abdb08e57 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -207,7 +207,7 @@ swp_entry_t acquire_swap_entry(struct page *page)
 	unsigned long offset, type;
 	swp_entry_t entry;
 
-	if (!test_bit(PG_swap_entry, &page->flags))
+	if (!PageSwapEntry(page))
 		goto new_swap_entry;
 
 	/* We have the old entry in the page offset still */
@@ -538,7 +538,7 @@ int get_swaparea_info(char *buf)
 	len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
 	for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
 		if (ptr->flags & SWP_USED) {
-			char * path = d_path(ptr->swap_file, page, PAGE_SIZE);
+			char * path = d_path(ptr->swap_file, NULL, page, PAGE_SIZE);
 
 			len += sprintf(buf + len, "%-31s ", path);
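The memory.c, swap_state.c and swapfile.c hunks stop open-coding bit operations on page->flags in favour of PageSwapEntry()/SetPageSwapEntry()/ClearPageSwapEntry(). The definitions are not part of this mm/ diff; presumably they land in a header elsewhere in the merge, and following the existing Page* macro pattern they would expand to something like:

	#define PageSwapEntry(page)	(test_bit(PG_swap_entry, &(page)->flags))
	#define SetPageSwapEntry(page)	set_bit(PG_swap_entry, &(page)->flags)
	#define ClearPageSwapEntry(page)	clear_bit(PG_swap_entry, &(page)->flags)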
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f00e9c535..1057dbb60 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -499,19 +499,19 @@ int kswapd(void *unused)
 		 * the processes needing more memory will wake us
 		 * up on a more timely basis.
 		 */
-		do {
-			pgdat = pgdat_list;
-			while (pgdat) {
-				for (i = 0; i < MAX_NR_ZONES; i++) {
-					zone = pgdat->node_zones + i;
-					if ((!zone->size) || (!zone->zone_wake_kswapd))
-						continue;
-					do_try_to_free_pages(GFP_KSWAPD, zone);
-				}
-				pgdat = pgdat->node_next;
+		pgdat = pgdat_list;
+		while (pgdat) {
+			for (i = 0; i < MAX_NR_ZONES; i++) {
+				zone = pgdat->node_zones + i;
+				if (tsk->need_resched)
+					schedule();
+				if ((!zone->size) || (!zone->zone_wake_kswapd))
+					continue;
+				do_try_to_free_pages(GFP_KSWAPD, zone);
 			}
-			run_task_queue(&tq_disk);
-		} while (!tsk->need_resched);
+			pgdat = pgdat->node_next;
+		}
+		run_task_queue(&tq_disk);
 
 		tsk->state = TASK_INTERRUPTIBLE;
 		interruptible_sleep_on(&kswapd_wait);
 	}
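The kswapd hunk drops the outer do/while retry loop and moves the need_resched test into the per-zone inner loop, so each wakeup performs exactly one sweep over all nodes and the thread yields promptly instead of only between full sweeps. A condensed side-by-side view of the two control flows; free_zone() is a hypothetical shorthand for the size/zone_wake_kswapd checks plus do_try_to_free_pages(GFP_KSWAPD, zone), while pgdat, zone and tsk are as in the hunk above:

	/* Before: full sweeps repeat until a reschedule is pending. */
	do {
		for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
			for (i = 0; i < MAX_NR_ZONES; i++)
				free_zone(pgdat->node_zones + i);
		run_task_queue(&tq_disk);
	} while (!tsk->need_resched);

	/* After: one sweep per wakeup, yielding between zones. */
	for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next)
		for (i = 0; i < MAX_NR_ZONES; i++) {
			if (tsk->need_resched)
				schedule();
			free_zone(pgdat->node_zones + i);
		}
	run_task_queue(&tq_disk);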