Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c    | 23
-rw-r--r-- | mm/page_alloc.c | 67
-rw-r--r-- | mm/vmscan.c     | 49
3 files changed, 75 insertions, 64 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 63a50b7e6..2ef865555 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1627,15 +1627,10 @@ static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_
  * backing-store for swapping..
  */
 static struct vm_operations_struct file_shared_mmap = {
-	NULL,			/* no special open */
-	NULL,			/* no special close */
-	filemap_unmap,		/* unmap - we need to sync the pages */
-	NULL,			/* no special protect */
-	filemap_sync,		/* sync */
-	NULL,			/* advise */
-	filemap_nopage,		/* nopage */
-	NULL,			/* wppage */
-	filemap_swapout		/* swapout */
+	unmap:		filemap_unmap,	/* unmap - we need to sync the pages */
+	sync:		filemap_sync,
+	nopage:		filemap_nopage,
+	swapout:	filemap_swapout,
 };
 
 /*
@@ -1645,15 +1640,7 @@ static struct vm_operations_struct file_shared_mmap = {
  * know they can't ever get write permissions..)
  */
 static struct vm_operations_struct file_private_mmap = {
-	NULL,			/* open */
-	NULL,			/* close */
-	NULL,			/* unmap */
-	NULL,			/* protect */
-	NULL,			/* sync */
-	NULL,			/* advise */
-	filemap_nopage,		/* nopage */
-	NULL,			/* wppage */
-	NULL			/* swapout */
+	nopage:		filemap_nopage,
 };
 
 /* This is used for a general mmap of a disk file */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1c6ced2be..b6d174188 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6,6 +6,7 @@
  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  */
 
 #include <linux/config.h>
@@ -28,6 +29,7 @@ int nr_lru_pages;
 LIST_HEAD(lru_cache);
 
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
+static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 };
 
 /*
  * Free_page() adds the page to the free lists. This is optimized for
@@ -197,18 +199,32 @@ static inline struct page * rmqueue (zone_t *zone, unsigned long order)
 #define ZONE_BALANCED(zone) \
 	(((zone)->free_pages > (zone)->pages_low) && (!(zone)->low_on_memory))
 
+static inline unsigned long classfree(zone_t *zone)
+{
+	unsigned long free = 0;
+	zone_t *z = zone->zone_pgdat->node_zones;
+
+	while (z != zone) {
+		free += z->free_pages;
+		z++;
+	}
+	free += zone->free_pages;
+	return(free);
+}
+
 static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
 {
 	int freed;
+	unsigned long free = classfree(zone);
 
-	if (zone->free_pages >= zone->pages_low) {
+	if (free >= zone->pages_low) {
 		if (!zone->low_on_memory)
 			return 1;
 		/*
 		 * Simple hysteresis: exit 'low memory mode' if
 		 * the upper limit has been reached:
 		 */
-		if (zone->free_pages >= zone->pages_high) {
+		if (free >= zone->pages_high) {
 			zone->low_on_memory = 0;
 			return 1;
 		}
@@ -220,18 +236,14 @@ static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
 	 * state machine, but do not try to free pages
 	 * ourselves.
 	 */
-	if (!(gfp_mask & __GFP_WAIT))
-		return 1;
-
-	current->flags |= PF_MEMALLOC;
 	freed = try_to_free_pages(gfp_mask, zone);
-	current->flags &= ~PF_MEMALLOC;
 
-	if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
+	if (!freed && !(gfp_mask & __GFP_HIGH))
 		return 0;
 	return 1;
 }
 
+#if 0
 /*
  * We are still balancing memory in a global way:
  */
@@ -260,17 +272,13 @@ static inline int balance_memory (zone_t *zone, int gfp_mask)
 	 * state machine, but do not try to free pages
 	 * ourselves.
 	 */
-	if (!(gfp_mask & __GFP_WAIT))
-		return 1;
-
-	current->flags |= PF_MEMALLOC;
 	freed = try_to_free_pages(gfp_mask, zone);
-	current->flags &= ~PF_MEMALLOC;
 
-	if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
+	if (!freed && !(gfp_mask & __GFP_HIGH))
 		return 0;
 	return 1;
 }
+#endif
 
 /*
 * This is the 'heart' of the zoned buddy allocator:
@@ -340,7 +348,7 @@ nopage:
 	 * The main chunk of the balancing code is in this offline branch:
 	 */
balance:
-	if (!balance_memory(z, gfp_mask))
+	if (!zone_balance_memory(z, gfp_mask))
 		goto nopage;
 	goto ready;
 }
@@ -513,6 +521,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	unsigned long i, j;
 	unsigned long map_size;
 	unsigned int totalpages, offset;
+	unsigned int cumulative = 0;
 
 	totalpages = 0;
 	for (i = 0; i < MAX_NR_ZONES; i++) {
@@ -565,7 +574,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	offset = lmem_map - mem_map;
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		zone_t *zone = pgdat->node_zones + j;
-		unsigned long mask = -1;
+		unsigned long mask;
 		unsigned long size;
 
 		size = zones_size[j];
@@ -579,13 +588,11 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 			continue;
 
 		zone->offset = offset;
-		/*
-		 * It's unnecessery to balance the high memory zone
-		 */
-		if (j != ZONE_HIGHMEM) {
-			zone->pages_low = freepages.low;
-			zone->pages_high = freepages.high;
-		}
+		cumulative += size;
+		mask = (cumulative / zone_balance_ratio[j]);
+		if (mask < 1) mask = 1;
+		zone->pages_low = mask*2;
+		zone->pages_high = mask*3;
 		zone->low_on_memory = 0;
 
 		for (i = 0; i < size; i++) {
@@ -598,6 +605,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		}
 
 		offset += size;
+		mask = -1;
 		for (i = 0; i < MAX_ORDER; i++) {
 			unsigned long bitmap_size;
 
@@ -618,3 +626,16 @@ void __init free_area_init(unsigned int *zones_size)
 {
 	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size, 0);
 }
+
+static int __init setup_mem_frac(char *str)
+{
+	int j = 0;
+
+	while (get_option(&str, &zone_balance_ratio[j++]) == 2);
+	printk("setup_mem_frac: ");
+	for (j = 0; j < MAX_NR_ZONES; j++) printk("%d  ", zone_balance_ratio[j]);
+	printk("\n");
+	return 1;
+}
+
+__setup("memfrac=", setup_mem_frac);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3a5f4fbbe..231cbf8f7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -33,7 +33,7 @@
 * using a process that no longer actually exists (it might
 * have died while we slept).
 */
-static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask, zone_t *zone)
+static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pte_t * page_table, int gfp_mask)
 {
 	pte_t pte;
 	swp_entry_t entry;
@@ -58,9 +58,7 @@ static int try_to_swap_out(struct vm_area_struct* vma, unsigned long address, pt
 		goto out_failed;
 	}
 
-	if (PageReserved(page)
-	    || PageLocked(page)
-	    || (zone && (!memclass(page->zone, zone))))
+	if (PageReserved(page) || PageLocked(page))
 		goto out_failed;
 
 	/*
@@ -195,7 +193,7 @@ out_failed:
 * (C) 1993 Kai Petzke, wpp@marie.physik.tu-berlin.de
 */
 
-static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask, zone_t *zone)
+static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
 {
 	pte_t * pte;
 	unsigned long pmd_end;
@@ -217,7 +215,7 @@ static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned
 	do {
 		int result;
 		vma->vm_mm->swap_address = address + PAGE_SIZE;
-		result = try_to_swap_out(vma, address, pte, gfp_mask, zone);
+		result = try_to_swap_out(vma, address, pte, gfp_mask);
 		if (result)
 			return result;
 		address += PAGE_SIZE;
@@ -226,7 +224,7 @@ static inline int swap_out_pmd(struct vm_area_struct * vma, pmd_t *dir, unsigned
 	return 0;
 }
 
-static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask, zone_t *zone)
+static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
 {
 	pmd_t * pmd;
 	unsigned long pgd_end;
@@ -246,7 +244,7 @@ static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned
 		end = pgd_end;
 
 	do {
-		int result = swap_out_pmd(vma, pmd, address, end, gfp_mask, zone);
+		int result = swap_out_pmd(vma, pmd, address, end, gfp_mask);
 		if (result)
 			return result;
 		address = (address + PMD_SIZE) & PMD_MASK;
@@ -255,7 +253,7 @@ static inline int swap_out_pgd(struct vm_area_struct * vma, pgd_t *dir, unsigned
 	return 0;
 }
 
-static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int gfp_mask, zone_t *zone)
+static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int gfp_mask)
 {
 	pgd_t *pgdir;
 	unsigned long end;
@@ -270,7 +268,7 @@ static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int
 	if (address >= end)
 		BUG();
 	do {
-		int result = swap_out_pgd(vma, pgdir, address, end, gfp_mask, zone);
+		int result = swap_out_pgd(vma, pgdir, address, end, gfp_mask);
 		if (result)
 			return result;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
@@ -279,7 +277,7 @@ static int swap_out_vma(struct vm_area_struct * vma, unsigned long address, int
 	return 0;
 }
 
-static int swap_out_mm(struct mm_struct * mm, int gfp_mask, zone_t *zone)
+static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
@@ -300,7 +298,7 @@ static int swap_out_mm(struct mm_struct * mm, int gfp_mask, zone_t *zone)
 		address = vma->vm_start;
 
 		for (;;) {
-			int result = swap_out_vma(vma, address, gfp_mask, zone);
+			int result = swap_out_vma(vma, address, gfp_mask);
 			if (result)
 				return result;
 			vma = vma->vm_next;
@@ -322,7 +320,7 @@ static int swap_out_mm(struct mm_struct * mm, int gfp_mask, zone_t *zone)
 * N.B. This function returns only 0 or 1.  Return values != 1 from
 * the lower level routines result in continued processing.
 */
-static int swap_out(unsigned int priority, int gfp_mask, zone_t *zone)
+static int swap_out(unsigned int priority, int gfp_mask)
 {
 	struct task_struct * p;
 	int counter;
@@ -383,7 +381,7 @@ static int swap_out(unsigned int priority, int gfp_mask, zone_t *zone)
 		int ret;
 
 		atomic_inc(&best->mm_count);
-		ret = swap_out_mm(best, gfp_mask, zone);
+		ret = swap_out_mm(best, gfp_mask);
 		mmdrop(best);
 
 		if (!ret)
@@ -424,16 +422,18 @@ static int do_try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
 				goto done;
 		}
 
-		/* don't be too light against the d/i cache since
-		   shrink_mmap() almost never fail when there's
-		   really plenty of memory free. */
-		count -= shrink_dcache_memory(priority, gfp_mask, zone);
-		count -= shrink_icache_memory(priority, gfp_mask, zone);
-		if (count <= 0)
-			goto done;
 
 		/* Try to get rid of some shared memory pages.. */
 		if (gfp_mask & __GFP_IO) {
+			/*
+			 * don't be too light against the d/i cache since
+			 * shrink_mmap() almost never fail when there's
+			 * really plenty of memory free.
+			 */
+			count -= shrink_dcache_memory(priority, gfp_mask, zone);
+			count -= shrink_icache_memory(priority, gfp_mask, zone);
+			if (count <= 0)
+				goto done;
			while (shm_swap(priority, gfp_mask, zone)) {
 				if (!--count)
 					goto done;
@@ -441,7 +441,7 @@ static int do_try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
 		}
 
 		/* Then, try to page stuff out.. */
-		while (swap_out(priority, gfp_mask, zone)) {
+		while (swap_out(priority, gfp_mask)) {
 			if (!--count)
 				goto done;
 		}
@@ -534,8 +534,11 @@ int try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
 	int retval = 1;
 
 	wake_up_process(kswapd_process);
-	if (gfp_mask & __GFP_WAIT)
+	if (gfp_mask & __GFP_WAIT) {
+		current->flags |= PF_MEMALLOC;
 		retval = do_try_to_free_pages(gfp_mask, zone);
+		current->flags &= ~PF_MEMALLOC;
+	}
 
 	return retval;
 }
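A note on the filemap.c hunks above: they replace positional struct initializers with GCC's 'field:' designated-initializer extension, so every member that is not named defaults to NULL and the table no longer needs placeholder entries that break whenever vm_operations_struct grows or is reordered. A minimal standalone sketch of the idiom (the struct and handler below are invented for illustration; standard C99 spells the same thing '.field ='):

#include <stdio.h>

/* Hypothetical ops table, standing in for vm_operations_struct. */
struct demo_ops {
	void (*open)(void);
	void (*close)(void);
	int (*nopage)(int);
};

static int demo_nopage(int x) { return x + 1; }

/* GCC's old 'field:' syntax, as used in the patch: */
static struct demo_ops old_style = {
	nopage: demo_nopage,	/* all other members are implicitly NULL */
};

/* The equivalent standard C99 form: */
static struct demo_ops c99_style = {
	.nopage = demo_nopage,
};

int main(void)
{
	printf("%d %d\n", old_style.nopage(1), c99_style.nopage(1));
	return 0;
}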
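The free_area_init_core() hunks change how watermarks are set up: instead of one global freepages threshold, each zone's pages_low/pages_high are derived from the cumulative size of that zone plus every zone below it in the node, divided by zone_balance_ratio[]. That cumulative base matches what zone_balance_memory() now measures at allocation time via classfree(), which sums free pages over the same class of zones. A sketch of the arithmetic with invented zone sizes (4096/28672/0 pages are illustrative, not values from the patch):

#include <stdio.h>

#define MAX_NR_ZONES 3

int main(void)
{
	/* Illustrative page counts for DMA, Normal, HighMem. */
	unsigned long zones_size[MAX_NR_ZONES] = { 4096, 28672, 0 };
	int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 };
	unsigned long cumulative = 0;
	int j;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long mask;

		if (!zones_size[j])	/* empty zones are skipped, as in the patch */
			continue;
		cumulative += zones_size[j];
		mask = cumulative / zone_balance_ratio[j];
		if (mask < 1)
			mask = 1;
		/* pages_low and pages_high exactly as the patch computes them */
		printf("zone %d: pages_low=%lu pages_high=%lu\n",
		       j, mask * 2, mask * 3);
	}
	return 0;
}

With these numbers the DMA zone gets pages_low=64, while the Normal zone, whose class also covers the DMA pages below it, gets pages_low=512.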
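The new __setup("memfrac=", ...) hook lets zone_balance_ratio[] be overridden from the boot command line, e.g. memfrac=256,128,64 (illustrative values; a larger divisor gives that zone lower watermarks). The kernel parses the list with get_option(), which returns 2 while a comma follows the parsed number and 1 on the last one. A user-space approximation of the parse loop, with a stand-in for get_option() that mimics only that return convention:

#include <stdio.h>
#include <stdlib.h>

#define MAX_NR_ZONES 3

static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 };

/* Simplified stand-in for the kernel's get_option() helper. */
static int get_option(char **str, int *pint)
{
	char *end;
	long v = strtol(*str, &end, 0);

	if (end == *str)
		return 0;		/* no integer found */
	*pint = (int)v;
	if (*end == ',') {
		*str = end + 1;
		return 2;		/* a further value follows */
	}
	*str = end;
	return 1;			/* last value */
}

int main(void)
{
	char arg[] = "256,128,64";	/* as if booted with memfrac=256,128,64 */
	char *str = arg;
	int j = 0;

	while (j < MAX_NR_ZONES &&
	       get_option(&str, &zone_balance_ratio[j++]) == 2)
		;
	printf("setup_mem_frac: ");
	for (j = 0; j < MAX_NR_ZONES; j++)
		printf("%d ", zone_balance_ratio[j]);
	printf("\n");
	return 0;
}

One difference worth noting: the loop in the patch itself does not bound j, so a command line with more than MAX_NR_ZONES comma-separated values would write past the array; the sketch adds the bound.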
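Lastly, the try_to_free_pages() hunk moves the PF_MEMALLOC bracketing out of the allocator's balance path (the patch deletes it from both zone_balance_memory() and the now #if 0'd balance_memory()) and into try_to_free_pages() itself, so every __GFP_WAIT caller that ends up reclaiming gets the flag. PF_MEMALLOC marks the current task as already being inside reclaim, letting allocations made on the reclaim path avoid recursing back into it. A compact sketch of that guard pattern (the task structure and flag bit are stand-ins, not the kernel's definitions):

#include <stdio.h>

#define PF_MEMALLOC 0x0800	/* illustrative bit, not the kernel's value */

static struct task { unsigned long flags; } current_task;
#define current (&current_task)

static int do_try_to_free_pages(void)
{
	/* Inside here the flag is set: nested allocations can detect it. */
	printf("reclaiming, PF_MEMALLOC %s\n",
	       (current->flags & PF_MEMALLOC) ? "set" : "clear");
	return 1;
}

static int try_to_free_pages(void)
{
	int retval;

	current->flags |= PF_MEMALLOC;
	retval = do_try_to_free_pages();
	current->flags &= ~PF_MEMALLOC;
	return retval;
}

int main(void)
{
	printf("freed=%d flags=%lx\n", try_to_free_pages(), current->flags);
	return 0;
}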