Diffstat (limited to 'mm')

-rw-r--r--  mm/filemap.c     | 47
-rw-r--r--  mm/memory.c      |  5
-rw-r--r--  mm/page_alloc.c  | 14
-rw-r--r--  mm/vmscan.c      | 91

4 files changed, 63 insertions, 94 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 69fe40466..dedd7911e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -219,13 +219,12 @@ static inline void truncate_complete_page(struct page *page)
 	page_cache_release(page);
 }
 
-void truncate_list_pages(struct list_head *head, unsigned long start, unsigned partial)
+static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
+static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
 {
 	struct list_head *curr;
 	struct page * page;
 
-repeat:
-	spin_lock(&pagecache_lock);
 	curr = head->next;
 	while (curr != head) {
 		unsigned long offset;
@@ -235,37 +234,29 @@ repeat:
 		offset = page->index;
 
 		/* Is one of the pages to truncate? */
-		if ((offset >= start) || (partial && (offset + 1) == start)) {
+		if ((offset >= start) || (*partial && (offset + 1) == start)) {
 			if (TryLockPage(page)) {
 				page_cache_get(page);
 				spin_unlock(&pagecache_lock);
 				wait_on_page(page);
 				page_cache_release(page);
-				goto repeat;
+				return 1;
 			}
 			page_cache_get(page);
 			spin_unlock(&pagecache_lock);
 
-			if (partial && (offset + 1) == start) {
-				truncate_partial_page(page, partial);
-				partial = 0;
+			if (*partial && (offset + 1) == start) {
+				truncate_partial_page(page, *partial);
+				*partial = 0;
 			} else
 				truncate_complete_page(page);
 
 			UnlockPage(page);
 			page_cache_release(page);
-
-			/*
-			 * We have done things without the pagecache lock,
-			 * so we'll have to repeat the scan.
-			 * It's not possible to deadlock here because
-			 * we are guaranteed to make progress. (ie. we have
-			 * just removed a page)
-			 */
-			goto repeat;
+			return 1;
 		}
 	}
-	spin_unlock(&pagecache_lock);
+	return 0;
 }
 
@@ -283,9 +274,15 @@ void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
 	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 
-	truncate_list_pages(&mapping->clean_pages, start, partial);
-	truncate_list_pages(&mapping->dirty_pages, start, partial);
-	truncate_list_pages(&mapping->locked_pages, start, partial);
+repeat:
+	spin_lock(&pagecache_lock);
+	if (truncate_list_pages(&mapping->clean_pages, start, &partial))
+		goto repeat;
+	if (truncate_list_pages(&mapping->dirty_pages, start, &partial))
+		goto repeat;
+	if (truncate_list_pages(&mapping->locked_pages, start, &partial))
+		goto repeat;
+	spin_unlock(&pagecache_lock);
 }
 
 static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
@@ -2498,6 +2495,7 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 	while (count) {
 		unsigned long bytes, index, offset;
 		char *kaddr;
+		int deactivate = 1;
 
 		/*
 		 * Try to find the page in the cache. If it isn't there,
@@ -2506,8 +2504,10 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
 		index = pos >> PAGE_CACHE_SHIFT;
 		bytes = PAGE_CACHE_SIZE - offset;
-		if (bytes > count)
+		if (bytes > count) {
 			bytes = count;
+			deactivate = 0;
+		}
 
 		/*
 		 * Bring in the user page that we will copy from _first_.
@@ -2551,7 +2551,8 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 unlock:
 		/* Mark it unlocked again and drop the page.. */
 		UnlockPage(page);
-		deactivate_page(page);
+		if (deactivate)
+			deactivate_page(page);
 		page_cache_release(page);
 
 		if (status < 0)
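Two independent changes here. First, truncate_list_pages() is reworked from "retry internally with a goto" into a return-code protocol: it is now entered with pagecache_lock already held, returns 1 as soon as it has had to drop that lock (so the caller in truncate_inode_pages() re-takes the lock and rescans from the top), and returns 0 only after a clean pass. The partial argument becomes a pointer so the one-off partial-page truncation is not repeated across restarts. Second, generic_file_write() now deactivates a page only when it was overwritten in full, so partially written (and likely still hot) pages stay on the active list. Below is a minimal userspace sketch of the restart protocol, with hypothetical names (item, scan_list, process_all) and a pthread mutex standing in for pagecache_lock:

	#include <pthread.h>
	#include <stddef.h>

	struct item {
		struct item *next;
		int needs_work;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/*
	 * Called with list_lock held.  Returns 1 after handling one
	 * item, with list_lock dropped (the list may have changed
	 * meanwhile, so the caller must relock and restart).  Returns
	 * 0 after a full pass found nothing to do, lock still held.
	 */
	static int scan_list(struct item *head)
	{
		struct item *p;

		for (p = head; p != NULL; p = p->next) {
			if (!p->needs_work)
				continue;
			p->needs_work = 0;	/* claim it while still locked */
			pthread_mutex_unlock(&list_lock);
			/* ... slow work on p, done without the lock ... */
			return 1;
		}
		return 0;
	}

	static void process_all(struct item *head)
	{
	repeat:
		pthread_mutex_lock(&list_lock);
		if (scan_list(head))
			goto repeat;
		pthread_mutex_unlock(&list_lock);
	}

As in the patch, forward progress is guaranteed because each restart happens only after one item has actually been handled.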
diff --git a/mm/memory.c b/mm/memory.c
index f4bb0141f..6f1f318a3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -483,9 +483,10 @@ int map_user_kiobuf(int rw, struct kiobuf *iobuf, unsigned long va, size_t len)
 			goto out_unlock;
 		}
 		map = get_page_map(map);
-		if (map)
+		if (map) {
+			flush_dcache_page(map);
 			atomic_inc(&map->count);
-		else
+		} else
 			printk (KERN_INFO "Mapped page missing [%d]\n", i);
 		spin_unlock(&mm->page_table_lock);
 		iobuf->maplist[i] = map;
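The map_user_kiobuf() fix flushes the page out of the data cache before pinning it for raw I/O: on CPUs with virtually indexed caches, the device could otherwise read stale memory contents that userspace had written only into the cache. A stub sketch of the ordering the hunk establishes; flush_dcache_page() and get_page() here are empty stand-ins, not the kernel implementations:

	#include <stdio.h>

	struct page { int count; };

	/*
	 * Stand-in: on a virtually indexed cache this would write the
	 * CPU's cached view of the page back to memory, so device DMA
	 * sees what userspace last wrote through its mapping.
	 */
	static void flush_dcache_page(struct page *pg) { (void)pg; }

	static void get_page(struct page *pg) { pg->count++; }

	/* Flush first, then pin and hand the page to the I/O layer. */
	static void pin_for_io(struct page *pg, int i)
	{
		if (pg) {
			flush_dcache_page(pg);
			get_page(pg);
		} else {
			fprintf(stderr, "Mapped page missing [%d]\n", i);
		}
	}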
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dca35de59..b67aa4913 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,16 +68,6 @@ static void __free_pages_ok (struct page *page, unsigned long order)
 	struct page *base;
 	zone_t *zone;
 
-	/*
-	 * Subtle. We do not want to test this in the inlined part of
-	 * __free_page() - it's a rare condition and just increases
-	 * cache footprint unnecesserily. So we do an 'incorrect'
-	 * decrement on page->count for reserved pages, but this part
-	 * makes it safe.
-	 */
-	if (PageReserved(page))
-		return;
-
 	if (page->buffers)
 		BUG();
 	if (page->mapping)
@@ -427,7 +417,9 @@ try_again:
 	if (order > 0 && (gfp_mask & __GFP_WAIT)) {
 		zone = zonelist->zones;
 		/* First, clean some dirty pages. */
+		current->flags |= PF_MEMALLOC;
 		page_launder(gfp_mask, 1);
+		current->flags &= ~PF_MEMALLOC;
 		for (;;) {
 			zone_t *z = *(zone++);
 			if (!z)
@@ -556,7 +548,7 @@ unsigned long get_zeroed_page(int gfp_mask)
 
 void __free_pages(struct page *page, unsigned long order)
 {
-	if (put_page_testzero(page))
+	if (!PageReserved(page) && put_page_testzero(page))
 		__free_pages_ok(page, order);
 }
 
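Two allocator changes. The PageReserved() test moves out of __free_pages_ok() into __free_pages(), so reserved pages are now skipped before their reference count is ever decremented, which is what lets the old "incorrect decrement" comment go away. And page_launder(), when invoked from the allocator itself, now runs with PF_MEMALLOC set, so any allocation it performs while writing dirty pages out does not recurse back into reclaim. A userspace sketch of that recursion-guard idea, using a thread-local flag in place of current->flags (all names illustrative):

	#include <stdlib.h>

	static _Thread_local int in_reclaim;	/* plays the role of PF_MEMALLOC */

	static void reclaim_memory(void);

	static void *alloc_mem(size_t n)
	{
		void *p = malloc(n);

		if (p == NULL && !in_reclaim) {
			/* Only the outermost caller may enter reclaim. */
			in_reclaim = 1;
			reclaim_memory();
			in_reclaim = 0;
			p = malloc(n);	/* one retry after reclaiming */
		}
		return p;
	}

	static void reclaim_memory(void)
	{
		/*
		 * Reclaim may itself need temporary memory, as
		 * page_launder() does for writeback; the flag keeps
		 * that inner allocation from recursing into reclaim.
		 */
		void *scratch = alloc_mem(64);
		free(scratch);
	}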
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d4a74f41f..afa5261c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -49,8 +49,10 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 	if ((!VALID_PAGE(page)) || PageReserved(page))
 		goto out_failed;
 
-	if (mm->swap_cnt)
-		mm->swap_cnt--;
+	if (!mm->swap_cnt)
+		return 1;
+
+	mm->swap_cnt--;
 
 	onlist = PageActive(page);
 	/* Don't look at this pte if it's been accessed recently. */
@@ -79,6 +81,7 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
 	 * bits in hardware.
 	 */
 	pte = ptep_get_and_clear(page_table);
+	flush_tlb_page(vma, address);
 
 	/*
 	 * Is the page already in the swap cache? If so, then
@@ -98,7 +101,6 @@ set_swap_pte:
 drop_pte:
 		UnlockPage(page);
 		mm->rss--;
-		flush_tlb_page(vma, address);
 		deactivate_page(page);
 		page_cache_release(page);
 out_failed:
@@ -193,8 +195,6 @@ static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vm
 		result = try_to_swap_out(mm, vma, address, pte, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address += PAGE_SIZE;
 		pte++;
 	} while (address && (address < end));
@@ -224,8 +224,6 @@ static inline int swap_out_pgd(struct mm_struct * mm, struct vm_area_struct * vm
 		int result = swap_out_pmd(mm, vma, pmd, address, end, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address = (address + PMD_SIZE) & PMD_MASK;
 		pmd++;
 	} while (address && (address < end));
@@ -250,8 +248,6 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsi
 		int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask);
 		if (result)
 			return result;
-		if (!mm->swap_cnt)
-			return 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		pgdir++;
 	} while (address && (address < end));
@@ -260,29 +256,28 @@ static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsi
 
 static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
 {
+	int result = 0;
 	unsigned long address;
 	struct vm_area_struct* vma;
 
 	/*
 	 * Go through process' page directory.
 	 */
-	address = mm->swap_address;
 
 	/*
 	 * Find the proper vm-area after freezing the vma chain
 	 * and ptes.
 	 */
 	spin_lock(&mm->page_table_lock);
+	address = mm->swap_address;
 	vma = find_vma(mm, address);
 	if (vma) {
 		if (address < vma->vm_start)
 			address = vma->vm_start;
 
 		for (;;) {
-			int result = swap_out_vma(mm, vma, address, gfp_mask);
+			result = swap_out_vma(mm, vma, address, gfp_mask);
 			if (result)
-				return result;
-			if (!mm->swap_cnt)
 				goto out_unlock;
 			vma = vma->vm_next;
 			if (!vma)
@@ -296,9 +291,7 @@ static int swap_out_mm(struct mm_struct * mm, int gfp_mask)
 
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
-
-	/* We didn't find anything for the process */
-	return 0;
+	return result;
 }
 
 /*
@@ -309,13 +302,11 @@ out_unlock:
 #define SWAP_SHIFT 5
 #define SWAP_MIN 8
 
-static int swap_out(unsigned int priority, int gfp_mask, unsigned long idle_time)
+static int swap_out(unsigned int priority, int gfp_mask)
 {
-	struct task_struct * p;
 	int counter;
 	int __ret = 0;
 
-	lock_kernel();
 	/*
 	 * We make one or two passes through the task list, indexed by
 	 * assign = {0, 1}:
@@ -335,24 +326,18 @@ static int swap_out(unsigned int priority, int gfp_mask, unsigned long idle_time
 		counter = 1;
 
 	for (; counter >= 0; counter--) {
+		struct list_head *p;
 		unsigned long max_cnt = 0;
 		struct mm_struct *best = NULL;
-		int pid = 0;
 		int assign = 0;
 		int found_task = 0;
 	select:
-		read_lock(&tasklist_lock);
-		p = init_task.next_task;
-		for (; p != &init_task; p = p->next_task) {
-			struct mm_struct *mm = p->mm;
-			if (!p->swappable || !mm)
-				continue;
+		spin_lock(&mmlist_lock);
+		p = init_mm.mmlist.next;
+		for (; p != &init_mm.mmlist; p = p->next) {
+			struct mm_struct *mm = list_entry(p, struct mm_struct, mmlist);
 			if (mm->rss <= 0)
 				continue;
-			/* Skip tasks which haven't slept long enough yet when idle-swapping. */
-			if (idle_time && !assign && (!(p->state & TASK_INTERRUPTIBLE) ||
-					time_after(p->sleep_time + idle_time * HZ, jiffies)))
-				continue;
 			found_task++;
 			/* Refresh swap_cnt? */
 			if (assign == 1) {
@@ -363,29 +348,32 @@ static int swap_out(unsigned int priority, int gfp_mask, unsigned long idle_time
 			if (mm->swap_cnt > max_cnt) {
 				max_cnt = mm->swap_cnt;
 				best = mm;
-				pid = p->pid;
 			}
 		}
-		read_unlock(&tasklist_lock);
+
+		/* Make sure it doesn't disappear */
+		if (best)
+			atomic_inc(&best->mm_users);
+		spin_unlock(&mmlist_lock);
+
+		/*
+		 * We have dropped the tasklist_lock, but we
+		 * know that "mm" still exists: we are running
+		 * with the big kernel lock, and exit_mm()
+		 * cannot race with us.
+		 */
 		if (!best) {
 			if (!assign && found_task > 0) {
 				assign = 1;
 				goto select;
 			}
-			goto out;
+			break;
 		} else {
-			int ret;
-
-			atomic_inc(&best->mm_count);
-			ret = swap_out_mm(best, gfp_mask);
-			mmdrop(best);
-
-			__ret = 1;
-			goto out;
+			__ret = swap_out_mm(best, gfp_mask);
+			mmput(best);
+			break;
 		}
 	}
 
-out:
-	unlock_kernel();
 	return __ret;
 }
@@ -848,7 +836,6 @@ int inactive_shortage(void)
 static int refill_inactive(unsigned int gfp_mask, int user)
 {
 	int priority, count, start_count, made_progress;
-	unsigned long idle_time;
 
 	count = inactive_shortage() + free_shortage();
 	if (user)
@@ -858,17 +845,6 @@ static int refill_inactive(unsigned int gfp_mask, int user)
 	/* Always trim SLAB caches when memory gets low. */
 	kmem_cache_reap(gfp_mask);
 
-	/*
-	 * Calculate the minimum time (in seconds) a process must
-	 * have slept before we consider it for idle swapping.
-	 * This must be the number of seconds it takes to go through
-	 * all of the cache. Doing this idle swapping makes the VM
-	 * smoother once we start hitting swap.
-	 */
-	idle_time = atomic_read(&page_cache_size);
-	idle_time += atomic_read(&buffermem_pages);
-	idle_time /= (inactive_target + 1);
-
 	priority = 6;
 	do {
 		made_progress = 0;
@@ -878,8 +854,7 @@ static int refill_inactive(unsigned int gfp_mask, int user)
 			schedule();
 		}
 
-		while (refill_inactive_scan(priority, 1) ||
-				swap_out(priority, gfp_mask, idle_time)) {
+		while (refill_inactive_scan(priority, 1)) {
 			made_progress = 1;
 			if (--count <= 0)
 				goto done;
@@ -896,7 +871,7 @@ static int refill_inactive(unsigned int gfp_mask, int user)
 		/*
 		 * Then, try to page stuff out..
 		 */
-		while (swap_out(priority, gfp_mask, 0)) {
+		while (swap_out(priority, gfp_mask)) {
 			made_progress = 1;
 			if (--count <= 0)
 				goto done;
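The vmscan rework has three strands. try_to_swap_out() now owns the swap_cnt bookkeeping and returns 1 when the quota is exhausted, so the pte, pmd and pgd walkers no longer need their own "!mm->swap_cnt" exits. The TLB flush moves to immediately after ptep_get_and_clear(), closing the window in which another CPU could keep writing through a stale TLB entry for the just-cleared pte. And swap_out() stops scanning the task list under the big kernel lock: it walks the mm_struct list under mmlist_lock, pins the chosen mm with atomic_inc(&best->mm_users) before dropping the lock, and releases it afterwards with mmput(). (Note the retained "We have dropped the tasklist_lock" comment reads as stale after this change; it is the mm_users reference that now keeps the mm alive.) A userspace sketch of that pick-and-pin pattern, with illustrative types and names:

	#include <pthread.h>
	#include <stddef.h>

	struct mm_like {
		struct mm_like *next;
		int users;	/* reference count, protected by list_lock */
		long rss;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct mm_like *mm_list;

	/* Drop a reference; real code would free the mm at zero. */
	static void put_mm(struct mm_like *mm)
	{
		pthread_mutex_lock(&list_lock);
		mm->users--;
		pthread_mutex_unlock(&list_lock);
	}

	/*
	 * Choose a victim under the lock and pin it *before*
	 * unlocking, so it cannot be freed while we work on it
	 * unlocked.  The caller must put_mm() when done.
	 */
	static struct mm_like *pick_biggest(void)
	{
		struct mm_like *mm, *best = NULL;

		pthread_mutex_lock(&list_lock);
		for (mm = mm_list; mm != NULL; mm = mm->next)
			if (best == NULL || mm->rss > best->rss)
				best = mm;
		if (best)
			best->users++;
		pthread_mutex_unlock(&list_lock);
		return best;
	}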