Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    | 10
-rw-r--r--  mm/memory.c     | 16
-rw-r--r--  mm/mmap.c       | 39
-rw-r--r--  mm/mremap.c     | 22
-rw-r--r--  mm/numa.c       | 19
-rw-r--r--  mm/page_alloc.c | 24
-rw-r--r--  mm/swapfile.c   |  4
-rw-r--r--  mm/vmscan.c     | 16

8 files changed, 88 insertions(+), 62 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 4772ed254..749e14250 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -124,7 +124,7 @@ void invalidate_inode_pages(struct inode * inode)
  * Truncate the page cache at a set offset, removing the pages
  * that are beyond that offset (and zeroing out partial pages).
  */
-void truncate_inode_pages(struct inode * inode, loff_t lstart)
+void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
 {
 	struct list_head *head, *curr;
 	struct page * page;
@@ -134,7 +134,7 @@ void truncate_inode_pages(struct inode * inode, loff_t lstart)
 	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 repeat:
-	head = &inode->i_mapping->pages;
+	head = &mapping->pages;
 	spin_lock(&pagecache_lock);
 	curr = head->next;
 	while (curr != head) {
@@ -479,8 +479,8 @@ static inline void __add_to_page_cache(struct page * page,
 	struct page *alias;
 	unsigned long flags;
 
-	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced));
-	page->flags = flags | (1 << PG_locked);
+	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
+	page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
 	get_page(page);
 	page->index = offset;
 	add_page_to_inode_queue(mapping, page);
@@ -1945,8 +1945,6 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 		count -= status;
 		pos += status;
 		buf += status;
-		if (pos > inode->i_size)
-			inode->i_size = pos;
 	}
 unlock:
 	/* Mark it unlocked again and drop the page.. */
diff --git a/mm/memory.c b/mm/memory.c
index 9e7525245..b4bf6ed36 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -860,16 +860,18 @@ void vmtruncate(struct inode * inode, loff_t offset)
 {
 	unsigned long partial, pgoff;
 	struct vm_area_struct * mpnt;
+	struct address_space *mapping = inode->i_mapping;
 
-	truncate_inode_pages(inode, offset);
-	spin_lock(&inode->i_shared_lock);
-	if (!inode->i_mmap)
+	inode->i_size = offset;
+	truncate_inode_pages(mapping, offset);
+	spin_lock(&mapping->i_shared_lock);
+	if (!mapping->i_mmap)
 		goto out_unlock;
 
 	pgoff = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	partial = (unsigned long)offset & (PAGE_CACHE_SIZE - 1);
 
-	mpnt = inode->i_mmap;
+	mpnt = mapping->i_mmap;
 	do {
 		struct mm_struct *mm = mpnt->vm_mm;
 		unsigned long start = mpnt->vm_start;
@@ -903,7 +905,9 @@ void vmtruncate(struct inode * inode, loff_t offset)
 		flush_tlb_range(mm, start, end);
 	} while ((mpnt = mpnt->vm_next_share) != NULL);
 out_unlock:
-	spin_unlock(&inode->i_shared_lock);
+	spin_unlock(&mapping->i_shared_lock);
+	if (inode->i_op && inode->i_op->truncate)
+		inode->i_op->truncate(inode);
 }
 
@@ -957,6 +961,7 @@ static int do_swap_page(struct task_struct * tsk,
 			return -1;
 
 		flush_page_to_ram(page);
+		flush_icache_page(vma, page);
 	}
 
 	vma->vm_mm->rss++;
@@ -1057,6 +1062,7 @@ static int do_no_page(struct task_struct * tsk, struct vm_area_struct * vma,
 	 * handle that later.
 	 */
 	flush_page_to_ram(new_page);
+	flush_icache_page(vma, new_page);
 	entry = mk_pte(new_page, vma->vm_page_prot);
 	if (write_access) {
 		entry = pte_mkwrite(pte_mkdirty(entry));
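
Note on the vmtruncate() change: the new ordering publishes the smaller
i_size first, then shrinks the page cache and unmaps the affected regions,
and only at the end calls the filesystem's own truncate operation to free
disk blocks. Below is a minimal user-space model of that ordering; the
struct and helper names are invented stand-ins, not kernel API.

#include <stdio.h>

struct inode_model {
	long i_size;        /* logical file size            */
	int  cached_pages;  /* pages past EOF in page cache */
	int  disk_blocks;   /* blocks allocated on disk     */
};

static void model_vmtruncate(struct inode_model *ino, long offset)
{
	ino->i_size = offset;    /* 1: readers now see the new EOF       */
	ino->cached_pages = 0;   /* 2: truncate_inode_pages(mapping, ..) */
	/* 3: the shared-mapping unmap loop would run here               */
	ino->disk_blocks = 0;    /* 4: inode->i_op->truncate(inode)      */
}

int main(void)
{
	struct inode_model ino = { 8192, 2, 2 };

	model_vmtruncate(&ino, 0);
	printf("size=%ld cached=%d blocks=%d\n",
	       ino.i_size, ino.cached_pages, ino.disk_blocks);
	return 0;
}
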
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -75,13 +75,14 @@ static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
 	struct file * file = vma->vm_file;
 
 	if (file) {
+		struct inode *inode = file->f_dentry->d_inode;
 		if (vma->vm_flags & VM_DENYWRITE)
-			atomic_inc(&file->f_dentry->d_inode->i_writecount);
-		spin_lock(&file->f_dentry->d_inode->i_shared_lock);
+			atomic_inc(&inode->i_writecount);
+		spin_lock(&inode->i_mapping->i_shared_lock);
 		if(vma->vm_next_share)
 			vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
 		*vma->vm_pprev_share = vma->vm_next_share;
-		spin_unlock(&file->f_dentry->d_inode->i_shared_lock);
+		spin_unlock(&inode->i_mapping->i_shared_lock);
 	}
 }
 
@@ -346,6 +347,7 @@ free_vma:
  * For mmap() without MAP_FIXED and shmat() with addr=0.
  * Return value 0 means ENOMEM.
  */
+#ifndef HAVE_ARCH_UNMAPPED_AREA
 unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
 {
 	struct vm_area_struct * vmm;
@@ -365,6 +367,7 @@ unsigned long get_unmapped_area(unsigned long addr, unsigned long len)
 		addr = vmm->vm_end;
 	}
 }
+#endif
 
 #define vm_avl_empty	(struct vm_area_struct *) NULL
 
@@ -579,7 +582,8 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
 	unsigned long start, unsigned long end)
 {
 	unsigned long first = start & PGDIR_MASK;
-	unsigned long last = (end + PGDIR_SIZE - 1) & PGDIR_MASK;
+	unsigned long last = end + PGDIR_SIZE - 1;
+	unsigned long start_index, end_index;
 
 	if (!prev) {
 		prev = mm->mmap;
@@ -607,11 +611,15 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
 			break;
 	}
 no_mmaps:
-	first = first >> PGDIR_SHIFT;
-	last = last >> PGDIR_SHIFT;
-	if (last > first) {
-		clear_page_tables(mm, first, last-first);
-		flush_tlb_pgtables(mm, first << PGDIR_SHIFT, last << PGDIR_SHIFT);
+	/*
+	 * If the PGD bits are not consecutive in the virtual address, the
+	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
+	 */
+	start_index = pgd_index(first);
+	end_index = pgd_index(last);
+	if (end_index > start_index) {
+		clear_page_tables(mm, start_index, end_index - start_index);
+		flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK);
 	}
 }
 
@@ -884,16 +892,17 @@ void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
 	file = vmp->vm_file;
 	if (file) {
 		struct inode * inode = file->f_dentry->d_inode;
+		struct address_space *mapping = inode->i_mapping;
 		if (vmp->vm_flags & VM_DENYWRITE)
 			atomic_dec(&inode->i_writecount);
 
 		/* insert vmp into inode's share list */
-		spin_lock(&inode->i_shared_lock);
-		if((vmp->vm_next_share = inode->i_mmap) != NULL)
-			inode->i_mmap->vm_pprev_share = &vmp->vm_next_share;
-		inode->i_mmap = vmp;
-		vmp->vm_pprev_share = &inode->i_mmap;
-		spin_unlock(&inode->i_shared_lock);
+		spin_lock(&mapping->i_shared_lock);
+		if((vmp->vm_next_share = mapping->i_mmap) != NULL)
+			mapping->i_mmap->vm_pprev_share = &vmp->vm_next_share;
+		mapping->i_mmap = vmp;
+		vmp->vm_pprev_share = &mapping->i_mmap;
+		spin_unlock(&mapping->i_shared_lock);
 	}
 }
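
Note on the free_pgtables() hunk: clear_page_tables() takes PGD slot
indices while flush_tlb_pgtables() takes virtual addresses, and on
architectures where the PGD index bits are not simply the top bits of the
address, deriving both from a raw ">> PGDIR_SHIFT" miscomputes the slot.
Below is a compilable sketch of the index/address split; the constants are
i386-style placeholders, and pgd_index() is written as a plain
shift-and-mask here only so the example runs (the point of the change is
that the real macro is architecture-defined and need not be a simple shift).

#include <stdio.h>

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
#define PTRS_PER_PGD	1024
#define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	unsigned long start = 0x08049000UL, end = 0x0a000000UL;
	unsigned long first = start & PGDIR_MASK;	/* an address   */
	unsigned long last  = end + PGDIR_SIZE - 1;	/* an address   */
	unsigned long start_index = pgd_index(first);	/* a table slot */
	unsigned long end_index   = pgd_index(last);	/* a table slot */

	if (end_index > start_index) {
		/* indices go to the page-table walker ... */
		printf("clear_page_tables(mm, %lu, %lu)\n",
		       start_index, end_index - start_index);
		/* ... addresses go to the TLB flush */
		printf("flush_tlb_pgtables(mm, %#lx, %#lx)\n",
		       first & PGDIR_MASK, last & PGDIR_MASK);
	}
	return 0;
}
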
diff --git a/mm/mremap.c b/mm/mremap.c
index 45bcf5264..5721fc5d5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -165,19 +165,14 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
  *
  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
  * This option implies MREMAP_MAYMOVE.
- *
- * "__new_addr" toying in order to not change the saved stack layout
- * for old x86 binaries that don't want %edi to change..
  */
-asmlinkage unsigned long sys_mremap(unsigned long addr,
+unsigned long do_mremap(unsigned long addr,
 	unsigned long old_len, unsigned long new_len,
-	unsigned long flags, unsigned long __new_addr)
+	unsigned long flags, unsigned long new_addr)
 {
-	unsigned long new_addr = __new_addr;
 	struct vm_area_struct *vma;
 	unsigned long ret = -EINVAL;
 
-	down(&current->mm->mmap_sem);
 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
 		goto out;
 
@@ -280,13 +275,24 @@ asmlinkage unsigned long sys_mremap(unsigned long addr,
 	ret = -ENOMEM;
 	if (flags & MREMAP_MAYMOVE) {
 		if (!(flags & MREMAP_FIXED)) {
-			new_addr = get_unmapped_area(addr, new_len);
+			new_addr = get_unmapped_area(0, new_len);
 			if (!new_addr)
 				goto out;
 		}
 		ret = move_vma(vma, addr, old_len, new_len, new_addr);
 	}
 out:
+	return ret;
+}
+
+asmlinkage unsigned long sys_mremap(unsigned long addr,
+	unsigned long old_len, unsigned long new_len,
+	unsigned long flags, unsigned long new_addr)
+{
+	unsigned long ret;
+
+	down(&current->mm->mmap_sem);
+	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
 	up(&current->mm->mmap_sem);
 	return ret;
 }
diff --git a/mm/numa.c b/mm/numa.c
--- a/mm/numa.c
+++ b/mm/numa.c
@@ -16,6 +16,18 @@ int numnodes = 1;	/* Initialized for UMA platforms */
 static bootmem_data_t contig_bootmem_data;
 pg_data_t contig_page_data = { bdata: &contig_bootmem_data };
 
+/*
+ * This is meant to be invoked by platforms whose physical memory starts
+ * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
+ * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
+ */
+void __init free_area_init_node(int nid, pg_data_t *pgdat,
+	unsigned long *zones_size, unsigned long zone_start_paddr)
+{
+	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size,
+		zone_start_paddr);
+}
+
 #endif /* !CONFIG_DISCONTIGMEM */
 
 struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
@@ -29,10 +41,6 @@ struct page * alloc_pages_node(int nid, int gfp_mask, unsigned long order)
 
 static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;
 
-extern void show_free_areas_core(int);
-extern void __init free_area_init_core(int nid, pg_data_t *pgdat,
-	struct page **gmap, unsigned int *zones_size, unsigned long paddr);
-
 void show_free_areas_node(int nid)
 {
 	unsigned long flags;
@@ -47,7 +55,7 @@ void show_free_areas_node(int nid)
  * Nodes can be initialized parallely, in no particular order.
  */
 void __init free_area_init_node(int nid, pg_data_t *pgdat,
-	unsigned int *zones_size, unsigned long zone_start_paddr)
+	unsigned long *zones_size, unsigned long zone_start_paddr)
 {
 	int i, size = 0;
 	struct page *discard;
@@ -56,6 +64,7 @@ void __init free_area_init_node(int nid, pg_data_t *pgdat,
 		mem_map = (mem_map_t *)PAGE_OFFSET;
 
 	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr);
+	pgdat->node_id = nid;
 
 	/*
	 * Get space for the valid bitmap.
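
Note on the mremap.c split: this is the usual pattern of separating a
syscall entry point from its worker. sys_mremap() keeps the mmap_sem
acquire/release, while do_mremap() assumes its caller already holds the
semaphore, so in-kernel paths can invoke it under their own locking.
Below is a user-space model of the pattern, with a pthread mutex standing
in for mmap_sem; all names are illustrative, not kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;

/* Worker: caller must hold mmap_sem (the do_mremap() role). */
static long do_remap_model(long addr, long new_len)
{
	return addr + new_len;	/* stand-in for the real remapping work */
}

/* Entry point: takes and drops the lock (the sys_mremap() role). */
static long sys_remap_model(long addr, long new_len)
{
	long ret;

	pthread_mutex_lock(&mmap_sem);
	ret = do_remap_model(addr, new_len);
	pthread_mutex_unlock(&mmap_sem);
	return ret;
}

int main(void)
{
	printf("%#lx\n", sys_remap_model(0x1000, 0x2000));
	return 0;
}
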
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b212a6252..efdbb98f1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -258,7 +258,9 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
 		 */
 		if (!(current->flags & PF_MEMALLOC))
 		{
-			if (classfree(z) > z->pages_high)
+			unsigned long free = classfree(z);
+
+			if (free > z->pages_high)
 			{
 				if (z->low_on_memory)
 					z->low_on_memory = 0;
@@ -270,11 +272,11 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
 				if (z->low_on_memory)
 					goto balance;
 
-				if (classfree(z) <= z->pages_low)
+				if (free <= z->pages_low)
 				{
 					wake_up_interruptible(&kswapd_wait);
 
-					if (classfree(z) <= z->pages_min)
+					if (free <= z->pages_min)
 					{
 						z->low_on_memory = 1;
 						goto balance;
@@ -298,16 +300,6 @@ ready:
 		}
 	}
 
-	/*
-	 * If we can schedule, do so, and make sure to yield.
-	 * We may be a real-time process, and if kswapd is
-	 * waiting for us we need to allow it to run a bit.
-	 */
-	if (gfp_mask & __GFP_WAIT) {
-		current->policy |= SCHED_YIELD;
-		schedule();
-	}
-
 nopage:
 	return NULL;
 
@@ -525,6 +517,9 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		lmem_map = (struct page *)(PAGE_OFFSET +
 			MAP_ALIGN((unsigned long)lmem_map - PAGE_OFFSET));
 	*gmap = pgdat->node_mem_map = lmem_map;
+	pgdat->node_size = totalpages;
+	pgdat->node_start_paddr = zone_start_paddr;
+	pgdat->node_start_mapnr = (lmem_map - mem_map);
 
 	/*
 	 * Initially all pages are reserved - free ones are freed
@@ -565,6 +560,9 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		zone->pages_low = mask*2;
 		zone->pages_high = mask*3;
 		zone->low_on_memory = 0;
+		zone->zone_mem_map = mem_map + offset;
+		zone->zone_start_mapnr = offset;
+		zone->zone_start_paddr = zone_start_paddr;
 
 		for (i = 0; i < size; i++) {
 			struct page *page = mem_map + offset + i;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index dc647f2b0..e8a2a0b2f 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -634,11 +634,15 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
 
 	if (S_ISBLK(swap_dentry->d_inode->i_mode)) {
 		kdev_t dev = swap_dentry->d_inode->i_rdev;
+		struct block_device_operations *bdops;
 
 		p->swap_device = dev;
 		set_blocksize(dev, PAGE_SIZE);
 
 		bdev = swap_dentry->d_inode->i_bdev;
+		bdops = devfs_get_ops ( devfs_get_handle_from_inode
+					(swap_dentry->d_inode) );
+		if (bdops) bdev->bd_op = bdops;
 		error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
 		if (error)
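
Note on the __alloc_pages() watermark hunk: reading classfree(z) once into
a local means the pages_high, pages_low and pages_min comparisons all test
the same snapshot of the free count, instead of three separate reads of a
value other CPUs may be changing between tests. Below is a user-space model
of the cascade; the struct layout and threshold values are invented for
illustration.

#include <stdio.h>

struct zone_model {
	unsigned long free_pages;
	unsigned long pages_min, pages_low, pages_high;
	int low_on_memory;
};

static void check_watermarks(struct zone_model *z)
{
	unsigned long free = z->free_pages;	/* one snapshot, like classfree(z) */

	if (free > z->pages_high) {
		z->low_on_memory = 0;		/* pressure is over */
		return;
	}
	if (free <= z->pages_low) {
		printf("wake kswapd\n");	/* wake_up_interruptible(&kswapd_wait) */
		if (free <= z->pages_min)
			z->low_on_memory = 1;	/* caller must balance synchronously */
	}
}

int main(void)
{
	struct zone_model z = { 10, 16, 32, 48, 0 };

	check_watermarks(&z);
	printf("low_on_memory=%d\n", z.low_on_memory);
	return 0;
}
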
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e6cb394d9..02cf78030 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -457,12 +457,11 @@ DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
  * The background pageout daemon, started as a kernel thread
  * from the init process.
  *
- * This basically executes once a second, trickling out pages
- * so that we have _some_ free memory available even if there
- * is no other activity that frees anything up. This is needed
- * for things like routing etc, where we otherwise might have
- * all activity going on in asynchronous contexts that cannot
- * page things out.
+ * This basically trickles out pages so that we have _some_
+ * free memory available even if there is no other activity
+ * that frees anything up. This is needed for things like routing
+ * etc, where we otherwise might have all activity going on in
+ * asynchronous contexts that cannot page things out.
 *
 * If there are applications that are active memory-allocators
 * (most normal use), this basically shouldn't matter.
@@ -479,7 +478,7 @@ int kswapd(void *unused)
 	/*
 	 * Tell the memory management that we're a "memory allocator",
 	 * and that if we need more memory we should get access to it
-	 * regardless (see "__get_free_pages()"). "kswapd" should
+	 * regardless (see "__alloc_pages()"). "kswapd" should
 	 * never get caught in the normal page freeing logic.
 	 *
 	 * (Kswapd normally doesn't need memory anyway, but sometimes
@@ -492,9 +491,6 @@ int kswapd(void *unused)
 
 	while (1) {
 		/*
-		 * Wake up once a second to see if we need to make
-		 * more memory available.
-		 *
 		 * If we actually get into a low-memory situation,
 		 * the processes needing more memory will wake us
 		 * up on a more timely basis.
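
Note on the kswapd comment changes: with the once-a-second wording gone,
the documented contract is purely event-driven: kswapd sleeps on
kswapd_wait and is woken by allocators that fall below pages_low, as in
the __alloc_pages() hunk above. Below is a user-space model of that
sleep/wake loop, with a pthread condition variable standing in for the
kernel wait queue; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kswapd_wait = PTHREAD_COND_INITIALIZER;
static int need_memory = 0, stop = 0;

static void *kswapd_model(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!need_memory && !stop)		/* sleep until woken */
			pthread_cond_wait(&kswapd_wait, &lock);
		if (need_memory) {
			printf("kswapd: reclaiming pages\n");	/* the pageout work */
			need_memory = 0;
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Allocator side: below pages_low, wake the daemon. */
static void allocator_low_on_memory(void)
{
	pthread_mutex_lock(&lock);
	need_memory = 1;
	pthread_cond_signal(&kswapd_wait);	/* wake_up_interruptible() role */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, kswapd_model, NULL);
	allocator_low_on_memory();
	pthread_mutex_lock(&lock);
	stop = 1;
	pthread_cond_signal(&kswapd_wait);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}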