author		Ralf Baechle <ralf@linux-mips.org>	2000-02-23 00:40:54 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-02-23 00:40:54 +0000
commit		529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree		78f1c0b805f5656aa7b0417a043c5346f700a2cf /mm
parent		0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'mm')
-rw-r--r--	mm/bootmem.c	38
-rw-r--r--	mm/filemap.c	94
-rw-r--r--	mm/mlock.c	6
-rw-r--r--	mm/mmap.c	8
-rw-r--r--	mm/mprotect.c	2
-rw-r--r--	mm/page_alloc.c	107
-rw-r--r--	mm/slab.c	13
-rw-r--r--	mm/swapfile.c	13
-rw-r--r--	mm/vmscan.c	6
9 files changed, 152 insertions, 135 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 719ab56f9..43ade5c96 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -129,11 +129,10 @@ static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
 static void * __init __alloc_bootmem_core (bootmem_data_t *bdata,
 	unsigned long size, unsigned long align, unsigned long goal)
 {
-	int area = 0;
-	unsigned long i, start = 0, reserved;
+	unsigned long i, start = 0;
 	void *ret;
 	unsigned long offset, remaining_size;
-	unsigned long areasize, preferred;
+	unsigned long areasize, preferred, incr;
 	unsigned long eidx = bdata->node_low_pfn -
 				(bdata->node_boot_start >> PAGE_SHIFT);
@@ -145,26 +144,28 @@ static void * __init __alloc_bootmem_core (bootmem_data_t *bdata,
 	 */
 	if (goal && (goal >= bdata->node_boot_start) &&
 			((goal >> PAGE_SHIFT) < bdata->node_low_pfn)) {
-		preferred = (goal - bdata->node_boot_start) >> PAGE_SHIFT;
+		preferred = goal - bdata->node_boot_start;
 	} else
 		preferred = 0;
 
+	preferred = ((preferred + align - 1) & ~(align - 1)) >> PAGE_SHIFT;
 	areasize = (size+PAGE_SIZE-1)/PAGE_SIZE;
+	incr = align >> PAGE_SHIFT ? : 1;
 
 restart_scan:
-	for (i = preferred; i < eidx; i++) {
-		reserved = test_bit(i, bdata->node_bootmem_map);
-		if (!reserved) {
-			if (!area) {
-				area = 1;
-				start = i;
-			}
-			if (i - start + 1 == areasize)
-				goto found;
-		} else {
-			area = 0;
-			start = -1;
+	for (i = preferred; i < eidx; i += incr) {
+		unsigned long j;
+		if (test_bit(i, bdata->node_bootmem_map))
+			continue;
+		for (j = i + 1; j < i + areasize; ++j) {
+			if (j >= eidx)
+				goto fail_block;
+			if (test_bit (j, bdata->node_bootmem_map))
+				goto fail_block;
 		}
+		start = i;
+		goto found;
+	fail_block:;
 	}
 	if (preferred) {
 		preferred = 0;
@@ -183,13 +184,12 @@ found:
 	 * of this allocation's buffer? If yes then we can 'merge'
 	 * the previous partial page with this allocation.
 	 */
-	if (bdata->last_offset && (bdata->last_pos+1 == start)) {
+	if (align <= PAGE_SIZE
+	    && bdata->last_offset && bdata->last_pos+1 == start) {
 		offset = (bdata->last_offset+align-1) & ~(align-1);
 		if (offset > PAGE_SIZE)
 			BUG();
 		remaining_size = PAGE_SIZE-offset;
-		if (remaining_size > PAGE_SIZE)
-			BUG();
 		if (size < remaining_size) {
 			areasize = 0;
 			// last_pos unchanged
diff --git a/mm/filemap.c b/mm/filemap.c
index 2ef865555..4772ed254 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -98,7 +98,7 @@ void invalidate_inode_pages(struct inode * inode)
 	struct list_head *head, *curr;
 	struct page * page;
 
-	head = &inode->i_data.pages;
+	head = &inode->i_mapping->pages;
 
 	spin_lock(&pagecache_lock);
 	curr = head->next;
@@ -134,7 +134,7 @@ void truncate_inode_pages(struct inode * inode, loff_t lstart)
 	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 repeat:
-	head = &inode->i_data.pages;
+	head = &inode->i_mapping->pages;
 	spin_lock(&pagecache_lock);
 	curr = head->next;
 	while (curr != head) {
@@ -289,6 +289,12 @@ int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
 			goto cache_unlock_continue;
 
 		/*
+		 * We did the page aging part.
+		 */
+		if (nr_lru_pages < freepages.min * priority)
+			goto cache_unlock_continue;
+
+		/*
 		 * Is it a page swap page? If so, we want to
 		 * drop it if it is no longer used, even if it
 		 * were to be marked referenced..
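The __alloc_bootmem_core rework above (mm/bootmem.c) drops the running area/start state machine in favour of a stride-based scan: candidate start pages advance in alignment-sized steps (incr = align >> PAGE_SHIFT ? : 1) and an inner loop verifies the whole block is free before committing. A minimal userspace sketch of that first-fit-with-alignment idea, assuming a byte-per-page map rather than the kernel's bit array:

    #include <stddef.h>

    /* first_fit_aligned: find `count` consecutive free pages in map[0..npages),
     * trying candidate starts only at multiples of `incr` pages (mirroring
     * the incr stride in __alloc_bootmem_core).  map[i] != 0 means page i is
     * reserved.  Returns the start index, or -1 if no block fits. */
    static long first_fit_aligned(const unsigned char *map, size_t npages,
                                  size_t count, size_t incr)
    {
        for (size_t i = 0; i + count <= npages; i += incr) {
            size_t j;
            if (map[i])
                continue;               /* candidate start is reserved */
            for (j = i + 1; j < i + count; j++)
                if (map[j])
                    break;              /* hole inside the block: next candidate */
            if (j == i + count)
                return (long)i;         /* all `count` pages were free */
        }
        return -1;
    }

The kernel version additionally retries once from offset 0 (restart_scan) when the goal-derived preferred starting point turns up nothing.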
@@ -417,7 +423,7 @@ static int do_buffer_fdatasync(struct inode *inode, unsigned long start, unsigne
 	struct page *page;
 	int retval = 0;
 
-	head = &inode->i_data.pages;
+	head = &inode->i_mapping->pages;
 
 	spin_lock(&pagecache_lock);
 	curr = head->next;
@@ -519,11 +525,12 @@ static int add_to_page_cache_unique(struct page * page,
 static inline int page_cache_read(struct file * file, unsigned long offset)
 {
 	struct inode *inode = file->f_dentry->d_inode;
-	struct page **hash = page_hash(&inode->i_data, offset);
+	struct address_space *mapping = inode->i_mapping;
+	struct page **hash = page_hash(mapping, offset);
 	struct page *page;
 
 	spin_lock(&pagecache_lock);
-	page = __find_page_nolock(&inode->i_data, offset, *hash);
+	page = __find_page_nolock(mapping, offset, *hash);
 	spin_unlock(&pagecache_lock);
 	if (page)
 		return 0;
@@ -532,8 +539,8 @@ static inline int page_cache_read(struct file * file, unsigned long offset)
 	if (!page)
 		return -ENOMEM;
 
-	if (!add_to_page_cache_unique(page, &inode->i_data, offset, hash)) {
-		int error = inode->i_op->readpage(file->f_dentry, page);
+	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
+		int error = mapping->a_ops->readpage(file->f_dentry, page);
 		page_cache_release(page);
 		return error;
 	}
@@ -949,6 +956,7 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 {
 	struct dentry *dentry = filp->f_dentry;
 	struct inode *inode = dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
 	unsigned long index, offset;
 	struct page *cached_page;
 	int reada_ok;
@@ -1017,10 +1025,10 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
 		/*
 		 * Try to find the data in the page cache..
 		 */
-		hash = page_hash(&inode->i_data, index);
+		hash = page_hash(mapping, index);
 
 		spin_lock(&pagecache_lock);
-		page = __find_page_nolock(&inode->i_data, index, *hash);
+		page = __find_page_nolock(mapping, index, *hash);
 		if (!page)
 			goto no_cached_page;
 found_page:
@@ -1068,7 +1076,7 @@ page_not_up_to_date:
 
 readpage:
 		/* ... and start the actual read. The read will unlock the page. */
-		error = inode->i_op->readpage(filp->f_dentry, page);
+		error = mapping->a_ops->readpage(filp->f_dentry, page);
 
 		if (!error) {
 			if (Page_Uptodate(page))
@@ -1107,7 +1115,7 @@ no_cached_page:
 		 * dropped the page cache lock. Check for that.
 		 */
 		spin_lock(&pagecache_lock);
-		page = __find_page_nolock(&inode->i_data, index, *hash);
+		page = __find_page_nolock(mapping, index, *hash);
 		if (page)
 			goto found_page;
 	}
@@ -1116,7 +1124,7 @@ no_cached_page:
 	 * Ok, add the new page to the hash-queues...
 	 */
 	page = cached_page;
-	__add_to_page_cache(page, &inode->i_data, index, hash);
+	__add_to_page_cache(page, mapping, index, hash);
 	spin_unlock(&pagecache_lock);
 	cached_page = NULL;
 
@@ -1227,7 +1235,7 @@ asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t cou
 	in_inode = in_file->f_dentry->d_inode;
 	if (!in_inode)
 		goto fput_in;
-	if (!in_inode->i_op || !in_inode->i_op->readpage)
+	if (!in_inode->i_mapping->a_ops->readpage)
 		goto fput_in;
 	retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, in_file->f_pos, count);
 	if (retval)
@@ -1301,6 +1309,7 @@ struct page * filemap_nopage(struct vm_area_struct * area,
 	struct file *file = area->vm_file;
 	struct dentry *dentry = file->f_dentry;
 	struct inode *inode = dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
 	struct page *page, **hash, *old_page;
 	unsigned long size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
@@ -1319,9 +1328,9 @@ struct page * filemap_nopage(struct vm_area_struct * area,
 	/*
 	 * Do we have something in the page cache already?
 	 */
-	hash = page_hash(&inode->i_data, pgoff);
+	hash = page_hash(mapping, pgoff);
 retry_find:
-	page = __find_get_page(&inode->i_data, pgoff, hash);
+	page = __find_get_page(mapping, pgoff, hash);
 	if (!page)
 		goto no_cached_page;
 
@@ -1390,7 +1399,7 @@ page_not_uptodate:
 		goto success;
 	}
 
-	if (!inode->i_op->readpage(file->f_dentry, page)) {
+	if (!mapping->a_ops->readpage(file->f_dentry, page)) {
 		wait_on_page(page);
 		if (Page_Uptodate(page))
 			goto success;
@@ -1408,7 +1417,7 @@ page_not_uptodate:
 		goto success;
 	}
 	ClearPageError(page);
-	if (!inode->i_op->readpage(file->f_dentry, page)) {
+	if (!mapping->a_ops->readpage(file->f_dentry, page)) {
 		wait_on_page(page);
 		if (Page_Uptodate(page))
 			goto success;
@@ -1440,7 +1449,7 @@ static inline int do_write_page(struct inode * inode, struct file * file,
 		if (size_idx <= index)
 			return -EIO;
 	}
-	writepage = inode->i_op->writepage;
+	writepage = inode->i_mapping->a_ops->writepage;
 	lock_page(page);
 
 	retval = writepage(file->f_dentry, page);
@@ -1652,13 +1661,13 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
 
 	ops = &file_private_mmap;
 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
-		if (!inode->i_op || !inode->i_op->writepage)
+		if (!inode->i_mapping->a_ops->writepage)
 			return -EINVAL;
 		ops = &file_shared_mmap;
 	}
 	if (!inode->i_sb || !S_ISREG(inode->i_mode))
 		return -EACCES;
-	if (!inode->i_op || !inode->i_op->readpage)
+	if (!inode->i_mapping->a_ops->readpage)
 		return -ENOEXEC;
 	UPDATE_ATIME(inode);
 	vma->vm_ops = ops;
@@ -1810,6 +1819,21 @@ struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
 	return page;
 }
 
+static inline void remove_suid(struct inode *inode)
+{
+	unsigned int mode;
+
+	/* set S_IGID if S_IXGRP is set, and always set S_ISUID */
+	mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
+
+	/* was any of the uid bits set? */
+	mode &= inode->i_mode;
+	if (mode && !capable(CAP_FSETID)) {
+		inode->i_mode &= ~mode;
+		mark_inode_dirty(inode);
+	}
+}
+
 /*
  * Write to a file through the page cache. This is mainly for the
  * benefit of NFS and possibly other network-based file systems.
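The remove_suid helper added above strips the setuid/setgid bits on write with pure bit arithmetic. Since S_ISGID (02000) is exactly S_IXGRP (00010) times 0200, the multiply (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) yields S_ISGID only when group-execute is set, so files that carry S_ISGID without group-execute (the mandatory-locking convention) keep their bit, while S_ISUID is always a candidate. A standalone check of that arithmetic, assuming only the standard octal mode constants:

    #include <assert.h>
    #include <sys/stat.h>   /* S_ISUID 04000, S_ISGID 02000, S_IXGRP 00010 */

    /* Which suid/sgid bits a write should strip from `mode`:
     * always S_ISUID; S_ISGID only when group-execute is also set. */
    static unsigned int bits_to_strip(unsigned int mode)
    {
        unsigned int kill = (mode & S_IXGRP) * (S_ISGID / S_IXGRP) | S_ISUID;
        return kill & mode;     /* keep only bits actually present */
    }

    int main(void)
    {
        assert(bits_to_strip(04755) == 04000);  /* setuid: strip S_ISUID     */
        assert(bits_to_strip(02755) == 02000);  /* setgid + x: strip S_ISGID */
        assert(bits_to_strip(02644) == 0);      /* mandatory locking: keep   */
        assert(bits_to_strip(06775) == 06000);  /* both bits stripped        */
        return 0;
    }

In the kernel, any surviving bits are cleared from i_mode and the inode is marked dirty, unless the writer holds CAP_FSETID.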
@@ -1827,12 +1851,11 @@ struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
  * okir@monad.swb.de
  */
 ssize_t
-generic_file_write(struct file *file, const char *buf,
-		   size_t count, loff_t *ppos,
-		   writepage_t write_one_page)
+generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
 {
 	struct dentry *dentry = file->f_dentry;
 	struct inode *inode = dentry->d_inode;
+	struct address_space *mapping = inode->i_mapping;
 	unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
 	loff_t pos;
 	struct page *page, *cached_page;
@@ -1876,9 +1899,15 @@ generic_file_write(struct file *file, const char *buf,
 	}
 
 	status = 0;
+	if (count) {
+		remove_suid(inode);
+		inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+		mark_inode_dirty(inode);
+	}
 
 	while (count) {
 		unsigned long bytes, index, offset;
+		char *kaddr;
 
 		/*
 		 * Try to find the page in the cache. If it isn't there,
@@ -1891,7 +1920,7 @@ generic_file_write(struct file *file, const char *buf,
 			bytes = count;
 
 		status = -ENOMEM;	/* we'll assign it later anyway */
-		page = __grab_cache_page(&inode->i_data, index, &cached_page);
+		page = __grab_cache_page(mapping, index, &cached_page);
 		if (!page)
 			break;
 
@@ -1900,7 +1929,16 @@ generic_file_write(struct file *file, const char *buf,
 			PAGE_BUG(page);
 		}
 
-		status = write_one_page(file, page, offset, bytes, buf);
+		status = mapping->a_ops->prepare_write(page, offset, offset+bytes);
+		if (status)
+			goto unlock;
+		kaddr = (char*)page_address(page);
+		status = copy_from_user(kaddr+offset, buf, bytes);
+		if (status)
+			goto fail_write;
+		status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
+		if (!status)
+			status = bytes;
 
 		if (status >= 0) {
 			written += status;
@@ -1910,6 +1948,7 @@ generic_file_write(struct file *file, const char *buf,
 			if (pos > inode->i_size)
 				inode->i_size = pos;
 		}
+unlock:
 		/* Mark it unlocked again and drop the page.. */
 		UnlockPage(page);
 		page_cache_release(page);
@@ -1926,6 +1965,11 @@ generic_file_write(struct file *file, const char *buf,
 out:
 	up(&inode->i_sem);
 	return err;
+fail_write:
+	status = -EFAULT;
+	ClearPageUptodate(page);
+	kunmap(page);
+	goto unlock;
 }
 
 void __init page_cache_init(unsigned long mempages)
diff --git a/mm/mlock.c b/mm/mlock.c
index 59d11b922..c3e40db54 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -142,7 +142,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
 	if (on && !capable(CAP_IPC_LOCK))
 		return -EPERM;
-	len = (len + ~PAGE_MASK) & PAGE_MASK;
+	len = PAGE_ALIGN(len);
 	end = start + len;
 	if (end < start)
 		return -EINVAL;
@@ -191,7 +191,7 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
 	int error = -ENOMEM;
 
 	down(&current->mm->mmap_sem);
-	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
+	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
 	start &= PAGE_MASK;
 
 	locked = len >> PAGE_SHIFT;
@@ -220,7 +220,7 @@ asmlinkage long sys_munlock(unsigned long start, size_t len)
 	int ret;
 
 	down(&current->mm->mmap_sem);
-	len = (len + (start & ~PAGE_MASK) + ~PAGE_MASK) & PAGE_MASK;
+	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
 	start &= PAGE_MASK;
 	ret = do_mlock(start, len, 0);
 	up(&current->mm->mmap_sem);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -114,9 +114,9 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
 		goto out;
 	}
 
-	/* Check against rlimit and stack.. */
+	/* Check against rlimit.. */
 	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
-	if (rlim < RLIM_INFINITY && brk - mm->end_code > rlim)
+	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
 		goto out;
 
 	/* Check against existing mmap mappings.
 	 */
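Two patterns recur in the hunks above. generic_file_write now drives every page through three address_space_operations steps (prepare_write, a user copy into the page, commit_write) instead of the old filesystem-supplied write_one_page callback, and mlock switches its open-coded rounding to PAGE_ALIGN(len), which is the same (len + ~PAGE_MASK) & PAGE_MASK computation. A compressed userspace sketch of the new write loop's shape; the hook signatures here are placeholders, not the real 2.3.x prototypes (which take struct file and struct page arguments):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-ins for the two address_space_operations hooks (hypothetical
     * signatures, simplified from the kernel's). */
    struct a_ops {
        int (*prepare_write)(char *page, unsigned from, unsigned to);
        int (*commit_write)(char *page, unsigned from, unsigned to);
    };

    /* Write `count` bytes from `buf` at byte offset `pos`, one page at a
     * time, following the prepare -> copy -> commit pattern. */
    static long do_write(const struct a_ops *ops, char *backing,
                         const char *buf, size_t count, unsigned long pos)
    {
        long written = 0;
        while (count) {
            unsigned long index  = pos / PAGE_SIZE;   /* which page        */
            unsigned long offset = pos % PAGE_SIZE;   /* where inside it   */
            size_t bytes = PAGE_SIZE - offset;        /* stay in this page */
            if (bytes > count)
                bytes = count;

            char *page = backing + index * PAGE_SIZE; /* "grab" the page   */
            if (ops->prepare_write(page, offset, offset + bytes))
                break;                                /* fs refused        */
            memcpy(page + offset, buf, bytes);        /* copy_from_user    */
            if (ops->commit_write(page, offset, offset + bytes))
                break;

            buf += bytes; pos += bytes; count -= bytes; written += bytes;
        }
        return written;
    }

    static int prep(char *p, unsigned f, unsigned t) { (void)p; (void)f; (void)t; return 0; }
    static int comm(char *p, unsigned f, unsigned t) { (void)p; (void)f; (void)t; return 0; }

    int main(void)
    {
        static char disk[4 * PAGE_SIZE];
        const struct a_ops ops = { prep, comm };
        /* A write straddling a page boundary takes two loop iterations. */
        long n = do_write(&ops, disk, "hello", 5, PAGE_SIZE - 2);
        printf("wrote %ld bytes\n", n);
        return 0;
    }

The point of the split is that the filesystem only sees the block-mapping steps; the generic code owns the copy and the bookkeeping, including the fail_write path above that marks the page not up to date when copy_from_user faults.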
@@ -609,8 +609,10 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
 no_mmaps:
 	first = first >> PGDIR_SHIFT;
 	last = last >> PGDIR_SHIFT;
-	if (last > first)
+	if (last > first) {
 		clear_page_tables(mm, first, last-first);
+		flush_tlb_pgtables(mm, first << PGDIR_SHIFT, last << PGDIR_SHIFT);
+	}
 }
 
 /* Munmap is split into 2 main parts -- this part which finds
diff --git a/mm/mprotect.c b/mm/mprotect.c
index fd4249b1d..70f1d8e2c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -214,7 +214,7 @@ asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot
 
 	if (start & ~PAGE_MASK)
 		return -EINVAL;
-	len = (len + ~PAGE_MASK) & PAGE_MASK;
+	len = PAGE_ALIGN(len);
 	end = start + len;
 	if (end < start)
 		return -EINVAL;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b6d174188..b212a6252 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -29,7 +29,9 @@ int nr_lru_pages;
 LIST_HEAD(lru_cache);
 
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 };
+static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128, };
+static int zone_balance_min[MAX_NR_ZONES] = { 10 , 10, 10, };
+static int zone_balance_max[MAX_NR_ZONES] = { 255 , 255, 255, };
 
 /*
  * Free_page() adds the page to the free lists. This is optimized for
@@ -196,9 +198,6 @@ static inline struct page * rmqueue (zone_t *zone, unsigned long order)
 	return NULL;
 }
 
-#define ZONE_BALANCED(zone) \
-	(((zone)->free_pages > (zone)->pages_low) && (!(zone)->low_on_memory))
-
 static inline unsigned long classfree(zone_t *zone)
 {
 	unsigned long free = 0;
@@ -215,21 +214,6 @@ static inline unsigned long classfree(zone_t *zone)
 static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
 {
 	int freed;
-	unsigned long free = classfree(zone);
-
-	if (free >= zone->pages_low) {
-		if (!zone->low_on_memory)
-			return 1;
-		/*
-		 * Simple hysteresis: exit 'low memory mode' if
-		 * the upper limit has been reached:
-		 */
-		if (free >= zone->pages_high) {
-			zone->low_on_memory = 0;
-			return 1;
-		}
-	} else
-		zone->low_on_memory = 1;
 
 	/*
 	 * In the atomic allocation case we only 'kick' the
@@ -243,43 +227,6 @@ static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
 	return 1;
 }
 
-#if 0
-/*
- * We are still balancing memory in a global way:
- */
-static inline int balance_memory (zone_t *zone, int gfp_mask)
-{
-	unsigned long free = nr_free_pages();
-	static int low_on_memory = 0;
-	int freed;
-
-	if (free >= freepages.low) {
-		if (!low_on_memory)
-			return 1;
-		/*
-		 * Simple hysteresis: exit 'low memory mode' if
-		 * the upper limit has been reached:
-		 */
-		if (free >= freepages.high) {
-			low_on_memory = 0;
-			return 1;
-		}
-	} else
-		low_on_memory = 1;
-
-	/*
-	 * In the atomic allocation case we only 'kick' the
-	 * state machine, but do not try to free pages
-	 * ourselves.
-	 */
-	freed = try_to_free_pages(gfp_mask, zone);
-
-	if (!freed && !(gfp_mask & __GFP_HIGH))
-		return 0;
-	return 1;
-}
-#endif
-
 /*
  * This is the 'heart' of the zoned buddy allocator:
 */
@@ -310,11 +257,31 @@ struct page * __alloc_pages (zonelist_t *zonelist, unsigned long order)
 		 * further thought.
 		 */
 		if (!(current->flags & PF_MEMALLOC))
-			/*
-			 * fastpath
-			 */
-			if (!ZONE_BALANCED(z))
-				goto balance;
+		{
+			if (classfree(z) > z->pages_high)
+			{
+				if (z->low_on_memory)
+					z->low_on_memory = 0;
+			}
+			else
+			{
+				extern wait_queue_head_t kswapd_wait;
+
+				if (z->low_on_memory)
+					goto balance;
+
+				if (classfree(z) <= z->pages_low)
+				{
+					wake_up_interruptible(&kswapd_wait);
+
+					if (classfree(z) <= z->pages_min)
+					{
+						z->low_on_memory = 1;
+						goto balance;
+					}
+				}
+			}
+		}
 		/*
 		 * This is an optimization for the 'higher order zone
 		 * is empty' case - it can happen even in well-behaved
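The block merged into __alloc_pages above replaces the ZONE_BALANCED() fast path with a three-watermark policy: above pages_high the zone leaves low-memory mode, at or below pages_low kswapd is woken, and at or below pages_min the zone sets low_on_memory and the caller must balance. Once the flag is set, every allocation balances until free pages climb back over pages_high. A table-top simulation of that hysteresis, assuming nothing kernel-specific (WAKE_KSWAPD here still implies the allocation proceeds, as in the kernel):

    #include <stdio.h>

    struct zone { long free, pages_min, pages_low, pages_high; int low_on_memory; };

    enum action { ALLOC_FAST, WAKE_KSWAPD, MUST_BALANCE };

    /* Decide what an allocation should do, mirroring the watermark logic
     * merged into __alloc_pages (PF_MEMALLOC tasks skip this entirely). */
    static enum action classify(struct zone *z)
    {
        if (z->free > z->pages_high) {          /* plenty free: leave low mode */
            z->low_on_memory = 0;
            return ALLOC_FAST;
        }
        if (z->low_on_memory)                   /* hysteresis: keep balancing  */
            return MUST_BALANCE;
        if (z->free <= z->pages_low) {          /* getting tight               */
            if (z->free <= z->pages_min) {      /* critical: enter low mode    */
                z->low_on_memory = 1;
                return MUST_BALANCE;
            }
            return WAKE_KSWAPD;                 /* poke the daemon, allocate   */
        }
        return ALLOC_FAST;
    }

    int main(void)
    {
        struct zone z = { .pages_min = 10, .pages_low = 20, .pages_high = 30 };
        long samples[] = { 50, 25, 15, 9, 25, 31 };
        for (int i = 0; i < 6; i++) {
            z.free = samples[i];
            printf("free=%2ld -> %d (low_on_memory=%d)\n",
                   samples[i], classify(&z), z.low_on_memory);
        }
        return 0;
    }

Note the fifth sample: 25 free pages would be fine on the way down, but with low_on_memory already set the allocator keeps balancing until the zone recovers past pages_high. That is the hysteresis the removed #if 0 balance_memory() implemented globally with a static flag.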
@@ -378,7 +345,7 @@ unsigned int nr_free_buffer_pages (void)
 	zone_t *zone;
 	int i;
 
-	sum = nr_lru_pages;
+	sum = nr_lru_pages - atomic_read(&page_cache_size);
 	for (i = 0; i < NUMNODES; i++)
 		for (zone = NODE_DATA(i)->node_zones; zone <= NODE_DATA(i)->node_zones+ZONE_NORMAL; zone++)
 			sum += zone->free_pages;
@@ -515,12 +482,12 @@ static inline void build_zonelists(pg_data_t *pgdat)
  * - clear the memory bitmaps
 */
 void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
-	unsigned int *zones_size, unsigned long zone_start_paddr)
+	unsigned long *zones_size, unsigned long zone_start_paddr)
 {
 	struct page *p, *lmem_map;
 	unsigned long i, j;
 	unsigned long map_size;
-	unsigned int totalpages, offset;
+	unsigned long totalpages, offset;
 	unsigned int cumulative = 0;
 
 	totalpages = 0;
@@ -528,7 +495,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		unsigned long size = zones_size[i];
 		totalpages += size;
 	}
-	printk("On node %d totalpages: %08x\n", nid, totalpages);
+	printk("On node %d totalpages: %lu\n", nid, totalpages);
 
 	/*
 	 * Select nr of pages we try to keep free for important stuff
@@ -579,7 +546,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 
 		size = zones_size[j];
 
-		printk("zone(%ld): %ld pages.\n", j, size);
+		printk("zone(%lu): %lu pages.\n", j, size);
 		zone->size = size;
 		zone->name = zone_names[j];
 		zone->lock = SPIN_LOCK_UNLOCKED;
@@ -590,7 +557,11 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 		zone->offset = offset;
 		cumulative += size;
 		mask = (cumulative / zone_balance_ratio[j]);
-		if (mask < 1) mask = 1;
+		if (mask < zone_balance_min[j])
+			mask = zone_balance_min[j];
+		else if (mask > zone_balance_max[j])
+			mask = zone_balance_max[j];
+
 		zone->pages_min = mask;
 		zone->pages_low = mask*2;
 		zone->pages_high = mask*3;
 		zone->low_on_memory = 0;
@@ -622,7 +593,7 @@ void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
 	build_zonelists(pgdat);
 }
 
-void __init free_area_init(unsigned int *zones_size)
+void __init free_area_init(unsigned long *zones_size)
 {
 	free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size, 0);
 }
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,7 +114,7 @@
 /* If there is a different PAGE_SIZE around, and it works with this allocator,
  * then change the following.
 */
-#if	(PAGE_SIZE != 8192 && PAGE_SIZE != 4096 && PAGE_SIZE != 32768)
+#if	(PAGE_SIZE != 8192 && PAGE_SIZE != 4096 && PAGE_SIZE != 16384 && PAGE_SIZE != 32768)
 #error	Your page size is probably not correctly supported - please check
 #endif
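free_area_init_core now clamps the per-zone watermark base between zone_balance_min and zone_balance_max instead of merely forcing it to at least 1. With the defaults added above (ratio 128, clamp [10, 255]): a 64 MiB machine with 4 KiB pages has cumulative = 16384 pages, so mask = 16384/128 = 128 and pages_min/low/high = 128/256/384, while a tiny 2 MiB zone computes 4 and is clamped up to 10. A few lines reproducing the arithmetic:

    #include <stdio.h>

    /* Reproduce the zone watermark sizing from free_area_init_core(),
     * using the ratios merged in this commit (128, clamped to [10, 255]). */
    int main(void)
    {
        const long ratio = 128, min = 10, max = 255;
        long cumulative_pages[] = { 512, 16384, 131072 }; /* 2M, 64M, 512M @ 4K */

        for (int i = 0; i < 3; i++) {
            long mask = cumulative_pages[i] / ratio;
            if (mask < min)
                mask = min;
            else if (mask > max)
                mask = max;
            printf("%7ld pages: pages_min=%3ld pages_low=%3ld pages_high=%3ld\n",
                   cumulative_pages[i], mask, mask * 2, mask * 3);
        }
        return 0;
    }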
@@ -1880,11 +1880,10 @@ next:
 	} while (--scan && searchp != clock_searchp);
 
 	clock_searchp = searchp;
-	up(&cache_chain_sem);
 
 	if (!best_cachep) {
 		/* couldn't find anything to reap */
-		return;
+		goto out;
 	}
 
 	spin_lock_irq(&best_cachep->c_spinlock);
@@ -1918,6 +1917,8 @@ good_dma:
 	}
 dma_fail:
 	spin_unlock_irq(&best_cachep->c_spinlock);
+out:
+	up(&cache_chain_sem);
 	return;
 }
 
@@ -2006,14 +2007,14 @@ get_slabinfo(char *buf)
 			unsigned long allocs = cachep->c_num_allocations;
 			errors = (unsigned long) atomic_read(&cachep->c_errors);
 			spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
-			len += sprintf(buf+len, "%-16s %6lu %6lu %4lu %4lu %4lu %6lu %7lu %5lu %4lu %4lu\n",
-				cachep->c_name, active_objs, num_objs, active_slabs, num_slabs,
+			len += sprintf(buf+len, "%-16s %6lu %6lu %6lu %4lu %4lu %4lu %6lu %7lu %5lu %4lu %4lu\n",
+				cachep->c_name, active_objs, num_objs, cachep->c_offset, active_slabs, num_slabs,
 				(1<<cachep->c_gfporder)*num_slabs,
 				high, allocs, grown, reaped, errors);
 		}
 #else
 		spin_unlock_irqrestore(&cachep->c_spinlock, save_flags);
-		len += sprintf(buf+len, "%-17s %6lu %6lu\n", cachep->c_name, active_objs, num_objs);
+		len += sprintf(buf+len, "%-17s %6lu %6lu %6lu\n", cachep->c_name, active_objs, num_objs, cachep->c_offset);
 #endif	/* SLAB_STATS */
 	} while ((cachep = cachep->c_nextp) != &cache_cache);
 	up(&cache_chain_sem);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 53a561201..dc647f2b0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -445,12 +445,12 @@ asmlinkage long sys_swapoff(const char * specialfile)
 	struct swap_info_struct * p = NULL;
 	struct dentry * dentry;
 	int i, type, prev;
-	int err = -EPERM;
+	int err;
 
-	lock_kernel();
 	if (!capable(CAP_SYS_ADMIN))
-		goto out;
+		return -EPERM;
 
+	lock_kernel();
 	dentry = namei(specialfile);
 	err = PTR_ERR(dentry);
 	if (IS_ERR(dentry))
@@ -587,7 +587,7 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
 	struct dentry * swap_dentry;
 	unsigned int type;
 	int i, j, prev;
-	int error = -EPERM;
+	int error;
 	static int least_priority = 0;
 	union swap_header *swap_header = 0;
 	int swap_header_version;
@@ -596,13 +596,14 @@ asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
 	int swapfilesize;
 	struct block_device *bdev = NULL;
 
-	lock_kernel();
 	if (!capable(CAP_SYS_ADMIN))
-		goto out;
+		return -EPERM;
 
+	lock_kernel();
 	p = swap_info;
 	for (type = 0 ; type < nr_swapfiles ; type++,p++)
 		if (!(p->flags & SWP_USED))
 			break;
+	error = -EPERM;
 	if (type >= MAX_SWAPFILES)
 		goto out;
 	if (type >= nr_swapfiles)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 231cbf8f7..e6cb394d9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -451,7 +451,7 @@ done:
 	return priority >= 0;
 }
 
-static struct task_struct *kswapd_process;
+DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
 
 /*
  * The background pageout daemon, started as a kernel thread
@@ -471,7 +471,6 @@ int kswapd(void *unused)
 {
 	struct task_struct *tsk = current;
 
-	kswapd_process = tsk;
 	tsk->session = 1;
 	tsk->pgrp = 1;
 	strcpy(tsk->comm, "kswapd");
@@ -510,7 +509,7 @@ int kswapd(void *unused)
 			run_task_queue(&tq_disk);
 		} while (!tsk->need_resched);
 		tsk->state = TASK_INTERRUPTIBLE;
-		schedule_timeout(HZ);
+		interruptible_sleep_on(&kswapd_wait);
 	}
 }
 
@@ -533,7 +532,6 @@ int try_to_free_pages(unsigned int gfp_mask, zone_t *zone)
 {
 	int retval = 1;
 
-	wake_up_process(kswapd_process);
 	if (gfp_mask & __GFP_WAIT) {
 		current->flags |= PF_MEMALLOC;
 		retval = do_try_to_free_pages(gfp_mask, zone);
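The vmscan.c side completes the kswapd rework: the private kswapd_process task pointer becomes a declared wait queue, kswapd sleeps on it instead of polling once a second via schedule_timeout(HZ), and try_to_free_pages no longer wakes the daemon on every call; the wakeup now comes from the __alloc_pages watermark check earlier in this patch. A userspace analogue of that sleep/wake pairing, using a POSIX condition variable in place of the kernel wait queue (an illustration of the pattern only, not the kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for DECLARE_WAIT_QUEUE_HEAD(kswapd_wait). */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  kswapd_wait = PTHREAD_COND_INITIALIZER;
    static long free_pages = 100;
    static const long pages_low = 20;

    /* Background reclaimer: block until an allocator signals pressure,
     * then "free" some pages. This is the kswapd side of the handshake. */
    static void *kswapd(void *unused)
    {
        (void)unused;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (free_pages > pages_low)              /* nothing to do    */
                pthread_cond_wait(&kswapd_wait, &lock); /* sleep, no polling */
            free_pages += 50;                           /* pretend reclaim  */
            printf("kswapd: reclaimed, free=%ld\n", free_pages);
        }
        return NULL;
    }

    /* Allocation path: consume pages and wake the daemon only when the
     * zone sinks below its low watermark (the __alloc_pages side). */
    static void alloc_pages(long n)
    {
        pthread_mutex_lock(&lock);
        free_pages -= n;
        if (free_pages <= pages_low)
            pthread_cond_signal(&kswapd_wait);          /* wake_up_interruptible */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t tid;
        pthread_create(&tid, NULL, kswapd, NULL);
        for (int i = 0; i < 5; i++) {
            alloc_pages(30);
            usleep(10000);                              /* let kswapd run */
        }
        return 0;
    }

The gain is the same in both settings: the reclaimer consumes no CPU while memory is plentiful, and it reacts immediately, rather than up to a second later, when an allocator crosses the low watermark.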