author     Ralf Baechle <ralf@linux-mips.org>   2000-11-28 03:58:46 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2000-11-28 03:58:46 +0000
commit     b63ad0882a16a5d28003e57f2b0b81dee3fb322b
tree       0a343ce219e2b8b38a5d702d66032c57b83d9720 /mm
parent     a9d7bff9a84dba79609a0002e5321b74c4d64c64
Merge with 2.4.0-test11.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    |  27
-rw-r--r--  mm/highmem.c    |  25
-rw-r--r--  mm/memory.c     |  31
-rw-r--r--  mm/mlock.c      |  14
-rw-r--r--  mm/mmap.c       |  80
-rw-r--r--  mm/mprotect.c   |  14
-rw-r--r--  mm/mremap.c     |   4
-rw-r--r--  mm/oom_kill.c   |  18
-rw-r--r--  mm/swap_state.c |   2
-rw-r--r--  mm/vmalloc.c    |  22
-rw-r--r--  mm/vmscan.c     |   3
11 files changed, 161 insertions, 79 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index b19f4c5b3..a191cc2f4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -387,7 +387,7 @@ static inline void __add_to_page_cache(struct page * page,
 	if (PageLocked(page))
 		BUG();
 
-	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced));
+	flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced) | (1 << PG_arch_1));
 	page->flags = flags | (1 << PG_locked);
 	page_cache_get(page);
 	page->index = offset;
@@ -1095,14 +1095,14 @@ no_cached_page:
 
 static int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
 {
-	unsigned long kaddr;
+	char *kaddr;
 	unsigned long left, count = desc->count;
 
 	if (size > count)
 		size = count;
 
 	kaddr = kmap(page);
-	left = __copy_to_user(desc->buf, (void *)(kaddr + offset), size);
+	left = __copy_to_user(desc->buf, kaddr + offset, size);
 	kunmap(page);
 
 	if (left) {
@@ -1146,7 +1146,7 @@ ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *
 
 static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset , unsigned long size)
 {
-	unsigned long kaddr;
+	char *kaddr;
 	ssize_t written;
 	unsigned long count = desc->count;
 	struct file *file = (struct file *) desc->buf;
@@ -1158,8 +1158,7 @@ static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned
 	set_fs(KERNEL_DS);
 
 	kaddr = kmap(page);
-	written = file->f_op->write(file, (char *)kaddr + offset,
-						size, &file->f_pos);
+	written = file->f_op->write(file, kaddr + offset, size, &file->f_pos);
 	kunmap(page);
 	set_fs(old_fs);
 	if (written < 0) {
@@ -1209,8 +1208,6 @@ asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t cou
 	if (!out_file->f_op || !out_file->f_op->write)
 		goto fput_out;
 	out_inode = out_file->f_dentry->d_inode;
-	if (!out_inode)
-		goto fput_out;
 	retval = locks_verify_area(FLOCK_VERIFY_WRITE, out_inode, out_file, out_file->f_pos, count);
 	if (retval)
 		goto fput_out;
@@ -1814,11 +1811,13 @@ static long madvise_fixup_start(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = end;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -1838,10 +1837,12 @@ static long madvise_fixup_end(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_end = start;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -1871,15 +1872,17 @@ static long madvise_fixup_middle(struct vm_area_struct * vma,
 		vma->vm_ops->open(left);
 		vma->vm_ops->open(right);
 	}
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = start;
 	vma->vm_end = end;
 	setup_read_behavior(vma, behavior);
 	vma->vm_raend = 0;
-	insert_vm_struct(current->mm, left);
-	insert_vm_struct(current->mm, right);
+	__insert_vm_struct(current->mm, left);
+	__insert_vm_struct(current->mm, right);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
diff --git a/mm/highmem.c b/mm/highmem.c
index d83d9bb87..5e8ebde4b 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -29,7 +29,7 @@ struct page * prepare_highmem_swapout(struct page * page)
 {
 	struct page *new_page;
 	unsigned long regular_page;
-	unsigned long vaddr;
+
 	/*
 	 * If this is a highmem page so it can't be swapped out directly
 	 * otherwise the b_data buffer addresses will break
@@ -50,8 +50,7 @@ struct page * prepare_highmem_swapout(struct page * page)
 	if (!regular_page)
 		return NULL;
 
-	vaddr = kmap(page);
-	copy_page((void *)regular_page, (void *)vaddr);
+	copy_page((void *)regular_page, kmap(page));
 	kunmap(page);
 
 	/*
@@ -67,7 +66,6 @@ struct page * prepare_highmem_swapout(struct page * page)
 struct page * replace_with_highmem(struct page * page)
 {
 	struct page *highpage;
-	unsigned long vaddr;
 
 	if (PageHighMem(page) || !nr_free_highpages())
 		return page;
@@ -80,8 +78,7 @@ struct page * replace_with_highmem(struct page * page)
 		return page;
 	}
 
-	vaddr = kmap(highpage);
-	copy_page((void *)vaddr, page_address(page));
+	copy_page(kmap(highpage), page_address(page));
 	kunmap(highpage);
 
 	if (page->mapping)
@@ -188,7 +185,7 @@ start:
 	return vaddr;
 }
 
-unsigned long kmap_high(struct page *page)
+void *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
 
@@ -206,7 +203,7 @@ unsigned long kmap_high(struct page *page)
 	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
 		BUG();
 	spin_unlock(&kmap_lock);
-	return vaddr;
+	return (void*) vaddr;
 }
 
 void kunmap_high(struct page *page)
@@ -242,7 +239,7 @@ static inline void copy_from_high_bh (struct buffer_head *to,
 		struct buffer_head *from)
 {
 	struct page *p_from;
-	unsigned long vfrom;
+	char *vfrom;
 	unsigned long flags;
 
 	p_from = from->b_page;
@@ -254,7 +251,7 @@ static inline void copy_from_high_bh (struct buffer_head *to,
 	__save_flags(flags);
 	__cli();
 	vfrom = kmap_atomic(p_from, KM_BOUNCE_WRITE);
-	memcpy(to->b_data, (char *)vfrom + bh_offset(from), to->b_size);
+	memcpy(to->b_data, vfrom + bh_offset(from), to->b_size);
 	kunmap_atomic(vfrom, KM_BOUNCE_WRITE);
 	__restore_flags(flags);
 }
@@ -263,14 +260,14 @@ static inline void copy_to_high_bh_irq (struct buffer_head *to,
 		struct buffer_head *from)
 {
 	struct page *p_to;
-	unsigned long vto;
+	char *vto;
 	unsigned long flags;
 
 	p_to = to->b_page;
 	__save_flags(flags);
 	__cli();
 	vto = kmap_atomic(p_to, KM_BOUNCE_READ);
-	memcpy((char *)vto + bh_offset(to), from->b_data, to->b_size);
+	memcpy(vto + bh_offset(to), from->b_data, to->b_size);
 	kunmap_atomic(vto, KM_BOUNCE_READ);
 	__restore_flags(flags);
 }
@@ -310,8 +307,6 @@ repeat_bh:
 	bh = kmem_cache_alloc(bh_cachep, SLAB_BUFFER);
 	if (!bh) {
 		wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
-		current->policy |= SCHED_YIELD;
-		schedule();
 		goto repeat_bh;
 	}
 	/*
@@ -324,8 +319,6 @@ repeat_page:
 	page = alloc_page(GFP_BUFFER);
 	if (!page) {
 		wakeup_bdflush(1); /* Sets task->state to TASK_RUNNING */
-		current->policy |= SCHED_YIELD;
-		schedule();
 		goto repeat_page;
 	}
 	set_bh_page(bh, page, 0);
diff --git a/mm/memory.c b/mm/memory.c
index 11048ddce..f4bd5163e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1201,29 +1201,32 @@ static inline int handle_pte_fault(struct mm_struct *mm,
 {
 	pte_t entry;
 
+	/*
+	 * We need the page table lock to synchronize with kswapd
+	 * and the SMP-safe atomic PTE updates.
+	 */
+	spin_lock(&mm->page_table_lock);
 	entry = *pte;
 	if (!pte_present(entry)) {
+		/*
+		 * If it truly wasn't present, we know that kswapd
+		 * and the PTE updates will not touch it later. So
+		 * drop the lock.
+		 */
+		spin_unlock(&mm->page_table_lock);
 		if (pte_none(entry))
 			return do_no_page(mm, vma, address, write_access, pte);
 		return do_swap_page(mm, vma, address, pte, pte_to_swp_entry(entry), write_access);
 	}
 
-	/*
-	 * Ok, the entry was present, we need to get the page table
-	 * lock to synchronize with kswapd, and verify that the entry
-	 * didn't change from under us..
-	 */
-	spin_lock(&mm->page_table_lock);
-	if (pte_same(entry, *pte)) {
-		if (write_access) {
-			if (!pte_write(entry))
-				return do_wp_page(mm, vma, address, pte, entry);
+	if (write_access) {
+		if (!pte_write(entry))
+			return do_wp_page(mm, vma, address, pte, entry);
 
-			entry = pte_mkdirty(entry);
-		}
-		entry = pte_mkyoung(entry);
-		establish_pte(vma, address, pte, entry);
+		entry = pte_mkdirty(entry);
 	}
+	entry = pte_mkyoung(entry);
+	establish_pte(vma, address, pte, entry);
 	spin_unlock(&mm->page_table_lock);
 	return 1;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index f684a3c60..551d61d39 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -36,11 +36,13 @@ static inline int mlock_fixup_start(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = end;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -61,10 +63,12 @@ static inline int mlock_fixup_end(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_end = start;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -96,15 +100,17 @@ static inline int mlock_fixup_middle(struct vm_area_struct * vma,
 		vma->vm_ops->open(left);
 		vma->vm_ops->open(right);
 	}
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = start;
 	vma->vm_end = end;
 	vma->vm_flags = newflags;
 	vma->vm_raend = 0;
-	insert_vm_struct(current->mm, left);
-	insert_vm_struct(current->mm, right);
+	__insert_vm_struct(current->mm, left);
+	__insert_vm_struct(current->mm, right);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -67,7 +67,7 @@ int vm_enough_memory(long pages)
 }
 
 /* Remove one vm structure from the inode's i_mapping address space. */
-static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
+static inline void __remove_shared_vm_struct(struct vm_area_struct *vma)
 {
 	struct file * file = vma->vm_file;
 
@@ -75,14 +75,41 @@ static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
 		struct inode *inode = file->f_dentry->d_inode;
 		if (vma->vm_flags & VM_DENYWRITE)
 			atomic_inc(&inode->i_writecount);
-		spin_lock(&inode->i_mapping->i_shared_lock);
 		if(vma->vm_next_share)
 			vma->vm_next_share->vm_pprev_share = vma->vm_pprev_share;
 		*vma->vm_pprev_share = vma->vm_next_share;
-		spin_unlock(&inode->i_mapping->i_shared_lock);
 	}
 }
 
+static inline void remove_shared_vm_struct(struct vm_area_struct *vma)
+{
+	lock_vma_mappings(vma);
+	__remove_shared_vm_struct(vma);
+	unlock_vma_mappings(vma);
+}
+
+void lock_vma_mappings(struct vm_area_struct *vma)
+{
+	struct address_space *mapping;
+
+	mapping = NULL;
+	if (vma->vm_file)
+		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+	if (mapping)
+		spin_lock(&mapping->i_shared_lock);
+}
+
+void unlock_vma_mappings(struct vm_area_struct *vma)
+{
+	struct address_space *mapping;
+
+	mapping = NULL;
+	if (vma->vm_file)
+		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+	if (mapping)
+		spin_unlock(&mapping->i_shared_lock);
+}
+
 /*
  * sys_brk() for the most part doesn't need the global kernel
  * lock, except when an application is doing something nasty
@@ -316,13 +343,22 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
 	 * after the call. Save the values we need now ...
 	 */
 	flags = vma->vm_flags;
-	addr = vma->vm_start; /* can addr have changed?? */
+
+	/* Can addr have changed??
+	 *
+	 * Answer: Yes, several device drivers can do it in their
+	 *         f_op->mmap method. -DaveM
+	 */
+	addr = vma->vm_start;
+
+	lock_vma_mappings(vma);
 	spin_lock(&mm->page_table_lock);
-	insert_vm_struct(mm, vma);
+	__insert_vm_struct(mm, vma);
 	if (correct_wcount)
 		atomic_inc(&file->f_dentry->d_inode->i_writecount);
 	merge_segments(mm, vma->vm_start, vma->vm_end);
 	spin_unlock(&mm->page_table_lock);
+	unlock_vma_mappings(vma);
 
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
@@ -534,10 +570,12 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
 	/* Work out to one of the ends. */
 	if (end == area->vm_end) {
 		area->vm_end = addr;
+		lock_vma_mappings(area);
 		spin_lock(&mm->page_table_lock);
 	} else if (addr == area->vm_start) {
 		area->vm_pgoff += (end - area->vm_start) >> PAGE_SHIFT;
 		area->vm_start = end;
+		lock_vma_mappings(area);
 		spin_lock(&mm->page_table_lock);
 	} else {
 	/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
@@ -560,12 +598,18 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
 		if (mpnt->vm_ops && mpnt->vm_ops->open)
 			mpnt->vm_ops->open(mpnt);
 		area->vm_end = addr; /* Truncate area */
+
+		/* Because mpnt->vm_file == area->vm_file this locks
+		 * things correctly.
+		 */
+		lock_vma_mappings(area);
 		spin_lock(&mm->page_table_lock);
-		insert_vm_struct(mm, mpnt);
+		__insert_vm_struct(mm, mpnt);
 	}
 
-	insert_vm_struct(mm, area);
+	__insert_vm_struct(mm, area);
 	spin_unlock(&mm->page_table_lock);
+	unlock_vma_mappings(area);
 	return extra;
 }
 
@@ -811,10 +855,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
 	flags = vma->vm_flags;
 	addr = vma->vm_start;
 
+	lock_vma_mappings(vma);
 	spin_lock(&mm->page_table_lock);
-	insert_vm_struct(mm, vma);
+	__insert_vm_struct(mm, vma);
 	merge_segments(mm, vma->vm_start, vma->vm_end);
 	spin_unlock(&mm->page_table_lock);
+	unlock_vma_mappings(vma);
 
 	mm->total_vm += len >> PAGE_SHIFT;
 	if (flags & VM_LOCKED) {
@@ -877,9 +923,10 @@ void exit_mmap(struct mm_struct * mm)
 }
 
 /* Insert vm structure into process list sorted by address
- * and into the inode's i_mmap ring.
+ * and into the inode's i_mmap ring. If vm_file is non-NULL
+ * then the i_shared_lock must be held here.
  */
-void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
+void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
 {
 	struct vm_area_struct **pprev;
 	struct file * file;
@@ -916,15 +963,20 @@ void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
 			head = &mapping->i_mmap_shared;
 
 		/* insert vmp into inode's share list */
-		spin_lock(&mapping->i_shared_lock);
 		if((vmp->vm_next_share = *head) != NULL)
 			(*head)->vm_pprev_share = &vmp->vm_next_share;
 		*head = vmp;
 		vmp->vm_pprev_share = head;
-		spin_unlock(&mapping->i_shared_lock);
 	}
 }
 
+void insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vmp)
+{
+	lock_vma_mappings(vmp);
+	__insert_vm_struct(mm, vmp);
+	unlock_vma_mappings(vmp);
+}
+
 /* Merge the list of memory segments if possible.
  * Redundant vm_area_structs are freed.
  * This assumes that the list is ordered by address.
@@ -986,11 +1038,13 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
 			mpnt->vm_pgoff += (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
 			mpnt->vm_start = mpnt->vm_end;
 			spin_unlock(&mm->page_table_lock);
+			unlock_vma_mappings(mpnt);
 			mpnt->vm_ops->close(mpnt);
+			lock_vma_mappings(mpnt);
 			spin_lock(&mm->page_table_lock);
 		}
 		mm->map_count--;
-		remove_shared_vm_struct(mpnt);
+		__remove_shared_vm_struct(mpnt);
 		if (mpnt->vm_file)
 			fput(mpnt->vm_file);
 		kmem_cache_free(vm_area_cachep, mpnt);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7b61abb3e..64c178b31 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -118,11 +118,13 @@ static inline int mprotect_fixup_start(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = end;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -145,10 +147,12 @@ static inline int mprotect_fixup_end(struct vm_area_struct * vma,
 		get_file(n->vm_file);
 	if (n->vm_ops && n->vm_ops->open)
 		n->vm_ops->open(n);
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_end = start;
-	insert_vm_struct(current->mm, n);
+	__insert_vm_struct(current->mm, n);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
@@ -179,6 +183,7 @@ static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
 		vma->vm_ops->open(left);
 		vma->vm_ops->open(right);
 	}
+	lock_vma_mappings(vma);
 	spin_lock(&vma->vm_mm->page_table_lock);
 	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
 	vma->vm_start = start;
@@ -186,9 +191,10 @@ static inline int mprotect_fixup_middle(struct vm_area_struct * vma,
 	vma->vm_flags = newflags;
 	vma->vm_raend = 0;
 	vma->vm_page_prot = prot;
-	insert_vm_struct(current->mm, left);
-	insert_vm_struct(current->mm, right);
+	__insert_vm_struct(current->mm, left);
+	__insert_vm_struct(current->mm, right);
 	spin_unlock(&vma->vm_mm->page_table_lock);
+	unlock_vma_mappings(vma);
 	return 0;
 }
 
diff --git a/mm/mremap.c b/mm/mremap.c
index 719ca1ec1..764cfabb8 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -141,10 +141,12 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
 			get_file(new_vma->vm_file);
 		if (new_vma->vm_ops && new_vma->vm_ops->open)
 			new_vma->vm_ops->open(new_vma);
+		lock_vma_mappings(vma);
 		spin_lock(&current->mm->page_table_lock);
-		insert_vm_struct(current->mm, new_vma);
+		__insert_vm_struct(current->mm, new_vma);
 		merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
 		spin_unlock(&current->mm->page_table_lock);
+		unlock_vma_mappings(vma);
 		do_munmap(current->mm, addr, old_len);
 		current->mm->total_vm += new_len >> PAGE_SHIFT;
 		if (new_vma->vm_flags & VM_LOCKED) {
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9882fe7cd..45e865d25 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -117,18 +117,18 @@ static int badness(struct task_struct *p)
  */
 static struct task_struct * select_bad_process(void)
 {
-	int points = 0, maxpoints = 0;
+	int maxpoints = 0;
 	struct task_struct *p = NULL;
 	struct task_struct *chosen = NULL;
 
 	read_lock(&tasklist_lock);
-	for_each_task(p)
-	{
-		if (p->pid)
-			points = badness(p);
-		if (points > maxpoints) {
-			chosen = p;
-			maxpoints = points;
+	for_each_task(p) {
+		if (p->pid) {
+			int points = badness(p);
+			if (points > maxpoints) {
+				chosen = p;
+				maxpoints = points;
+			}
 		}
 	}
 	read_unlock(&tasklist_lock);
@@ -156,7 +156,7 @@ void oom_kill(void)
 	if (p == NULL)
 		panic("Out of memory and no killable processes...\n");
 
-	printk(KERN_ERR "Out of Memory: Killed process %d (%s).", p->pid, p->comm);
+	printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", p->pid, p->comm);
 
 	/*
 	 * We give our sacrificial lamb high priority and access to
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d26c66f54..3a91d955e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -58,7 +58,7 @@ void add_to_swap_cache(struct page *page, swp_entry_t entry)
 		BUG();
 	if (page->mapping)
 		BUG();
-	flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced));
+	flags = page->flags & ~((1 << PG_error) | (1 << PG_dirty) | (1 << PG_referenced) | (1 << PG_arch_1));
 	page->flags = flags | (1 << PG_uptodate);
 	add_to_page_cache_locked(page, &swapper_space, entry.val);
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 15261612e..339cffb5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -9,6 +9,7 @@
 #include <linux/malloc.h>
 #include <linux/vmalloc.h>
 #include <linux/spinlock.h>
+#include <linux/smp_lock.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -137,24 +138,31 @@ inline int vmalloc_area_pages (unsigned long address, unsigned long size,
 {
 	pgd_t * dir;
 	unsigned long end = address + size;
+	int ret;
 
 	dir = pgd_offset_k(address);
 	flush_cache_all();
+	lock_kernel();
 	do {
 		pmd_t *pmd;
 
 		pmd = pmd_alloc_kernel(dir, address);
+		ret = -ENOMEM;
 		if (!pmd)
-			return -ENOMEM;
+			break;
 
+		ret = -ENOMEM;
 		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
-			return -ENOMEM;
+			break;
 
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
+
+		ret = 0;
 	} while (address && (address < end));
+	unlock_kernel();
 	flush_tlb_all();
-	return 0;
+	return ret;
 }
 
 struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
@@ -165,9 +173,15 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
 	if (!area)
 		return NULL;
+	size += PAGE_SIZE;
 	addr = VMALLOC_START;
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
+		if ((size + addr) < addr) {
+			write_unlock(&vmlist_lock);
+			kfree(area);
+			return NULL;
+		}
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
 		addr = tmp->size + (unsigned long) tmp->addr;
@@ -179,7 +193,7 @@ struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
 	}
 	area->flags = flags;
 	area->addr = (void *)addr;
-	area->size = size + PAGE_SIZE;
+	area->size = size;
 	area->next = *p;
 	*p = area;
 	write_unlock(&vmlist_lock);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d7fd0aca8..dd92afecc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -185,6 +185,8 @@ drop_pte:
 		if (!entry.val)
 			goto out_unlock_restore; /* No swap space left */
 
+		/* Make sure to flush the TLB _before_ we start copying things.. */
+		flush_tlb_page(vma, address);
 		if (!(page = prepare_highmem_swapout(page)))
 			goto out_swap_free;
 
@@ -196,7 +198,6 @@ drop_pte:
 		/* Put the swap entry into the pte after the page is in swapcache */
 		mm->rss--;
 		set_pte(page_table, swp_entry_to_pte(entry));
-		flush_tlb_page(vma, address);
 		spin_unlock(&mm->page_table_lock);
 
 		/* OK, do a physical asynchronous write to swap. */
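
The change repeated across mmap.c, mlock.c, mprotect.c, mremap.c and the madvise fixups in filemap.c above is one locking split: insert_vm_struct() and remove_shared_vm_struct() become thin wrappers around __insert_vm_struct() and __remove_shared_vm_struct(), while lock_vma_mappings()/unlock_vma_mappings() take mapping->i_shared_lock around the raw variants for callers that manage the lock themselves. The sketch below is only a user-space analogy of that locked/unlocked function pair, not kernel code; every name in it (list_insert, __list_insert, list_lock, struct node) is made up for illustration.

/* Hypothetical user-space analogy of the insert_vm_struct()/__insert_vm_struct()
 * split: the double-underscore function assumes its caller already holds the
 * lock, the plain wrapper takes and drops the lock itself.
 */
#include <pthread.h>
#include <stdio.h>

struct node { int value; struct node *next; };

static struct node *head;                      /* shared list, like the i_mmap ring */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* like i_shared_lock */

/* Caller must already hold list_lock, like __insert_vm_struct(). */
static void __list_insert(struct node *n)
{
        n->next = head;
        head = n;
}

/* Wrapper for callers that do not hold the lock, like insert_vm_struct(). */
static void list_insert(struct node *n)
{
        pthread_mutex_lock(&list_lock);
        __list_insert(n);
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        static struct node a = { 1, NULL }, b = { 2, NULL };
        struct node *n;

        list_insert(&a);                /* ordinary caller: wrapper takes the lock */

        pthread_mutex_lock(&list_lock); /* caller batching several ops under one lock */
        __list_insert(&b);
        pthread_mutex_unlock(&list_lock);

        for (n = head; n; n = n->next)
                printf("%d\n", n->value);
        return 0;
}

A caller that keeps the lock across a longer critical section, the way merge_segments() above drops and retakes it only around vm_ops->close(), uses the double-underscore form directly; everything else goes through the wrapper. The sketch builds with something like cc -pthread.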