author     Ralf Baechle <ralf@linux-mips.org>   2001-01-10 05:27:25 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   2001-01-10 05:27:25 +0000
commit     c9c06167e7933d93a6e396174c68abf242294abb (patch)
tree       d9a8bb30663e9a3405a1ef37ffb62bc14b9f019f /mm
parent     f79e8cc3c34e4192a3e5ef4cc9c6542fdef703c0 (diff)
Merge with Linux 2.4.0-test12.
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     86
-rw-r--r--  mm/highmem.c     71
-rw-r--r--  mm/memory.c      62
-rw-r--r--  mm/mmap.c        17
-rw-r--r--  mm/mprotect.c     1
-rw-r--r--  mm/mremap.c       2
-rw-r--r--  mm/page_alloc.c   4
-rw-r--r--  mm/swap.c        30
-rw-r--r--  mm/swap_state.c  10
-rw-r--r--  mm/vmalloc.c      2
-rw-r--r--  mm/vmscan.c      82
11 files changed, 104 insertions, 263 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index a191cc2f4..ec8ff8ac7 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1469,15 +1469,15 @@ page_not_uptodate:
* The "mapping" test takes care of somebody having truncated the
* page and thus made this write-page a no-op..
*/
-static int filemap_write_page(struct file *file,
- struct page * page,
- int wait)
+static int filemap_write_page(struct page * page, int wait)
{
struct address_space * mapping = page->mapping;
int error = 0;
- if (mapping)
- error = mapping->a_ops->writepage(file, page);
+ if (mapping && mapping->a_ops->writepage) {
+ ClearPageDirty(page);
+ error = mapping->a_ops->writepage(page);
+ }
return error;
}
@@ -1488,11 +1488,10 @@ static int filemap_write_page(struct file *file,
* at the same time..
*/
extern void wakeup_bdflush(int);
-int filemap_swapout(struct page * page, struct file * file)
+int filemap_swapout(struct page * page, struct file *file)
{
- int retval = filemap_write_page(file, page, 0);
- wakeup_bdflush(0);
- return retval;
+ SetPageDirty(page);
+ return 0;
}
/* Called with mm->page_table_lock held to protect against other
@@ -1501,56 +1500,26 @@ int filemap_swapout(struct page * page, struct file * file)
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
- unsigned long pgoff;
pte_t pte;
struct page *page;
int error;
pte = *ptep;
- if (!(flags & MS_INVALIDATE)) {
- if (!pte_present(pte))
- goto out;
- if (!ptep_test_and_clear_dirty(ptep))
- goto out;
- flush_page_to_ram(pte_page(pte));
- flush_cache_page(vma, address);
- flush_tlb_page(vma, address);
- page = pte_page(pte);
- page_cache_get(page);
- } else {
- if (pte_none(pte))
- goto out;
- flush_cache_page(vma, address);
-
- pte = ptep_get_and_clear(ptep);
- flush_tlb_page(vma, address);
-
- if (!pte_present(pte)) {
- spin_unlock(&vma->vm_mm->page_table_lock);
- swap_free(pte_to_swp_entry(pte));
- spin_lock(&vma->vm_mm->page_table_lock);
- goto out;
- }
- page = pte_page(pte);
- if (!pte_dirty(pte) || flags == MS_INVALIDATE) {
- page_cache_free(page);
- goto out;
- }
- }
- pgoff = (address - vma->vm_start) >> PAGE_CACHE_SHIFT;
- pgoff += vma->vm_pgoff;
- if (page->index != pgoff) {
- printk("weirdness: pgoff=%lu index=%lu address=%lu vm_start=%lu vm_pgoff=%lu\n",
- pgoff, page->index, address, vma->vm_start, vma->vm_pgoff);
- }
+ if (!pte_present(pte))
+ goto out;
+ if (!ptep_test_and_clear_dirty(ptep))
+ goto out;
+ flush_page_to_ram(pte_page(pte));
+ flush_cache_page(vma, address);
+ flush_tlb_page(vma, address);
+ page = pte_page(pte);
+ page_cache_get(page);
spin_unlock(&vma->vm_mm->page_table_lock);
- lock_page(page);
-
- error = filemap_write_page(vma->vm_file, page, 1);
- UnlockPage(page);
+ lock_page(page);
+ error = filemap_write_page(page, 1);
page_cache_free(page);
spin_lock(&vma->vm_mm->page_table_lock);
@@ -1649,20 +1618,11 @@ int filemap_sync(struct vm_area_struct * vma, unsigned long address,
}
/*
- * This handles (potentially partial) area unmaps..
- */
-static void filemap_unmap(struct vm_area_struct *vma, unsigned long start, size_t len)
-{
- filemap_sync(vma, start, len, MS_ASYNC);
-}
-
-/*
* Shared mappings need to be able to do the right thing at
* close/unmap/sync. They will also use the private file as
* backing-store for swapping..
*/
static struct vm_operations_struct file_shared_mmap = {
- unmap: filemap_unmap, /* unmap - we need to sync the pages */
sync: filemap_sync,
nopage: filemap_nopage,
swapout: filemap_swapout,
@@ -2462,7 +2422,7 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
if (count) {
remove_suid(inode);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ mark_inode_dirty_sync(inode);
}
while (count) {
@@ -2521,8 +2481,14 @@ unlock:
if (cached_page)
page_cache_free(cached_page);
+ /* For now, when the user asks for O_SYNC, we'll actually
+ * provide O_DSYNC. */
+ if ((status >= 0) && (file->f_flags & O_SYNC))
+ status = generic_osync_inode(inode, 1); /* 1 means datasync */
+
err = written ? written : status;
out:
+
up(&inode->i_sem);
return err;
fail_write:
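
[Annotation: the generic_file_write() hunk above deliberately gives O_SYNC writers
O_DSYNC semantics: generic_osync_inode(inode, 1) flushes the data and the metadata
needed to read it back, while the mtime/ctime update is flagged with
mark_inode_dirty_sync so a pure datasync need not write the inode. A hedged
userspace sketch of that distinction -- the path and payload below are made up
for illustration:]

    /* fdatasync() is the userspace analogue of the datasync flush this
     * hunk performs; fsync() additionally forces out inode metadata
     * such as timestamps (full O_SYNC semantics). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/tmp/example", O_WRONLY | O_CREAT, 0644);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "payload\n", 8) != 8)
                    perror("write");
            if (fdatasync(fd) < 0)  /* data + retrieval metadata only */
                    perror("fdatasync");
            if (fsync(fd) < 0)      /* data + all metadata, times included */
                    perror("fsync");
            return close(fd);
    }
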
diff --git a/mm/highmem.c b/mm/highmem.c
index 5e8ebde4b..7935d1280 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -23,77 +23,6 @@
#include <linux/slab.h>
/*
- * Take one locked page, return another low-memory locked page.
- */
-struct page * prepare_highmem_swapout(struct page * page)
-{
- struct page *new_page;
- unsigned long regular_page;
-
- /*
- * If this is a highmem page so it can't be swapped out directly
- * otherwise the b_data buffer addresses will break
- * the lowlevel device drivers.
- */
- if (!PageHighMem(page))
- return page;
-
- /*
- * Here we break the page lock, and we split the
- * dirty page into two. We can unlock the old page,
- * and we'll now have two of them. Too bad, it would
- * have been nice to continue to potentially share
- * across a fork().
- */
- UnlockPage(page);
- regular_page = __get_free_page(GFP_ATOMIC);
- if (!regular_page)
- return NULL;
-
- copy_page((void *)regular_page, kmap(page));
- kunmap(page);
-
- /*
- * ok, we can just forget about our highmem page since
- * we stored its data into the new regular_page.
- */
- page_cache_release(page);
- new_page = virt_to_page(regular_page);
- LockPage(new_page);
- return new_page;
-}
-
-struct page * replace_with_highmem(struct page * page)
-{
- struct page *highpage;
-
- if (PageHighMem(page) || !nr_free_highpages())
- return page;
-
- highpage = alloc_page(GFP_ATOMIC|__GFP_HIGHMEM);
- if (!highpage)
- return page;
- if (!PageHighMem(highpage)) {
- page_cache_release(highpage);
- return page;
- }
-
- copy_page(kmap(highpage), page_address(page));
- kunmap(highpage);
-
- if (page->mapping)
- BUG();
-
- /*
- * We can just forget the old page since
- * we stored its data into the new highmem-page.
- */
- page_cache_release(page);
-
- return highpage;
-}
-
-/*
* Virtual_count is not a pure "count".
* 0 means that it is not mapped, and has not been mapped
* since a TLB flush - it is usable.
diff --git a/mm/memory.c b/mm/memory.c
index d6b8f6371..13dad21a0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -227,7 +227,7 @@ skip_copy_pte_range: address = (address + PMD_SIZE) & PMD_MASK;
/* If it's a COW mapping, write protect it both in the parent and the child */
if (cow) {
- ptep_clear_wrprotect(src_pte);
+ ptep_set_wrprotect(src_pte);
pte = *src_pte;
}
@@ -269,6 +269,8 @@ static inline int free_pte(pte_t page)
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
+ if (pte_dirty(page))
+ SetPageDirty(ptpage);
free_page_and_swap_cache(ptpage);
return 1;
}
@@ -829,8 +831,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
* - we're the only user (count == 1)
* - the only other user is the swap cache,
* and the only swap cache user is itself,
- * in which case we can remove the page
- * from the swap cache.
+ * in which case we can just continue to
+ * use the same swap cache (it will be
+ * marked dirty).
*/
switch (page_count(old_page)) {
case 2:
@@ -845,7 +848,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
UnlockPage(old_page);
break;
}
- delete_from_swap_cache_nolock(old_page);
UnlockPage(old_page);
/* FallThrough */
case 1:
@@ -885,45 +887,6 @@ bad_wp_page:
return -1;
}
-/*
- * This function zeroes out partial mmap'ed pages at truncation time..
- */
-static void partial_clear(struct vm_area_struct *vma, unsigned long address)
-{
- unsigned int offset;
- struct page *page;
- pgd_t *page_dir;
- pmd_t *page_middle;
- pte_t *page_table, pte;
-
- page_dir = pgd_offset(vma->vm_mm, address);
- if (pgd_none(*page_dir))
- return;
- if (pgd_bad(*page_dir)) {
- pgd_ERROR(*page_dir);
- pgd_clear(page_dir);
- return;
- }
- page_middle = pmd_offset(page_dir, address);
- if (pmd_none(*page_middle))
- return;
- if (pmd_bad(*page_middle)) {
- pmd_ERROR(*page_middle);
- pmd_clear(page_middle);
- return;
- }
- page_table = pte_offset(page_middle, address);
- pte = *page_table;
- if (!pte_present(pte))
- return;
- flush_cache_page(vma, address);
- page = pte_page(pte);
- if ((!VALID_PAGE(page)) || PageReserved(page))
- return;
- offset = address & ~PAGE_MASK;
- memclear_highpage_flush(page, offset, PAGE_SIZE - offset);
-}
-
static void vmtruncate_list(struct vm_area_struct *mpnt,
unsigned long pgoff, unsigned long partial)
{
@@ -951,10 +914,6 @@ static void vmtruncate_list(struct vm_area_struct *mpnt,
/* Ok, partially affected.. */
start += diff << PAGE_SHIFT;
len = (len - diff) << PAGE_SHIFT;
- if (start & ~PAGE_MASK) {
- partial_clear(mpnt, start);
- start = (start + ~PAGE_MASK) & PAGE_MASK;
- }
flush_cache_range(mm, start, end);
zap_page_range(mm, start, len);
flush_tlb_range(mm, start, end);
@@ -1085,14 +1044,9 @@ static int do_swap_page(struct mm_struct * mm,
*/
lock_page(page);
swap_free(entry);
- if (write_access && !is_page_shared(page)) {
- delete_from_swap_cache_nolock(page);
- UnlockPage(page);
- page = replace_with_highmem(page);
- pte = mk_pte(page, vma->vm_page_prot);
+ if (write_access && !is_page_shared(page))
pte = pte_mkwrite(pte_mkdirty(pte));
- } else
- UnlockPage(page);
+ UnlockPage(page);
set_pte(page_table, pte);
/* No need to invalidate - it was non-present before */
diff --git a/mm/mmap.c b/mm/mmap.c
index da649f2a2..648cc5208 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -354,11 +354,11 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, unsigned lon
lock_vma_mappings(vma);
spin_lock(&mm->page_table_lock);
__insert_vm_struct(mm, vma);
+ unlock_vma_mappings(vma);
if (correct_wcount)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
merge_segments(mm, vma->vm_start, vma->vm_end);
spin_unlock(&mm->page_table_lock);
- unlock_vma_mappings(vma);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
@@ -858,9 +858,9 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
lock_vma_mappings(vma);
spin_lock(&mm->page_table_lock);
__insert_vm_struct(mm, vma);
+ unlock_vma_mappings(vma);
merge_segments(mm, vma->vm_start, vma->vm_end);
spin_unlock(&mm->page_table_lock);
- unlock_vma_mappings(vma);
mm->total_vm += len >> PAGE_SHIFT;
if (flags & VM_LOCKED) {
@@ -1034,20 +1034,23 @@ void merge_segments (struct mm_struct * mm, unsigned long start_addr, unsigned l
avl_remove(mpnt, &mm->mmap_avl);
prev->vm_end = mpnt->vm_end;
prev->vm_next = mpnt->vm_next;
+ mm->map_count--;
if (mpnt->vm_ops && mpnt->vm_ops->close) {
mpnt->vm_pgoff += (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
mpnt->vm_start = mpnt->vm_end;
spin_unlock(&mm->page_table_lock);
- unlock_vma_mappings(mpnt);
mpnt->vm_ops->close(mpnt);
- lock_vma_mappings(mpnt);
- spin_lock(&mm->page_table_lock);
- }
- mm->map_count--;
+ } else
+ spin_unlock(&mm->page_table_lock);
+
+ lock_vma_mappings(mpnt);
__remove_shared_vm_struct(mpnt);
+ unlock_vma_mappings(mpnt);
if (mpnt->vm_file)
fput(mpnt->vm_file);
kmem_cache_free(vm_area_cachep, mpnt);
mpnt = prev;
+
+ spin_lock(&mm->page_table_lock);
}
}
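
[Annotation: the merge_segments() hunk reorders the teardown so that
mpnt->vm_ops->close() and the shared-mapping bookkeeping run without
page_table_lock held, re-taking the spinlock only before the loop continues.
A minimal sketch of that drop/callback/retake shape, using pthread stand-ins
rather than the real mm locking:]

    /* Illustrative pattern only; the mutex and vma struct below are
     * stand-ins, not the kernel's mm_struct locking. */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

    struct vma {
            struct vma *next;
            void (*close)(struct vma *);
    };

    /* Caller holds page_table_lock; we return with it held again, but
     * the potentially sleeping close() callback runs with no lock held. */
    static void drop_one(struct vma *v)
    {
            pthread_mutex_unlock(&page_table_lock);
            if (v->close)
                    v->close(v);
            free(v);
            pthread_mutex_lock(&page_table_lock);
    }

    static void say_close(struct vma *v) { (void)v; }

    int main(void)
    {
            struct vma *v = calloc(1, sizeof(*v));
            if (!v)
                    return 1;
            v->close = say_close;
            pthread_mutex_lock(&page_table_lock);
            drop_one(v);
            pthread_mutex_unlock(&page_table_lock);
            return 0;
    }
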
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 64c178b31..e47987f1e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -10,6 +10,7 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
static inline void change_pte_range(pmd_t * pmd, unsigned long address,
unsigned long size, pgprot_t newprot)
diff --git a/mm/mremap.c b/mm/mremap.c
index 764cfabb8..bdbcf4841 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -144,9 +144,9 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
lock_vma_mappings(vma);
spin_lock(&current->mm->page_table_lock);
__insert_vm_struct(current->mm, new_vma);
+ unlock_vma_mappings(vma);
merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
spin_unlock(&current->mm->page_table_lock);
- unlock_vma_mappings(vma);
do_munmap(current->mm, addr, old_len);
current->mm->total_vm += new_len >> PAGE_SHIFT;
if (new_vma->vm_flags & VM_LOCKED) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 90c077439..dca35de59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -90,8 +90,6 @@ static void __free_pages_ok (struct page *page, unsigned long order)
BUG();
if (PageDecrAfter(page))
BUG();
- if (PageDirty(page))
- BUG();
if (PageActive(page))
BUG();
if (PageInactiveDirty(page))
@@ -99,7 +97,7 @@ static void __free_pages_ok (struct page *page, unsigned long order)
if (PageInactiveClean(page))
BUG();
- page->flags &= ~(1<<PG_referenced);
+ page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
page->age = PAGE_AGE_START;
zone = page->zone;
diff --git a/mm/swap.c b/mm/swap.c
index b4b9f76be..693773ccd 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -180,33 +180,9 @@ void deactivate_page_nolock(struct page * page)
* Don't touch it if it's not on the active list.
* (some pages aren't on any list at all)
*/
- if (PageActive(page) && page_count(page) <= maxcount &&
- !page_ramdisk(page)) {
-
- /*
- * We can move the page to the inactive_dirty list
- * if we have the strong suspicion that they might
- * become freeable in the near future.
- *
- * That is, the page has buffer heads attached (that
- * need to be cleared away) and/or the function calling
- * us has an extra reference count on the page.
- */
- if (page->buffers || page_count(page) == 2) {
- del_page_from_active_list(page);
- add_page_to_inactive_dirty_list(page);
- /*
- * Only if we are SURE the page is clean and immediately
- * reusable, we move it to the inactive_clean list.
- */
- } else if (page->mapping && !PageDirty(page) &&
- !PageLocked(page)) {
- del_page_from_active_list(page);
- add_page_to_inactive_clean_list(page);
- }
- /*
- * OK, we cannot free the page. Leave it alone.
- */
+ if (PageActive(page) && page_count(page) <= maxcount && !page_ramdisk(page)) {
+ del_page_from_active_list(page);
+ add_page_to_inactive_dirty_list(page);
}
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 3a91d955e..df45b34af 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,8 +17,15 @@
#include <asm/pgtable.h>
+static int swap_writepage(struct page *page)
+{
+ rw_swap_page(WRITE, page, 0);
+ return 0;
+}
+
static struct address_space_operations swap_aops = {
- sync_page: block_sync_page
+ writepage: swap_writepage,
+ sync_page: block_sync_page,
};
struct address_space swapper_space = {
@@ -106,6 +113,7 @@ void delete_from_swap_cache_nolock(struct page *page)
lru_cache_del(page);
spin_lock(&pagecache_lock);
+ ClearPageDirty(page);
__delete_from_swap_cache(page);
spin_unlock(&pagecache_lock);
page_cache_release(page);
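
[Annotation: the swap_aops hunk is what lets the swap cache be laundered
through the same interface as file pages -- the vmscan.c hunks below only
ever call mapping->a_ops->writepage, with no swap-specific case. An
illustrative userspace model of that dispatch; all types and names here
are simplified stand-ins for the kernel structures:]

    #include <stdio.h>

    struct page;

    struct address_space_ops {
            int (*writepage)(struct page *);
    };

    struct address_space {
            struct address_space_ops *a_ops;
    };

    struct page {
            struct address_space *mapping;
            int dirty;
    };

    /* Stand-in for swap_writepage(): hand the page to the swap I/O layer. */
    static int swap_writepage(struct page *page)
    {
            (void)page;
            printf("rw_swap_page(WRITE, page)\n");
            return 0;
    }

    static struct address_space_ops swap_aops = { .writepage = swap_writepage };
    static struct address_space swapper_space = { .a_ops = &swap_aops };

    /* What the new launder path boils down to: one indirect call, the
     * same for swap-backed and file-backed pages. */
    static void launder(struct page *page)
    {
            if (page->dirty && page->mapping->a_ops->writepage) {
                    page->dirty = 0;
                    page->mapping->a_ops->writepage(page);
            }
    }

    int main(void)
    {
            struct page p = { .mapping = &swapper_space, .dirty = 1 };
            launder(&p);
            return 0;
    }
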
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 339cffb5b..62ce5f1ff 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -215,8 +215,8 @@ void vfree(void * addr)
if (tmp->addr == addr) {
*p = tmp->next;
vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
- kfree(tmp);
write_unlock(&vmlist_lock);
+ kfree(tmp);
return;
}
}
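
[Annotation: the vfree() hunk shrinks the critical section -- the node is
unlinked while vmlist_lock is held, and the kfree() moves past the unlock
since freeing needs no list protection. A hedged sketch of the same
unlink-under-lock, free-after-unlock pattern with POSIX locks; the list
and lock are stand-ins for vmlist/vmlist_lock:]

    #include <pthread.h>
    #include <stdlib.h>

    struct vm_struct { void *addr; struct vm_struct *next; };

    static struct vm_struct *vmlist;
    static pthread_rwlock_t vmlist_lock = PTHREAD_RWLOCK_INITIALIZER;

    void vfree_model(void *addr)
    {
            struct vm_struct **p, *tmp;

            pthread_rwlock_wrlock(&vmlist_lock);
            for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
                    if (tmp->addr == addr) {
                            *p = tmp->next;                   /* unlink under the lock */
                            pthread_rwlock_unlock(&vmlist_lock);
                            free(tmp);                        /* free outside the lock */
                            return;
                    }
            }
            pthread_rwlock_unlock(&vmlist_lock);
    }

    int main(void)
    {
            struct vm_struct *v = malloc(sizeof(*v));
            if (!v)
                    return 1;
            v->addr = v;    /* any token address for the lookup */
            v->next = NULL;
            vmlist = v;
            vfree_model(v->addr);
            return 0;
    }
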
diff --git a/mm/vmscan.c b/mm/vmscan.c
index dd92afecc..46eb771af 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -91,6 +91,9 @@ static int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, un
*/
if (PageSwapCache(page)) {
entry.val = page->index;
+ if (pte_dirty(pte))
+ SetPageDirty(page);
+set_swap_pte:
swap_duplicate(entry);
set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
@@ -99,7 +102,8 @@ drop_pte:
flush_tlb_page(vma, address);
deactivate_page(page);
page_cache_release(page);
- goto out_failed;
+out_failed:
+ return 0;
}
/*
@@ -166,13 +170,13 @@ drop_pte:
flush_tlb_page(vma, address);
spin_unlock(&mm->page_table_lock);
error = swapout(page, file);
- UnlockPage(page);
if (file) fput(file);
- if (!error)
- goto out_free_success;
+ if (error < 0)
+ goto out_unlock_restore;
+ UnlockPage(page);
deactivate_page(page);
page_cache_release(page);
- return error;
+ return 1; /* We released page_table_lock */
}
/*
@@ -185,33 +189,11 @@ drop_pte:
if (!entry.val)
goto out_unlock_restore; /* No swap space left */
- /* Make sure to flush the TLB _before_ we start copying things.. */
- flush_tlb_page(vma, address);
- if (!(page = prepare_highmem_swapout(page)))
- goto out_swap_free;
-
- swap_duplicate(entry); /* One for the process, one for the swap cache */
-
- /* Add it to the swap cache */
+ /* Add it to the swap cache and mark it dirty */
add_to_swap_cache(page, entry);
+ SetPageDirty(page);
+ goto set_swap_pte;
- /* Put the swap entry into the pte after the page is in swapcache */
- mm->rss--;
- set_pte(page_table, swp_entry_to_pte(entry));
- spin_unlock(&mm->page_table_lock);
-
- /* OK, do a physical asynchronous write to swap. */
- rw_swap_page(WRITE, page, 0);
- deactivate_page(page);
-
-out_free_success:
- page_cache_release(page);
- return 1;
-out_swap_free:
- set_pte(page_table, pte);
- swap_free(entry);
-out_failed:
- return 0;
out_unlock_restore:
set_pte(page_table, pte);
UnlockPage(page);
@@ -501,7 +483,7 @@ struct page * reclaim_page(zone_t * zone)
continue;
}
- /* The page is dirty, or locked, move to inactive_diry list. */
+ /* The page is dirty, or locked, move to inactive_dirty list. */
if (page->buffers || TryLockPage(page)) {
del_page_from_inactive_clean_list(page);
add_page_to_inactive_dirty_list(page);
@@ -616,6 +598,36 @@ dirty_page_rescan:
}
/*
+ * Dirty swap-cache page? Write it out if
+ * last copy..
+ */
+ if (PageDirty(page)) {
+ int (*writepage)(struct page *) = page->mapping->a_ops->writepage;
+ if (!writepage)
+ goto page_active;
+
+ /* Can't start IO? Move it to the back of the list */
+ if (!can_get_io_locks) {
+ list_del(page_lru);
+ list_add(page_lru, &inactive_dirty_list);
+ UnlockPage(page);
+ continue;
+ }
+
+ /* OK, do a physical asynchronous write to swap. */
+ ClearPageDirty(page);
+ page_cache_get(page);
+ spin_unlock(&pagemap_lru_lock);
+
+ writepage(page);
+ page_cache_release(page);
+
+ /* And re-start the thing.. */
+ spin_lock(&pagemap_lru_lock);
+ continue;
+ }
+
+ /*
* If the page has buffers, try to free the buffer mappings
* associated with this page. If we succeed we either free
* the page (in case it was a buffercache only page) or we
@@ -701,6 +713,7 @@ dirty_page_rescan:
UnlockPage(page);
cleaned_pages++;
} else {
+page_active:
/*
* OK, we don't know what to do with the page.
* It's no use keeping it here, so we move it to
@@ -925,13 +938,6 @@ static int refill_inactive(unsigned int gfp_mask, int user)
shrink_dcache_memory(priority, gfp_mask);
shrink_icache_memory(priority, gfp_mask);
- /* Try to get rid of some shared memory pages.. */
- while (shm_swap(priority, gfp_mask)) {
- made_progress = 1;
- if (--count <= 0)
- goto done;
- }
-
/*
* Then, try to page stuff out..
*/