author     Ralf Baechle <ralf@linux-mips.org>  2000-04-28 01:09:25 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-04-28 01:09:25 +0000
commit     b9ba7aeb165cffecdffb60aec8c3fa8d590d9ca9 (patch)
tree       42d07b0c7246ae2536a702e7c5de9e2732341116 /mm/filemap.c
parent     7406b0a326f2d70ade2671c37d1beef62249db97 (diff)
Merge with 2.3.99-pre6.
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  113
1 file changed, 77 insertions(+), 36 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 533747f96..d0df8bd2c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -44,6 +44,7 @@
atomic_t page_cache_size = ATOMIC_INIT(0);
unsigned int page_hash_bits;
struct page **page_hash_table;
+struct list_head lru_cache;
spinlock_t pagecache_lock = SPIN_LOCK_UNLOCKED;
/*
@@ -77,6 +78,15 @@ static void remove_page_from_hash_queue(struct page * page)
atomic_dec(&page_cache_size);
}
+static inline int sync_page(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
+ return mapping->a_ops->sync_page(page);
+ return 0;
+}
+
/*
* Remove a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
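The new sync_page() hook above gives each address_space a way to kick off any
pending async I/O needed to make progress on one of its pages; the wait loops
later in this patch call it in place of the old unconditional
run_task_queue(&tq_disk). As a minimal sketch (not part of this patch; the
name block_sync_page and the exact header set are assumptions), a
block-backed mapping could preserve the old behaviour like this:

	#include <linux/mm.h>
	#include <linux/tqueue.h>
	#include <linux/blkdev.h>

	/* Assumed method: unplug the disk task queue, exactly what the
	 * wait loops used to do unconditionally via run_task_queue(). */
	static int block_sync_page(struct page *page)
	{
		run_task_queue(&tq_disk);
		return 0;
	}

	/* Hypothetical address_space_operations wiring for such a mapping. */
	static struct address_space_operations example_aops = {
		sync_page:	block_sync_page,
	};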
@@ -87,6 +97,9 @@ void remove_inode_page(struct page *page)
if (!PageLocked(page))
PAGE_BUG(page);
+ /* Initiate completion of any async operations */
+ sync_page(page);
+
spin_lock(&pagecache_lock);
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
@@ -99,6 +112,7 @@ void invalidate_inode_pages(struct inode * inode)
struct list_head *head, *curr;
struct page * page;
+ repeat:
head = &inode->i_mapping->pages;
spin_lock(&pagecache_lock);
curr = head->next;
@@ -110,14 +124,13 @@ void invalidate_inode_pages(struct inode * inode)
/* We cannot invalidate a locked page */
if (TryLockPage(page))
continue;
+ spin_unlock(&pagecache_lock);
lru_cache_del(page);
- remove_page_from_inode_queue(page);
- remove_page_from_hash_queue(page);
- page->mapping = NULL;
+ remove_inode_page(page);
UnlockPage(page);
-
page_cache_release(page);
+ goto repeat;
}
spin_unlock(&pagecache_lock);
}
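invalidate_inode_pages() now drops pagecache_lock before calling
lru_cache_del() and remove_inode_page() -- both to avoid blocking work under
a spinlock and because remove_inode_page() takes pagecache_lock itself --
then restarts the walk from the head, since the list may change while the
lock is released. The shape of that idiom, reduced to a schematic (the
predicate and helper names are invented for illustration):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static void scan_list(struct list_head *head, spinlock_t *lock)
	{
		struct list_head *curr;
	repeat:
		spin_lock(lock);
		for (curr = head->next; curr != head; curr = curr->next) {
			if (!needs_work(curr))		/* assumed predicate */
				continue;
			/* Can't block or re-take the lock while holding it:
			 * drop it, do the work, then rescan from the start,
			 * because 'curr' may be stale afterwards. */
			spin_unlock(lock);
			do_blocking_work(curr);		/* assumed helper */
			goto repeat;
		}
		spin_unlock(lock);
	}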
@@ -149,11 +162,16 @@ repeat:
/* page wholly truncated - free it */
if (offset >= start) {
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ get_page(page);
+ wait_on_page(page);
+ put_page(page);
+ goto repeat;
+ }
get_page(page);
spin_unlock(&pagecache_lock);
- lock_page(page);
-
if (!page->buffers || block_flushpage(page, 0))
lru_cache_del(page);
@@ -191,11 +209,13 @@ repeat:
continue;
/* partial truncate, clear end of page */
+ if (TryLockPage(page)) {
+ spin_unlock(&pagecache_lock);
+ goto repeat;
+ }
get_page(page);
spin_unlock(&pagecache_lock);
- lock_page(page);
-
memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
if (page->buffers)
block_flushpage(page, partial);
@@ -208,6 +228,9 @@ repeat:
*/
UnlockPage(page);
page_cache_release(page);
+ get_page(page);
+ wait_on_page(page);
+ put_page(page);
goto repeat;
}
spin_unlock(&pagecache_lock);
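Both truncate paths above replace the sleeping lock_page() taken under
pagecache_lock with a TryLockPage(); on failure they release the spinlock
and, in the wholly-truncated case, sleep until the current lock holder
finishes before rescanning. The idiom in isolation, annotated (the comments
here are editorial, not in the patch):

	if (TryLockPage(page)) {
		spin_unlock(&pagecache_lock);	/* never sleep holding it */
		get_page(page);		/* pin so the page can't be freed... */
		wait_on_page(page);	/* ...while we sleep for the unlock */
		put_page(page);
		goto repeat;		/* list may have changed; rescan */
	}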
@@ -215,46 +238,55 @@ repeat:
int shrink_mmap(int priority, int gfp_mask, zone_t *zone)
{
- int ret = 0, count;
+ int ret = 0, loop = 0, count;
LIST_HEAD(young);
LIST_HEAD(old);
LIST_HEAD(forget);
struct list_head * page_lru, * dispose;
- struct page * page;
-
+ struct page * page = NULL;
+ struct zone_struct * p_zone;
+ int maxloop = 256 >> priority;
+
if (!zone)
BUG();
- count = nr_lru_pages / (priority+1);
+ count = nr_lru_pages >> priority;
+ if (!count)
+ return ret;
spin_lock(&pagemap_lru_lock);
-
- while (count > 0 && (page_lru = zone->lru_cache.prev) != &zone->lru_cache) {
+again:
+ /* we need pagemap_lru_lock for list_del() ... subtle code below */
+ while (count > 0 && (page_lru = lru_cache.prev) != &lru_cache) {
page = list_entry(page_lru, struct page, lru);
list_del(page_lru);
+ p_zone = page->zone;
- dispose = &zone->lru_cache;
- if (test_and_clear_bit(PG_referenced, &page->flags))
- /* Roll the page at the top of the lru list,
- * we could also be more aggressive putting
- * the page in the young-dispose-list, so
- * avoiding to free young pages in each pass.
- */
- goto dispose_continue;
-
+ /*
+ * These two tests are there to make sure we don't free too
+ * many pages from the "wrong" zone. We free some anyway,
+ * they are the least recently used pages in the system.
+ * When we don't free them, leave them in &old.
+ */
dispose = &old;
- /* don't account passes over not DMA pages */
- if (zone && (!memclass(page->zone, zone)))
+ if (p_zone != zone && (loop > (maxloop / 4) ||
+ p_zone->free_pages > p_zone->pages_high))
goto dispose_continue;
- count--;
-
+ /* The page is in use, or was used very recently, put it in
+ * &young to make sure that we won't try to free it the next
+ * time */
dispose = &young;
- /* avoid unscalable SMP locking */
+ if (test_and_clear_bit(PG_referenced, &page->flags))
+ goto dispose_continue;
+
+ count--;
if (!page->buffers && page_count(page) > 1)
goto dispose_continue;
+ /* Page not used -> free it; if that fails -> &old */
+ dispose = &old;
if (TryLockPage(page))
goto dispose_continue;
@@ -327,6 +359,7 @@ unlock_continue:
list_add(page_lru, dispose);
continue;
+ /* we're holding pagemap_lru_lock, so we can just loop again */
dispose_continue:
list_add(page_lru, dispose);
}
@@ -342,9 +375,14 @@ made_buffer_progress:
/* nr_lru_pages needs the spinlock */
nr_lru_pages--;
+ loop++;
+ /* wrong zone? not looped too often? roll again... */
+ if (page->zone != zone && loop < maxloop)
+ goto again;
+
out:
- list_splice(&young, &zone->lru_cache);
- list_splice(&old, zone->lru_cache.prev);
+ list_splice(&young, &lru_cache);
+ list_splice(&old, lru_cache.prev);
spin_unlock(&pagemap_lru_lock);
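shrink_mmap() now works against a single global lru_cache instead of a
per-zone list: pages come off the tail (the least recently used end), are
sorted onto the private &young, &old, or &forget lists while
pagemap_lru_lock is held, and the survivors are spliced back at the end.
Reduced to its list mechanics (page_was_referenced() stands in for the real
tests in the loop above):

	struct list_head *page_lru;
	LIST_HEAD(young);
	LIST_HEAD(old);

	spin_lock(&pagemap_lru_lock);
	while ((page_lru = lru_cache.prev) != &lru_cache) {
		list_del(page_lru);
		list_add(page_lru,
			 page_was_referenced(page_lru) ? &young : &old);
	}
	/* &young to the head so recently used pages are scanned last on
	 * the next pass; &old back to the tail, scanned first again. */
	list_splice(&young, &lru_cache);
	list_splice(&old, lru_cache.prev);
	spin_unlock(&pagemap_lru_lock);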
@@ -467,6 +505,9 @@ static inline void __add_to_page_cache(struct page * page,
struct page *alias;
unsigned long flags;
+ if (PageLocked(page))
+ BUG();
+
flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
get_page(page);
@@ -574,7 +615,7 @@ void ___wait_on_page(struct page *page)
add_wait_queue(&page->wait, &wait);
do {
- run_task_queue(&tq_disk);
+ sync_page(page);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if (!PageLocked(page))
break;
@@ -619,7 +660,7 @@ repeat:
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- run_task_queue(&tq_disk);
+ sync_page(page);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue(&page->wait, &wait);
@@ -669,7 +710,7 @@ repeat:
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
- run_task_queue(&tq_disk);
+ sync_page(page);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue(&page->wait, &wait);
@@ -1496,7 +1537,7 @@ static int filemap_write_page(struct file *file,
* mmap_sem is held.
*/
lock_page(page);
- result = inode->i_mapping->a_ops->writepage(dentry, page);
+ result = inode->i_mapping->a_ops->writepage(file, dentry, page);
UnlockPage(page);
return result;
}
@@ -1707,8 +1748,8 @@ static int msync_interval(struct vm_area_struct * vma,
error = vma->vm_ops->sync(vma, start, end-start, flags);
if (!error && (flags & MS_SYNC)) {
struct file * file = vma->vm_file;
- if (file)
- error = file_fsync(file, file->f_dentry);
+ if (file && file->f_op && file->f_op->fsync)
+ error = file->f_op->fsync(file, file->f_dentry);
}
return error;
}
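The MS_SYNC path stops funnelling every file through the generic
file_fsync() helper and instead calls the filesystem's own fsync method only
when one is provided, succeeding silently otherwise. As a hypothetical
wrapper (msync_do_fsync is not in the patch), the dispatch is just:

	static int msync_do_fsync(struct file *file)	/* hypothetical */
	{
		if (file && file->f_op && file->f_op->fsync)
			return file->f_op->fsync(file, file->f_dentry);
		return 0;	/* no method: nothing to sync, not an error */
	}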