summary | refs | log | tree | commit | diff | stats
path: root/mm/filemap.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-06-25 01:20:01 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-06-25 01:20:01 +0000
commit3797ba0b62debb71af4606910acacc9896a9ae3b (patch)
tree414eea76253c7871bfdf3bd9d1817771eb40917c /mm/filemap.c
parent2b6c0c580795a4404f72d2a794214dd9e080709d (diff)
Merge with Linux 2.4.0-test2.
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  |  200 changes
1 file changed, 65 insertions, 135 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5e9e6e00b..b1e2b8547 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -56,8 +56,6 @@ spinlock_t pagemap_lru_lock = SPIN_LOCK_UNLOCKED;
#define CLUSTER_PAGES (1 << page_cluster)
#define CLUSTER_OFFSET(x) (((x) >> page_cluster) << page_cluster)
-#define min(a,b) ((a < b) ? a : b)
-
void __add_page_to_hash_queue(struct page * page, struct page **p)
{
atomic_inc(&page_cache_size);
@@ -92,16 +90,10 @@ static inline int sync_page(struct page *page)
/*
* Remove a page from the page cache and free it. Caller has to make
* sure the page is locked and that nobody else uses it - or that usage
- * is safe. We need that the page don't have any buffers.
+ * is safe.
*/
static inline void __remove_inode_page(struct page *page)
{
- if (!PageLocked(page))
- PAGE_BUG(page);
-
- if (page->buffers)
- BUG();
-
remove_page_from_inode_queue(page);
remove_page_from_hash_queue(page);
page->mapping = NULL;
@@ -109,6 +101,9 @@ static inline void __remove_inode_page(struct page *page)
void remove_inode_page(struct page *page)
{
+ if (!PageLocked(page))
+ PAGE_BUG(page);
+
spin_lock(&pagecache_lock);
__remove_inode_page(page);
spin_unlock(&pagecache_lock);
@@ -119,16 +114,16 @@ void remove_inode_page(struct page *page)
* @inode: the inode which pages we want to invalidate
*
* This function only removes the unlocked pages, if you want to
- * remove all the pages of one inode, you must call
- * truncate_inode_pages. This function is not supposed to be called
- * by block based filesystems.
+ * remove all the pages of one inode, you must call truncate_inode_pages.
*/
+
void invalidate_inode_pages(struct inode * inode)
{
struct list_head *head, *curr;
struct page * page;
head = &inode->i_mapping->pages;
+
spin_lock(&pagecache_lock);
spin_lock(&pagemap_lru_lock);
curr = head->next;
@@ -140,53 +135,20 @@ void invalidate_inode_pages(struct inode * inode)
/* We cannot invalidate a locked page */
if (TryLockPage(page))
continue;
- /* We _should not be called_ by block based filesystems */
- if (page->buffers)
- BUG();
- __remove_inode_page(page);
__lru_cache_del(page);
+ __remove_inode_page(page);
UnlockPage(page);
page_cache_release(page);
}
+
spin_unlock(&pagemap_lru_lock);
spin_unlock(&pagecache_lock);
}
-static inline void truncate_partial_page(struct page *page, unsigned partial)
-{
- memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
-
- if (page->buffers)
- block_flushpage(page, partial);
-
-}
-
-static inline void truncate_complete_page(struct page *page)
-{
- if (page->buffers)
- block_destroy_buffers(page);
- lru_cache_del(page);
-
- /*
- * We remove the page from the page cache _after_ we have
- * destroyed all buffer-cache references to it. Otherwise some
- * other process might think this inode page is not in the
- * page cache and creates a buffer-cache alias to it causing
- * all sorts of fun problems ...
- */
- remove_inode_page(page);
- page_cache_release(page);
-}
-
-/**
- * truncate_inode_pages - truncate *all* the pages from an offset
- * @mapping: mapping to truncate
- * @lstart: offset from with to truncate
- *
+/*
* Truncate the page cache at a set offset, removing the pages
* that are beyond that offset (and zeroing out partial pages).
- * If any page is locked we wait for it to become unlocked.
*/
void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
{
@@ -206,10 +168,11 @@ repeat:
page = list_entry(curr, struct page, list);
curr = curr->next;
+
offset = page->index;
- /* Is one of the pages to truncate? */
- if ((offset >= start) || (partial && (offset + 1) == start)) {
+ /* page wholly truncated - free it */
+ if (offset >= start) {
if (TryLockPage(page)) {
page_cache_get(page);
spin_unlock(&pagecache_lock);
@@ -220,14 +183,22 @@ repeat:
page_cache_get(page);
spin_unlock(&pagecache_lock);
- if (partial && (offset + 1) == start) {
- truncate_partial_page(page, partial);
- partial = 0;
- } else
- truncate_complete_page(page);
+ if (!page->buffers || block_flushpage(page, 0))
+ lru_cache_del(page);
+
+ /*
+ * We remove the page from the page cache
+ * _after_ we have destroyed all buffer-cache
+ * references to it. Otherwise some other process
+ * might think this inode page is not in the
+ * page cache and creates a buffer-cache alias
+ * to it causing all sorts of fun problems ...
+ */
+ remove_inode_page(page);
UnlockPage(page);
page_cache_release(page);
+ page_cache_release(page);
/*
* We have done things without the pagecache lock,
@@ -238,59 +209,38 @@ repeat:
*/
goto repeat;
}
- }
- spin_unlock(&pagecache_lock);
-}
-
-/**
- * truncate_all_inode_pages - truncate *all* the pages
- * @mapping: mapping to truncate
- *
- * Truncate all the inode pages. If any page is locked we wait for it
- * to become unlocked. This function can block.
- */
-void truncate_all_inode_pages(struct address_space * mapping)
-{
- struct list_head *head, *curr;
- struct page * page;
-
- head = &mapping->pages;
-repeat:
- spin_lock(&pagecache_lock);
- spin_lock(&pagemap_lru_lock);
- curr = head->next;
+ /*
+ * there is only one partial page possible.
+ */
+ if (!partial)
+ continue;
- while (curr != head) {
- page = list_entry(curr, struct page, list);
- curr = curr->next;
+ /* and it's the one preceeding the first wholly truncated page */
+ if ((offset + 1) != start)
+ continue;
+ /* partial truncate, clear end of page */
if (TryLockPage(page)) {
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
- spin_unlock(&pagecache_lock);
- wait_on_page(page);
- page_cache_release(page);
- goto repeat;
- }
- if (page->buffers) {
- page_cache_get(page);
- spin_unlock(&pagemap_lru_lock);
spin_unlock(&pagecache_lock);
- block_destroy_buffers(page);
- remove_inode_page(page);
- lru_cache_del(page);
- page_cache_release(page);
- UnlockPage(page);
- page_cache_release(page);
goto repeat;
}
- __lru_cache_del(page);
- __remove_inode_page(page);
+ page_cache_get(page);
+ spin_unlock(&pagecache_lock);
+
+ memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
+ if (page->buffers)
+ block_flushpage(page, partial);
+
+ partial = 0;
+
+ /*
+ * we have dropped the spinlock so we have to
+ * restart.
+ */
UnlockPage(page);
page_cache_release(page);
+ goto repeat;
}
-
- spin_unlock(&pagemap_lru_lock);
spin_unlock(&pagecache_lock);
}
@@ -314,15 +264,7 @@ int shrink_mmap(int priority, int gfp_mask)
page = list_entry(page_lru, struct page, lru);
list_del(page_lru);
- if (PageTestandClearReferenced(page)) {
- page->age += PG_AGE_ADV;
- if (page->age > PG_AGE_MAX)
- page->age = PG_AGE_MAX;
- goto dispose_continue;
- }
- page->age -= min(PG_AGE_DECL, page->age);
-
- if (page->age)
+ if (PageTestandClearReferenced(page))
goto dispose_continue;
count--;
@@ -380,23 +322,17 @@ int shrink_mmap(int priority, int gfp_mask)
* were to be marked referenced..
*/
if (PageSwapCache(page)) {
- if (!PageDirty(page)) {
- spin_unlock(&pagecache_lock);
- __delete_from_swap_cache(page);
- goto made_inode_progress;
- }
- /* PageDeferswap -> we swap out the page now. */
- if (gfp_mask & __GFP_IO) {
- spin_unlock(&pagecache_lock);
- /* Do NOT unlock the page ... brw_page does. */
- ClearPageDirty(page);
- rw_swap_page(WRITE, page, 0);
- spin_lock(&pagemap_lru_lock);
- page_cache_release(page);
- goto dispose_continue;
- }
+ spin_unlock(&pagecache_lock);
+ __delete_from_swap_cache(page);
+ goto made_inode_progress;
+ }
+
+ /*
+ * Page is from a zone we don't care about.
+ * Don't drop page cache entries in vain.
+ */
+ if (page->zone->free_pages > page->zone->pages_high)
goto cache_unlock_continue;
- }
/* is it a page-cache page? */
if (page->mapping) {
@@ -564,7 +500,7 @@ void add_to_page_cache_locked(struct page * page, struct address_space *mapping,
/*
* This adds a page to the page cache, starting out as locked,
- * owned by us, but not uptodate and with no errors.
+ * owned by us, referenced, but not uptodate and with no errors.
*/
static inline void __add_to_page_cache(struct page * page,
struct address_space *mapping, unsigned long offset,
@@ -576,8 +512,8 @@ static inline void __add_to_page_cache(struct page * page,
if (PageLocked(page))
BUG();
- flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error));
- page->flags = flags | (1 << PG_locked);
+ flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_dirty));
+ page->flags = flags | (1 << PG_locked) | (1 << PG_referenced);
page_cache_get(page);
page->index = offset;
add_page_to_inode_queue(mapping, page);
@@ -1808,7 +1744,7 @@ static int msync_interval(struct vm_area_struct * vma,
if (!error && (flags & MS_SYNC)) {
struct file * file = vma->vm_file;
if (file && file->f_op && file->f_op->fsync)
- error = file->f_op->fsync(file, file->f_dentry, 1);
+ error = file->f_op->fsync(file, file->f_dentry);
}
return error;
}
@@ -2547,7 +2483,7 @@ generic_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
if (count) {
remove_suid(inode);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- mark_inode_dirty_sync(inode);
+ mark_inode_dirty(inode);
}
while (count) {
@@ -2604,13 +2540,7 @@ unlock:
if (cached_page)
page_cache_free(cached_page);
- /* For now, when the user asks for O_SYNC, we'll actually
- * provide O_DSYNC. */
- if ((status >= 0) && (file->f_flags & O_SYNC))
- status = generic_osync_inode(inode, 1); /* 1 means datasync */
-
err = written ? written : status;
-
out:
up(&inode->i_sem);
return err;