author    Ralf Baechle <ralf@linux-mips.org>    2000-06-15 01:55:58 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-06-15 01:55:58 +0000
commit    53b3988d474435254a3b053a68bb24ce9e439295 (patch)
tree      f8da8e40f01f4ad02bbd76b8c9920749b118235f /mm/filemap.c
parent    b0cb48abe83d1a4389ea938bf624f8baa82c5047 (diff)
Merge with 2.3.99-pre9.
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--  mm/filemap.c  65
1 file changed, 42 insertions(+), 23 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 81f7d7ab9..b1e2b8547 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -244,14 +244,19 @@ repeat:
spin_unlock(&pagecache_lock);
}
+/*
+ * nr_dirty represents the number of dirty pages that we will write async
+ * before doing sync writes. We can only do sync writes if we can
+ * wait for IO (__GFP_IO set).
+ */
int shrink_mmap(int priority, int gfp_mask)
{
- int ret = 0, count;
- LIST_HEAD(old);
- struct list_head * page_lru, * dispose;
+ int ret = 0, count, nr_dirty;
+ struct list_head * page_lru;
struct page * page = NULL;
count = nr_lru_pages / (priority + 1);
+ nr_dirty = priority;
/* we need pagemap_lru_lock for list_del() ... subtle code below */
spin_lock(&pagemap_lru_lock);
@@ -259,25 +264,10 @@ int shrink_mmap(int priority, int gfp_mask)
page = list_entry(page_lru, struct page, lru);
list_del(page_lru);
- dispose = &lru_cache;
if (PageTestandClearReferenced(page))
goto dispose_continue;
count--;
-
- /*
- * I'm ambivalent on this one.. Should we try to
- * maintain LRU on the LRU list, and put pages that
- * are old at the end of the queue, even if that
- * means that we'll re-scan them again soon and
- * often waste CPU time? Or should we just let any
- * pages we do not want to touch now for one reason
- * or another percolate to be "young"?
- *
- dispose = &old;
- *
- */
-
/*
* Avoid unscalable SMP locking for pages we can
immediately tell are untouchable..
@@ -303,7 +293,8 @@ int shrink_mmap(int priority, int gfp_mask)
* of zone - it's old.
*/
if (page->buffers) {
- if (!try_to_free_buffers(page))
+ int wait = ((gfp_mask & __GFP_IO) && (nr_dirty-- < 0));
+ if (!try_to_free_buffers(page, wait))
goto unlock_continue;
/* page was locked, inode can't go away under us */
if (!page->mapping) {
@@ -362,7 +353,7 @@ unlock_continue:
UnlockPage(page);
page_cache_release(page);
dispose_continue:
- list_add(page_lru, dispose);
+ list_add(page_lru, &lru_cache);
}
goto out;
@@ -377,8 +368,6 @@ made_buffer_progress:
nr_lru_pages--;
out:
- list_splice(&old, lru_cache.prev);
-
spin_unlock(&pagemap_lru_lock);
return ret;
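
The hunks above add a simple writeback throttle to shrink_mmap(): at scan
priority p, the first p+1 pages that still have buffers are only queued for
asynchronous writeback; pages after that are written synchronously, and then
only when the caller is allowed to block on IO (__GFP_IO set in gfp_mask).
Below is a minimal user-space sketch of that decision; must_wait() and the
flag value are illustrative stand-ins, not part of the patch.

#include <stdio.h>

#define GFP_IO_FLAG 0x01	/* stand-in for the kernel's __GFP_IO bit */

/* Mirrors the patch's: wait = ((gfp_mask & __GFP_IO) && (nr_dirty-- < 0)); */
static int must_wait(int gfp_mask, int *nr_dirty)
{
	return (gfp_mask & GFP_IO_FLAG) && ((*nr_dirty)-- < 0);
}

int main(void)
{
	int priority = 2;		/* as passed to shrink_mmap() */
	int nr_dirty = priority;	/* the patch sets nr_dirty = priority */
	int page;

	/* first priority + 1 buffer pages go async, the rest sync */
	for (page = 0; page < 6; page++)
		printf("buffer page %d: %s write\n", page,
		       must_wait(GFP_IO_FLAG, &nr_dirty) ? "sync" : "async");
	return 0;
}

Note the short-circuit in the expression: when __GFP_IO is not set, nr_dirty
is never decremented and every write stays asynchronous.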
@@ -2319,7 +2308,8 @@ out:
return error;
}
-struct page *read_cache_page(struct address_space *mapping,
+static inline
+struct page *__read_cache_page(struct address_space *mapping,
unsigned long index,
int (*filler)(void *,struct page*),
void *data)
@@ -2350,6 +2340,35 @@ repeat:
return page;
}
+/*
+ * Read into the page cache. If a page already exists,
+ * and Page_Uptodate() is not set, try to fill the page.
+ */
+struct page *read_cache_page(struct address_space *mapping,
+ unsigned long index,
+ int (*filler)(void *,struct page*),
+ void *data)
+{
+ struct page *page = __read_cache_page(mapping, index, filler, data);
+ int err;
+
+ if (IS_ERR(page) || Page_Uptodate(page))
+ goto out;
+
+ lock_page(page);
+ if (Page_Uptodate(page)) {
+ UnlockPage(page);
+ goto out;
+ }
+ err = filler(data, page);
+ if (err < 0) {
+ page_cache_release(page);
+ page = ERR_PTR(err);
+ }
+ out:
+ return page;
+}
+
static inline struct page * __grab_cache_page(struct address_space *mapping,
unsigned long index, struct page **cached_page)
{