Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c | 50 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 4e9fa9015..65c3fb627 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -789,7 +789,7 @@ static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
 	/*
 	 * Run the hooks that have to be done when a page I/O has completed.
 	 */
-	if (test_and_clear_bit(PG_decr_after, &page->flags))
+	if (PageTestandClearDecrAfter(page))
 		atomic_dec(&nr_async_pages);
 
 	UnlockPage(page);
@@ -1578,7 +1578,6 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 		nr++;
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
-	++current->maj_flt;
 	if (nr) {
 		if (Page_Uptodate(page))
 			BUG();
@@ -1958,7 +1957,7 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size)
 	if (!PageLocked(page))
 		panic("brw_page: page not locked for I/O");
-//	clear_bit(PG_error, &page->flags);
+//	ClearPageError(page);
 	/*
 	 * We pretty much rely on the page lock for this, because
 	 * create_page_buffers() might sleep.
 	 */
@@ -2002,8 +2001,6 @@ int brw_page(int rw, struct page *page, kdev_t dev, int b[], int size)
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	if (rw == READ)
-		++current->maj_flt;
 	if ((rw == READ) && nr) {
 		if (Page_Uptodate(page))
 			BUG();
@@ -2115,6 +2112,29 @@ out:
 }
 
 /*
+ * Sync all the buffers on one page..
+ *
+ * If we have old buffers that are locked, we'll
+ * wait on them, but we won't wait on the new ones
+ * we're writing out now.
+ *
+ * This all is required so that we can free up memory
+ * later.
+ */
+static void sync_page_buffers(struct buffer_head *bh)
+{
+	struct buffer_head * tmp;
+
+	tmp = bh;
+	do {
+		struct buffer_head *p = tmp;
+		tmp = tmp->b_this_page;
+		if (buffer_dirty(p) && !buffer_locked(p))
+			ll_rw_block(WRITE, 1, &p);
+	} while (tmp != bh);
+}
+
+/*
  * Can the buffer be thrown out?
  */
 #define BUFFER_BUSY_BITS ((1<<BH_Dirty) | (1<<BH_Lock) | (1<<BH_Protected))
@@ -2133,16 +2153,15 @@ out:
  */
 int try_to_free_buffers(struct page * page)
 {
-	struct buffer_head * tmp, * p, * bh = page->buffers;
+	struct buffer_head * tmp, * bh = page->buffers;
 	int index = BUFSIZE_INDEX(bh->b_size);
-	int ret;
 
 	spin_lock(&lru_list_lock);
 	write_lock(&hash_table_lock);
 	spin_lock(&free_list[index].lock);
 	tmp = bh;
 	do {
-		p = tmp;
+		struct buffer_head *p = tmp;
 		tmp = tmp->b_this_page;
 		if (buffer_busy(p))
 			goto busy_buffer_page;
@@ -2172,19 +2191,18 @@ int try_to_free_buffers(struct page * page)
 	/* And free the page */
 	page->buffers = NULL;
 	__free_page(page);
-	ret = 1;
-out:
 	spin_unlock(&free_list[index].lock);
 	write_unlock(&hash_table_lock);
 	spin_unlock(&lru_list_lock);
-	return ret;
+	return 1;
 
 busy_buffer_page:
 	/* Uhhuh, start writeback so that we don't end up with all dirty pages */
-	if (buffer_dirty(p))
-		wakeup_bdflush(0);
-	ret = 0;
-	goto out;
+	spin_unlock(&free_list[index].lock);
+	write_unlock(&hash_table_lock);
+	spin_unlock(&lru_list_lock);
+	sync_page_buffers(bh);
+	return 0;
 }
 
 /* ================== Debugging =================== */
@@ -2277,7 +2295,7 @@ void __init buffer_init(unsigned long mempages)
 			__get_free_pages(GFP_ATOMIC, order);
 	} while (hash_table == NULL && --order > 0);
 	printk("Buffer-cache hash table entries: %d (order: %d, %ld bytes)\n",
-	       nr_hash, order, (1UL<<order) * PAGE_SIZE);
+	       nr_hash, order, (PAGE_SIZE << order));
 
 	if (!hash_table)
 		panic("Failed to allocate buffer hash table\n");