author		Ralf Baechle <ralf@linux-mips.org>	2000-01-29 01:41:54 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-01-29 01:41:54 +0000
commit		f969d69ba9f952e5bdd38278e25e26a3e4a61a70 (patch)
tree		b3530d803df59d726afaabebc6626987dee1ca05 /drivers/block/ll_rw_blk.c
parent		a10ce7ef2066b455d69187643ddf2073bfc4db24 (diff)
Merge with 2.3.27.
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	39
1 files changed, 36 insertions, 3 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 59116c19d..5363e04e2 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -22,6 +22,7 @@
#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
+#include <linux/highmem.h>
#include <linux/module.h>
@@ -388,9 +389,12 @@ out:
/*
 * Has to be called with the request spinlock acquired
 */
-static inline void attempt_merge (struct request *req, int max_sectors)
+static inline void attempt_merge (struct request *req,
+				  int max_sectors,
+				  int max_segments)
{
	struct request *next = req->next;
+	int total_segments;

	if (!next)
		return;
@@ -398,9 +402,15 @@ static inline void attempt_merge (struct request *req, int max_sectors)
		return;
	if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
		return;
+	total_segments = req->nr_segments + next->nr_segments;
+	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+		total_segments--;
+	if (total_segments > max_segments)
+		return;
	req->bhtail->b_reqnext = next->bh;
	req->bhtail = next->bhtail;
	req->nr_sectors += next->nr_sectors;
+	req->nr_segments = total_segments;
	next->rq_status = RQ_INACTIVE;
	req->next = next->next;
	wake_up (&wait_for_request);
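
The accounting added to attempt_merge() above can be read in isolation. Below is a small standalone C sketch of that rule, using hypothetical stand-in types (struct buf, struct req_sketch) and helper names (merged_segments, merge_fits) rather than the kernel's own structures: the two requests' segment counts are summed, one segment is saved when the tail buffer of the first request ends exactly where the head buffer of the second begins, and the merge is abandoned if the result would exceed the device's scatter-gather limit.

#include <stddef.h>

/* Hypothetical, simplified stand-ins for buffer_head and request. */
struct buf {
	char   *b_data;		/* start of the buffer's data          */
	size_t  b_size;		/* length of the buffer in bytes       */
};

struct req_sketch {
	struct buf *bh;		/* first buffer in the request's chain */
	struct buf *bhtail;	/* last buffer in the request's chain  */
	int nr_segments;	/* scatter-gather segments used so far */
};

/*
 * Segment count a back-to-back merge of 'req' and 'next' would need:
 * physically contiguous boundary buffers collapse into one segment.
 */
static int merged_segments(const struct req_sketch *req,
			   const struct req_sketch *next)
{
	int total = req->nr_segments + next->nr_segments;

	if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
		total--;	/* tail of 'req' abuts head of 'next' */
	return total;
}

/* The merge is only worth attempting when the result fits the limit. */
static int merge_fits(const struct req_sketch *req,
		      const struct req_sketch *next, int max_segments)
{
	return merged_segments(req, next) <= max_segments;
}

For instance, merging a 10-segment request with an 8-segment one needs 18 segments, or 17 if the boundary buffers are contiguous, so the merge is refused whenever max_segments is 16.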
@@ -410,7 +420,7 @@ void make_request(int major,int rw, struct buffer_head * bh)
{
	unsigned int sector, count;
	struct request * req;
-	int rw_ahead, max_req, max_sectors;
+	int rw_ahead, max_req, max_sectors, max_segments;
	unsigned long flags;

	count = bh->b_size >> 9;
@@ -483,6 +493,15 @@ void make_request(int major,int rw, struct buffer_head * bh)
	if (!buffer_mapped(bh))
		BUG();

+	/*
+	 * Temporary solution - in 2.5 this will be done by the lowlevel
+	 * driver. Create a bounce buffer if the buffer data points into
+	 * high memory - keep the original buffer otherwise.
+	 */
+#if CONFIG_HIGHMEM
+	bh = create_bounce(rw, bh);
+#endif
+
	/* look for a free request. */
	/* Loop uses two requests, 1 for loop and 1 for the real device.
	 * Cut max_req in half to avoid running out and deadlocking. */
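
The bounce-buffer hunk above only calls create_bounce() when CONFIG_HIGHMEM is set. As a rough userspace illustration of the idea, not the kernel's create_bounce() implementation, the sketch below (hypothetical struct io_buf and maybe_bounce()) redirects I/O to a low-memory copy whenever the caller's buffer is not directly addressable; a write fills the copy up front, while a read would copy back on completion (not shown).

#include <stdlib.h>
#include <string.h>

#define READ  0
#define WRITE 1

/* Hypothetical buffer descriptor, for illustration only. */
struct io_buf {
	void   *data;
	size_t  size;
	int     in_highmem;	/* 1 if 'data' cannot be used directly */
};

/*
 * Return a buffer the low-level driver can use: the original one when
 * it already sits in low memory, otherwise a freshly allocated bounce
 * buffer of the same size.
 */
static struct io_buf *maybe_bounce(int rw, struct io_buf *orig)
{
	struct io_buf *bounce;

	if (!orig->in_highmem)
		return orig;		/* keep the original buffer */

	bounce = malloc(sizeof(*bounce));
	if (!bounce)
		return NULL;
	bounce->data = malloc(orig->size);
	if (!bounce->data) {
		free(bounce);
		return NULL;
	}
	bounce->size = orig->size;
	bounce->in_highmem = 0;

	if (rw == WRITE)		/* device will read from memory: fill the copy now */
		memcpy(bounce->data, orig->data, orig->size);
	return bounce;
}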
@@ -493,6 +512,7 @@ void make_request(int major,int rw, struct buffer_head * bh)
	 * Try to coalesce the new request with old requests
	 */
	max_sectors = get_max_sectors(bh->b_rdev);
+	max_segments = get_max_segments(bh->b_rdev);

	/*
	 * Now we acquire the request spinlock, we have to be mega careful
@@ -572,14 +592,26 @@ void make_request(int major,int rw, struct buffer_head * bh)
				continue;
			/* Can we add it to the end of this request? */
			if (req->sector + req->nr_sectors == sector) {
+				if (req->bhtail->b_data + req->bhtail->b_size
+				    != bh->b_data) {
+					if (req->nr_segments < max_segments)
+						req->nr_segments++;
+					else continue;
+				}
				req->bhtail->b_reqnext = bh;
				req->bhtail = bh;
				req->nr_sectors += count;
				drive_stat_acct(req, count, 0);
				/* Can we now merge this req with the next? */
-				attempt_merge(req, max_sectors);
+				attempt_merge(req, max_sectors, max_segments);
			/* or to the beginning? */
			} else if (req->sector - count == sector) {
+				if (bh->b_data + bh->b_size
+				    != req->bh->b_data) {
+					if (req->nr_segments < max_segments)
+						req->nr_segments++;
+					else continue;
+				}
				bh->b_reqnext = req->bh;
				req->bh = bh;
				req->buffer = bh->b_data;
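
Both merge branches above apply the same per-end test before touching the request. The sketch below restates it with the hypothetical struct req_sketch and struct buf types from the sketch after the attempt_merge() hunk (grow_back_ok is likewise an invented name): a buffer joining one end of a request costs a new segment only when it is not physically contiguous with the buffer already at that end, and the candidate request is skipped when that would push nr_segments past max_segments.

/*
 * Would appending 'bh' after the current tail of 'req' stay within the
 * device's segment limit?  Updates the segment count on success.
 * Prepending to the front is symmetrical, comparing against req->bh.
 * (Uses the hypothetical struct req_sketch / struct buf from the
 * earlier sketch.)
 */
static int grow_back_ok(struct req_sketch *req, const struct buf *bh,
			int max_segments)
{
	if (req->bhtail->b_data + req->bhtail->b_size == bh->b_data)
		return 1;		/* contiguous: shares the tail segment */
	if (req->nr_segments < max_segments) {
		req->nr_segments++;	/* new segment, still within the limit */
		return 1;
	}
	return 0;			/* would exceed the limit: skip this request */
}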
@@ -613,6 +645,7 @@ void make_request(int major,int rw, struct buffer_head * bh)
	req->errors = 0;
	req->sector = sector;
	req->nr_sectors = count;
+	req->nr_segments = 1;
	req->current_nr_sectors = count;
	req->buffer = bh->b_data;
	req->sem = NULL;
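
To make the bookkeeping in the new-request hunk concrete, here is a tiny worked example in plain C with assumed values: a 4 KiB buffer_head gives count = b_size >> 9 = 8 sectors, and because a fresh request holds exactly one buffer it starts with nr_segments = 1.

#include <assert.h>

int main(void)
{
	unsigned int b_size = 4096;		/* one 4 KiB buffer_head          */
	unsigned int count  = b_size >> 9;	/* 512-byte sectors in the buffer */

	assert(count == 8);	/* req->nr_sectors for the brand-new request */
	/* req->nr_segments starts at 1: a single buffer is a single segment. */
	return 0;
}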