author     Ralf Baechle <ralf@linux-mips.org>  2000-02-05 06:47:02 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-02-05 06:47:02 +0000
commit     99a7e12f34b3661a0d1354eef83a0eef4df5e34c (patch)
tree       3560aca9ca86792f9ab7bd87861ea143a1b3c7a3 /drivers/scsi/scsi_merge.c
parent     e73a04659c0b8cdee4dd40e58630e2cf63afb316 (diff)
Merge with Linux 2.3.38.
Diffstat (limited to 'drivers/scsi/scsi_merge.c')
-rw-r--r--  drivers/scsi/scsi_merge.c | 362
1 file changed, 304 insertions(+), 58 deletions(-)
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 8182d0766..3dffe88ad 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -61,12 +61,19 @@
#include "constants.h"
#include <scsi/scsi_ioctl.h>
+/*
+ * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
+ * Ultimately we should get away from using a dedicated DMA bounce buffer
+ * pool and instead try to use kmalloc(). If we can eliminate this
+ * pool, then this restriction would no longer be needed.
+ */
+#define DMA_SEGMENT_SIZE_LIMITED
+
#ifdef CONFIG_SCSI_DEBUG_QUEUES
/*
* Enable a bunch of additional consistency checking. Turn this off
* if you are benchmarking.
*/
-
static int dump_stats(struct request *req,
int use_clustering,
int dma_host,
@@ -98,17 +105,51 @@ static int dump_stats(struct request *req,
* This can be removed for optimization.
*/
#define SANITY_CHECK(req, _CLUSTER, _DMA) \
- if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA) ) \
+ if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) ) \
{ \
__label__ here; \
here: \
printk("Incorrect segment count at 0x%p", &&here); \
- dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA)); \
+ dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
}
#else
#define SANITY_CHECK(req, _CLUSTER, _DMA)
#endif
+static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
+{
+ int jj;
+ struct scatterlist *sgpnt;
+ int consumed = 0;
+
+ sgpnt = (struct scatterlist *) SCpnt->request_buffer;
+
+ /*
+ * Now print out a bunch of stats. First, start with the request
+ * size.
+ */
+ printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
+ printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
+ printk("request_bufflen:%d\n", SCpnt->request_bufflen);
+ /*
+ * Now dump the scatter-gather table, up to the point of failure.
+ */
+ for(jj=0; jj < SCpnt->use_sg; jj++)
+ {
+ printk("[%d]\tlen:%d\taddr:%p\talt:%p\n",
+ jj,
+ sgpnt[jj].length,
+ sgpnt[jj].address,
+ sgpnt[jj].alt_address);
+ if( sgpnt[jj].alt_address != NULL )
+ {
+ consumed += (sgpnt[jj].length >> 9);
+ }
+ }
+ printk("Total %d sectors consumed\n", consumed);
+ panic("DMA pool exhausted");
+}
+
/*
* FIXME(eric) - the original disk code disabled clustering for MOD
* devices. I have no idea why we thought this was a good idea - my
@@ -133,6 +174,9 @@ here: \
* dma_host - 1 if this host has ISA DMA issues (bus doesn't
* expose all of the address lines, so that DMA cannot
* be done from an arbitrary address).
+ * remainder - used to track the residual size of the last
+ * segment. Comes in handy when we want to limit the
+ * size of bounce buffer segments to PAGE_SIZE.
*
* Returns: Count of the number of SG segments for the request.
*
@@ -142,12 +186,36 @@ here: \
*/
__inline static int __count_segments(struct request *req,
int use_clustering,
- int dma_host)
+ int dma_host,
+ int * remainder)
{
int ret = 1;
+ int reqsize = 0;
struct buffer_head *bh;
+ struct buffer_head *bhnext;
+
+ if( remainder != NULL ) {
+ reqsize = *remainder;
+ }
+
+ /*
+ * Add in the size increment for the first buffer.
+ */
+ bh = req->bh;
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( reqsize + bh->b_size > PAGE_SIZE ) {
+ ret++;
+ reqsize = bh->b_size;
+ } else {
+ reqsize += bh->b_size;
+ }
+#else
+ reqsize += bh->b_size;
+#endif
- for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext) {
+ for (bh = req->bh, bhnext = bh->b_reqnext;
+ bhnext != NULL;
+ bh = bhnext, bhnext = bh->b_reqnext) {
if (use_clustering) {
/*
* See if we can do this without creating another
@@ -156,23 +224,86 @@ __inline static int __count_segments(struct request *req,
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bhnext->b_data) - 1 == ISA_DMA_THRESHOLD) {
ret++;
- } else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
+ reqsize = bhnext->b_size;
+ } else if (CONTIGUOUS_BUFFERS(bh, bhnext)) {
/*
* This one is OK. Let it go.
+ */
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ /* Note scsi_malloc is only able to hand out
+ * chunks of memory in sizes of PAGE_SIZE or
+ * less. Thus we need to keep track of
+ * the size of the piece that we have
+ * seen so far, and if we have hit
+ * the limit of PAGE_SIZE, then we are
+ * kind of screwed and we need to start
+ * another segment.
*/
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD
+ && reqsize + bhnext->b_size > PAGE_SIZE )
+ {
+ ret++;
+ reqsize = bhnext->b_size;
+ continue;
+ }
+#endif
+ reqsize += bhnext->b_size;
continue;
}
ret++;
+ reqsize = bhnext->b_size;
} else {
ret++;
+ reqsize = bhnext->b_size;
}
}
+ if( remainder != NULL ) {
+ *remainder = reqsize;
+ }
return ret;
}
/*
+ * Function: recount_segments()
+ *
+ * Purpose: Recount the number of scatter-gather segments for this request.
+ *
+ * Arguments: req - request that needs recounting.
+ *
+ * Returns: Count of the number of SG segments for the request.
+ *
+ * Lock status: Irrelevant.
+ *
+ * Notes: This is only used when we have partially completed requests
+ * and the bit that is leftover is of an indeterminate size.
+ * This can come up if you get a MEDIUM_ERROR, for example,
+ * as we will have "completed" all of the sectors up to and
+ * including the bad sector, and the leftover bit is what
+ * we have to do now. This tends to be a rare occurrence, so
+ * we aren't busting our butts to instantiate separate versions
+ * of this function for the 4 different flag values. We
+ * probably should, however.
+ */
+void
+recount_segments(Scsi_Cmnd * SCpnt)
+{
+ struct request *req;
+ struct Scsi_Host *SHpnt;
+ Scsi_Device * SDpnt;
+
+ req = &SCpnt->request;
+ SHpnt = SCpnt->host;
+ SDpnt = SCpnt->device;
+
+ req->nr_segments = __count_segments(req,
+ CLUSTERABLE_DEVICE(SHpnt, SDpnt),
+ SHpnt->unchecked_isa_dma, NULL);
+}
+
+/*
* Function: __scsi_merge_fn()
*
* Purpose: Prototype for queue merge function.
@@ -212,6 +343,7 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
int dma_host)
{
unsigned int sector, count;
+ unsigned int segment_size = 0;
Scsi_Device *SDpnt;
struct Scsi_Host *SHpnt;
@@ -236,10 +368,21 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+ segment_size = 0;
+ count = __count_segments(req, use_clustering, dma_host, &segment_size);
+ if( segment_size + bh->b_size > PAGE_SIZE )
+ {
+ goto new_segment;
+ }
+ }
+#endif
/*
* This one is OK. Let it go.
*/
@@ -256,10 +399,20 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( dma_host
+ && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD ) {
+ segment_size = bh->b_size;
+ count = __count_segments(req, use_clustering, dma_host, &segment_size);
+ if( count != req->nr_segments ) {
+ goto new_segment;
+ }
+ }
+#endif
/*
* This one is OK. Let it go.
*/
@@ -380,9 +533,28 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
- virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto dont_combine;
}
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ /*
+ * We currently can only allocate scatter-gather bounce
+ * buffers in chunks of PAGE_SIZE or less.
+ */
+ if (dma_host
+ && CONTIGUOUS_BUFFERS(req->bhtail, next->bh)
+ && virt_to_phys(req->bhtail->b_data) - 1 >= ISA_DMA_THRESHOLD )
+ {
+ int segment_size = 0;
+ int count = 0;
+
+ count = __count_segments(req, use_clustering, dma_host, &segment_size);
+ count += __count_segments(next, use_clustering, dma_host, &segment_size);
+ if( count != req->nr_segments + next->nr_segments ) {
+ goto dont_combine;
+ }
+ }
+#endif
if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
/*
* This one is OK. Let it go.
@@ -479,14 +651,15 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
int use_clustering,
int dma_host)
{
- struct buffer_head *bh;
- struct buffer_head *bhprev;
- char *buff;
- int count;
- int i;
- struct request *req;
- struct scatterlist *sgpnt;
- int this_count;
+ struct buffer_head * bh;
+ struct buffer_head * bhprev;
+ char * buff;
+ int count;
+ int i;
+ struct request * req;
+ int sectors;
+ struct scatterlist * sgpnt;
+ int this_count;
/*
* FIXME(eric) - don't inline this - it doesn't depend on the
@@ -516,7 +689,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
* First we need to know how many scatter gather segments are needed.
*/
if (!sg_count_valid) {
- count = __count_segments(req, use_clustering, dma_host);
+ count = __count_segments(req, use_clustering, dma_host, NULL);
} else {
count = req->nr_segments;
}
@@ -573,18 +746,34 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
bh; bh = bh->b_reqnext) {
if (use_clustering && bhprev != NULL) {
if (dma_host &&
- virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) {
+ virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
/* Nothing - fall through */
} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
/*
- * This one is OK. Let it go.
+ * This one is OK. Let it go. Note that we
+ * do not have the ability to allocate
+ * bounce buffer segments > PAGE_SIZE, so
+ * for now we limit the thing.
*/
- sgpnt[count - 1].length += bh->b_size;
- if (!dma_host) {
+ if( dma_host ) {
+#ifdef DMA_SEGMENT_SIZE_LIMITED
+ if( virt_to_phys(bh->b_data) - 1 < ISA_DMA_THRESHOLD
+ || sgpnt[count - 1].length + bh->b_size <= PAGE_SIZE ) {
+ sgpnt[count - 1].length += bh->b_size;
+ bhprev = bh;
+ continue;
+ }
+#else
+ sgpnt[count - 1].length += bh->b_size;
+ bhprev = bh;
+ continue;
+#endif
+ } else {
+ sgpnt[count - 1].length += bh->b_size;
SCpnt->request_bufflen += bh->b_size;
+ bhprev = bh;
+ continue;
}
- bhprev = bh;
- continue;
}
}
count++;
@@ -600,7 +789,10 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
* Verify that the count is correct.
*/
if (count != SCpnt->use_sg) {
- panic("Incorrect sg segment count");
+ printk("Incorrect number of segments after building list\n");
+#ifdef CONFIG_SCSI_DEBUG_QUEUES
+ dump_stats(req, use_clustering, dma_host, count);
+#endif
}
if (!dma_host) {
return 1;
@@ -610,9 +802,27 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
*/
SCpnt->request_bufflen = 0;
for (i = 0; i < count; i++) {
+ sectors = (sgpnt[i].length >> 9);
SCpnt->request_bufflen += sgpnt[i].length;
if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
- ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) {
+ ISA_DMA_THRESHOLD) {
+ if( scsi_dma_free_sectors - sectors <= 10 ) {
+ /*
+ * If this would nearly drain the DMA
+ * pool empty, then let's stop here.
+ * Don't make this request any larger.
+ * This is kind of a safety valve that
+ * we use - we could get screwed later
+ * on if we run out completely.
+ */
+ SCpnt->request_bufflen -= sgpnt[i].length;
+ SCpnt->use_sg = i;
+ if (i == 0) {
+ goto big_trouble;
+ }
+ break;
+ }
+
sgpnt[i].alt_address = sgpnt[i].address;
sgpnt[i].address =
(char *) scsi_malloc(sgpnt[i].length);
@@ -625,7 +835,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
SCpnt->request_bufflen -= sgpnt[i].length;
SCpnt->use_sg = i;
if (i == 0) {
- panic("DMA pool exhausted");
+ goto big_trouble;
}
break;
}
@@ -637,6 +847,64 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
}
return 1;
+ big_trouble:
+ /*
+ * We come here in the event that we get one humongous
+ * request, where we need a bounce buffer, and the buffer is
+ * more than we can allocate in a single call to
+ * scsi_malloc(). In addition, we only come here when it is
+ * the 0th element of the scatter-gather table that gets us
+ * into this trouble. As a fallback, we revert to
+ * non-scatter-gather and ask for a single segment. We make
+ * a half-hearted attempt to pick a reasonably large request
+ * size mainly so that we don't thrash the thing with
+ * itty-bitty requests.
+ */
+
+ /*
+ * The original number of sectors in the 0th element of the
+ * scatter-gather table.
+ */
+ sectors = sgpnt[0].length >> 9;
+
+ /*
+ * Free up the original scatter-gather table. Note that since
+ * it was the 0th element that got us here, we don't have to
+ * go in and free up memory from the other slots.
+ */
+ SCpnt->request_bufflen = 0;
+ SCpnt->use_sg = 0;
+ scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
+
+ /*
+ * Make an attempt to pick up as much as we reasonably can.
+ * Just keep adding sectors until the pool starts running kind of
+ * low. The limit of 30 is somewhat arbitrary - the point is that
+ * it would kind of suck if we dropped down and limited ourselves to
+ * single-block requests if we had hundreds of free sectors.
+ */
+ if( scsi_dma_free_sectors > 30 ) {
+ for (this_count = 0, bh = SCpnt->request.bh;
+ bh; bh = bh->b_reqnext) {
+ if( scsi_dma_free_sectors - this_count < 30
+ || this_count == sectors )
+ {
+ break;
+ }
+ this_count += bh->b_size >> 9;
+ }
+
+ } else {
+ /*
+ * Yow! Take the absolute minimum here.
+ */
+ this_count = SCpnt->request.current_nr_sectors;
+ }
+
+ /*
+ * Now drop through into the single-segment case.
+ */
+
single_segment:
/*
* Come here if for any reason we choose to do this as a single
@@ -660,7 +928,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
this_count = SCpnt->request.current_nr_sectors;
buff = (char *) scsi_malloc(this_count << 9);
if (!buff) {
- panic("Unable to allocate DMA buffer\n");
+ dma_exhausted(SCpnt, 0);
}
}
if (SCpnt->request.cmd == WRITE)
@@ -685,24 +953,11 @@ static int _FUNCTION(Scsi_Cmnd * SCpnt) \
* We always force "_VALID" to 1. Eventually clean this up
* and get rid of the extra argument.
*/
-#if 0
-/* Old definitions */
-INITIO(scsi_init_io_, 0, 0, 0)
-INITIO(scsi_init_io_d, 0, 0, 1)
-INITIO(scsi_init_io_c, 0, 1, 0)
-INITIO(scsi_init_io_dc, 0, 1, 1)
-
-/* Newer redundant definitions. */
-INITIO(scsi_init_io_, 1, 0, 0)
-INITIO(scsi_init_io_d, 1, 0, 1)
-INITIO(scsi_init_io_c, 1, 1, 0)
-INITIO(scsi_init_io_dc, 1, 1, 1)
-#endif
-
INITIO(scsi_init_io_v, 1, 0, 0)
INITIO(scsi_init_io_vd, 1, 0, 1)
INITIO(scsi_init_io_vc, 1, 1, 0)
INITIO(scsi_init_io_vdc, 1, 1, 1)
+
/*
* Function: initialize_merge_fn()
*
@@ -735,21 +990,12 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
* If this host has an unlimited tablesize, then don't bother with a
* merge manager. The whole point of the operation is to make sure
* that requests don't grow too large, and this host isn't picky.
- */
- if (SHpnt->sg_tablesize == SG_ALL) {
- if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_v;
- } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_vd;
- } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_vc;
- } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
- }
- return;
- }
- /*
- * Now pick out the correct function.
+ *
+ * Note that ll_rw_blk.c is effectively maintaining a segment
+ * count which is only valid if clustering is used, and it obviously
+ * doesn't handle the DMA case. In the end, it
+ * is simply easier to do it ourselves with our own functions
+ * rather than rely upon the default behavior of ll_rw_blk.
*/
if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
q->merge_fn = scsi_merge_fn_;