/*
* scsi_dma.c Copyright (C) 2000 Eric Youngdale
*
* mid-level SCSI DMA bounce buffer allocator
*
*/
#define __NO_VERSION__
#include <linux/config.h>
#include <linux/module.h>
#include <linux/blk.h>
#include "scsi.h"
#include "hosts.h"
#include "constants.h"
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
/*
* PAGE_SIZE must be a multiple of the sector size (512). True
* for all reasonably recent architectures (even the VAX...).
*/
#define SECTOR_SIZE 512
#define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
#if SECTORS_PER_PAGE <= 8
typedef unsigned char FreeSectorBitmap;
#elif SECTORS_PER_PAGE <= 32
typedef unsigned int FreeSectorBitmap;
#else
#error Too many sectors per page for the FreeSectorBitmap types above
#endif
/*
* Used for access to internal allocator used for DMA safe buffers.
*/
static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
static FreeSectorBitmap *dma_malloc_freelist = NULL;
static int need_isa_bounce_buffers;
static unsigned int dma_sectors = 0;
unsigned int scsi_dma_free_sectors = 0;
unsigned int scsi_need_isa_buffer = 0;
static unsigned char **dma_malloc_pages = NULL;
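/*
 * Each pool page is tracked by one FreeSectorBitmap word in
 * dma_malloc_freelist: bit n set means that sector n of the corresponding
 * page in dma_malloc_pages is in use.  For example, with 4 KB pages
 * (SECTORS_PER_PAGE == 8) a 1024-byte allocation occupies two adjacent
 * bits, so a page whose bitmap reads 0x03 has its first two sectors busy
 * and the remaining six free.
 */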
/*
* Function: scsi_malloc
*
* Purpose: Allocate memory from the DMA-safe pool.
*
* Arguments: len - amount of memory we need.
*
* Lock status: No locks assumed to be held. This function is SMP-safe.
*
* Returns: Pointer to memory block.
*
* Notes: Prior to the new queue code, this function was not SMP-safe.
* This function can only allocate in units of sectors
* (i.e. 512 bytes).
*
* We cannot use the normal system allocator because we need
* to be able to guarantee that we can process a complete disk
* I/O request without touching the system allocator. Think
* about it - if the system were heavily swapping, and tried to
* write out a block of memory to disk, and the SCSI code needed
* to allocate more memory in order to be able to write the
* data to disk, you would wedge the system.
*/
void *scsi_malloc(unsigned int len)
{
	unsigned int nbits, mask;
	unsigned long flags;
	int i, j;

	if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
		return NULL;

	nbits = len >> 9;
	mask = (1 << nbits) - 1;

	spin_lock_irqsave(&allocator_request_lock, flags);
	for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
		for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
			if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
				dma_malloc_freelist[i] |= (mask << j);
				scsi_dma_free_sectors -= nbits;
#ifdef DEBUG
				SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9), __builtin_return_address(0)));
				printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9), __builtin_return_address(0));
#endif
				spin_unlock_irqrestore(&allocator_request_lock, flags);
				return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
			}
		}
	spin_unlock_irqrestore(&allocator_request_lock, flags);
	return NULL;		/* Nope. No more */
}
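/*
 * Illustrative sketch only (not part of this file's interface): how a
 * caller would typically use the pool.  Requests must be a multiple of
 * 512 bytes, no larger than PAGE_SIZE, and each scsi_malloc() must be
 * matched by a scsi_free() of the same length.  The function name
 * example_use_dma_pool is hypothetical.
 */
#if 0
static int example_use_dma_pool(void)
{
	/* Two sectors (1024 bytes) out of the DMA-safe pool. */
	void *buf = scsi_malloc(1024);

	if (!buf)
		return -ENOMEM;	/* pool exhausted; caller must retry later */

	/* ... set up a command that DMAs into buf ... */

	/* Length must match the original allocation exactly. */
	scsi_free(buf, 1024);
	return 0;
}
#endif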
/*
* Function: scsi_free
*
* Purpose: Free memory into the DMA-safe pool.
*
* Arguments: ptr - data block we are freeing.
* len - size of block we are freeing.
*
* Lock status: No locks assumed to be held. This function is SMP-safe.
*
* Returns: Zero on success (panics on an invalid or misaligned free).
*
* Notes: This function *must* only be used to free memory
* allocated from scsi_malloc().
*
* Prior to the new queue code, this function was not SMP-safe.
* This function can only free in units of sectors
* (i.e. 512 bytes).
*/
int scsi_free(void *obj, unsigned int len)
{
	unsigned int page, sector, nbits, mask;
	unsigned long flags;

#ifdef DEBUG
	unsigned long ret = 0;

#ifdef __mips__
	__asm__ __volatile__("move\t%0,$31":"=r"(ret));
#else
	ret = (unsigned long) __builtin_return_address(0);
#endif
	printk("scsi_free %p %d\n", obj, len);
	SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
#endif

	spin_lock_irqsave(&allocator_request_lock, flags);
	for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
		unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
		if ((unsigned long) obj >= page_addr &&
		    (unsigned long) obj < page_addr + PAGE_SIZE) {
			sector = (((unsigned long) obj) - page_addr) >> 9;
			nbits = len >> 9;
			mask = (1 << nbits) - 1;

			if ((mask << sector) >= (1 << SECTORS_PER_PAGE))
				panic("scsi_free:Bad memory alignment");

			if ((dma_malloc_freelist[page] &
			     (mask << sector)) != (mask << sector)) {
#ifdef DEBUG
				printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
				       obj, len, ret);
#endif
				panic("scsi_free:Trying to free unused memory");
			}
			scsi_dma_free_sectors += nbits;
			dma_malloc_freelist[page] &= ~(mask << sector);
			spin_unlock_irqrestore(&allocator_request_lock, flags);
			return 0;
		}
	}
	panic("scsi_free:Bad offset");
}
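/*
 * Worked example of the arithmetic above: freeing a 1024-byte block that
 * starts 0x400 bytes into one of the pool pages gives sector = 0x400 >> 9
 * = 2, nbits = 1024 >> 9 = 2 and mask = 0x3.  Bits 2 and 3 (0x0c) must
 * all be set in that page's bitmap; they are cleared and
 * scsi_dma_free_sectors is increased by 2.
 */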
/*
* Function: scsi_resize_dma_pool
*
* Purpose: Ensure that the DMA pool is sufficiently large to be
* able to guarantee that we can always process I/O requests
* without calling the system allocator.
*
* Arguments: None.
*
* Lock status: No locks assumed to be held. This function is SMP-safe.
*
* Returns: Nothing
*
* Notes: Prior to the new queue code, this function was not SMP-safe.
* Go through the device list and recompute the most appropriate
* size for the dma pool. Then grab more memory (as required).
*/
void scsi_resize_dma_pool(void)
{
	int i, k;
	unsigned long size;
	unsigned long flags;
	struct Scsi_Host *shpnt;
	struct Scsi_Host *host = NULL;
	Scsi_Device *SDpnt;
	FreeSectorBitmap *new_dma_malloc_freelist = NULL;
	unsigned int new_dma_sectors = 0;
	unsigned int new_need_isa_buffer = 0;
	unsigned char **new_dma_malloc_pages = NULL;
	int out_of_space = 0;

	spin_lock_irqsave(&allocator_request_lock, flags);

	if (!scsi_hostlist) {
		/*
		 * Free up the DMA pool.
		 */
		if (scsi_dma_free_sectors != dma_sectors)
			panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);

		for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
			free_pages((unsigned long) dma_malloc_pages[i], 0);
		if (dma_malloc_pages)
			kfree((char *) dma_malloc_pages);
		dma_malloc_pages = NULL;
		if (dma_malloc_freelist)
			kfree((char *) dma_malloc_freelist);
		dma_malloc_freelist = NULL;
		dma_sectors = 0;
		scsi_dma_free_sectors = 0;
		spin_unlock_irqrestore(&allocator_request_lock, flags);
		return;
	}
	/* Next, check to see if we need to extend the DMA buffer pool */

	new_dma_sectors = 2 * SECTORS_PER_PAGE;		/* Base value we use */

	if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
		need_isa_bounce_buffers = 1;
	else
		need_isa_bounce_buffers = 0;

	if (scsi_devicelist)
		for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
			new_dma_sectors += SECTORS_PER_PAGE;	/* Increment for each host */

	for (host = scsi_hostlist; host; host = host->next) {
		for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
			/*
			 * sd and sr drivers allocate scatterlists.
			 * sr drivers may allocate for each command 1x2048 or 2x1024 extra
			 * buffers for 2k sector size and 1k fs.
			 * sg driver allocates buffers < 4k.
			 * st driver does not need buffers from the dma pool.
			 * estimate 4k buffer/command for devices of unknown type (should panic).
			 */
			if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
			    SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
				int nents = host->sg_tablesize;
#ifdef DMA_CHUNK_SIZE
				/* If the architecture does DMA sg merging, make sure
				   we count with at least 64 entries even for HBAs
				   which handle very few sg entries. */
				if (nents < 64)
					nents = 64;
#endif
				new_dma_sectors += ((nents *
				    sizeof(struct scatterlist) + 511) >> 9) *
				    SDpnt->queue_depth;
				if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
					new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
			} else if (SDpnt->type == TYPE_SCANNER ||
				   SDpnt->type == TYPE_PROCESSOR ||
				   SDpnt->type == TYPE_MEDIUM_CHANGER ||
				   SDpnt->type == TYPE_ENCLOSURE) {
				new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
			} else {
				if (SDpnt->type != TYPE_TAPE) {
					printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
					new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
				}
			}

			if (host->unchecked_isa_dma &&
			    need_isa_bounce_buffers &&
			    SDpnt->type != TYPE_TAPE) {
				new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
				    SDpnt->queue_depth;
				new_need_isa_buffer++;
			}
		}
	}
#ifdef DEBUG_INIT
	printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
#endif

	/* Round up to a multiple of 16 sectors; the mask also limits
	   DMA memory to 32MB: */
	new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;

	/*
	 * We never shrink the buffers - this leads to
	 * race conditions that I would rather not even think
	 * about right now.
	 */
#if 0				/* Why do this? No gain and risks out_of_space */
	if (new_dma_sectors < dma_sectors)
		new_dma_sectors = dma_sectors;
#endif
	if (new_dma_sectors <= dma_sectors) {
		spin_unlock_irqrestore(&allocator_request_lock, flags);
		return;		/* best to quit while we are in front */
	}
	for (k = 0; k < 20; ++k) {	/* just in case */
		out_of_space = 0;
		size = (new_dma_sectors / SECTORS_PER_PAGE) *
		    sizeof(FreeSectorBitmap);
		new_dma_malloc_freelist = (FreeSectorBitmap *)
		    kmalloc(size, GFP_ATOMIC);
		if (new_dma_malloc_freelist) {
			memset(new_dma_malloc_freelist, 0, size);
			size = (new_dma_sectors / SECTORS_PER_PAGE) *
			    sizeof(*new_dma_malloc_pages);
			new_dma_malloc_pages = (unsigned char **)
			    kmalloc(size, GFP_ATOMIC);
			if (!new_dma_malloc_pages) {
				size = (new_dma_sectors / SECTORS_PER_PAGE) *
				    sizeof(FreeSectorBitmap);
				kfree((char *) new_dma_malloc_freelist);
				out_of_space = 1;
			} else {
				memset(new_dma_malloc_pages, 0, size);
			}
		} else
			out_of_space = 1;

		if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
			for (i = dma_sectors / SECTORS_PER_PAGE;
			     i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
				new_dma_malloc_pages[i] = (unsigned char *)
				    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
				if (!new_dma_malloc_pages[i])
					break;
			}
			if (i != new_dma_sectors / SECTORS_PER_PAGE) {	/* clean up */
				int k = i;

				out_of_space = 1;
				for (i = 0; i < k; ++i)
					free_pages((unsigned long) new_dma_malloc_pages[i], 0);
			}
		}
		if (out_of_space) {	/* try scaling down new_dma_sectors request */
			printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
			       "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
			if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
				break;	/* pretty well hopeless ... */
			new_dma_sectors = (new_dma_sectors * 3) / 4;
			new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
			if (new_dma_sectors <= dma_sectors)
				break;	/* stick with what we have got */
		} else
			break;	/* found space ... */
	}			/* end of for loop */
	if (out_of_space) {
		spin_unlock_irqrestore(&allocator_request_lock, flags);
		scsi_need_isa_buffer = new_need_isa_buffer;	/* some useful info */
		printk(" WARNING, not enough memory, pool not expanded\n");
		return;
	}
	/* While we swap in the new DMA lists, the lock must protect them. */
	if (dma_malloc_freelist) {
		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
		memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
		kfree((char *) dma_malloc_freelist);
	}
	dma_malloc_freelist = new_dma_malloc_freelist;

	if (dma_malloc_pages) {
		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
		memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
		kfree((char *) dma_malloc_pages);
	}
	scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
	dma_malloc_pages = new_dma_malloc_pages;
	dma_sectors = new_dma_sectors;
	scsi_need_isa_buffer = new_need_isa_buffer;

	spin_unlock_irqrestore(&allocator_request_lock, flags);

#ifdef DEBUG_INIT
	printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors);
	printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
	printk("resize_dma_pool: need isa buffers = %d\n", scsi_need_isa_buffer);
#endif
}
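/*
 * Rough worked example of the sizing above (the numbers are illustrative
 * assumptions, not taken from a real configuration): with 4 KB pages, one
 * host with sg_tablesize = 64, a single disk with queue_depth = 2, no ISA
 * bounce buffering, and assuming sizeof(struct scatterlist) == 16:
 *
 *   base value                2 * 8                      = 16 sectors
 *   per-host increment        8                          =  8 sectors
 *   scatterlist space         ((64 * 16 + 511) >> 9) * 2 =  4 sectors
 *                                                          ----------
 *                                                           28 sectors
 *
 * which the rounding step then raises to 32 sectors (a multiple of 16).
 */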
/*
* Function: scsi_init_minimal_dma_pool
*
* Purpose: Allocate a minimal (1-page) DMA pool.
*
* Arguments: None.
*
* Lock status: No locks assumed to be held. This function is SMP-safe.
*
* Returns: Nothing
*
* Notes:
*/
int scsi_init_minimal_dma_pool(void)
{
	unsigned long size;
	unsigned long flags;
	int has_space = 0;

	spin_lock_irqsave(&allocator_request_lock, flags);
	dma_sectors = PAGE_SIZE / SECTOR_SIZE;
	scsi_dma_free_sectors = dma_sectors;

	/*
	 * Set up a minimal DMA buffer list - this will be used during scan_scsis
	 * in some cases.
	 */

	/* One bit per sector to indicate free/busy */
	size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
	dma_malloc_freelist = (FreeSectorBitmap *)
	    kmalloc(size, GFP_ATOMIC);
	if (dma_malloc_freelist) {
		memset(dma_malloc_freelist, 0, size);
		/* One pointer per page for the page list */
		size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
		dma_malloc_pages = (unsigned char **) kmalloc(size, GFP_ATOMIC);
		if (dma_malloc_pages) {
			memset(dma_malloc_pages, 0, size);
			dma_malloc_pages[0] = (unsigned char *)
			    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
			if (dma_malloc_pages[0])
				has_space = 1;
		}
	}
	if (!has_space) {
		if (dma_malloc_pages)
			kfree((char *) dma_malloc_pages);
		dma_malloc_pages = NULL;
		if (dma_malloc_freelist)
			kfree((char *) dma_malloc_freelist);
		dma_malloc_freelist = NULL;
		dma_sectors = 0;
		scsi_dma_free_sectors = 0;
		spin_unlock_irqrestore(&allocator_request_lock, flags);
		printk("scsi::init_module: failed, out of memory\n");
		return 1;
	}
	spin_unlock_irqrestore(&allocator_request_lock, flags);
	return 0;
}
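/*
 * Note on how the two entry points relate (per the comments above):
 * scsi_init_minimal_dma_pool() sets up a single-page pool so that early
 * device scanning has something to allocate from, and
 * scsi_resize_dma_pool() is expected to be called once hosts and devices
 * are known, growing (never shrinking) the pool to suit them.
 */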