author     Ralf Baechle <ralf@linux-mips.org>    2000-02-18 00:24:27 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-02-18 00:24:27 +0000
commit     b9558d5f86c471a125abf1fb3a3882fb053b1f8c (patch)
tree       707b53ec64e740a7da87d5f36485e3cd9b1c794e /drivers/block
parent     b3ac367c7a3e6047abe74817db27e34e759f279f (diff)
Merge with Linux 2.3.41.
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/Config.in         |  21
-rw-r--r--  drivers/block/DAC960.c          |  51
-rw-r--r--  drivers/block/alim15x3.c        |   4
-rw-r--r--  drivers/block/amiflop.c         |   3
-rw-r--r--  drivers/block/buddha.c          |  36
-rw-r--r--  drivers/block/cmd64x.c          |   1
-rw-r--r--  drivers/block/hpt34x.c          |   3
-rw-r--r--  drivers/block/hpt366.c          |  28
-rw-r--r--  drivers/block/icside.c          |   8
-rw-r--r--  drivers/block/ide-disk.c        |   2
-rw-r--r--  drivers/block/ide-dma.c         | 199
-rw-r--r--  drivers/block/ide-pci.c         |  89
-rw-r--r--  drivers/block/ide-pmac.c        |   6
-rw-r--r--  drivers/block/ide-probe.c       |   1
-rw-r--r--  drivers/block/ide-tape.c        |  33
-rw-r--r--  drivers/block/ide.c             |  17
-rw-r--r--  drivers/block/ll_rw_blk.c       | 312
-rw-r--r--  drivers/block/ns87415.c         |   1
-rw-r--r--  drivers/block/paride/Config.in  |  11
-rw-r--r--  drivers/block/piix.c            | 104
-rw-r--r--  drivers/block/trm290.c          |   3
-rw-r--r--  drivers/block/z2ram.c           |   2
22 files changed, 462 insertions, 473 deletions
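
The central block-layer change in this merge is the move away from the global
max_segments[] tables toward per-queue merge callbacks: ll_rw_blk.c now installs
default merge_fn/merge_requests_fn handlers in blk_init_queue(), and a driver
such as DAC960 overrides them with its own segment limits. The fragment below is
a minimal illustrative sketch of that pattern, not part of the patch; the names
my_request_fn, my_init_queue and MY_MAX_SEGMENTS are invented for the example,
while the queue fields and callback signatures follow the interface visible in
the DAC960.c and ll_rw_blk.c hunks below.

    /* Sketch only: hooking the per-queue merge callbacks of the new block layer. */
    #include <linux/blkdev.h>
    #include <linux/fs.h>

    #define MY_MAX_SEGMENTS 32              /* assumed per-device segment limit */

    static void my_request_fn(request_queue_t *q)
    {
            /* dequeue and service requests here (driver specific) */
    }

    /* May the buffer head bh be merged into the tail of request req? */
    static int my_merge_fn(request_queue_t *q, struct request *req,
                           struct buffer_head *bh)
    {
            /* non-contiguous data means the merge would cost a new segment */
            if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
                    if (req->nr_segments >= MY_MAX_SEGMENTS)
                            return 0;       /* refuse the merge */
                    req->nr_segments++;
            }
            return 1;                       /* allow the merge */
    }

    /* May the two adjacent requests req and next be coalesced into one? */
    static int my_merge_requests_fn(request_queue_t *q, struct request *req,
                                    struct request *next)
    {
            int total = req->nr_segments + next->nr_segments;

            if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
                    total--;                /* the boundary segments coalesce */
            if (total > MY_MAX_SEGMENTS)
                    return 0;
            req->nr_segments = total;
            return 1;
    }

    static void my_init_queue(request_queue_t *q, void *driver_data)
    {
            blk_init_queue(q, my_request_fn);
            blk_queue_headactive(q, 0);
            q->merge_fn = my_merge_fn;
            q->merge_requests_fn = my_merge_requests_fn;
            q->queuedata = driver_data;     /* reachable from the callbacks */
    }

The point of the design change is visible in the diff: the segment limit is a
property of the driver and device, so it is enforced by driver-supplied
callbacks (with q->queuedata carrying the controller context) instead of a
block-layer-wide max_segments[] array.
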
diff --git a/drivers/block/Config.in b/drivers/block/Config.in
index e27fb0109..5634cc488 100644
--- a/drivers/block/Config.in
+++ b/drivers/block/Config.in
@@ -59,18 +59,20 @@ else
if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" -a "$CONFIG_BLK_DEV_AEC6210" = "y" ]; then
bool ' AEC6210 Tuning support (EXPERIMENTAL)' CONFIG_BLK_DEV_AEC6210_TUNING
fi
+ if [ "$CONFIG_BLK_DEV_IDEDMA_PCI" = "y" ]; then
+ bool ' ALI M15x3 chipset support' CONFIG_BLK_DEV_ALI15X3
+ if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
+ bool ' AMD Viper support (EXPERIMENTAL)' CONFIG_BLK_DEV_AMD7409
+ fi
+ fi
bool ' CMD64X chipset support' CONFIG_BLK_DEV_CMD64X
- if [ "$CONFIG_BLK_DEV_CMD64X" = "y" -a "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
+ if [ "$CONFIG_BLK_DEV_CMD64X" = "y" -a "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
bool ' CMD64X chipset RAID support (EXPERIMENTAL) (WIP)' CONFIG_BLK_DEV_CMD64X_RAID
fi
if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
bool ' CY82C693 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_CY82C693
fi
if [ "$CONFIG_BLK_DEV_IDEDMA_PCI" = "y" ]; then
- bool ' ALI M15x3 chipset support' CONFIG_BLK_DEV_ALI15X3
- if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
- bool ' AMD Viper support (EXPERIMENTAL)' CONFIG_BLK_DEV_AMD7409
- fi
bool ' HPT34X chipset support' CONFIG_BLK_DEV_HPT34X
if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" -a "$CONFIG_BLK_DEV_HPT34X" = "y" ]; then
bool ' HPT34X DMA support (EXPERIMENTAL)' CONFIG_BLK_DEV_HPT34X_DMA
@@ -218,15 +220,6 @@ if [ "$CONFIG_PCI" = "y" ]; then
tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' CONFIG_BLK_DEV_DAC960
fi
-# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
-# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option
-# controls the choices given to the user ...
-
-if [ "$CONFIG_PARPORT" = "y" -o "$CONFIG_PARPORT" = "n" ]; then
- define_tristate CONFIG_PARIDE_PARPORT y
-else
- define_tristate CONFIG_PARIDE_PARPORT m
-fi
dep_tristate 'Parallel port IDE device support' CONFIG_PARIDE $CONFIG_PARIDE_PARPORT
if [ "$CONFIG_PARIDE" = "y" -o "$CONFIG_PARIDE" = "m" ]; then
source drivers/block/paride/Config.in
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index b21c6c1a9..45e86000a 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1009,6 +1009,45 @@ static boolean DAC960_ReportDeviceConfiguration(DAC960_Controller_T *Controller)
}
+static int DAC_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh)
+{
+ int max_segments;
+ DAC960_Controller_T * Controller = q->queuedata;
+
+ max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+
+ if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
+ if (req->nr_segments < max_segments) {
+ req->nr_segments++;
+ return 1;
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+static int DAC_merge_requests_fn(request_queue_t *q,
+ struct request *req,
+ struct request *next)
+{
+ int max_segments;
+ DAC960_Controller_T * Controller = q->queuedata;
+ int total_segments = req->nr_segments + next->nr_segments;
+
+ max_segments = Controller->MaxSegmentsPerRequest[MINOR(req->rq_dev)];
+
+ if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+ total_segments--;
+
+ if (total_segments > max_segments)
+ return 0;
+
+ req->nr_segments = total_segments;
+ return 1;
+}
+
/*
DAC960_RegisterBlockDevice registers the Block Device structures
associated with Controller.
@@ -1016,6 +1055,8 @@ static boolean DAC960_ReportDeviceConfiguration(DAC960_Controller_T *Controller)
static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
{
+ request_queue_t * q;
+
static void (*RequestFunctions[DAC960_MaxControllers])(request_queue_t *) =
{ DAC960_RequestFunction0, DAC960_RequestFunction1,
DAC960_RequestFunction2, DAC960_RequestFunction3,
@@ -1036,8 +1077,13 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
/*
Initialize the I/O Request Function.
*/
- blk_init_queue(BLK_DEFAULT_QUEUE(MajorNumber),
- RequestFunctions[Controller->ControllerNumber]);
+ q = BLK_DEFAULT_QUEUE(MajorNumber);
+ blk_init_queue(q, RequestFunctions[Controller->ControllerNumber]);
+ blk_queue_headactive(q, 0);
+ q->merge_fn = DAC_merge_fn;
+ q->merge_requests_fn = DAC_merge_requests_fn;
+ q->queuedata = (void *) Controller;
+
/*
Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
array, Max Sectors per Request array, and Max Segments per Request array.
@@ -1054,7 +1100,6 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
Controller->GenericDiskInfo.sizes = Controller->PartitionSizes;
blksize_size[MajorNumber] = Controller->BlockSizes;
max_sectors[MajorNumber] = Controller->MaxSectorsPerRequest;
- max_segments[MajorNumber] = Controller->MaxSegmentsPerRequest;
/*
Initialize Read Ahead to 128 sectors.
*/
diff --git a/drivers/block/alim15x3.c b/drivers/block/alim15x3.c
index 7cecae978..4b9b28e3b 100644
--- a/drivers/block/alim15x3.c
+++ b/drivers/block/alim15x3.c
@@ -411,7 +411,11 @@ static byte ali15x3_can_ultra (ide_drive_t *drive)
{
struct hd_driveid *id = drive->id;
+#if 0
+ if (m5229_revision < 0x20) {
+#else
if (m5229_revision <= 0x20) {
+#endif
return 0;
} else if ((m5229_revision < 0xC2) &&
((drive->media!=ide_disk) ||
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index d9655d275..e6bf5fa0c 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1795,7 +1795,8 @@ int __init amiga_floppy_init(void)
return -EBUSY;
}
- if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE)) == NULL) {
+ if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
+ NULL) {
printk("fd: cannot get chip mem buffer\n");
unregister_blkdev(MAJOR_NR,"fd");
return -ENOMEM;
diff --git a/drivers/block/buddha.c b/drivers/block/buddha.c
index 8086dac56..e5862cd31 100644
--- a/drivers/block/buddha.c
+++ b/drivers/block/buddha.c
@@ -111,24 +111,28 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
static int find_buddha(void)
{
- u_int key;
- const struct ConfigDev *cd;
+ struct zorro_dev *z = NULL;
buddha_num_hwifs = 0;
- if ((key = zorro_find(ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA, 0, 0)))
- buddha_num_hwifs = BUDDHA_NUM_HWIFS;
- else if ((key = zorro_find(ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL, 0,
- 0)))
- buddha_num_hwifs = CATWEASEL_NUM_HWIFS;
- if (key) {
- cd = zorro_get_board(key);
- buddha_board = (u_long)cd->cd_BoardAddr;
- if (buddha_board) {
- buddha_board = ZTWO_VADDR(buddha_board);
- /* write to BUDDHA_IRQ_MR to enable the board IRQ */
- *(char *)(buddha_board+BUDDHA_IRQ_MR) = 0;
- zorro_config_board(key, 0);
- }
+ while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
+ unsigned long board;
+ const char *name;
+ if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) {
+ buddha_num_hwifs = BUDDHA_NUM_HWIFS;
+ name = "Buddha IDE Interface";
+ } else if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL) {
+ buddha_num_hwifs = CATWEASEL_NUM_HWIFS;
+ name = "Catweasel IDE Interface and Floppy Controller";
+ } else
+ continue;
+ board = z->resource.start;
+ if (!request_mem_region(board+BUDDHA_BASE1, 0x800, "IDE"))
+ continue;
+ strcpy(z->name, name);
+ buddha_board = ZTWO_VADDR(board);
+ /* write to BUDDHA_IRQ_MR to enable the board IRQ */
+ *(char *)(buddha_board+BUDDHA_IRQ_MR) = 0;
+ break;
}
return buddha_num_hwifs;
}
diff --git a/drivers/block/cmd64x.c b/drivers/block/cmd64x.c
index 6eaa0f3bb..6346f216e 100644
--- a/drivers/block/cmd64x.c
+++ b/drivers/block/cmd64x.c
@@ -228,6 +228,7 @@ static int cmd646_1_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
dma_stat = inb(dma_base+2); /* get DMA status */
outb(inb(dma_base)&~1, dma_base); /* stop DMA */
outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */
+ ide_destroy_dmatable(drive); /* and free any DMA resources */
return (dma_stat & 7) != 4; /* verify good DMA status */
}
diff --git a/drivers/block/hpt34x.c b/drivers/block/hpt34x.c
index f01d93152..10fe3ebc1 100644
--- a/drivers/block/hpt34x.c
+++ b/drivers/block/hpt34x.c
@@ -292,7 +292,7 @@ int hpt34x_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_write:
if (!(count = ide_build_dmatable(drive, func)))
return 1; /* try PIO instead of DMA */
- outl(virt_to_bus(hwif->dmatable), dma_base + 4); /* PRD table */
+ outl(hwif->dmatable_dma, dma_base + 4); /* PRD table */
reading |= 0x01;
outb(reading, dma_base); /* specify r/w */
outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
@@ -307,6 +307,7 @@ int hpt34x_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
outb(inb(dma_base)&~1, dma_base); /* stop DMA */
dma_stat = inb(dma_base+2); /* get DMA status */
outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */
+ ide_destroy_dmatable(drive); /* purge DMA mappings */
return (dma_stat & 7) != 4; /* verify good DMA status */
default:
break;
diff --git a/drivers/block/hpt366.c b/drivers/block/hpt366.c
index 1b497fecc..65c695183 100644
--- a/drivers/block/hpt366.c
+++ b/drivers/block/hpt366.c
@@ -30,7 +30,6 @@
#include "ide_modes.h"
const char *bad_ata66_4[] = {
- "QUANTUM FIREBALLP KA9.1",
"WDC AC310200R",
NULL
};
@@ -423,30 +422,17 @@ no_dma_set:
int hpt366_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
{
+ byte reg50h = 0;
+
switch (func) {
case ide_dma_check:
return config_drive_xfer_rate(drive);
+ case ide_dma_lostirq:
+ pci_read_config_byte(HWIF(drive)->pci_dev, 0x50, &reg50h);
+ pci_write_config_byte(HWIF(drive)->pci_dev, 0x50, reg50h|0x03);
+ pci_read_config_byte(HWIF(drive)->pci_dev, 0x50, &reg50h);
+ /* ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, NULL); */
case ide_dma_timeout:
- /* ide_do_reset(drive); */
-
- if (0) {
- byte reg50h = 0, reg52h = 0;
- (void) ide_dmaproc(ide_dma_off_quietly, drive);
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x50, &reg50h);
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x52, &reg52h);
- printk("%s: (ide_dma_timeout) reg52h=0x%02x\n", drive->name, reg52h);
- if (reg52h & 0x04) {
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x50, &reg50h);
- pci_write_config_byte(HWIF(drive)->pci_dev, 0x50, reg50h|0xff);
- pci_write_config_byte(HWIF(drive)->pci_dev, 0x50, reg50h);
- }
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x50, &reg50h);
- pci_read_config_byte(HWIF(drive)->pci_dev, 0x52, &reg52h);
- printk("%s: (ide_dma_timeout) reg50h=0x%02x reg52h=0x%02x :: again\n", drive->name, reg50h, reg52h);
- (void) ide_dmaproc(ide_dma_on, drive);
- if (reg52h & 0x04)
- (void) ide_dmaproc(ide_dma_off, drive);
- }
break;
default:
break;
diff --git a/drivers/block/icside.c b/drivers/block/icside.c
index d86a990f7..166d29abf 100644
--- a/drivers/block/icside.c
+++ b/drivers/block/icside.c
@@ -226,7 +226,7 @@ icside_build_dmatable(ide_drive_t *drive, int reading)
unsigned long addr, size;
unsigned char *virt_addr;
unsigned int count = 0;
- dmasg_t *sg = (dmasg_t *)HWIF(drive)->dmatable;
+ dmasg_t *sg = (dmasg_t *)HWIF(drive)->dmatable_cpu;
do {
if (bh == NULL) {
@@ -393,7 +393,7 @@ icside_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
*/
set_dma_speed(hwif->hw.dma, drive->drive_data);
- set_dma_sg(hwif->hw.dma, (dmasg_t *)hwif->dmatable, count);
+ set_dma_sg(hwif->hw.dma, (dmasg_t *)hwif->dmatable_cpu, count);
set_dma_mode(hwif->hw.dma, reading ? DMA_MODE_READ
: DMA_MODE_WRITE);
@@ -458,7 +458,7 @@ icside_setup_dma(ide_hwif_t *hwif, int autodma)
if (!table)
printk(" -- ERROR, unable to allocate DMA table\n");
else {
- hwif->dmatable = (void *)table;
+ hwif->dmatable_cpu = (void *)table;
hwif->dmaproc = icside_dmaproc;
hwif->autodma = autodma;
@@ -466,7 +466,7 @@ icside_setup_dma(ide_hwif_t *hwif, int autodma)
", auto-enable" : "");
}
- return hwif->dmatable != NULL;
+ return hwif->dmatable_cpu != NULL;
}
#endif
diff --git a/drivers/block/ide-disk.c b/drivers/block/ide-disk.c
index 14952a049..1209aa82a 100644
--- a/drivers/block/ide-disk.c
+++ b/drivers/block/ide-disk.c
@@ -572,7 +572,7 @@ static void idedisk_pre_reset (ide_drive_t *drive)
drive->special.b.recalibrate = 1;
if (OK_TO_RESET_CONTROLLER)
drive->mult_count = 0;
- if (!drive->keep_settings)
+ if (!drive->keep_settings && !drive->using_dma)
drive->mult_req = 0;
if (drive->mult_req != drive->mult_count)
drive->special.b.set_multmode = 1;
diff --git a/drivers/block/ide-dma.c b/drivers/block/ide-dma.c
index f265180ab..1e450b7e6 100644
--- a/drivers/block/ide-dma.c
+++ b/drivers/block/ide-dma.c
@@ -208,6 +208,31 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
return ide_error(drive, "dma_intr", stat);
}
+static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
+{
+ struct buffer_head *bh;
+ struct scatterlist *sg = hwif->sg_table;
+ int nents = 0;
+
+ bh = rq->bh;
+ do {
+ unsigned char *virt_addr = bh->b_data;
+ unsigned int size = bh->b_size;
+
+ while ((bh = bh->b_reqnext) != NULL) {
+ if ((virt_addr + size) != (unsigned char *) bh->b_data)
+ break;
+ size += bh->b_size;
+ }
+ memset(&sg[nents], 0, sizeof(*sg));
+ sg[nents].address = virt_addr;
+ sg[nents].length = size;
+ nents++;
+ } while (bh != NULL);
+
+ return pci_map_sg(hwif->pci_dev, sg, nents);
+}
+
/*
* ide_build_dmatable() prepares a dma request.
* Returns 0 if all went okay, returns 1 otherwise.
@@ -215,95 +240,70 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
*/
int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
{
- struct request *rq = HWGROUP(drive)->rq;
- struct buffer_head *bh = rq->bh;
- unsigned int size, addr, *table = (unsigned int *)HWIF(drive)->dmatable;
- unsigned char *virt_addr;
+ unsigned int *table = HWIF(drive)->dmatable_cpu;
#ifdef CONFIG_BLK_DEV_TRM290
unsigned int is_trm290_chipset = (HWIF(drive)->chipset == ide_trm290);
#else
const int is_trm290_chipset = 0;
#endif
unsigned int count = 0;
+ int i;
+ struct scatterlist *sg;
- do {
- /*
- * Determine addr and size of next buffer area. We assume that
- * individual virtual buffers are always composed linearly in
- * physical memory. For example, we assume that any 8kB buffer
- * is always composed of two adjacent physical 4kB pages rather
- * than two possibly non-adjacent physical 4kB pages.
- */
- if (bh == NULL) { /* paging requests have (rq->bh == NULL) */
- virt_addr = rq->buffer;
- addr = virt_to_bus (virt_addr);
- size = rq->nr_sectors << 9;
- } else {
- /* group sequential buffers into one large buffer */
- virt_addr = bh->b_data;
- addr = virt_to_bus (virt_addr);
- size = bh->b_size;
- while ((bh = bh->b_reqnext) != NULL) {
- if ((addr + size) != virt_to_bus (bh->b_data))
- break;
- size += bh->b_size;
- }
- }
- /*
- * Fill in the dma table, without crossing any 64kB boundaries.
- * Most hardware requires 16-bit alignment of all blocks,
- * but the trm290 requires 32-bit alignment.
- */
- if ((addr & 3)) {
- printk("%s: misaligned DMA buffer\n", drive->name);
- return 0;
- }
+ HWIF(drive)->sg_nents = i = ide_build_sglist(HWIF(drive), HWGROUP(drive)->rq);
- /*
- * Some CPUs without cache snooping need to invalidate/write
- * back their caches before DMA transfers to guarantee correct
- * data. -- rmk
- */
- if (size) {
- if (func == ide_dma_read) {
- dma_cache_inv((unsigned int)virt_addr, size);
- } else {
- dma_cache_wback((unsigned int)virt_addr, size);
- }
- }
+ sg = HWIF(drive)->sg_table;
+ while (i && sg_dma_len(sg)) {
+ u32 cur_addr;
+ u32 cur_len;
+
+ cur_addr = sg_dma_address(sg);
+ cur_len = sg_dma_len(sg);
- while (size) {
+ while (cur_len) {
if (++count >= PRD_ENTRIES) {
printk("%s: DMA table too small\n", drive->name);
+ pci_unmap_sg(HWIF(drive)->pci_dev,
+ HWIF(drive)->sg_table,
+ HWIF(drive)->sg_nents);
return 0; /* revert to PIO for this request */
} else {
- unsigned int xcount, bcount = 0x10000 - (addr & 0xffff);
- if (bcount > size)
- bcount = size;
- *table++ = cpu_to_le32(addr);
+ u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);
+
+ if (bcount > cur_len)
+ bcount = cur_len;
+ *table++ = cpu_to_le32(cur_addr);
xcount = bcount & 0xffff;
if (is_trm290_chipset)
xcount = ((xcount >> 2) - 1) << 16;
*table++ = cpu_to_le32(xcount);
- addr += bcount;
- size -= bcount;
+ cur_addr += bcount;
+ cur_len -= bcount;
}
}
- } while (bh != NULL);
- if (!count) {
- printk("%s: empty DMA table?\n", drive->name);
- } else {
- if (!is_trm290_chipset)
- *--table |= cpu_to_le32(0x80000000); /* set End-Of-Table (EOT) bit */
- /*
- * Some CPUs need to flush the DMA table to physical RAM
- * before DMA can start. -- rmk
- */
- dma_cache_wback((unsigned long)HWIF(drive)->dmatable, count * sizeof(unsigned int) * 2);
+
+ sg++;
+ i--;
}
+
+ if (!count)
+ printk("%s: empty DMA table?\n", drive->name);
+ else if (!is_trm290_chipset)
+ *--table |= cpu_to_le32(0x80000000);
+
return count;
}
+/* Teardown mappings after DMA has completed. */
+void ide_destroy_dmatable (ide_drive_t *drive)
+{
+ struct pci_dev *dev = HWIF(drive)->pci_dev;
+ struct scatterlist *sg = HWIF(drive)->sg_table;
+ int nents = HWIF(drive)->sg_nents;
+
+ pci_unmap_sg(dev, sg, nents);
+}
+
/*
* For both Blacklisted and Whitelisted drives.
* This is setup to be called as an extern for future support
@@ -413,7 +413,7 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_write:
if (!(count = ide_build_dmatable(drive, func)))
return 1; /* try PIO instead of DMA */
- outl(virt_to_bus(hwif->dmatable), dma_base + 4); /* PRD table */
+ outl(hwif->dmatable_dma, dma_base + 4); /* PRD table */
outb(reading, dma_base); /* specify r/w */
outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
drive->waiting_for_dma = 1;
@@ -434,6 +434,7 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
outb(inb(dma_base)&~1, dma_base); /* stop DMA */
dma_stat = inb(dma_base+2); /* get DMA status */
outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */
+ ide_destroy_dmatable(drive); /* purge DMA mappings */
return (dma_stat & 7) != 4; /* verify good DMA status */
case ide_dma_test_irq: /* returns 1 if dma irq issued, 0 otherwise */
dma_stat = inb(dma_base+2);
@@ -458,9 +459,16 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
*/
int ide_release_dma (ide_hwif_t *hwif)
{
- if (hwif->dmatable) {
- clear_page((void *)hwif->dmatable); /* clear PRD 1st */
- free_page((unsigned long)hwif->dmatable); /* free PRD 2nd */
+ if (hwif->dmatable_cpu) {
+ pci_free_consistent(hwif->pci_dev,
+ PRD_ENTRIES * PRD_BYTES,
+ hwif->dmatable_cpu,
+ hwif->dmatable_dma);
+ hwif->dmatable_cpu = NULL;
+ }
+ if (hwif->sg_table) {
+ kfree(hwif->sg_table);
+ hwif->sg_table = NULL;
}
if ((hwif->dma_extra) && (hwif->channel == 0))
release_region((hwif->dma_base + 16), hwif->dma_extra);
@@ -474,9 +482,6 @@ int ide_release_dma (ide_hwif_t *hwif)
void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_ports)
{
- static unsigned long dmatable = 0;
- static unsigned leftover = 0;
-
printk(" %s: BM-DMA at 0x%04lx-0x%04lx", hwif->name, dma_base, dma_base + num_ports - 1);
if (check_region(dma_base, num_ports)) {
printk(" -- ERROR, PORT ADDRESSES ALREADY IN USE\n");
@@ -484,31 +489,33 @@ void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_p
}
request_region(dma_base, num_ports, hwif->name);
hwif->dma_base = dma_base;
- if (leftover < (PRD_ENTRIES * PRD_BYTES)) {
- /*
- * The BM-DMA uses full 32bit addr, so we can
- * safely use __get_free_page() here instead
- * of __get_dma_pages() -- no ISA limitations.
- */
- dmatable = __get_free_pages(GFP_KERNEL,1);
- leftover = dmatable ? PAGE_SIZE : 0;
+ hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
+ PRD_ENTRIES * PRD_BYTES,
+ &hwif->dmatable_dma);
+ if (hwif->dmatable_cpu == NULL)
+ goto dma_alloc_failure;
+
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
+ GFP_KERNEL);
+ if (hwif->sg_table == NULL) {
+ pci_free_consistent(hwif->pci_dev, PRD_ENTRIES * PRD_BYTES,
+ hwif->dmatable_cpu, hwif->dmatable_dma);
+ goto dma_alloc_failure;
}
- if (!dmatable) {
- printk(" -- ERROR, UNABLE TO ALLOCATE PRD TABLE\n");
- } else {
- hwif->dmatable = (unsigned long *) dmatable;
- dmatable += (PRD_ENTRIES * PRD_BYTES);
- leftover -= (PRD_ENTRIES * PRD_BYTES);
- hwif->dmaproc = &ide_dmaproc;
-
- if (hwif->chipset != ide_trm290) {
- byte dma_stat = inb(dma_base+2);
- printk(", BIOS settings: %s:%s, %s:%s",
- hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
- hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
- }
- printk("\n");
+
+ hwif->dmaproc = &ide_dmaproc;
+
+ if (hwif->chipset != ide_trm290) {
+ byte dma_stat = inb(dma_base+2);
+ printk(", BIOS settings: %s:%s, %s:%s",
+ hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "pio",
+ hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio");
}
+ printk("\n");
+ return;
+
+dma_alloc_failure:
+ printk(" -- ERROR, UNABLE TO ALLOCATE DMA TABLES\n");
}
/*
diff --git a/drivers/block/ide-pci.c b/drivers/block/ide-pci.c
index 6ee45c49d..7f429eff2 100644
--- a/drivers/block/ide-pci.c
+++ b/drivers/block/ide-pci.c
@@ -29,8 +29,8 @@
#define DEVID_PIIXb ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371FB_1})
#define DEVID_PIIX3 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_1})
#define DEVID_PIIX4 ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB})
-#define DEVID_PIIX4E ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_1})
-#define DEVID_PIIX4U ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_1})
+#define DEVID_PIIX4E ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_1})
+#define DEVID_PIIX4U ((ide_pci_devid_t){PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_1})
#define DEVID_VIA_IDE ((ide_pci_devid_t){PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561})
#define DEVID_VP_IDE ((ide_pci_devid_t){PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1})
#define DEVID_PDC20246 ((ide_pci_devid_t){PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246})
@@ -696,70 +696,45 @@ static void __init hpt366_device_order_fixup (struct pci_dev *dev, ide_pci_devic
* ide_scan_pcibus() gets invoked at boot time from ide.c.
* It finds all PCI IDE controllers and calls ide_setup_pci_device for them.
*/
-void __init ide_forward_scan_pcibus (void)
+void __init ide_scan_pcidev (struct pci_dev *dev)
{
- struct pci_dev *dev;
ide_pci_devid_t devid;
ide_pci_device_t *d;
- pci_for_each_dev(dev) {
- devid.vid = dev->vendor;
- devid.did = dev->device;
- for (d = ide_pci_chipsets; d->devid.vid && !IDE_PCI_DEVID_EQ(d->devid, devid); ++d);
- if (d->init_hwif == IDE_IGNORE)
- printk("%s: ignored by ide_scan_pci_device() (uses own driver)\n", d->name);
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_OPTI621V) && !(PCI_FUNC(dev->devfn) & 1))
- continue; /* OPTI Viper-M uses same devid for functions 0 and 1 */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_CY82C693) && (!(PCI_FUNC(dev->devfn) & 1) || !((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)))
- continue; /* CY82C693 is more than only a IDE controller */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_UM8886A) && !(PCI_FUNC(dev->devfn) & 1))
- continue; /* UM8886A/BF pair */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_HPT366))
- hpt366_device_order_fixup(dev, d);
- else if (!IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL) || (dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
- if (IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL))
- printk("%s: unknown IDE controller on PCI bus %02x device %02x, VID=%04x, DID=%04x\n",
- d->name, dev->bus->number, dev->devfn, devid.vid, devid.did);
- else
- printk("%s: IDE controller on PCI bus %02x dev %02x\n", d->name, dev->bus->number, dev->devfn);
- ide_setup_pci_device(dev, d);
- }
+ devid.vid = dev->vendor;
+ devid.did = dev->device;
+ for (d = ide_pci_chipsets; d->devid.vid && !IDE_PCI_DEVID_EQ(d->devid, devid); ++d);
+ if (d->init_hwif == IDE_IGNORE)
+ printk("%s: ignored by ide_scan_pci_device() (uses own driver)\n", d->name);
+ else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_OPTI621V) && !(PCI_FUNC(dev->devfn) & 1))
+ return;
+ else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_CY82C693) && (!(PCI_FUNC(dev->devfn) & 1) || !((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)))
+ return; /* CY82C693 is more than only a IDE controller */
+ else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_UM8886A) && !(PCI_FUNC(dev->devfn) & 1))
+ return; /* UM8886A/BF pair */
+ else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_HPT366))
+ hpt366_device_order_fixup(dev, d);
+ else if (!IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL) || (dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+ if (IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL))
+ printk("%s: unknown IDE controller on PCI bus %02x device %02x, VID=%04x, DID=%04x\n",
+ d->name, dev->bus->number, dev->devfn, devid.vid, devid.did);
+ else
+ printk("%s: IDE controller on PCI bus %02x dev %02x\n", d->name, dev->bus->number, dev->devfn);
+ ide_setup_pci_device(dev, d);
}
}
-void __init ide_reverse_scan_pcibus (void)
+void __init ide_scan_pcibus (int scan_direction)
{
- struct pci_dev *dev;
- ide_pci_devid_t devid;
- ide_pci_device_t *d;
+ struct pci_dev *dev;
- pci_for_each_dev_reverse(dev) {
- devid.vid = dev->vendor;
- devid.did = dev->device;
- for (d = ide_pci_chipsets; d->devid.vid && !IDE_PCI_DEVID_EQ(d->devid, devid); ++d);
- if (d->init_hwif == IDE_IGNORE)
- printk("%s: ignored by ide_scan_pci_device() (uses own driver)\n", d->name);
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_OPTI621V) && !(PCI_FUNC(dev->devfn) & 1))
- continue; /* OPTI Viper-M uses same devid for functions 0 and 1 */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_CY82C693) && (!(PCI_FUNC(dev->devfn) & 1) || !((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)))
- continue; /* CY82C693 is more than only a IDE controller */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_UM8886A) && !(PCI_FUNC(dev->devfn) & 1))
- continue; /* UM8886A/BF pair */
- else if (IDE_PCI_DEVID_EQ(d->devid, DEVID_HPT366))
- hpt366_device_order_fixup(dev, d);
- else if (!IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL) || (dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
- if (IDE_PCI_DEVID_EQ(d->devid, IDE_PCI_DEVID_NULL))
- printk("%s: unknown IDE controller on PCI bus %02x device %02x, VID=%04x, DID=%04x\n",
- d->name, dev->bus->number, dev->devfn, devid.vid, devid.did);
- else
- printk("%s: IDE controller on PCI bus %02x dev %02x\n", d->name, dev->bus->number, dev->devfn);
- ide_setup_pci_device(dev, d);
+ if (!scan_direction) {
+ pci_for_each_dev(dev) {
+ ide_scan_pcidev(dev);
+ }
+ } else {
+ pci_for_each_dev_reverse(dev) {
+ ide_scan_pcidev(dev);
}
}
}
-
-void __init ide_scan_pcibus (int scan_direction)
-{
- if (!scan_direction) ide_forward_scan_pcibus();
- else ide_reverse_scan_pcibus();
-}
diff --git a/drivers/block/ide-pmac.c b/drivers/block/ide-pmac.c
index e1eadcaa2..e6947e560 100644
--- a/drivers/block/ide-pmac.c
+++ b/drivers/block/ide-pmac.c
@@ -241,9 +241,9 @@ pmac_ide_setup_dma(struct device_node *np, ide_hwif_t *hwif)
* The +2 is +1 for the stop command and +1 to allow for
* aligning the start address to a multiple of 16 bytes.
*/
- hwif->dmatable = (unsigned long *)
+ hwif->dmatable_cpu = (unsigned long *)
kmalloc((MAX_DCMDS + 2) * sizeof(struct dbdma_cmd), GFP_KERNEL);
- if (hwif->dmatable == 0) {
+ if (hwif->dmatable_cpu == 0) {
printk(KERN_ERR "%s: unable to allocate DMA command list\n",
hwif->name);
return;
@@ -271,7 +271,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, int wr)
volatile struct dbdma_regs *dma
= (volatile struct dbdma_regs *) hwif->dma_base;
- table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(hwif->dmatable);
+ table = tstart = (struct dbdma_cmd *) DBDMA_ALIGN(hwif->dmatable_cpu);
out_le32(&dma->control, (RUN|PAUSE|FLUSH|WAKE|DEAD) << 16);
do {
diff --git a/drivers/block/ide-probe.c b/drivers/block/ide-probe.c
index efb4705f3..1c6f19eba 100644
--- a/drivers/block/ide-probe.c
+++ b/drivers/block/ide-probe.c
@@ -285,6 +285,7 @@ static int do_probe (ide_drive_t *drive, byte cmd)
drive->name, drive->present, drive->media,
(cmd == WIN_IDENTIFY) ? "ATA" : "ATAPI");
#endif
+ ide_delay_50ms(); /* needed for some systems (e.g. crw9624 as drive0 with disk as slave) */
SELECT_DRIVE(hwif,drive);
ide_delay_50ms();
if (IN_BYTE(IDE_SELECT_REG) != drive->select.all && !drive->present) {
diff --git a/drivers/block/ide-tape.c b/drivers/block/ide-tape.c
index 429888f32..cba18bced 100644
--- a/drivers/block/ide-tape.c
+++ b/drivers/block/ide-tape.c
@@ -384,7 +384,7 @@
* sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
*/
-#define IDETAPE_VERSION "1.16e"
+#define IDETAPE_VERSION "1.16f"
#include <linux/config.h>
#include <linux/module.h>
@@ -409,6 +409,9 @@
#include <asm/unaligned.h>
#include <asm/bitops.h>
+
+#define NO_LONGER_REQUIRE (1)
+
/*
* OnStream support
*/
@@ -1735,7 +1738,7 @@ static void idetape_end_request (byte uptodate, ide_hwgroup_t *hwgroup)
aux = stage->aux;
p = stage->bh->b_data;
if (ntohl(aux->logical_blk_num) < 11300 && ntohl(aux->logical_blk_num) > 11100)
- printk(KERN_INFO "ide-tape: finished writing logical blk %lu (data %x %x %x %x)\n", ntohl(aux->logical_blk_num), *p++, *p++, *p++, *p++);
+ printk(KERN_INFO "ide-tape: finished writing logical blk %u (data %x %x %x %x)\n", ntohl(aux->logical_blk_num), *p++, *p++, *p++, *p++);
}
}
#endif
@@ -2695,14 +2698,14 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
goto abort;
if (clear)
memset(b_data, 0, PAGE_SIZE);
- if (bh->b_data == b_data + PAGE_SIZE && virt_to_bus (bh->b_data) == virt_to_bus (b_data) + PAGE_SIZE) {
+ if (bh->b_data == b_data + PAGE_SIZE) {
bh->b_size += PAGE_SIZE;
bh->b_data -= PAGE_SIZE;
if (full)
atomic_add(PAGE_SIZE, &bh->b_count);
continue;
}
- if (b_data == bh->b_data + bh->b_size && virt_to_bus (b_data) == virt_to_bus (bh->b_data) + bh->b_size) {
+ if (b_data == bh->b_data + bh->b_size) {
bh->b_size += PAGE_SIZE;
if (full)
atomic_add(PAGE_SIZE, &bh->b_count);
@@ -2851,7 +2854,7 @@ static void idetape_add_stage_tail (ide_drive_t *drive,idetape_stage_t *stage)
/*
* Initialize the OnStream AUX
*/
-static void idetape_init_stage(ide_drive_t *drive, idetape_stage_t *stage, int frame_type, int logical_blk_num)
+static void idetape_init_stage (ide_drive_t *drive, idetape_stage_t *stage, int frame_type, int logical_blk_num)
{
idetape_tape_t *tape = drive->driver_data;
os_aux_t *aux = stage->aux;
@@ -2965,7 +2968,7 @@ static ide_startstop_t idetape_read_position_callback (ide_drive_t *drive)
} else {
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 2)
- printk (KERN_INFO "ide-tape: Block Location - %lu\n", ntohl (result->first_block));
+ printk (KERN_INFO "ide-tape: Block Location - %u\n", ntohl (result->first_block));
#endif /* IDETAPE_DEBUG_LOG */
tape->partition = result->partition;
tape->first_frame_position = ntohl (result->first_block);
@@ -3358,7 +3361,7 @@ static void idetape_onstream_write_error_recovery (ide_drive_t *drive)
unsigned int block;
if (tape->onstream_write_error == 1) {
- printk(KERN_ERR "ide-tape: %s: detected physical bad block at %lu\n", tape->name, ntohl(tape->sense.information));
+ printk(KERN_ERR "ide-tape: %s: detected physical bad block at %u\n", tape->name, ntohl(tape->sense.information));
block = ntohl(tape->sense.information) + 80;
idetape_update_stats(drive);
printk(KERN_ERR "ide-tape: %s: relocating %d buffered logical blocks to physical block %u\n", tape->name, tape->cur_frames, block);
@@ -3490,7 +3493,7 @@ static int idetape_verify_stage (ide_drive_t *drive, idetape_stage_t *stage, int
return 0;
}
if (ntohl(aux->format_id) != 0) {
- printk(KERN_INFO "ide-tape: %s: skipping frame, format_id %lu\n", tape->name, ntohl(aux->format_id));
+ printk(KERN_INFO "ide-tape: %s: skipping frame, format_id %u\n", tape->name, ntohl(aux->format_id));
return 0;
}
if (memcmp(aux->application_sig, tape->application_sig, 4) != 0) {
@@ -3514,7 +3517,7 @@ static int idetape_verify_stage (ide_drive_t *drive, idetape_stage_t *stage, int
return 0;
}
if (ntohs(par->wrt_pass_cntr) != tape->wrt_pass_cntr) {
- printk(KERN_INFO "ide-tape: %s: skipping frame, wrt_pass_cntr %d (expected %d)(logical_blk_num %lu)\n", tape->name, ntohs(par->wrt_pass_cntr), tape->wrt_pass_cntr, ntohl(aux->logical_blk_num));
+ printk(KERN_INFO "ide-tape: %s: skipping frame, wrt_pass_cntr %d (expected %d)(logical_blk_num %u)\n", tape->name, ntohs(par->wrt_pass_cntr), tape->wrt_pass_cntr, ntohl(aux->logical_blk_num));
return 0;
}
if (aux->frame_seq_num != aux->logical_blk_num) {
@@ -3523,7 +3526,7 @@ static int idetape_verify_stage (ide_drive_t *drive, idetape_stage_t *stage, int
}
if (logical_blk_num != -1 && ntohl(aux->logical_blk_num) != logical_blk_num) {
if (!quiet)
- printk(KERN_INFO "ide-tape: %s: skipping frame, logical_blk_num %lu (expected %d)\n", tape->name, ntohl(aux->logical_blk_num), logical_blk_num);
+ printk(KERN_INFO "ide-tape: %s: skipping frame, logical_blk_num %u (expected %d)\n", tape->name, ntohl(aux->logical_blk_num), logical_blk_num);
return 0;
}
if (aux->frame_type == OS_FRAME_TYPE_MARKER) {
@@ -4492,7 +4495,8 @@ static void __idetape_write_header (ide_drive_t *drive, int block, int cnt)
idetape_position_tape(drive, block, 0, 0);
memset(&header, 0, sizeof(header));
strcpy(header.ident_str, "ADR_SEQ");
- header.major_rev = header.minor_rev = 2;
+ header.major_rev = 1;
+ header.minor_rev = 2;
header.par_num = 1;
header.partition.partition_num = OS_DATA_PARTITION;
header.partition.par_desc_ver = OS_PARTITION_VERSION;
@@ -5113,7 +5117,11 @@ static int idetape_analyze_headers (ide_drive_t *drive)
for (block = 5; block < 10; block++)
if (__idetape_analyze_headers(drive, block))
goto ok;
+#if 0
+ for (block = 0xbae; block < 0xbb8; block++)
+#else
for (block = 0xbae; block < 0xbb3; block++)
+#endif
if (__idetape_analyze_headers(drive, block))
goto ok;
printk(KERN_ERR "ide-tape: %s: failed to find valid ADRL header\n", tape->name);
@@ -5866,8 +5874,7 @@ int idetape_init (void)
ide_register_module (&idetape_module);
MOD_DEC_USE_COUNT;
#if ONSTREAM_DEBUG
- if (tape->debug_level >= 6)
- printk(KERN_INFO "ide-tape: MOD_DEC_USE_COUNT in idetape_init\n");
+ printk(KERN_INFO "ide-tape: MOD_DEC_USE_COUNT in idetape_init\n");
#endif
return 0;
}
diff --git a/drivers/block/ide.c b/drivers/block/ide.c
index 873f57cc9..396369651 100644
--- a/drivers/block/ide.c
+++ b/drivers/block/ide.c
@@ -655,14 +655,17 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
static void pre_reset (ide_drive_t *drive)
{
+ if (drive->driver != NULL)
+ DRIVER(drive)->pre_reset(drive);
+
if (!drive->keep_settings) {
- drive->unmask = 0;
- drive->io_32bit = 0;
- if (drive->using_dma)
+ if (drive->using_dma) {
(void) HWIF(drive)->dmaproc(ide_dma_off, drive);
+ } else {
+ drive->unmask = 0;
+ drive->io_32bit = 0;
+ }
}
- if (drive->driver != NULL)
- DRIVER(drive)->pre_reset(drive);
}
/*
@@ -901,7 +904,7 @@ ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat)
try_to_flush_leftover_data(drive);
}
if (GET_STAT() & (BUSY_STAT|DRQ_STAT))
- rq->errors |= ERROR_RESET; /* Mmmm.. timing problem */
+ OUT_BYTE(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG); /* force an abort */
if (rq->errors >= ERROR_MAX) {
if (drive->driver != NULL)
@@ -1825,7 +1828,7 @@ static void ide_init_module (int type)
revalidate_drives();
#ifdef CONFIG_KMOD
if (!found && type == IDE_PROBE_MODULE)
- (void) request_module("ide-probe");
+ (void) request_module("ide-probe-mod");
#endif /* CONFIG_KMOD */
}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index a54f40e00..731a2aece 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -118,11 +118,6 @@ int * max_readahead[MAX_BLKDEV] = { NULL, NULL, };
*/
int * max_sectors[MAX_BLKDEV] = { NULL, NULL, };
-/*
- * Max number of segments per request
- */
-int * max_segments[MAX_BLKDEV] = { NULL, NULL, };
-
static inline int get_max_sectors(kdev_t dev)
{
if (!max_sectors[MAJOR(dev)])
@@ -130,13 +125,6 @@ static inline int get_max_sectors(kdev_t dev)
return max_sectors[MAJOR(dev)][MINOR(dev)];
}
-static inline int get_max_segments(kdev_t dev)
-{
- if (!max_segments[MAJOR(dev)])
- return MAX_SEGMENTS;
- return max_segments[MAJOR(dev)][MINOR(dev)];
-}
-
/*
* Is called with the request spinlock aquired.
* NOTE: the device-specific queue() functions
@@ -167,24 +155,52 @@ void blk_queue_pluggable(request_queue_t * q, int use_plug)
q->use_plug = use_plug;
}
+static int ll_merge_fn(request_queue_t *q, struct request *req,
+ struct buffer_head *bh)
+{
+ if (req->bhtail->b_data + req->bhtail->b_size != bh->b_data) {
+ if (req->nr_segments < MAX_SEGMENTS) {
+ req->nr_segments++;
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+}
+
+static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+ struct request *next)
+{
+ int total_segments = req->nr_segments + next->nr_segments;
+
+ if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
+ total_segments--;
+
+ if (total_segments > MAX_SEGMENTS)
+ return 0;
+
+ req->nr_segments = total_segments;
+ return 1;
+}
+
void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
{
- q->request_fn = rfn;
- q->current_request = NULL;
- q->merge_fn = NULL;
- q->merge_requests_fn = NULL;
- q->plug_tq.sync = 0;
- q->plug_tq.routine = &unplug_device;
- q->plug_tq.data = q;
- q->plugged = 0;
+ q->request_fn = rfn;
+ q->current_request = NULL;
+ q->merge_fn = ll_merge_fn;
+ q->merge_requests_fn = ll_merge_requests_fn;
+ q->plug_tq.sync = 0;
+ q->plug_tq.routine = unplug_device;
+ q->plug_tq.data = q;
+ q->plugged = 0;
/*
* These booleans describe the queue properties. We set the
* default (and most common) values here. Other drivers can
* use the appropriate functions to alter the queue properties.
* as appropriate.
*/
- q->use_plug = 1;
- q->head_active = 1;
+ q->use_plug = 1;
+ q->head_active = 1;
}
/*
@@ -427,11 +443,9 @@ out:
*/
static inline void attempt_merge (request_queue_t * q,
struct request *req,
- int max_sectors,
- int max_segments)
+ int max_sectors)
{
struct request *next = req->next;
- int total_segments;
if (!next)
return;
@@ -439,29 +453,15 @@ static inline void attempt_merge (request_queue_t * q,
return;
if (next->sem || req->cmd != next->cmd || req->rq_dev != next->rq_dev || req->nr_sectors + next->nr_sectors > max_sectors)
return;
- total_segments = req->nr_segments + next->nr_segments;
- if (req->bhtail->b_data + req->bhtail->b_size == next->bh->b_data)
- total_segments--;
- if (total_segments > max_segments)
- return;
- if( q->merge_requests_fn != NULL )
- {
- /*
- * If we are not allowed to merge these requests, then
- * return. If we are allowed to merge, then the count
- * will have been updated to the appropriate number,
- * and we shouldn't do it here too.
- */
- if( !(q->merge_requests_fn)(q, req, next) )
- {
- return;
- }
- }
- else
- {
- req->nr_segments = total_segments;
- }
+ /*
+ * If we are not allowed to merge these requests, then
+ * return. If we are allowed to merge, then the count
+ * will have been updated to the appropriate number,
+ * and we shouldn't do it here too.
+ */
+ if(!(q->merge_requests_fn)(q, req, next))
+ return;
req->bhtail->b_reqnext = next->bh;
req->bhtail = next->bhtail;
@@ -478,7 +478,7 @@ static void __make_request(request_queue_t * q,
{
unsigned int sector, count;
struct request * req;
- int rw_ahead, max_req, max_sectors, max_segments;
+ int rw_ahead, max_req, max_sectors;
unsigned long flags;
count = bh->b_size >> 9;
@@ -570,7 +570,6 @@ static void __make_request(request_queue_t * q,
* Try to coalesce the new request with old requests
*/
max_sectors = get_max_sectors(bh->b_rdev);
- max_segments = get_max_segments(bh->b_rdev);
/*
* Now we acquire the request spinlock, we have to be mega careful
@@ -584,162 +583,88 @@ static void __make_request(request_queue_t * q,
major != DDV_MAJOR && major != NBD_MAJOR
&& q->use_plug)
plug_device(q); /* is atomic */
- } else switch (major) {
- /*
- * FIXME(eric) - this entire switch statement is going away
- * soon, and we will instead key off of q->head_active to decide
- * whether the top request in the queue is active on the device
- * or not.
- */
- case IDE0_MAJOR: /* same as HD_MAJOR */
- case IDE1_MAJOR:
- case FLOPPY_MAJOR:
- case IDE2_MAJOR:
- case IDE3_MAJOR:
- case IDE4_MAJOR:
- case IDE5_MAJOR:
- case IDE6_MAJOR:
- case IDE7_MAJOR:
- case IDE8_MAJOR:
- case IDE9_MAJOR:
- case ACSI_MAJOR:
- case MFM_ACORN_MAJOR:
+ goto get_rq;
+ }
+
+ if (q->head_active && !q->plugged) {
/*
* The scsi disk and cdrom drivers completely remove the request
* from the queue when they start processing an entry. For this
- * reason it is safe to continue to add links to the top entry for
- * those devices.
+ * reason it is safe to continue to add links to the top entry
+ * for those devices.
*
* All other drivers need to jump over the first entry, as that
- * entry may be busy being processed and we thus can't change it.
+ * entry may be busy being processed and we thus can't change
+ * it.
*/
- if (req == q->current_request)
- req = req->next;
- if (!req)
- break;
- /* fall through */
-
- case SCSI_DISK0_MAJOR:
- case SCSI_DISK1_MAJOR:
- case SCSI_DISK2_MAJOR:
- case SCSI_DISK3_MAJOR:
- case SCSI_DISK4_MAJOR:
- case SCSI_DISK5_MAJOR:
- case SCSI_DISK6_MAJOR:
- case SCSI_DISK7_MAJOR:
- case SCSI_CDROM_MAJOR:
- case DAC960_MAJOR+0:
- case DAC960_MAJOR+1:
- case DAC960_MAJOR+2:
- case DAC960_MAJOR+3:
- case DAC960_MAJOR+4:
- case DAC960_MAJOR+5:
- case DAC960_MAJOR+6:
- case DAC960_MAJOR+7:
- case I2O_MAJOR:
- case COMPAQ_SMART2_MAJOR+0:
- case COMPAQ_SMART2_MAJOR+1:
- case COMPAQ_SMART2_MAJOR+2:
- case COMPAQ_SMART2_MAJOR+3:
- case COMPAQ_SMART2_MAJOR+4:
- case COMPAQ_SMART2_MAJOR+5:
- case COMPAQ_SMART2_MAJOR+6:
- case COMPAQ_SMART2_MAJOR+7:
-
- do {
- if (req->sem)
- continue;
- if (req->cmd != rw)
- continue;
- if (req->nr_sectors + count > max_sectors)
- continue;
- if (req->rq_dev != bh->b_rdev)
+ if ((req = req->next) == NULL)
+ goto get_rq;
+ }
+
+ do {
+ if (req->sem)
+ continue;
+ if (req->cmd != rw)
+ continue;
+ if (req->nr_sectors + count > max_sectors)
+ continue;
+ if (req->rq_dev != bh->b_rdev)
+ continue;
+ /* Can we add it to the end of this request? */
+ if (req->sector + req->nr_sectors == sector) {
+ /*
+ * The merge_fn is a more advanced way
+ * of accomplishing the same task. Instead
+ * of applying a fixed limit of some sort
+ * we instead define a function which can
+ * determine whether or not it is safe to
+ * merge the request or not.
+ *
+ * See if this queue has rules that
+ * may suggest that we shouldn't merge
+ * this
+ */
+ if(!(q->merge_fn)(q, req, bh))
continue;
- /* Can we add it to the end of this request? */
- if (req->sector + req->nr_sectors == sector) {
- /*
- * The merge_fn is a more advanced way
- * of accomplishing the same task. Instead
- * of applying a fixed limit of some sort
- * we instead define a function which can
- * determine whether or not it is safe to
- * merge the request or not.
- */
- if( q->merge_fn == NULL )
- {
- if (req->bhtail->b_data + req->bhtail->b_size
- != bh->b_data) {
- if (req->nr_segments < max_segments)
- req->nr_segments++;
- else continue;
- }
- }
- else
- {
- /*
- * See if this queue has rules that
- * may suggest that we shouldn't merge
- * this
- */
- if( !(q->merge_fn)(q, req, bh) )
- {
- continue;
- }
- }
- req->bhtail->b_reqnext = bh;
- req->bhtail = bh;
- req->nr_sectors += count;
- drive_stat_acct(req, count, 0);
- /* Can we now merge this req with the next? */
- attempt_merge(q, req, max_sectors, max_segments);
- /* or to the beginning? */
- } else if (req->sector - count == sector) {
- /*
- * The merge_fn is a more advanced way
- * of accomplishing the same task. Instead
- * of applying a fixed limit of some sort
- * we instead define a function which can
- * determine whether or not it is safe to
- * merge the request or not.
- */
- if( q->merge_fn == NULL )
- {
- if (bh->b_data + bh->b_size
- != req->bh->b_data) {
- if (req->nr_segments < max_segments)
- req->nr_segments++;
- else continue;
- }
- }
- else
- {
- /*
- * See if this queue has rules that
- * may suggest that we shouldn't merge
- * this
- */
- if( !(q->merge_fn)(q, req, bh) )
- {
- continue;
- }
- }
- bh->b_reqnext = req->bh;
- req->bh = bh;
- req->buffer = bh->b_data;
- req->current_nr_sectors = count;
- req->sector = sector;
- req->nr_sectors += count;
- drive_stat_acct(req, count, 0);
- } else
+ req->bhtail->b_reqnext = bh;
+ req->bhtail = bh;
+ req->nr_sectors += count;
+ drive_stat_acct(req, count, 0);
+ /* Can we now merge this req with the next? */
+ attempt_merge(q, req, max_sectors);
+ /* or to the beginning? */
+ } else if (req->sector - count == sector) {
+ /*
+ * The merge_fn is a more advanced way
+ * of accomplishing the same task. Instead
+ * of applying a fixed limit of some sort
+ * we instead define a function which can
+ * determine whether or not it is safe to
+ * merge the request or not.
+ *
+ * See if this queue has rules that
+ * may suggest that we shouldn't merge
+ * this
+ */
+ if(!(q->merge_fn)(q, req, bh))
continue;
+ bh->b_reqnext = req->bh;
+ req->bh = bh;
+ req->buffer = bh->b_data;
+ req->current_nr_sectors = count;
+ req->sector = sector;
+ req->nr_sectors += count;
+ drive_stat_acct(req, count, 0);
+ } else
+ continue;
- spin_unlock_irqrestore(&io_request_lock,flags);
- return;
+ spin_unlock_irqrestore(&io_request_lock,flags);
+ return;
- } while ((req = req->next) != NULL);
- }
+ } while ((req = req->next) != NULL);
/* find an unused request. */
+get_rq:
req = get_request(max_req, bh->b_rdev);
spin_unlock_irqrestore(&io_request_lock,flags);
@@ -758,6 +683,7 @@ static void __make_request(request_queue_t * q,
req->nr_sectors = count;
req->current_nr_sectors = count;
req->nr_segments = 1; /* Always 1 for a new request. */
+ req->nr_hw_segments = 1; /* Always 1 for a new request. */
req->buffer = bh->b_data;
req->sem = NULL;
req->bh = bh;
diff --git a/drivers/block/ns87415.c b/drivers/block/ns87415.c
index 330943531..8b8bb3f60 100644
--- a/drivers/block/ns87415.c
+++ b/drivers/block/ns87415.c
@@ -89,6 +89,7 @@ static int ns87415_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
dma_stat = inb(hwif->dma_base+2);
outb(inb(hwif->dma_base)&~1, hwif->dma_base); /* stop DMA */
outb(inb(hwif->dma_base)|6, hwif->dma_base); /* from ERRATA: clear the INTR & ERROR bits */
+ ide_destroy_dmatable(drive); /* and free any DMA resources */
return (dma_stat & 7) != 4; /* verify good DMA status */
case ide_dma_write:
case ide_dma_read:
diff --git a/drivers/block/paride/Config.in b/drivers/block/paride/Config.in
index 8d4dc1742..28ef310f4 100644
--- a/drivers/block/paride/Config.in
+++ b/drivers/block/paride/Config.in
@@ -1,6 +1,17 @@
#
# PARIDE configuration
#
+
+# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
+# PARIDE must also be a module. The bogus CONFIG_PARIDE_PARPORT option
+# controls the choices given to the user ...
+
+if [ "$CONFIG_PARPORT" = "y" -o "$CONFIG_PARPORT" = "n" ]; then
+ define_tristate CONFIG_PARIDE_PARPORT y
+else
+ define_tristate CONFIG_PARIDE_PARPORT m
+fi
+
comment 'Parallel IDE high-level drivers'
dep_tristate ' Parallel port IDE disks' CONFIG_PARIDE_PD $CONFIG_PARIDE
dep_tristate ' Parallel port ATAPI CD-ROMs' CONFIG_PARIDE_PCD $CONFIG_PARIDE
diff --git a/drivers/block/piix.c b/drivers/block/piix.c
index b8ffb5a4b..64cf45853 100644
--- a/drivers/block/piix.c
+++ b/drivers/block/piix.c
@@ -33,11 +33,16 @@
*
* 4a 84|21 hdb|hda
* 4b 84|21 hdd|hdc
- *
- * 00|00 udma 0
- * 01|01 udma 1
- * 10|10 udma 2
- * 11|11 reserved
+ *
+ * ata-33/82371AB
+ * ata-33/82371EB
+ * ata-33/82801AB ata-66/82801AA
+ * 00|00 udma 0 00|00 reserved
+ * 01|01 udma 1 01|01 udma 3
+ * 10|10 udma 2 10|10 udma 4
+ * 11|11 reserved 11|11 reserved
+ *
+ * 54 8421|8421 ata66 drive|ata66 enable
*
* pci_read_config_word(HWIF(drive)->pci_dev, 0x40, &reg40);
* pci_read_config_word(HWIF(drive)->pci_dev, 0x42, &reg42);
@@ -195,71 +200,78 @@ static int piix_config_drive_for_dma (ide_drive_t *drive)
struct pci_dev *dev = hwif->pci_dev;
int sitre;
- short reg4042, reg44, reg48, reg4a;
+ short reg4042, reg44, reg48, reg4a, reg54;
byte speed;
- int u_speed;
byte maslave = hwif->channel ? 0x42 : 0x40;
byte udma_66 = ((id->hw_config & 0x2000) && (hwif->udma_four)) ? 1 : 0;
int ultra = ((dev->device == PCI_DEVICE_ID_INTEL_82371AB) ||
- (dev->device == PCI_DEVICE_ID_INTEL_82801AA_1)) ? 1 : 0;
- int ultra66 = (dev->device == PCI_DEVICE_ID_INTEL_82801AB_1) ? 1 : 0;
+ (dev->device == PCI_DEVICE_ID_INTEL_82801AB_1)) ? 1 : 0;
+ int ultra66 = (dev->device == PCI_DEVICE_ID_INTEL_82801AA_1) ? 1 : 0;
int drive_number = ((hwif->channel ? 2 : 0) + (drive->select.b.unit & 0x01));
int a_speed = 2 << (drive_number * 4);
int u_flag = 1 << drive_number;
+ int u_speed = 0;
pci_read_config_word(dev, maslave, &reg4042);
- sitre = (reg4042 & 0x4000) ? 1 : 0;
+ sitre = (reg4042 & 0x4000) ? 1 : 0;
pci_read_config_word(dev, 0x44, &reg44);
pci_read_config_word(dev, 0x48, &reg48);
pci_read_config_word(dev, 0x4a, &reg4a);
+ pci_read_config_word(dev, 0x54, &reg54);
- if (id->dma_ultra && (ultra)) {
- if (!(reg48 & u_flag)) {
- pci_write_config_word(dev, 0x48, reg48|u_flag);
- }
- } else {
- if (reg48 & u_flag) {
- pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
- }
- }
-
- if (((id->dma_ultra & 0x0010) || (id->dma_ultra & 0x0008) || (id->dma_ultra & 0x0004)) && (ultra)) {
+ if ((id->dma_ultra & 0x0010) && (ultra)) {
+ u_speed = 2 << (drive_number * 4);
+ speed = ((udma_66) && (ultra66)) ? XFER_UDMA_4 : XFER_UDMA_2;
+ } else if ((id->dma_ultra & 0x0008) && (ultra)) {
+ u_speed = 1 << (drive_number * 4);
+ speed = ((udma_66) && (ultra66)) ? XFER_UDMA_3 : XFER_UDMA_1;
+ } else if ((id->dma_ultra & 0x0004) && (ultra)) {
u_speed = 2 << (drive_number * 4);
- if (!(reg4a & u_speed)) {
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
- }
speed = XFER_UDMA_2;
} else if ((id->dma_ultra & 0x0002) && (ultra)) {
u_speed = 1 << (drive_number * 4);
- if (!(reg4a & u_speed)) {
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
- }
speed = XFER_UDMA_1;
} else if ((id->dma_ultra & 0x0001) && (ultra)) {
u_speed = 0 << (drive_number * 4);
- if (!(reg4a & u_speed)) {
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
- pci_write_config_word(dev, 0x4a, reg4a|u_speed);
- }
speed = XFER_UDMA_0;
} else if (id->dma_mword & 0x0004) {
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
speed = XFER_MW_DMA_2;
} else if (id->dma_mword & 0x0002) {
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
speed = XFER_MW_DMA_1;
} else if (id->dma_1word & 0x0004) {
- if (reg4a & a_speed)
- pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
speed = XFER_SW_DMA_2;
} else {
speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);
}
+ /*
+ * This is !@#$% ugly and stupid.............
+ * But ugly harware generates ugly code.........
+ */
+ if (speed >= XFER_UDMA_0) {
+ if (!(reg48 & u_flag))
+ pci_write_config_word(dev, 0x48, reg48|u_flag);
+ if (!(reg4a & u_speed)) {
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ pci_write_config_word(dev, 0x4a, reg4a|u_speed);
+ }
+ if ((speed > XFER_UDMA_2) && (!(reg54 & u_flag))) {
+ pci_write_config_word(dev, 0x54, reg54|u_flag);
+ } else {
+ pci_write_config_word(dev, 0x54, reg54 & ~u_flag);
+ }
+ }
+
+ if (speed < XFER_UDMA_0) {
+ if (reg48 & u_flag)
+ pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg54 & u_flag)
+ pci_write_config_word(dev, 0x54, reg54 & ~u_flag);
+ }
+
piix_tune_drive(drive, piix_dma_2_pio(speed));
(void) ide_config_drive_speed(drive, speed);
@@ -301,11 +313,21 @@ unsigned int __init pci_init_piix (struct pci_dev *dev, const char *name)
return 0;
}
+/*
+ * Sheesh, someone at Intel needs to go read the ATA-4/5 T13 standards.
+ * It does not specify device detection, but channel!!!
+ * You determine later if bit 13 of word93 is set...
+ */
unsigned int __init ata66_piix (ide_hwif_t *hwif)
{
- if (0)
- return 1;
- return 0;
+ byte reg54h = 0, reg55h = 0, ata66 = 0;
+ byte mask = hwif->channel ? 0x0c : 0x03;
+
+ pci_read_config_byte(hwif->pci_dev, 0x54, &reg54h);
+ pci_read_config_byte(hwif->pci_dev, 0x55, &reg55h);
+ ata66 = (reg54h & mask) ? 0 : 1;
+
+ return ata66;
}
void __init ide_init_piix (ide_hwif_t *hwif)
diff --git a/drivers/block/trm290.c b/drivers/block/trm290.c
index 1308c7bd7..4ec1d09c6 100644
--- a/drivers/block/trm290.c
+++ b/drivers/block/trm290.c
@@ -187,7 +187,7 @@ static int trm290_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
if (!(count = ide_build_dmatable(drive, func)))
break; /* try PIO instead of DMA */
trm290_prepare_drive(drive, 1); /* select DMA xfer */
- outl(virt_to_bus(hwif->dmatable)|reading|writing, hwif->dma_base);
+ outl(hwif->dmatable_dma|reading|writing, hwif->dma_base);
drive->waiting_for_dma = 1;
outw((count * 2) - 1, hwif->dma_base+2); /* start DMA */
if (drive->media != ide_disk)
@@ -199,6 +199,7 @@ static int trm290_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
return 0;
case ide_dma_end:
drive->waiting_for_dma = 0;
+ ide_destroy_dmatable(drive); /* purge DMA mappings */
return (inw(hwif->dma_base+2) != 0x00ff);
case ide_dma_test_irq:
return (inw(hwif->dma_base+2) == 0x00ff);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index dfd0b0199..e585bb34c 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -144,7 +144,7 @@ get_chipram( void )
{
chip_count++;
z2ram_map[ z2ram_size ] =
- (u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE );
+ (u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE, "z2ram" );
if ( z2ram_map[ z2ram_size ] == 0 )
{