summaryrefslogtreecommitdiffstats
path: root/drivers/i2o
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-05-12 21:05:59 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-05-12 21:05:59 +0000
commitba2dacab305c598cd4c34a604f8e276bf5bab5ff (patch)
tree78670a0139bf4d5ace617b29b7eba82bbc74d602 /drivers/i2o
parentb77bf69998121e689c5e86cc5630d39a0a9ee6ca (diff)
Merge with Linux 2.3.99-pre7 and various other bits.
Diffstat (limited to 'drivers/i2o')
-rw-r--r--drivers/i2o/i2o_block.c12
-rw-r--r--drivers/i2o/i2o_config.c4
-rw-r--r--drivers/i2o/i2o_core.c451
-rw-r--r--drivers/i2o/i2o_lan.c264
-rw-r--r--drivers/i2o/i2o_lan.h9
-rw-r--r--drivers/i2o/i2o_pci.c31
6 files changed, 378 insertions, 393 deletions
diff --git a/drivers/i2o/i2o_block.c b/drivers/i2o/i2o_block.c
index 566e94b3b..c365b0a12 100644
--- a/drivers/i2o/i2o_block.c
+++ b/drivers/i2o/i2o_block.c
@@ -25,11 +25,17 @@
* Independent queues per IOP
* Support for dynamic device creation/deletion
* Code cleanup
- * Support for larger I/Os through merge* functions
- * (taken from DAC960 driver)
+ * Support for larger I/Os through merge* functions
+ * (taken from DAC960 driver)
+ * Boji T Kannanthanam:
+ * Reduced the timeout during RAID 5 creation.
+ * This is to prevent race condition when a RAID volume
+ * is created and immediately deleted.
*
* To do:
* Serial number scanning to find duplicates for FC multipathing
+ * Remove the random timeout in the code needed for RAID 5
+ * volume creation.
*/
#include <linux/major.h>
@@ -1376,7 +1382,7 @@ void i2ob_new_device(struct i2o_controller *c, struct i2o_device *d)
* so we just sleep for a little while and let it do it's thing
*/
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(10*HZ);
+ schedule_timeout(3*HZ);
if(i2o_claim_device(d, &i2o_block_handler))
{
diff --git a/drivers/i2o/i2o_config.c b/drivers/i2o/i2o_config.c
index 0202afb67..5f51f6ca1 100644
--- a/drivers/i2o/i2o_config.c
+++ b/drivers/i2o/i2o_config.c
@@ -515,7 +515,7 @@ int ioctl_html(unsigned long arg)
msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
msg[5] = 0x50000000|65536;
msg[7] = 0xD4000000|(kcmd.qlen);
- msg[8] = virt_to_phys(query);
+ msg[8] = virt_to_bus(query);
}
token = i2o_post_wait(c, msg, 9*4, 10);
@@ -592,7 +592,7 @@ int ioctl_swdl(unsigned long arg)
msg[5]= swlen;
msg[6]= kxfer.sw_id;
msg[7]= (0xD0000000 | fragsize);
- msg[8]= virt_to_phys(buffer);
+ msg[8]= virt_to_bus(buffer);
// printk("i2o_config: swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
status = i2o_post_wait(c, msg, sizeof(msg), 60);
diff --git a/drivers/i2o/i2o_core.c b/drivers/i2o/i2o_core.c
index 05ed6d851..80d001468 100644
--- a/drivers/i2o/i2o_core.c
+++ b/drivers/i2o/i2o_core.c
@@ -18,6 +18,7 @@
* Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
* Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
* Deepak Saxena <deepak@plexity.net>
+ * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
*
*/
@@ -70,12 +71,11 @@ static int core_context = 0;
/* Initialization && shutdown functions */
static void i2o_sys_init(void);
static void i2o_sys_shutdown(void);
-static int i2o_clear_controller(struct i2o_controller *);
+static int i2o_reset_controller(struct i2o_controller *);
static int i2o_reboot_event(struct notifier_block *, unsigned long , void *);
static int i2o_online_controller(struct i2o_controller *);
static int i2o_init_outbound_q(struct i2o_controller *);
static int i2o_post_outbound_messages(struct i2o_controller *);
-static int i2o_issue_claim(struct i2o_controller *, int, int, int, u32);
/* Reply handler */
static void i2o_core_reply(struct i2o_handler *, struct i2o_controller *,
@@ -111,6 +111,14 @@ static struct i2o_sys_tbl *sys_tbl = NULL;
static int sys_tbl_ind = 0;
static int sys_tbl_len = 0;
+/*
+ * This spin lock is used to keep a device from being
+ * added and deleted concurrently across CPUs or interrupts.
+ * This can occur when a user creates a device and immediately
+ * deletes it before the new_dev_notify() handler is called.
+ */
+static spinlock_t i2o_dev_lock = SPIN_LOCK_UNLOCKED;
+
#ifdef MODULE
/*
* Function table to send to bus specific layers
@@ -214,24 +222,12 @@ void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
u32 status;
u32 context = msg[2];
-#if 0
- i2o_report_status(KERN_INFO, "i2o_core", msg);
-#endif
-
- if (msg[0] & (1<<13)) // Fail bit is set
+ if (msg[0] & MSG_FAIL) // Fail bit is set
{
u32 *preserved_msg = (u32*)(c->mem_offset + msg[7]);
-// i2o_report_failure(KERN_INFO, c, "i2o_core", msg);
- printk(KERN_ERR "%s: Failed to process the msg:\n", c->name);
- printk(KERN_ERR " Cmd = 0x%02X, InitiatorTid = %d, TargetTid =% d\n",
- (msg[1] >> 24) & 0xFF, (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
- printk(KERN_ERR " FailureCode = 0x%02X\n Severity = 0x%02X\n"
- "LowestVersion = 0x%02X\n HighestVersion = 0x%02X\n",
- msg[4] >> 24, (msg[4] >> 16) & 0xFF,
- (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
- printk(KERN_ERR " FailingHostUnit = 0x%04X\n FailingIOP = 0x%03X\n",
- msg[5] >> 16, msg[5] & 0xFFF);
+ i2o_report_status(KERN_INFO, "i2o_core", msg);
+ i2o_dump_message(preserved_msg);
/* If the failed request needs special treatment,
* it should be done here. */
@@ -244,16 +240,18 @@ void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
i2o_post_message(c, msg[7]);
/* If reply to i2o_post_wait failed, return causes a timeout */
+
return;
}
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, "i2o_core", msg);
+#endif
+
if(msg[2]&0x80000000) // Post wait message
{
if (msg[4] >> 24)
- {
- i2o_report_status(KERN_INFO, "i2o_core: post_wait reply", msg);
status = -(msg[4] & 0xFFFF);
- }
else
status = I2O_POST_WAIT_OK;
@@ -263,7 +261,7 @@ void i2o_core_reply(struct i2o_handler *h, struct i2o_controller *c,
if(m->function == I2O_CMD_UTIL_EVT_REGISTER)
{
- memcpy(events[evt_in].msg, msg, MSG_FRAME_SIZE);
+ memcpy(events[evt_in].msg, msg, (msg[0]>>16)<<2);
events[evt_in].iop = c;
spin_lock(&i2o_evt_lock);
@@ -471,7 +469,7 @@ int i2o_delete_controller(struct i2o_controller *c)
char name[16];
int stat;
- dprintk(KERN_INFO "Deleting controller iop%d\n", c->unit);
+ dprintk(KERN_INFO "Deleting controller %s\n", c->name);
/*
* Clear event registration as this can cause weird behavior
@@ -482,7 +480,7 @@ int i2o_delete_controller(struct i2o_controller *c)
spin_lock(&i2o_configuration_lock);
if((users=atomic_read(&c->users)))
{
- dprintk(KERN_INFO "I2O: %d users for controller iop%d\n", users,
+ dprintk(KERN_INFO "I2O: %d users for controller %s\n", users,
c->name);
spin_unlock(&i2o_configuration_lock);
return -EBUSY;
@@ -523,9 +521,8 @@ int i2o_delete_controller(struct i2o_controller *c)
{
if(*p==c)
{
- /* Ask the IOP to switch to HOLD state */
- if (i2o_clear_controller(c) < 0)
- printk(KERN_ERR "Unable to clear iop%d\n", c->unit);
+ /* Ask the IOP to switch to RESET state */
+ i2o_reset_controller(c);
/* Release IRQ */
c->destructor(c);
@@ -578,7 +575,21 @@ struct i2o_controller *i2o_find_controller(int n)
spin_unlock(&i2o_configuration_lock);
return c;
}
+
+/*
+ * Issue UTIL_CLAIM or UTIL_RELEASE message
+ */
+static int i2o_issue_claim(u32 cmd, struct i2o_controller *c, int tid, u32 type)
+{
+ u32 msg[5];
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ msg[1] = cmd << 24 | HOST_TID<<12 | tid;
+ msg[3] = 0;
+ msg[4] = type;
+ return i2o_post_wait(c, msg, sizeof(msg), 60);
+}
/*
* Claim a device for use by an OSM
@@ -586,15 +597,15 @@ struct i2o_controller *i2o_find_controller(int n)
int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
{
spin_lock(&i2o_configuration_lock);
- if(d->owner)
- {
- printk(KERN_INFO "issue claim called, but dev as owner!");
+ if (d->owner) {
+ printk(KERN_INFO "Device claim called, but dev allready owned by %s!",
+ h->name);
spin_unlock(&i2o_configuration_lock);
return -EBUSY;
}
- if(i2o_issue_claim(d->controller,d->lct_data.tid, h->context, 1,
- I2O_CLAIM_PRIMARY))
+ if(i2o_issue_claim(I2O_CMD_UTIL_CLAIM ,d->controller,d->lct_data.tid,
+ I2O_CLAIM_PRIMARY))
{
spin_unlock(&i2o_configuration_lock);
return -EBUSY;
@@ -606,21 +617,22 @@ int i2o_claim_device(struct i2o_device *d, struct i2o_handler *h)
}
/*
- * Release a device that the OS is using
+ * Release a device that the OSM is using
*/
int i2o_release_device(struct i2o_device *d, struct i2o_handler *h)
{
int err = 0;
spin_lock(&i2o_configuration_lock);
- if(d->owner != h)
- {
+ if (d->owner != h) {
+ printk(KERN_INFO "Claim release called, but not owned by %s!",
+ h->name);
spin_unlock(&i2o_configuration_lock);
return -ENOENT;
}
- if(i2o_issue_claim(d->controller, d->lct_data.tid, h->context, 0,
- I2O_CLAIM_PRIMARY))
+ if(i2o_issue_claim(I2O_CMD_UTIL_RELEASE, d->controller, d->lct_data.tid,
+ I2O_CLAIM_PRIMARY))
{
err = -ENXIO;
}
@@ -684,13 +696,13 @@ int i2o_event_register(struct i2o_controller *c, u32 tid,
u32 init_context, u32 tr_context, u32 evt_mask)
{
u32 msg[5]; // Not performance critical, so we just
- // i2o_post_this it instead of building it
- // in IOP memory
+ // i2o_post_this it instead of building it
+ // in IOP memory
msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
msg[1] = I2O_CMD_UTIL_EVT_REGISTER<<24 | HOST_TID<<12 | tid;
- msg[2] = (u32)init_context;
- msg[3] = (u32)tr_context;
+ msg[2] = init_context;
+ msg[3] = tr_context;
msg[4] = evt_mask;
return i2o_post_this(c, msg, sizeof(msg));
@@ -703,6 +715,7 @@ int i2o_event_register(struct i2o_controller *c, u32 tid,
* message and change the function code since that's what spec
* describes an EventAck message looking like.
*/
+
int i2o_event_ack(struct i2o_controller *c, u32 *msg)
{
struct i2o_message *m = (struct i2o_message *)msg;
@@ -819,7 +832,11 @@ static int i2o_core_evt(void *reply_data)
if(i2o_handlers[i] &&
i2o_handlers[i]->new_dev_notify &&
(i2o_handlers[i]->class&d->lct_data.class_id))
+ {
+ spin_lock(&i2o_dev_lock);
i2o_handlers[i]->new_dev_notify(c,d);
+ spin_unlock(&i2o_dev_lock);
+ }
}
break;
@@ -855,7 +872,7 @@ static int i2o_core_evt(void *reply_data)
break;
default:
- printk(KERN_WARNING "%s: Unknown event (0x%08x)...check config\n", c->name, msg[4]);
+ printk(KERN_WARNING "%s: No handler for event (0x%08x)\n", c->name, msg[4]);
break;
}
}
@@ -939,7 +956,9 @@ static int i2o_dyn_lct(void *foo)
if(!found)
{
dprintk(KERN_INFO "Deleted device!\n");
+ spin_lock(&i2o_dev_lock);
i2o_delete_device(d);
+ spin_unlock(&i2o_dev_lock);
}
d = d1;
}
@@ -1114,29 +1133,6 @@ u32 i2o_wait_message(struct i2o_controller *c, char *why)
}
return m;
}
-
-
-/*
- * Wait up to timeout seconds for a reply to be available.
- */
-
-u32 i2o_wait_reply(struct i2o_controller *c, char *why, int timeout)
-{
- u32 m;
- long time=jiffies;
-
- while((m=I2O_REPLY_READ32(c))==0xFFFFFFFF)
- {
- if(jiffies-time >= timeout*HZ )
- {
- dprintk(KERN_ERR "%s: timeout waiting for %s reply.\n",
- c->name, why);
- return 0xFFFFFFFF;
- }
- schedule();
- }
- return m;
-}
/*
* Dump the information block associated with a given unit (TID)
@@ -1371,12 +1367,12 @@ int i2o_quiesce_controller(struct i2o_controller *c)
/* Long timeout needed for quiesce if lots of devices */
if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
- printk(KERN_INFO "%s: Unable to quiesce (status=%#10x).\n",
- c->name, ret);
+ printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
+ c->name, -ret);
else
dprintk(KERN_INFO "%s: Quiesced.\n", c->name);
- i2o_status_get(c); // Reread the Status Block
+ i2o_status_get(c); // Entered READY state
return ret;
@@ -1402,12 +1398,12 @@ int i2o_enable_controller(struct i2o_controller *c)
/* How long of a timeout do we need? */
if ((ret = i2o_post_wait(c, msg, sizeof(msg), 240)))
- printk(KERN_ERR "%s: Could not enable (status=%#10x).\n",
- c->name, ret);
+ printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
+ c->name, -ret);
else
dprintk(KERN_INFO "%s: Enabled.\n", c->name);
- i2o_status_get(c);
+ i2o_status_get(c); // entered OPERATIONAL state
return ret;
}
@@ -1434,8 +1430,8 @@ int i2o_clear_controller(struct i2o_controller *c)
msg[3]=0;
if ((ret=i2o_post_wait(c, msg, sizeof(msg), 30)))
- printk(KERN_INFO "%s: Unable to clear (status=%#10x).\n",
- c->name, ret);
+ printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
+ c->name, -ret);
else
dprintk(KERN_INFO "%s: Cleared.\n",c->name);
@@ -1470,7 +1466,6 @@ static int i2o_reset_controller(struct i2o_controller *c)
for (iop = i2o_controller_chain; iop; iop = iop->next)
i2o_quiesce_controller(iop);
- /* Get a message */
m=i2o_wait_message(c, "AdapterReset");
if(m==0xFFFFFFFF)
return -ETIMEDOUT;
@@ -1489,7 +1484,7 @@ static int i2o_reset_controller(struct i2o_controller *c)
msg[3]=0;
msg[4]=0;
msg[5]=0;
- msg[6]=virt_to_phys(status);
+ msg[6]=virt_to_bus(status);
msg[7]=0; /* 64bit host FIXME */
i2o_post_message(c,m);
@@ -1508,7 +1503,7 @@ static int i2o_reset_controller(struct i2o_controller *c)
barrier();
}
- if (status[0]==0x01)
+ if (status[0]==I2O_CMD_IN_PROGRESS)
{
/*
* Once the reset is sent, the IOP goes into the INIT state
@@ -1519,7 +1514,7 @@ static int i2o_reset_controller(struct i2o_controller *c)
* time, we assume the IOP could not reboot properly.
*/
- dprintk(KERN_INFO "Reset succeeded...waiting for reboot\n");
+ dprintk(KERN_INFO "Reset in progress, waiting for reboot\n");
time = jiffies;
m = I2O_POST_READ32(c);
@@ -1534,21 +1529,21 @@ static int i2o_reset_controller(struct i2o_controller *c)
schedule();
barrier();
m = I2O_POST_READ32(c);
- }
-
+ }
i2o_flush_reply(c,m);
-
- dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
}
/* If IopReset was rejected or didn't perform reset, try IopClear */
i2o_status_get(c);
- if (status[0] == 0x02 || c->status_block->iop_state != ADAPTER_STATE_RESET)
+ if (status[0] == I2O_CMD_REJECTED ||
+ c->status_block->iop_state != ADAPTER_STATE_RESET)
{
printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",c->name);
i2o_clear_controller(c);
}
+ else
+ dprintk(KERN_INFO "%s: Reset completed.\n", c->name);
/* Enable other IOPs */
@@ -1598,7 +1593,7 @@ int i2o_status_get(struct i2o_controller *c)
msg[3]=0;
msg[4]=0;
msg[5]=0;
- msg[6]=virt_to_phys(c->status_block);
+ msg[6]=virt_to_bus(c->status_block);
msg[7]=0; /* 64bit host FIXME */
msg[8]=sizeof(i2o_status_block); /* always 88 bytes */
@@ -1618,9 +1613,6 @@ int i2o_status_get(struct i2o_controller *c)
barrier();
}
- /* Ok the reply has arrived. Fill in the important stuff */
- c->inbound_size = (status_block[12]|(status_block[13]<<8))*4;
-
#ifdef DRIVERDEBUG
printk(KERN_INFO "%s: State = ", c->name);
switch (c->status_block->iop_state) {
@@ -1678,11 +1670,11 @@ int i2o_hrt_get(struct i2o_controller *c)
msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
msg[3]= 0;
msg[4]= (0xD0000000 | size); /* Simple transaction */
- msg[5]= virt_to_phys(c->hrt); /* Dump it here */
+ msg[5]= virt_to_bus(c->hrt); /* Dump it here */
if ((ret = i2o_post_wait(c, msg, sizeof(msg), 20))) {
- printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n",
- c->name, ret);
+ printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
+ c->name, -ret);
return ret;
}
@@ -1730,18 +1722,20 @@ static int i2o_systab_send(struct i2o_controller *iop)
* Private i/o space declaration
*/
msg[6] = 0x54000000 | sys_tbl_len;
- msg[7] = virt_to_phys(sys_tbl);
+ msg[7] = virt_to_bus(sys_tbl);
msg[8] = 0x54000000 | 0;
- msg[9] = virt_to_phys(privmem);
+ msg[9] = virt_to_bus(privmem);
msg[10] = 0xD4000000 | 0;
- msg[11] = virt_to_phys(privio);
+ msg[11] = virt_to_bus(privio);
if ((ret=i2o_post_wait(iop, msg, sizeof(msg), 120)))
- printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
- iop->name, ret);
+ printk(KERN_INFO "%s: Unable to set SysTab (status=%#x).\n",
+ iop->name, -ret);
else
dprintk(KERN_INFO "%s: SysTab set.\n", iop->name);
+ i2o_status_get(iop); // Entered READY state
+
return ret;
}
@@ -1761,7 +1755,8 @@ static void __init i2o_sys_init()
dprintk(KERN_INFO "Calling i2o_activate_controller for %s\n",
iop->name);
niop = iop->next;
- i2o_activate_controller(iop);
+ if (i2o_activate_controller(iop) < 0)
+ i2o_delete_controller(iop);
}
/* Active IOPs in HOLD state */
@@ -1784,8 +1779,10 @@ rebuild_sys_tab:
for (iop = i2o_controller_chain; iop; iop = niop) {
niop = iop->next;
dprintk(KERN_INFO "Calling i2o_online_controller for %s\n", iop->name);
- if (i2o_online_controller(iop) < 0)
+ if (i2o_online_controller(iop) < 0) {
+ i2o_delete_controller(iop);
goto rebuild_sys_tab;
+ }
}
/* Active IOPs now in OPERATIONAL state */
@@ -1835,17 +1832,12 @@ int i2o_activate_controller(struct i2o_controller *iop)
if (i2o_status_get(iop) < 0) {
printk(KERN_INFO "Unable to obtain status of IOP, attempting a reset.\n");
- i2o_reset_controller(iop);
- if (i2o_status_get(iop) < 0) {
- printk(KERN_ERR "%s: IOP not responding.\n", iop->name);
- i2o_delete_controller(iop);
+ if (i2o_reset_controller(iop) < 0)
return -1;
- }
}
if(iop->status_block->iop_state == ADAPTER_STATE_FAULTED) {
printk(KERN_CRIT "%s: hardware fault\n", iop->name);
- i2o_delete_controller(iop);
return -1;
}
@@ -1855,37 +1847,26 @@ int i2o_activate_controller(struct i2o_controller *iop)
iop->status_block->iop_state == ADAPTER_STATE_FAILED)
{
u32 m[MSG_FRAME_SIZE];
- dprintk(KERN_INFO "%s: already running...trying to reset\n",
+ dprintk(KERN_INFO "%s: Already running, trying to reset\n",
iop->name);
i2o_init_outbound_q(iop);
- I2O_REPLY_WRITE32(iop,virt_to_phys(m));
+ I2O_REPLY_WRITE32(iop,virt_to_bus(m));
- i2o_reset_controller(iop);
-
- if (i2o_status_get(iop) < 0 ||
- iop->status_block->iop_state != ADAPTER_STATE_RESET)
- {
- printk(KERN_CRIT "%s: Failed to initialize.\n", iop->name);
- i2o_delete_controller(iop);
+ if (i2o_reset_controller(iop) < 0)
return -1;
- }
}
- if (i2o_init_outbound_q(iop) < 0) {
- i2o_delete_controller(iop);
+ if (i2o_init_outbound_q(iop) < 0)
return -1;
- }
if (i2o_post_outbound_messages(iop))
return -1;
/* In HOLD state */
- if (i2o_hrt_get(iop) < 0) {
- i2o_delete_controller(iop);
+ if (i2o_hrt_get(iop) < 0)
return -1;
- }
return 0;
}
@@ -1909,29 +1890,28 @@ int i2o_init_outbound_q(struct i2o_controller *c)
status = kmalloc(4,GFP_KERNEL);
if (status==NULL) {
- printk(KERN_ERR "%s: IOP reset failed - no free memory.\n",
+ printk(KERN_ERR "%s: Outbound Queue initialization failed - no free memory.\n",
c->name);
return -ENOMEM;
}
memset(status, 0, 4);
-
msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
msg[2]= core_context;
- msg[3]= 0x0106; /* Transaction context */
- msg[4]= 4096; /* Host page frame size */
+ msg[3]= 0x0106; /* Transaction context */
+ msg[4]= 4096; /* Host page frame size */
/* Frame size is in words. Pick 128, its what everyone elses uses and
other sizes break some adapters. */
msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size and Initcode */
- msg[6]= 0xD0000004; /* Simple SG LE, EOB */
+ msg[6]= 0xD0000004; /* Simple SG LE, EOB */
msg[7]= virt_to_bus(status);
i2o_post_message(c,m);
barrier();
time=jiffies;
- while(status[0]<0x02)
+ while(status[0] < I2O_CMD_REJECTED)
{
if((jiffies-time)>=30*HZ)
{
@@ -1948,7 +1928,7 @@ int i2o_init_outbound_q(struct i2o_controller *c)
barrier();
}
- if(status[0] != I2O_CMD_OUTBOUND_INIT_COMPLETE)
+ if(status[0] != I2O_CMD_COMPLETED)
{
printk(KERN_ERR "%s: IOP outbound initialise failed.\n", c->name);
kfree(status);
@@ -1970,7 +1950,7 @@ int i2o_post_outbound_messages(struct i2o_controller *c)
c->name);
return -ENOMEM;
}
- m=virt_to_phys(c->page_frame);
+ m=virt_to_bus(c->page_frame);
/* Post frames */
@@ -2012,8 +1992,8 @@ int i2o_lct_get(struct i2o_controller *c)
msg[7] = virt_to_bus(c->lct);
if ((ret=i2o_post_wait(c, msg, sizeof(msg), 120))) {
- printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
- c->name, ret);
+ printk(KERN_ERR "%s: LCT Get failed (status=%#x.\n",
+ c->name, -ret);
return ret;
}
@@ -2057,26 +2037,20 @@ int i2o_lct_notify(struct i2o_controller *c)
*/
int i2o_online_controller(struct i2o_controller *iop)
{
- if (i2o_systab_send(iop) < 0) {
- i2o_delete_controller(iop);
+ if (i2o_systab_send(iop) < 0)
return -1;
- }
/* In READY state */
dprintk(KERN_INFO "Attempting to enable iop%d\n", iop->unit);
- if (i2o_enable_controller(iop) < 0) {
- i2o_delete_controller(iop);
+ if (i2o_enable_controller(iop) < 0)
return -1;
- }
/* In OPERATIONAL state */
dprintk(KERN_INFO "Attempting to get/parse lct iop%d\n", iop->unit);
- if (i2o_lct_get(iop) < 0){
- i2o_delete_controller(iop);
+ if (i2o_lct_get(iop) < 0)
return -1;
- }
return 0;
}
@@ -2147,7 +2121,7 @@ static int i2o_build_sys_table(void)
sys_tbl->iops[count].iop_capabilities =
iop->status_block->iop_capabilities;
sys_tbl->iops[count].inbound_low =
- (u32)virt_to_phys(iop->post_port);
+ (u32)virt_to_bus(iop->post_port);
sys_tbl->iops[count].inbound_high = 0; // TODO: 64-bit support
count++;
@@ -2241,7 +2215,7 @@ int i2o_post_wait(struct i2o_controller *c, u32 *msg, int len, int timeout)
#ifdef DRIVERDEBUG
if(status == -ETIMEDOUT)
- printk(KERN_INFO "POST WAIT TIMEOUT\n");
+ printk(KERN_INFO "%s: POST WAIT TIMEOUT\n",c->name);
#endif
/*
@@ -2305,26 +2279,6 @@ static void i2o_post_wait_complete(u32 context, int status)
printk(KERN_DEBUG "i2o_post_wait reply after timeout!\n");
}
-/*
- * Issue UTIL_CLAIM or UTIL_RELEASE messages
- */
-static int i2o_issue_claim(struct i2o_controller *c, int tid, int context,
- int onoff, u32 type)
-{
- u32 msg[5];
-
- msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
- if(onoff)
- msg[1] = I2O_CMD_UTIL_CLAIM << 24 | HOST_TID<<12 | tid;
- else
- msg[1] = I2O_CMD_UTIL_RELEASE << 24 | HOST_TID << 12 | tid;
-
- msg[3] = 0;
- msg[4] = type;
-
- return i2o_post_wait(c, msg, sizeof(msg), 30);
-}
-
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
*
* This function can be used for all UtilParamsGet/Set operations.
@@ -2541,44 +2495,69 @@ int i2o_row_add_table(struct i2o_controller *iop, int tid,
return size;
}
-/*
- * Delete rows from a table group.
- */
-int i2o_row_delete_table(struct i2o_controller *iop, int tid,
- int group, int keycount, void *keys, int keyslen)
-{
- u16 *opblk;
- u8 resblk[32]; /* min 8 bytes for header */
- int size;
- opblk = kmalloc(keyslen+64, GFP_KERNEL);
- if (opblk == NULL)
- {
- printk(KERN_ERR "i2o: no memory for operation buffer.\n");
- return -ENOMEM;
- }
-
- opblk[0] = 1; /* operation count */
- opblk[1] = 0; /* pad */
- opblk[2] = I2O_PARAMS_ROW_DELETE;
- opblk[3] = group;
- opblk[4] = keycount;
- memcpy(opblk+5, keys, keyslen);
-
- size = i2o_issue_params(I2O_CMD_UTIL_PARAMS_SET, iop, tid,
- opblk, 10+keyslen, resblk, sizeof(resblk));
+/*
+ * Used for error reporting/debugging purposes.
+ * Following fail status are common to all classes.
+ * The preserved message must be handled in the reply handler.
+ */
+void i2o_report_fail_status(u8 req_status, u32* msg)
+{
+ static char *FAIL_STATUS[] = {
+ "0x80", /* not used */
+ "SERVICE_SUSPENDED", /* 0x81 */
+ "SERVICE_TERMINATED", /* 0x82 */
+ "CONGESTION",
+ "FAILURE",
+ "STATE_ERROR",
+ "TIME_OUT",
+ "ROUTING_FAILURE",
+ "INVALID_VERSION",
+ "INVALID_OFFSET",
+ "INVALID_MSG_FLAGS",
+ "FRAME_TOO_SMALL",
+ "FRAME_TOO_LARGE",
+ "INVALID_TARGET_ID",
+ "INVALID_INITIATOR_ID",
+ "INVALID_INITIATOR_CONTEX", /* 0x8F */
+ "UNKNOWN_FAILURE" /* 0xFF */
+ };
- kfree(opblk);
- return size;
+ if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
+ printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x)\n.", req_status);
+ else
+ printk("TRANSPORT_%s.\n", FAIL_STATUS[req_status & 0x0F]);
+
+ /* Dump some details */
+
+ printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
+ (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
+ printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
+ (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
+ printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
+ msg[5] >> 16, msg[5] & 0xFFF);
+
+ printk(KERN_ERR " Severity: 0x%02X ", (msg[4] >> 16) & 0xFF);
+ if (msg[4] & (1<<16))
+ printk("(FormatError), "
+ "this msg can never be delivered/processed.\n");
+ if (msg[4] & (1<<17))
+ printk("(PathError), "
+ "this msg can no longer be delivered/processed.\n");
+ if (msg[4] & (1<<18))
+ printk("(PathState), "
+ "the system state does not allow delivery.\n");
+ if (msg[4] & (1<<19))
+ printk("(Congestion), resources temporarily not available;"
+ "do not retry immediately.\n");
}
/*
- * Used for error reporting/debugging purposes
+ * Used for error reporting/debugging purposes.
+ * Following reply status are common to all classes.
*/
void i2o_report_common_status(u8 req_status)
{
- /* the following reply status strings are common to all classes */
-
static char *REPLY_STATUS[] = {
"SUCCESS",
"ABORT_DIRTY",
@@ -2595,23 +2574,18 @@ void i2o_report_common_status(u8 req_status)
};
if (req_status > I2O_REPLY_STATUS_PROGRESS_REPORT)
- printk("%0#4x / ", req_status);
+ printk("RequestStatus = %0#2x", req_status);
else
- printk("%s / ", REPLY_STATUS[req_status]);
-
- return;
+ printk("%s", REPLY_STATUS[req_status]);
}
/*
- * Used for error reporting/debugging purposes
+ * Used for error reporting/debugging purposes.
+ * Following detailed status are valid for executive class,
+ * utility class, DDM class and for transaction error replies.
*/
static void i2o_report_common_dsc(u16 detailed_status)
{
- /* The following detailed statuscodes are valid
- - for executive class, utility class, DDM class and
- - for transaction error replies
- */
-
static char *COMMON_DSC[] = {
"SUCCESS",
"0x01", // not used
@@ -2645,11 +2619,9 @@ static void i2o_report_common_dsc(u16 detailed_status)
};
if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
- printk("%0#4x.\n", detailed_status);
+ printk(" / DetailedStatus = %0#4x.\n", detailed_status);
else
- printk("%s.\n", COMMON_DSC[detailed_status]);
-
- return;
+ printk(" / %s.\n", COMMON_DSC[detailed_status]);
}
/*
@@ -2680,11 +2652,9 @@ static void i2o_report_lan_dsc(u16 detailed_status)
};
if (detailed_status > I2O_DSC_INVALID_REQUEST)
- printk("%0#4x.\n", detailed_status);
+ printk(" / %0#4x.\n", detailed_status);
else
- printk("%s.\n", LAN_DSC[detailed_status]);
-
- return;
+ printk(" / %s.\n", LAN_DSC[detailed_status]);
}
/*
@@ -2736,10 +2706,8 @@ static void i2o_report_util_cmd(u8 cmd)
printk("UTIL_REPLY_FAULT_NOTIFY, ");
break;
default:
- printk("%0#2x, ",cmd);
+ printk("Cmd = %0#2x, ",cmd);
}
-
- return;
}
/*
@@ -2848,10 +2816,8 @@ static void i2o_report_exec_cmd(u8 cmd)
printk("EXEC_SYS_TAB_SET, ");
break;
default:
- printk("%02x, ",cmd);
+ printk("Cmd = %#02x, ",cmd);
}
-
- return;
}
/*
@@ -2876,51 +2842,48 @@ static void i2o_report_lan_cmd(u8 cmd)
printk("LAN_SUSPEND, ");
break;
default:
- printk("%02x, ",cmd);
+ printk("Cmd = %0#2x, ",cmd);
}
-
- return;
}
/*
- * Used for error reporting/debugging purposes
+ * Used for error reporting/debugging purposes.
+ * Report Cmd name, Request status, Detailed Status.
*/
-void i2o_report_status(const char *severity, const char *module, u32 *msg)
+void i2o_report_status(const char *severity, const char *str, u32 *msg)
{
u8 cmd = (msg[1]>>24)&0xFF;
u8 req_status = (msg[4]>>24)&0xFF;
u16 detailed_status = msg[4]&0xFFFF;
struct i2o_handler *h = i2o_handlers[msg[2] & (MAX_I2O_MODULES-1)];
- printk("%s%s: ", severity, module);
+ printk("%s%s: ", severity, str);
- switch (h->class) {
- case I2O_CLASS_EXECUTIVE:
- if (cmd < 0x1F) { // Utility cmd
- i2o_report_util_cmd(cmd);
- i2o_report_common_status(req_status);
- i2o_report_common_dsc(detailed_status);
- }
- if (cmd >= 0xA0 && cmd <= 0xEF) { // Executive cmd
- i2o_report_exec_cmd(cmd);
- i2o_report_common_status(req_status);
- i2o_report_common_dsc(detailed_status);
- }
- break;
+ if (cmd < 0x1F) // Utility cmd
+ i2o_report_util_cmd(cmd);
+
+ else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
+ i2o_report_exec_cmd(cmd);
+
+ else if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
+ i2o_report_lan_cmd(cmd); // LAN cmd
+ else
+ printk("Cmd = %0#2x, ", cmd); // Other cmds
- case I2O_CLASS_LAN:
- i2o_report_lan_cmd(cmd);
- i2o_report_common_status(req_status);
- i2o_report_lan_dsc(detailed_status);
- break;
-/*
- case I2O_CLASS_RANDOM_BLOCK_STORAGE:
- break;
-*/
- default:
- printk(KERN_INFO "%02x, %02x / %04x.\n",
- cmd, req_status, detailed_status);
+ if (msg[0] & MSG_FAIL) {
+ i2o_report_fail_status(req_status, msg);
+ return;
}
+
+ i2o_report_common_status(req_status);
+
+ if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
+ i2o_report_common_dsc(detailed_status);
+
+ if (h->class == I2O_CLASS_LAN && cmd >= 0x30 && cmd <= 0x3F)
+ i2o_report_lan_dsc(detailed_status);
+ else
+ printk(" / DetailedStatus = %0#4x.\n", detailed_status);
}
/* Used to dump a message to syslog during debugging */
@@ -3005,7 +2968,6 @@ EXPORT_SYMBOL(i2o_set_scalar);
EXPORT_SYMBOL(i2o_query_table);
EXPORT_SYMBOL(i2o_clear_table);
EXPORT_SYMBOL(i2o_row_add_table);
-EXPORT_SYMBOL(i2o_row_delete_table);
EXPORT_SYMBOL(i2o_issue_params);
EXPORT_SYMBOL(i2o_event_register);
@@ -3013,6 +2975,7 @@ EXPORT_SYMBOL(i2o_event_ack);
EXPORT_SYMBOL(i2o_report_status);
EXPORT_SYMBOL(i2o_dump_message);
+
EXPORT_SYMBOL(i2o_get_class_name);
MODULE_AUTHOR("Red Hat Software");
diff --git a/drivers/i2o/i2o_lan.c b/drivers/i2o/i2o_lan.c
index 2957424d6..90b252041 100644
--- a/drivers/i2o/i2o_lan.c
+++ b/drivers/i2o/i2o_lan.c
@@ -1,7 +1,7 @@
/*
* drivers/i2o/i2o_lan.c
*
- * I2O LAN CLASS OSM April 3rd 2000
+ * I2O LAN CLASS OSM May 4th 2000
*
* (C) Copyright 1999, 2000 University of Helsinki,
* Department of Computer Science
@@ -22,8 +22,7 @@
* in Gigabit Eth environment (using SysKonnect's DDM)
* in Fast Ethernet environment (using Intel 82558 DDM)
*
- * TODO: check error checking / timeouts
- * code / test for other LAN classes
+ * TODO: tests for other LAN classes (Token Ring, Fibre Channel)
*/
#include <linux/config.h>
@@ -62,8 +61,8 @@
static u32 max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
static u32 bucket_thresh = I2O_LAN_BUCKET_THRESH;
static u32 rx_copybreak = I2O_LAN_RX_COPYBREAK;
-static tx_batch_mode = I2O_LAN_TX_BATCH_MODE;
-static i2o_event_mask = I2O_LAN_EVENT_MASK;
+static u8 tx_batch_mode = I2O_LAN_TX_BATCH_MODE;
+static u32 i2o_event_mask = I2O_LAN_EVENT_MASK;
#define MAX_LAN_CARDS 16
static struct net_device *i2o_landevs[MAX_LAN_CARDS+1];
@@ -139,8 +138,7 @@ static void i2o_lan_handle_failure(struct net_device *dev, u32 *msg)
struct sk_buff *skb = NULL;
u8 le_flag;
-// To be added to i2o_core.c
-// i2o_report_failure(KERN_INFO, iop, dev->name, msg);
+ i2o_report_status(KERN_INFO, dev->name, msg);
/* If PacketSend failed, free sk_buffs reserved by upper layers */
@@ -190,8 +188,7 @@ static void i2o_lan_handle_transaction_error(struct net_device *dev, u32 *msg)
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct sk_buff *skb;
-// To be added to i2o_core.c
-// i2o_report_transaction_error(KERN_INFO, dev->name, msg);
+ i2o_report_status(KERN_INFO, dev->name, msg);
/* If PacketSend was rejected, free sk_buff reserved by upper layers */
@@ -253,17 +250,15 @@ static void i2o_lan_send_post_reply(struct i2o_handler *h,
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
u8 trl_count = msg[3] & 0x000000FF;
-#ifdef DRIVERDEBUG
- i2o_report_status(KERN_INFO, dev->name, msg);
-#endif
-
if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
if (i2o_lan_handle_status(dev, msg))
return;
-
- /* Else we get pending transmit request(s) back */
}
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
/* DDM has handled transmit request(s), free sk_buffs */
while (trl_count) {
@@ -284,7 +279,7 @@ static void i2o_lan_send_post_reply(struct i2o_handler *h,
* i2o_lan_receive_post_reply(): Callback function to process incoming packets.
*/
static void i2o_lan_receive_post_reply(struct i2o_handler *h,
- struct i2o_controller *iop, struct i2o_message *m)
+ struct i2o_controller *iop, struct i2o_message *m)
{
u32 *msg = (u32 *)m;
u8 unit = (u8)(msg[2]>>16); // InitiatorContext
@@ -297,10 +292,6 @@ static void i2o_lan_receive_post_reply(struct i2o_handler *h,
struct sk_buff *skb, *old_skb;
unsigned long flags = 0;
-#ifdef DRIVERDEBUG
- i2o_report_status(KERN_INFO, dev->name, msg);
-#endif
-
if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
if (i2o_lan_handle_status(dev, msg))
return;
@@ -313,44 +304,46 @@ static void i2o_lan_receive_post_reply(struct i2o_handler *h,
return;
}
- /* Which DetailedStatusCodes need special treatment? */
+ /* If other DetailedStatusCodes need special code, add it here */
}
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
/* Else we are receiving incoming post. */
while (trl_count--) {
skb = (struct sk_buff *)bucket->context;
packet = (struct i2o_packet_info *)bucket->packet_info;
atomic_dec(&priv->buckets_out);
-#if 0
-/* Is this enough? If we get erroneous bucket, we can't assume that skb could
- * be reused, can we?
- */
- /* Should we optimise these ifs away from the fast path? -taneli */
- if (packet->flags & 0x0f) {
+ /* Sanity checks: Any weird characteristics in bucket? */
+ if (packet->flags & 0x0f || !(packet->flags & 0x40)) {
if (packet->flags & 0x01)
- printk(KERN_WARNING "%s: packet with errors.\n", dev->name);
- if (packet->flags & 0x0c)
- /* This actually means that the hw is b0rken, since we
- have asked it to not send fragmented packets. */
- printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n", dev->name);
- bucket++;
- if (skb)
- dev_kfree_skb_irq(skb);
- continue;
- }
+ printk(KERN_WARNING "%s: packet with errors, error code=0x%02x.\n",
+ dev->name, packet->status & 0xff);
+
+ /* The following shouldn't happen, unless parameters in
+ * LAN_OPERATION group are changed during the run time.
+ */
+ if (packet->flags & 0x0c)
+ printk(KERN_DEBUG "%s: multi-bucket packets not supported!\n",
+ dev->name);
+
+ if (!(packet->flags & 0x40))
+ printk(KERN_DEBUG "%s: multiple packets in a bucket not supported!\n",
+ dev->name);
+
+ dev_kfree_skb_irq(skb);
- if (packet->status & 0xff) {
- /* Silently discard, unless debugging. */
- dprintk(KERN_DEBUG "%s: toasted packet received.\n", dev->name);
bucket++;
- if (skb)
- dev_kfree_skb_irq(skb);
continue;
}
-#endif
+
+ /* Copy short packet to a new skb */
+
if (packet->len < priv->rx_copybreak) {
old_skb = skb;
skb = (struct sk_buff *)dev_alloc_skb(packet->len+2);
@@ -366,13 +359,17 @@ static void i2o_lan_receive_post_reply(struct i2o_handler *h,
priv->i2o_fbl[++priv->i2o_fbl_tail] = old_skb;
else
dev_kfree_skb_irq(old_skb);
+
spin_unlock_irqrestore(&priv->fbl_lock, flags);
} else
skb_put(skb, packet->len);
+ /* Deliver to upper layers */
+
skb->dev = dev;
skb->protocol = priv->type_trans(skb, dev);
netif_rx(skb);
+
dev->last_rx = jiffies;
dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
@@ -387,7 +384,7 @@ static void i2o_lan_receive_post_reply(struct i2o_handler *h,
dev->name, atomic_read(&priv->buckets_out));
#endif
- /* If DDM has already consumed bucket_tresh buckets, post new ones */
+ /* If DDM has already consumed bucket_thresh buckets, post new ones */
if (atomic_read(&priv->buckets_out) <= priv->max_buckets_out - priv->bucket_thresh) {
i2o_post_buckets_task.data = (void *)dev;
@@ -409,10 +406,6 @@ static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
u8 unit = (u8)(msg[2]>>16); // InitiatorContext
struct net_device *dev = i2o_landevs[unit];
-#ifdef DRIVERDEBUG
- i2o_report_status(KERN_INFO, dev->name, msg);
-#endif
-
if ((msg[4] >> 24) != I2O_REPLY_STATUS_SUCCESS) {
if (i2o_lan_handle_status(dev, msg))
return;
@@ -420,6 +413,10 @@ static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
/* This should NOT be reached */
}
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, dev->name, msg);
+#endif
+
switch (msg[1] >> 24) {
case LAN_RESET:
case LAN_SUSPEND:
@@ -465,6 +462,7 @@ static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
+ u32 max_evt_data_size =iop->status_block->inbound_frame_size-5;
struct i2o_reply {
u8 version_offset;
u8 msg_flags;
@@ -475,7 +473,7 @@ static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
u32 initiator_context;
u32 transaction_context;
u32 evt_indicator;
- u32 data[(iop->inbound_size - 20) / 4]; /* max */
+ u32 data[max_evt_data_size]; /* max */
} *evt = (struct i2o_reply *)msg;
int evt_data_len = (evt->msg_size - 5) * 4; /* real */
@@ -534,8 +532,8 @@ static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
case I2O_EVT_IND_FIELD_MODIFIED: {
u16 *work16 = (u16 *)evt->data;
- printk("Group 0x%04x, field %d changed.\n", work16[0],
- work16[1]);
+ printk("Group 0x%04x, field %d changed.\n", work16[0], work16[1]);
+
break;
}
@@ -564,7 +562,7 @@ static void i2o_lan_handle_event(struct net_device *dev, u32 *msg)
break;
default:
- printk("Event Indicator = 0x%08x.\n", evt->evt_indicator);
+ printk("0x%08x. No handler.\n", evt->evt_indicator);
}
/* Note: EventAck necessary only for events that cause the device to
@@ -660,7 +658,7 @@ static int i2o_lan_reset(struct net_device *dev)
msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid;
msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
msg[3] = 0; // TransactionContext
- msg[4] = 0; // keep posted buckets
+ msg[4] = 0; // Keep posted buckets
if (i2o_post_this(iop, msg, sizeof(msg)) < 0)
return -ETIMEDOUT;
@@ -694,43 +692,22 @@ static int i2o_lan_suspend(struct net_device *dev)
}
/*
- * i2o_set_batch_mode(): Set DDM into batch mode.
+ * i2o_set_ddm_parameters:
+ * These settings are done to ensure proper initial values for DDM.
+ * They can be changed via proc file system or via configuration utility.
*/
-static void i2o_set_batch_mode(struct net_device *dev)
+static void i2o_set_ddm_parameters(struct net_device *dev)
{
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
u32 val;
- /* Set defaults LAN_BATCH_CONTROL attributes */
- /* May be changed via /proc or Configuration Utility */
-
- val = 0x00000000; // enable batch mode, toggle automatically
- if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0003, 0, &val, sizeof(val)) <0)
- printk(KERN_WARNING "%s: Unable to enter I2O LAN batch mode.\n",
- dev->name);
- else
- dprintk(KERN_INFO "%s: I2O LAN batch mode enabled.\n", dev->name);
-
- /* Set LAN_OPERATION attributes */
-
-#ifdef DRIVERDEBUG
-/* Added for testing: this will be removed */
- val = 0x00000003; // 1 = UserFlags
- if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 1, &val, sizeof(val)) < 0)
- printk(KERN_WARNING "%s: Can't enable ErrorReporting & BadPacketHandling.\n",
- dev->name);
- else
- dprintk(KERN_INFO "%s: ErrorReporting enabled, "
- "BadPacketHandling enabled.\n", dev->name);
-#endif /* DRIVERDEBUG */
-
/*
- * When PacketOrphanlimit is same as the maximum packet length,
+ * When PacketOrphanlimit is set to the maximum packet length,
* the packets will never be split into two separate buckets
*/
- val = dev->mtu + dev->hard_header_len; // 2 = PacketOrphanLimit
+ val = dev->mtu + dev->hard_header_len;
if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0004, 2, &val, sizeof(val)) < 0)
printk(KERN_WARNING "%s: Unable to set PacketOrphanLimit.\n",
dev->name);
@@ -738,6 +715,15 @@ static void i2o_set_batch_mode(struct net_device *dev)
dprintk(KERN_INFO "%s: PacketOrphanLimit set to %d.\n",
dev->name, val);
+ /* When RxMaxPacketsBucket = 1, DDM puts only one packet into bucket */
+
+ val = 1;
+ if (i2o_set_scalar(iop, i2o_dev->lct_data.tid, 0x0008, 4, &val, sizeof(val)) <0)
+ printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
+ dev->name);
+ else
+ dprintk(KERN_INFO "%s: RxMaxPacketsBucket set to %d.\n",
+ dev->name, val);
return;
}
@@ -779,7 +765,7 @@ static int i2o_lan_open(struct net_device *dev)
priv->i2o_fbl_tail = -1;
priv->send_active = 0;
- i2o_set_batch_mode(dev);
+ i2o_set_ddm_parameters(dev);
i2o_lan_receive_post(dev);
netif_start_queue(dev);
@@ -795,9 +781,9 @@ static int i2o_lan_close(struct net_device *dev)
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
struct i2o_device *i2o_dev = priv->i2o_dev;
struct i2o_controller *iop = i2o_dev->controller;
+ int ret = 0;
netif_stop_queue(dev);
-
i2o_lan_suspend(dev);
if (i2o_event_register(iop, i2o_dev->lct_data.tid,
@@ -805,19 +791,20 @@ static int i2o_lan_close(struct net_device *dev)
printk(KERN_WARNING "%s: Unable to clear the event mask.\n",
dev->name);
+ while (priv->i2o_fbl_tail >= 0)
+ dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);
+
+ kfree(priv->i2o_fbl);
+
if (i2o_release_device(i2o_dev, &i2o_lan_handler)) {
printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device "
"(tid=%d).\n", dev->name, i2o_dev->lct_data.tid);
- return -EBUSY;
+ ret = -EBUSY;
}
- while (priv->i2o_fbl_tail >= 0)
- dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);
- kfree(priv->i2o_fbl);
-
MOD_DEC_USE_COUNT;
- return 0;
+ return ret;
}
/*
@@ -829,12 +816,12 @@ static void i2o_lan_tx_timeout(struct net_device *dev)
netif_start_queue(dev);
}
-#define batching(x, cond) ( (x)->tx_batch_mode==1 || ((x)->tx_batch_mode==2 && (cond)) )
-
/*
- * Batch send packets. Both i2o_lan_sdu_send and i2o_lan_packet_send
- * use this. I'm still not pleased. If you come up with
- * something better, please tell me. -taneli
+ * i2o_lan_batch_send(): Send packets in batch.
+ * Both i2o_lan_sdu_send and i2o_lan_packet_send use this.
+ *
+ * This is a coarse first approximation for the tx_batching.
+ * If you come up with something better, please tell me. -taneli
*/
static void i2o_lan_batch_send(struct net_device *dev)
{
@@ -848,20 +835,16 @@ static void i2o_lan_batch_send(struct net_device *dev)
dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
priv->tx_count = 0;
}
- spin_unlock_irq(&priv->tx_lock);
-
priv->send_active = 0;
+ spin_unlock_irq(&priv->tx_lock);
}
+#ifdef CONFIG_NET_FC
/*
* i2o_lan_sdu_send(): Send a packet, MAC header added by the DDM.
* Must be supported by Fibre Channel, optional for Ethernet/802.3,
* Token Ring, FDDI
*/
-
-/*
- * This is a coarse first approximation. Needs testing. Any takers? -taneli
- */
static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
{
struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
@@ -876,6 +859,16 @@ static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
priv->tx_count++;
atomic_inc(&priv->tx_out);
+ /*
+ * If tx_batch_mode = 0x00 forced to immediate mode
+ * If tx_batch_mode = 0x01 forced to batch mode
+ * If tx_batch_mode = 0x02 switch automatically, current mode immediate
+ * If tx_batch_mode = 0x03 switch automatically, current mode batch
+ * If gap between two packets is > 2 ticks, switch to immediate
+ */
+ if (priv->tx_batch_mode >> 1) // switch automatically
+ priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;
+
if (priv->tx_count == 1) {
m = I2O_POST_READ32(iop);
if (m == 0xFFFFFFFF) {
@@ -888,14 +881,15 @@ static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
__raw_writel(NINE_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
__raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
- __raw_writel(1 << 3, msg+3); // TransmitControlWord
+ __raw_writel(1 << 30 | 1 << 3, msg+3); // TransmitControlWord
__raw_writel(0xD7000000 | skb->len, msg+4); // MAC hdr included
__raw_writel((u32)skb, msg+5); // TransactionContext
__raw_writel(virt_to_bus(skb->data), msg+6);
__raw_writel((u32)skb->mac.raw, msg+7);
__raw_writel((u32)skb->mac.raw+4, msg+8);
- if (batching(priv, !tickssofar) && !priv->send_active) {
+
+ if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
priv->send_active = 1;
queue_task(&priv->i2o_batch_send_task, &tq_scheduler);
}
@@ -915,7 +909,7 @@ static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
/* If tx not in batch mode or frame is full, send immediatelly */
- if (!batching(priv, !tickssofar) || priv->tx_count == priv->sgl_max) {
+ if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
dev->trans_start = jiffies;
i2o_post_message(iop, priv->m);
dprintk(KERN_DEBUG "%s: %d packets sent.\n", dev->name, priv->tx_count);
@@ -930,6 +924,7 @@ static int i2o_lan_sdu_send(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&priv->tx_lock);
return 0;
}
+#endif /* CONFIG_NET_FC */
/*
* i2o_lan_packet_send(): Send a packet as is, including the MAC header.
@@ -951,6 +946,16 @@ static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
priv->tx_count++;
atomic_inc(&priv->tx_out);
+ /*
+ * If tx_batch_mode = 0x00 forced to immediate mode
+ * If tx_batch_mode = 0x01 forced to batch mode
+ * If tx_batch_mode = 0x02 switch automatically, current mode immediate
+ * If tx_batch_mode = 0x03 switch automatically, current mode batch
+ * If gap between two packets is > 0 ticks, switch to immediate
+ */
+ if (priv->tx_batch_mode >> 1) // switch automatically
+ priv->tx_batch_mode = tickssofar ? 0x02 : 0x03;
+
if (priv->tx_count == 1) {
m = I2O_POST_READ32(iop);
if (m == 0xFFFFFFFF) {
@@ -963,12 +968,14 @@ static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
__raw_writel(SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4, msg);
__raw_writel(LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->lct_data.tid, msg+1);
__raw_writel(priv->unit << 16 | lan_send_context, msg+2); // InitiatorContext
- __raw_writel(1 << 3, msg+3); // TransmitControlWord
-
+ __raw_writel(1 << 30 | 1 << 3, msg+3); // TransmitControlWord
+ // bit 30: reply as soon as transmission attempt is complete
+ // bit 3: Suppress CRC generation
__raw_writel(0xD5000000 | skb->len, msg+4); // MAC hdr included
__raw_writel((u32)skb, msg+5); // TransactionContext
__raw_writel(virt_to_bus(skb->data), msg+6);
- if (batching(priv, !tickssofar) && !priv->send_active) {
+
+ if ((priv->tx_batch_mode & 0x01) && !priv->send_active) {
priv->send_active = 1;
queue_task(&priv->i2o_batch_send_task, &tq_scheduler);
}
@@ -984,9 +991,9 @@ static int i2o_lan_packet_send(struct sk_buff *skb, struct net_device *dev)
__raw_writel(virt_to_bus(skb->data), sgl_elem+2);
}
- /* If tx not in batch mode or frame is full, send immediatelly */
+ /* If tx is in immediate mode or frame is full, send now */
- if (!batching(priv, !tickssofar) || priv->tx_count == priv->sgl_max) {
+ if (!(priv->tx_batch_mode & 0x01) || priv->tx_count == priv->sgl_max) {
dev->trans_start = jiffies;
i2o_post_message(iop, priv->m);
dprintk(KERN_DEBUG"%s: %d packets sent.\n", dev->name, priv->tx_count);
@@ -1141,12 +1148,14 @@ static void i2o_lan_set_mc_list(struct net_device *dev)
u32 max_size_mc_table;
u32 mc_addr_group[64];
-// This isn't safe yet. Needs to be async.
+// This isn't safe yet in SMP. Needs to be async.
+// Seems to work in uniprocessor environment.
+
return;
// read_lock_bh(&dev_mc_lock);
-// spin_lock(&dev->xmit_lock);
-// dev->xmit_lock_owner = smp_processor_id();
+ spin_lock(&dev->xmit_lock);
+ dev->xmit_lock_owner = smp_processor_id();
if (i2o_query_scalar(iop, i2o_dev->lct_data.tid, 0x0001, -1,
&mc_addr_group, sizeof(mc_addr_group)) < 0 ) {
@@ -1212,15 +1221,13 @@ static struct tq_struct i2o_lan_set_mc_list_task = {
* Queue routine i2o_lan_set_mc_list() to be called later.
* Needs to be async.
*/
-
static void i2o_lan_set_multicast_list(struct net_device *dev)
{
- if (!in_interrupt()) {
+ if (in_interrupt()) {
i2o_lan_set_mc_list_task.data = (void *)dev;
queue_task(&i2o_lan_set_mc_list_task, &tq_scheduler);
- } else {
+ } else
i2o_lan_set_mc_list(dev);
- }
}
/*
@@ -1236,10 +1243,20 @@ static int i2o_lan_change_mtu(struct net_device *dev, int new_mtu)
0x0000, 6, &max_pkt_size, 4) < 0)
return -EFAULT;
- if (new_mtu < 68 || max_pkt_size < new_mtu)
+ if (new_mtu < 68 || new_mtu > 9000 || new_mtu > max_pkt_size)
return -EINVAL;
dev->mtu = new_mtu;
+
+ i2o_lan_suspend(dev); // to SUSPENDED state, return buckets
+
+ while (priv->i2o_fbl_tail >= 0) // free buffered buckets
+ dev_kfree_skb(priv->i2o_fbl[priv->i2o_fbl_tail--]);
+
+ i2o_lan_reset(dev); // to OPERATIONAL state
+ i2o_set_ddm_parameters(dev); // reset some parameters
+ i2o_lan_receive_post(dev); // post new buckets (new size)
+
return 0;
}
@@ -1287,15 +1304,13 @@ struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
#ifdef CONFIG_FDDI
case I2O_LAN_FDDI:
{
- int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local)
- + sizeof("fddi%d ");
+ int size = sizeof(struct net_device) + sizeof(struct i2o_lan_local);
dev = (struct net_device *) kmalloc(size, GFP_KERNEL);
if (dev == NULL)
return NULL;
memset((char *)dev, 0, size);
dev->priv = (void *)(dev + 1);
- dev->name = (char *)(dev + 1) + sizeof(struct i2o_lan_local);
if (dev_alloc_name(dev, "fddi%d") < 0) {
printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
@@ -1335,7 +1350,7 @@ struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
priv = (struct i2o_lan_local *)dev->priv;
priv->i2o_dev = i2o_dev;
priv->type_trans = type_trans;
- priv->sgl_max = (i2o_dev->controller->inbound_size - 16) / 12;
+ priv->sgl_max = (i2o_dev->controller->status_block->inbound_frame_size - 4) / 3;
atomic_set(&priv->buckets_out, 0);
/* Set default values for user configurable parameters */
@@ -1344,7 +1359,7 @@ struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
priv->max_buckets_out = max_buckets_out;
priv->bucket_thresh = bucket_thresh;
priv->rx_copybreak = rx_copybreak;
- priv->tx_batch_mode = tx_batch_mode;
+ priv->tx_batch_mode = tx_batch_mode & 0x03;
priv->i2o_event_mask = i2o_event_mask;
priv->tx_lock = SPIN_LOCK_UNLOCKED;
@@ -1362,7 +1377,6 @@ struct net_device *i2o_lan_register_device(struct i2o_device *i2o_dev)
kfree(dev);
return NULL;
}
-
dprintk(KERN_DEBUG "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
hw_addr[4], hw_addr[5]);
@@ -1417,9 +1431,9 @@ int __init i2o_lan_init(void)
struct net_device *dev;
int i;
- printk(KERN_INFO "I2O LAN OSM (c) 1999 University of Helsinki.\n");
+ printk(KERN_INFO "I2O LAN OSM (C) 1999 University of Helsinki.\n");
- /* Module params used as global defaults for private values */
+ /* Module params are used as global defaults for private values */
if (max_buckets_out > I2O_LAN_MAX_BUCKETS_OUT)
max_buckets_out = I2O_LAN_MAX_BUCKETS_OUT;
@@ -1549,7 +1563,7 @@ MODULE_PARM(bucket_thresh, "1-" __MODULE_STRING(I2O_LAN_MAX_BUCKETS_OUT) "i");
MODULE_PARM_DESC(bucket_thresh, "Bucket post threshold (1-)");
MODULE_PARM(rx_copybreak, "1-" "i");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy only small frames (1-)");
-MODULE_PARM(tx_batch_mode, "0-1" "i");
-MODULE_PARM_DESC(tx_batch_mode, "0=Use immediate mode send, 1=Use batch mode send");
+MODULE_PARM(tx_batch_mode, "0-2" "i");
+MODULE_PARM_DESC(tx_batch_mode, "0=Send immediately, 1=Send in batches, 2=Switch automatically");
#endif
diff --git a/drivers/i2o/i2o_lan.h b/drivers/i2o/i2o_lan.h
index 72a87af25..17064a29a 100644
--- a/drivers/i2o/i2o_lan.h
+++ b/drivers/i2o/i2o_lan.h
@@ -18,12 +18,12 @@
/* Default values for tunable parameters first */
-#define I2O_LAN_MAX_BUCKETS_OUT 256
+#define I2O_LAN_MAX_BUCKETS_OUT 96
#define I2O_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */
#define I2O_LAN_RX_COPYBREAK 200
#define I2O_LAN_TX_TIMEOUT (1*HZ)
-#define I2O_LAN_TX_BATCH_MODE 1 /* 1=on, 0=off */
-#define I2O_LAN_EVENT_MASK 0 /* 0=None, 0xFFC00002=All */
+#define I2O_LAN_TX_BATCH_MODE 2 /* 2=automatic, 1=on, 0=off */
+#define I2O_LAN_EVENT_MASK 0 /* 0=None, 0xFFC00002=All */
/* LAN types */
#define I2O_LAN_ETHERNET 0x0030
@@ -126,6 +126,7 @@ struct i2o_bucket_descriptor {
struct i2o_lan_local {
u8 unit;
struct i2o_device *i2o_dev;
+
struct fddi_statistics stats; /* see also struct net_device_stats */
unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
atomic_t buckets_out; /* nbr of unused buckets on DDM */
@@ -133,7 +134,7 @@ struct i2o_lan_local {
u8 tx_count; /* packets in one TX message frame */
u16 tx_max_out; /* DDM's Tx queue len */
u8 sgl_max; /* max SGLs in one message frame */
- u32 m; /* IOP address of msg frame */
+ u32 m; /* IOP address of the batch msg frame */
struct tq_struct i2o_batch_send_task;
int send_active;
diff --git a/drivers/i2o/i2o_pci.c b/drivers/i2o/i2o_pci.c
index 16b8aaedc..0961d5624 100644
--- a/drivers/i2o/i2o_pci.c
+++ b/drivers/i2o/i2o_pci.c
@@ -6,6 +6,7 @@
*
* Written by Alan Cox, Building Number Three Ltd
* Modified by Deepak Saxena <deepak@plexity.net>
+ * Modified by Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -53,10 +54,10 @@ static void i2o_pci_dispose(struct i2o_controller *c)
iounmap(((u8 *)c->post_port)-0x40);
#ifdef CONFIG_MTRR
- if(c->bus.pci.mtrr_reg0 > 0)
- mtrr_del(c->bus.pci.mtrr_reg0, 0, 0);
- if(c->bus.pci.mtrr_reg1 > 0)
- mtrr_del(c->bus.pci.mtrr_reg1, 0, 0);
+ if(c->bus.pci.mtrr_reg0 > 0)
+ mtrr_del(c->bus.pci.mtrr_reg0, 0, 0);
+ if(c->bus.pci.mtrr_reg1 > 0)
+ mtrr_del(c->bus.pci.mtrr_reg1, 0, 0);
#endif
}
@@ -178,21 +179,21 @@ int __init i2o_pci_install(struct pci_dev *dev)
* Enable Write Combining MTRR for IOP's memory region
*/
#ifdef CONFIG_MTRR
- c->bus.pci.mtrr_reg0 =
- mtrr_add(c->mem_phys, size, MTRR_TYPE_WRCOMB, 1);
+ c->bus.pci.mtrr_reg0 =
+ mtrr_add(c->mem_phys, size, MTRR_TYPE_WRCOMB, 1);
/*
* If it is an INTEL i960 I/O processor then set the first 64K to Uncacheable
* since the region contains the Messaging unit which shouldn't be cached.
*/
- c->bus.pci.mtrr_reg1 = -1;
- if(dev->vendor == PCI_VENDOR_ID_INTEL)
- {
- printk(KERN_INFO "i2o_pci: MTRR workaround for Intel i960 processor\n");
- c->bus.pci.mtrr_reg1 =
- mtrr_add(c->mem_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
- if(c->bus.pci.mtrr_reg1< 0)
- printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
- }
+ c->bus.pci.mtrr_reg1 = -1;
+ if(dev->vendor == PCI_VENDOR_ID_INTEL)
+ {
+ printk(KERN_INFO "I2O: MTRR workaround for Intel i960 processor\n");
+ c->bus.pci.mtrr_reg1 =
+ mtrr_add(c->mem_phys, 65536, MTRR_TYPE_UNCACHABLE, 1);
+ if(c->bus.pci.mtrr_reg1< 0)
+ printk(KERN_INFO "i2o_pci: Error in setting MTRR_TYPE_UNCACHABLE\n");
+ }
#endif