authorRalf Baechle <ralf@linux-mips.org>1999-06-17 14:08:29 +0000
committerRalf Baechle <ralf@linux-mips.org>1999-06-17 14:08:29 +0000
commit57d569635c05dc4ea9b9f1f8dcec69b9ddc989b2 (patch)
tree1f703abf7d95dcd50ee52da3b96eb1b4b2b4ea53 /drivers/i2o
parent59223edaa18759982db0a8aced0e77457d10c68e (diff)
The rest of 2.3.6.
Diffstat (limited to 'drivers/i2o')
-rw-r--r--  drivers/i2o/Config.in     |   12
-rw-r--r--  drivers/i2o/Makefile      |   75
-rw-r--r--  drivers/i2o/README        |   78
-rw-r--r--  drivers/i2o/README.ioctl  |  398
-rw-r--r--  drivers/i2o/README.lan    |   38
-rw-r--r--  drivers/i2o/i2o_block.c   | 1071
-rw-r--r--  drivers/i2o/i2o_config.c  |  613
-rw-r--r--  drivers/i2o/i2o_core.c    | 2053
-rw-r--r--  drivers/i2o/i2o_lan.c     |  853
-rw-r--r--  drivers/i2o/i2o_lan.h     |  112
-rw-r--r--  drivers/i2o/i2o_pci.c     |  243
-rw-r--r--  drivers/i2o/i2o_proc.c    | 2382
-rw-r--r--  drivers/i2o/i2o_proc.h    |  141
-rw-r--r--  drivers/i2o/i2o_scsi.c    |  871
-rw-r--r--  drivers/i2o/i2o_scsi.h    |   48
15 files changed, 8988 insertions, 0 deletions
diff --git a/drivers/i2o/Config.in b/drivers/i2o/Config.in
new file mode 100644
index 000000000..d6ae26f64
--- /dev/null
+++ b/drivers/i2o/Config.in
@@ -0,0 +1,12 @@
+mainmenu_option next_comment
+comment 'I2O device support'
+
+tristate 'I2O support' CONFIG_I2O
+
+dep_tristate 'I2O PCI support' CONFIG_I2O_PCI $CONFIG_I2O
+dep_tristate 'I2O Block OSM' CONFIG_I2O_BLOCK $CONFIG_I2O
+dep_tristate 'I2O LAN OSM' CONFIG_I2O_LAN $CONFIG_I2O
+dep_tristate 'I2O SCSI OSM' CONFIG_I2O_SCSI $CONFIG_I2O
+dep_tristate 'I2O /proc support' CONFIG_I2O_PROC $CONFIG_I2O
+
+endmenu
diff --git a/drivers/i2o/Makefile b/drivers/i2o/Makefile
new file mode 100644
index 000000000..d70b42310
--- /dev/null
+++ b/drivers/i2o/Makefile
@@ -0,0 +1,75 @@
+#
+# Makefile for the kernel I2O OSM.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now inherited from the
+# parent makefile.
+#
+
+#
+# Note : at this point, these files are compiled on all systems.
+# In the future, some of these should be built conditionally.
+#
+
+SUB_DIRS :=
+MOD_SUB_DIRS := $(SUB_DIRS)
+ALL_SUB_DIRS := $(SUB_DIRS)
+
+
+L_TARGET := i2o.a
+L_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_I2O_PCI),y)
+L_OBJS += i2o_pci.o
+else
+ ifeq ($(CONFIG_I2O_PCI),m)
+ M_OBJS += i2o_pci.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O),y)
+LX_OBJS += i2o_core.o i2o_config.o
+else
+ ifeq ($(CONFIG_I2O),m)
+ MX_OBJS += i2o_core.o i2o_config.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_BLOCK),y)
+LX_OBJS += i2o_block.o
+else
+ ifeq ($(CONFIG_I2O_BLOCK),m)
+ MX_OBJS += i2o_block.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_LAN),y)
+LX_OBJS += i2o_lan.o
+else
+ ifeq ($(CONFIG_I2O_LAN),m)
+ MX_OBJS += i2o_lan.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_SCSI),y)
+LX_OBJS += i2o_scsi.o
+else
+ ifeq ($(CONFIG_I2O_SCSI),m)
+ MX_OBJS += i2o_scsi.o
+ endif
+endif
+
+ifeq ($(CONFIG_I2O_PROC),y)
+LX_OBJS += i2o_proc.o
+else
+ ifeq ($(CONFIG_I2O_PROC),m)
+ MX_OBJS += i2o_proc.o
+ endif
+endif
+
+include $(TOPDIR)/Rules.make
+
diff --git a/drivers/i2o/README b/drivers/i2o/README
new file mode 100644
index 000000000..4e6d2c16d
--- /dev/null
+++ b/drivers/i2o/README
@@ -0,0 +1,78 @@
+
+ Linux I2O Support (c) Copyright 1999 Red Hat Software
+ and others.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+AUTHORS (so far)
+
+Alan Cox, Building Number Three Ltd.
+ Core code, SCSI and Block OSMs
+
+Steve Ralston, LSI Logic Corp.
+ Debugging SCSI and Block OSM
+
+Deepak Saxena, Intel Corp.
+ /proc interface, bug fixes
+ Ioctl interfaces for control
+
+Philip Rumpf
+ Fixed assorted dumb SMP locking bugs
+
+Juha Sievanen, University of Helsinki, Finland
+ LAN OSM
+ Bug fixes
+ Core code extensions
+
+CREDITS
+
+ This work was made possible by
+
+Red Hat Software
+ Funding for the Building #3 part of the project
+
+Symbios Logic (Now LSI)
+	Host adapters, hints, and known-working platforms when I hit
+	compatibility problems
+
+BoxHill Corporation
+ Loan of initial FibreChannel disk array used for development work.
+
+STATUS:
+
+o The core setup works within limits.
+o The scsi layer seems to almost work. I'm still chasing down the hang
+ bug.
+o The block OSM is fairly minimal but does seem to work.
+
+
+TO DO:
+
+General:
+o Support multiple IOPs and tell them about each other
+o Provide hidden address space if asked
+o Long term message flow control
+o PCI IOPs without interrupts are not supported yet
+o Push FAIL handling into the core
+o DDM control interfaces for module load etc
+
+Block:
+o Real error handler
+o Multiple major numbers
+o Read ahead and cache handling stuff. Talk to Ingo and people
+o Power management
+o Finish Media changers
+
+SCSI:
+o Find the right way to associate drives/luns/busses
+
+Net:
+o Port the existing RCPCI work to the framework or write a new
+  driver. This one is with the Finns.
+
+Tape:
+o Anyone seen anything implementing this?
+
diff --git a/drivers/i2o/README.ioctl b/drivers/i2o/README.ioctl
new file mode 100644
index 000000000..501c93af9
--- /dev/null
+++ b/drivers/i2o/README.ioctl
@@ -0,0 +1,398 @@
+
+Linux I2O User Space Interface
+rev 0.3 - 04/20/99
+
+=============================================================================
+Originally written by Deepak Saxena(deepak.saxena@intel.com)
+Currently maintained by Deepak Saxena(deepak.saxena@intel.com)
+=============================================================================
+
+I. Introduction
+
+The Linux I2O subsystem provides a set of ioctl() commands that can be
+utilized by user space applications to communicate with IOPs and devices
+on individual IOPs. This document defines the specific ioctl() commands
+that are available to the user and provides examples of their uses.
+
+This document assumes the reader is familiar with or has access to the
+I2O specification as no I2O message parameters are outlined. For information
+on the specification, see http://www.i2osig.org
+
+This document and the I2O user space interface are currently maintained
+by Deepak Saxena. Please send all comments, errata, and bug fixes to
+deepak.saxena@intel.com
+
+II. IOP Access
+
+Access to the I2O subsystem is provided through the device file named
+/dev/i2octl. This file is a character file with major number 10 and minor
+number 166. It can be created through the following command:
+
+ mknod /dev/i2octl c 10 166
+
+III. Determining the IOP Count
+
+ SYNOPSIS
+
+ ioctl(fd, I2OGETIOPS, int *count);
+
+ u8 count[MAX_I2O_CONTROLLERS];
+
+ DESCRIPTION
+
+ This function returns the system's active IOP table. count should
+ point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon
+ returning, each entry will contain a non-zero value if the given
+   IOP unit is active, and zero if it is inactive or non-existent.
+
+ RETURN VALUE.
+
+ Returns 0 if no errors occur, and -1 otherwise. If an error occurs,
+ errno is set appropriately:
+
+   EIO         Unknown error
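+
+   As an illustration only, a minimal user space caller might look like
+   the sketch below.  It assumes that the ioctl numbers, the
+   MAX_I2O_CONTROLLERS constant and the structures described in this
+   document are visible through <linux/i2o.h>; error handling is
+   abbreviated:
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <sys/ioctl.h>
+	#include <linux/i2o.h>
+
+	int list_iops(void)
+	{
+		unsigned char count[MAX_I2O_CONTROLLERS];	/* the u8 table */
+		int i, fd;
+
+		fd = open("/dev/i2octl", O_RDONLY);
+		if (fd < 0)
+			return -1;
+		if (ioctl(fd, I2OGETIOPS, count) < 0) {
+			close(fd);
+			return -1;
+		}
+		for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
+			if (count[i])
+				printf("iop%d is active\n", i);
+		close(fd);
+		return 0;
+	}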
+
+IV. ExecHrtGet Message
+
+ SYNOPSIS
+
+   ioctl(fd, I2OHRTGET, struct i2o_cmd_hrtlct *hrt);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+   This function posts an ExecHrtGet message to the IOP specified by
+   hrt->iop and returns the data in the buffer pointed to by hrt->resbuf.
+   The size of the data written is placed into the memory pointed to
+   by hrt->reslen.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(hrt->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
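+
+   The ENOBUFS convention suggests the usual calling pattern: try with a
+   guessed buffer size, then retry with the length the driver reports
+   back.  A hedged sketch (same header assumptions as the example in
+   section III; the LCT can be fetched the same way with I2OLCTGET):
+
+	#include <errno.h>
+	#include <stdlib.h>
+	#include <sys/ioctl.h>
+	#include <linux/i2o.h>
+
+	/* Return a malloc()ed buffer holding the HRT of IOP `iop', or
+	 * NULL on failure.  *len is set to the HRT size in bytes. */
+	void *get_hrt(int fd, u32 iop, u32 *len)
+	{
+		struct i2o_cmd_hrtlct hrt;
+		void *buf = malloc(*len = 4096);
+
+		if (buf == NULL)
+			return NULL;
+		hrt.iop = iop;
+		hrt.resbuf = buf;
+		hrt.reslen = len;
+		if (ioctl(fd, I2OHRTGET, &hrt) == 0)
+			return buf;
+		if (errno == ENOBUFS) {
+			/* *len now holds the required length - retry once */
+			void *big = realloc(buf, *len);
+			if (big != NULL) {
+				buf = big;
+				hrt.resbuf = buf;
+				if (ioctl(fd, I2OHRTGET, &hrt) == 0)
+					return buf;
+			}
+		}
+		free(buf);
+		return NULL;
+	}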
+
+V. ExecLctNotify Message
+
+ SYNOPSIS
+
+   ioctl(fd, I2OLCTGET, struct i2o_cmd_hrtlct *lct);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+   This function posts an ExecLctNotify message to the IOP specified by
+   lct->iop and returns the data in the buffer pointed to by lct->resbuf.
+   The size of the data written is placed into the memory pointed to
+   by lct->reslen.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(lct->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
+
+VI. UtilParamsSet Message
+
+ SYNOPSIS
+
+   ioctl(fd, I2OPARMSET, struct i2o_cmd_psetget *ops);
+
+ struct i2o_cmd_psetget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+   This function posts a UtilParamsSet message to the device identified
+   by ops->iop and ops->tid. The operation list for the message is
+   sent through the ops->opbuf buffer, and the result list is written
+   into the buffer pointed to by ops->resbuf. The number of bytes
+   written is placed into *(ops->reslen).
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
+
+ A return value of 0 does not mean that the value was actually
+ changed properly on the IOP. The user should check the result
+ list to determine the specific status of the transaction.
+
+VII. UtilParamsGet Message
+
+ SYNOPSIS
+
+   ioctl(fd, I2OPARMGET, struct i2o_cmd_psetget *ops);
+
+   struct i2o_cmd_psetget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+   This function posts a UtilParamsGet message to the device identified
+   by ops->iop and ops->tid. The operation list for the message is
+   sent through the ops->opbuf buffer, and the result list is written
+   into the buffer pointed to by ops->resbuf. The actual size of data
+   written is placed into *(ops->reslen).
+
+   RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
+
+ A return value of 0 does not mean that the value was actually
+   properly retrieved. The user should check the result list
+ to determine the specific status of the transaction.
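+
+   For both I2OPARMGET and I2OPARMSET the caller only supplies buffers;
+   the layout of the operation and result lists themselves is defined by
+   the I2O specification and is not repeated here.  A hedged sketch of
+   the plumbing for a get, with `oplist' assumed to be a pre-built
+   operation list (same header assumptions as the earlier examples):
+
+	#include <sys/ioctl.h>
+	#include <linux/i2o.h>
+
+	/* Issue a UtilParamsGet against device `tid' on IOP `iop'. */
+	int params_get(int fd, u32 iop, u32 tid,
+		       void *oplist, u32 oplen,
+		       void *result, u32 *reslen)
+	{
+		struct i2o_cmd_psetget ops;
+
+		ops.iop    = iop;
+		ops.tid    = tid;
+		ops.opbuf  = oplist;
+		ops.oplen  = oplen;
+		ops.resbuf = result;
+		ops.reslen = reslen;	/* in: buffer size, out: bytes used */
+		return ioctl(fd, I2OPARMGET, &ops);
+	}
+
+   Remember to inspect the returned result list for per-operation status
+   even when the ioctl() itself returns 0.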
+
+VIII. ExecSwDownload Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 dl_flags; /* DownLoadFlags field */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length of software data */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+   This function downloads the software pointed to by sw->buf to the
+   IOP identified by sw->iop. The DownloadFlags, SwID, and SwType fields
+   of the ExecSwDownload message are filled in with the values of
+   sw->dl_flags, sw->sw_id, and sw->sw_type.
+
+   Once the ioctl() is called and the software transfer begins, the
+   user can read the values *(sw->maxfrag) and *(sw->curfrag) to
+   determine the status of the software transfer. As the IOP
+   is very slow when it comes to SW transfers, this can be
+   used by a separate thread to report status to the user. The
+   user _should not_ write to these memory locations until the ioctl()
+   has returned.
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+   ENOBUFS     Buffer not large enough. If this occurs, the required
+               buffer length is written into *(sw->swlen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
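+
+   A hedged sketch of a download call, with the image assumed to be
+   already read into memory and the dl_flags/sw_type/sw_id values taken
+   from the documentation of the module being transferred (same header
+   assumptions as the earlier examples):
+
+	#include <sys/ioctl.h>
+	#include <linux/i2o.h>
+
+	int sw_download(int fd, u32 iop, u8 dl_flags, u8 sw_type,
+			u32 sw_id, void *image, u32 image_len)
+	{
+		struct i2o_sw_xfer sw;
+		u32 maxfrag = 0, curfrag = 0;
+
+		sw.iop      = iop;
+		sw.dl_flags = dl_flags;
+		sw.sw_type  = sw_type;
+		sw.sw_id    = sw_id;
+		sw.buf      = image;
+		sw.swlen    = &image_len;
+		sw.maxfrag  = &maxfrag;	/* progress counters; may be read */
+		sw.curfrag  = &curfrag;	/* from another thread while blocked */
+		return ioctl(fd, I2OSWDL, &sw);
+	}
+
+   Note that in this release ioctl_swdl() in i2o_config.c is still a
+   stub, so the call fails with ENOSYS until it is implemented.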
+
+IX. ExecSwUpload Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* Unused */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length in bytes of software */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+   This function uploads software from the IOP identified by sw->iop
+   and places it in the buffer pointed to by sw->buf. The SwID, SwType
+   and SwSize fields of the ExecSwUpload message are filled in
+   with the values of sw->sw_id, sw->sw_type, and *(sw->swlen). The
+   actual size of the module is written into *(sw->swlen).
+
+   Once the ioctl() is called and the software transfer begins, the
+   user can read the values *(sw->maxfrag) and *(sw->curfrag) to
+   determine the status of the software transfer. As the IOP
+   is very slow when it comes to SW transfers, this can be
+   used by a separate thread to report status to the user. The
+   user _should not_ write to these memory locations until the ioctl()
+   has returned.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+   ENOBUFS     Buffer not large enough. If this occurs, the required
+               buffer length is written into *(sw->swlen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
+
+X. ExecSwRemove Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* Unused */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Unused */
+ u32 *swlen; /* Length in bytes of software data */
+ u32 *maxfrag; /* Unused */
+ u32 *curfrag; /* Unused */
+ };
+
+ DESCRIPTION
+
+   This function removes the software identified by sw->sw_id and
+   sw->sw_type from the IOP identified by sw->iop. The SwID, SwType
+   and SwSize fields of the ExecSwRemove message are filled in
+   with the values of sw->sw_id, sw->sw_type, and *(sw->swlen).
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+   ENOBUFS     Buffer not large enough. If this occurs, the required
+               buffer length is written into *(sw->swlen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
+
+XI. UtilConfigDialog Message
+
+ SYNOPSIS
+
+ ioctl(fd, I2OHTML, struct i2o_html *htquery);
+
+ struct i2o_html
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device ID */
+ u32 page; /* HTML page */
+ void *resbuf; /* Buffer for reply HTML page */
+ u32 *reslen; /* Length in bytes of reply buffer */
+ void *qbuf; /* Pointer to HTTP query string */
+ u32 qlen; /* Length in bytes of query string buffer */
+ };
+
+ DESCRIPTION
+
+   This function posts a UtilConfigDialog message to the device identified
+ by htquery->iop and htquery->tid. The requested HTML page number is
+ provided by the htquery->page field, and the resultant data is stored
+ in the buffer pointed to by htquery->resbuf. If there is an HTTP query
+ string that is to be sent to the device, it should be sent in the buffer
+ pointed to by htquery->qbuf. If there is no query string, this field
+ should be set to NULL. The actual size of the reply received is written
+   into *(htquery->reslen).
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+   ENOBUFS     Buffer not large enough. If this occurs, the required
+               buffer length is written into *(htquery->reslen)
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+   EIO         Unknown error
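+
+   A hedged sketch of fetching page 0 from a device without sending an
+   HTTP query string (same header assumptions as the earlier examples):
+
+	#include <sys/ioctl.h>
+	#include <linux/i2o.h>
+
+	int get_html_page(int fd, u32 iop, u32 tid,
+			  void *page_buf, u32 *page_len)
+	{
+		struct i2o_html q;
+
+		q.iop    = iop;
+		q.tid    = tid;
+		q.page   = 0;
+		q.resbuf = page_buf;
+		q.reslen = page_len;	/* in: buffer size, out: reply size */
+		q.qbuf   = NULL;	/* no HTTP query string */
+		q.qlen   = 0;
+		return ioctl(fd, I2OHTML, &q);
+	}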
+
+XII. Events
+
+   This is still being worked out. The current idea is to use the
+   select() interface to allow user applications to periodically poll
+   the /dev/i2octl device for events. When select() notifies the user
+   that an event is available, the user would call read() to retrieve
+   a list of all the events that are pending for the specific device.
+
+=============================================================================
+Revision History
+=============================================================================
+
+Rev 0.1 - 04/01/99
+- Initial revision
+
+Rev 0.2 - 04/06/99
+- Changed return values to match the UNIX ioctl() standard. The only return
+  values are 0 and -1. All errors are reported through errno.
+- Added summary of proposed possible event interfaces
+
+Rev 0.3 - 04/20/99
+- Changed all ioctls() to use pointers to user data instead of actual data
+- Updated error values to match the code
+
+
diff --git a/drivers/i2o/README.lan b/drivers/i2o/README.lan
new file mode 100644
index 000000000..1d1ba0f14
--- /dev/null
+++ b/drivers/i2o/README.lan
@@ -0,0 +1,38 @@
+
+ Linux I2O LAN OSM
+ (c) University of Helsinki, Department of Computer Science
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+AUTHORS
+Auvo Häkkinen, Auvo.Hakkinen@cs.Helsinki.FI
+Juha Sievänen, Juha.Sievanen@cs.Helsinki.FI
+
+CREDITS
+
+ This work was made possible by
+
+European Committee
+ Funding for the project
+
+SysKonnect
+ Loaning of FDDI cards
+
+ASUSTeK
+ I2O motherboard
+
+STATUS:
+o The FDDI part of the LAN OSM is working to some extent.
+o Only one packet per bucket is currently supported.
+
+TO DO:
+
+LAN:
+o Add support for batches
+o Find out why big packets flow out of the I2O box but do not come in
+o Find the bug in i2o_set_multicast_list(), which kills the interrupt
+  handler in i2o_wait_reply()
+o Add support for Ethernet, Token Ring, AnyLAN, Fibre Channel
diff --git a/drivers/i2o/i2o_block.c b/drivers/i2o/i2o_block.c
new file mode 100644
index 000000000..5d543b1cc
--- /dev/null
+++ b/drivers/i2o/i2o_block.c
@@ -0,0 +1,1071 @@
+/*
+ * I2O block device driver.
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is an initial test release. Most of the good code was taken
+ * from the nbd driver by Pavel Machek, who in turn took some of it
+ * from loop.c. Isn't free software great for reusability 8)
+ *
+ * Fixes:
+ * Steve Ralston: Multiple device handling error fixes,
+ * Added a queue depth.
+ */
+
+#include <linux/major.h>
+
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/ioctl.h>
+#include <linux/i2o.h>
+#include <linux/blkdev.h>
+#include <linux/malloc.h>
+#include <linux/hdreg.h>
+
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#define MAJOR_NR I2O_MAJOR
+
+#include <linux/blk.h>
+
+#define MAX_I2OB 16
+
+#define MAX_I2OB_DEPTH 4
+
+/*
+ * Some of these can be made smaller later
+ */
+
+static int i2ob_blksizes[MAX_I2OB<<4];
+static int i2ob_hardsizes[MAX_I2OB<<4];
+static int i2ob_sizes[MAX_I2OB<<4];
+static int i2ob_media_change_flag[MAX_I2OB];
+static u32 i2ob_max_sectors[MAX_I2OB<<4];
+
+static int i2ob_context;
+
+#ifdef __SMP__
+static spinlock_t i2ob_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+struct i2ob_device
+{
+ struct i2o_controller *controller;
+ struct i2o_device *i2odev;
+ int tid;
+ int flags;
+ int refcnt;
+ struct request *head, *tail;
+ int done_flag;
+};
+
+/*
+ * Each I2O disk is one of these.
+ */
+
+static struct i2ob_device i2ob_dev[MAX_I2OB<<4];
+static int i2ob_devices = 0;
+static struct hd_struct i2ob[MAX_I2OB<<4];
+static struct gendisk i2ob_gendisk; /* Declared later */
+
+static atomic_t queue_depth; /* For flow control later on */
+
+#define DEBUG( s )
+/* #define DEBUG( s ) printk( s )
+ */
+
+static int i2ob_install_device(struct i2o_controller *, struct i2o_device *, int);
+static void i2ob_end_request(struct request *);
+static void do_i2ob_request(void);
+
+/*
+ * Get a message
+ */
+
+static u32 i2ob_get(struct i2ob_device *dev)
+{
+ struct i2o_controller *c=dev->controller;
+ return I2O_POST_READ32(c);
+}
+
+/*
+ * Turn a Linux block request into an I2O block read/write.
+ */
+
+static int i2ob_send(u32 m, struct i2ob_device *dev, struct request *req, u32 base, int unit)
+{
+ struct i2o_controller *c = dev->controller;
+ int tid = dev->tid;
+ u32 *msg;
+ u32 *mptr;
+ u64 offset;
+ struct buffer_head *bh = req->bh;
+ static int old_qd = 2;
+ int count = req->nr_sectors<<9;
+
+ /*
+ * Build a message
+ */
+
+ msg = bus_to_virt(c->mem_offset + m);
+
+ msg[2] = i2ob_context|(unit<<8);
+ msg[3] = (u32)req; /* 64bit issue again here */
+ msg[5] = req->nr_sectors << 9;
+
+	/* This can be optimised later - just want to be sure it's right for
+ starters */
+ offset = ((u64)(req->sector+base)) << 9;
+ msg[6] = offset & 0xFFFFFFFF;
+ msg[7] = (offset>>32);
+ mptr=msg+8;
+
+ if(req->cmd == READ)
+ {
+ msg[1] = I2O_CMD_BLOCK_READ<<24|HOST_TID<<12|tid;
+ /* We don't yet do cache/readahead and other magic */
+ msg[4] = 1<<16;
+ while(bh!=NULL)
+ {
+ *mptr++ = 0x10000000|(bh->b_size);
+ *mptr++ = virt_to_bus(bh->b_data);
+ count -= bh->b_size;
+ bh = bh->b_reqnext;
+ }
+ }
+ else if(req->cmd == WRITE)
+ {
+ msg[1] = I2O_CMD_BLOCK_WRITE<<24|HOST_TID<<12|tid;
+ msg[4] = 1<<16;
+ while(bh!=NULL)
+ {
+ *mptr++ = 0x14000000|(bh->b_size);
+ count -= bh->b_size;
+ *mptr++ = virt_to_bus(bh->b_data);
+ bh = bh->b_reqnext;
+ }
+ }
+ mptr[-2]|= 0xC0000000;
+ msg[0] = I2O_MESSAGE_SIZE(mptr-msg) | SGL_OFFSET_8;
+
+ if(req->current_nr_sectors > 8)
+ printk("Gathered sectors %ld.\n",
+ req->current_nr_sectors);
+
+ if(count != 0)
+ {
+ printk("Request count botched by %d.\n", count);
+ msg[5] -= count;
+ }
+
+// printk("Send for %p\n", req);
+
+ i2o_post_message(c,m);
+ atomic_inc(&queue_depth);
+ if(atomic_read(&queue_depth)>old_qd)
+ {
+ old_qd=atomic_read(&queue_depth);
+ printk("Depth now %d.\n", old_qd);
+ }
+ return 0;
+}
+
+/*
+ * Remove a request from the _locked_ request list. We update both the
+ * list chain and if this is the last item the tail pointer.
+ */
+
+static void i2ob_unhook_request(struct i2ob_device *dev, struct request *req)
+{
+ struct request **p = &dev->head;
+ struct request *nt = NULL;
+ static int crap = 0;
+
+ while(*p!=NULL)
+ {
+ if(*p==req)
+ {
+ if(dev->tail==req)
+ dev->tail = nt;
+ *p=req->next;
+ return;
+ }
+ nt=*p;
+ p=&(nt->next);
+ }
+ if(!crap++)
+ printk("i2o_block: request queue corrupt!\n");
+}
+
+/*
+ * Request completion handler
+ */
+
+static void i2ob_end_request(struct request *req)
+{
+ /*
+ * Loop until all of the buffers that are linked
+ * to this request have been marked updated and
+ * unlocked.
+ */
+ while (end_that_request_first( req, !req->errors, "i2o block" ));
+
+ /*
+ * It is now ok to complete the request.
+ */
+ end_that_request_last( req );
+}
+
+
+/*
+ * OSM reply handler. This gets all the message replies
+ */
+
+static void i2o_block_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
+{
+ struct request *req;
+ u8 st;
+ u32 *m = (u32 *)msg;
+ u8 unit = (m[2]>>8)&0xF0; /* low 4 bits are partition */
+
+ if(m[0] & (1<<13))
+ {
+ printk("IOP fail.\n");
+ printk("From %d To %d Cmd %d.\n",
+ (m[1]>>12)&0xFFF,
+ m[1]&0xFFF,
+ m[1]>>24);
+ printk("Failure Code %d.\n", m[4]>>24);
+ if(m[4]&(1<<16))
+ printk("Format error.\n");
+ if(m[4]&(1<<17))
+ printk("Path error.\n");
+ if(m[4]&(1<<18))
+ printk("Path State.\n");
+ if(m[4]&(1<<18))
+ printk("Congestion.\n");
+
+ m=(u32 *)bus_to_virt(m[7]);
+ printk("Failing message is %p.\n", m);
+
+ /* We need to up the request failure count here and maybe
+ abort it */
+ req=(struct request *)m[3];
+ /* Now flush the message by making it a NOP */
+ m[0]&=0x00FFFFFF;
+ m[0]|=(I2O_CMD_UTIL_NOP)<<24;
+ i2o_post_message(c,virt_to_bus(m));
+
+ }
+ else
+ {
+ if(m[2]&0x80000000)
+ {
+ int * ptr = (int *)m[3];
+ if(m[4]>>24)
+ *ptr = -1;
+ else
+ *ptr = 1;
+ return;
+ }
+ /*
+ * Lets see what is cooking. We stuffed the
+ * request in the context.
+ */
+
+ req=(struct request *)m[3];
+ st=m[4]>>24;
+
+ if(st!=0)
+ {
+ printk(KERN_ERR "i2ob: error %08X\n", m[4]);
+ /*
+ * Now error out the request block
+ */
+ req->errors++;
+ }
+ }
+ /*
+ * Dequeue the request.
+ */
+
+ spin_lock(&io_request_lock);
+ spin_lock(&i2ob_lock);
+ i2ob_unhook_request(&i2ob_dev[unit], req);
+ i2ob_end_request(req);
+
+ /*
+ * We may be able to do more I/O
+ */
+
+ atomic_dec(&queue_depth);
+ do_i2ob_request();
+ spin_unlock(&i2ob_lock);
+ spin_unlock(&io_request_lock);
+}
+
+static struct i2o_handler i2o_block_handler =
+{
+ i2o_block_reply,
+ "I2O Block OSM",
+ 0
+};
+
+
+/*
+ * Flush all pending requests as errors. Must call with the queue
+ * locked.
+ */
+
+#if 0
+static void i2ob_clear_queue(struct i2ob_device *dev)
+{
+ struct request *req;
+
+ while (1) {
+ req = dev->tail;
+ if (!req)
+ return;
+ req->errors++;
+ i2ob_end_request(req);
+
+ if (dev->tail == dev->head)
+ dev->head = NULL;
+ dev->tail = dev->tail->next;
+ }
+}
+#endif
+
+/*
+ * The I2O block driver is listed as one of those that pulls the
+ * front entry off the queue before processing it. This is important
+ * to remember here. If we drop the io lock then CURRENT will change
+ * on us. We must unlink CURRENT in this routine before we return, if
+ * we use it.
+ */
+
+static void do_i2ob_request(void)
+{
+ struct request *req;
+ int unit;
+ struct i2ob_device *dev;
+ u32 m;
+
+ while (CURRENT) {
+ /*
+ * On an IRQ completion if there is an inactive
+		 * request on the queue head it means it isn't yet
+ * ready to dispatch.
+ */
+ if(CURRENT->rq_status == RQ_INACTIVE)
+ return;
+
+ /*
+ * Queue depths probably belong with some kind of
+		 * generic IOP commit control. Certainly it's not right
+		 * that it's global!
+ */
+ if(atomic_read(&queue_depth)>=MAX_I2OB_DEPTH)
+ break;
+
+ req = CURRENT;
+ unit = MINOR(req->rq_dev);
+ dev = &i2ob_dev[(unit&0xF0)];
+ /* Get a message */
+ m = i2ob_get(dev);
+ /* No messages -> punt
+		   FIXME: if we have no messages and none become free,
+ we deadlock now. Need a timer/callback ?? */
+ if(m==0xFFFFFFFF)
+ {
+ printk("i2ob: no messages!\n");
+ break;
+ }
+ req->errors = 0;
+ CURRENT = CURRENT->next;
+ req->next = NULL;
+
+ if (dev->head == NULL) {
+ dev->head = req;
+ dev->tail = req;
+ } else {
+ dev->tail->next = req;
+ dev->tail = req;
+ }
+ i2ob_send(m, dev, req, i2ob[unit].start_sect, (unit&0xF0));
+ }
+}
+
+static void i2ob_request(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&i2ob_lock, flags);
+ do_i2ob_request();
+ spin_unlock_irqrestore(&i2ob_lock, flags);
+}
+
+/*
+ * SCSI-CAM for ioctl geometry mapping
+ * Duplicated with SCSI - this should be moved into somewhere common
+ * perhaps genhd ?
+ */
+
+static void i2o_block_biosparam(
+ unsigned long capacity,
+ unsigned short *cyls,
+ unsigned char *hds,
+ unsigned char *secs)
+{
+ unsigned long heads, sectors, cylinders, temp;
+
+ cylinders = 1024L; /* Set number of cylinders to max */
+ sectors = 62L; /* Maximize sectors per track */
+
+ temp = cylinders * sectors; /* Compute divisor for heads */
+ heads = capacity / temp; /* Compute value for number of heads */
+ if (capacity % temp) { /* If no remainder, done! */
+ heads++; /* Else, increment number of heads */
+ temp = cylinders * heads; /* Compute divisor for sectors */
+ sectors = capacity / temp; /* Compute value for sectors per
+ track */
+ if (capacity % temp) { /* If no remainder, done! */
+ sectors++; /* Else, increment number of sectors */
+ temp = heads * sectors; /* Compute divisor for cylinders */
+ cylinders = capacity / temp;/* Compute number of cylinders */
+ }
+ }
+ /* if something went wrong, then apparently we have to return
+ a geometry with more than 1024 cylinders */
+ if (cylinders == 0 || heads > 255 || sectors > 63 || cylinders >1023)
+ {
+ unsigned long temp_cyl;
+
+ heads = 64;
+ sectors = 32;
+ temp_cyl = capacity / (heads * sectors);
+ if (temp_cyl > 1024)
+ {
+ heads = 255;
+ sectors = 63;
+ }
+ cylinders = capacity / (heads * sectors);
+ }
+ *cyls = (unsigned int) cylinders; /* Stuff return values */
+ *secs = (unsigned int) sectors;
+ *hds = (unsigned int) heads;
+}
+
+/*
+ * Rescan the partition tables
+ */
+
+static int do_i2ob_revalidate(kdev_t dev, int maxu)
+{
+ int minor=MINOR(dev);
+ int i;
+
+ minor&=0xF0;
+
+ i2ob_dev[minor].refcnt++;
+ if(i2ob_dev[minor].refcnt>maxu+1)
+ {
+ i2ob_dev[minor].refcnt--;
+ return -EBUSY;
+ }
+
+ for( i = 15; i>=0 ; i--)
+ {
+ int m = minor+i;
+ kdev_t d = MKDEV(MAJOR_NR, m);
+ struct super_block *sb = get_super(d);
+
+ sync_dev(d);
+ if(sb)
+ invalidate_inodes(sb);
+ invalidate_buffers(d);
+ i2ob_gendisk.part[m].start_sect = 0;
+ i2ob_gendisk.part[m].nr_sects = 0;
+ }
+
+ /*
+ * Do a physical check and then reconfigure
+ */
+
+ i2ob_install_device(i2ob_dev[minor].controller, i2ob_dev[minor].i2odev,
+ minor);
+ i2ob_dev[minor].refcnt--;
+ return 0;
+}
+
+/*
+ * Issue device specific ioctl calls.
+ */
+
+static int i2ob_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct i2ob_device *dev;
+ int minor;
+
+ /* Anyone capable of this syscall can do *real bad* things */
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (!inode)
+ return -EINVAL;
+ minor = MINOR(inode->i_rdev);
+ if (minor >= (MAX_I2OB<<4))
+ return -ENODEV;
+
+ dev = &i2ob_dev[minor];
+ switch (cmd) {
+ case BLKRASET:
+ if(!capable(CAP_SYS_ADMIN)) return -EACCES;
+ if(arg > 0xff) return -EINVAL;
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRAGET:
+ if (!arg) return -EINVAL;
+ return put_user(read_ahead[MAJOR(inode->i_rdev)],
+ (long *) arg);
+ case BLKGETSIZE:
+ return put_user(i2ob[minor].nr_sects, (long *) arg);
+
+ case BLKFLSBUF:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case HDIO_GETGEO:
+ {
+ struct hd_geometry g;
+ int u=minor&0xF0;
+ i2o_block_biosparam(i2ob_sizes[u]<<1,
+ &g.cylinders, &g.heads, &g.sectors);
+ g.start = i2ob[minor].start_sect;
+ return copy_to_user((void *)arg,&g, sizeof(g))?-EFAULT:0;
+ }
+
+ case BLKRRPART:
+ if(!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ return do_i2ob_revalidate(inode->i_rdev,1);
+
+ default:
+ return blk_ioctl(inode->i_rdev, cmd, arg);
+ }
+}
+
+/*
+ * Issue UTIL_CLAIM messages
+ */
+
+static int i2ob_claim_device(struct i2ob_device *dev, int onoff)
+{
+ return i2o_issue_claim(dev->controller, dev->tid, i2ob_context, onoff, &dev->done_flag);
+}
+
+/*
+ * Close the block device down
+ */
+
+static int i2ob_release(struct inode *inode, struct file *file)
+{
+ struct i2ob_device *dev;
+ int minor;
+
+ minor = MINOR(inode->i_rdev);
+ if (minor >= (MAX_I2OB<<4))
+ return -ENODEV;
+ sync_dev(inode->i_rdev);
+ dev = &i2ob_dev[(minor&0xF0)];
+ if (dev->refcnt <= 0)
+ printk(KERN_ALERT "i2ob_release: refcount(%d) <= 0\n", dev->refcnt);
+ dev->refcnt--;
+ if(dev->refcnt==0)
+ {
+ /*
+ * Flush the onboard cache on unmount
+ */
+ u32 msg[5];
+ int *query_done = &dev->done_flag;
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = 60<<16;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ /*
+ * Unlock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+
+ /*
+ * Now unclaim the device.
+ */
+ if (i2ob_claim_device(dev, 0)<0)
+ printk(KERN_ERR "i2ob_release: controller rejected unclaim.\n");
+
+ }
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Open the block device.
+ */
+
+static int i2ob_open(struct inode *inode, struct file *file)
+{
+ int minor;
+ struct i2ob_device *dev;
+
+ if (!inode)
+ return -EINVAL;
+ minor = MINOR(inode->i_rdev);
+ if (minor >= MAX_I2OB<<4)
+ return -ENODEV;
+ dev=&i2ob_dev[(minor&0xF0)];
+
+ if(dev->refcnt++==0)
+ {
+ u32 msg[6];
+ int *query_done;
+
+
+ if(i2ob_claim_device(dev, 1)<0)
+ {
+ dev->refcnt--;
+ return -EBUSY;
+ }
+
+ query_done = &dev->done_flag;
+ /*
+ * Mount the media if needed. Note that we don't use
+ * the lock bit. Since we have to issue a lock if it
+ * refuses a mount (quite possible) then we might as
+ * well just send two messages out.
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MMOUNT<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ msg[5] = 0;
+ i2o_post_wait(dev->controller, dev->tid, msg, 24, query_done,2);
+ /*
+ * Lock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ }
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Issue a device query
+ */
+
+static int i2ob_query_device(struct i2ob_device *dev, int table,
+ int field, void *buf, int buflen)
+{
+ return i2o_query_scalar(dev->controller, dev->tid, i2ob_context,
+ table, field, buf, buflen, &dev->done_flag);
+}
+
+
+/*
+ * Install the I2O block device we found.
+ */
+
+static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, int unit)
+{
+ u64 size;
+ u32 blocksize;
+ u32 limit;
+ u8 type;
+ u32 flags, status;
+ struct i2ob_device *dev=&i2ob_dev[unit];
+ int i;
+
+ /*
+ * Ask for the current media data. If that isn't supported
+ * then we ask for the device capacity data
+ */
+
+ if(i2ob_query_device(dev, 0x0004, 1, &blocksize, 4) != 0
+ || i2ob_query_device(dev, 0x0004, 0, &size, 8) !=0 )
+ {
+ i2ob_query_device(dev, 0x0000, 3, &blocksize, 4);
+ i2ob_query_device(dev, 0x0000, 4, &size, 8);
+ }
+
+ i2ob_query_device(dev, 0x0000, 5, &flags, 4);
+ i2ob_query_device(dev, 0x0000, 6, &status, 4);
+ i2ob_sizes[unit] = (int)(size>>10);
+ i2ob_hardsizes[unit] = blocksize;
+ i2ob_gendisk.part[unit].nr_sects = i2ob_sizes[unit];
+
+ /* Setting this higher than 1024 breaks the symbios for some reason */
+
+ limit=4096; /* 8 deep scatter gather */
+
+ printk("Byte limit is %d.\n", limit);
+
+ for(i=unit;i<=unit+15;i++)
+ i2ob_max_sectors[i]=(limit>>9);
+
+ i2ob[unit].nr_sects = (int)(size>>9);
+
+ i2ob_query_device(dev, 0x0000, 0, &type, 1);
+
+ sprintf(d->dev_name, "%s%c", i2ob_gendisk.major_name, 'a' + (unit>>4));
+
+ printk("%s: ", d->dev_name);
+ if(status&(1<<10))
+ printk("RAID ");
+ switch(type)
+ {
+ case 0: printk("Disk Storage");break;
+ case 4: printk("WORM");break;
+ case 5: printk("CD-ROM");break;
+ case 7: printk("Optical device");break;
+ default:
+ printk("Type %d", type);
+ }
+ if(((flags & (1<<3)) && !(status & (1<<3))) ||
+ ((flags & (1<<4)) && !(status & (1<<4))))
+ {
+ printk(" Not loaded.\n");
+ return 0;
+ }
+ printk(" %dMb, %d byte sectors",
+ (int)(size>>20), blocksize);
+ if(status&(1<<0))
+ {
+ u32 cachesize;
+ i2ob_query_device(dev, 0x0003, 0, &cachesize, 4);
+ cachesize>>=10;
+ if(cachesize>4095)
+ printk(", %dMb cache", cachesize>>10);
+ else
+ printk(", %dKb cache", cachesize);
+ }
+ printk(".\n");
+ printk("%s: Maximum sectors/read set to %d.\n",
+ d->dev_name, i2ob_max_sectors[unit]);
+ resetup_one_dev(&i2ob_gendisk, unit>>4);
+ return 0;
+}
+
+static void i2ob_probe(void)
+{
+ int i;
+ int unit = 0;
+ int warned = 0;
+
+ for(i=0; i< MAX_I2O_CONTROLLERS; i++)
+ {
+ struct i2o_controller *c=i2o_find_controller(i);
+ struct i2o_device *d;
+
+ if(c==NULL)
+ continue;
+
+ for(d=c->devices;d!=NULL;d=d->next)
+ {
+ if(d->class!=I2O_CLASS_RANDOM_BLOCK_STORAGE)
+ continue;
+
+ if(unit<MAX_I2OB<<4)
+ {
+ /*
+ * Get the device and fill in the
+ * Tid and controller.
+ */
+ struct i2ob_device *dev=&i2ob_dev[unit];
+ dev->i2odev = d;
+ dev->controller = c;
+ dev->tid = d->id;
+
+ /*
+				 * Ensure the device can be claimed
+ * before installing it.
+ */
+ if(i2ob_claim_device(dev, 1)==0)
+ {
+ printk(KERN_INFO "Claimed Dev %x Tid %d Unit %d\n",dev,dev->tid,unit);
+ i2ob_install_device(c,d,unit);
+ unit+=16;
+
+ /*
+ * Now that the device has been
+ * installed, unclaim it so that
+ * it can be claimed by either
+ * the block or scsi driver.
+ */
+ if (i2ob_claim_device(dev, 0)<0)
+ printk(KERN_INFO "Could not unclaim Dev %x Tid %d\n",dev,dev->tid);
+
+ }
+ else
+ printk(KERN_INFO "TID %d not claimed\n",dev->tid);
+ }
+ else
+ {
+ if(!warned++)
+ printk("i2o_block: too many controllers, registering only %d.\n", unit>>4);
+ }
+ }
+ }
+ i2ob_devices = unit;
+}
+
+/*
+ * Have we seen a media change ?
+ */
+
+static int i2ob_media_change(kdev_t dev)
+{
+ int i=MINOR(dev);
+ i>>=4;
+ if(i2ob_media_change_flag[i])
+ {
+ i2ob_media_change_flag[i]=0;
+ return 1;
+ }
+ return 0;
+}
+
+static int i2ob_revalidate(kdev_t dev)
+{
+ return do_i2ob_revalidate(dev, 0);
+}
+
+static int i2ob_reboot_event(struct notifier_block *n, unsigned long code, void *p)
+{
+ int i;
+
+ if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF)
+ return NOTIFY_DONE;
+ for(i=0;i<MAX_I2OB;i++)
+ {
+ struct i2ob_device *dev=&i2ob_dev[(i<<4)];
+
+ if(dev->refcnt!=0)
+ {
+ /*
+ * Flush the onboard cache on power down
+ * also unlock the media
+ */
+ u32 msg[5];
+ int *query_done = &dev->done_flag;
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_CFLUSH<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = 60<<16;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ /*
+ * Unlock the media
+ */
+ msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_BLOCK_MUNLOCK<<24|HOST_TID<<12|dev->tid;
+ msg[2] = i2ob_context|0x80000000;
+ msg[3] = (u32)query_done;
+ msg[4] = -1;
+ i2o_post_wait(dev->controller, dev->tid, msg, 20, query_done,2);
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+struct notifier_block i2ob_reboot_notifier =
+{
+ i2ob_reboot_event,
+ NULL,
+ 0
+};
+
+static struct file_operations i2ob_fops =
+{
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* select */
+ i2ob_ioctl, /* ioctl */
+ NULL, /* mmap */
+ i2ob_open, /* open */
+ NULL, /* flush */
+ i2ob_release, /* release */
+ NULL, /* fsync */
+ NULL, /* fasync */
+ i2ob_media_change, /* Media Change */
+ i2ob_revalidate, /* Revalidate */
+ NULL /* File locks */
+};
+
+/*
+ * Partitioning
+ */
+
+static void i2ob_geninit(struct gendisk *gd)
+{
+}
+
+static struct gendisk i2ob_gendisk =
+{
+ MAJOR_NR,
+ "i2ohd",
+ 4,
+ 1<<4,
+ MAX_I2OB,
+ i2ob_geninit,
+ i2ob,
+ i2ob_sizes,
+ 0,
+ NULL,
+ NULL
+};
+
+/*
+ * And here should be modules and kernel interface
+ * (Just smiley confuses emacs :-)
+ */
+
+#ifdef MODULE
+#define i2ob_init init_module
+#endif
+
+int i2ob_init(void)
+{
+ int i;
+
+ printk("I2O block device OSM v0.06. (C) 1999 Red Hat Software.\n");
+
+ /*
+ * Register the block device interfaces
+ */
+
+ if (register_blkdev(MAJOR_NR, "i2o_block", &i2ob_fops)) {
+ printk("Unable to get major number %d for i2o_block\n",
+ MAJOR_NR);
+ return -EIO;
+ }
+#ifdef MODULE
+ printk("i2o_block: registered device at major %d\n", MAJOR_NR);
+#endif
+
+ /*
+ * Now fill in the boiler plate
+ */
+
+ blksize_size[MAJOR_NR] = i2ob_blksizes;
+ hardsect_size[MAJOR_NR] = i2ob_hardsizes;
+ blk_size[MAJOR_NR] = i2ob_sizes;
+ max_sectors[MAJOR_NR] = i2ob_max_sectors;
+
+ blk_dev[MAJOR_NR].request_fn = i2ob_request;
+ for (i = 0; i < MAX_I2OB << 4; i++) {
+ i2ob_dev[i].refcnt = 0;
+ i2ob_dev[i].flags = 0;
+ i2ob_dev[i].controller = NULL;
+ i2ob_dev[i].i2odev = NULL;
+ i2ob_dev[i].tid = 0;
+ i2ob_dev[i].head = NULL;
+ i2ob_dev[i].tail = NULL;
+ i2ob_blksizes[i] = 1024;
+ i2ob_max_sectors[i] = 2;
+ }
+
+ /*
+ * Register the OSM handler as we will need this to probe for
+ * drives, geometry and other goodies.
+ */
+
+ if(i2o_install_handler(&i2o_block_handler)<0)
+ {
+ unregister_blkdev(MAJOR_NR, "i2o_block");
+ printk(KERN_ERR "i2o_block: unable to register OSM.\n");
+ return -EINVAL;
+ }
+ i2ob_context = i2o_block_handler.context;
+
+ /*
+ * Finally see what is actually plugged in to our controllers
+ */
+
+ i2ob_probe();
+
+ register_reboot_notifier(&i2ob_reboot_notifier);
+ return 0;
+}
+
+#ifdef MODULE
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Block Device OSM");
+
+void cleanup_module(void)
+{
+ struct gendisk **gdp;
+
+ unregister_reboot_notifier(&i2ob_reboot_notifier);
+
+ /*
+ * Flush the OSM
+ */
+
+ i2o_remove_handler(&i2o_block_handler);
+
+ /*
+ * Return the block device
+ */
+ if (unregister_blkdev(MAJOR_NR, "i2o_block") != 0)
+ printk("i2o_block: cleanup_module failed\n");
+ else
+ printk("i2o_block: module cleaned up.\n");
+
+ /*
+	 * Why isn't register/unregister gendisk in the kernel ???
+ */
+
+ for (gdp = &gendisk_head; *gdp; gdp = &((*gdp)->next))
+ if (*gdp == &i2ob_gendisk)
+ break;
+
+}
+#endif
diff --git a/drivers/i2o/i2o_config.c b/drivers/i2o/i2o_config.c
new file mode 100644
index 000000000..c3c644883
--- /dev/null
+++ b/drivers/i2o/i2o_config.c
@@ -0,0 +1,613 @@
+/*
+ * I2O Configuration Interface Driver
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * Modified 04/20/1999 by Deepak Saxena
+ * - Added basic ioctl() support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/spinlock.h>
+
+#include "i2o_proc.h"
+
+static int i2o_cfg_token = 0;
+static int i2o_cfg_context = -1;
+static void *page_buf;
+static void *i2o_buffer;
+static int i2o_ready;
+static int i2o_pagelen;
+static int i2o_error;
+static int cfg_inuse;
+static int i2o_eof;
+static spinlock_t i2o_config_lock = SPIN_LOCK_UNLOCKED;
+struct wait_queue *i2o_wait_queue;
+
+static int ioctl_getiops(unsigned long);
+static int ioctl_gethrt(unsigned long);
+static int ioctl_getlct(unsigned long);
+static int ioctl_parms(unsigned long, unsigned int);
+static int ioctl_html(unsigned long);
+static int ioctl_swdl(unsigned long);
+static int ioctl_swul(unsigned long);
+static int ioctl_swdel(unsigned long);
+
+/*
+ * This is the callback for any message we have posted. The message itself
+ * will be returned to the message pool when we return from the IRQ
+ *
+ * This runs in irq context so be short and sweet.
+ */
+static void i2o_cfg_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *m)
+{
+ i2o_cfg_token = I2O_POST_WAIT_OK;
+
+ return;
+}
+
+/*
+ * Each of these describes an i2o message handler. They are
+ * multiplexed by the i2o_core code
+ */
+
+struct i2o_handler cfg_handler=
+{
+ i2o_cfg_reply,
+ "Configuration",
+ 0
+};
+
+static long long cfg_llseek(struct file *file, long long offset, int origin)
+{
+ return -ESPIPE;
+}
+
+/* i2ocontroller/i2odevice/page/?data */
+
+static ssize_t cfg_write(struct file *file, const char *buf, size_t count, loff_t *ppos)
+{
+ printk(KERN_INFO "i2o_config write not yet supported\n");
+
+ return 0;
+}
+
+/* To be written for event management support */
+static ssize_t cfg_read(struct file *file, char *buf, size_t count, loff_t *ptr)
+{
+ return 0;
+}
+
+static int cfg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ /* Only 1 token, so lock... */
+ spin_lock(&i2o_config_lock);
+
+ switch(cmd)
+ {
+ case I2OGETIOPS:
+ ret = ioctl_getiops(arg);
+ break;
+
+ case I2OHRTGET:
+ ret = ioctl_gethrt(arg);
+ break;
+
+ case I2OLCTGET:
+ ret = ioctl_getlct(arg);
+ break;
+
+ case I2OPARMSET:
+ ret = ioctl_parms(arg, I2OPARMSET);
+ break;
+
+ case I2OPARMGET:
+ ret = ioctl_parms(arg, I2OPARMGET);
+ break;
+
+ case I2OSWDL:
+ ret = ioctl_swdl(arg);
+ break;
+
+ case I2OSWUL:
+ ret = ioctl_swul(arg);
+ break;
+
+ case I2OSWDEL:
+ ret = ioctl_swdel(arg);
+ break;
+
+ case I2OHTML:
+ ret = ioctl_html(arg);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ spin_unlock(&i2o_config_lock);
+ return ret;
+}
+
+int ioctl_getiops(unsigned long arg)
+{
+ u8 *user_iop_table = (u8*)arg;
+ struct i2o_controller *c = NULL;
+ int i;
+ u8 foo[MAX_I2O_CONTROLLERS];
+
+ if(!access_ok(VERIFY_WRITE, user_iop_table, MAX_I2O_CONTROLLERS))
+ return -EFAULT;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c = i2o_find_controller(i);
+ if(c)
+ {
+ printk(KERN_INFO "ioctl: iop%d found\n", i);
+ foo[i] = 1;
+ i2o_unlock_controller(c);
+ }
+ else
+ {
+ printk(KERN_INFO "ioctl: iop%d not found\n", i);
+ foo[i] = 0;
+ }
+ }
+
+ __copy_to_user(user_iop_table, foo, MAX_I2O_CONTROLLERS);
+ return 0;
+}
+
+int ioctl_gethrt(unsigned long arg)
+{
+ struct i2o_controller *c;
+ struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
+ struct i2o_cmd_hrtlct kcmd;
+ pi2o_hrt hrt;
+ u32 msg[6];
+ u32 *workspace;
+ int len;
+ int token;
+ u32 reslen;
+ int ret = 0;
+
+ if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+ return -EFAULT;
+
+ if(get_user(reslen, kcmd.reslen) < 0)
+ return -EFAULT;
+
+ if(kcmd.resbuf == NULL)
+ return -EFAULT;
+
+ c = i2o_find_controller(kcmd.iop);
+ if(!c)
+ return -ENXIO;
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ hrt = (pi2o_hrt)workspace;
+ if(workspace==NULL)
+ return -ENOMEM;
+
+ memset(workspace, 0, 8192);
+
+ msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+ msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= (u32)cfg_handler.context;
+ msg[3]= 0;
+ msg[4]= (0xD0000000 | 8192);
+ msg[5]= virt_to_phys(workspace);
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 6*4, &i2o_cfg_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ i2o_unlock_controller(c);
+ return -ETIMEDOUT;
+ }
+ i2o_unlock_controller(c);
+
+ len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
+ /* We did a get user...so assuming mem is ok...is this bad? */
+ put_user(len, kcmd.reslen);
+ if(len > reslen)
+ ret = -ENOBUFS;
+ if(copy_to_user(kcmd.resbuf, (void*)hrt, len))
+ ret = -EINVAL;
+
+ kfree(workspace);
+ return ret;
+}
+
+int ioctl_getlct(unsigned long arg)
+{
+ struct i2o_controller *c;
+ struct i2o_cmd_hrtlct *cmd = (struct i2o_cmd_hrtlct*)arg;
+ struct i2o_cmd_hrtlct kcmd;
+ pi2o_lct lct;
+ u32 msg[9];
+ u32 *workspace;
+ int len;
+ int token;
+ int ret = 0;
+ u32 reslen;
+
+ if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+ return -EFAULT;
+
+ if(get_user(reslen, kcmd.reslen) < 0)
+ return -EFAULT;
+
+ if(kcmd.resbuf == NULL)
+ return -EFAULT;
+
+ c = i2o_find_controller(kcmd.iop);
+ if(!c)
+ return -ENXIO;
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ lct = (pi2o_lct)workspace;
+ if(workspace==NULL)
+ return -ENOMEM;
+
+ memset(workspace, 0, 8192);
+
+ msg[0]= EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6;
+ msg[1]= I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= (u32)cfg_handler.context;
+ msg[3]= 0;
+ msg[4]= 0xFFFFFFFF;
+ msg[5]= 0;
+ msg[6]= (0xD0000000 | 8192);
+ msg[7]= virt_to_phys(workspace);
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 8*4, &i2o_cfg_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ i2o_unlock_controller(c);
+ return -ETIMEDOUT;
+ }
+ i2o_unlock_controller(c);
+
+ len = (unsigned int)lct->table_size << 2;
+ put_user(len, kcmd.reslen);
+ if(len > reslen)
+ ret = -ENOBUFS;
+ else if(copy_to_user(kcmd.resbuf, (void*)lct, len))
+ ret = -EINVAL;
+
+ kfree(workspace);
+ return ret;
+}
+
+static int ioctl_parms(unsigned long arg, unsigned int type)
+{
+ int ret = 0;
+ struct i2o_controller *c;
+ struct i2o_cmd_psetget *cmd = (struct i2o_cmd_psetget*)arg;
+ struct i2o_cmd_psetget kcmd;
+ u32 msg[9];
+ u32 reslen;
+ int token;
+ u8 *ops;
+ u8 *res;
+ u16 *res16;
+ u32 *res32;
+ u16 count;
+ int len;
+ int i,j;
+
+ u32 i2o_cmd = (type == I2OPARMGET ?
+ I2O_CMD_UTIL_PARAMS_GET :
+ I2O_CMD_UTIL_PARAMS_SET);
+
+ if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
+ return -EFAULT;
+
+ if(get_user(reslen, kcmd.reslen))
+ return -EFAULT;
+
+ c = i2o_find_controller(kcmd.iop);
+ if(!c)
+ return -ENXIO;
+
+ ops = (u8*)kmalloc(kcmd.oplen, GFP_KERNEL);
+ if(!ops)
+ return -ENOMEM;
+
+ if(copy_from_user(ops, kcmd.opbuf, kcmd.oplen))
+ {
+ kfree(ops);
+ return -EFAULT;
+ }
+
+ /*
+ * It's possible to have a _very_ large table
+ * and that the user asks for all of it at once...
+ */
+ res = (u8*)kmalloc(65536, GFP_KERNEL);
+ if(!res)
+ {
+ kfree(ops);
+ return -ENOMEM;
+ }
+
+ res16 = (u16*)res;
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=i2o_cmd<<24|HOST_TID<<12|cmd->tid;
+ msg[2]=(u32)cfg_handler.context;
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0x54000000|kcmd.oplen;
+ msg[6]=virt_to_bus(ops);
+ msg[7]=0xD0000000|(65536);
+ msg[8]=virt_to_bus(res);
+
+ /*
+ * Parm set sometimes takes a little while for some reason
+ */
+ token = i2o_post_wait(c, kcmd.tid, msg, 9*4, &i2o_cfg_token,10);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(ops);
+ kfree(res);
+ return -ETIMEDOUT;
+ }
+
+ kfree(ops);
+
+ /*
+ * Determine required size...there's got to be a quicker way?
+ * Dump data to syslog for debugging failures
+ */
+ count = res16[0];
+ printk(KERN_INFO "%0#6x\n%0#6x\n", res16[0], res16[1]);
+ len = 4;
+ res16 += 2;
+ for(i = 0; i < count; i++ )
+ {
+ len += res16[0] << 2; /* BlockSize field in ResultBlock */
+ res32 = (u32*)res16;
+ for(j = 0; j < res16[0]; j++)
+ printk(KERN_INFO "%0#10x\n", res32[j]);
+ res16 += res16[0] << 1; /* Shift to next block */
+ }
+
+ put_user(len, kcmd.reslen);
+ if(len > reslen)
+ ret = -ENOBUFS;
+ else if(copy_to_user(cmd->resbuf, res, len))
+ ret = -EFAULT;
+
+ kfree(res);
+
+ return ret;
+}
+
+int ioctl_html(unsigned long arg)
+{
+ struct i2o_html *cmd = (struct i2o_html*)arg;
+ struct i2o_html kcmd;
+ struct i2o_controller *c;
+ u8 *res = NULL;
+ void *query = NULL;
+ int ret = 0;
+ int token;
+ u32 len;
+ u32 reslen;
+ u32 msg[MSG_FRAME_SIZE/4];
+
+ if(copy_from_user(&kcmd, cmd, sizeof(struct i2o_html)))
+ {
+ printk(KERN_INFO "i2o_config: can't copy html cmd\n");
+ return -EFAULT;
+ }
+
+ if(get_user(reslen, kcmd.reslen) < 0)
+ {
+ printk(KERN_INFO "i2o_config: can't copy html reslen\n");
+ return -EFAULT;
+ }
+
+ if(!kcmd.resbuf)
+ {
+ printk(KERN_INFO "i2o_config: NULL html buffer\n");
+ return -EFAULT;
+ }
+
+ c = i2o_find_controller(kcmd.iop);
+ if(!c)
+ return -ENXIO;
+
+ if(kcmd.qlen) /* Check for post data */
+ {
+ query = kmalloc(kcmd.qlen, GFP_KERNEL);
+ if(!query)
+ return -ENOMEM;
+ if(copy_from_user(query, kcmd.qbuf, kcmd.qlen))
+ {
+ printk(KERN_INFO "i2o_config: could not get query\n");
+ kfree(query);
+ return -EFAULT;
+ }
+ }
+
+ res = kmalloc(4096, GFP_KERNEL);
+ if(!res)
+ return -ENOMEM;
+
+ msg[1] = (I2O_CMD_UTIL_CONFIG_DIALOG << 24)|HOST_TID<<12|kcmd.tid;
+ msg[2] = i2o_cfg_context;
+ msg[3] = 0;
+ msg[4] = kcmd.page;
+ msg[5] = 0xD0000000|4096;
+ msg[6] = virt_to_bus(res);
+ if(!kcmd.qlen) /* Check for post data */
+ msg[0] = SEVEN_WORD_MSG_SIZE|SGL_OFFSET_5;
+ else
+ {
+ msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[5] = 0x50000000|4096;
+ msg[7] = 0xD4000000|(kcmd.qlen);
+ msg[8] = virt_to_phys(query);
+ }
+
+ token = i2o_post_wait(c, cmd->tid, msg, 9*4, &i2o_cfg_token, 10);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(res);
+ if(kcmd.qlen) kfree(query);
+
+ return -ETIMEDOUT;
+ }
+
+ len = strnlen(res, 8192);
+ put_user(len, kcmd.reslen);
+ if(len > reslen)
+ ret = -ENOMEM;
+ if(copy_to_user(kcmd.resbuf, res, len))
+ ret = -EFAULT;
+
+ kfree(res);
+ if(kcmd.qlen)
+ kfree(query);
+
+ return ret;
+}
+
+/* To be written */
+int ioctl_swdl(unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+/* To be written */
+int ioctl_swul(unsigned long arg)
+{
+ return -EINVAL;
+}
+
+/* To be written */
+int ioctl_swdel(unsigned long arg)
+{
+ return 0;
+}
+
+static int cfg_open(struct inode *inode, struct file *file)
+{
+ /*
+ * Should support multiple management users
+ */
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int cfg_release(struct inode *inode, struct file *file)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+
+static struct file_operations config_fops =
+{
+ cfg_llseek,
+ cfg_read,
+ cfg_write,
+ NULL,
+ NULL /*cfg_poll*/,
+ cfg_ioctl,
+ NULL, /* No mmap */
+ cfg_open,
+ NULL, /* No flush */
+ cfg_release
+};
+
+static struct miscdevice i2o_miscdev = {
+ I2O_MINOR,
+ "i2octl",
+ &config_fops
+};
+
+#ifdef MODULE
+int init_module(void)
+#else
+int i2o_config_init(void)
+#endif
+{
+ printk(KERN_INFO "i2o configuration manager v 0.02\n");
+
+ if((page_buf = kmalloc(4096, GFP_KERNEL))==NULL)
+ {
+ printk(KERN_ERR "i2o_config: no memory for page buffer.\n");
+ return -ENOBUFS;
+ }
+ if(misc_register(&i2o_miscdev)==-1)
+ {
+ printk(KERN_ERR "i2o_config: can't register device.\n");
+ kfree(page_buf);
+ return -EBUSY;
+ }
+ /*
+ * Install our handler
+ */
+ if(i2o_install_handler(&cfg_handler)<0)
+ {
+ kfree(page_buf);
+ printk(KERN_ERR "i2o_config: handler register failed.\n");
+ misc_deregister(&i2o_miscdev);
+ return -EBUSY;
+ }
+ /*
+ * The low 16bits of the transaction context must match this
+ * for everything we post. Otherwise someone else gets our mail
+ */
+ i2o_cfg_context = cfg_handler.context;
+ return 0;
+}
+
+#ifdef MODULE
+
+void cleanup_module(void)
+{
+ misc_deregister(&i2o_miscdev);
+
+ if(page_buf)
+ kfree(page_buf);
+ if(i2o_cfg_context != -1)
+ i2o_remove_handler(&cfg_handler);
+ if(i2o_buffer)
+ kfree(i2o_buffer);
+}
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Configuration");
+
+#endif
diff --git a/drivers/i2o/i2o_core.c b/drivers/i2o/i2o_core.c
new file mode 100644
index 000000000..3a3f1fe94
--- /dev/null
+++ b/drivers/i2o/i2o_core.c
@@ -0,0 +1,2053 @@
+/*
+ * Core I2O structure management
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * A lot of the I2O message side code from this is taken from the
+ * Red Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ * Some fixes and cleanup by Philipp Rumpf
+ *
+ * Additional fixes by Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+
+#include <asm/io.h>
+#include <asm/spinlock.h>
+
+#include "i2o_lan.h"
+
+/*
+ * Size of the I2O module table
+ */
+
+
+static struct i2o_handler *i2o_handlers[MAX_I2O_MODULES];
+static struct i2o_controller *i2o_controllers[MAX_I2O_CONTROLLERS];
+int i2o_num_controllers = 0;
+
+
+extern int i2o_online_controller(struct i2o_controller *c);
+
+/*
+ * I2O configuration spinlock. This isn't a big deal for contention
+ * so we have only one.
+ */
+
+#ifdef __SMP__
+static spinlock_t i2o_configuration_lock = SPIN_LOCK_UNLOCKED;
+#endif
+
+/*
+ * Install an I2O handler - these handle the asynchronous messaging
+ * from the card once it has initialised.
+ */
+
+int i2o_install_handler(struct i2o_handler *h)
+{
+ int i;
+ spin_lock(&i2o_configuration_lock);
+ for(i=0;i<MAX_I2O_MODULES;i++)
+ {
+ if(i2o_handlers[i]==NULL)
+ {
+ h->context = i;
+ i2o_handlers[i]=h;
+ spin_unlock(&i2o_configuration_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&i2o_configuration_lock);
+ return -ENOSPC;
+}
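+
+/*
+ * Illustrative sketch only (not part of the driver, kept compiled out):
+ * roughly how an OSM might declare its reply handler and register it.
+ * The names "example_reply" and "example_handler" are made up here for
+ * illustration; see i2o_lan_handler in i2o_lan.c for a real user.
+ */
+#if 0
+static void example_reply(struct i2o_handler *h, struct i2o_controller *c,
+			  struct i2o_message *m)
+{
+	/* Inspect the reply frame here */
+}
+
+static struct i2o_handler example_handler =
+{
+	example_reply,
+	"Example OSM",
+	0		/* context - filled in by i2o_install_handler() */
+};
+
+/* ... and at init time:  i2o_install_handler(&example_handler);  */
+#endif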
+
+int i2o_remove_handler(struct i2o_handler *h)
+{
+ i2o_handlers[h->context]=NULL;
+ return 0;
+}
+
+
+/*
+ * Each I2O controller has a chain of devices on it - these match
+ * the useful parts of the LCT of the board.
+ */
+
+int i2o_install_device(struct i2o_controller *c, struct i2o_device *d)
+{
+ spin_lock(&i2o_configuration_lock);
+ d->controller=c;
+ d->owner=NULL;
+ d->next=c->devices;
+ c->devices=d;
+ *d->dev_name = 0;
+ spin_unlock(&i2o_configuration_lock);
+ return 0;
+}
+
+/* we need this version to call out of i2o_delete_controller */
+
+int __i2o_delete_device(struct i2o_device *d)
+{
+ struct i2o_device **p;
+
+ p=&(d->controller->devices);
+
+ /*
+ * Hey we have a driver!
+ */
+
+ if(d->owner)
+ return -EBUSY;
+
+ /*
+ * Seek, locate
+ */
+
+ while(*p!=NULL)
+ {
+ if(*p==d)
+ {
+ /*
+ * Destroy
+ */
+ *p=d->next;
+ kfree(d);
+ return 0;
+ }
+ p=&((*p)->next);
+ }
+ printk(KERN_ERR "i2o_delete_device: passed invalid device.\n");
+ return -EINVAL;
+}
+
+int i2o_delete_device(struct i2o_device *d)
+{
+ int ret;
+
+ spin_lock(&i2o_configuration_lock);
+
+ ret = __i2o_delete_device(d);
+
+ spin_unlock(&i2o_configuration_lock);
+
+ return ret;
+}
+
+/*
+ * Add and remove controllers from the I2O controller list
+ */
+
+int i2o_install_controller(struct i2o_controller *c)
+{
+ int i;
+ spin_lock(&i2o_configuration_lock);
+ for(i=0;i<MAX_I2O_CONTROLLERS;i++)
+ {
+ if(i2o_controllers[i]==NULL)
+ {
+ i2o_controllers[i]=c;
+ c->next=i2o_controller_chain;
+ i2o_controller_chain=c;
+ c->unit = i;
+ sprintf(c->name, "i2o/iop%d", i);
+ i2o_num_controllers++;
+ spin_unlock(&i2o_configuration_lock);
+ return 0;
+ }
+ }
+ printk(KERN_ERR "No free i2o controller slots.\n");
+ spin_unlock(&i2o_configuration_lock);
+ return -EBUSY;
+}
+
+int i2o_delete_controller(struct i2o_controller *c)
+{
+ struct i2o_controller **p;
+
+ spin_lock(&i2o_configuration_lock);
+ if(atomic_read(&c->users))
+ {
+ spin_unlock(&i2o_configuration_lock);
+ return -EBUSY;
+ }
+ while(c->devices)
+ {
+ if(__i2o_delete_device(c->devices)<0)
+ {
+			/* Shouldn't happen */
+ spin_unlock(&i2o_configuration_lock);
+ return -EBUSY;
+ }
+ }
+ c->destructor(c);
+
+ p=&i2o_controller_chain;
+
+ while(*p)
+ {
+ if(*p==c)
+ {
+ /* Prepare for restart */
+// i2o_clear_controller(c);
+
+ *p=c->next;
+ spin_unlock(&i2o_configuration_lock);
+			if(c->page_frame)
+				kfree(c->page_frame);
+ i2o_controllers[c->unit]=NULL;
+ kfree(c);
+ i2o_num_controllers--;
+ return 0;
+ }
+ p=&((*p)->next);
+ }
+ spin_unlock(&i2o_configuration_lock);
+ printk(KERN_ERR "i2o_delete_controller: bad pointer!\n");
+ return -ENOENT;
+}
+
+void i2o_unlock_controller(struct i2o_controller *c)
+{
+ atomic_dec(&c->users);
+}
+
+struct i2o_controller *i2o_find_controller(int n)
+{
+ struct i2o_controller *c;
+
+ if(n<0 || n>=MAX_I2O_CONTROLLERS)
+ return NULL;
+
+ spin_lock(&i2o_configuration_lock);
+ c=i2o_controllers[n];
+ if(c!=NULL)
+ atomic_inc(&c->users);
+ spin_unlock(&i2o_configuration_lock);
+ return c;
+}
+
+
+/*
+ * Track if a device is being used by a driver
+ */
+
+int i2o_claim_device(struct i2o_device *d, struct i2o_driver *r)
+{
+ spin_lock(&i2o_configuration_lock);
+ if(d->owner)
+ {
+ spin_unlock(&i2o_configuration_lock);
+ return -EBUSY;
+ }
+ atomic_inc(&d->controller->users);
+ d->owner=r;
+ spin_unlock(&i2o_configuration_lock);
+ return 0;
+}
+
+int i2o_release_device(struct i2o_device *d)
+{
+ spin_lock(&i2o_configuration_lock);
+ if(d->owner==NULL)
+ {
+ spin_unlock(&i2o_configuration_lock);
+ return -EINVAL;
+ }
+ atomic_dec(&d->controller->users);
+ d->owner=NULL;
+ spin_unlock(&i2o_configuration_lock);
+ return 0;
+}
+
+/*
+ * This is called by the bus specific driver layer when an interrupt
+ * or poll of this card interface is desired.
+ */
+
+void i2o_run_queue(struct i2o_controller *c)
+{
+ struct i2o_message *m;
+ u32 mv;
+
+ while((mv=I2O_REPLY_READ32(c))!=0xFFFFFFFF)
+ {
+ struct i2o_handler *i;
+ m=(struct i2o_message *)bus_to_virt(mv);
+ /*
+ * Temporary Debugging
+ */
+ if(((m->function_addr>>24)&0xFF)==0x15)
+ printk("UTFR!\n");
+// printk("dispatching.\n");
+ i=i2o_handlers[m->initiator_context&(MAX_I2O_MODULES-1)];
+ if(i)
+ i->reply(i,c,m);
+ else
+ printk("Spurious reply\n");
+ i2o_flush_reply(c,mv);
+ mb();
+ }
+}
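+
+/*
+ * Illustrative sketch only (not part of the driver, kept compiled out):
+ * the bus-specific layer is expected to hand reply processing to
+ * i2o_run_queue() from its interrupt or polling path.  A PCI-style
+ * handler might look roughly like this; the name "example_interrupt"
+ * is made up for illustration.
+ */
+#if 0
+static void example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct i2o_controller *c = (struct i2o_controller *)dev_id;
+	i2o_run_queue(c);
+}
+#endif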
+
+
+/*
+ * Do i2o class name lookup
+ */
+const char *i2o_get_class_name(int class)
+{
+ int idx = 16;
+ static char *i2o_class_name[] = {
+ "Executive",
+ "Device Driver Module",
+ "Block Device",
+ "Tape Device",
+		"LAN Interface",
+ "WAN Interface",
+ "Fibre Channel Port",
+ "Fibre Channel Device",
+ "SCSI Device",
+ "ATE Port",
+ "ATE Device",
+ "Floppy Controller",
+ "Floppy Device",
+ "Secondary Bus Port",
+ "Peer Transport Agent",
+ "Peer Transport",
+ "Unknown"
+ };
+
+ switch(class&0xFFF)
+ {
+ case I2O_CLASS_EXECUTIVE:
+ idx = 0; break;
+ case I2O_CLASS_DDM:
+ idx = 1; break;
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ idx = 2; break;
+ case I2O_CLASS_SEQUENTIAL_STORAGE:
+ idx = 3; break;
+ case I2O_CLASS_LAN:
+ idx = 4; break;
+ case I2O_CLASS_WAN:
+ idx = 5; break;
+ case I2O_CLASS_FIBRE_CHANNEL_PORT:
+ idx = 6; break;
+ case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
+ idx = 7; break;
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ idx = 8; break;
+ case I2O_CLASS_ATE_PORT:
+ idx = 9; break;
+ case I2O_CLASS_ATE_PERIPHERAL:
+ idx = 10; break;
+ case I2O_CLASS_FLOPPY_CONTROLLER:
+ idx = 11; break;
+ case I2O_CLASS_FLOPPY_DEVICE:
+ idx = 12; break;
+ case I2O_CLASS_BUS_ADAPTER_PORT:
+ idx = 13; break;
+ case I2O_CLASS_PEER_TRANSPORT_AGENT:
+ idx = 14; break;
+ case I2O_CLASS_PEER_TRANSPORT:
+ idx = 15; break;
+ }
+
+ return i2o_class_name[idx];
+}
+
+
+/*
+ * Wait up to 5 seconds for a message slot to be available.
+ */
+
+u32 i2o_wait_message(struct i2o_controller *c, char *why)
+{
+ long time=jiffies;
+ u32 m;
+ while((m=I2O_POST_READ32(c))==0xFFFFFFFF)
+ {
+ if((jiffies-time)>=5*HZ)
+ {
+ printk(KERN_ERR "%s: Timeout waiting for message to send %s.\n",
+ c->name, why);
+ return 0xFFFFFFFF;
+ }
+ schedule();
+ barrier();
+ }
+ return m;
+}
+
+
+/*
+ * Wait up to 5 seconds for a reply to be available.
+ */
+
+u32 i2o_wait_reply(struct i2o_controller *c, char *why, int timeout)
+{
+ u32 m;
+ long time=jiffies;
+
+ while((m=I2O_REPLY_READ32(c))==0xFFFFFFFF)
+ {
+ if(jiffies-time >= timeout*HZ )
+ {
+ printk(KERN_ERR "%s: timeout waiting for %s reply.\n",
+ c->name, why);
+ return 0xFFFFFFFF;
+ }
+ schedule();
+ }
+ return m;
+}
+
+
+
+/* Quiesce and clear IOP */
+int i2o_quiesce_controller(struct i2o_controller *c)
+{
+ u32 m;
+ u32 *msg;
+
+ /* now we stop receiving messages to this IOP */
+ m=i2o_wait_message(c, "Quiesce IOP");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+
+ printk(KERN_DEBUG "Sending SysQuiesce to %s\n", c->name);
+ i2o_post_message(c,m);
+
+ m=i2o_wait_reply(c, "System Quiesce", 20);
+
+ if (m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+ /* Someday we should check return status... */
+
+ return 0;
+}
+
+int i2o_clear_controller(struct i2o_controller *c)
+{
+ u32 m;
+ u32 *msg;
+
+ m=i2o_wait_message(c, "IOP Clear");
+ if (m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_ADAPTER_CLEAR<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+
+ printk(KERN_DEBUG "Sending IOPClear to %s\n", c->name);
+ i2o_post_message(c, m);
+
+ m=i2o_wait_reply(c, "IOP Clear timeout", 5);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+
+/*
+ * i2o table walking. We just provide a single element retrieve. You can
+ * do all sorts of fancy lookups in I2O, but we have no performance-critical
+ * lookups, so why write all the code for it?
+ */
+
+#if 0
+static int i2o_query_table_polled(struct i2o_controller *c, int tid, void *buf, int buflen,
+ int group, int field, u32 *key, int keylen)
+{
+ u32 m;
+ u32 *msg;
+ u16 op[64];
+ u32 *p;
+ int i;
+ u32 *rbuf;
+
+ op[0]=1; /* One Operation */
+ op[1]=0; /* PAD */
+ op[2]=2; /* LIST_GET */
+ op[3]=group; /* group number */
+ op[4]=1; /* 1 field */
+ op[5]=field; /* Field number */
+ op[6]=1; /* Key count */
+ memcpy(op+7, key, keylen); /* Key */
+
+ m=i2o_wait_message(c, "I2O query table.");
+ if(m==0xFFFFFFFF)
+ {
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ rbuf=kmalloc(buflen+32, GFP_KERNEL);
+ if(rbuf==NULL)
+ {
+ printk(KERN_ERR "No free memory for table read.\n");
+ return -ENOMEM;
+ }
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=0; /* Context */
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0x54000000|(14);
+ msg[6]=virt_to_bus(op);
+ msg[7]=0xD0000000|(32+buflen);
+ msg[8]=virt_to_bus(rbuf);
+
+ i2o_post_message(c,m);
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Table read timeout", 5);
+
+ if(m==0xFFFFFFFF)
+ {
+ kfree(rbuf);
+ return -ETIMEDOUT;
+ }
+
+ msg = (u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_WARNING, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+
+ p=rbuf;
+
+ /* Ok 'p' is the reply block - lets see what happened */
+ /* p0->p2 are the header */
+
+ /* FIXME: endians - turn p3 to little endian */
+
+ i=(p[0]&0xFFFF)<<2; /* Message size */
+ if(i<buflen)
+ buflen=i;
+
+ /* Do we have an error block ? */
+ if(p[0]&0xFF000000)
+ {
+ printk(KERN_ERR "%s: error in field read.\n",
+ c->name);
+ kfree(rbuf);
+ return -EBADR;
+ }
+
+	/* p[1] holds the more flag and row count - we don't care */
+
+ /* Ok it worked p[2]-> hold the data */
+ memcpy(buf, p+2, buflen);
+
+ kfree(rbuf);
+
+ /* Finally return the message */
+ I2O_REPLY_WRITE32(c,m);
+ return buflen;
+}
+#endif
+
+static int i2o_query_scalar_polled(struct i2o_controller *c, int tid, void *buf, int buflen,
+ int group, int field)
+{
+ u32 m;
+ u32 *msg;
+ u16 op[8];
+ u32 *p;
+ int i;
+ u32 *rbuf;
+
+ op[0]=1; /* One Operation */
+ op[1]=0; /* PAD */
+ op[2]=1; /* FIELD_GET */
+ op[3]=group; /* group number */
+ op[4]=1; /* 1 field */
+ op[5]=field; /* Field number */
+
+ m=i2o_wait_message(c, "I2O query scalar.");
+ if(m==0xFFFFFFFF)
+ {
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ rbuf=kmalloc(buflen+32, GFP_KERNEL);
+ if(rbuf==NULL)
+ {
+ printk(KERN_ERR "No free memory for scalar read.\n");
+ return -ENOMEM;
+ }
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=0; /* Context */
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0x54000000|12;
+ msg[6]=virt_to_bus(op);
+ msg[7]=0xD0000000|(32+buflen);
+ msg[8]=virt_to_bus(rbuf);
+
+ i2o_post_message(c,m);
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Scalar read timeout", 5);
+
+ if(m==0xFFFFFFFF)
+ {
+ kfree(rbuf);
+ return -ETIMEDOUT;
+ }
+
+ msg = (u32 *)bus_to_virt(m);
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_WARNING, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+
+ p=rbuf;
+
+ /* Ok 'p' is the reply block - lets see what happened */
+ /* p0->p2 are the header */
+
+ /* FIXME: endians - turn p3 to little endian */
+
+ if((p[0]&0xFFFF)!=1)
+ printk(KERN_WARNING "Suspicious field read return 0x%08X\n", p[0]);
+
+ i=(p[1]&0xFFFF)<<2; /* Message size */
+ if(i<buflen)
+ buflen=i;
+
+ /* Do we have an error block ? */
+ if(p[1]&0xFF000000)
+ {
+ printk(KERN_ERR "%s: error in field read.\n",
+ c->name);
+ kfree(rbuf);
+ return -EBADR;
+ }
+
+	/* p[1] holds the more flag and row count - we don't care */
+
+ /* Ok it worked p[2]-> hold the data */
+ memcpy(buf, p+2, buflen);
+
+ kfree(rbuf);
+
+ /* Finally return the message */
+ I2O_REPLY_WRITE32(c,m);
+ return buflen;
+}
+
+/*
+ * Dump the information block associated with a given unit (TID)
+ */
+
+void i2o_report_controller_unit(struct i2o_controller *c, int unit)
+{
+ char buf[64];
+
+ if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 3)>=0)
+ {
+ buf[16]=0;
+ printk(KERN_INFO " Vendor: %s\n", buf);
+ }
+ if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 4)>=0)
+ {
+ buf[16]=0;
+ printk(KERN_INFO " Device: %s\n", buf);
+ }
+#if 0
+ if(i2o_query_scalar_polled(c, unit, buf, 16, 0xF100, 5)>=0)
+ {
+ buf[16]=0;
+ printk(KERN_INFO "Description: %s\n", buf);
+ }
+#endif
+ if(i2o_query_scalar_polled(c, unit, buf, 8, 0xF100, 6)>=0)
+ {
+ buf[8]=0;
+ printk(KERN_INFO " Rev: %s\n", buf);
+ }
+}
+
+
+/*
+ * Parse the hardware resource table. Right now we print it out
+ * and don't do a lot with it. We should collate these and then
+ * interact with the Linux resource allocation block.
+ *
+ * Let's prove we can read it first, eh?
+ *
+ * This is full of endianisms!
+ */
+
+static int i2o_parse_hrt(struct i2o_controller *c, u8 *p)
+{
+ u32 *rows=(u32 *)p;
+ u8 *d;
+ int count;
+ int length;
+ int i;
+ int state;
+
+ if(p[3]!=0)
+ {
+ printk(KERN_ERR "i2o: HRT table for controller is too new a version.\n");
+ return -1;
+ }
+
+ count=p[0]|(p[1]<<8);
+ length = p[2];
+
+ printk(KERN_INFO "HRT has %d entries of %d bytes each.\n",
+ count, length<<2);
+
+ rows+=2;
+
+ for(i=0;i<count;i++)
+ {
+ printk(KERN_INFO "Adapter %08X: ", rows[0]);
+ p=(u8 *)(rows+1);
+ d=(u8 *)(rows+2);
+ state=p[1]<<8|p[0];
+
+ printk("TID %04X:[", state&0xFFF);
+ state>>=12;
+ if(state&(1<<0))
+ printk("H"); /* Hidden */
+ if(state&(1<<2))
+ {
+ printk("P"); /* Present */
+ if(state&(1<<1))
+ printk("C"); /* Controlled */
+ }
+ if(state>9)
+ printk("*"); /* Hard */
+
+ printk("]:");
+
+ switch(p[3]&0xFFFF)
+ {
+ case 0:
+ /* Adapter private bus - easy */
+ printk("Local bus %d: I/O at 0x%04X Mem 0x%08X",
+ p[2], d[1]<<8|d[0], *(u32 *)(d+4));
+ break;
+ case 1:
+ /* ISA bus */
+ printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X",
+ p[2], d[2], d[1]<<8|d[0], *(u32 *)(d+4));
+ break;
+
+ case 2: /* EISA bus */
+ printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+ p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
+ break;
+
+ case 3: /* MCA bus */
+ printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+ p[2], d[3], d[1]<<8|d[0], *(u32 *)(d+4));
+ break;
+
+ case 4: /* PCI bus */
+ printk("PCI %d: Bus %d Device %d Function %d",
+ p[2], d[2], d[1], d[0]);
+ break;
+
+ case 0x80: /* Other */
+ default:
+ printk("Unsupported bus type.");
+ break;
+ }
+ printk("\n");
+ rows+=length;
+ }
+ return 0;
+}
+
+/*
+ * The logical configuration table tells us what we can talk to
+ * on the board. Most of the stuff isn't interesting to us.
+ */
+
+static int i2o_parse_lct(struct i2o_controller *c, u32 *lct)
+{
+ int i;
+ int max;
+ int tid;
+ u32 *p;
+ struct i2o_device *d;
+ char str[22];
+
+ max=lct[0]&0xFFFF;
+
+ max-=3;
+ max/=9;
+
+ printk(KERN_INFO "LCT has %d entries.\n", max);
+
+ if(max > 128)
+ {
+ printk(KERN_INFO "LCT was truncated.\n");
+ max=128;
+ }
+
+ if(lct[1]&(1<<0))
+ printk(KERN_WARNING "Configuration dialog desired.\n");
+
+ p=lct+3;
+
+ for(i=0;i<max;i++)
+ {
+ d = (struct i2o_device *)kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
+ if(d==NULL)
+ {
+ printk("i2o_core: Out of memory for LCT data.\n");
+ return -ENOMEM;
+ }
+
+ d->controller = c;
+ d->next = NULL;
+
+ d->id = tid = (p[0]>>16)&0xFFF;
+ d->class = p[3]&0xFFF;
+ d->subclass = p[4]&0xFFF;
+ d->parent = (p[5]>>12)&0xFFF;
+ d->flags = 0;
+
+ printk(KERN_INFO "TID %d.\n", tid);
+
+ i2o_report_controller_unit(c, tid);
+
+ i2o_install_device(c, d);
+
+ printk(KERN_INFO " Class: ");
+
+ sprintf(str, "%-21s", i2o_get_class_name(d->class));
+ printk("%s", str);
+
+ printk(" Subclass: 0x%03X Flags: ",
+ d->subclass);
+
+ if(p[2]&(1<<0))
+ printk("C"); // ConfigDialog requested
+ if(p[2]&(1<<1))
+ printk("M"); // Multi-user capable
+ if(!(p[2]&(1<<4)))
+ printk("P"); // Peer service enabled!
+ if(!(p[2]&(1<<5)))
+ printk("m"); // Mgmt service enabled!
+ printk("\n");
+ p+=9;
+ }
+ return 0;
+}
+
+#if 0
+/* Reset the IOP to sane state */
+/* I think we need handler for core (or executive class in I2O terms) */
+static int i2o_reset_adapter(struct i2o_controller *c)
+{
+ u32 m;
+ u8 *work8;
+ u32 *msg;
+ long time;
+
+	/* First stop external operations */
+ m=i2o_wait_message(c, "quiesce IOP");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+
+ i2o_post_message(c,m);
+
+ m=i2o_wait_reply(c, "System Quiesce timeout", 5);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ /* Then reset the IOP */
+ m=i2o_wait_message(c, "reset IOP");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ work8=(void *)kmalloc(4, GFP_KERNEL);
+ if(work8==NULL) {
+ printk(KERN_ERR "IOP reset failed - no free memory.\n");
+ return -ENOMEM;
+ }
+
+ memset(work8, 0, 4);
+
+ msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0;
+ msg[6]=virt_to_phys(work8);
+ msg[7]=0; /* 64bit host FIXME */
+
+ i2o_post_message(c,m);
+
+ /* Wait for a reply */
+ time=jiffies;
+
+ while(work8[0]==0x01) {
+ if((jiffies-time)>=5*HZ) {
+ printk(KERN_ERR "IOP reset timeout.\n");
+ kfree(work8);
+ return -ETIMEDOUT;
+ }
+ schedule();
+ barrier();
+ }
+
+ if (work8[0]==0x02)
+ printk(KERN_WARNING "IOP Reset rejected\n");
+
+ return 0;
+}
+#endif
+
+/*
+ * Bring an I2O controller into HOLD state. See the 1.5
+ * spec. Basically we go
+ *
+ * Wait for the message queue to initialise.
+ *	If it didn't -> controller is dead
+ *
+ * Send a get status using the message queue
+ * Poll for a reply block 88 bytes long
+ *
+ * Send an initialise outbound queue
+ * Poll for a reply
+ *
+ * Post our blank messages to the queue FIFO
+ *
+ * Send GetHRT, Parse it
+ */
+
+int i2o_activate_controller(struct i2o_controller *c)
+{
+ long time;
+ u32 m;
+ u8 *workspace;
+ u32 *msg;
+ int i;
+
+ printk(KERN_INFO "Configuring I2O controller at 0x%08X.\n", (u32)c->mem_phys);
+
+ /* First reset the IOP to sane state */
+// i2o_reset_adapter(c)
+
+ m=i2o_wait_message(c, "initialise");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ workspace = (void *)kmalloc(88, GFP_KERNEL);
+ if(workspace==NULL)
+ {
+ printk(KERN_ERR "IOP initialisation failed - no free memory.\n");
+ return -ENOMEM;
+ }
+
+ memset(workspace, 0, 88);
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]=I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2]=0;
+ msg[3]=0;
+ msg[4]=0;
+ msg[5]=0;
+ msg[6]=virt_to_phys(workspace);
+ msg[7]=0; /* 64bit host FIXME */
+ msg[8]=88;
+
+ i2o_post_message(c,m);
+
+ /*
+ * Wait for a reply
+ */
+
+ time=jiffies;
+
+ while(workspace[87]!=0xFF)
+ {
+ if((jiffies-time)>=5*HZ)
+ {
+ printk(KERN_ERR "IOP get status timeout.\n");
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+ schedule();
+ barrier();
+ }
+
+ /*
+ * Ok the reply has arrived. Fill in the important stuff
+ */
+
+ c->status = workspace[10];
+ c->i2oversion = (workspace[9]>>4)&0xFF;
+ c->inbound_size = (workspace[12]|(workspace[13]<<8))*4; /* 32bit words */
+
+ /*
+ * If the board is running, reset it - we have no idea
+ * what kind of a mess the previous owner left it in.
+ */
+
+// if(c->status == ADAPTER_STATE_OPERATIONAL)
+// i2o_reset_device(c);
+
+
+ m=i2o_wait_message(c, "initqueue");
+ if(m==0xFFFFFFFF)
+ {
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0]= EIGHT_WORD_MSG_SIZE| TRL_OFFSET_6;
+ msg[1]= I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= 0;
+ msg[3]= 0x0106; /* Transaction context */
+ msg[4]= 4096; /* Host page frame size */
+ msg[5]= MSG_FRAME_SIZE<<16|0x80; /* Outbound msg frame size and Initcode */
+ msg[6]= 0xD0000004; /* Simple SG LE, EOB */
+ msg[7]= virt_to_phys(workspace);
+ *((u32 *)workspace)=0;
+
+ /*
+ * Post it
+ */
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ time=jiffies;
+
+ while(workspace[0]!=I2O_CMD_OUTBOUND_INIT_COMPLETE)
+ {
+ if((jiffies-time)>=5*HZ)
+ {
+ printk(KERN_ERR "IOP outbound initialise failed.\n");
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+ schedule();
+ barrier();
+ }
+
+ kfree(workspace);
+
+ c->page_frame = kmalloc(MSG_POOL_SIZE, GFP_KERNEL);
+ if(c->page_frame==NULL)
+ {
+ printk(KERN_ERR "IOP init failed: no memory for message page.\n");
+ return -ENOMEM;
+ }
+
+ m=virt_to_phys(c->page_frame);
+
+ for(i=0; i< NMBR_MSG_FRAMES; i++)
+ {
+ I2O_REPLY_WRITE32(c,m);
+ mb();
+ m+=MSG_FRAME_SIZE;
+ }
+
+ /*
+ * The outbound queue is initialised and loaded,
+ *
+ * Now we need the Hardware Resource Table. We must ask for
+	 * this next; we can't issue random messages yet.
+ */
+
+
+ workspace=kmalloc(2048, GFP_KERNEL);
+ if(workspace==NULL)
+ {
+ printk(KERN_ERR "IOP init failed; no memory.\n");
+ return -ENOMEM;
+ }
+
+ m=i2o_wait_message(c, "I2O HRT timeout.");
+ if(m==0xFFFFFFFF)
+ {
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+ msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= 0x0;
+ msg[3]= 0x0; /* Transaction context */
+ msg[4]= (0xD0000000 | 2048); /* Simple transaction , 2K */
+ msg[5]= virt_to_phys(workspace); /* Dump it here */
+ *((u32 *)workspace)=0xFFFFFFFF;
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+ m=i2o_wait_reply(c, "HRT table", 5);
+
+ if(m==0xFFFFFFFF)
+ {
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_WARNING, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+ I2O_REPLY_WRITE32(c,m);
+
+ i2o_parse_hrt(c, workspace);
+
+ kfree(workspace);
+
+ return i2o_online_controller(c);
+// i2o_report_controller_unit(c, ADAPTER_TID);
+}
+
+
+/*
+ * Bring a controller online. Needs completing for multiple controllers
+ */
+
+int i2o_online_controller(struct i2o_controller *c)
+{
+ u32 m;
+ u32 *msg;
+ u32 systab[32];
+ u32 privmem[2];
+ u32 privio[2];
+ u32 *workspace;
+
+ systab[0]=1;
+ systab[1]=0;
+ systab[2]=0;
+ systab[3]=0;
+ systab[4]=0; /* Organisation ID */
+ systab[5]=2; /* Ident 2 for now */
+ systab[6]=0<<24|0<<16|I2OVERSION<<12|1; /* Memory mapped, IOPState, v1.5, segment 1 */
+ systab[7]=MSG_FRAME_SIZE>>2; /* Message size */
+ systab[8]=0; /* LastChanged */
+ systab[9]=0; /* Should be IOP capabilities */
+ systab[10]=virt_to_phys(c->post_port);
+ systab[11]=0;
+
+ privmem[0]=c->priv_mem; /* Private memory space base address */
+ privmem[1]=c->priv_mem_size;
+ privio[0]=c->priv_io; /* Private I/O address */
+ privio[1]=c->priv_io_size;
+
+ m=i2o_wait_message(c, "SetSysTab");
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ /* Now we build the systab */
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+ msg[4] = (1<<16)|(2<<12); /* Host 1 I2O 2 */
+ msg[5] = 1; /* Segment 1 */
+
+ /*
+ * Scatter Gather List
+ */
+
+ msg[6] = 0x54000000|48; /* One table for now */
+ msg[7] = virt_to_phys(systab);
+ msg[8] = 0xD4000000|48; /* One table for now */
+ msg[9] = virt_to_phys(privmem);
+/* msg[10] = virt_to_phys(privio); */
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Systab read", 5);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+ I2O_REPLY_WRITE32(c,m);
+
+ /*
+ * Finally we go online
+ */
+
+ m=i2o_wait_message(c, "No message for SysEnable");
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SYS_ENABLE<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+
+ m=i2o_wait_reply(c, "Enable", 240);
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+ I2O_REPLY_WRITE32(c,m);
+
+ /*
+ * Grab the LCT, see what is attached
+ */
+
+ m=i2o_wait_message(c, "No message for LCT");
+
+ if(m==0xFFFFFFFF)
+ return -ETIMEDOUT;
+
+ msg=(u32 *)(c->mem_offset+m);
+
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ if(workspace==NULL)
+ {
+ msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1]= HOST_TID<<12|ADAPTER_TID; /* NOP */
+ i2o_post_message(c,m);
+ printk(KERN_ERR "No free memory for i2o controller buffer.\n");
+ return -ENOMEM;
+ }
+
+ memset(workspace, 0, 8192);
+
+	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = 0; /* Context not needed */
+ msg[3] = 0;
+ msg[4] = 0xFFFFFFFF; /* All devices */
+ msg[5] = 0x00000000; /* Report now */
+ msg[6] = 0xD0000000|8192;
+ msg[7] = virt_to_bus(workspace);
+
+ i2o_post_message(c,m);
+
+ barrier();
+
+ /*
+ * Now wait for a reply
+ */
+
+ m=i2o_wait_reply(c, "LCT", 5);
+
+ if(m==0xFFFFFFFF)
+ {
+ kfree(workspace);
+ return -ETIMEDOUT;
+ }
+
+ msg=(u32 *)bus_to_virt(m);
+
+ if(msg[4]>>24)
+ {
+ i2o_report_status(KERN_ERR, "i2o_core",
+ (msg[1]>>24)&0xFF, (msg[4]>>24)&0xFF,
+ msg[4]&0xFFFF);
+ }
+
+ i2o_parse_lct(c, workspace);
+ kfree(workspace);
+
+ I2O_REPLY_WRITE32(c,m);
+
+ return 0;
+}
+
+/*
+ * Run time support routines
+ */
+
+/*
+ * Generic "post and forget" helpers. This is less efficient - we do
+ * a memcpy for example that isnt strictly needed, but for most uses
+ * this is simply not worth optimising
+ */
+
+int i2o_post_this(struct i2o_controller *c, int tid, u32 *data, int len)
+{
+ u32 m;
+ u32 *msg;
+ unsigned long t=jiffies;
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF && (jiffies-t)<HZ);
+
+
+ if(m==0xFFFFFFFF)
+ {
+ printk(KERN_ERR "i2o: controller not responding.\n");
+ return -1;
+ }
+ msg = bus_to_virt(c->mem_offset + m);
+ memcpy(msg, data, len);
+ i2o_post_message(c,m);
+ return 0;
+}
+
+/*
+ * Post a message and wait for a response flag to be set. This API will
+ * change to use wait queues one day.
+ */
+
+int i2o_post_wait(struct i2o_controller *c, int tid, u32 *data, int len, int *flag, int timeout)
+{
+ unsigned long t=jiffies;
+
+ *flag = 0;
+
+ if(i2o_post_this(c, tid, data, len))
+ return -1;
+
+ while(!*flag && (jiffies-t)<timeout*HZ)
+ {
+ schedule();
+ mb();
+ }
+ if(*flag <= 0)
+ return -1;
+ return 0;
+}
+
+/*
+ * Issue UTIL_CLAIM messages
+ */
+
+int i2o_issue_claim(struct i2o_controller *c, int tid, int context, int onoff, int *flag)
+{
+ u32 msg[6];
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ if(onoff)
+ msg[1] = I2O_CMD_UTIL_CLAIM << 24 | HOST_TID<<12 | tid;
+ else
+ msg[1] = I2O_CMD_UTIL_RELEASE << 24 | HOST_TID << 12 | tid;
+
+ /* The 0x80000000 convention for flagging is assumed by this helper */
+
+ msg[2] = 0x80000000|context;
+ msg[3] = (u32)flag;
+ msg[4] = 0x01<<24; /* Primary user */
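+
+	/*
+	 * msg[2] carries the caller's context with bit 31 set so that the
+	 * OSM's reply handler can recognise a utility-class reply, and
+	 * msg[3] carries the address of the completion flag that the reply
+	 * handler writes (see the default case in i2o_lan_reply).
+	 */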
+
+ return i2o_post_wait(c, tid, msg, 20, flag,2);
+}
+
+/*
+ * Query a scalar value
+ */
+
+int i2o_query_scalar(struct i2o_controller *c, int tid, int context,
+ int group, int field, void *buf, int buflen, int *flag)
+{
+ u16 *op;
+ u32 *bl;
+ u32 msg[9];
+
+	bl=kmalloc(buflen+64, GFP_KERNEL);	/* Enough space for error replies */
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for query buffer.\n");
+ return -ENOMEM;
+ }
+
+ op = (u16*)bl;
+ op[0]=1; /* One Operation */
+ op[1]=0; /* PAD */
+ op[2]=1; /* FIELD_GET */
+ op[3]=group; /* group number */
+ op[4]=1; /* field count, default = 1 */
+ op[5]=field; /* field index */
+
+	/* Single value or the whole group? */
+	if(field == -1)
+	{
+ op[4]=-1;
+ op[5]=0;
+ }
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|12;
+ msg[6]=virt_to_bus(bl);
+ /*
+ * There are 8 bytes of "overhead" required to pull in
+ * a Params ResultsList; 2 bytes for ResultCount
+ * (which should have value=1), plus 2 bytes for pad,
+ * plus 2 bytes for BlockSize, plus 1 byte BlockStatus,
+ * plus 1 byte ErrorInfoSize (8 bytes total overhead).
+ * This is followed finally by actual result value(s).
+ *
+ * Tell the IOP to return 8 + buflen bytes.
+ */
+ msg[7]=0xD0000000|(8+buflen);
+ msg[8]=virt_to_bus(bl+3);
+
+ bl[3]=0xFCFCFCFC; // Pad,ResultCount
+ bl[4]=0xFAFAFCFC; // ErrorInfoSize,BlockStatus,BlockSize
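+
+	/*
+	 * On completion the ResultsList lands at bl+3:  bl[3] holds the
+	 * pad and ResultCount, bl[4] holds ErrorInfoSize, BlockStatus and
+	 * BlockSize, and the field value(s) start at bl[5] - the checks
+	 * below pick these apart.
+	 */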
+
+ /*
+ * Post the message and await a reply
+ */
+
+ if (i2o_post_wait(c, tid, msg, sizeof(msg), flag,2) < 0)
+ {
+ kfree(bl);
+ return -1;
+ }
+
+	if(bl[4]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_query_scalar - Error\n"
+ "ErrorInfoSize = 0x%02x, BlockStatus = 0x%02x, "
+ "BlockSize = 0x%04x\n",
+ bl[4]>>24, (bl[4]>>16)&0xFF, bl[4]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+ if((bl[3] & 0xFFFF) != 1)
+ {
+ printk(KERN_ERR "i2o: query ResultCount = 0x%04x\n", bl[3]&0xFFFF);
+ }
+
+ memcpy(buf, bl+5, buflen);
+ kfree(bl);
+ return 0;
+}
+
+
+#if 0
+/*
+ * Query a table field
+ * FIXME: NOT TESTED!
+ */
+int i2o_query_table(struct i2o_controller *c, int tid, int context,
+ void *buf, int buflen,
+ int table,
+ int *field, int fieldlen,
+ u32 *key, int keylen,
+ int *flag)
+{
+ static u16 op[32];
+ u32 *bl;
+ u32 msg[9];
+ int i;
+
+ bl=kmalloc(buflen+64, GFP_KERNEL);
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for query buffer.\n");
+ return -ENOMEM;
+ }
+
+ op[0]=1; /* Operation count */
+ op[1]=0; /* Reserved */
+ op[2]=I2O_PARAMS_LIST_GET; /* Operation */
+ op[3]=table; /* Group */
+ /* Specific fields or the whole group? */
+ if(*field != -1)
+ { /* FIXME: Fields can be variable size */
+ op[4]=fieldlen;
+ for (i=0; i < fieldlen; i++)
+ op[4+i]=field[i];
+ }
+ else
+ {
+ op[4]=-1;
+ op[5]=0;
+ }
+
+ memcpy(bl, op, 12);
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_GET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|12;
+ msg[6]=virt_to_bus(bl);
+
+ msg[7]=0xD0000000|(buflen+48);
+ msg[8]=virt_to_bus(bl+4);
+
+ /*
+ * Post the message and await a reply
+ */
+
+ if(i2o_post_wait(c, tid, msg, sizeof(msg), flag,2)<0)
+ return -1;
+
+	if(bl[5]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_query_table - Error\n"
+ "ErrorInfoSize = 0x%02x, BlockStatus = 0x%02x, "
+ "BlockSize = 0x%04x\n",
+ bl[5]>>24, (bl[5]>>16)&0xFF, bl[5]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+
+ if((bl[4]&0xFFFF)!=1)
+ printk(KERN_ERR "i2o: query ResultCount = %0#4x\n",
+ bl[4]&0xFFFF);
+
+ memcpy(buf, bl+6, buflen);
+ kfree(bl);
+ return 0;
+}
+#endif
+
+/*
+ * Set (for now) scalar value
+ *
+ * TODO: Add support for table groups
+ */
+
+int i2o_params_set(struct i2o_controller *c, int tid, int context, int table,
+ int field, void *buf, int buflen, int *flag)
+{
+ static u16 opdata[]={1,0,6,0,1,4,0};
+ u32 *bl;
+ u32 msg[9];
+
+ bl=kmalloc(buflen+64, GFP_KERNEL);
+ if(bl==NULL)
+ {
+ printk(KERN_ERR "i2o: no memory for set buffer.\n");
+ return -ENOMEM;
+ }
+
+ opdata[3]=table;
+ /* Single value or the whole group? */
+ if(field != -1) {
+ opdata[4]=1;
+ opdata[5]=field;
+ opdata[6]=*(u16 *)buf;
+ }
+ else {
+ opdata[4]=-1;
+ opdata[5]=0;
+ }
+
+ memcpy(bl, opdata, 14);
+
+ msg[0]=NINE_WORD_MSG_SIZE|SGL_OFFSET_5;
+ msg[1]=I2O_CMD_UTIL_PARAMS_SET<<24|HOST_TID<<12|tid;
+ msg[2]=context|0x80000000; /* So we can pick it out */
+ msg[3]=(u32)flag;
+ msg[4]=0;
+ msg[5]=0x54000000|14;
+ msg[6]=virt_to_bus(bl);
+ msg[7]=0xD0000000|(buflen+48);
+ msg[8]=virt_to_bus(bl+4);
+
+ /* Post the message and wait for a reply */
+ if(i2o_post_wait(c, tid, msg, 36, flag, 5)<0)
+ {
+ kfree(bl);
+ return -1;
+ }
+
+ /* Perhaps we should check errors, eh? */
+	if(bl[5]&0x00FF0000)	/* BlockStatus != SUCCESS */
+ {
+ printk(KERN_WARNING "i2o_params_set - Error\n"
+ "ErrorInfoSize = %0#2x, BlockStatus = %0#2x, "
+ "BlockSize = %0#4x\n",
+ bl[5]>>24, (bl[5]>>16)&0xFF, bl[5]&0xFFFF);
+ kfree(bl);
+ return -1;
+ }
+
+ if((bl[4] & 0xFFFF) != 1)
+ {
+ printk(KERN_ERR "i2o: params set ResultCount = %0#4x\n",
+ bl[4]&0xFFFF);
+ }
+
+ kfree(bl);
+ return 0;
+}
+
+
+void report_common_status(u8 req_status)
+{
+ /* the following reply status strings are common to all classes */
+
+ static char *REPLY_STATUS[] = {
+ "SUCCESS",
+ "ABORT_DIRTY",
+ "ABORT_NO_DATA_TRANSFER",
+ "ABORT_PARTIAL_TRANSFER",
+ "ERROR_DIRTY",
+ "ERROR_NO_DATA_TRANSFER",
+ "ERROR_PARTIAL_TRANSFER",
+ "PROCESS_ABORT_DIRTY",
+ "PROCESS_ABORT_NO_DATA_TRANSFER",
+ "PROCESS_ABORT_PARTIAL_TRANSFER",
+ "TRANSACTION_ERROR",
+ "PROGRESS_REPORT"
+ };
+
+ if (req_status > I2O_REPLY_STATUS_PROGRESS_REPORT)
+ printk("%0#4x / ", req_status);
+ else
+ printk("%s / ", REPLY_STATUS[req_status]);
+
+ return;
+}
+
+static void report_common_dsc(u16 detailed_status)
+{
+ /* The following detailed statuscodes are valid
+ - for executive class, utility class, DDM class and
+ - for transaction error replies
+ */
+
+ static char *COMMON_DSC[] = {
+ "SUCCESS",
+ "0x01", // not used
+ "BAD_KEY",
+ "TCL_ERROR",
+ "REPLY_BUFFER_FULL",
+ "NO_SUCH_PAGE",
+ "INSUFFICIENT_RESOURCE_SOFT",
+ "INSUFFICIENT_RESOURCE_HARD",
+ "0x08", // not used
+ "CHAIN_BUFFER_TOO_LARGE",
+ "UNSUPPORTED_FUNCTION",
+ "DEVICE_LOCKED",
+ "DEVICE_RESET",
+ "INAPPROPRIATE_FUNCTION",
+ "INVALID_INITIATOR_ADDRESS",
+ "INVALID_MESSAGE_FLAGS",
+ "INVALID_OFFSET",
+ "INVALID_PARAMETER",
+ "INVALID_REQUEST",
+ "INVALID_TARGET_ADDRESS",
+ "MESSAGE_TOO_LARGE",
+ "MESSAGE_TOO_SMALL",
+ "MISSING_PARAMETER",
+ "TIMEOUT",
+ "UNKNOWN_ERROR",
+ "UNKNOWN_FUNCTION",
+ "UNSUPPORTED_VERSION",
+ "DEVICE_BUSY",
+ "DEVICE_NOT_AVAILABLE"
+ };
+
+ if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
+ printk("%0#4x.\n", detailed_status);
+ else
+ printk("%s.\n", COMMON_DSC[detailed_status]);
+
+ return;
+}
+
+void report_lan_dsc(u16 detailed_status)
+{
+ static char *LAN_DSC[] = { // Lan detailed status code strings
+ "SUCCESS",
+ "DEVICE_FAILURE",
+ "DESTINATION_NOT_FOUND",
+ "TRANSMIT_ERROR",
+ "TRANSMIT_ABORTED",
+ "RECEIVE_ERROR",
+ "RECEIVE_ABORTED",
+ "DMA_ERROR",
+ "BAD_PACKET_DETECTED",
+ "OUT_OF_MEMORY",
+ "BUCKET_OVERRUN",
+ "IOP_INTERNAL_ERROR",
+ "CANCELED",
+ "INVALID_TRANSACTION_CONTEXT",
+ "DEST_ADDRESS_DETECTED",
+ "DEST_ADDRESS_OMITTED",
+ "PARTIAL_PACKET_RETURNED",
+ "TEMP_SUSPENDED_STATE"
+ };
+
+ if (detailed_status > I2O_LAN_DSC_TEMP_SUSPENDED_STATE)
+ printk("%0#4x.\n", detailed_status);
+ else
+ printk("%s.\n", LAN_DSC[detailed_status]);
+
+ return;
+}
+
+static void report_util_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_UTIL_NOP:
+ printk("UTIL_NOP, ");
+ break;
+ case I2O_CMD_UTIL_ABORT:
+ printk("UTIL_ABORT, ");
+ break;
+ case I2O_CMD_UTIL_CLAIM:
+ printk("UTIL_CLAIM, ");
+ break;
+ case I2O_CMD_UTIL_RELEASE:
+ printk("UTIL_CLAIM_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_CONFIG_DIALOG:
+ printk("UTIL_CONFIG_DIALOG, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RESERVE:
+ printk("UTIL_DEVICE_RESERVE, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RELEASE:
+ printk("UTIL_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_ACK:
+ printk("UTIL_EVENT_ACKNOWLEDGE, ");
+ break;
+ case I2O_CMD_UTIL_EVT_REGISTER:
+ printk("UTIL_EVENT_REGISTER, ");
+ break;
+ case I2O_CMD_UTIL_LOCK:
+ printk("UTIL_LOCK, ");
+ break;
+ case I2O_CMD_UTIL_LOCK_RELEASE:
+ printk("UTIL_LOCK_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_GET:
+ printk("UTIL_PARAMS_GET, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_SET:
+ printk("UTIL_PARAMS_SET, ");
+ break;
+ case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
+ printk("UTIL_REPLY_FAULT_NOTIFY, ");
+ break;
+ default:
+ printk("%0#2x, ",cmd);
+ }
+
+ return;
+}
+
+
+static void report_exec_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_ADAPTER_ASSIGN:
+ printk("EXEC_ADAPTER_ASSIGN, ");
+ break;
+ case I2O_CMD_ADAPTER_READ:
+ printk("EXEC_ADAPTER_READ, ");
+ break;
+ case I2O_CMD_ADAPTER_RELEASE:
+ printk("EXEC_ADAPTER_RELEASE, ");
+ break;
+ case I2O_CMD_BIOS_INFO_SET:
+ printk("EXEC_BIOS_INFO_SET, ");
+ break;
+ case I2O_CMD_BOOT_DEVICE_SET:
+ printk("EXEC_BOOT_DEVICE_SET, ");
+ break;
+ case I2O_CMD_CONFIG_VALIDATE:
+ printk("EXEC_CONFIG_VALIDATE, ");
+ break;
+ case I2O_CMD_CONN_SETUP:
+ printk("EXEC_CONN_SETUP, ");
+ break;
+ case I2O_CMD_DDM_DESTROY:
+ printk("EXEC_DDM_DESTROY, ");
+ break;
+ case I2O_CMD_DDM_ENABLE:
+ printk("EXEC_DDM_ENABLE, ");
+ break;
+ case I2O_CMD_DDM_QUIESCE:
+ printk("EXEC_DDM_QUIESCE, ");
+ break;
+ case I2O_CMD_DDM_RESET:
+ printk("EXEC_DDM_RESET, ");
+ break;
+ case I2O_CMD_DDM_SUSPEND:
+ printk("EXEC_DDM_SUSPEND, ");
+ break;
+ case I2O_CMD_DEVICE_ASSIGN:
+ printk("EXEC_DEVICE_ASSIGN, ");
+ break;
+ case I2O_CMD_DEVICE_RELEASE:
+ printk("EXEC_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_HRT_GET:
+ printk("EXEC_HRT_GET, ");
+ break;
+ case I2O_CMD_ADAPTER_CLEAR:
+ printk("EXEC_IOP_CLEAR, ");
+ break;
+ case I2O_CMD_ADAPTER_CONNECT:
+ printk("EXEC_IOP_CONNECT, ");
+ break;
+ case I2O_CMD_ADAPTER_RESET:
+ printk("EXEC_IOP_RESET, ");
+ break;
+ case I2O_CMD_LCT_NOTIFY:
+ printk("EXEC_LCT_NOTIFY, ");
+ break;
+ case I2O_CMD_OUTBOUND_INIT:
+ printk("EXEC_OUTBOUND_INIT, ");
+ break;
+ case I2O_CMD_PATH_ENABLE:
+ printk("EXEC_PATH_ENABLE, ");
+ break;
+ case I2O_CMD_PATH_QUIESCE:
+ printk("EXEC_PATH_QUIESCE, ");
+ break;
+ case I2O_CMD_PATH_RESET:
+ printk("EXEC_PATH_RESET, ");
+ break;
+ case I2O_CMD_STATIC_MF_CREATE:
+ printk("EXEC_STATIC_MF_CREATE, ");
+ break;
+ case I2O_CMD_STATIC_MF_RELEASE:
+ printk("EXEC_STATIC_MF_RELEASE, ");
+ break;
+ case I2O_CMD_STATUS_GET:
+ printk("EXEC_STATUS_GET, ");
+ break;
+ case I2O_CMD_SW_DOWNLOAD:
+ printk("EXEC_SW_DOWNLOAD, ");
+ break;
+ case I2O_CMD_SW_UPLOAD:
+ printk("EXEC_SW_UPLOAD, ");
+ break;
+ case I2O_CMD_SW_REMOVE:
+ printk("EXEC_SW_REMOVE, ");
+ break;
+ case I2O_CMD_SYS_ENABLE:
+ printk("EXEC_SYS_ENABLE, ");
+ break;
+ case I2O_CMD_SYS_MODIFY:
+ printk("EXEC_SYS_MODIFY, ");
+ break;
+ case I2O_CMD_SYS_QUIESCE:
+ printk("EXEC_SYS_QUIESCE, ");
+ break;
+ case I2O_CMD_SYS_TAB_SET:
+ printk("EXEC_SYS_TAB_SET, ");
+ break;
+ default:
+ printk("%02x, ",cmd);
+ }
+
+ return;
+}
+
+static void report_lan_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case LAN_PACKET_SEND:
+ printk("LAN_PACKET_SEND, ");
+ break;
+ case LAN_SDU_SEND:
+ printk("LAN_SDU_SEND, ");
+ break;
+ case LAN_RECEIVE_POST:
+ printk("LAN_RECEIVE_POST, ");
+ break;
+ case LAN_RESET:
+ printk("LAN_RESET, ");
+ break;
+ case LAN_SUSPEND:
+ printk("LAN_SUSPEND, ");
+ break;
+ default:
+ printk("%02x, ",cmd);
+ }
+
+ return;
+}
+
+/* TODO: Add support for other classes */
+void i2o_report_status(const char *severity, const char *module, u8 cmd,
+ u8 req_status, u16 detailed_status)
+{
+ printk("%s", severity);
+ printk("%s: ", module);
+
+ if (cmd < 0x1F) { // Utility Class
+ report_util_cmd(cmd);
+ report_common_status(req_status);
+ report_common_dsc(detailed_status);
+ return;
+ }
+
+ if (cmd >= 0x30 && cmd <= 0x3F) { // LAN class
+ report_lan_cmd(cmd);
+ report_common_status(req_status);
+ report_lan_dsc(detailed_status);
+ return;
+ }
+
+ if (cmd >= 0xA0 && cmd <= 0xEF) { // Executive class
+ report_exec_cmd(cmd);
+ report_common_status(req_status);
+ report_common_dsc(detailed_status);
+ return;
+ }
+
+ printk("%02x, %02x / %04x.\n", cmd, req_status, detailed_status);
+ return;
+}
+
+
+EXPORT_SYMBOL(i2o_install_handler);
+EXPORT_SYMBOL(i2o_remove_handler);
+EXPORT_SYMBOL(i2o_install_device);
+EXPORT_SYMBOL(i2o_delete_device);
+EXPORT_SYMBOL(i2o_quiesce_controller);
+EXPORT_SYMBOL(i2o_clear_controller);
+EXPORT_SYMBOL(i2o_install_controller);
+EXPORT_SYMBOL(i2o_delete_controller);
+EXPORT_SYMBOL(i2o_unlock_controller);
+EXPORT_SYMBOL(i2o_find_controller);
+EXPORT_SYMBOL(i2o_num_controllers);
+EXPORT_SYMBOL(i2o_claim_device);
+EXPORT_SYMBOL(i2o_release_device);
+EXPORT_SYMBOL(i2o_run_queue);
+EXPORT_SYMBOL(i2o_report_controller_unit);
+EXPORT_SYMBOL(i2o_activate_controller);
+EXPORT_SYMBOL(i2o_online_controller);
+EXPORT_SYMBOL(i2o_get_class_name);
+
+EXPORT_SYMBOL(i2o_query_scalar);
+EXPORT_SYMBOL(i2o_params_set);
+EXPORT_SYMBOL(i2o_post_this);
+EXPORT_SYMBOL(i2o_post_wait);
+EXPORT_SYMBOL(i2o_issue_claim);
+
+EXPORT_SYMBOL(i2o_report_status);
+EXPORT_SYMBOL(report_common_status);
+EXPORT_SYMBOL(report_lan_dsc);
+
+EXPORT_SYMBOL(i2o_wait_message);
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O Core");
diff --git a/drivers/i2o/i2o_lan.c b/drivers/i2o/i2o_lan.c
new file mode 100644
index 000000000..1ebbe0b49
--- /dev/null
+++ b/drivers/i2o/i2o_lan.c
@@ -0,0 +1,853 @@
+/*
+ * linux/drivers/i2o/i2o_lan.c
+ *
+ * I2O LAN CLASS OSM Prototyping, May 7th 1999
+ *
+ * (C) Copyright 1999 University of Helsinki,
+ * Department of Computer Science
+ *
+ * This code is still under development / test.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *
+ * Tested: in FDDI environment (using SysKonnect's DDM)
+ * in ETH environment (using Intel 82558 DDM proto)
+ *
+ * TODO: batch mode networking
+ * - this one assumes that we always get one packet in a bucket
+ * - we've not been able to test batch replies and batch receives
+ * error checking / timeouts
+ * - code/test for other LAN classes
+ */
+
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/malloc.h>
+#include <linux/trdevice.h>
+#include <asm/io.h>
+
+#include <linux/errno.h>
+
+#include <linux/i2o.h>
+#include "i2o_lan.h"
+
+//#define DRIVERDEBUG
+#ifdef DRIVERDEBUG
+#define dprintk(s, args...) printk(s, ## args)
+#else
+#define dprintk(s, args...)
+#endif
+
+#define MAX_LAN_CARDS 4
+static struct device *i2o_landevs[MAX_LAN_CARDS+1];
+static int unit = -1; /* device unit number */
+
+struct i2o_lan_local {
+ u8 unit;
+ struct i2o_device *i2o_dev;
+ int reply_flag; // needed by scalar/table queries
+ struct fddi_statistics stats;
+/* the first fields are the same as in struct net_device_stats */
+ unsigned short (*type_trans)(struct sk_buff *, struct device *);
+};
+
+/* function prototypes */
+static int i2o_lan_receive_post(struct device *dev);
+static int i2o_lan_receive_post_reply(struct device *dev, struct i2o_message *m);
+
+
+static void i2o_lan_reply(struct i2o_handler *h, struct i2o_controller *iop,
+ struct i2o_message *m)
+{
+ u32 *msg = (u32 *)m;
+ u8 unit = (u8)(msg[2]>>16); // InitiatorContext
+ struct device *dev = i2o_landevs[unit];
+
+#ifdef DRIVERDEBUG
+ i2o_report_status(KERN_INFO, "i2o_lan", msg[1]>>24, msg[4]>>24,
+ msg[4]&0xFFFF);
+#endif
+ if (msg[0] & (1<<13)) // Fail bit is set
+ {
+ printk(KERN_INFO "IOP failed to process the msg\n");
+		printk("From tid=%d to tid=%d\n",(msg[1]>>12)&0xFFF,msg[1]&0xFFF);
+ return;
+ }
+
+ switch (msg[1] >> 24) {
+ case LAN_RECEIVE_POST:
+ if (dev->start)
+ i2o_lan_receive_post_reply(dev,m);
+ else {
+ // we are getting unused buckets back
+ u8 trl_count = msg[3] & 0x000000FF;
+ struct i2o_bucket_descriptor *bucket =
+ (struct i2o_bucket_descriptor *)&msg[6];
+ struct sk_buff *skb;
+ do {
+ dprintk("Releasing unused bucket\n");
+ skb = (struct sk_buff *)bucket->context;
+ dev_kfree_skb(skb);
+ bucket++;
+ } while (--trl_count);
+ }
+ break;
+
+ case LAN_PACKET_SEND:
+ case LAN_SDU_SEND:
+ {
+ u8 trl_count = msg[3] & 0x000000FF;
+
+ if (msg[4] >> 24) // ReqStatus != SUCCESS
+ {
+ printk(KERN_WARNING "%s: ",dev->name);
+ report_common_status(msg[4]>>24);
+ report_lan_dsc(msg[4]&0xFFFF);
+ }
+
+ do { // The HDM has handled the outgoing packet
+ dev_kfree_skb((struct sk_buff *)msg[4 + trl_count]);
+ dprintk(KERN_INFO "%s: Request skb freed (trl_count=%d).\n",
+ dev->name,trl_count);
+ } while (--trl_count);
+
+ dev->tbusy = 0;
+ mark_bh(NET_BH); /* inform upper layers */
+ }
+ break;
+
+ default:
+ if (msg[2] & 0x80000000) // reply to a util get/set
+ { // flag for the i2o_post_wait
+ int *flag = (int *)msg[3];
+ // ReqStatus != I2O_REPLY_STATUS_SUCCESS
+ *flag = (msg[4] >> 24) ? I2O_POST_WAIT_TIMEOUT
+ : I2O_POST_WAIT_OK ;
+ }
+ }
+}
+
+static struct i2o_handler i2o_lan_handler =
+{
+ i2o_lan_reply,
+ "I2O Lan OSM",
+ 0 // context
+};
+static int lan_context;
+
+
+static int i2o_lan_receive_post_reply(struct device *dev, struct i2o_message *m)
+{
+ u32 *msg = (u32 *)m;
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_bucket_descriptor *bucket = (struct i2o_bucket_descriptor *)&msg[6];
+ struct i2o_packet_info *packet;
+
+ u8 trl_count = msg[3] & 0x000000FF;
+ struct sk_buff *skb;
+
+#if 0
+ dprintk(KERN_INFO "TrlFlags = 0x%02X, TrlElementSize = %d, TrlCount = %d\n"
+ "msgsize = %d, buckets_remaining = %d\n",
+ msg[3]>>24, msg[3]&0x0000FF00, trl_count, msg[0]>>16, msg[5]);
+#endif
+
+/*
+ * NOTE: here we assume that, even in batch mode, we will get only
+ * one packet per bucket. This can be ensured by setting the
+ * PacketOrphanLimit to MaxPacketSize, as well as the bucket size.
+ */
+ do {
+		/* only the packet length is actually used from packet_info here */
+ packet = (struct i2o_packet_info *)bucket->packet_info;
+#if 0
+ dprintk(KERN_INFO "flags = 0x%02X, offset = 0x%06X, status = 0x%02X, length = %d\n",
+ packet->flags, packet->offset, packet->status, packet->len);
+#endif
+ skb = (struct sk_buff *)(bucket->context);
+ skb_put(skb,packet->len);
+ skb->dev = dev;
+ skb->protocol = priv->type_trans(skb, dev);
+ netif_rx(skb);
+
+ dprintk(KERN_INFO "%s: Incoming packet (%d bytes) delivered "
+ "to upper level.\n",dev->name,packet->len);
+
+ bucket++; // to next Packet Descriptor Block
+
+ } while (--trl_count);
+
+ if (msg[5] <= I2O_BUCKET_THRESH) // BucketsRemaining
+ i2o_lan_receive_post(dev);
+
+ return 0;
+}
+
+/* ====================================================
+ * Interface to i2o: functions to send LAN class requests
+ */
+
+/*
+ * i2o_lan_receive_post(): Post buckets to receive packets.
+ */
+static int i2o_lan_receive_post(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ struct sk_buff *skb;
+ u32 m; u32 *msg;
+
+ u32 bucket_len = (dev->mtu + dev->hard_header_len);
+ u32 bucket_count;
+ int n_elems = (iop->inbound_size - 16 ) / 12; // msg header + SGLs
+ u32 total = 0;
+ int i;
+
+ dprintk(KERN_INFO "%s: Allocating %d buckets (size %d).\n",
+ dev->name, I2O_BUCKET_COUNT, bucket_len);
+
+ while (total < I2O_BUCKET_COUNT)
+ {
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ bucket_count = (total + n_elems < I2O_BUCKET_COUNT)
+ ? n_elems
+ : I2O_BUCKET_COUNT - total;
+
+ msg[0] = I2O_MESSAGE_SIZE(4 + 3 * bucket_count) | 1<<12 | SGL_OFFSET_4;
+ msg[1] = LAN_RECEIVE_POST<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = bucket_count; // BucketCount
+
+ for (i = 0; i < bucket_count; i++)
+ {
+ skb = dev_alloc_skb(bucket_len + 2);
+ if (skb == NULL)
+ return -ENOMEM;
+ skb_reserve(skb, 2);
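+			/*
+			 * Each bucket is described by three words: a simple
+			 * SG element header carrying the bucket length, the
+			 * skb pointer as the bucket context (handed back to
+			 * us in the reply), and the bus address of the skb
+			 * data area.
+			 */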
+ msg[4 + 3*i] = 0x51000000 | bucket_len;
+ msg[5 + 3*i] = (u32)skb;
+ msg[6 + 3*i] = virt_to_bus(skb->data);
+ }
+ msg[4 + 3*i - 3] |= 0x80000000; // set LE flag
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Sending %d buckets (size %d) to LAN HDM.\n",
+ dev->name,bucket_count,bucket_len);
+
+ total += bucket_count;
+ }
+ return 0;
+}
+
+/*
+ * i2o_lan_reset(): Reset the LAN adapter into the operational state and
+ * restore it to full operation.
+ */
+static int i2o_lan_reset(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ msg[1] = LAN_RESET<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = 0; // TransactionContext
+ msg[4] = 1 << 16; // return posted buckets
+
+ i2o_post_message(iop,m);
+
+ return 0;
+}
+
+/*
+ * i2o_lan_suspend(): Put LAN adapter into a safe, non-active state.
+ *	The DDM then replies to any LAN class message with status
+ *	error_no_data_transfer / suspended.
+ */
+static int i2o_lan_suspend(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ m = I2O_POST_READ32(iop);
+ if (m == 0xFFFFFFFF)
+ return -ETIMEDOUT;
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE | SGL_OFFSET_0;
+ msg[1] = LAN_SUSPEND<<24 | HOST_TID<<12 | i2o_dev->id;
+ msg[2] = priv->unit << 16 | lan_context; // InitiatorContext
+ msg[3] = 0; // TransactionContext
+ msg[4] = 1 << 16; // return posted buckets
+
+ i2o_post_message(iop,m);
+
+ return 0;
+}
+
+/*
+ * Set DDM into batch mode.
+ */
+static void i2o_set_batch_mode(struct device *dev)
+{
+
+/*
+ * NOTE: we have not been able to test batch mode
+ * since the HDMs we have don't implement it.
+ */
+
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 val;
+
+ /* set LAN_BATCH_CONTROL attributes */
+
+ // enable batch mode, toggle automatically
+ val = 0x00000000;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 0,
+ &val, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "Unable to enter I2O LAN batch mode.\n");
+ else
+ dprintk(KERN_INFO "%s: I2O LAN batch mode enabled.\n",dev->name);
+
+ /*
+	 * When PacketOrphanLimit is the same as the maximum packet length,
+	 * packets will never be split into two separate buckets.
+ */
+
+ /* set LAN_OPERATION attributes */
+
+ val = dev->mtu + dev->hard_header_len; // PacketOrphanLimit
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0004, 2,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "i2o_lan: Unable to set PacketOrphanLimit.\n");
+ else
+ dprintk(KERN_INFO "PacketOrphanLimit set to %d\n",val);
+
+#if 0
+/*
+ * I2O spec 2.0: there should be proper default values for other attributes
+ * used in batch mode.
+ */
+
+ /* set LAN_RECEIVE_INFO attributes */
+
+ val = 10; // RxMaxBucketsReply
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0008, 3,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set RxMaxBucketsReply.\n",
+ dev->name);
+
+ val = 10; // RxMaxPacketsBuckets
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0008, 4,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set RxMaxPacketsBucket.\n",
+ dev->name);
+
+ /* set LAN_BATCH_CONTROL attributes */
+
+ val = 10; // MaxRxBatchCount
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 5,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s: Unable to set MaxRxBatchCount.\n",
+ dev->name);
+
+ val = 10; // MaxTxBatchCount
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0003, 8,
+ &val, 4, &priv->reply_flag) < 0)
+ printk(KERN_WARNING "%s Unable to set MaxTxBatchCount.\n",
+ dev->name);
+#endif
+
+ return;
+}
+
+/*
+ * i2o_lan_open(): Open the device to send/receive packets via
+ * the network device.
+ */
+static int i2o_lan_open(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+
+ i2o_lan_reset(dev);
+
+ if (i2o_issue_claim(iop, i2o_dev->id, lan_context, 1,
+ &priv->reply_flag) < 0)
+ {
+ printk(KERN_WARNING "%s: Unable to claim the I2O LAN device.\n", dev->name);
+ return -EAGAIN;
+ }
+ dprintk(KERN_INFO "%s: I2O LAN device claimed (tid=%d).\n", dev->name, i2o_dev->id);
+
+ dev->tbusy = 0;
+ dev->start = 1;
+
+ i2o_set_batch_mode(dev);
+ i2o_lan_receive_post(dev);
+
+ MOD_INC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * i2o_lan_close(): Stop transfers and suspend the device.
+ */
+static int i2o_lan_close(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ if (i2o_issue_claim(iop, i2o_dev->id, lan_context, 0,
+ &priv->reply_flag) < 0)
+ {
+ printk(KERN_WARNING "%s: Unable to unclaim I2O LAN device (tid=%d)\n",
+ dev->name, i2o_dev->id);
+ }
+
+ i2o_lan_suspend(dev);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+}
+
+/*
+ * i2o_lan_sdu_send(): Send a packet, MAC header added by the HDM.
+ * Must be supported by Fibre Channel, optional for Ethernet/802.3,
+ * Token Ring, FDDI
+ */
+static int i2o_lan_sdu_send(struct sk_buff *skb, struct device *dev)
+{
+#if 0
+/* not yet tested */
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ dprintk(KERN_INFO "LanSDUSend called, skb->len = %d\n", skb->len);
+
+ m = *iop->post_port;
+ if (m == 0xFFFFFFFF)
+ {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_4;
+ msg[1] = LAN_SDU_SEND<<24 | HOST_TID<<12 | i2o_dev->id;
+	msg[2] = priv->unit << 16 | lan_context;	// InitiatorContext
+ msg[3] = 1<<4; // TransmitControlWord: suppress CRC generation
+
+ // create a simple SGL, see fig. 3-26
+ // D7 = 1101 0111 = LE eob 0 1 LA dir bc1 bc0
+
+ msg[4] = 0xD7000000 | (skb->len); // no MAC hdr included
+ msg[5] = (u32)skb; // TransactionContext
+ memcpy(&msg[6], skb->data, 8); // Destination MAC Addr ??
+ msg[7] &= 0x0000FFFF; // followed by two bytes zeros
+ msg[8] = virt_to_bus(skb->data);
+ dev->trans_start = jiffies;
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Packet (%d bytes) sent to network.\n",
+ dev->name,skb->len);
+#endif
+ return 0;
+}
+
+/*
+ * i2o_lan_packet_send(): Send a packet as is, including the MAC header.
+ *
+ * Must be supported by Ethernet/802.3, Token Ring, FDDI, optional for
+ * Fibre Channel
+ */
+static int i2o_lan_packet_send(struct sk_buff *skb, struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 m; u32 *msg;
+
+ m = *iop->post_port;
+ if (m == 0xFFFFFFFF) {
+ dev_kfree_skb(skb);
+ return -1;
+ }
+
+ msg = bus_to_virt(iop->mem_offset + m);
+
+ msg[0] = SEVEN_WORD_MSG_SIZE | 1<<12 | SGL_OFFSET_4;
+ msg[1] = LAN_PACKET_SEND<<24 | HOST_TID<<12 | i2o_dev->id;
+	msg[2] = priv->unit << 16 | lan_context;	// InitiatorContext
+ msg[3] = 1 << 4; // TransmitControlWord
+
+ // create a simple SGL, see fig. 3-26
+ // D5 = 1101 0101 = LE eob 0 1 LA dir bc1 bc0
+
+ msg[4] = 0xD5000000 | skb->len; // MAC hdr included
+ msg[5] = (u32)skb; // TransactionContext
+ msg[6] = virt_to_bus(skb->data);
+
+ i2o_post_message(iop,m);
+
+ dprintk(KERN_INFO "%s: Packet (%d bytes) sent to network.\n",
+ dev->name, skb->len);
+
+ return 0;
+}
+
+/*
+ * net_device_stats(): Return statistical information.
+ */
+static struct net_device_stats *i2o_lan_get_stats(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u64 val[16];
+
+ /* query LAN_HISTORICAL_STATS scalar parameter group 0x0100 */
+
+ i2o_query_scalar(iop, i2o_dev->id, lan_context, 0x0100, -1,
+ &val, 16*8, &priv->reply_flag);
+ priv->stats.tx_packets = val[0];
+ priv->stats.tx_bytes = val[1];
+ priv->stats.rx_packets = val[2];
+ priv->stats.rx_bytes = val[3];
+ priv->stats.tx_errors = val[4];
+ priv->stats.rx_errors = val[5];
+ priv->stats.rx_dropped = val[6];
+
+ // other net_device_stats and FDDI class specific fields follow ...
+
+ return (struct net_device_stats *)&priv->stats;
+}
+
+/*
+ * i2o_lan_set_multicast_list(): Enable a network device to receive packets
+ *	not sent to the protocol address.
+ */
+static void i2o_lan_set_multicast_list(struct device *dev)
+{
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+ struct i2o_controller *iop = i2o_dev->controller;
+ u32 filter_mask;
+
+ dprintk(KERN_INFO "Entered i2o_lan_set_multicast_list().\n");
+
+	/*
+	 * FIXME: For some reason the code below kills the interrupt handler
+	 * in i2o_post_wait :-( so bail out here until that is sorted out.
+	 */
+	return;
+
+ dprintk(KERN_INFO "dev->flags = 0x%08X, dev->mc_count = 0x%08X\n",
+ dev->flags,dev->mc_count);
+
+ if (i2o_query_scalar(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) < 0 )
+ printk(KERN_WARNING "i2o_lan: Unable to query filter mask.\n");
+
+ dprintk(KERN_INFO "filter_mask = 0x%08X\n",filter_mask);
+
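+	/*
+	 * Filter mask bits used below, as we read LAN group 0001h field 3
+	 * (and as mirrored by i2o_proc.c): 0x00000002 = promiscuous mode,
+	 * 0x00000004 = promiscuous multicast mode, 0x00000300 = disable
+	 * broadcast and multicast reception.
+	 */
+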
+ if (dev->flags & IFF_PROMISC)
+ {
+ // Enable promiscuous mode
+
+ filter_mask |= 0x00000002;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+			printk(KERN_WARNING "i2o_lan: Unable to enable promiscuous mode.\n");
+		else
+			dprintk(KERN_INFO "i2o_lan: Promiscuous mode enabled.\n");
+
+ return;
+ }
+
+// if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+// {
+// // Disable promiscuous mode, use normal mode.
+// hardware_set_filter(NULL);
+//
+// dprintk(KERN_INFO "i2o_lan: Disabled promiscuous mode, uses normal mode\n");
+//
+// filter_mask = 0x00000000;
+// i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+// &filter_mask, 4, &priv->reply_flag);
+//
+// return;
+// }
+
+ if (dev->mc_count)
+ {
+ // Walk the address list, and load the filter
+// hardware_set_filter(dev->mc_list);
+
+ filter_mask = 0x00000004;
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+			printk(KERN_WARNING "i2o_lan: Unable to enable promiscuous multicast mode.\n");
+ else
+ dprintk(KERN_INFO "i2o_lan: Promiscuous multicast mode enabled.\n");
+
+ return;
+ }
+
+ // Unicast
+
+ filter_mask |= 0x00000300; // Broadcast, Multicast disabled
+ if (i2o_params_set(iop, i2o_dev->id, lan_context, 0x0001, 3,
+ &filter_mask, 4, &priv->reply_flag) <0)
+ printk(KERN_WARNING "i2o_lan: Unable to enable unicast mode.\n");
+ else
+ dprintk(KERN_INFO "i2o_lan: Unicast mode enabled.\n");
+
+ return;
+}
+
+struct device *i2o_lan_register_device(struct i2o_device *i2o_dev)
+{
+ struct device *dev = NULL;
+ struct i2o_lan_local *priv = NULL;
+ u8 hw_addr[8];
+ unsigned short (*type_trans)(struct sk_buff *, struct device *);
+
+ switch (i2o_dev->subclass)
+ {
+ case I2O_LAN_ETHERNET:
+ /* Note: init_etherdev calls
+ ether_setup() and register_netdevice()
+ and allocates the priv structure */
+
+ dev = init_etherdev(NULL, sizeof(struct i2o_lan_local));
+ if (dev == NULL)
+ return NULL;
+ type_trans = eth_type_trans;
+ break;
+
+/*
+#ifdef CONFIG_ANYLAN
+ case I2O_LAN_100VG:
+ printk(KERN_WARNING "i2o_lan: 100base VG not yet supported\n");
+ break;
+#endif
+*/
+
+#ifdef CONFIG_TR
+ case I2O_LAN_TR:
+ dev = init_trdev(NULL, sizeof(struct i2o_lan_local));
+ if(dev==NULL)
+ return NULL;
+ type_trans = tr_type_trans;
+ break;
+#endif
+
+#ifdef CONFIG_FDDI
+ case I2O_LAN_FDDI:
+ {
+ int size = sizeof(struct device) + sizeof(struct i2o_lan_local)
+ + sizeof("fddi%d ");
+
+			dev = (struct device *) kmalloc(size, GFP_KERNEL);
+			if (dev == NULL)
+				return NULL;
+			memset((char *)dev, 0, size);
+			dev->priv = (void *)(dev + 1);
+			dev->name = (char *)(dev + 1) + sizeof(struct i2o_lan_local);
+
+ if (dev_alloc_name(dev,"fddi%d") < 0)
+ {
+ printk(KERN_WARNING "i2o_lan: Too many FDDI devices.\n");
+ kfree(dev);
+ return NULL;
+ }
+ type_trans = fddi_type_trans;
+
+ fddi_setup(dev);
+ register_netdev(dev);
+ }
+ break;
+#endif
+
+/*
+#ifdef CONFIG_FIBRE_CHANNEL
+ case I2O_LAN_FIBRE_CHANNEL:
+ printk(KERN_WARNING "i2o_lan: Fibre Channel not yet supported\n");
+ break;
+#endif
+*/
+ case I2O_LAN_UNKNOWN:
+ default:
+ printk(KERN_WARNING "i2o_lan: LAN type 0x%08X not supported\n",
+ i2o_dev->subclass);
+ return NULL;
+ }
+
+ priv = (struct i2o_lan_local *)dev->priv;
+ priv->i2o_dev = i2o_dev;
+ priv->type_trans = type_trans;
+
+ if (i2o_query_scalar(i2o_dev->controller, i2o_dev->id, lan_context,
+ 0x0001, 0, &hw_addr, 8, &priv->reply_flag) < 0)
+ {
+		printk(KERN_WARNING "%s: Unable to query hardware address.\n",
+			dev->name);
+ return NULL;
+ }
+
+	dprintk(KERN_INFO "%s: hwaddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+		dev->name, hw_addr[0], hw_addr[1], hw_addr[2], hw_addr[3],
+		hw_addr[4], hw_addr[5]);
+
+ dev->addr_len = 6;
+ memcpy(dev->dev_addr, hw_addr, 6);
+
+ dev->open = i2o_lan_open;
+ dev->stop = i2o_lan_close;
+ dev->hard_start_xmit = i2o_lan_packet_send;
+ dev->get_stats = i2o_lan_get_stats;
+ dev->set_multicast_list = i2o_lan_set_multicast_list;
+
+ return dev;
+}
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ struct device *dev;
+ struct i2o_lan_local *priv;
+ int i;
+
+ if (i2o_install_handler(&i2o_lan_handler) < 0)
+ {
+ printk(KERN_ERR "Unable to register I2O LAN OSM.\n");
+ return -EINVAL;
+ }
+
+ lan_context = i2o_lan_handler.context;
+
+ for (i=0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ struct i2o_controller *iop = i2o_find_controller(i);
+ struct i2o_device *i2o_dev;
+
+ if (iop==NULL)
+ continue;
+
+ for (i2o_dev=iop->devices;i2o_dev != NULL;i2o_dev=i2o_dev->next)
+ {
+ int class = i2o_dev->class;
+
+ if (class != 0x020) /* not I2O_CLASS_LAN device*/
+ continue;
+
+ if (unit == MAX_LAN_CARDS)
+ {
+ printk(KERN_WARNING "Too many I2O LAN devices.\n");
+ return -EINVAL;
+ }
+
+ dev = i2o_lan_register_device(i2o_dev);
+ if (dev == NULL)
+ {
+ printk(KERN_WARNING "Unable to register I2O LAN device\n");
+ continue; // try next one
+ }
+ priv = (struct i2o_lan_local *)dev->priv;
+
+ unit++;
+ i2o_landevs[unit] = dev;
+ priv->unit = unit;
+
+ printk(KERN_INFO "%s: I2O LAN device registered, tid = %d,"
+ " subclass = 0x%08X, unit = %d.\n",
+ dev->name, i2o_dev->id, i2o_dev->subclass,
+ priv->unit);
+ }
+ }
+
+ dprintk(KERN_INFO "%d I2O LAN devices found and registered.\n", unit+1);
+
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ int i;
+
+ for (i = 0; i <= unit; i++)
+ {
+ struct device *dev = i2o_landevs[i];
+ struct i2o_lan_local *priv = (struct i2o_lan_local *)dev->priv;
+ struct i2o_device *i2o_dev = priv->i2o_dev;
+
+ switch (i2o_dev->subclass)
+ {
+ case I2O_LAN_ETHERNET:
+ unregister_netdev(dev);
+ kfree(dev);
+ break;
+#ifdef CONFIG_FDDI
+ case I2O_LAN_FDDI:
+ unregister_netdevice(dev);
+ kfree(dev);
+ break;
+#endif
+#ifdef CONFIG_TR
+ case I2O_LAN_TR:
+ unregister_netdev(dev);
+ kfree(dev);
+ break;
+#endif
+ default:
+ printk(KERN_WARNING "i2o_lan: Spurious I2O LAN subclass 0x%08X.\n",
+ i2o_dev->subclass);
+ }
+
+ dprintk(KERN_INFO "%s: I2O LAN device unregistered.\n",
+ dev->name);
+ }
+
+ i2o_remove_handler(&i2o_lan_handler);
+}
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Univ of Helsinki, CS Department");
+MODULE_DESCRIPTION("I2O LAN OSM");
+
+#endif
diff --git a/drivers/i2o/i2o_lan.h b/drivers/i2o/i2o_lan.h
new file mode 100644
index 000000000..c8e82bf41
--- /dev/null
+++ b/drivers/i2o/i2o_lan.h
@@ -0,0 +1,112 @@
+/*
+ * i2o_lan.h LAN Class specific definitions
+ *
+ * I2O LAN CLASS OSM Prototyping, May 7th 1999
+ *
+ * (C) Copyright 1999 University of Helsinki,
+ * Department of Computer Science
+ *
+ * This code is still under development / test.
+ *
+ * Author: Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *
+ */
+
+#ifndef I2O_LAN_H
+#define I2O_LAN_H
+
+/* Tunable parameters first */
+
+#define I2O_BUCKET_COUNT 64
+#define I2O_BUCKET_THRESH 5
+
+/* LAN types */
+#define I2O_LAN_ETHERNET 0x0030
+#define I2O_LAN_100VG 0x0040
+#define I2O_LAN_TR 0x0050
+#define I2O_LAN_FDDI 0x0060
+#define I2O_LAN_FIBRE_CHANNEL 0x0070
+#define I2O_LAN_UNKNOWN 0x00000000
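+
+/* These subclass values match the low byte of the LCT SubClassInfo
+   decoded in i2o_proc.c: 0x30 Ethernet, 0x40 100Base VG, 0x50 Token
+   Ring, 0x60 FDDI, 0x70 Fibre Channel. */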
+
+/* Connector types */
+
+/* Ethernet */
+#define I2O_LAN_AUI		((I2O_LAN_ETHERNET << 4) + 0x00000001)
+#define I2O_LAN_10BASE5		((I2O_LAN_ETHERNET << 4) + 0x00000002)
+#define I2O_LAN_FIORL		((I2O_LAN_ETHERNET << 4) + 0x00000003)
+#define I2O_LAN_10BASE2		((I2O_LAN_ETHERNET << 4) + 0x00000004)
+#define I2O_LAN_10BROAD36	((I2O_LAN_ETHERNET << 4) + 0x00000005)
+#define I2O_LAN_10BASE_T	((I2O_LAN_ETHERNET << 4) + 0x00000006)
+#define I2O_LAN_10BASE_FP	((I2O_LAN_ETHERNET << 4) + 0x00000007)
+#define I2O_LAN_10BASE_FB	((I2O_LAN_ETHERNET << 4) + 0x00000008)
+#define I2O_LAN_10BASE_FL	((I2O_LAN_ETHERNET << 4) + 0x00000009)
+#define I2O_LAN_100BASE_TX	((I2O_LAN_ETHERNET << 4) + 0x0000000A)
+#define I2O_LAN_100BASE_FX	((I2O_LAN_ETHERNET << 4) + 0x0000000B)
+#define I2O_LAN_100BASE_T4	((I2O_LAN_ETHERNET << 4) + 0x0000000C)
+#define I2O_LAN_1000BASE_SX	((I2O_LAN_ETHERNET << 4) + 0x0000000D)
+#define I2O_LAN_1000BASE_LX	((I2O_LAN_ETHERNET << 4) + 0x0000000E)
+#define I2O_LAN_1000BASE_CX	((I2O_LAN_ETHERNET << 4) + 0x0000000F)
+#define I2O_LAN_1000BASE_T	((I2O_LAN_ETHERNET << 4) + 0x00000010)
+
+/* AnyLAN */
+#define I2O_LAN_100VG_ETHERNET	((I2O_LAN_100VG << 4) + 0x00000001)
+#define I2O_LAN_100VG_TR	((I2O_LAN_100VG << 4) + 0x00000002)
+
+/* Token Ring */
+#define I2O_LAN_4MBIT		((I2O_LAN_TR << 4) + 0x00000001)
+#define I2O_LAN_16MBIT		((I2O_LAN_TR << 4) + 0x00000002)
+
+/* FDDI */
+#define I2O_LAN_125MBAUD	((I2O_LAN_FDDI << 4) + 0x00000001)
+
+/* Fibre Channel */
+#define I2O_LAN_POINT_POINT	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001)
+#define I2O_LAN_ARB_LOOP	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002)
+#define I2O_LAN_PUBLIC_LOOP	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003)
+#define I2O_LAN_FABRIC		((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004)
+
+#define I2O_LAN_EMULATION 0x00000F00
+#define I2O_LAN_OTHER 0x00000F01
+#define I2O_LAN_DEFAULT 0xFFFFFFFF
+
+/* LAN class functions */
+
+#define LAN_PACKET_SEND 0x3B
+#define LAN_SDU_SEND 0x3D
+#define LAN_RECEIVE_POST 0x3E
+#define LAN_RESET 0x35
+#define LAN_SUSPEND 0x37
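+
+/* These are the I2O function codes placed in the top byte of msg[1],
+   e.g. msg[1] = LAN_PACKET_SEND<<24 | HOST_TID<<12 | tid in i2o_lan.c. */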
+
+/* LAN DetailedStatusCode defines */
+#define I2O_LAN_DSC_SUCCESS 0x00
+#define I2O_LAN_DSC_DEVICE_FAILURE 0x01
+#define I2O_LAN_DSC_DESTINATION_NOT_FOUND 0x02
+#define I2O_LAN_DSC_TRANSMIT_ERROR 0x03
+#define I2O_LAN_DSC_TRANSMIT_ABORTED 0x04
+#define I2O_LAN_DSC_RECEIVE_ERROR 0x05
+#define I2O_LAN_DSC_RECEIVE_ABORTED 0x06
+#define I2O_LAN_DSC_DMA_ERROR 0x07
+#define I2O_LAN_DSC_BAD_PACKET_DETECTED 0x08
+#define I2O_LAN_DSC_OUT_OF_MEMORY 0x09
+#define I2O_LAN_DSC_BUCKET_OVERRUN 0x0A
+#define I2O_LAN_DSC_IOP_INTERNAL_ERROR 0x0B
+#define I2O_LAN_DSC_CANCELED 0x0C
+#define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT 0x0D
+#define I2O_LAN_DSC_DEST_ADDRESS_DETECTED 0x0E
+#define I2O_LAN_DSC_DEST_ADDRESS_OMITTED 0x0F
+#define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED 0x10
+#define I2O_LAN_DSC_TEMP_SUSPENDED_STATE 0x11
+
+struct i2o_packet_info {
+ u32 offset : 24;
+ u32 flags : 8;
+ u32 len : 24;
+ u32 status : 8;
+};
+
+struct i2o_bucket_descriptor {
+ u32 context; /* FIXME: 64bit support */
+ struct i2o_packet_info packet_info[1];
+};
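+
+/*
+ * Layout sketch (our interpretation, not from the spec text): a bucket
+ * posted with LAN_RECEIVE_POST comes back as its TransactionContext
+ * followed by one i2o_packet_info per packet placed in the bucket;
+ * packet_info[1] is used as a variable-length array.
+ */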
+
+#endif /* I2O_LAN_H */
diff --git a/drivers/i2o/i2o_pci.c b/drivers/i2o/i2o_pci.c
new file mode 100644
index 000000000..596d9f953
--- /dev/null
+++ b/drivers/i2o/i2o_pci.c
@@ -0,0 +1,243 @@
+/*
+ * Find I2O capable controllers on the PCI bus, and register/install
+ * them with the I2O layer
+ *
+ * (C) Copyright 1999 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/malloc.h>
+#include <asm/io.h>
+
+/*
+ * Free bus specific resources
+ */
+
+static void i2o_pci_dispose(struct i2o_controller *c)
+{
+ I2O_IRQ_WRITE32(c,0xFFFFFFFF);
+ if(c->bus.pci.irq > 0)
+ free_irq(c->bus.pci.irq, c);
+ iounmap(((u8 *)c->post_port)-0x40);
+}
+
+/*
+ * No real bus specific handling yet (note that later we will
+ * need to 'steal' PCI devices on i960 mainboards)
+ */
+
+static int i2o_pci_bind(struct i2o_controller *c, struct i2o_device *dev)
+{
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int i2o_pci_unbind(struct i2o_controller *c, struct i2o_device *dev)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * Bus specific interrupt handler
+ */
+
+static void i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
+{
+ struct i2o_controller *c = dev_id;
+ i2o_run_queue(c);
+}
+
+/*
+ * Install a PCI (or in theory AGP) i2o controller
+ */
+
+int __init i2o_pci_install(struct pci_dev *dev)
+{
+ struct i2o_controller *c=kmalloc(sizeof(struct i2o_controller),
+ GFP_KERNEL);
+ u8 *mem;
+ u32 memptr = 0;
+ u32 size;
+
+ int i;
+
+ if(c==NULL)
+ {
+ printk(KERN_ERR "i2o_pci: insufficient memory to add controller.\n");
+ return -ENOMEM;
+ }
+ memset(c, 0, sizeof(*c));
+
+ for(i=0; i<6; i++)
+ {
+ /* Skip I/O spaces */
+ if(!(dev->base_address[i]&PCI_BASE_ADDRESS_SPACE))
+ {
+ memptr=PCI_BASE_ADDRESS_MEM_MASK&dev->base_address[i];
+ break;
+ }
+ }
+
+	if(i==6)
+	{
+		printk(KERN_ERR "i2o_pci: I2O controller has no memory regions defined.\n");
+		kfree(c);
+		return -ENOMEM;
+	}
+
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, 0xFFFFFFFF);
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, &size);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0+4*i, dev->base_address[i]);
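+
+	/*
+	 * BAR sizing sketch: writing all ones and reading back yields the
+	 * size mask in the writable address bits, so -size (two's
+	 * complement) approximates the region length.  This assumes the
+	 * read-only flag bits in the low nibble come back as zero.
+	 */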
+
+ /* Map the I2O controller */
+
+ printk(KERN_INFO "PCI I2O controller at 0x%08X size=%d\n", memptr, -size);
+ mem = ioremap(memptr, -size);
+
+ c->bus.pci.irq = -1;
+
+ c->irq_mask = (volatile u32 *)(mem+0x34);
+ c->post_port = (volatile u32 *)(mem+0x40);
+ c->reply_port = (volatile u32 *)(mem+0x44);
+
+ c->mem_phys = memptr;
+ c->mem_offset = (u32)mem;
+ c->destructor = i2o_pci_dispose;
+
+ c->bind = i2o_pci_bind;
+ c->unbind = i2o_pci_unbind;
+
+ c->type = I2O_TYPE_PCI;
+
+ I2O_IRQ_WRITE32(c,0xFFFFFFFF);
+
+ i = i2o_install_controller(c);
+
+ if(i<0)
+ {
+ printk(KERN_ERR "i2o: unable to install controller.\n");
+ return i;
+ }
+
+ c->bus.pci.irq = dev->irq;
+ if(c->bus.pci.irq)
+ {
+ i=request_irq(dev->irq, i2o_pci_interrupt, SA_SHIRQ,
+ c->name, c);
+ if(i<0)
+ {
+ printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
+ c->name, dev->irq);
+ c->bus.pci.irq = -1;
+ i2o_delete_controller(c);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+int __init i2o_pci_scan(void)
+{
+ struct pci_dev *dev;
+ int count=0;
+
+ printk(KERN_INFO "Checking for PCI I2O controllers...\n");
+
+ for(dev=pci_devices; dev!=NULL; dev=dev->next)
+ {
+ if((dev->class>>8)!=PCI_CLASS_INTELLIGENT_I2O)
+ continue;
+ if((dev->class&0xFF)>1)
+ {
+ printk(KERN_INFO "I2O controller found but does not support I2O 1.5 (skipping).\n");
+ continue;
+ }
+ printk(KERN_INFO "I2O controller on bus %d at %d.\n",
+ dev->bus->number, dev->devfn);
+ if(!dev->master)
+ printk(KERN_WARNING "Controller not master enabled.\n");
+ if(i2o_pci_install(dev)==0)
+ count++;
+ }
+ if(count)
+ printk(KERN_INFO "%d I2O controller%s found and installed.\n", count,
+ count==1?"":"s");
+ return count?count:-ENODEV;
+}
+
+static void i2o_pci_unload(void)
+{
+ int i=0;
+ struct i2o_controller *c;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c=i2o_find_controller(i);
+ if(c==NULL)
+ continue;
+ if(c->type == I2O_TYPE_PCI)
+ i2o_delete_controller(c);
+ i2o_unlock_controller(c);
+ }
+}
+
+static void i2o_pci_activate(void)
+{
+ int i=0;
+ struct i2o_controller *c;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ c=i2o_find_controller(i);
+ if(c==NULL)
+ continue;
+ if(c->type == I2O_TYPE_PCI)
+ {
+ if(i2o_activate_controller(c))
+ {
+				printk(KERN_ERR "I2O: Failed to initialize iop%d\n", c->unit);
+ i2o_unlock_controller(c);
+ free_irq(c->bus.pci.irq, c);
+ i2o_delete_controller(c);
+ continue;
+ }
+
+ I2O_IRQ_WRITE32(c,0);
+ }
+ i2o_unlock_controller(c);
+ }
+}
+
+#ifdef MODULE
+
+EXPORT_NO_SYMBOLS;
+MODULE_AUTHOR("Red Hat Software");
+MODULE_DESCRIPTION("I2O PCI Interface");
+
+int init_module(void)
+{
+ if(i2o_pci_scan()<0)
+ return -ENODEV;
+ i2o_pci_activate();
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ i2o_pci_unload();
+}
+
+#endif
diff --git a/drivers/i2o/i2o_proc.c b/drivers/i2o/i2o_proc.c
new file mode 100644
index 000000000..ce38b3914
--- /dev/null
+++ b/drivers/i2o/i2o_proc.c
@@ -0,0 +1,2382 @@
+/*
+ * procfs handler for Linux I2O subsystem
+ *
+ * Copyright (c) 1999 Intel Corporation
+ *
+ * Originally written by Deepak Saxena (deepak.saxena@intel.com)
+ *
+ * This program is free software. You can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * This is an initial test release. The code is based on the design
+ * of the ide procfs system (drivers/block/ide-proc.c). Some code
+ * taken from i2o-core module by Alan Cox.
+ *
+ * DISCLAIMER: This code is still under development/test and may cause
+ * your system to behave unpredictably. Use at your own discretion.
+ *
+ * LAN entries by Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
+ * University of Helsinki, Department of Computer Science
+ *
+ */
+
+/*
+ * set tabstop=3
+ */
+
+/*
+ * TODO List
+ *
+ * - Add support for any version 2.0 spec changes once 2.0 IRTOS
+ *   is available to test with
+ * - Clean up code to use official structure definitions
+ */
+
+// FIXME!
+#define FMT_U64_HEX "0x%08x%08x"
+#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/i2o.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/spinlock.h>
+
+
+#include "i2o_proc.h"
+
+#include "i2o_lan.h"
+
+/*
+ * Structure used to define /proc entries
+ */
+typedef struct _i2o_proc_entry_t
+{
+ char *name; /* entry name */
+ mode_t mode; /* mode */
+ read_proc_t *read_proc; /* read func */
+ write_proc_t *write_proc; /* write func */
+} i2o_proc_entry;
+
+static int proc_context = 0;
+
+
+static int i2o_proc_read_lct(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_hrt(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_stat(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_hw(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_dev(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_dev_name(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_ddm(char *, char **, off_t, int, int *, void *);
+static int i2o_proc_read_uinfo(char *, char **, off_t, int, int *, void *);
+static int print_serial_number(char *, int, u8 *, int);
+static int i2o_proc_create_entries(void *,
+ i2o_proc_entry *p, struct proc_dir_entry *);
+static void i2o_proc_remove_entries(i2o_proc_entry *p,
+ struct proc_dir_entry *);
+static int i2o_proc_add_controller(struct i2o_controller *,
+ struct proc_dir_entry * );
+static void i2o_proc_remove_controller(struct i2o_controller *,
+ struct proc_dir_entry * );
+static int create_i2o_procfs(void);
+static int destroy_i2o_procfs(void);
+static void i2o_proc_reply(struct i2o_handler *, struct i2o_controller *,
+ struct i2o_message *);
+
+static int i2o_proc_read_lan_dev_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_mac_addr(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_curr_addr(char *, char **, off_t, int, int *,
+ void *);
+#if 0
+static int i2o_proc_read_lan_mcast_addr(char *, char **, off_t, int, int *,
+ void *);
+#endif
+static int i2o_proc_read_lan_batch_control(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_operation(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_media_operation(char *, char **, off_t, int,
+ int *, void *);
+#if 0
+static int i2o_proc_read_lan_alt_addr(char *, char **, off_t, int, int *,
+ void *);
+#endif
+static int i2o_proc_read_lan_tx_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_rx_info(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_hist_stats(char *, char **, off_t, int, int *,
+ void *);
+static int i2o_proc_read_lan_opt_tx_hist_stats(char *, char **, off_t, int,
+ int *, void *);
+static int i2o_proc_read_lan_opt_rx_hist_stats(char *, char **, off_t, int,
+ int *, void *);
+static int i2o_proc_read_lan_fddi_stats(char *, char **, off_t, int, int *,
+ void *);
+
+#if 0
+/* Do we really need this??? */
+
+static loff_t i2o_proc_lseek(struct file *file, loff_t off, int whence)
+{
+ return 0;
+}
+#endif
+
+static struct proc_dir_entry *i2o_proc_dir_root;
+
+/*
+ * Message handler
+ */
+static struct i2o_handler i2o_proc_handler =
+{
+ (void *)i2o_proc_reply,
+ "I2O procfs Layer",
+ 0
+};
+
+/*
+ * IOP specific entries...write field just in case someone
+ * ever wants one.
+ */
+static i2o_proc_entry generic_iop_entries[] =
+{
+ {"hrt", S_IFREG|S_IRUGO, i2o_proc_read_hrt, NULL},
+ {"lct", S_IFREG|S_IRUGO, i2o_proc_read_lct, NULL},
+ {"stat", S_IFREG|S_IRUGO, i2o_proc_read_stat, NULL},
+ {"hw", S_IFREG|S_IRUGO, i2o_proc_read_hw, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Device specific entries
+ */
+static i2o_proc_entry generic_dev_entries[] =
+{
+ {"dev_identity", S_IFREG|S_IRUGO, i2o_proc_read_dev, NULL},
+ {"ddm_identity", S_IFREG|S_IRUGO, i2o_proc_read_ddm, NULL},
+ {"user_info", S_IFREG|S_IRUGO, i2o_proc_read_uinfo, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Storage unit specific entries (SCSI Periph, BS) with device names
+ */
+static i2o_proc_entry rbs_dev_entries[] =
+{
+ {"dev_name", S_IFREG|S_IRUGO, i2o_proc_read_dev_name, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+#define SCSI_TABLE_SIZE 13
+ static char *scsi_devices[] =
+ {
+ "Direct-Access Read/Write",
+ "Sequential-Access Storage",
+ "Printer",
+ "Processor",
+ "WORM Device",
+ "CD-ROM Device",
+ "Scanner Device",
+ "Optical Memory Device",
+ "Medium Changer Device",
+ "Communications Device",
+ "Graphics Art Pre-Press Device",
+ "Graphics Art Pre-Press Device",
+ "Array Controller Device"
+ };
+
+/* private */
+
+/*
+ * LAN specific entries
+ *
+ * Should groups with r/w entries have their own subdirectory?
+ *
+ */
+static i2o_proc_entry lan_entries[] =
+{
+ /* LAN param groups 0000h-0008h */
+ {"lan_dev_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_dev_info, NULL},
+ {"lan_mac_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_mac_addr, NULL},
+#if 0
+ {"lan_mcast_addr", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_mcast_addr, NULL},
+#endif
+ {"lan_batch_ctrl", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_batch_control, NULL},
+ {"lan_operation", S_IFREG|S_IRUGO, i2o_proc_read_lan_operation, NULL},
+ {"lan_media_operation", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_media_operation, NULL},
+#if 0
+ {"lan_alt_addr", S_IFREG|S_IRUGO, i2o_proc_read_lan_alt_addr, NULL},
+#endif
+ {"lan_tx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_tx_info, NULL},
+ {"lan_rx_info", S_IFREG|S_IRUGO, i2o_proc_read_lan_rx_info, NULL},
+ {"lan_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_hist_stats, NULL},
+ {"lan_opt_tx_stats", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_opt_tx_hist_stats, NULL},
+ {"lan_opt_rx_stats", S_IFREG|S_IRUGO,
+ i2o_proc_read_lan_opt_rx_hist_stats, NULL},
+ {"lan_fddi_stats", S_IFREG|S_IRUGO, i2o_proc_read_lan_fddi_stats, NULL},
+ /* some useful r/w entries, no write yet */
+ {"lan_curr_addr", S_IFREG|S_IRUGO|S_IWUSR,
+ i2o_proc_read_lan_curr_addr, NULL},
+ {NULL, 0, NULL, NULL}
+};
+
+static u32 i2o_proc_token = 0;
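+
+/*
+ * i2o_proc_token is the completion flag handed to i2o_post_wait();
+ * i2o_proc_reply() below sets it to I2O_POST_WAIT_OK when the IOP
+ * answers one of our queries.
+ */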
+
+static char* bus_strings[] =
+{
+ "Local Bus",
+ "ISA",
+ "EISA",
+ "MCA",
+ "PCI",
+ "PCMCIA",
+ "NUBUS",
+ "CARDBUS"
+};
+
+static spinlock_t i2o_proc_lock = SPIN_LOCK_UNLOCKED;
+
+void i2o_proc_reply(struct i2o_handler *phdlr, struct i2o_controller *pctrl,
+ struct i2o_message *pmsg)
+{
+ i2o_proc_token = I2O_POST_WAIT_OK;
+}
+
+int i2o_proc_read_hrt(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller *)data;
+ pi2o_hrt hrt;
+ u32 msg[6];
+ u32 *workspace;
+ u32 bus;
+ int count;
+ int i;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ workspace = kmalloc(2048, GFP_KERNEL);
+ hrt = (pi2o_hrt)workspace;
+ if(workspace==NULL)
+ {
+ len += sprintf(buf, "No free memory for HRT buffer\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ memset(workspace, 0, 2048);
+
+ msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
+ msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2]= (u32)proc_context;
+ msg[3]= 0;
+ msg[4]= (0xD0000000 | 2048);
+ msg[5]= virt_to_phys(workspace);
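+
+	/*
+	 * msg[4]/msg[5] form a single simple SGL element: 0xD0000000 flags
+	 * it (our reading) as last element + end of buffer, the low bits
+	 * carry the buffer length (2048), and msg[5] is the physical
+	 * address the IOP should write the HRT into.
+	 */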
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 6*4, &i2o_proc_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ len += sprintf(buf, "Timeout waiting for HRT\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+	if(hrt->hrt_version)
+	{
+		len += sprintf(buf+len,
+			       "HRT table for controller is too new a version.\n");
+		kfree(workspace);
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+ count = hrt->num_entries;
+
+	if((count * hrt->entry_len + 8) > 2048) {
+		printk(KERN_WARNING "i2o_proc: HRT does not fit into buffer\n");
+		len += sprintf(buf+len,
+			       "HRT table too big to fit in buffer.\n");
+		kfree(workspace);
+		spin_unlock(&i2o_proc_lock);
+		return len;
+	}
+
+ len += sprintf(buf+len, "HRT has %d entries of %d bytes each.\n",
+ count, hrt->entry_len);
+
+ for(i = 0; i < count; i++)
+ {
+ len += sprintf(buf+len, "Entry %d:\n", i);
+ len += sprintf(buf+len, " Adapter ID: %0#10x\n",
+ hrt->hrt_entry[i].adapter_id);
+ len += sprintf(buf+len, " Controlled by: %0#6x\n",
+ hrt->hrt_entry[i].parent_tid);
+ len += sprintf(buf+len, " Bus#%d\n",
+ hrt->hrt_entry[i].bus_num);
+
+ if(hrt->hrt_entry[i].bus_type != 0x80)
+ {
+ bus = hrt->hrt_entry[i].bus_type;
+ len += sprintf(buf+len, " %s Information\n", bus_strings[bus]);
+
+ switch(bus)
+ {
+ case I2O_BUS_LOCAL:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.local_bus.LbBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x\n",
+ hrt->hrt_entry[i].bus.local_bus.LbBaseMemoryAddress);
+ break;
+
+ case I2O_BUS_ISA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.isa_bus.IsaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.isa_bus.IsaBaseMemoryAddress);
+ len += sprintf(buf+len, " CSN: %0#4x,",
+ hrt->hrt_entry[i].bus.isa_bus.CSN);
+ break;
+
+ case I2O_BUS_EISA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaBaseMemoryAddress);
+ len += sprintf(buf+len, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.eisa_bus.EisaSlotNumber);
+ break;
+
+ case I2O_BUS_MCA:
+ len += sprintf(buf+len, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaBaseIOPort);
+ len += sprintf(buf+len, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaBaseMemoryAddress);
+ len += sprintf(buf+len, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.mca_bus.McaSlotNumber);
+ break;
+
+ case I2O_BUS_PCI:
+ len += sprintf(buf+len, " Bus: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciBusNumber);
+ len += sprintf(buf+len, " Dev: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciDeviceNumber);
+ len += sprintf(buf+len, " Func: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.PciFunctionNumber);
+ len += sprintf(buf+len, " Vendor: %0#6x",
+ hrt->hrt_entry[i].bus.pci_bus.PciVendorID);
+ len += sprintf(buf+len, " Device: %0#6x\n",
+ hrt->hrt_entry[i].bus.pci_bus.PciDeviceID);
+ break;
+
+ default:
+ len += sprintf(buf+len, " Unsupported Bus Type\n");
+ }
+ }
+ else
+ len += sprintf(buf+len, " Unknown Bus Type\n");
+ }
+
+ kfree(workspace);
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+int i2o_proc_read_lct(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller*)data;
+ u32 msg[8];
+ u32 *workspace;
+ pi2o_lct lct; /* = (pi2o_lct)c->lct; */
+ int entries;
+ int token;
+ int i;
+
+#define BUS_TABLE_SIZE 3
+ static char *bus_ports[] =
+ {
+ "Generic Bus",
+ "SCSI Bus",
+ "Fibre Channel Bus"
+ };
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ workspace = kmalloc(8192, GFP_KERNEL);
+ lct = (pi2o_lct)workspace;
+ if(workspace==NULL)
+ {
+ len += sprintf(buf, "No free memory for LCT buffer\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ memset(workspace, 0, 8192);
+
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_6;
+ msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
+ msg[2] = (u32)proc_context;
+ msg[3] = 0;
+ msg[4] = 0xFFFFFFFF; /* All devices */
+ msg[5] = 0x00000000; /* Report now */
+ msg[6] = 0xD0000000|8192;
+ msg[7] = virt_to_bus(workspace);
+
+ token = i2o_post_wait(c, ADAPTER_TID, msg, 8*4, &i2o_proc_token,2);
+ if(token == I2O_POST_WAIT_TIMEOUT)
+ {
+ kfree(workspace);
+ len += sprintf(buf, "Timeout waiting for LCT\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ entries = (lct->table_size - 3)/9;
+
+ len += sprintf(buf, "LCT contains %d %s\n", entries,
+ entries == 1 ? "entry" : "entries");
+ if(lct->boot_tid)
+ len += sprintf(buf+len, "Boot Device @ ID %d\n", lct->boot_tid);
+
+ for(i = 0; i < entries; i++)
+ {
+ len += sprintf(buf+len, "Entry %d\n", i);
+
+ len += sprintf(buf+len, " %s", i2o_get_class_name(lct->lct_entry[i].class_id));
+
+ /*
+ * Classes which we'll print subclass info for
+ */
+ switch(lct->lct_entry[i].class_id & 0xFFF)
+ {
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ switch(lct->lct_entry[i].sub_class)
+ {
+ case 0x00:
+ len += sprintf(buf+len, ": Direct-Access Read/Write");
+ break;
+
+ case 0x04:
+ len += sprintf(buf+len, ": WORM Drive");
+ break;
+
+ case 0x05:
+ len += sprintf(buf+len, ": CD-ROM Drive");
+ break;
+
+ case 0x07:
+ len += sprintf(buf+len, ": Optical Memory Device");
+ break;
+
+ default:
+ len += sprintf(buf+len, ": Unknown");
+ break;
+ }
+ break;
+
+ case I2O_CLASS_LAN:
+ switch(lct->lct_entry[i].sub_class & 0xFF)
+ {
+ case 0x30:
+ len += sprintf(buf+len, ": Ethernet");
+ break;
+
+ case 0x40:
+ len += sprintf(buf+len, ": 100base VG");
+ break;
+
+ case 0x50:
+ len += sprintf(buf+len, ": IEEE 802.5/Token-Ring");
+ break;
+
+ case 0x60:
+ len += sprintf(buf+len, ": ANSI X3T9.5 FDDI");
+ break;
+
+ case 0x70:
+ len += sprintf(buf+len, ": Fibre Channel");
+ break;
+
+ default:
+ len += sprintf(buf+len, ": Unknown Sub-Class");
+ break;
+ }
+ break;
+
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ if(lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
+ len += sprintf(buf+len, ": %s",
+ scsi_devices[lct->lct_entry[i].sub_class]);
+ else
+ len += sprintf(buf+len, ": Unknown Device Type");
+ break;
+
+ case I2O_CLASS_BUS_ADAPTER_PORT:
+ if(lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
+ len += sprintf(buf+len, ": %s",
+ bus_ports[lct->lct_entry[i].sub_class]);
+ else
+ len += sprintf(buf+len, ": Unknown Bus Type");
+ break;
+ }
+ len += sprintf(buf+len, "\n");
+
+ len += sprintf(buf+len, " Local TID: 0x%03x\n", lct->lct_entry[i].tid);
+ len += sprintf(buf+len, " User TID: 0x%03x\n", lct->lct_entry[i].user_tid);
+ len += sprintf(buf+len, " Parent TID: 0x%03x\n",
+ lct->lct_entry[i].parent_tid);
+ len += sprintf(buf+len, " Identity Tag: 0x%x%x%x%x%x%x%x%x\n",
+ lct->lct_entry[i].identity_tag[0],
+ lct->lct_entry[i].identity_tag[1],
+ lct->lct_entry[i].identity_tag[2],
+ lct->lct_entry[i].identity_tag[3],
+ lct->lct_entry[i].identity_tag[4],
+ lct->lct_entry[i].identity_tag[5],
+ lct->lct_entry[i].identity_tag[6],
+ lct->lct_entry[i].identity_tag[7]);
+ len += sprintf(buf+len, " Change Indicator: %0#10x\n",
+ lct->lct_entry[i].change_ind);
+ len += sprintf(buf+len, " Device Flags: %0#10x\n",
+ lct->lct_entry[i].device_flags);
+ }
+
+ kfree(workspace);
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+int i2o_proc_read_stat(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller*)data;
+ u32 *msg;
+ u32 m;
+ u8 *workspace;
+ u16 *work16;
+ u32 *work32;
+ long time;
+ char prodstr[25];
+ int version;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ workspace = (u8*)kmalloc(88, GFP_KERNEL);
+ if(!workspace)
+ {
+ len += sprintf(buf, "No memory for status transfer\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ m = I2O_POST_READ32(c);
+ if(m == 0xFFFFFFFF)
+ {
+ len += sprintf(buf, "Could not get inbound message frame from IOP!\n");
+ kfree(workspace);
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ msg = (u32 *)(m+c->mem_offset);
+
+ memset(workspace, 0, 88);
+ work32 = (u32*)workspace;
+ work16 = (u16*)workspace;
+
+ msg[0] = NINE_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID;
+ msg[2] = msg[3] = msg[4] = msg[5] = 0;
+ msg[6] = virt_to_phys(workspace);
+ msg[7] = 0; /* FIXME: 64-bit */
+ msg[8] = 88;
+
+ /*
+ * hmm...i2o_post_message should just take ptr to message, and
+	 * determine the offset on its own... less work for OSM developers
+ */
+ i2o_post_message(c, m);
+
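+	/*
+	 * There is no reply message for this request here; instead the IOP
+	 * is expected to write 0xFF into the last byte of the status block
+	 * when it has filled it in, so busy-poll that byte with a roughly
+	 * two second timeout.
+	 */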
+ time = jiffies;
+
+ while(workspace[87] != 0xFF)
+ {
+ if(jiffies-time >= 2*HZ)
+ {
+ len += sprintf(buf, "Timeout waiting for status reply\n");
+ kfree(workspace);
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+ schedule();
+ barrier();
+ }
+
+ len += sprintf(buf+len, "Organization ID: %0#6x\n", work16[0]);
+
+	version = (workspace[9]&0xF0)>>4;
+ if(version == 0x02) {
+ len += sprintf(buf+len, "Lowest I2O version supported: ");
+ switch(workspace[2]) {
+ case 0x00:
+ case 0x01:
+ len += sprintf(buf+len, "1.5\n");
+ break;
+ case 0x02:
+ len += sprintf(buf+len, "2.0\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Highest I2O version supported: ");
+ switch(workspace[3]) {
+ case 0x00:
+ case 0x01:
+ len += sprintf(buf+len, "1.5\n");
+ break;
+ case 0x02:
+ len += sprintf(buf+len, "2.0\n");
+ break;
+ }
+ }
+
+ len += sprintf(buf+len, "IOP ID: %0#5x\n", work16[2]&0xFFF);
+ len += sprintf(buf+len, "Host Unit ID: %0#6x\n", work16[3]);
+ len += sprintf(buf+len, "Segment Number: %0#5x\n", work16[4]&0XFFF);
+
+ len += sprintf(buf+len, "I2O Version: ");
+ switch(version)
+ {
+ case 0x00:
+ case 0x01:
+ len += sprintf(buf+len, "1.5\n");
+ break;
+ case 0x02:
+ len += sprintf(buf+len, "2.0\n");
+ break;
+ default:
+ len += sprintf(buf+len, "Unknown version\n");
+ }
+
+ len += sprintf(buf+len, "IOP State: ");
+ switch(workspace[10])
+ {
+ case 0x01:
+ len += sprintf(buf+len, "Init\n");
+ break;
+
+ case 0x02:
+ len += sprintf(buf+len, "Reset\n");
+ break;
+
+ case 0x04:
+ len += sprintf(buf+len, "Hold\n");
+ break;
+
+		case 0x05:
+			len += sprintf(buf+len, "Ready\n");
+			break;
+
+ case 0x08:
+ len += sprintf(buf+len, "Operational\n");
+ break;
+
+ case 0x10:
+ len += sprintf(buf+len, "FAILED\n");
+ break;
+
+ case 0x11:
+ len += sprintf(buf+len, "FAULTED\n");
+ break;
+
+ default:
+ len += sprintf(buf+len, "Unknown\n");
+ break;
+ }
+
+ /* 0x00 is the only type supported w/spec 1.5 */
+ /* Added 2.0 types */
+ len += sprintf(buf+len, "Messenger Type: ");
+ switch (workspace[11])
+ {
+ case 0x00:
+ len += sprintf(buf+len, "Memory Mapped\n");
+ break;
+ case 0x01:
+ len += sprintf(buf+len, "Memory mapped only\n");
+ break;
+ case 0x02:
+ len += sprintf(buf+len, "Remote only\n");
+ break;
+ case 0x03:
+ len += sprintf(buf+len, "Memory mapped and remote\n");
+ break;
+ default:
+ len += sprintf(buf+len, "Unknown\n");
+ break;
+ }
+ len += sprintf(buf+len, "Inbound Frame Size: %d bytes\n", work16[6]*4);
+ len += sprintf(buf+len, "Max Inbound Frames: %d\n", work32[4]);
+ len += sprintf(buf+len, "Current Inbound Frames: %d\n", work32[5]);
+ len += sprintf(buf+len, "Max Outbound Frames: %d\n", work32[6]);
+
+ /* Spec doesn't say if NULL terminated or not... */
+ memcpy(prodstr, work32+7, 24);
+ prodstr[24] = '\0';
+ len += sprintf(buf+len, "Product ID: %s\n", prodstr);
+
+ len += sprintf(buf+len, "LCT Size: %d\n", work32[13]);
+
+ len += sprintf(buf+len, "Desired Private Memory Space: %d kB\n",
+ work32[15]>>10);
+ len += sprintf(buf+len, "Allocated Private Memory Space: %d kB\n",
+ work32[16]>>10);
+ len += sprintf(buf+len, "Private Memory Base Address: %0#10x\n",
+ work32[17]);
+ len += sprintf(buf+len, "Desired Private I/O Space: %d kB\n",
+ work32[18]>>10);
+ len += sprintf(buf+len, "Allocated Private I/O Space: %d kB\n",
+ work32[19]>>10);
+ len += sprintf(buf+len, "Private I/O Base Address: %0#10x\n",
+ work32[20]);
+
+	kfree(workspace);
+	spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+int i2o_proc_read_hw(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_controller *c = (struct i2o_controller*)data;
+ static u32 work32[5];
+ static u8 *work8 = (u8*)work32;
+ static u16 *work16 = (u16*)work32;
+ int token;
+ u32 hwcap;
+
+ static char *cpu_table[] =
+ {
+ "Intel 80960 Series",
+ "AMD2900 Series",
+ "Motorola 68000 Series",
+ "ARM Series",
+ "MIPS Series",
+ "Sparc Series",
+ "PowerPC Series",
+ "Intel x86 Series"
+ };
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(c, ADAPTER_TID, proc_context,
+ 0, // ParamGroup 0x0000h
+ -1, // all fields
+ &work32,
+ sizeof(work32),
+ &i2o_proc_token);
+
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "IOP Hardware Information Table\n");
+
+ len += sprintf(buf+len, "I2O Vendor ID: %0#6x\n", work16[0]);
+ len += sprintf(buf+len, "Product ID: %0#6x\n", work16[1]);
+ len += sprintf(buf+len, "RAM: %dkB\n", work32[1]>>10);
+ len += sprintf(buf+len, "Non-Volatile Storage: %dkB\n", work32[2]>>10);
+
+ hwcap = work32[3];
+ len += sprintf(buf+len, "Capabilities:\n");
+ if(hwcap&0x00000001)
+ len += sprintf(buf+len, " Self-booting\n");
+ if(hwcap&0x00000002)
+ len += sprintf(buf+len, " Upgradable IRTOS\n");
+ if(hwcap&0x00000004)
+ len += sprintf(buf+len, " Supports downloading DDMs\n");
+ if(hwcap&0x00000008)
+ len += sprintf(buf+len, " Supports installing DDMs\n");
+ if(hwcap&0x00000010)
+ len += sprintf(buf+len, " Battery-backed RAM\n");
+
+ len += sprintf(buf+len, "CPU: ");
+	if(work8[16] > 7)
+ len += sprintf(buf+len, "Unknown\n");
+ else
+ len += sprintf(buf+len, "%s\n", cpu_table[work8[16]]);
+ /* Anyone using ProcessorVersion? */
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+int i2o_proc_read_dev(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
+ // == (allow) 512d bytes (max)
+ static u16 *work16 = (u16*)work32;
+ char sz[17];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0xF100, // ParamGroup F100h (Device Identity)
+ -1, // all fields
+ &work32,
+ sizeof(work32),
+ &i2o_proc_token);
+
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Device Class: %s\n", i2o_get_class_name(work16[0]));
+
+ len += sprintf(buf+len, "Owner TID: %0#5x\n", work16[2]);
+ len += sprintf(buf+len, "Parent TID: %0#5x\n", work16[3]);
+
+ memcpy(sz, work32+2, 16);
+ sz[16] = '\0';
+ len += sprintf(buf+len, "Vendor Info: %s\n", sz);
+
+ memcpy(sz, work32+6, 16);
+ sz[16] = '\0';
+ len += sprintf(buf+len, "Product Info: %s\n", sz);
+
+ memcpy(sz, work32+10, 16);
+ sz[16] = '\0';
+ len += sprintf(buf+len, "Description: %s\n", sz);
+
+ memcpy(sz, work32+14, 8);
+ sz[8] = '\0';
+ len += sprintf(buf+len, "Product Revision: %s\n", sz);
+
+ len += sprintf(buf+len, "Serial Number: ");
+ len = print_serial_number(buf, len,
+ (u8*)(work32+16),
+ /* allow for SNLen plus
+ * possible trailing '\0'
+ */
+ sizeof(work32)-(16*sizeof(u32))-2
+ );
+ len += sprintf(buf+len, "\n");
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+
+int i2o_proc_read_dev_name(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+
+ if ( d->dev_name[0] == '\0' )
+ return 0;
+
+ len = sprintf(buf, "%s\n", d->dev_name);
+
+ return len;
+}
+
+
+
+int i2o_proc_read_ddm(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[128];
+ static u16 *work16 = (u16*)work32;
+ int token;
+ char mod[25];
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0xF101, // ParamGroup F101h (DDM Identity)
+ -1, // all fields
+ &work32,
+ sizeof(work32),
+ &i2o_proc_token);
+
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Registering DDM TID: 0x%03x\n", work16[0]&0xFFF);
+
+ memcpy(mod, (char*)(work16+1), 24);
+ mod[24] = '\0';
+ len += sprintf(buf+len, "Module Name: %s\n", mod);
+
+ memcpy(mod, (char*)(work16+13), 8);
+ mod[8] = '\0';
+ len += sprintf(buf+len, "Module Rev: %s\n", mod);
+
+ len += sprintf(buf+len, "Serial Number: ");
+ len = print_serial_number(buf, len,
+ (u8*)(work16+17),
+ /* allow for SNLen plus
+ * possible trailing '\0'
+ */
+ sizeof(work32)-(17*sizeof(u16))-2
+ );
+ len += sprintf(buf+len, "\n");
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+int i2o_proc_read_uinfo(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[128];
+ int token;
+ char sz[65];
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0xF102, // ParamGroup F102h (User Information)
+ -1, // all fields
+ &work32,
+ sizeof(work32),
+ &i2o_proc_token);
+
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ memcpy(sz, (char*)work32, 64);
+ sz[64] = '\0';
+ len += sprintf(buf, "Device Name: %s\n", sz);
+
+ memcpy(sz, (char*)(work32+16), 64);
+ sz[64] = '\0';
+ len += sprintf(buf+len, "Service Name: %s\n", sz);
+
+ memcpy(sz, (char*)(work32+32), 64);
+ sz[64] = '\0';
+ len += sprintf(buf+len, "Physical Name: %s\n", sz);
+
+ memcpy(sz, (char*)(work32+48), 4);
+ sz[4] = '\0';
+ len += sprintf(buf+len, "Instance Number: %s\n", sz);
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+static int print_serial_number(char *buff, int pos, u8 *serialno, int max_len)
+{
+ int i;
+
+ /* 19990419 -sralston
+ * The I2O v1.5 (and v2.0 so far) "official specification"
+ * got serial numbers WRONG!
+ * Apparently, and despite what Section 3.4.4 says and
+ * Figure 3-35 shows (pg 3-39 in the pdf doc),
+ * the convention / consensus seems to be:
+ * + First byte is SNFormat
+ * + Second byte is SNLen (but only if SNFormat==7 (?))
+ * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
+ */
+ switch(serialno[0])
+ {
+ case I2O_SNFORMAT_BINARY: /* Binary */
+ pos += sprintf(buff+pos, "0x");
+ for(i = 0; i < serialno[1]; i++)
+ {
+ pos += sprintf(buff+pos, "%02X", serialno[2+i]);
+ }
+ break;
+
+ case I2O_SNFORMAT_ASCII: /* ASCII */
+ if ( serialno[1] < ' ' ) /* printable or SNLen? */
+ {
+ /* sanity */
+ max_len = (max_len < serialno[1]) ? max_len : serialno[1];
+ serialno[1+max_len] = '\0';
+
+ /* just print it */
+ pos += sprintf(buff+pos, "%s", &serialno[2]);
+ }
+ else
+ {
+ /* print chars for specified length */
+ for(i = 0; i < serialno[1]; i++)
+ {
+ pos += sprintf(buff+pos, "%c", serialno[2+i]);
+ }
+ }
+ break;
+
+ case I2O_SNFORMAT_UNICODE: /* UNICODE */
+ pos += sprintf(buff+pos, "UNICODE Format. Can't Display\n");
+ break;
+
+ case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
+ pos += sprintf(buff+pos,
+ "LAN-48 MAC Address @ %02X:%02X:%02X:%02X:%02X:%02X",
+ serialno[2], serialno[3],
+ serialno[4], serialno[5],
+ serialno[6], serialno[7]);
+		break;
+
+ case I2O_SNFORMAT_WAN: /* WAN MAC Address */
+ /* FIXME: Figure out what a WAN access address looks like?? */
+ pos += sprintf(buff+pos, "WAN Access Address");
+ break;
+
+
+/* plus new in v2.0 */
+ case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
+ /* FIXME: Figure out what a LAN-64 address really looks like?? */
+ pos += sprintf(buff+pos,
+ "LAN-64 MAC Address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
+ serialno[8], serialno[9],
+ serialno[2], serialno[3],
+ serialno[4], serialno[5],
+ serialno[6], serialno[7]);
+ break;
+
+
+ case I2O_SNFORMAT_DDM: /* I2O DDM */
+ pos += sprintf(buff+pos,
+ "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
+ *(u16*)&serialno[2],
+ *(u16*)&serialno[4],
+ *(u16*)&serialno[6]);
+ break;
+
+ case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */
+ case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */
+ /* FIXME: Figure if this is even close?? */
+ pos += sprintf(buff+pos,
+ "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
+ *(u32*)&serialno[2],
+ *(u32*)&serialno[6],
+ *(u32*)&serialno[10],
+ *(u32*)&serialno[14]);
+ break;
+
+
+ case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */
+ case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */
+ default:
+ pos += sprintf(buff+pos, "Unknown Data Format");
+ break;
+ }
+
+ return pos;
+}
+
+/* LAN group 0000h - Device info (scalar) */
+int i2o_proc_read_lan_dev_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[56];
+ static u8 *work8 = (u8*)work32;
+ static u16 *work16 = (u16*)work32;
+ static u64 *work64 = (u64*)work32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0000, -1, &work32, 56*4, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "LAN Type ........... ");
+ switch (work16[0])
+ {
+ case 0x0030:
+ len += sprintf(buf+len, "Ethernet, ");
+ break;
+ case 0x0040:
+ len += sprintf(buf+len, "100Base VG, ");
+ break;
+ case 0x0050:
+ len += sprintf(buf+len, "Token Ring, ");
+ break;
+ case 0x0060:
+ len += sprintf(buf+len, "FDDI, ");
+ break;
+ case 0x0070:
+ len += sprintf(buf+len, "Fibre Channel, ");
+ break;
+ default:
+ len += sprintf(buf+len, "Unknown type, ");
+ break;
+ }
+
+ if (work16[1]&0x00000001)
+ len += sprintf(buf+len, "emulated LAN, ");
+ else
+ len += sprintf(buf+len, "physical LAN port, ");
+
+ if (work16[1]&0x00000002)
+ len += sprintf(buf+len, "full duplex\n");
+ else
+ len += sprintf(buf+len, "simplex\n");
+
+ len += sprintf(buf+len, "Address format: ");
+ switch(work8[4]) {
+ case 0x00:
+ len += sprintf(buf+len, "IEEE 48bit\n");
+ break;
+ case 0x01:
+ len += sprintf(buf+len, "FC IEEE\n");
+ break;
+ default:
+ len += sprintf(buf+len, "Unknown\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "State: ");
+ switch(work8[5])
+ {
+ case 0x00:
+ len += sprintf(buf+len, "Unknown\n");
+ break;
+ case 0x01:
+ len += sprintf(buf+len, "Unclaimed\n");
+ break;
+ case 0x02:
+ len += sprintf(buf+len, "Operational\n");
+ break;
+ case 0x03:
+ len += sprintf(buf+len, "Suspended\n");
+ break;
+ case 0x04:
+ len += sprintf(buf+len, "Resetting\n");
+ break;
+ case 0x05:
+ len += sprintf(buf+len, "Error\n");
+ break;
+ case 0x06:
+ len += sprintf(buf+len, "Operational no Rx\n");
+ break;
+ case 0x07:
+ len += sprintf(buf+len, "Suspended no Rx\n");
+ break;
+ default:
+ len += sprintf(buf+len, "Unspecified\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Error status: ");
+ if(work16[3]&0x0001)
+ len += sprintf(buf+len, "Transmit Control Unit Inoperative ");
+ if(work16[3]&0x0002)
+ len += sprintf(buf+len, "Receive Control Unit Inoperative\n");
+ if(work16[3]&0x0004)
+ len += sprintf(buf+len, "Local memory Allocation Error\n");
+ len += sprintf(buf+len, "\n");
+
+ len += sprintf(buf+len, "Min Packet size: %d\n", work32[2]);
+ len += sprintf(buf+len, "Max Packet size: %d\n", work32[3]);
+ len += sprintf(buf+len, "HW Address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ work8[16],work8[17],work8[18],work8[19],
+ work8[20],work8[21],work8[22],work8[23]);
+
+ len += sprintf(buf+len, "Max Tx Wire Speed: " FMT_U64_HEX " bps\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "Max Rx Wire Speed: " FMT_U64_HEX " bps\n", U64_VAL(&work64[4]));
+
+ len += sprintf(buf+len, "Min SDU packet size: 0x%08x\n", work32[10]);
+ len += sprintf(buf+len, "Max SDU packet size: 0x%08x\n", work32[11]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0001h - MAC address table (scalar) */
+int i2o_proc_read_lan_mac_addr(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[48];
+ static u8 *work8 = (u8*)work32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0001, -1, &work32, 48*4, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Active address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ work8[0],work8[1],work8[2],work8[3],
+ work8[4],work8[5],work8[6],work8[7]);
+ len += sprintf(buf+len, "Current address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ work8[8],work8[9],work8[10],work8[11],
+ work8[12],work8[13],work8[14],work8[15]);
+ len += sprintf(buf+len, "Functional address mask: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ work8[16],work8[17],work8[18],work8[19],
+ work8[20],work8[21],work8[22],work8[23]);
+
+ len += sprintf(buf+len, "Filter mask: 0x%08x\n", work32[6]);
+ len += sprintf(buf+len, "HW/DDM capabilities: 0x%08x\n", work32[7]);
+ len += sprintf(buf+len, " Unicast packets %ssupported (%sabled)\n",
+ (work32[7]&0x00000001)?"":"not ",
+ (work32[6]&0x00000001)?"en":"dis");
+ len += sprintf(buf+len, " Promiscuous mode %ssupported (%sabled)\n",
+		       (work32[7]&0x00000002)?"":"not ",
+ (work32[6]&0x00000002)?"en":"dis");
+ len += sprintf(buf+len,
+ " Multicast promiscuous mode %ssupported (%sabled)\n",
+ (work32[7]&0x00000004)?"":"not ",
+ (work32[6]&0x00000004)?"en":"dis");
+ len += sprintf(buf+len,
+ " Broadcast Reception disabling %ssupported (%sabled)\n",
+ (work32[7]&0x00000100)?"":"not ",
+ (work32[6]&0x00000100)?"en":"dis");
+ len += sprintf(buf+len,
+ " Multicast Reception disabling %ssupported (%sabled)\n",
+ (work32[7]&0x00000200)?"":"not ",
+ (work32[6]&0x00000200)?"en":"dis");
+ len += sprintf(buf+len,
+ " Functional address disabling %ssupported (%sabled)\n",
+ (work32[7]&0x00000400)?"":"not ",
+ (work32[6]&0x00000400)?"en":"dis");
+ len += sprintf(buf+len, " MAC reporting %ssupported\n",
+ (work32[7]&0x00000800)?"":"not ");
+
+ len += sprintf(buf+len, " MAC Reporting mode: ");
+	if ((work32[6]&0x00001800) == 0x00001800)
+		len += sprintf(buf+len, "Pass all MAC packets (promiscuous)\n");
+	else if (work32[6]&0x00000800)
+		len += sprintf(buf+len, "Pass only priority MAC packets\n");
+	else if (work32[6]&0x00001000)
+		len += sprintf(buf+len, "Pass all MAC packets\n");
+	else
+		len += sprintf(buf+len, "Do not pass MAC packets\n");
+
+	len += sprintf(buf+len, "Number of multicast addresses: %d\n", work32[8]);
+	len += sprintf(buf+len, "Perfect filtering for max %d multicast addresses\n",
+		       work32[9]);
+	len += sprintf(buf+len, "Imperfect filtering for max %d multicast addresses\n",
+		       work32[10]);
+
+ spin_unlock(&i2o_proc_lock);
+
+ return len;
+}
+
+/* LAN group 0001h, field 1 - Current MAC (scalar) */
+int i2o_proc_read_lan_curr_addr(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[2];
+ static u8 *work8 = (u8*)work32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0001, 2, &work32, 8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Current address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ work8[0],work8[1],work8[2],work8[3],
+ work8[4],work8[5],work8[6],work8[7]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+#if 0
+/* LAN group 0002h - Multicast MAC address table (table) */
+int i2o_proc_read_lan_mcast_addr(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u8 work8[32];
+ static u32 field32[8];
+ static u8 *field8 = (u8 *)field32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_table_polled(d->controller, d->id, &work8, 32,
+ 0x0002, 0, field32, 8);
+
+ switch (token) {
+ case -ETIMEDOUT:
+ len += sprintf(buf, "Timeout reading table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -ENOMEM:
+ len += sprintf(buf, "No free memory to read the table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -EBADR:
+ len += sprintf(buf, "Error reading field.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ default:
+ break;
+ }
+
+ len += sprintf(buf, "Multicast MAC address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ field8[0],field8[1],field8[2],field8[3],
+ field8[4],field8[5],field8[6],field8[7]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+#endif
+
+/* LAN group 0003h - Batch Control (scalar) */
+int i2o_proc_read_lan_batch_control(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[18];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0003, -1, &work32, 72, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Batch mode ");
+ if (work32[0]&0x00000001)
+ len += sprintf(buf+len, "disabled");
+ else
+ len += sprintf(buf+len, "enabled");
+ if (work32[0]&0x00000002)
+ len += sprintf(buf+len, " (current setting)");
+ if (work32[0]&0x00000004)
+ len += sprintf(buf+len, ", forced");
+ else
+ len += sprintf(buf+len, ", toggle");
+ len += sprintf(buf+len, "\n");
+
+ if(d->i2oversion == 0x00) { /* Reserved in 1.53 and 2.0 */
+ len += sprintf(buf+len, "Rising Load Delay: %d ms\n",
+ work32[1]/10);
+ len += sprintf(buf+len, "Rising Load Threshold: %d ms\n",
+ work32[2]/10);
+ len += sprintf(buf+len, "Falling Load Delay: %d ms\n",
+ work32[3]/10);
+ len += sprintf(buf+len, "Falling Load Threshold: %d ms\n",
+ work32[4]/10);
+ }
+
+ len += sprintf(buf+len, "Max Rx Batch Count: %d\n", work32[5]);
+ len += sprintf(buf+len, "Max Rx Batch Delay: %d\n", work32[6]);
+
+ if(d->i2oversion == 0x00) {
+ len += sprintf(buf+len,
+ "Transmission Completion Reporting Delay: %d ms\n",
+ work32[7]);
+ } else {
+ len += sprintf(buf+len, "Max Tx Batch Delay: %d\n", work32[7]);
+ len += sprintf(buf+len, "Max Tx Batch Count: %d\n", work32[8]);
+ }
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0004h - LAN Operation (scalar) */
+int i2o_proc_read_lan_operation(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[5];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0004, -1, &work32, 20, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Packet prepadding (32b words): %d\n", work32[0]);
+ len += sprintf(buf+len, "Transmission error reporting: %s\n",
+ (work32[1]&1)?"on":"off");
+ len += sprintf(buf+len, "Bad packet handling: %s\n",
+ (work32[1]&0x2)?"by host":"by DDM");
+ len += sprintf(buf+len, "Packet orphan limit: %d\n", work32[2]);
+
+ len += sprintf(buf+len, "Tx modes:\n");
+ if (work32[3]&0x00000004)
+		len += sprintf(buf+len, "    HW CRC suppressed\n");
+ else
+ len += sprintf(buf+len, " HW CRC\n");
+ if (work32[3]&0x00000100)
+ len += sprintf(buf+len, " HW IPv4 checksumming\n");
+ if (work32[3]&0x00000200)
+ len += sprintf(buf+len, " HW TCP checksumming\n");
+ if (work32[3]&0x00000400)
+ len += sprintf(buf+len, " HW UDP checksumming\n");
+ if (work32[3]&0x00000800)
+ len += sprintf(buf+len, " HW RSVP checksumming\n");
+ if (work32[3]&0x00001000)
+ len += sprintf(buf+len, " HW ICMP checksumming\n");
+ if (work32[3]&0x00002000)
+ len += sprintf(buf+len, " Loopback packet not delivered\n");
+
+ len += sprintf(buf+len, "Rx modes:\n");
+ if (work32[4]&0x00000004)
+ len += sprintf(buf+len, " FCS in payload\n");
+ if (work32[4]&0x00000100)
+ len += sprintf(buf+len, " HW IPv4 checksum validation\n");
+ if (work32[4]&0x00000200)
+ len += sprintf(buf+len, " HW TCP checksum validation\n");
+ if (work32[4]&0x00000400)
+ len += sprintf(buf+len, " HW UDP checksum validation\n");
+ if (work32[4]&0x00000800)
+ len += sprintf(buf+len, " HW RSVP checksum validation\n");
+ if (work32[4]&0x00001000)
+ len += sprintf(buf+len, " HW ICMP checksum validation\n");
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0005h - Media operation (scalar) */
+int i2o_proc_read_lan_media_operation(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[9];
+ static u8 *work8 = (u8*)work32;
+ static u64 *work64 = (u64*)work32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0005, -1, &work32, 36, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Connector type: ");
+ switch(work32[0])
+ {
+ case 0x00000000:
+ len += sprintf(buf+len, "OTHER\n");
+ break;
+ case 0x00000001:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case 0x00000002:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case 0x00000003:
+ len += sprintf(buf+len, "UTP\n");
+ break;
+ case 0x00000004:
+ len += sprintf(buf+len, "BNC\n");
+ break;
+ case 0x00000005:
+ len += sprintf(buf+len, "RJ45\n");
+ break;
+ case 0x00000006:
+ len += sprintf(buf+len, "STP DB9\n");
+ break;
+ case 0x00000007:
+ len += sprintf(buf+len, "FIBER MIC\n");
+ break;
+ case 0x00000008:
+ len += sprintf(buf+len, "APPLE AUI\n");
+ break;
+ case 0x00000009:
+ len += sprintf(buf+len, "MII\n");
+ break;
+ case 0x0000000A:
+ len += sprintf(buf+len, "DB9\n");
+ break;
+ case 0x0000000B:
+ len += sprintf(buf+len, "HSSDC\n");
+ break;
+ case 0x0000000C:
+ len += sprintf(buf+len, "DUPLEX SC FIBER\n");
+ break;
+ case 0x0000000D:
+ len += sprintf(buf+len, "DUPLEX ST FIBER\n");
+ break;
+ case 0x0000000E:
+ len += sprintf(buf+len, "TNC/BNC\n");
+ break;
+ case 0xFFFFFFFF:
+ len += sprintf(buf+len, "HW DEFAULT\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connection type: ");
+ switch(work32[1])
+ {
+ case I2O_LAN_UNKNOWN:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case I2O_LAN_AUI:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case I2O_LAN_10BASE5:
+ len += sprintf(buf+len, "10BASE5\n");
+ break;
+ case I2O_LAN_FIORL:
+ len += sprintf(buf+len, "FIORL\n");
+ break;
+ case I2O_LAN_10BASE2:
+ len += sprintf(buf+len, "10BASE2\n");
+ break;
+ case I2O_LAN_10BROAD36:
+ len += sprintf(buf+len, "10BROAD36\n");
+ break;
+ case I2O_LAN_10BASE_T:
+ len += sprintf(buf+len, "10BASE-T\n");
+ break;
+ case I2O_LAN_10BASE_FP:
+ len += sprintf(buf+len, "10BASE-FP\n");
+ break;
+ case I2O_LAN_10BASE_FB:
+ len += sprintf(buf+len, "10BASE-FB\n");
+ break;
+ case I2O_LAN_10BASE_FL:
+ len += sprintf(buf+len, "10BASE-FL\n");
+ break;
+ case I2O_LAN_100BASE_TX:
+ len += sprintf(buf+len, "100BASE-TX\n");
+ break;
+ case I2O_LAN_100BASE_FX:
+ len += sprintf(buf+len, "100BASE-FX\n");
+ break;
+ case I2O_LAN_100BASE_T4:
+ len += sprintf(buf+len, "100BASE-T4\n");
+ break;
+ case I2O_LAN_1000BASE_SX:
+ len += sprintf(buf+len, "1000BASE-SX\n");
+ break;
+ case I2O_LAN_1000BASE_LX:
+ len += sprintf(buf+len, "1000BASE-LX\n");
+ break;
+ case I2O_LAN_1000BASE_CX:
+ len += sprintf(buf+len, "1000BASE-CX\n");
+ break;
+ case I2O_LAN_1000BASE_T:
+ len += sprintf(buf+len, "1000BASE-T\n");
+ break;
+ case I2O_LAN_100VG_ETHERNET:
+ len += sprintf(buf+len, "100VG-ETHERNET\n");
+ break;
+ case I2O_LAN_100VG_TR:
+ len += sprintf(buf+len, "100VG-TOKEN RING\n");
+ break;
+ case I2O_LAN_4MBIT:
+ len += sprintf(buf+len, "4MBIT TOKEN RING\n");
+ break;
+ case I2O_LAN_16MBIT:
+ len += sprintf(buf+len, "16 Mb Token Ring\n");
+ break;
+ case I2O_LAN_125MBAUD:
+ len += sprintf(buf+len, "125 MBAUD FDDI\n");
+ break;
+ case I2O_LAN_POINT_POINT:
+ len += sprintf(buf+len, "Point-to-point\n");
+ break;
+ case I2O_LAN_ARB_LOOP:
+ len += sprintf(buf+len, "Arbitrated loop\n");
+ break;
+ case I2O_LAN_PUBLIC_LOOP:
+ len += sprintf(buf+len, "Public loop\n");
+ break;
+ case I2O_LAN_FABRIC:
+ len += sprintf(buf+len, "Fabric\n");
+ break;
+ case I2O_LAN_EMULATION:
+ len += sprintf(buf+len, "Emulation\n");
+ break;
+ case I2O_LAN_OTHER:
+ len += sprintf(buf+len, "Other\n");
+ break;
+ case I2O_LAN_DEFAULT:
+ len += sprintf(buf+len, "HW default\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Current Tx Wire Speed: " FMT_U64_HEX " bps\n",
+ U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "Current Rx Wire Speed: " FMT_U64_HEX " bps\n",
+ U64_VAL(&work64[2]));
+
+ len += sprintf(buf+len, "%s duplex\n", (work8[24]&1)?"Full":"Half");
+
+ len += sprintf(buf+len, "Link status: ");
+ if(work8[25] == 0x00)
+ len += sprintf(buf+len, "Unknown\n");
+ else if(work8[25] == 0x01)
+ len += sprintf(buf+len, "Normal\n");
+ else if(work8[25] == 0x02)
+ len += sprintf(buf+len, "Failure\n");
+ else if(work8[25] == 0x03)
+ len += sprintf(buf+len, "Reset\n");
+ else
+ len += sprintf(buf+len, "Unspecified\n");
+
+ if (d->i2oversion == 0x00) { /* Reserved in 1.53 and 2.0 */
+ len += sprintf(buf+len, "Bad packets handled by: %s\n",
+ (work8[26] == 0xFF)?"host":"DDM");
+ }
+ if (d->i2oversion != 0x00) {
+ len += sprintf(buf+len, "Duplex mode target: ");
+ switch (work8[27]) {
+ case 0:
+ len += sprintf(buf+len, "Half Duplex\n");
+ break;
+ case 1:
+ len += sprintf(buf+len, "Full Duplex\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connector type target: ");
+ switch(work32[7])
+ {
+ case 0x00000000:
+ len += sprintf(buf+len, "OTHER\n");
+ break;
+ case 0x00000001:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case 0x00000002:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case 0x00000003:
+ len += sprintf(buf+len, "UTP\n");
+ break;
+ case 0x00000004:
+ len += sprintf(buf+len, "BNC\n");
+ break;
+ case 0x00000005:
+ len += sprintf(buf+len, "RJ45\n");
+ break;
+ case 0x00000006:
+ len += sprintf(buf+len, "STP DB9\n");
+ break;
+ case 0x00000007:
+ len += sprintf(buf+len, "FIBER MIC\n");
+ break;
+ case 0x00000008:
+ len += sprintf(buf+len, "APPLE AUI\n");
+ break;
+ case 0x00000009:
+ len += sprintf(buf+len, "MII\n");
+ break;
+ case 0x0000000A:
+ len += sprintf(buf+len, "DB9\n");
+ break;
+ case 0x0000000B:
+ len += sprintf(buf+len, "HSSDC\n");
+ break;
+ case 0x0000000C:
+ len += sprintf(buf+len, "DUPLEX SC FIBER\n");
+ break;
+ case 0x0000000D:
+ len += sprintf(buf+len, "DUPLEX ST FIBER\n");
+ break;
+ case 0x0000000E:
+ len += sprintf(buf+len, "TNC/BNC\n");
+ break;
+ case 0xFFFFFFFF:
+ len += sprintf(buf+len, "HW DEFAULT\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+
+ len += sprintf(buf+len, "Connection type target: ");
+ switch(work32[8])
+ {
+ case I2O_LAN_UNKNOWN:
+ len += sprintf(buf+len, "UNKNOWN\n");
+ break;
+ case I2O_LAN_AUI:
+ len += sprintf(buf+len, "AUI\n");
+ break;
+ case I2O_LAN_10BASE5:
+ len += sprintf(buf+len, "10BASE5\n");
+ break;
+ case I2O_LAN_FIORL:
+ len += sprintf(buf+len, "FIORL\n");
+ break;
+ case I2O_LAN_10BASE2:
+ len += sprintf(buf+len, "10BASE2\n");
+ break;
+ case I2O_LAN_10BROAD36:
+ len += sprintf(buf+len, "10BROAD36\n");
+ break;
+ case I2O_LAN_10BASE_T:
+ len += sprintf(buf+len, "10BASE-T\n");
+ break;
+ case I2O_LAN_10BASE_FP:
+ len += sprintf(buf+len, "10BASE-FP\n");
+ break;
+ case I2O_LAN_10BASE_FB:
+ len += sprintf(buf+len, "10BASE-FB\n");
+ break;
+ case I2O_LAN_10BASE_FL:
+ len += sprintf(buf+len, "10BASE-FL\n");
+ break;
+ case I2O_LAN_100BASE_TX:
+ len += sprintf(buf+len, "100BASE-TX\n");
+ break;
+ case I2O_LAN_100BASE_FX:
+ len += sprintf(buf+len, "100BASE-FX\n");
+ break;
+ case I2O_LAN_100BASE_T4:
+ len += sprintf(buf+len, "100BASE-T4\n");
+ break;
+ case I2O_LAN_1000BASE_SX:
+ len += sprintf(buf+len, "1000BASE-SX\n");
+ break;
+ case I2O_LAN_1000BASE_LX:
+ len += sprintf(buf+len, "1000BASE-LX\n");
+ break;
+ case I2O_LAN_1000BASE_CX:
+ len += sprintf(buf+len, "1000BASE-CX\n");
+ break;
+ case I2O_LAN_1000BASE_T:
+ len += sprintf(buf+len, "1000BASE-T\n");
+ break;
+ case I2O_LAN_100VG_ETHERNET:
+ len += sprintf(buf+len, "100VG-ETHERNET\n");
+ break;
+ case I2O_LAN_100VG_TR:
+ len += sprintf(buf+len, "100VG-TOKEN RING\n");
+ break;
+ case I2O_LAN_4MBIT:
+ len += sprintf(buf+len, "4MBIT TOKEN RING\n");
+ break;
+ case I2O_LAN_16MBIT:
+ len += sprintf(buf+len, "16 Mb Token Ring\n");
+ break;
+ case I2O_LAN_125MBAUD:
+ len += sprintf(buf+len, "125 MBAUD FDDI\n");
+ break;
+ case I2O_LAN_POINT_POINT:
+ len += sprintf(buf+len, "Point-to-point\n");
+ break;
+ case I2O_LAN_ARB_LOOP:
+ len += sprintf(buf+len, "Arbitrated loop\n");
+ break;
+ case I2O_LAN_PUBLIC_LOOP:
+ len += sprintf(buf+len, "Public loop\n");
+ break;
+ case I2O_LAN_FABRIC:
+ len += sprintf(buf+len, "Fabric\n");
+ break;
+ case I2O_LAN_EMULATION:
+ len += sprintf(buf+len, "Emulation\n");
+ break;
+ case I2O_LAN_OTHER:
+ len += sprintf(buf+len, "Other\n");
+ break;
+ case I2O_LAN_DEFAULT:
+ len += sprintf(buf+len, "HW default\n");
+ break;
+ default:
+ len += sprintf(buf+len, "\n");
+ break;
+ }
+ }
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+#if 0
+/* LAN group 0006h - Alternate address (table) */
+int i2o_proc_read_lan_alt_addr(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u8 work8[32];
+ static u32 field32[2];
+ static u8 *field8 = (u8 *)field32;
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_table_polled(d->controller, d->id, &work8, 32,
+ 0x0006, 0, field32, 8);
+ switch (token) {
+ case -ETIMEDOUT:
+ len += sprintf(buf, "Timeout reading table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -ENOMEM:
+ len += sprintf(buf, "No free memory to read the table.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ case -EBADR:
+ len += sprintf(buf, "Error reading field.\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ break;
+ default:
+ break;
+ }
+
+ len += sprintf(buf, "Alternate Address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
+ field8[0],field8[1],field8[2],field8[3],
+ field8[4],field8[5],field8[6],field8[7]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+#endif
+
+/* LAN group 0007h - Transmit info (scalar) */
+int i2o_proc_read_lan_tx_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[10];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0007, -1, &work32, 8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Max SG Elements per packet: %d\n", work32[0]);
+ len += sprintf(buf+len, "Max SG Elements per chain: %d\n", work32[1]);
+ len += sprintf(buf+len, "Max outstanding packets: %d\n", work32[2]);
+ len += sprintf(buf+len, "Max packets per request: %d\n", work32[3]);
+
+ len += sprintf(buf+len, "Tx modes:\n");
+ if(work32[4]&0x00000002)
+ len += sprintf(buf+len, " No DA in SGL\n");
+ if(work32[4]&0x00000004)
+ len += sprintf(buf+len, " CRC suppression\n");
+ if(work32[4]&0x00000008)
+ len += sprintf(buf+len, " Loop suppression\n");
+ if(work32[4]&0x00000010)
+ len += sprintf(buf+len, " MAC insertion\n");
+ if(work32[4]&0x00000020)
+ len += sprintf(buf+len, " RIF insertion\n");
+ if(work32[4]&0x00000100)
+ len += sprintf(buf+len, " IPv4 Checksum\n");
+ if(work32[4]&0x00000200)
+ len += sprintf(buf+len, " TCP Checksum\n");
+ if(work32[4]&0x00000400)
+ len += sprintf(buf+len, " UDP Checksum\n");
+ if(work32[4]&0x00000800)
+ len += sprintf(buf+len, " RSVP Checksum\n");
+ if(work32[4]&0x00001000)
+ len += sprintf(buf+len, " ICMP Checksum\n");
+ if (d->i2oversion == 0x00) {
+ if(work32[4]&0x00008000)
+ len += sprintf(buf+len, " Loopback Enabled\n");
+ if(work32[4]&0x00010000)
+ len += sprintf(buf+len, " Loopback Suppression Enabled\n");
+ } else {
+ if(work32[4]&0x00010000)
+ len += sprintf(buf+len, " Loopback Enabled\n");
+ if(work32[4]&0x00020000)
+ len += sprintf(buf+len, " Loopback Suppression Enabled\n");
+ }
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0008h - Receive info (scalar) */
+int i2o_proc_read_lan_rx_info(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u32 work32[10];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0008, -1, &work32, 8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Max size of chain element: %d\n", work32[0]);
+ len += sprintf(buf+len, "Max number of buckets: %d\n", work32[1]);
+
+ if (d->i2oversion > 0x00) { /* not in 1.5 */
+ len += sprintf(buf+len, "Rx modes: %d\n", work32[2]);
+ len += sprintf(buf+len, "RxMaxBucketsReply: %d\n", work32[3]);
+ len += sprintf(buf+len, "RxMaxPacketsPerBuckets: %d\n", work32[4]);
+ len += sprintf(buf+len, "RxMaxPostBuckets: %d\n", work32[5]);
+ }
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+/* LAN group 0100h - LAN Historical statistics (scalar) */
+int i2o_proc_read_lan_hist_stats(char *buf, char **start, off_t offset, int len,
+ int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[9];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0100, -1, &work64, 9*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "Tx packets: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "Tx bytes: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "Rx packets: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "Rx bytes: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "Tx errors: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "Rx errors: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "Rx dropped: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "Adapter resets: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "Adapter suspends: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+/* LAN group 0182h - Optional Non Media Specific Transmit Historical Statistics
+ * (scalar) */
+int i2o_proc_read_lan_opt_tx_hist_stats(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[9];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0182, -1, &work64, 9*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "TxRetryCount: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "DirectedBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "DirectedPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "MulticastBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "MulticastPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "BroadcastBytesTx: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "BroadcastPacketsTx: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "TotalGroupAddrTxCount: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "TotalTxPacketsTooShort: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+/* LAN group 0183h - Optional Non Media Specific Receive Historical Statistics
+ * (scalar) */
+int i2o_proc_read_lan_opt_rx_hist_stats(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[11];
+ int token;
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0183, -1, &work64, 11*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "ReceiveCRCErrorCount: " FMT_U64_HEX "\n", U64_VAL(&work64[0]));
+ len += sprintf(buf+len, "DirectedBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "DirectedPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "MulticastBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "MulticastPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "BroadcastBytesRx: " FMT_U64_HEX "\n", U64_VAL(&work64[5]));
+ len += sprintf(buf+len, "BroadcastPacketsRx: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "TotalGroupAddrRxCount: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "TotalRxPacketsTooShort: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+ len += sprintf(buf+len, "TotalRxPacketsTooLong: " FMT_U64_HEX "\n", U64_VAL(&work64[9]));
+ len += sprintf(buf+len, "TotalRuntPacketsReceived: " FMT_U64_HEX "\n", U64_VAL(&work64[10]));
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
+
+/* LAN group 0400h - Required FDDI Statistics (scalar) */
+int i2o_proc_read_lan_fddi_stats(char *buf, char **start, off_t offset,
+ int len, int *eof, void *data)
+{
+ struct i2o_device *d = (struct i2o_device*)data;
+ static u64 work64[11];
+ int token;
+
+ static char *conf_state[] =
+ {
+ "Isolated",
+ "Local a",
+ "Local b",
+ "Local ab",
+ "Local s",
+ "Wrap a",
+ "Wrap b",
+ "Wrap ab",
+ "Wrap s",
+ "C-Wrap a",
+ "C-Wrap b",
+ "C-Wrap s",
+ "Through",
+ };
+
+ static char *ring_state[] =
+ {
+ "Isolated",
+ "Non-op",
+ "Rind-op",
+ "Detect",
+ "Non-op-Dup",
+ "Ring-op-Dup",
+ "Directed",
+ "Trace"
+ };
+
+ static char *link_state[] =
+ {
+ "Off",
+ "Break",
+ "Trace",
+ "Connect",
+ "Next",
+ "Signal",
+ "Join",
+ "Verify",
+ "Active",
+ "Maintenance"
+ };
+
+ spin_lock(&i2o_proc_lock);
+
+ len = 0;
+
+ token = i2o_query_scalar(d->controller, d->id, proc_context,
+ 0x0400, -1, &work64, 11*8, &i2o_proc_token);
+ if(token < 0)
+ {
+ len += sprintf(buf, "Timeout waiting for reply from IOP\n");
+ spin_unlock(&i2o_proc_lock);
+ return len;
+ }
+
+ len += sprintf(buf, "ConfigurationState: %s\n", conf_state[work64[0]]);
+ len += sprintf(buf+len, "UpstreamNode: " FMT_U64_HEX "\n", U64_VAL(&work64[1]));
+ len += sprintf(buf+len, "DownStreamNode: " FMT_U64_HEX "\n", U64_VAL(&work64[2]));
+ len += sprintf(buf+len, "FrameErrors: " FMT_U64_HEX "\n", U64_VAL(&work64[3]));
+ len += sprintf(buf+len, "FramesLost: " FMT_U64_HEX "\n", U64_VAL(&work64[4]));
+ len += sprintf(buf+len, "RingMgmtState: %s\n", ring_state[work64[5]]);
+ len += sprintf(buf+len, "LCTFailures: " FMT_U64_HEX "\n", U64_VAL(&work64[6]));
+ len += sprintf(buf+len, "LEMRejects: " FMT_U64_HEX "\n", U64_VAL(&work64[7]));
+ len += sprintf(buf+len, "LEMCount: " FMT_U64_HEX "\n", U64_VAL(&work64[8]));
+ len += sprintf(buf+len, "LConnectionState: %s\n", link_state[work64[9]]);
+
+ spin_unlock(&i2o_proc_lock);
+ return len;
+}
+
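+/*
+ * Walk a NULL-terminated table of i2o_proc_entry descriptors and create
+ * one proc file per entry under 'parent', wiring up the read/write
+ * handlers and stashing the controller or device pointer in ent->data.
+ */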
+static int i2o_proc_create_entries(void *data,
+ i2o_proc_entry *pentry, struct proc_dir_entry *parent)
+{
+ struct proc_dir_entry *ent;
+
+ while(pentry->name != NULL)
+ {
+ ent = create_proc_entry(pentry->name, pentry->mode, parent);
+ if(!ent) return -1;
+
+ ent->data = data;
+ ent->read_proc = pentry->read_proc;
+ ent->write_proc = pentry->write_proc;
+ ent->nlink = 1;
+
+ pentry++;
+ }
+
+ return 0;
+}
+
+static void i2o_proc_remove_entries(i2o_proc_entry *pentry,
+ struct proc_dir_entry *parent)
+{
+ while(pentry->name != NULL)
+ {
+ remove_proc_entry(pentry->name, parent);
+ pentry++;
+ }
+}
+
+static int i2o_proc_add_controller(struct i2o_controller *pctrl,
+ struct proc_dir_entry *root )
+{
+ struct proc_dir_entry *dir, *dir1;
+ struct i2o_device *dev;
+ char buff[10];
+
+ sprintf(buff, "iop%d", pctrl->unit);
+
+ dir = create_proc_entry(buff, S_IFDIR, root);
+ if(!dir)
+ return -1;
+
+ pctrl->proc_entry = dir;
+
+ i2o_proc_create_entries(pctrl, generic_iop_entries, dir);
+
+ for(dev = pctrl->devices; dev; dev = dev->next)
+ {
+ sprintf(buff, "%0#5x", dev->id);
+
+ dir1 = create_proc_entry(buff, S_IFDIR, dir);
+ dev->proc_entry = dir1;
+
+ if(!dir1)
+ printk(KERN_INFO "i2o_proc: Could not allocate proc dir\n");
+
+ i2o_proc_create_entries(dev, generic_dev_entries, dir1);
+
+ switch(dev->class)
+ {
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ i2o_proc_create_entries(dev, rbs_dev_entries, dir1);
+ break;
+ case I2O_CLASS_LAN:
+ i2o_proc_create_entries(dev, lan_entries, dir1);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void i2o_proc_remove_controller(struct i2o_controller *pctrl,
+ struct proc_dir_entry *parent)
+{
+ char buff[10];
+
+ sprintf(buff, "iop%d", pctrl->unit);
+
+ i2o_proc_remove_entries(generic_iop_entries, pctrl->proc_entry);
+
+ remove_proc_entry(buff, parent);
+
+ pctrl->proc_entry = NULL;
+}
+
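+/*
+ * Build the /proc/i2o tree: one "iopN" directory per controller found,
+ * each populated by i2o_proc_add_controller() with a per-TID
+ * subdirectory holding the generic and class-specific entries.
+ */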
+static int create_i2o_procfs(void)
+{
+ struct i2o_controller *pctrl = NULL;
+ int i;
+
+ i2o_proc_dir_root = create_proc_entry("i2o", S_IFDIR, 0);
+ if(!i2o_proc_dir_root)
+ return -1;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ pctrl = i2o_find_controller(i);
+ if(pctrl)
+ i2o_proc_add_controller(pctrl, i2o_proc_dir_root);
+ };
+
+ return 0;
+}
+
+static int destroy_i2o_procfs(void)
+{
+ struct i2o_controller *pctrl = NULL;
+ int i;
+
+ if(!i2o_find_controller(0))
+ return -1;
+
+ for(i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ pctrl = i2o_find_controller(i);
+ if(pctrl)
+ i2o_proc_remove_controller(pctrl, i2o_proc_dir_root);
+ };
+
+ remove_proc_entry("i2o", 0);
+ return 0;
+}
+
+#ifdef MODULE
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("I2O procfs Handler");
+
+int init_module(void)
+{
+ if(create_i2o_procfs())
+ return -EBUSY;
+
+ if (i2o_install_handler(&i2o_proc_handler) < 0)
+ {
+ printk(KERN_ERR "i2o_proc: Unable to install PROC handler.\n");
+ return 0;
+ }
+
+ proc_context = i2o_proc_handler.context;
+
+ return 0;
+}
+
+void cleanup_module(void)
+{
+ destroy_i2o_procfs();
+ i2o_remove_handler(&i2o_proc_handler);
+}
+#endif
diff --git a/drivers/i2o/i2o_proc.h b/drivers/i2o/i2o_proc.h
new file mode 100644
index 000000000..fd659c396
--- /dev/null
+++ b/drivers/i2o/i2o_proc.h
@@ -0,0 +1,141 @@
+#ifndef i2oproc_h
+#define i2oproc_h
+
+/*
+ * Fixme: make this dependent on architecture
+ * The official header files do this already, but we can't use them.
+ */
+#define I2O_64BIT_CONTEXT 0
+
+typedef struct _i2o_msg {
+ u8 ver_offset;
+ u8 msg_flags;
+ u16 msg_size;
+ u32 target_addr:12;
+ u32 initiator_addr:12;
+ u32 function:8;
+ u32 init_context; /* FIXME: 64-bit support! */
+} i2o_msg, *pi2o_msg;
+
+typedef struct _i2o_reply_message {
+ i2o_msg msg_frame;
+ u32 tctx; /* FIXME: 64-bit */
+ u16 detailed_status_code;
+ u8 reserved;
+ u8 req_status;
+} i2o_reply_msg, *pi2o_reply_msg;
+
+typedef struct _i2o_mult_reply_message {
+ i2o_msg msg_frame;
+ u32 tctx; /* FIXME: 64-bit */
+ u16 detailed_status_code;
+ u8 reserved;
+ u8 req_status;
+} i2o_mult_reply_msg, *pi2o_mult_reply_msg;
+
+/**************************************************************************
+ * HRT related constants and structures
+ **************************************************************************/
+#define I2O_BUS_LOCAL 0
+#define I2O_BUS_ISA 1
+#define I2O_BUS_EISA 2
+#define I2O_BUS_MCA 3
+#define I2O_BUS_PCI 4
+#define I2O_BUS_PCMCIA 5
+#define I2O_BUS_NUBUS 6
+#define I2O_BUS_CARDBUS 7
+#define I2O_BUS_UNKNOWN 0x80
+
+typedef struct _i2o_pci_bus {
+ u8 PciFunctionNumber;
+ u8 PciDeviceNumber;
+ u8 PciBusNumber;
+ u8 reserved;
+ u16 PciVendorID;
+ u16 PciDeviceID;
+} i2o_pci_bus, *pi2o_pci_bus;
+
+typedef struct _i2o_local_bus {
+ u16 LbBaseIOPort;
+ u16 reserved;
+ u32 LbBaseMemoryAddress;
+} i2o_local_bus, *pi2o_local_bus;
+
+typedef struct _i2o_isa_bus {
+ u16 IsaBaseIOPort;
+ u8 CSN;
+ u8 reserved;
+ u32 IsaBaseMemoryAddress;
+} i2o_isa_bus, *pi2o_isa_bus;
+
+/* I2O_EISA_BUS_INFO */
+typedef struct _i2o_eisa_bus_info {
+ u16 EisaBaseIOPort;
+ u8 reserved;
+ u8 EisaSlotNumber;
+ u32 EisaBaseMemoryAddress;
+} i2o_eisa_bus, *pi2o_eisa_bus;
+
+typedef struct _i2o_mca_bus {
+ u16 McaBaseIOPort;
+ u8 reserved;
+ u8 McaSlotNumber;
+ u32 McaBaseMemoryAddress;
+} i2o_mca_bus, *pi2o_mca_bus;
+
+typedef struct _i2o_other_bus {
+ u16 BaseIOPort;
+ u16 reserved;
+ u32 BaseMemoryAddress;
+} i2o_other_bus, *pi2o_other_bus;
+
+
+typedef struct _i2o_hrt_entry {
+ u32 adapter_id;
+ u32 parent_tid:12;
+ u32 state:4;
+ u32 bus_num:8;
+ u32 bus_type:8;
+ union {
+ i2o_pci_bus pci_bus;
+ i2o_local_bus local_bus;
+ i2o_isa_bus isa_bus;
+ i2o_eisa_bus eisa_bus;
+ i2o_mca_bus mca_bus;
+ i2o_other_bus other_bus;
+ } bus;
+} i2o_hrt_entry, *pi2o_hrt_entry;
+
+typedef struct _i2o_hrt {
+ u16 num_entries;
+ u8 entry_len;
+ u8 hrt_version;
+ u32 change_ind;
+ i2o_hrt_entry hrt_entry[1];
+} i2o_hrt, *pi2o_hrt;
+
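+/*
+ * Logical Configuration Table: one entry per device (TID) that the
+ * IOP reports.
+ */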
+typedef struct _i2o_lct_entry {
+ u32 entry_size:16;
+ u32 tid:12;
+ u32 reserved:4;
+ u32 change_ind;
+ u32 device_flags;
+ u32 class_id;
+ u32 sub_class;
+ u32 user_tid:12;
+ u32 parent_tid:12;
+ u32 bios_info:8;
+ u8 identity_tag[8];
+ u32 event_capabilities;
+} i2o_lct_entry, *pi2o_lct_entry;
+
+typedef struct _i2o_lct {
+ u32 table_size:16;
+ u32 boot_tid:12;
+ u32 lct_ver:4;
+ u32 iop_flags;
+ u32 current_change_ind;
+ i2o_lct_entry lct_entry[1];
+} i2o_lct, *pi2o_lct;
+
+#endif /* i2oproc_h */
diff --git a/drivers/i2o/i2o_scsi.c b/drivers/i2o/i2o_scsi.c
new file mode 100644
index 000000000..505e3c22d
--- /dev/null
+++ b/drivers/i2o/i2o_scsi.c
@@ -0,0 +1,871 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Complications for I2O scsi
+ *
+ * o Each (bus,lun) is a logical device in I2O. We keep a map
+ * table. We spoof failed selection for unmapped units
+ * o Request sense buffers can come back for free.
+ * o Scatter gather is a bit dynamic. We have to investigate at
+ * setup time.
+ * o Some of our resources are dynamically shared. The i2o core
+ * needs a message reservation protocol to avoid swap v net
+ * deadlocking. We need to back off queue requests.
+ *
+ * In general the firmware wants to help. Where its help isn't useful for
+ * performance we just ignore the aid; it's not worth the code, in truth.
+ *
+ * Fixes:
+ * Steve Ralston : Scatter gather now works
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <linux/blk.h>
+#include <linux/version.h>
+#include <linux/i2o.h>
+#include "../scsi/scsi.h"
+#include "../scsi/hosts.h"
+#include "../scsi/sd.h"
+#include "i2o_scsi.h"
+
+#define VERSION_STRING "Version 0.0.1"
+
+#define dprintk(x)
+
+#define MAXHOSTS 32
+
+struct proc_dir_entry proc_scsi_i2o_scsi = {
+ PROC_SCSI_I2O, 8, "i2o_scsi", S_IFDIR | S_IRUGO | S_IXUGO, 2
+};
+
+struct i2o_scsi_host
+{
+ struct i2o_controller *controller;
+ s16 task[16][8]; /* Allow 16 devices for now */
+ unsigned long tagclock[16][8]; /* Tag clock for queueing */
+ s16 bus_task; /* The adapter TID */
+};
+
+static int scsi_context;
+static int lun_done;
+static int i2o_scsi_hosts;
+
+static u32 *retry[32];
+static struct i2o_controller *retry_ctrl[32];
+static struct timer_list retry_timer;
+static int retry_ct = 0;
+
+static atomic_t queue_depth;
+
+/*
+ * SG Chain buffer support...
+ */
+#define SG_MAX_FRAGS 64
+
+/*
+ * FIXME: we should allocate one of these per bus we find as we
+ * locate them not in a lump at boot.
+ */
+
+typedef struct _chain_buf
+{
+ u32 sg_flags_cnt[SG_MAX_FRAGS];
+ u32 sg_buf[SG_MAX_FRAGS];
+} chain_buf;
+
+#define SG_CHAIN_BUF_SZ sizeof(chain_buf)
+
+#define SG_MAX_BUFS (i2o_num_controllers * I2O_SCSI_CAN_QUEUE)
+#define SG_CHAIN_POOL_SZ (SG_MAX_BUFS * SG_CHAIN_BUF_SZ)
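+
+/*
+ * One chain buffer for every command that can be outstanding across all
+ * controllers (I2O_SCSI_CAN_QUEUE per controller); buffers are handed
+ * out round-robin via sg_chain_tag when a request has to chain its
+ * scatter-gather list.
+ */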
+
+static int max_sg_len = 0;
+static chain_buf *sg_chain_pool = NULL;
+static int sg_chain_tag = 0;
+static int sg_max_frags = SG_MAX_FRAGS;
+
+/*
+ * Retry congested frames. This actually needs pushing down into
+ * i2o core. We should only bother the OSM with this when we can't
+ * queue and retry the frame. Or perhaps we should call the OSM
+ * and its default handler should be this in the core, and this
+ * call a 2nd "I give up" handler in the OSM ?
+ */
+
+static void i2o_retry_run(unsigned long f)
+{
+ int i;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ for(i=0;i<retry_ct;i++)
+ i2o_post_message(retry_ctrl[i], virt_to_bus(retry[i]));
+ retry_ct=0;
+
+ restore_flags(flags);
+}
+
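+/*
+ * Turn any frames still queued for retry into UTIL NOPs and post them,
+ * so the IOP releases the message frames when the OSM is torn down
+ * instead of our retrying them later.
+ */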
+static void flush_pending(void)
+{
+ int i;
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+
+ for(i=0;i<retry_ct;i++)
+ {
+ retry[i][0]&=~0xFFFFFF;
+ retry[i][0]|=I2O_CMD_UTIL_NOP<<24;
+ i2o_post_message(retry_ctrl[i],virt_to_bus(retry[i]));
+ }
+ retry_ct=0;
+
+ restore_flags(flags);
+}
+
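+/*
+ * Reply handler registered with the i2o core.  Handles transport-level
+ * failures (re-posting congested frames via the retry timer), bus reset
+ * and abort replies, and normal command completion, translating the
+ * adapter and device status bytes into a SCSI result code.
+ */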
+static void i2o_scsi_reply(struct i2o_handler *h, struct i2o_controller *c, struct i2o_message *msg)
+{
+ Scsi_Cmnd *current_command;
+ u32 *m = (u32 *)msg;
+ u8 as,ds,st;
+
+ if(m[0] & (1<<13))
+ {
+ printk("IOP fail.\n");
+ printk("From %d To %d Cmd %d.\n",
+ (m[1]>>12)&0xFFF,
+ m[1]&0xFFF,
+ m[1]>>24);
+ printk("Failure Code %d.\n", m[4]>>24);
+ if(m[4]&(1<<16))
+ printk("Format error.\n");
+ if(m[4]&(1<<17))
+ printk("Path error.\n");
+ if(m[4]&(1<<18))
+ printk("Path State.\n");
+ if(m[4]&(1<<18))
+ printk("Congestion.\n");
+
+ m=(u32 *)bus_to_virt(m[7]);
+ printk("Failing message is %p.\n", m);
+
+ if((m[4]&(1<<18)) && retry_ct < 32)
+ {
+ retry_ctrl[retry_ct]=c;
+ retry[retry_ct]=m;
+ if(!retry_ct++)
+ {
+ retry_timer.expires=jiffies+1;
+ add_timer(&retry_timer);
+ }
+ }
+ else
+ {
+ /* Create a scsi error for this */
+ current_command = (Scsi_Cmnd *)m[3];
+ printk("Aborted %ld\n", current_command->serial_number);
+
+ spin_lock_irq(&io_request_lock);
+ current_command->result = DID_ERROR << 16;
+ current_command->scsi_done(current_command);
+ spin_unlock_irq(&io_request_lock);
+
+ /* Now flush the message by making it a NOP */
+ m[0]&=0x00FFFFFF;
+ m[0]|=(I2O_CMD_UTIL_NOP)<<24;
+ i2o_post_message(c,virt_to_bus(m));
+ }
+ return;
+ }
+
+
+ /* Low byte is the adapter status, next is the device */
+ as=(u8)m[4];
+ ds=(u8)(m[4]>>8);
+ st=(u8)(m[4]>>24);
+
+ dprintk(("i2o got a scsi reply %08X: ", m[0]));
+ dprintk(("m[2]=%08X: ", m[2]));
+ dprintk(("m[4]=%08X\n", m[4]));
+
+ if(m[2]&0x80000000)
+ {
+ if(m[2]&0x40000000)
+ {
+ dprintk(("Event.\n"));
+ lun_done=1;
+ return;
+ }
+ printk(KERN_ERR "i2o_scsi: bus reset reply.\n");
+ return;
+ }
+
+ current_command = (Scsi_Cmnd *)m[3];
+
+ /*
+ * Is this a control request coming back - eg an abort ?
+ */
+
+ if(current_command==NULL)
+ {
+ if(st)
+ dprintk(("SCSI abort: %08X", m[4]));
+ dprintk(("SCSI abort completed.\n"));
+ return;
+ }
+
+ dprintk(("Completed %ld\n", current_command->serial_number));
+
+ atomic_dec(&queue_depth);
+
+ if(st == 0x06)
+ {
+ if(m[5] < current_command->underflow)
+ {
+ int i;
+ printk(KERN_ERR "SCSI: underflow 0x%08X 0x%08X\n",
+ m[5], current_command->underflow);
+ printk("Cmd: ");
+ for(i=0;i<15;i++)
+ printk("%02X ", current_command->cmnd[i]);
+ printk(".\n");
+ }
+ else st=0;
+ }
+
+ if(st)
+ {
+		/* An error has occurred */
+
+ dprintk((KERN_DEBUG "SCSI error %08X", m[4]));
+
+ if (ds == 0x0E)
+ /* SCSI Reset */
+ current_command->result = DID_RESET << 16;
+ else if (ds == 0x0F)
+ current_command->result = DID_PARITY << 16;
+ else
+ current_command->result = DID_ERROR << 16;
+ }
+ else
+ /*
+		 *	It worked, maybe?
+ */
+ current_command->result = DID_OK << 16 | ds;
+ spin_lock(&io_request_lock);
+ current_command->scsi_done(current_command);
+ spin_unlock(&io_request_lock);
+ return;
+}
+
+struct i2o_handler i2o_scsi_handler=
+{
+ i2o_scsi_reply,
+ "I2O SCSI OSM",
+ 0
+};
+
+static int i2o_find_lun(struct i2o_controller *c, struct i2o_device *d, int *target, int *lun)
+{
+ u8 reply[8];
+
+ if(i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+ 0, 3, reply, 4, &lun_done)<0)
+ return -1;
+
+ *target=reply[0];
+
+ if(i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+ 0, 4, reply, 8, &lun_done)<0)
+ return -1;
+
+ *lun=reply[1];
+
+ dprintk(("SCSI (%d,%d)\n", *target, *lun));
+ return 0;
+}
+
+static void i2o_scsi_init(struct i2o_controller *c, struct i2o_device *d, struct Scsi_Host *shpnt)
+{
+ struct i2o_device *unit;
+ struct i2o_scsi_host *h =(struct i2o_scsi_host *)shpnt->hostdata;
+ int lun;
+ int target;
+
+ h->controller=c;
+ h->bus_task=d->id;
+
+ for(target=0;target<16;target++)
+ for(lun=0;lun<8;lun++)
+ h->task[target][lun] = -1;
+
+ for(unit=c->devices;unit!=NULL;unit=unit->next)
+ {
+ dprintk(("Class %03X, parent %d, want %d.\n",
+ unit->class, unit->parent, d->id));
+
+ /* Only look at scsi and fc devices */
+ if ( (unit->class != I2O_CLASS_SCSI_PERIPHERAL)
+ && (unit->class != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL)
+ )
+ continue;
+
+ /* On our bus ? */
+ dprintk(("Found a disk.\n"));
+ if ( (unit->parent == d->id)
+ || (unit->parent == d->parent)
+ )
+ {
+ u16 limit;
+ dprintk(("Its ours.\n"));
+ if(i2o_find_lun(c, unit, &target, &lun)==-1)
+ {
+ printk(KERN_ERR "i2o_scsi: Unable to get lun for tid %d.\n", d->id);
+ continue;
+ }
+ dprintk(("Found disk %d %d.\n", target, lun));
+ h->task[target][lun]=unit->id;
+ h->tagclock[target][lun]=jiffies;
+
+ /* Get the max fragments/request */
+ i2o_query_scalar(c, d->id, scsi_context|0x40000000,
+ 0xF103, 3, &limit, 2, &lun_done);
+
+ /* sanity */
+ if ( limit == 0 )
+ {
+ printk(KERN_WARNING "i2o_scsi: Ignoring unreasonable SG limit of 0 from IOP!\n");
+ limit = 1;
+ }
+
+ shpnt->sg_tablesize = limit;
+
+ dprintk(("i2o_scsi: set scatter-gather to %d.\n",
+ shpnt->sg_tablesize));
+ }
+ }
+}
+
+int i2o_scsi_detect(Scsi_Host_Template * tpnt)
+{
+ unsigned long flags;
+ struct Scsi_Host *shpnt = NULL;
+ int i;
+ int count;
+
+ printk("i2o_scsi.c: %s\n", VERSION_STRING);
+
+ if(i2o_install_handler(&i2o_scsi_handler)<0)
+ {
+ printk(KERN_ERR "i2o_scsi: Unable to install OSM handler.\n");
+ return 0;
+ }
+ scsi_context = i2o_scsi_handler.context;
+
+ if((sg_chain_pool = kmalloc(SG_CHAIN_POOL_SZ, GFP_KERNEL)) == NULL)
+ {
+ printk("i2o_scsi: Unable to alloc %d byte SG chain buffer pool.\n", SG_CHAIN_POOL_SZ);
+ printk("i2o_scsi: SG chaining DISABLED!\n");
+ sg_max_frags = 11;
+ }
+ else
+ {
+ printk(" chain_pool: %d bytes @ %p\n", SG_CHAIN_POOL_SZ, sg_chain_pool);
+ printk(" (%d byte buffers X %d can_queue X %d i2o controllers)\n",
+ SG_CHAIN_BUF_SZ, I2O_SCSI_CAN_QUEUE, i2o_num_controllers);
+ sg_max_frags = SG_MAX_FRAGS; // 64
+ }
+
+ init_timer(&retry_timer);
+ retry_timer.data = 0UL;
+ retry_timer.function = i2o_retry_run;
+
+// printk("SCSI OSM at %d.\n", scsi_context);
+
+ for (count = 0, i = 0; i < MAX_I2O_CONTROLLERS; i++)
+ {
+ struct i2o_controller *c=i2o_find_controller(i);
+ struct i2o_device *d;
+ /*
+ * This controller doesn't exist.
+ */
+
+ if(c==NULL)
+ continue;
+
+ /*
+ * Fixme - we need some altered device locking. This
+ * is racing with device addition in theory. Easy to fix.
+ */
+
+ for(d=c->devices;d!=NULL;d=d->next)
+ {
+ /*
+ * bus_adapter, SCSI (obsolete), or FibreChannel busses only
+ */
+ if( (d->class!=I2O_CLASS_BUS_ADAPTER_PORT) // bus_adapter
+ && (d->class!=I2O_CLASS_FIBRE_CHANNEL_PORT) // FC_PORT
+ )
+ continue;
+
+// printk("Found a controller.\n");
+ shpnt = scsi_register(tpnt, sizeof(struct i2o_scsi_host));
+ save_flags(flags);
+ cli();
+ shpnt->unique_id = (u32)d;
+ shpnt->io_port = 0;
+ shpnt->n_io_port = 0;
+ shpnt->irq = 0;
+ shpnt->this_id = /* Good question */15;
+ restore_flags(flags);
+// printk("Scanning I2O port %d.\n", d->id);
+ i2o_scsi_init(c, d, shpnt);
+ count++;
+ }
+ }
+ i2o_scsi_hosts = count;
+
+ if(count==0)
+ {
+ if(sg_chain_pool!=NULL)
+ {
+ kfree(sg_chain_pool);
+ sg_chain_pool = NULL;
+ }
+ flush_pending();
+ del_timer(&retry_timer);
+ i2o_remove_handler(&i2o_scsi_handler);
+ }
+
+ return count;
+}
+
+int i2o_scsi_release(struct Scsi_Host *host)
+{
+ if(--i2o_scsi_hosts==0)
+ {
+ if(sg_chain_pool!=NULL)
+ {
+ kfree(sg_chain_pool);
+ sg_chain_pool = NULL;
+ }
+ flush_pending();
+ del_timer(&retry_timer);
+ i2o_remove_handler(&i2o_scsi_handler);
+ }
+ return 0;
+}
+
+
+const char *i2o_scsi_info(struct Scsi_Host *SChost)
+{
+ struct i2o_scsi_host *hostdata;
+
+ hostdata = (struct i2o_scsi_host *)SChost->hostdata;
+
+ return(&hostdata->controller->name[0]);
+}
+
+
+/*
+ * From the wd93 driver:
+ * Returns true if there will be a DATA_OUT phase with this command,
+ * false otherwise.
+ * (Thanks to Joerg Dorchain for the research and suggestion.)
+ *
+ */
+static int is_dir_out(Scsi_Cmnd *cmd)
+{
+ switch (cmd->cmnd[0])
+ {
+ case WRITE_6: case WRITE_10: case WRITE_12:
+ case WRITE_LONG: case WRITE_SAME: case WRITE_BUFFER:
+ case WRITE_VERIFY: case WRITE_VERIFY_12:
+ case COMPARE: case COPY: case COPY_VERIFY:
+ case SEARCH_EQUAL: case SEARCH_HIGH: case SEARCH_LOW:
+ case SEARCH_EQUAL_12: case SEARCH_HIGH_12: case SEARCH_LOW_12:
+ case FORMAT_UNIT: case REASSIGN_BLOCKS: case RESERVE:
+ case MODE_SELECT: case MODE_SELECT_10: case LOG_SELECT:
+ case SEND_DIAGNOSTIC: case CHANGE_DEFINITION: case UPDATE_BLOCK:
+ case SET_WINDOW: case MEDIUM_SCAN: case SEND_VOLUME_TAG:
+ case 0xea:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int i2o_scsi_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
+{
+ int i;
+ int tid;
+ struct i2o_controller *c;
+ Scsi_Cmnd *current_command;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 *msg, *mptr;
+ u32 m;
+ u32 *lenptr;
+ int direction;
+ int scsidir;
+ u32 len;
+
+ static int max_qd = 1;
+
+ /*
+ * The scsi layer should be handling this stuff
+ */
+
+ if(is_dir_out(SCpnt))
+ {
+ direction=0x04000000;
+ scsidir=0x80000000;
+ }
+ else
+ {
+ scsidir=0x40000000;
+ direction=0x00000000;
+ }
+
+ /*
+ * Do the incoming paperwork
+ */
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ SCpnt->scsi_done = done;
+
+ if(SCpnt->target > 15)
+ {
+ printk(KERN_ERR "i2o_scsi: Wild target %d.\n", SCpnt->target);
+ return -1;
+ }
+
+ tid = hostdata->task[SCpnt->target][SCpnt->lun];
+
+ dprintk(("qcmd: Tid = %d\n", tid));
+
+ current_command = SCpnt; /* set current command */
+ current_command->scsi_done = done; /* set ptr to done function */
+
+ /* We don't have such a device. Pretend we did the command
+ and that selection timed out */
+
+ if(tid == -1)
+ {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ dprintk(("Real scsi messages.\n"));
+
+ c = hostdata->controller;
+
+ /*
+ * Obtain an I2O message. Right now we _have_ to obtain one
+ * until the scsi layer stuff is cleaned up.
+ */
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF);
+ msg = bus_to_virt(c->mem_offset + m);
+
+ /*
+ * Put together a scsi execscb message
+ */
+
+ msg[1] = I2O_CMD_SCSI_EXEC<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context; /* So the I2O layer passes to us */
+ /* Sorry 64bit folks. FIXME */
+ msg[3] = (u32)SCpnt; /* We want the SCSI control block back */
+ /* Direction, disconnect ok, no tagging (yet) */
+ msg[4] = scsidir|(1<<29)|SCpnt->cmd_len;
+
+ /*
+ * Attach tags to the devices
+ */
+ if(SCpnt->device->tagged_supported)
+ {
+ /*
+ * Some drives are too stupid to handle fairness issues
+ * with tagged queueing. We throw in the odd ordered
+ * tag to stop them starving themselves.
+ */
+ if((jiffies - hostdata->tagclock[SCpnt->target][SCpnt->lun]) > (5*HZ))
+ {
+ msg[4]|=(1<<23)|(1<<24);
+ hostdata->tagclock[SCpnt->target][SCpnt->lun]=jiffies;
+ }
+ else switch(SCpnt->tag)
+ {
+ case SIMPLE_QUEUE_TAG:
+ msg[4]|=(1<<23);
+ break;
+ case HEAD_OF_QUEUE_TAG:
+ msg[4]|=(1<<24);
+ break;
+ case ORDERED_QUEUE_TAG:
+ msg[4]|=(1<<23)|(1<<24);
+ break;
+ default:
+ msg[4]|=(1<<23);
+ }
+ }
+
+ mptr=msg+5;
+
+ /*
+ * Write SCSI command into the message - always 16 byte block
+ */
+
+ memcpy(mptr, SCpnt->cmnd, 16);
+ mptr+=4;
+ lenptr=mptr++; /* Remember me - fill in when we know */
+
+
+ /*
+ * Now fill in the SGList and command
+ *
+ * FIXME: we need to set the sglist limits according to the
+ * message size of the I2O controller. We might only have room
+ * for 6 or so worst case
+ */
+
+ if(SCpnt->use_sg)
+ {
+ struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
+
+ if((sg_max_frags > 11) && (SCpnt->use_sg > 11))
+ {
+ /*
+ * Need to chain!
+ */
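+			/*
+			 * Too many fragments to fit inline in the message
+			 * frame: emit a single SG element pointing at a
+			 * chain_buf from the pool and build the real list
+			 * in that buffer instead.
+			 */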
+ SCpnt->host_scribble = (void*)(sg_chain_pool + sg_chain_tag);
+ *mptr++=direction|0xB0000000|(SCpnt->use_sg*2*4);
+ *mptr=virt_to_bus(SCpnt->host_scribble);
+ mptr = (u32*)SCpnt->host_scribble;
+ if (SCpnt->use_sg > max_sg_len)
+ {
+ max_sg_len = SCpnt->use_sg;
+ printk("i2o_scsi: Chain SG! SCpnt=%p, SG_FragCnt=%d, SG_idx=%d\n",
+ SCpnt, SCpnt->use_sg, (chain_buf*)SCpnt->host_scribble-sg_chain_pool);
+ }
+ if ( ++sg_chain_tag == SG_MAX_BUFS )
+ sg_chain_tag = 0;
+ }
+
+ len = 0;
+
+ for(i = 0 ; i < SCpnt->use_sg; i++)
+ {
+ *mptr++=direction|0x10000000|sg->length;
+ len+=sg->length;
+ *mptr++=virt_to_bus(sg->address);
+ sg++;
+ }
+ mptr[-2]|=0xC0000000; /* End of List and block */
+ *lenptr=len;
+ if(len != SCpnt->underflow)
+ printk("Cmd len %08X Cmd underflow %08X\n",
+ len, SCpnt->underflow);
+ }
+ else
+ {
+ dprintk(("non sg for %p, %d\n", SCpnt->request_buffer,
+ SCpnt->request_bufflen));
+ *mptr++=0xD0000000|direction|SCpnt->request_bufflen;
+ *mptr++=virt_to_bus(SCpnt->request_buffer);
+ *lenptr = len = SCpnt->request_bufflen;
+ /* No transfer ? - fix up the request */
+ if(len == 0)
+ msg[4]&=~0xC0000000;
+ }
+
+ /*
+ * Stick the headers on
+ */
+
+ msg[0] = (mptr-msg)<<16 | SGL_OFFSET_10;
+
+ /* Queue the message */
+ i2o_post_message(c,m);
+
+ atomic_inc(&queue_depth);
+
+ if(atomic_read(&queue_depth)> max_qd)
+ {
+ max_qd=atomic_read(&queue_depth);
+ printk("Queue depth now %d.\n", max_qd);
+ }
+
+ mb();
+ dprintk(("Issued %ld\n", current_command->serial_number));
+
+ return 0;
+}
+
+static void internal_done(Scsi_Cmnd * SCpnt)
+{
+ SCpnt->SCp.Status++;
+}
+
+int i2o_scsi_command(Scsi_Cmnd * SCpnt)
+{
+ i2o_scsi_queuecommand(SCpnt, internal_done);
+ SCpnt->SCp.Status = 0;
+ while (!SCpnt->SCp.Status)
+ barrier();
+ return SCpnt->result;
+}
+
+int i2o_scsi_abort(Scsi_Cmnd * SCpnt)
+{
+ struct i2o_controller *c;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 *msg;
+ u32 m;
+ int tid;
+
+ printk("i2o_scsi_abort\n");
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ tid = hostdata->task[SCpnt->target][SCpnt->lun];
+ if(tid==-1)
+ {
+ printk(KERN_ERR "impossible command to abort.\n");
+ return SCSI_ABORT_NOT_RUNNING;
+ }
+ c = hostdata->controller;
+
+ /*
+ * Obtain an I2O message. Right now we _have_ to obtain one
+ * until the scsi layer stuff is cleaned up.
+ */
+
+ do
+ {
+ mb();
+ m = I2O_POST_READ32(c);
+ }
+ while(m==0xFFFFFFFF);
+ msg = bus_to_virt(c->mem_offset + m);
+
+ msg[0] = FIVE_WORD_MSG_SIZE;
+ msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context;
+ msg[3] = 0; /* Not needed for an abort */
+ msg[4] = (u32)SCpnt;
+ wmb();
+ i2o_post_message(c,m);
+ wmb();
+// SCpnt->result = DID_RESET << 16;
+// SCpnt->scsi_done(SCpnt);
+ return SCSI_ABORT_PENDING;
+}
+
+int i2o_scsi_reset(Scsi_Cmnd * SCpnt, unsigned int reset_flags)
+{
+ int tid;
+ struct i2o_controller *c;
+ struct Scsi_Host *host;
+ struct i2o_scsi_host *hostdata;
+ u32 m;
+ u32 *msg;
+
+ printk("i2o_scsi_reset\n");
+
+ /*
+ * Find the TID for the bus
+ */
+
+ host = SCpnt->host;
+ hostdata = (struct i2o_scsi_host *)host->hostdata;
+ tid = hostdata->bus_task;
+ c = hostdata->controller;
+
+ /*
+ * Now send a SCSI reset request. Any remaining commands
+ * will be aborted by the IOP. We need to catch the reply
+ * possibly ?
+ */
+
+ m = I2O_POST_READ32(c);
+
+ /*
+ * No free messages, try again next time - no big deal
+ */
+
+ if(m == 0xFFFFFFFF)
+ return SCSI_RESET_PUNT;
+
+ msg = bus_to_virt(c->mem_offset + m);
+ msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
+ msg[1] = I2O_CMD_SCSI_BUSRESET<<24|HOST_TID<<12|tid;
+ msg[2] = scsi_context|0x80000000;
+ /* We use the top bit to split controller and unit transactions */
+ /* Now store unit,tid so we can tie the completion back to a specific device */
+ msg[3] = c->unit << 16 | tid;
+ i2o_post_message(c,m);
+ return SCSI_RESET_PENDING;
+}
+
+/*
+ * This is anyone's guess, quite frankly.
+ */
+
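+/*
+ * Invent a CHS geometry from the capacity: 64 heads / 32 sectors for
+ * small disks, switching to 255 / 63 once that would exceed 1024
+ * cylinders.
+ */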
+int i2o_scsi_bios_param(Disk * disk, kdev_t dev, int *ip)
+{
+ int size;
+
+ size = disk->capacity;
+ ip[0] = 64; /* heads */
+ ip[1] = 32; /* sectors */
+ if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
+ ip[0] = 255; /* heads */
+ ip[1] = 63; /* sectors */
+ ip[2] = size / (255 * 63); /* cylinders */
+ }
+ return 0;
+}
+
+/* Loadable module support */
+#ifdef MODULE
+
+MODULE_AUTHOR("Red Hat Software");
+
+Scsi_Host_Template driver_template = I2OSCSI;
+
+#include "../scsi/scsi_module.c"
+#endif
diff --git a/drivers/i2o/i2o_scsi.h b/drivers/i2o/i2o_scsi.h
new file mode 100644
index 000000000..c6c88bc5e
--- /dev/null
+++ b/drivers/i2o/i2o_scsi.h
@@ -0,0 +1,48 @@
+#ifndef _I2O_SCSI_H
+#define _I2O_SCSI_H
+
+#if !defined(LINUX_VERSION_CODE)
+#include <linux/version.h>
+#endif
+
+#define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
+
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+
+#define I2O_SCSI_ID 15
+#define I2O_SCSI_CAN_QUEUE 8
+#define I2O_SCSI_CMD_PER_LUN 6
+
+extern struct proc_dir_entry proc_scsi_i2o_scsi;
+
+extern int i2o_scsi_detect(Scsi_Host_Template *);
+extern const char *i2o_scsi_info(struct Scsi_Host *);
+extern int i2o_scsi_command(Scsi_Cmnd *);
+extern int i2o_scsi_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
+extern int i2o_scsi_abort(Scsi_Cmnd *);
+extern int i2o_scsi_reset(Scsi_Cmnd *, unsigned int);
+extern int i2o_scsi_bios_param(Disk *, kdev_t, int *);
+extern void i2o_scsi_setup(char *str, int *ints);
+
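+/*
+ * Scsi_Host_Template initializer tying the entry points above into the
+ * SCSI mid-layer.
+ */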
+#define I2OSCSI { \
+ next: NULL, \
+ proc_dir: &proc_scsi_i2o_scsi, \
+ name: "I2O SCSI Layer", \
+ detect: i2o_scsi_detect, \
+ release: i2o_scsi_release, \
+ info: i2o_scsi_info, \
+ command: i2o_scsi_command, \
+ queuecommand: i2o_scsi_queuecommand, \
+ abort: i2o_scsi_abort, \
+ reset: i2o_scsi_reset, \
+ bios_param: i2o_scsi_bios_param, \
+ can_queue: I2O_SCSI_CAN_QUEUE, \
+ this_id: I2O_SCSI_ID, \
+ sg_tablesize: 8, \
+ cmd_per_lun: I2O_SCSI_CMD_PER_LUN, \
+ unchecked_isa_dma: 0, \
+ use_clustering: ENABLE_CLUSTERING \
+ }
+
+#endif