summaryrefslogtreecommitdiffstats
path: root/drivers/s390
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390')
-rw-r--r--drivers/s390/Config.in70
-rw-r--r--drivers/s390/Makefile40
-rw-r--r--drivers/s390/block/Makefile31
-rw-r--r--drivers/s390/block/dasd.c1560
-rw-r--r--drivers/s390/block/dasd_ccwstuff.c419
-rw-r--r--drivers/s390/block/dasd_ccwstuff.h9
-rw-r--r--drivers/s390/block/dasd_eckd.c973
-rw-r--r--drivers/s390/block/dasd_erp.c21
-rw-r--r--drivers/s390/block/dasd_erp.h15
-rw-r--r--drivers/s390/block/dasd_mdsk.c14
-rw-r--r--drivers/s390/block/dasd_proc.c116
-rw-r--r--drivers/s390/block/dasd_profile.c208
-rw-r--r--drivers/s390/block/dasd_types.h284
-rw-r--r--drivers/s390/block/mdisk.c790
-rw-r--r--drivers/s390/block/mdisk.h94
-rw-r--r--drivers/s390/char/Makefile16
-rw-r--r--drivers/s390/char/con3215.c1129
-rw-r--r--drivers/s390/char/hwc.h249
-rw-r--r--drivers/s390/char/hwc_con.c99
-rw-r--r--drivers/s390/char/hwc_rw.c2016
-rw-r--r--drivers/s390/char/hwc_rw.h113
-rw-r--r--drivers/s390/char/hwc_tty.c265
-rw-r--r--drivers/s390/misc/Makefile13
-rw-r--r--drivers/s390/misc/chandev.c759
-rw-r--r--drivers/s390/net/Makefile16
-rw-r--r--drivers/s390/net/ctc.c1581
-rw-r--r--drivers/s390/net/iucv.c1178
-rw-r--r--drivers/s390/net/iucv.h146
28 files changed, 12224 insertions, 0 deletions
diff --git a/drivers/s390/Config.in b/drivers/s390/Config.in
new file mode 100644
index 000000000..257c65d9a
--- /dev/null
+++ b/drivers/s390/Config.in
@@ -0,0 +1,70 @@
+mainmenu_option next_comment
+comment 'S/390 block device drivers'
+
+tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
+if [ "$CONFIG_NET" = "y" ]; then
+ tristate 'Network block device support' CONFIG_BLK_DEV_NBD
+fi
+bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD
+if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
+ tristate ' Linear (append) mode' CONFIG_MD_LINEAR
+ tristate ' RAID-0 (striping) mode' CONFIG_MD_STRIPED
+ tristate ' RAID-1 (mirroring) mode' CONFIG_MD_MIRRORING
+ tristate ' RAID-4/RAID-5 mode' CONFIG_MD_RAID5
+fi
+if [ "$CONFIG_MD_LINEAR" = "y" -o "$CONFIG_MD_STRIPED" = "y" ]; then
+ bool ' Boot support (linear, striped)' CONFIG_MD_BOOT
+fi
+tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
+if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
+ bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
+fi
+
+bool 'Support for VM minidisk (VM only)' CONFIG_MDISK
+if [ "$CONFIG_MDISK" = "y" ]; then
+ bool ' Support for synchronous read-write' CONFIG_MDISK_SYNC
+fi
+
+tristate 'Support for DASD devices' CONFIG_DASD
+if [ "$CONFIG_DASD" != "n" ]; then
+ comment 'DASD disciplines'
+ bool ' Support for ECKD Disks' CONFIG_DASD_ECKD
+# bool ' Support for CKD Disks' CONFIG_DASD_CKD
+ bool ' Support for DIAG access to CMS reserved Disks' CONFIG_DASD_MDSK
+fi
+
+#menu_option next_comment
+#endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'S/390 Network device support'
+ bool 'Channel Device Configuration (Temporary Option)' CONFIG_CHANDEV
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ menu_option next_comment
+ comment 'S390 Network devices'
+ bool 'CTC device support' CONFIG_CTC
+ bool 'IUCV device support (VM only)' CONFIG_IUCV
+ tristate 'Dummy net driver support' CONFIG_DUMMY
+ bool 'Ethernet (10 or 100Mbit)' CONFIG_NET_ETHERNET
+ bool 'Token Ring driver support' CONFIG_TR
+ bool 'FDDI driver support' CONFIG_FDDI
+ fi
+ endmenu
+fi
+
+mainmenu_option next_comment
+comment 'S/390 Terminal and Console options'
+
+bool 'Support for 3215 line mode terminal' CONFIG_3215
+if [ "$CONFIG_3215" = "y" ]; then
+ bool 'Support for console on 3215 line mode terminal' CONFIG_3215_CONSOLE
+fi
+
+bool 'Support for HWC line mode terminal' CONFIG_HWC
+if [ "$CONFIG_HWC" = "y" ]; then
+ bool 'console on HWC line mode terminal' CONFIG_HWC_CONSOLE
+fi
+endmenu
+
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
new file mode 100644
index 000000000..009916137
--- /dev/null
+++ b/drivers/s390/Makefile
@@ -0,0 +1,40 @@
+#
+# Makefile for the S/390 device drivers.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+all: io.o
+
+CFLAGS +=
+O_TARGET := io.o
+O_OBJS :=
+M_OBJS :=
+
+SUBDIRS := $(SUBDIRS) arch/s390/drivers/block arch/s390/drivers/char \
+ arch/s390/drivers/misc arch/s390/drivers/net
+MOD_SUB_DIRS += ./net
+
+O_OBJS := block/s390-block.o \
+ char/s390-char.o \
+ misc/s390-misc.o \
+ net/s390-net.o
+
+io.o: $(O_OBJS)
+
+block/s390-block.o: dummy
+ $(MAKE) -C block
+
+char/s390-char.o: dummy
+ $(MAKE) -C char
+
+misc/s390-misc.o: dummy
+ $(MAKE) -C misc
+
+net/s390-net.o: dummy
+ $(MAKE) -C net
+
+include $(TOPDIR)/Rules.make
diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile
new file mode 100644
index 000000000..16a56acb1
--- /dev/null
+++ b/drivers/s390/block/Makefile
@@ -0,0 +1,31 @@
+all: s390-block.o
+
+CFLAGS +=
+O_TARGET := s390-block.o
+O_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_DASD),y)
+ O_OBJS += dasd.o dasd_ccwstuff.o dasd_erp.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ O_OBJS += dasd_proc.o dasd_profile.o
+ endif
+ ifeq ($(CONFIG_DASD_ECKD),y)
+ O_OBJS += dasd_eckd.o
+ endif
+ ifeq ($(CONFIG_DASD_MDSK),y)
+ O_OBJS += dasd_mdsk.o
+ endif
+# ifeq ($(CONFIG_DASD_CKD),y)
+# O_OBJS += dasd_ckd.o
+# endif
+endif
+
+ifeq ($(CONFIG_MDISK),y)
+ O_OBJS += mdisk.o
+endif
+
+dasd_mod.o: $(D_OBJS)
+ $(LD) $(LD_RFLAG) -r -o $@ $+
+
+include $(TOPDIR)/Rules.make
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
new file mode 100644
index 000000000..79e8f27dc
--- /dev/null
+++ b/drivers/s390/block/dasd.c
@@ -0,0 +1,1560 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * : Utz Bacher <utz.bacher@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif /* MODULE */
+
+#include <linux/tqueue.h>
+#include <linux/timer.h>
+#include <linux/malloc.h>
+#include <linux/genhd.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/hdreg.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+#include <asm/irq.h>
+
+#include <linux/dasd.h>
+#include <linux/blk.h>
+
+#include "dasd_erp.h"
+#include "dasd_types.h"
+#include "dasd_ccwstuff.h"
+
+#define PRINTK_HEADER DASD_NAME":"
+
+#define CCW_READ_DEVICE_CHARACTERISTICS 0x64
+
+#define DASD_SSCH_RETRIES 2
+
+/* This macro is a little tricky, but makes the code more easy to read... */
+#define MATCH(info,ct,cm,dt,dm) ( \
+(( info -> sid_data.cu_type ct ) && ( info -> sid_data.cu_model cm )) && \
+(( info -> sid_data.dev_type dt ) && ( info -> sid_data.dev_model dm )) )
+
+/* Prototypes for the functions called from external */
+static int dasd_ioctl (struct inode *, struct file *, unsigned int, unsigned long);
+static int dasd_open (struct inode *, struct file *);
+static int dasd_release (struct inode *, struct file *);
+
+void dasd_debug (unsigned long tag);
+void dasd_profile_add (cqr_t *cqr);
+void dasd_proc_init (void);
+
+static int dasd_format( int, format_data_t * );
+
+static struct block_device_operations dasd_device_operations;
+
+spinlock_t dasd_lock; /* general purpose lock for the dasd driver */
+
+/* All asynchronous I/O should wait on this wait_queue */
+wait_queue_head_t dasd_waitq;
+
+static int dasd_autodetect = 1;
+static int dasd_devno[DASD_MAX_DEVICES] =
+{0,};
+static int dasd_count = 0;
+
+extern dasd_chanq_t *cq_head;
+
+static int
+dasd_get_hexdigit (char c)
+{
+ if ((c >= '0') && (c <= '9'))
+ return c - '0';
+ if ((c >= 'a') && (c <= 'f'))
+ return c + 10 - 'a';
+ if ((c >= 'A') && (c <= 'F'))
+ return c + 10 - 'A';
+ return -1;
+}
+
+/* sets the string pointer after the next comma */
+static void
+dasd_scan_for_next_comma (char **strptr)
+{
+ while (((**strptr) != ',') && ((**strptr)++))
+ (*strptr)++;
+
+ /* set the position AFTER the comma */
+ if (**strptr == ',')
+ (*strptr)++;
+}
+
+/* sets the string pointer after the next comma, if a parse error occurred */
+static int
+dasd_get_next_int (char **strptr)
+{
+ int j, i = -1; /* for cosmetic reasons first -1, then 0 */
+ if (isxdigit (**strptr)) {
+ for (i = 0; isxdigit (**strptr);) {
+ i <<= 4;
+ j = dasd_get_hexdigit (**strptr);
+ if (j == -1) {
+ PRINT_ERR ("no integer: skipping range.\n");
+ dasd_scan_for_next_comma (strptr);
+ i = -1;
+ break;
+ }
+ i += j;
+ (*strptr)++;
+ if (i > 0xffff) {
+ PRINT_ERR (" value too big, skipping range.\n");
+ dasd_scan_for_next_comma (strptr);
+ i = -1;
+ break;
+ }
+ }
+ }
+ return i;
+}
+
+static inline int
+devindex_from_devno (int devno)
+{
+ int i;
+ for (i = 0; i < dasd_count; i++) {
+ if (dasd_devno[i] == devno)
+ return i;
+ }
+ if (dasd_autodetect) {
+ if (dasd_count < DASD_MAX_DEVICES) {
+ dasd_devno[dasd_count] = devno;
+ return dasd_count++;
+ }
+ return -EOVERFLOW;
+ }
+ return -ENODEV;
+}
+
+/* returns 1, if dasd_no is in the specified ranges, otherwise 0 */
+static inline int
+dasd_is_accessible (int devno)
+{
+ return (devindex_from_devno (devno) >= 0);
+}
+
+/* dasd_insert_range skips ranges, if the start or the end is -1 */
+static void
+dasd_insert_range (int start, int end)
+{
+ int curr;
+ FUNCTION_ENTRY ("dasd_insert_range");
+ if (dasd_count >= DASD_MAX_DEVICES) {
+ PRINT_ERR (" too many devices specified, ignoring some.\n");
+ FUNCTION_EXIT ("dasd_insert_range");
+ return;
+ }
+ if ((start == -1) || (end == -1)) {
+ PRINT_ERR
+ ("invalid format of parameter, skipping range\n");
+ FUNCTION_EXIT ("dasd_insert_range");
+ return;
+ }
+ if (end < start) {
+ PRINT_ERR (" ignoring range from %x to %x - start value " \
+ "must be less than end value.\n", start, end);
+ FUNCTION_EXIT ("dasd_insert_range");
+ return;
+ }
+/* concurrent execution would be critical, but will not occur here */
+ for (curr = start; curr <= end; curr++) {
+ if (dasd_is_accessible (curr)) {
+ PRINT_WARN (" %x is already in list as device %d\n",
+ curr, devindex_from_devno (curr));
+ }
+ dasd_devno[dasd_count] = curr;
+ dasd_count++;
+ if (dasd_count >= DASD_MAX_DEVICES) {
+ PRINT_ERR (" too many devices specified, ignoring some.\n");
+ break;
+ }
+ }
+ PRINT_INFO (" added dasd range from %x to %x.\n",
+ start, dasd_devno[dasd_count - 1]);
+
+ FUNCTION_EXIT ("dasd_insert_range");
+}
+
+static int __init
+dasd_setup (char *str)
+{
+ int devno, devno2;
+
+ FUNCTION_ENTRY ("dasd_setup");
+ dasd_autodetect = 0;
+ while (*str && *str != 1) {
+ if (!isxdigit (*str)) {
+ str++; /* to avoid looping on two commas */
+ PRINT_ERR (" kernel parameter in invalid format.\n");
+ continue;
+ }
+ devno = dasd_get_next_int (&str);
+
+ /* range was skipped? -> scan for comma has been done */
+ if (devno == -1)
+ continue;
+
+ if (*str == ',') {
+ str++;
+ dasd_insert_range (devno, devno);
+ continue;
+ }
+ if (*str == '-') {
+ str++;
+ devno2 = dasd_get_next_int (&str);
+ if (devno2 == -1) {
+ PRINT_ERR (" invalid character in " \
+ "kernel parameters.");
+ } else {
+ dasd_insert_range (devno, devno2);
+ }
+ dasd_scan_for_next_comma (&str);
+ continue;
+ }
+ if (*str == 0) {
+ dasd_insert_range (devno, devno);
+ break;
+ }
+ PRINT_ERR (" unexpected character in kernel parameter, " \
+ "skipping range.\n");
+ }
+ FUNCTION_EXIT ("dasd_setup");
+ return 1;
+}
+
+__setup("dasd=", dasd_setup);
+
+dasd_information_t *dasd_info[DASD_MAX_DEVICES] = {NULL,};
+static struct hd_struct dd_hdstruct[DASD_MAX_DEVICES << PARTN_BITS];
+static int dasd_blks[256] = {0,};
+static int dasd_secsize[256] = {0,};
+static int dasd_blksize[256] = {0,};
+static int dasd_maxsecs[256] = {0,};
+
+struct gendisk dd_gendisk =
+{
+ MAJOR_NR, /* Major number */
+ "dasd", /* Major name */
+ PARTN_BITS, /* Bits to shift to get real from partn */
+ 1 << PARTN_BITS, /* Number of partitions per real */
+ dd_hdstruct, /* hd struct */
+ dasd_blks, /* sizes in blocks */
+ DASD_MAX_DEVICES, /* number */
+ NULL, /* internal */
+ NULL /* next */
+
+};
+
+static atomic_t bh_scheduled = ATOMIC_INIT (0);
+
+static inline void
+schedule_bh (void (*func) (void))
+{
+ static struct tq_struct dasd_tq =
+ {0,};
+ /* Protect against rescheduling, when already running */
+ if (atomic_compare_and_swap (0, 1, &bh_scheduled))
+ return;
+ dasd_tq.routine = (void *) (void *) func;
+ queue_task (&dasd_tq, &tq_immediate);
+ mark_bh (IMMEDIATE_BH);
+ return;
+}
+
+void
+sleep_done (struct semaphore *sem)
+{
+ if (sem != NULL) {
+ up (sem);
+ }
+}
+
+void
+sleep (int timeout)
+{
+ struct semaphore sem;
+ struct timer_list timer;
+
+ init_MUTEX_LOCKED (&sem);
+ init_timer (&timer);
+ timer.data = (unsigned long) &sem;
+ timer.expires = jiffies + timeout;
+ timer.function = (void (*)(unsigned long)) sleep_done;
+ printk (KERN_DEBUG PRINTK_HEADER
+ "Sleeping for timer tics %d\n", timeout);
+ add_timer (&timer);
+ down (&sem);
+ del_timer (&timer);
+}
+
+#ifdef CONFIG_DASD_ECKD
+extern dasd_operations_t dasd_eckd_operations;
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+extern dasd_operations_t dasd_mdsk_operations;
+#endif /* CONFIG_DASD_MDSK */
+
+dasd_operations_t *dasd_disciplines[] =
+{
+#ifdef CONFIG_DASD_ECKD
+ &dasd_eckd_operations,
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+ &dasd_mdsk_operations,
+#endif /* CONFIG_DASD_MDSK */
+#ifdef CONFIG_DASD_CKD
+ &dasd_ckd_operations,
+#endif /* CONFIG_DASD_CKD */
+ NULL
+};
+
+char *dasd_name[] =
+{
+#ifdef CONFIG_DASD_ECKD
+ "ECKD",
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+ "MDSK",
+#endif /* CONFIG_DASD_MDSK */
+#ifdef CONFIG_DASD_CKD
+ "CKD",
+#endif /* CONFIG_DASD_CKD */
+ "END"
+};
+
+
+static inline int
+do_dasd_ioctl (struct inode *inp, unsigned int no, unsigned long data)
+{
+ int rc;
+ int di;
+ dasd_information_t *dev;
+
+ di = DEVICE_NR (inp->i_rdev);
+ if (!dasd_info[di]) {
+ PRINT_WARN ("No device registered as %d\n", inp->i_rdev);
+ return -EINVAL;
+ }
+ if ((_IOC_DIR (no) != _IOC_NONE) && (data == 0)) {
+ PRINT_DEBUG ("empty data ptr");
+ return -EINVAL;
+ }
+ dev = dasd_info[di];
+ if (!dev) {
+ PRINT_WARN ("No device registered as %d\n", inp->i_rdev);
+ return -EINVAL;
+ }
+ PRINT_INFO ("ioctl 0x%08x %s'0x%x'%d(%d) on dev %d/%d (%d) with data %8lx\n", no,
+ _IOC_DIR (no) == _IOC_NONE ? "0" :
+ _IOC_DIR (no) == _IOC_READ ? "r" :
+ _IOC_DIR (no) == _IOC_WRITE ? "w" :
+ _IOC_DIR (no) == (_IOC_READ | _IOC_WRITE) ? "rw" : "u",
+ _IOC_TYPE (no), _IOC_NR (no), _IOC_SIZE (no),
+ MAJOR (inp->i_rdev), MINOR (inp->i_rdev), di, data);
+
+ switch (no) {
+ case BLKGETSIZE:{ /* Return device size */
+ unsigned long blocks;
+ if (inp->i_rdev & 0x01) {
+ blocks = (dev->sizes.blocks - 3) <<
+ dev->sizes.s2b_shift;
+ } else {
+ blocks = dev->sizes.kbytes << dev->sizes.s2b_shift;
+ }
+ rc = copy_to_user ((long *) data, &blocks, sizeof (long));
+ break;
+ }
+ case BLKFLSBUF:{
+ rc = fsync_dev (inp->i_rdev);
+ break;
+ }
+ case BLKRAGET:{
+ rc = copy_to_user ((long *) data,
+ read_ahead + MAJOR_NR, sizeof (long));
+ break;
+ }
+ case BLKRASET:{
+ rc = copy_from_user (read_ahead + MAJOR_NR,
+ (long *) data, sizeof (long));
+ break;
+ }
+ case BLKRRPART:{
+ INTERNAL_CHECK ("BLKRPART not implemented%s", "");
+ rc = -EINVAL;
+ break;
+ }
+ case HDIO_GETGEO:{
+ INTERNAL_CHECK ("HDIO_GETGEO not implemented%s", "");
+ rc = -EINVAL;
+ break;
+ }
+
+ case BIODASDRSID:{
+ rc = copy_to_user ((void *) data,
+ &(dev->info.sid_data),
+ sizeof (senseid_t));
+ break;
+ }
+ case BIODASDRWTB:{
+ int offset = 0;
+ int xlt;
+ rc = copy_from_user (&xlt, (void *) data,
+ sizeof (int));
+ PRINT_INFO("Xlating %d to",xlt);
+ if (rc)
+ break;
+ if (MINOR (inp->i_rdev) & 1)
+ offset = 3;
+ xlt += offset;
+ printk(" %d \n",xlt);
+ rc = copy_to_user ((void *) data, &xlt,
+ sizeof (int));
+ break;
+ }
+ case BIODASDFORMAT:{
+ /* fdata == NULL is a valid arg to dasd_format ! */
+ format_data_t *fdata = NULL;
+ if (data) {
+ fdata = kmalloc (sizeof (format_data_t),
+ GFP_ATOMIC);
+ if (!fdata) {
+ rc = -ENOMEM;
+ break;
+ }
+ rc = copy_from_user (fdata, (void *) data,
+ sizeof (format_data_t));
+ if (rc)
+ break;
+ }
+ rc = dasd_format (inp->i_rdev, fdata);
+ if (fdata) {
+ kfree (fdata);
+ }
+ break;
+ }
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static void
+dasd_end_request (struct request *req, int uptodate)
+{
+ struct buffer_head *bh;
+ FUNCTION_ENTRY ("dasd_end_request");
+#if DASD_PARANOIA > 2
+ if (!req) {
+ INTERNAL_CHECK ("end_request called with zero arg%s\n", "");
+ }
+#endif /* DASD_PARANOIA */
+ while ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ bh->b_end_io (bh, uptodate);
+ }
+ if (!end_that_request_first (req, uptodate, DEVICE_NAME)) {
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness (MAJOR (req->rq_dev));
+#endif
+ DEVICE_OFF (req->rq_dev);
+ end_that_request_last (req);
+ }
+ FUNCTION_EXIT ("dasd_end_request");
+ return;
+}
+
+void
+dasd_wakeup (void)
+{
+ wake_up (&dasd_waitq);
+}
+
+int
+dasd_unregister_dasd (int irq, dasd_type_t dt, dev_info_t * info)
+{
+ int rc = 0;
+ FUNCTION_ENTRY ("dasd_unregister_dasd");
+ INTERNAL_CHECK ("dasd_unregister_dasd not implemented%s\n", "");
+ FUNCTION_EXIT ("dasd_unregister_dasd");
+ return rc;
+}
+
+/* Below you find the functions already cleaned up */
+static dasd_type_t
+check_type (dev_info_t * info)
+{
+ dasd_type_t type = dasd_none;
+
+ FUNCTION_ENTRY ("check_type");
+#ifdef CONFIG_DASD_ECKD
+ if (MATCH (info, == 0x3990, ||1, == 0x3390, ||1) ||
+ MATCH (info, == 0x9343, ||1, == 0x9345, ||1) ||
+ MATCH (info, == 0x3990, ||1, == 0x3380, ||1)) {
+ type = dasd_eckd;
+ } else
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+ if ( MACHINE_IS_VM ) {
+ type = dasd_mdsk;
+ } else
+#endif /* CONFIG_DASD_MDSK */
+ {
+ type = dasd_none;
+ }
+
+ FUNCTION_EXIT ("check_type");
+ return type;
+}
+
+static int
+dasd_read_characteristics (dasd_information_t * info)
+{
+ int rc;
+ int ct = 0;
+ dev_info_t *di;
+ dasd_type_t dt;
+
+ FUNCTION_ENTRY ("read_characteristics");
+ if (info == NULL) {
+ return -ENODEV;
+ }
+ di = &(info->info);
+ if (di == NULL) {
+ return -ENODEV;
+ }
+ dt = check_type (di);
+ /* Some cross-checks, if the cu supports RDC */
+ if (MATCH (di, == 0x2835, ||1, ||1, ||1) ||
+ MATCH (di, == 0x3830, ||1, ||1, ||1) ||
+ MATCH (di, == 0x3830, ||1, ||1, ||1) ||
+ MATCH (di, == 0x3990, <=0x03, == 0x3380, <=0x0d)) {
+ PRINT_WARN ("Device %d (%x/%x at %x/%x) supports no RDC\n",
+ info->info.irq,
+ di->sid_data.dev_type,
+ di->sid_data.dev_model,
+ di->sid_data.cu_type,
+ di->sid_data.cu_model);
+ return -EINVAL;
+ }
+ switch (dt) {
+#ifdef CONFIG_DASD_ECKD
+ case dasd_eckd:
+ ct = 64;
+ rc = read_dev_chars (info->info.irq,
+ (void *) &(info->rdc_data), ct);
+ break;
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+ case dasd_mdsk:
+ ct = 0;
+ break;
+#endif /* CONFIG_DASD_MDSK */
+ default:
+ INTERNAL_ERROR ("don't know dasd type %d\n", dt);
+ }
+ if (rc) {
+ PRINT_WARN ("RDC resulted in rc=%d\n", rc);
+ }
+ FUNCTION_EXIT ("read_characteristics");
+ return rc;
+}
+
+/* How many sectors must be in a request to dequeue it ? */
+#define QUEUE_BLOCKS 25
+#define QUEUE_SECTORS (QUEUE_BLOCKS << dasd_info[di]->sizes.s2b_shift)
+
+/* How often to retry an I/O before raising an error */
+#define DASD_MAX_RETRIES 5
+
+
+static inline
+ cqr_t *
+dasd_cqr_from_req (struct request *req)
+{
+ cqr_t *cqr = NULL;
+ int di;
+ dasd_information_t *info;
+
+ if (!req) {
+ PRINT_ERR ("No request passed!");
+ return NULL;
+ }
+ di = DEVICE_NR (req->rq_dev);
+ info = dasd_info[di];
+ if (!info)
+ return NULL;
+ /* if applicable relocate block */
+ if (MINOR (req->rq_dev) & ((1 << PARTN_BITS) - 1) ) {
+ req->sector +=
+ dd_gendisk.part[MINOR(req->rq_dev)].start_sect;
+ }
+ /* Now check for consistency */
+ if (!req->nr_sectors) {
+ PRINT_WARN ("req: %p dev: %08x sector: %ld nr_sectors: %ld bh: %p\n",
+ req, req->rq_dev, req->sector, req->nr_sectors, req->bh);
+ return NULL;
+ }
+ if (((req->sector + req->nr_sectors) >> 1) > info->sizes.kbytes) {
+ printk (KERN_ERR PRINTK_HEADER
+ "Requesting I/O past end of device %d\n",
+ di);
+ return NULL;
+ }
+ cqr = dasd_disciplines[info->type]->get_req_ccw (di, req);
+ if (!cqr) {
+ PRINT_WARN ("empty CQR generated\n");
+ } else {
+ cqr->req = req;
+ cqr->int4cqr = cqr;
+ cqr->devindex = di;
+#ifdef DASD_PROFILE
+ asm volatile ("STCK %0":"=m" (cqr->buildclk));
+#endif /* DASD_PROFILE */
+ if (atomic_compare_and_swap (CQR_STATUS_EMPTY,
+ CQR_STATUS_FILLED,
+ &cqr->status)) {
+ PRINT_WARN ("cqr from req stat changed %d\n",
+ atomic_read (&cqr->status));
+ }
+ }
+ return cqr;
+}
+
+int
+dasd_start_IO (cqr_t * cqr)
+{
+ int rc = 0;
+ int retries = DASD_SSCH_RETRIES;
+ int di, irq;
+
+ dasd_debug ((unsigned long) cqr); /* cqr */
+
+ if (!cqr) {
+ PRINT_WARN ("(start_IO) no cqr passed\n");
+ return -EINVAL;
+ }
+ if (cqr->magic != DASD_MAGIC) {
+ PRINT_WARN ("(start_IO) magic number mismatch\n");
+ return -EINVAL;
+ }
+ if (atomic_compare_and_swap (CQR_STATUS_QUEUED,
+ CQR_STATUS_IN_IO,
+ &cqr->status)) {
+ PRINT_WARN ("start_IO: status changed %d\n",
+ atomic_read (&cqr->status));
+ atomic_set (&cqr->status, CQR_STATUS_ERROR);
+ return -EINVAL;
+ }
+ di = cqr->devindex;
+ irq = dasd_info[di]->info.irq;
+ do {
+ asm volatile ("STCK %0":"=m" (cqr->startclk));
+ rc = do_IO (irq, cqr->cpaddr, (long) cqr, 0x00, cqr->options);
+ switch (rc) {
+ case 0:
+ if (!(cqr->options & DOIO_WAIT_FOR_INTERRUPT))
+ atomic_set_mask (DASD_CHANQ_BUSY,
+ &dasd_info[di]->queue.flags);
+ break;
+ case -ENODEV:
+ PRINT_WARN ("cqr %p: 0x%04x error, %d retries left\n",
+ cqr, dasd_info[di]->info.devno, retries);
+ break;
+ case -EIO:
+ PRINT_WARN ("cqr %p: 0x%04x I/O, %d retries left\n",
+ cqr, dasd_info[di]->info.devno, retries);
+ break;
+ case -EBUSY: /* set up timer, try later */
+
+ PRINT_WARN ("cqr %p: 0x%04x busy, %d retries left\n",
+ cqr, dasd_info[di]->info.devno, retries);
+ break;
+ default:
+
+ PRINT_WARN ("cqr %p: 0x%04x %d, %d retries left\n",
+ cqr, rc, dasd_info[di]->info.devno,
+ retries);
+ break;
+ }
+ } while (rc && --retries);
+ if (rc) {
+ if (atomic_compare_and_swap (CQR_STATUS_IN_IO,
+ CQR_STATUS_ERROR,
+ &cqr->status)) {
+ PRINT_WARN ("start_IO:(done) status changed %d\n",
+ atomic_read (&cqr->status));
+ atomic_set (&cqr->status, CQR_STATUS_ERROR);
+ }
+ }
+ return rc;
+}
+
+static inline
+void
+dasd_end_cqr (cqr_t * cqr, int uptodate)
+{
+ struct request *req = cqr->req;
+ asm volatile ("STCK %0":"=m" (cqr->endclk));
+#ifdef DASD_PROFILE
+ dasd_profile_add (cqr);
+#endif /* DASD_PROFILE */
+ dasd_chanq_deq (&dasd_info[cqr->devindex]->queue, cqr);
+ if (req) {
+ dasd_end_request (req, uptodate);
+ }
+}
+
+void
+dasd_dump_sense (devstat_t * stat)
+{
+ int sl, sct;
+ if ( ! stat->flag | DEVSTAT_FLAG_SENSE_AVAIL) {
+ PRINT_INFO("I/O status w/o sense data");
+ } else {
+ printk (KERN_INFO PRINTK_HEADER
+ "-------------------I/O result:-----------\n");
+ for (sl = 0; sl < 4; sl++) {
+ printk (KERN_INFO PRINTK_HEADER "Sense:");
+ for (sct = 0; sct < 8; sct++) {
+ printk (" %2d:0x%02X", 8 * sl + sct,
+ stat->ii.sense.data[8 * sl + sct]);
+ }
+ printk ("\n");
+ }
+ }
+}
+
+static int
+register_dasd_last (int di)
+{
+ int rc = 0;
+ int minor;
+ struct buffer_head *bh;
+ rc = dasd_disciplines[dasd_info[di]->type]->fill_sizes_last (di);
+ switch (rc) {
+ case -EMEDIUMTYPE:
+ dasd_info[di]->flags |= DASD_INFO_FLAGS_NOT_FORMATTED;
+ break;
+ }
+ PRINT_INFO ("%ld kB <- 'soft'-block: %d, hardsect %d Bytes\n",
+ dasd_info[di]->sizes.kbytes,
+ dasd_info[di]->sizes.bp_block,
+ dasd_info[di]->sizes.bp_sector);
+ switch (dasd_info[di]->type) {
+#ifdef CONFIG_DASD_ECKD
+ case dasd_eckd:
+ dasd_info[di]->sizes.label_block = 2;
+ break;
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+ case dasd_mdsk:
+ dasd_info[di]->sizes.label_block = -1;
+ break;
+#endif /* CONFIG_DASD_MDSK */
+
+ default:
+ INTERNAL_CHECK ("Unknown dasd type %d\n", dasd_info[di]->type);
+ }
+ minor = di << PARTN_BITS;
+ dasd_blks[minor] = dasd_info[di]->sizes.kbytes;
+ dasd_secsize[minor] = dasd_info[di]->sizes.bp_sector;
+ dasd_blksize[minor] = dasd_info[di]->sizes.bp_block;
+ dasd_maxsecs[minor] = 252<<dasd_info[di]->sizes.s2b_shift;
+ dasd_secsize[minor+1] = dasd_info[di]->sizes.bp_sector;
+ dasd_blksize[minor+1] = dasd_info[di]->sizes.bp_block;
+ dasd_maxsecs[minor+1] = 252<<dasd_info[di]->sizes.s2b_shift;
+
+ {
+#define DASD_NAME_PREFIX "dasd_"
+ char * name = (char *) kmalloc ( 1+strlen (DASD_NAME_PREFIX) +
+ 2 /* 0x */ + 4 /* devno */,
+ GFP_KERNEL);
+ sprintf ( name , DASD_NAME_PREFIX "%04x%c",
+ dasd_info[di]->info.devno,'\0' );
+ dasd_info[di] -> devfs_entry =
+ devfs_register ( NULL /* dir */,
+ name, strlen(name),
+ 0 /* flags */,
+ DASD_MAJOR, minor,
+ 0755 /* mode */,
+ 0 /* uid */ , 0 /* gid */,
+ &dasd_device_operations,
+ (void *)dasd_info[di]);
+ }
+ /* end of that stuff */
+ return rc;
+}
+
+void
+dasd_partn_detect ( int di )
+{
+ int minor = di << PARTN_BITS;
+ LOOP_CONTROL ("Setting partitions of DASD %d\n", di);
+ register_disk (&dd_gendisk,
+ MKDEV(DASD_MAJOR,minor),
+ 1 << PARTN_BITS,
+ &dasd_device_operations,
+ dasd_info[di]->sizes.kbytes << 1);
+}
+
+void
+dasd_do_chanq (void)
+{
+ dasd_chanq_t *qp = NULL;
+ cqr_t *cqr;
+ long flags;
+ int irq;
+ int tasks;
+ atomic_set (&bh_scheduled, 0);
+ dasd_debug (0xc4c40000); /* DD */
+ while ((tasks = atomic_read(&chanq_tasks)) != 0) {
+/* initialization and wraparound */
+ if (qp == NULL) {
+ dasd_debug (0xc4c46df0); /* DD_0 */
+ qp = cq_head;
+ if (!qp) {
+ dasd_debug (0xc4c46ff1); /* DD?1 */
+ dasd_debug (tasks);
+ PRINT_ERR("Mismatch of NULL queue pointer and "
+ "still %d chanq_tasks to do!!\n"
+ "Please send output of /proc/dasd/debug "
+ "to Linux390@de.ibm.com\n", tasks);
+ atomic_set(&chanq_tasks,0);
+ break;
+ }
+ }
+/* Get first request */
+ dasd_debug ((unsigned long) qp);
+ cqr = (cqr_t *) (qp->head);
+/* empty queue -> dequeue and proceed */
+ if (!cqr) {
+ dasd_chanq_t *nqp = qp->next_q;
+ cql_deq (qp);
+ qp = nqp;
+ continue;
+ }
+/* process all requests on that queue */
+ do {
+ cqr_t *next;
+ dasd_debug ((unsigned long) cqr); /* cqr */
+ if (cqr->magic != DASD_MAGIC) {
+ dasd_debug (0xc4c46ff2); /* DD?2 */
+ panic ( PRINTK_HEADER "do_cq:"
+ "magic mismatch %p -> %x\n",
+ cqr, cqr -> magic);
+ break;
+ }
+ irq = dasd_info[cqr->devindex]->info.irq;
+ s390irq_spin_lock_irqsave (irq, flags);
+ switch (atomic_read (&cqr->status)) {
+ case CQR_STATUS_IN_IO:
+ dasd_debug (0xc4c4c9d6); /* DDIO */
+ cqr = NULL;
+ break;
+ case CQR_STATUS_QUEUED:
+ dasd_debug (0xc4c4e2e3); /* DDST */
+ if (dasd_start_IO (cqr) == 0) {
+ atomic_dec (&chanq_tasks);
+ cqr = NULL;
+ }
+ break;
+ case CQR_STATUS_ERROR:
+ dasd_debug (0xc4c4c5d9); /* DDER */
+ dasd_dump_sense (cqr->dstat);
+ if ( ++ cqr->retries < 2 ) {
+ atomic_set (&cqr->status,
+ CQR_STATUS_QUEUED);
+ dasd_debug (0xc4c4e2e3); /* DDST */
+ if (dasd_start_IO (cqr) == 0) {
+ atomic_dec ( &qp ->
+ dirty_requests);
+ atomic_dec (&chanq_tasks);
+ cqr = NULL;
+ }
+ } else {
+ atomic_set (&cqr->status,
+ CQR_STATUS_FAILED);
+ }
+ break;
+ case CQR_STATUS_DONE:
+ next = cqr->next;
+ dasd_debug (0xc4c49692); /* DDok */
+ dasd_end_cqr (cqr, 1);
+ atomic_dec (&chanq_tasks);
+ cqr = next;
+ break;
+ case CQR_STATUS_FAILED:
+ next = cqr->next;
+ dasd_debug (0xc4c47a7a); /* DD:: */
+ if ( ! ( dasd_info[cqr->devindex]-> flags &
+ DASD_INFO_FLAGS_INITIALIZED ) ) {
+ dasd_info[cqr->devindex]-> flags |=
+ DASD_INFO_FLAGS_INITIALIZED |
+ DASD_INFO_FLAGS_NOT_FORMATTED;
+ }
+ dasd_end_cqr (cqr, 0);
+ atomic_dec ( &qp -> dirty_requests );
+ atomic_dec (&chanq_tasks);
+ cqr = next;
+ break;
+ default:
+ PRINT_WARN ("unknown cqrstatus\n");
+ cqr = NULL;
+ }
+ s390irq_spin_unlock_irqrestore (irq, flags);
+ } while (cqr);
+ qp = qp->next_q;
+ }
+ spin_lock (&io_request_lock);
+ do_dasd_request (&blk_dev[DASD_MAJOR].request_queue);
+ spin_unlock (&io_request_lock);
+ dasd_debug (0xc4c46d6d); /* DD__ */
+}
+
+/*
+ The request_fn is called from ll_rw_blk for any new request.
+ We use it to feed the chanqs.
+ This implementation assumes we are serialized by the io_request_lock.
+ */
+
+#define QUEUE_THRESHOLD 5
+
+void
+do_dasd_request (request_queue_t *queue)
+{
+ struct request *req;
+ cqr_t *cqr;
+ dasd_chanq_t *q;
+ long flags;
+ int di, irq, go;
+ int broken, busy;
+
+ dasd_debug (0xc4d90000); /* DR */
+ dasd_debug ((unsigned long) __builtin_return_address(0));
+ go = 1;
+ while (go && !list_empty(&queue->queue_head)) {
+ req = blkdev_entry_next_request(&queue->queue_head);
+ req = blkdev_entry_next_request(&queue->queue_head);
+ di = DEVICE_NR (req->rq_dev);
+ dasd_debug ((unsigned long) req); /* req */
+ dasd_debug (0xc4d90000 + /* DR## */
+ ((((di/16)<9?(di/16)+0xf0:(di/16)+0xc1))<<8) +
+ (((di%16)<9?(di%16)+0xf0:(di%16)+0xc1)));
+ irq = dasd_info[di]->info.irq;
+ s390irq_spin_lock_irqsave (irq, flags);
+ q = &dasd_info[di]->queue;
+ busy = atomic_read(&q->flags) & DASD_CHANQ_BUSY;
+ broken = atomic_read(&q->flags)&DASD_REQUEST_Q_BROKEN;
+ if ( ! busy ||
+ ( ! broken &&
+ (req->nr_sectors >= QUEUE_SECTORS))) {
+ blkdev_dequeue_request(req);
+ /*
+ printk ( KERN_INFO "0x%04x %c %d %d\n",
+ req->rq_dev,req->cmd ?'w':'r',
+ req->sector,req->nr_sectors);
+ */
+ cqr = dasd_cqr_from_req (req);
+ if (!cqr) {
+ dasd_debug (0xc4d96ff1); /* DR?1 */
+ dasd_end_request (req, 0);
+ goto cont;
+ }
+ dasd_debug ((unsigned long) cqr); /* cqr */
+ dasd_chanq_enq (q, cqr);
+ if (!(atomic_read (&q->flags) &
+ DASD_CHANQ_ACTIVE)) {
+ cql_enq_head (q);
+ }
+ if ( ! busy ) {
+ atomic_clear_mask (DASD_REQUEST_Q_BROKEN,
+ &q->flags );
+ if (atomic_read( &q->dirty_requests) == 0 ) {
+ if ( dasd_start_IO (cqr) == 0 ) {
+ } else {
+ atomic_inc (&chanq_tasks);
+ schedule_bh (dasd_do_chanq);
+ }
+ }
+ }
+ } else {
+ dasd_debug (0xc4d9c2d9); /* DRBR */
+ atomic_set_mask (DASD_REQUEST_Q_BROKEN, &q->flags );
+ go = 0;
+ }
+ cont:
+ s390irq_spin_unlock_irqrestore (irq, flags);
+ }
+ dasd_debug (0xc4d96d6d); /* DR__ */
+}
+
+/*
+ * dasd_handler
+ * Primary interrupt handler for all DASD devices.  The interruption
+ * parameter carries the address of the cqr_t the interrupt belongs to;
+ * intparm 0 (unsolicited) and odd/high-bit values (cannot be a valid
+ * request address) are filtered out first.
+ * FIX: a NULL devstat used to be reported but then dereferenced
+ * anyway; bail out instead.
+ */
+void
+dasd_handler (int irq, void *ds, struct pt_regs *regs)
+{
+        devstat_t *stat = (devstat_t *) ds;
+        int ip;
+        cqr_t *cqr;
+        int done_fast_io = 0;
+
+        dasd_debug (0xc4c80000);        /* DH */
+        if (!stat) {
+                PRINT_ERR ("handler called without devstat");
+                return;
+        }
+        ip = stat->intparm;
+        dasd_debug (ip);        /* intparm */
+        switch (ip) {                /* filter special intparms... */
+        case 0x00000000:        /* no intparm: unsolicited interrupt */
+                dasd_debug (0xc4c8a489);        /* DHui */
+                PRINT_INFO ("Unsolicited interrupt on device %04X\n",
+                            stat->devno);
+                dasd_dump_sense (stat);
+                return;
+        default:
+                if (ip & 0x80000001) {
+                        /* odd or high-bit address: not a request pointer */
+                        dasd_debug (0xc4c8a489);        /* DHui */
+                        PRINT_INFO ("Spurious interrupt %08x on device %04X\n",
+                                    ip, stat->devno);
+                        return;
+                }
+                cqr = (cqr_t *) ip;
+                if (cqr->magic != DASD_MAGIC) {
+                        dasd_debug (0xc4c86ff1);        /* DH?1 */
+                        PRINT_ERR ("handler:magic mismatch on %p %08x\n",
+                                   cqr, cqr->magic);
+                        return;
+                }
+                /* timestamp completion of the channel program */
+                asm volatile ("STCK %0":"=m" (cqr->stopclk));
+                if ( ( stat->cstat == 0x00 &&
+                       stat->dstat == (DEV_STAT_CHN_END|DEV_STAT_DEV_END) ) ||
+                     dasd_erp_examine ( cqr ) == dasd_era_none ) {
+                        /* clean channel/device end (or ERP says benign):
+                           mark the request done */
+                        dasd_debug (0xc4c89692);        /* DHok */
+                        if (atomic_compare_and_swap (CQR_STATUS_IN_IO,
+                                                     CQR_STATUS_DONE,
+                                                     &cqr->status)) {
+                                PRINT_WARN ("handler: cqrstat changed%d\n",
+                                            atomic_read (&cqr->status));
+                                atomic_set(&cqr->status, CQR_STATUS_DONE);
+                        }
+                        if ( ! ( dasd_info[cqr->devindex]-> flags &
+                                 DASD_INFO_FLAGS_INITIALIZED ) ) {
+                                /* first successful request on this device:
+                                   finish bring-up and record format state */
+                                int rc = register_dasd_last ( cqr->devindex );
+                                dasd_info[cqr->devindex]-> flags |=
+                                    DASD_INFO_FLAGS_INITIALIZED;
+                                if ( rc ) {
+                                        dasd_info[cqr->devindex]->flags &=
+                                            ~DASD_INFO_FLAGS_NOT_FORMATTED;
+                                } else {
+                                        dasd_info[cqr->devindex]->flags |=
+                                            DASD_INFO_FLAGS_NOT_FORMATTED;
+                                }
+                        }
+                        if (cqr->next) {
+                                /* start the chained request directly from
+                                   the handler (fast path) */
+                                dasd_debug (0xc4c8e2e3);        /* DHST */
+                                if (dasd_start_IO (cqr->next) == 0) {
+                                        done_fast_io = 1;
+                                } else {
+                                        atomic_inc (&chanq_tasks);
+                                }
+                        }
+                        break;
+                }
+                /* only visited in case of error ! */
+                dasd_debug (0xc4c8c5d9);        /* DHER */
+                if (!cqr->dstat)
+                        cqr->dstat = kmalloc (sizeof (devstat_t),
+                                              GFP_ATOMIC);
+                if (cqr->dstat) {
+                        memcpy (cqr->dstat, stat, sizeof (devstat_t));
+                } else {
+                        PRINT_ERR ("no memory for dstat\n");
+                }
+                /* errorprocessing */
+                atomic_set (&cqr->status, CQR_STATUS_ERROR);
+                atomic_inc (&dasd_info[cqr->devindex]->queue.dirty_requests);
+        }
+        if (done_fast_io == 0)
+                atomic_clear_mask (DASD_CHANQ_BUSY,
+                                   &dasd_info[cqr->devindex]->queue.flags);
+
+        if (cqr->flags & DASD_DO_IO_SLEEP) {
+                /* a synchronous submitter sleeps on this request */
+                dasd_debug (0xc4c8a6a4);        /* DHwu */
+                dasd_wakeup ();
+        } else if (! (cqr->options & DOIO_WAIT_FOR_INTERRUPT) ){
+                dasd_debug (0xc4c8a293);        /* DHsl */
+                atomic_inc (&chanq_tasks);
+                schedule_bh (dasd_do_chanq);
+        } else {
+                dasd_debug (0x64686f6f);        /* DH_g */
+                dasd_debug (cqr->flags);        /* DH_g */
+        }
+        dasd_debug (0xc4c86d6d);        /* DHwu */
+}
+
+/*
+ * dasd_format
+ * ioctl backend for formatting a whole DASD volume.
+ * dev must address the full device (no partition bits set in the
+ * minor) and may be held open exactly once -- by the formatter itself.
+ * After a successful format, the discipline's first analysis request
+ * is queued again to re-probe the new geometry.
+ * Returns 0 on success or a negative errno.
+ */
+static int
+dasd_format (int dev, format_data_t * fdata)
+{
+        int rc;
+        int devindex = DEVICE_NR (dev);
+        dasd_chanq_t *q;
+        cqr_t *cqr;
+        int irq;
+        long flags;
+        PRINT_INFO ("Format called with devno %x\n", dev);
+        /* any partition bit set means this is not the whole volume */
+        if (MINOR (dev) & (0xff >> (8 - PARTN_BITS))) {
+                PRINT_WARN ("Can't format partition! minor %x %x\n",
+                            MINOR (dev), 0xff >> (8 - PARTN_BITS));
+                return -EINVAL;
+        }
+        down (&dasd_info[devindex]->sem);
+        /* open_count == 1: only the formatting process has it open */
+        if (dasd_info[devindex]->open_count == 1) {
+                rc = dasd_disciplines[dasd_info[devindex]->type]->
+                    dasd_format (devindex, fdata);
+                if (rc) {
+                        PRINT_WARN ("Formatting failed rc=%d\n", rc);
+                }
+        } else {
+                PRINT_WARN ("device is open! %d\n", dasd_info[devindex]->open_count);
+                rc = -EINVAL;
+        }
+        if (!rc) {
+#if DASD_PARANOIA > 1
+                if (!dasd_disciplines[dasd_info[devindex]->type]->fill_sizes_first) {
+                        INTERNAL_CHECK ("No fill_sizes for dt=%d\n", dasd_info[devindex]->type);
+                } else
+#endif /* DASD_PARANOIA */
+                {
+                        /* drop INITIALIZED so dasd_handler re-runs device
+                           bring-up when the analysis request completes */
+                        dasd_info[devindex]->flags &= ~DASD_INFO_FLAGS_INITIALIZED;
+                        irq = dasd_info[devindex]->info.irq;
+                        PRINT_INFO ("Trying to access DASD %x, irq %x, index %d\n",
+                                    get_devno_by_irq(irq), irq, devindex);
+                        s390irq_spin_lock_irqsave (irq, flags);
+                        q = &dasd_info[devindex]->queue;
+                        cqr = dasd_disciplines[dasd_info[devindex]->type]->
+                            fill_sizes_first (devindex);
+                        dasd_chanq_enq (q, cqr);
+                        schedule_bh(dasd_do_chanq);
+                        s390irq_spin_unlock_irqrestore (irq, flags);
+                }
+        }
+        up (&dasd_info[devindex]->sem);
+        return rc;
+}
+
+
+/*
+ * register_dasd
+ * Sets up the per-device dasd_information_t for a newly detected DASD,
+ * reads and checks the device characteristics, registers the interrupt
+ * handler and queues the discipline's first analysis request.
+ * Returns 0 on success or a negative errno.
+ * FIX: all error paths now leave through the exit label so that
+ * register_lock is always released (three early returns used to leave
+ * the spinlock held forever); the unregister path now reports -ENODEV
+ * instead of silently returning 0; dasd_info[di] is reset to NULL
+ * after kfree so no dangling pointer stays in the global array.
+ */
+static int
+register_dasd (int irq, dasd_type_t dt, dev_info_t * info)
+{
+        int rc = 0;
+        int di;
+        unsigned long flags;
+        dasd_chanq_t *q;
+        cqr_t * cqr;
+        static spinlock_t register_lock = SPIN_LOCK_UNLOCKED;
+        spin_lock (&register_lock);
+        FUNCTION_ENTRY ("register_dasd");
+        di = devindex_from_devno (info->devno);
+        if (di < 0) {
+                INTERNAL_CHECK ("Can't get index for devno %d\n", info->devno);
+                rc = -ENODEV;
+                goto exit;
+        }
+        if (dasd_info[di]) {        /* devindex is not free */
+                INTERNAL_CHECK ("reusing allocated deviceindex %d\n", di);
+                rc = -ENODEV;
+                goto exit;
+        }
+        dasd_info[di] = (dasd_information_t *)
+            kmalloc (sizeof (dasd_information_t), GFP_ATOMIC);
+        if (dasd_info[di] == NULL) {
+                PRINT_WARN ("No memory for dasd_info_t on irq %d\n", irq);
+                rc = -ENOMEM;
+                goto exit;
+        }
+        memset (dasd_info[di], 0, sizeof (dasd_information_t));
+        memcpy (&(dasd_info[di]->info), info, sizeof (dev_info_t));
+        spin_lock_init (&dasd_info[di]->queue.f_lock);
+        spin_lock_init (&dasd_info[di]->queue.q_lock);
+        dasd_info[di]->type = dt;
+        dasd_info[di]->irq = irq;
+        init_MUTEX (&dasd_info[di]->sem);
+        rc = dasd_read_characteristics (dasd_info[di]);
+        if (rc) {
+                PRINT_WARN ("RDC returned error %d\n", rc);
+                rc = -ENODEV;
+                goto unalloc;
+        }
+#if DASD_PARANOIA > 1
+        if (dasd_disciplines[dt]->ck_characteristics)
+#endif /* DASD_PARANOIA */
+                rc = dasd_disciplines[dt]->
+                    ck_characteristics (dasd_info[di]->rdc_data);
+
+        if (rc) {
+                INTERNAL_CHECK ("Discipline returned non-zero when"
+                                "checking device characteristics%s\n", "");
+                rc = -ENODEV;
+                goto unalloc;
+        }
+        rc = request_irq (irq, dasd_handler, 0, "dasd",
+                          &(dasd_info[di]->dev_status));
+        if (rc) {
+#if DASD_PARANOIA > 0
+                printk (KERN_WARNING PRINTK_HEADER
+                        "Cannot register irq %d, rc=%d\n",
+                        irq, rc);
+#endif /* DASD_PARANOIA */
+                rc = -ENODEV;
+                goto unalloc;
+        }
+#if DASD_PARANOIA > 1
+        if (!dasd_disciplines[dt]->fill_sizes_first) {
+                INTERNAL_CHECK ("No fill_sizes for dt=%d\n", dt);
+                rc = -ENODEV;        /* FIX: used to fall through with rc == 0 */
+                goto unregister;
+        }
+#endif /* DASD_PARANOIA */
+        irq = dasd_info[di]->info.irq;
+        PRINT_INFO ("Trying to access DASD %x, irq %x, index %d\n",
+                    get_devno_by_irq(irq), irq, di);
+        s390irq_spin_lock_irqsave (irq, flags);
+        q = &dasd_info[di]->queue;
+        cqr = dasd_disciplines[dt]->fill_sizes_first (di);
+        dasd_chanq_enq (q, cqr);
+        cql_enq_head(q);
+        if (dasd_start_IO(cqr) != 0) {
+                atomic_inc(&chanq_tasks);
+        }
+        s390irq_spin_unlock_irqrestore (irq, flags);
+
+        goto exit;
+
+      unregister:
+        free_irq (irq, &(dasd_info[di]->dev_status));
+      unalloc:
+        kfree (dasd_info[di]);
+        dasd_info[di] = NULL;        /* FIX: don't leave a dangling pointer */
+      exit:
+        spin_unlock (&register_lock);
+        FUNCTION_EXIT ("register_dasd");
+        return rc;
+}
+
+/*
+ * probe_for_dasd
+ * Probes one irq (subchannel) for a usable DASD and registers it.
+ * Returns 0 when a device was added, -ENODEV when there is no (usable)
+ * device on this irq, or another negative errno from registration.
+ */
+static int
+probe_for_dasd (int irq)
+{
+        int rc;
+        dev_info_t info;
+        dasd_type_t dt;
+
+        FUNCTION_ENTRY ("probe_for_dasd");
+
+        rc = get_dev_info_by_irq (irq, &info);
+        if (rc == -ENODEV) {        /* end of device list */
+                return rc;
+        }
+#if DASD_PARANOIA > 2
+        if (rc) {
+                INTERNAL_CHECK ("unknown rc %d of get_dev_info", rc);
+                return rc;
+        }
+#endif /* DASD_PARANOIA */
+        if ((info.status & DEVSTAT_NOT_OPER)) {
+                return -ENODEV;
+        }
+        dt = check_type (&info);
+        /* NOTE(review): without CONFIG_DASD_ECKD no case label guards the
+           probe/register code below, leaving it unreachable -- verify
+           that this matches the intended configuration behavior */
+        switch (dt) {
+#ifdef CONFIG_DASD_ECKD
+        case dasd_eckd:
+#endif /* CONFIG_DASD_ECKD */
+                FUNCTION_CONTROL ("Probing devno %d...\n", info.devno);
+                if (!dasd_is_accessible (info.devno)) {
+                        FUNCTION_CONTROL ("out of range...skip%s\n", "");
+                        return -ENODEV;
+                }
+                if (dasd_disciplines[dt]->ck_devinfo) {
+                        rc = dasd_disciplines[dt]->ck_devinfo (&info);
+                }
+#if DASD_PARANOIA > 1
+                else {
+                        INTERNAL_ERROR ("no ck_devinfo function%s\n", "");
+                        return -ENODEV;
+                }
+#endif /* DASD_PARANOIA */
+                if (rc == -ENODEV) {
+                        return rc;
+                }
+#if DASD_PARANOIA > 2
+                if (rc) {
+                        INTERNAL_CHECK ("unknown error rc=%d\n", rc);
+                        return -ENODEV;
+                }
+#endif /* DASD_PARANOIA */
+                rc = register_dasd (irq, dt, &info);
+                if (rc) {
+                        PRINT_INFO ("devno %x not enabled as minor %d due to errors\n",
+                                    info.devno,
+                                    devindex_from_devno (info.devno) <<
+                                    PARTN_BITS);
+                } else {
+                        PRINT_INFO ("devno %x added as minor %d (%s)\n",
+                                    info.devno,
+                                    devindex_from_devno (info.devno) << PARTN_BITS,
+                                    dasd_name[dt]);
+                }
+                /* fallthrough into dasd_none (which only breaks) */
+        case dasd_none:
+                break;
+        default:
+                PRINT_DEBUG ("unknown device type\n");
+                break;
+        }
+        FUNCTION_EXIT ("probe_for_dasd");
+        return rc;
+}
+
+/*
+ * register_major
+ * Registers the DASD major number with the block layer and attaches
+ * the request function to its default queue.
+ * Returns 0 on success or the error from devfs_register_blkdev.
+ */
+static int
+register_major (int major)
+{
+        request_queue_t *queue;
+        int rc;
+
+        FUNCTION_ENTRY ("register_major");
+        rc = devfs_register_blkdev (major, DASD_NAME, &dasd_device_operations);
+#if DASD_PARANOIA > 1
+        if (rc) {
+                PRINT_WARN ("registering major -> rc=%d aborting... \n", rc);
+                return rc;
+        }
+#endif /* DASD_PARANOIA */
+        queue = BLK_DEFAULT_QUEUE (major);
+        blk_init_queue (queue, do_dasd_request);
+        /* requests are dequeued before being processed */
+        blk_queue_headactive (queue, 0);
+        FUNCTION_CONTROL ("successfully registered major: %d\n", major);
+        FUNCTION_EXIT ("register_major");
+        return rc;
+}
+
+/*
+ Below you find functions which are called from outside. Some of them may be
+ static, because they are called by their function pointers only. Thus static
+ modifier is to make sure, that they are only called via the kernel's methods
+ */
+
+/*
+ * dasd_ioctl
+ * Block device ioctl entry point: validates the inode and delegates
+ * all real work to do_dasd_ioctl.
+ */
+static int
+dasd_ioctl (struct inode *inp, struct file *filp,
+            unsigned int no, unsigned long data)
+{
+        int rc;
+
+        FUNCTION_ENTRY ("dasd_ioctl");
+        if (!inp || !inp->i_rdev) {
+                return -EINVAL;
+        }
+        rc = do_dasd_ioctl (inp, no, data);
+        FUNCTION_EXIT ("dasd_ioctl");
+        return rc;
+}
+
+/*
+ * dasd_open
+ * Block device open entry point.  Waits on the device semaphore so an
+ * open cannot overtake a format/initialization in progress, then bumps
+ * the open count.
+ * FIX: the paranoia error path used to return -EINVAL after
+ * MOD_INC_USE_COUNT without rebalancing the module use count.
+ */
+static int
+dasd_open (struct inode *inp, struct file *filp)
+{
+        int rc = 0;
+        dasd_information_t *dev;
+        FUNCTION_ENTRY ("dasd_open");
+        if ((!inp) || !(inp->i_rdev)) {
+                return -EINVAL;
+        }
+        dev = dasd_info[DEVICE_NR (inp->i_rdev)];
+        if (!dev) {
+                PRINT_DEBUG ("No device registered as %d (%d)\n",
+                             inp->i_rdev, DEVICE_NR (inp->i_rdev));
+                return -EINVAL;
+        }
+        /* wait for a concurrent format/init holding the semaphore;
+           we do not need to keep it held afterwards */
+        down (&dev->sem);
+        up (&dev->sem);
+#ifdef MODULE
+        MOD_INC_USE_COUNT;
+#endif /* MODULE */
+#if DASD_PARANOIA > 2
+        if (dev->open_count < 0) {
+                INTERNAL_ERROR ("open count cannot be less than 0: %d",
+                                dev->open_count);
+#ifdef MODULE
+                MOD_DEC_USE_COUNT;        /* FIX: rebalance the use count */
+#endif /* MODULE */
+                return -EINVAL;
+        }
+#endif /* DASD_PARANOIA */
+        dev->open_count++;
+        FUNCTION_EXIT ("dasd_open");
+        return rc;
+}
+
+/*
+ * dasd_release
+ * Block device release (close) entry point: drops the module use
+ * count and decrements the per-device open count.
+ */
+static int
+dasd_release (struct inode *inp, struct file *filp)
+{
+        int rc = 0;
+        dasd_information_t *dev;
+        FUNCTION_ENTRY ("dasd_release");
+        if ((!inp) || !(inp->i_rdev)) {
+                return -EINVAL;
+        }
+        dev = dasd_info[DEVICE_NR (inp->i_rdev)];
+        if (!dev) {
+                PRINT_WARN ("No device registered as %d\n", inp->i_rdev);
+                return -EINVAL;
+        }
+#ifdef MODULE
+        MOD_DEC_USE_COUNT;
+#endif /* MODULE */
+#if DASD_PARANOIA > 2
+        /* closing a device that was never opened is only warned about */
+        if (!dev->open_count) {
+                PRINT_WARN ("device %d has not been opened before:\n",
+                            inp->i_rdev);
+        }
+#endif /* DASD_PARANOIA */
+        dev->open_count--;
+#if DASD_PARANOIA > 2
+        if (dev->open_count < 0) {
+                INTERNAL_ERROR ("open count cannot be less than 0: %d",
+                                dev->open_count);
+                return -EINVAL;
+        }
+#endif /* DASD_PARANOIA */
+        FUNCTION_EXIT ("dasd_release");
+        return rc;
+}
+
+/*
+ * File operations for DASD block devices (GNU C labeled-element
+ * initializer syntax, as used throughout this kernel tree).
+ * Unlisted operations are provided by the generic block layer.
+ */
+static struct
+block_device_operations dasd_device_operations =
+{
+        ioctl: dasd_ioctl,
+        open: dasd_open,
+        release: dasd_release,
+};
+
+/*
+ * dasd_init
+ * Driver initialization: registers the major number, sets up the
+ * global block-layer parameter arrays, probes every irq for DASD
+ * devices and finally runs partition detection on each registered
+ * device.  Returns 0 on success.
+ */
+int
+dasd_init (void)
+{
+        int rc = 0;
+        int i;
+
+        FUNCTION_ENTRY ("dasd_init");
+        PRINT_INFO ("initializing...\n");
+        atomic_set (&chanq_tasks, 0);
+        atomic_set (&bh_scheduled, 0);
+        spin_lock_init (&dasd_lock);
+        init_waitqueue_head(&dasd_waitq);
+        /* First register to the major number */
+        rc = register_major (MAJOR_NR);
+#if DASD_PARANOIA > 1
+        if (rc) {
+                PRINT_WARN ("registering major_nr returned rc=%d\n", rc);
+                return rc;
+        }
+#endif /* DASD_PARANOIA */
+        /* per-major block layer parameter arrays */
+        read_ahead[MAJOR_NR] = 8;
+        blk_size[MAJOR_NR] = dasd_blks;
+        hardsect_size[MAJOR_NR] = dasd_secsize;
+        blksize_size[MAJOR_NR] = dasd_blksize;
+        max_sectors[MAJOR_NR] = dasd_maxsecs;
+#ifdef CONFIG_PROC_FS
+        dasd_proc_init ();
+#endif /* CONFIG_PROC_FS */
+        /* Now scan the device list for DASDs */
+        FUNCTION_CONTROL ("entering detection loop%s\n", "");
+        for (i = 0; i < NR_IRQS; i++) {
+                int irc;        /* Internal return code */
+                LOOP_CONTROL ("Probing irq %d...\n", i);
+                irc = probe_for_dasd (i);
+                switch (irc) {
+                case 0:
+                        LOOP_CONTROL ("Added DASD%s\n", "");
+                        break;
+                case -ENODEV:
+                        LOOP_CONTROL ("No DASD%s\n", "");
+                        break;
+                case -EMEDIUMTYPE:
+                        PRINT_WARN ("DASD not formatted%s\n", "");
+                        break;
+                default:
+                        INTERNAL_CHECK ("probe_for_dasd: unknown rc=%d", irc);
+                        break;
+                }
+        }
+        FUNCTION_CONTROL ("detection loop completed %s partn check...\n", "");
+/* Finally do the genhd stuff */
+        dd_gendisk.next = gendisk_head;
+        gendisk_head = &dd_gendisk;
+        for ( i = 0; i < DASD_MAX_DEVICES; i ++ )
+                if ( dasd_info[i] )
+                        dasd_partn_detect ( i );
+
+        FUNCTION_EXIT ("dasd_init");
+        return rc;
+}
+
+#ifdef MODULE
+/*
+ * init_module
+ * Module entry point: runs the common driver initialization and
+ * reports the outcome.
+ */
+int
+init_module (void)
+{
+        int rc;
+
+        FUNCTION_ENTRY ("init_module");
+        PRINT_INFO ("trying to load module\n");
+        rc = dasd_init ();
+        if (rc != 0) {
+                PRINT_WARN ("warning: Module load returned rc=%d\n", rc);
+        } else {
+                PRINT_INFO ("module loaded successfully\n");
+        }
+        FUNCTION_EXIT ("init_module");
+        return rc;
+}
+
+/*
+ * cleanup_module
+ * Module exit point.  Unloading is not implemented yet: the
+ * INTERNAL_ERROR below fires unconditionally and no resources are
+ * released (rc stays 0, so the success message is always printed).
+ */
+void
+cleanup_module (void)
+{
+        int rc = 0;
+
+        FUNCTION_ENTRY ("cleanup_module");
+        PRINT_INFO ("trying to unload module \n");
+
+        /* FIXME: replace by proper unload functionality */
+        INTERNAL_ERROR ("Modules not yet implemented %s", "");
+
+        if (rc == 0) {
+                PRINT_INFO ("module unloaded successfully\n");
+        } else {
+                PRINT_WARN ("module unloaded with errors\n");
+        }
+        FUNCTION_EXIT ("cleanup_module");
+}
+#endif /* MODULE */
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_ccwstuff.c b/drivers/s390/block/dasd_ccwstuff.c
new file mode 100644
index 000000000..4f666da28
--- /dev/null
+++ b/drivers/s390/block/dasd_ccwstuff.c
@@ -0,0 +1,419 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_ccwstuff.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+#include <linux/dasd.h>
+#include <asm/atomic.h>
+
+#include "dasd_types.h"
+
+#define PRINTK_HEADER "dasd_ccw:"
+#define MAX_CP_POWER 9 /* Maximum allowed index */
+#define CP_PER_PAGE_POWER 9 /* Maximum index, fitting on page */
+
+#define get_free_pages __get_free_pages
+
+/* Stuff for the handling task_list */
+dasd_chanq_t *cq_head = NULL; /* head of task_list */
+atomic_t chanq_tasks;
+
+/* Array of freelists for the channel programs' -space */
+static ccw1_t *ccwarea[CP_PER_PAGE_POWER + 1] =
+{NULL,};
+
+/* array of pages retrieved for internal use */
+#define MAX_DASD_PAGES 64
+static int dasd_page_count = 0;
+static long dasd_page[MAX_DASD_PAGES];
+
+static spinlock_t ccw_lock=SPIN_LOCK_UNLOCKED; /* spinlock for ccwareas */
+static spinlock_t cq_lock=SPIN_LOCK_UNLOCKED; /* spinlock for cq_head */
+
+/*
+ * ccwarea_enq
+ * Pushes a CCW area onto the size-class freelist ccwarea[index].
+ * The list is intrusive: the first pointer-sized bytes of the free
+ * area itself hold the link to the next free area.
+ * Callers serialize via ccw_lock (see request_cp / release_cp).
+ */
+void
+ccwarea_enq (int index, ccw1_t * area)
+{
+        FUNCTION_ENTRY ("ccwarea_enq");
+#if DASD_PARANOIA > 2
+        if (!area) {
+                INTERNAL_CHECK ("zero area %s\n", "");
+        }
+        if (index > CP_PER_PAGE_POWER) {
+                INTERNAL_CHECK ("index too large %d\n", index);
+        }
+#endif
+        *(ccw1_t **) area = ccwarea[index];
+        ccwarea[index] = area;
+        FUNCTION_EXIT ("ccwarea_enq");
+        return;
+}
+
+/*
+ * ccwarea_deq
+ * Pops a CCW area from the freelist of the given size-class index.
+ * NOTE(review): no empty-list check -- the caller (request_cpa) must
+ * guarantee the list is non-empty, otherwise this dereferences NULL.
+ * Callers serialize via ccw_lock.
+ */
+ccw1_t *
+ccwarea_deq (int index)
+{
+        ccw1_t *cp;
+        FUNCTION_ENTRY ("ccwarea_deq");
+#if DASD_PARANOIA > 2
+        if (index > CP_PER_PAGE_POWER) {
+                INTERNAL_CHECK ("index too large %d\n", index);
+        }
+#endif
+        cp = ccwarea[index];
+        /* advance the head via the intrusive link stored in the area */
+        ccwarea[index] = *(ccw1_t **) ccwarea[index];
+#if DASD_PARANOIA > 2
+        if (!cp) {
+                INTERNAL_CHECK ("returning NULL %s\n", "");
+        }
+#endif
+        FUNCTION_EXIT ("ccwarea_deq");
+        return cp;
+}
+
+/*
+ * request_cpa
+ * Returns a zeroed CCW area of 2^index CCWs.  Sizes up to one page are
+ * served from per-index freelists that are refilled by recursively
+ * splitting the next larger size class (buddy-style); sizes above one
+ * page go straight to the page allocator.
+ * NOTE(review): allocation failure busy-loops (and panics in the
+ * multi-page case) instead of returning NULL -- verify this is
+ * acceptable in all calling contexts.
+ * Callers serialize via ccw_lock (see request_cp).
+ */
+ccw1_t *
+request_cpa (int index)
+{
+        ccw1_t *freeblk;
+        FUNCTION_ENTRY ("request_cpa");
+        if (index > MAX_CP_POWER) {
+                INTERNAL_ERROR ("index too large %d\n", index);
+                freeblk = NULL;
+                goto exit;
+        }
+        if (index > CP_PER_PAGE_POWER) {
+                /* larger than one page: allocate pages directly */
+                int pc = 1 << (index - CP_PER_PAGE_POWER);
+                do {
+                        freeblk = (ccw1_t *) get_free_pages (GFP_ATOMIC, index - CP_PER_PAGE_POWER);
+                        if (dasd_page_count + pc >= MAX_DASD_PAGES) {
+                                PRINT_WARN ("Requesting too many pages...");
+                        } else {
+                                int i;
+                                /* record the pages for later cleanup */
+                                for (i = 0; i < pc; i++)
+                                        dasd_page[dasd_page_count++] =
+                                            (long) freeblk + i * PAGE_SIZE;
+                        }
+                        FUNCTION_CONTROL ("requesting index %d", index);
+                        if ( ! freeblk ) {
+                                panic ("No memory received\n");
+                        }
+                } while (!freeblk);
+                memset(freeblk,0,PAGE_SIZE<<(index-CP_PER_PAGE_POWER));
+                goto exit;
+        }
+        /* refill this size class by splitting bigger blocks */
+        while (ccwarea[index] == NULL) {
+                ccw1_t *blk;
+                if (index == CP_PER_PAGE_POWER) {
+                        /* top of the buddy hierarchy: fetch a fresh page */
+                        do {
+                                blk = (ccw1_t *) get_free_page (GFP_ATOMIC);
+                                if (dasd_page_count + 1 >= MAX_DASD_PAGES) {
+                                        PRINT_WARN ("Requesting too many pages...");
+                                } else {
+                                        dasd_page[dasd_page_count++] =
+                                            (long) blk;
+                                }
+                                if (blk == NULL) {
+                                        PRINT_WARN ("Can't allocate page!\n");
+                                }
+                        } while ( ! blk );
+                        memset(blk,0,PAGE_SIZE);
+                        ccwarea_enq (CP_PER_PAGE_POWER, blk);
+                        continue;
+                }
+                blk = request_cpa (index + 1);
+#if DASD_PARANOIA > 1
+                if (!blk) {
+                        PRINT_WARN ("retrieved NULL");
+                }
+#endif /* DASD_PARANOIA */
+                /* split the larger block into two buddies of this size */
+                ccwarea_enq (index, blk);
+                ccwarea_enq (index, blk + (1 << index));
+        }
+#if DASD_PARANOIA > 2
+        if (!ccwarea[index]) {
+                INTERNAL_ERROR ("ccwarea is NULL\n%s", "");
+        }
+#endif /* DASD_PARANOIA */
+
+        freeblk = ccwarea_deq (index);
+#if DASD_PARANOIA > 1
+        if (!freeblk) {
+                INTERNAL_ERROR ("freeblk is NULL\n%s", "");
+        }
+#endif /* DASD_PARANOIA */
+      exit:
+        FUNCTION_EXIT ("request_cpa");
+        return freeblk;
+}
+
+/*
+ * request_cp
+ * Returns a CCW area large enough to hold size CCWs; size is rounded
+ * up to the next power of two.  Access to the freelists is serialized
+ * by ccw_lock.  Returns NULL only when the rounded size exceeds
+ * MAX_CP_POWER (request_cpa rejects the index).
+ */
+ccw1_t *
+request_cp (int size)
+{
+        ccw1_t *freeblk;
+        int index;
+        int blksize;
+        /* Determine the index of ccwarea to look at */
+        for (index = 0, blksize = 1;
+             size > blksize;
+             index++, blksize = blksize << 1) {
+        }
+        if (index > MAX_CP_POWER) {
+                INTERNAL_ERROR ("index too large %d\n", index);
+        }
+        spin_lock (&ccw_lock);
+        freeblk = request_cpa (index);
+        spin_unlock (&ccw_lock);
+        if (freeblk == NULL) {
+                printk (KERN_WARNING PRINTK_HEADER
+                        "No way to deliver free ccw space\n");
+        }
+        return freeblk;
+}
+
+/*
+ * release_cp
+ * Returns a CCW area of (rounded-up) size CCWs: multi-page areas go
+ * back to the page allocator, page-or-smaller areas back onto the
+ * freelist under ccw_lock.
+ */
+void
+release_cp (int size, ccw1_t * area)
+{
+        int index;
+        int blksize;
+        /* Determine the index of ccwarea to look at */
+        for (index = 0, blksize = 1;
+             size > blksize;
+             index++, blksize = blksize << 1) {
+        }
+        if (index > MAX_CP_POWER) {
+                INTERNAL_ERROR ("index too large %d\n", index);
+        } else if (index > CP_PER_PAGE_POWER) {
+                free_pages ((unsigned long) area,
+                            index - CP_PER_PAGE_POWER);
+                INTERNAL_CHECK ("large index used: %d\n", index);
+        } else {
+                spin_lock (&ccw_lock);
+                ccwarea_enq (index, area);
+                spin_unlock (&ccw_lock);
+        }
+        return;
+}
+
+/* ---------------------------------------------------------- */
+
+static cqr_t *cqrp = NULL;
+static spinlock_t cqr_lock=SPIN_LOCK_UNLOCKED;
+
+/*
+ * cqf_enq
+ * Pushes a free cqr onto the intrusive freelist: the first
+ * pointer-sized bytes of the free element hold the link.
+ * Callers serialize via cqr_lock.
+ */
+void
+cqf_enq (cqr_t * cqf)
+{
+        *(cqr_t **) cqf = cqrp;
+        cqrp = cqf;
+}
+
+/*
+ * cqf_deq
+ * Pops a free cqr from the freelist.  The caller (request_cq) must
+ * ensure the list is non-empty; callers serialize via cqr_lock.
+ */
+cqr_t *
+cqf_deq (void)
+{
+        cqr_t *cqr = cqrp;
+        cqrp = *(cqr_t **) cqrp;
+        return cqr;
+}
+
+/*
+ * request_cq
+ * Takes a free cqr_t from the freelist, refilling the list with a
+ * freshly allocated, zeroed page of cqr_t slots whenever it is empty.
+ * Loops until the page allocation succeeds, so it never returns NULL.
+ * FIX: the slot count per page was hard-coded as 4096; use PAGE_SIZE,
+ * matching the get_free_page()/memset(..., PAGE_SIZE) above.
+ */
+cqr_t *
+request_cq (void)
+{
+        cqr_t *cqr = NULL;
+        int i;
+        cqr_t *area;
+
+        spin_lock (&cqr_lock);
+        while (cqrp == NULL) {
+                do {
+                        area = (cqr_t *) get_free_page (GFP_ATOMIC);
+                        if (area == NULL) {
+                                printk (KERN_WARNING PRINTK_HEADER
+                                        "No memory for chanq area\n");
+                        }
+                } while ( ! area );
+                memset(area,0,PAGE_SIZE);
+                /* record the page for later cleanup */
+                if (dasd_page_count + 1 >= MAX_DASD_PAGES) {
+                        PRINT_WARN ("Requesting too many pages...");
+                } else {
+                        dasd_page[dasd_page_count++] =
+                            (long) area;
+                }
+                for (i = 0; i < PAGE_SIZE / sizeof (cqr_t); i++) {
+                        cqf_enq (area + i);
+                }
+        }
+        cqr = cqf_deq ();
+        spin_unlock (&cqr_lock);
+        return cqr;
+}
+
+/*
+ * release_cq
+ * Returns a cqr slot to the freelist under cqr_lock.
+ */
+void
+release_cq (cqr_t * cqr)
+{
+        spin_lock (&cqr_lock);
+        cqf_enq (cqr);
+        spin_unlock (&cqr_lock);
+        return;
+}
+
+/* ----------------------------------------------------------- */
+/* ----------------------------------------------------------- */
+/*
+ * request_cqr
+ * Allocates a channel-queue request with an optional channel program
+ * of cpsize CCWs and an optional zeroed data area of datasize bytes.
+ * Returns the initialized cqr (magic set) or NULL when either the cqr
+ * slot or the channel program could not be obtained.
+ * NOTE(review): the kmalloc retry loop spins forever on persistent
+ * GFP_ATOMIC failure -- verify against the calling contexts.
+ */
+cqr_t *
+request_cqr (int cpsize, int datasize)
+{
+        cqr_t *cqr = NULL;
+        cqr = request_cq ();
+        if (cqr == NULL) {
+                printk (KERN_WARNING PRINTK_HEADER __FILE__
+                        "No memory for chanq request\n");
+                goto exit;
+        }
+        memset (cqr, 0, sizeof (cqr_t));
+        cqr -> magic = DASD_MAGIC;
+        if (cpsize) {
+                cqr->cpaddr = request_cp (cpsize);
+                if (cqr->cpaddr == NULL) {
+                        printk (KERN_WARNING PRINTK_HEADER __FILE__
+                                "No memory for channel program\n");
+                        goto nocp;
+                }
+                cqr->cplength = cpsize;
+        }
+        if (datasize) {
+                do {
+                        cqr->data = (char *) kmalloc (datasize, GFP_ATOMIC);
+                        if (cqr->data == NULL) {
+                                printk (KERN_WARNING PRINTK_HEADER __FILE__
+                                        "No memory for cqr data area\n");
+                        }
+                } while (!cqr->data);
+                memset (cqr->data,0,datasize);
+        }
+        goto exit;
+      nocp:
+        release_cq (cqr);
+        cqr = NULL;
+      exit:
+        return cqr;
+}
+
+/*
+ * release_cqr
+ * Frees a request's data area, saved devstat and channel program, and
+ * returns the cqr itself to the freelist.
+ * Returns 0, or -ENOENT for a NULL argument.
+ */
+int
+release_cqr (cqr_t * cqr)
+{
+        int rc = 0;
+        if (cqr == NULL) {
+                rc = -ENOENT;
+                return rc;
+        }
+        if (cqr->data) {
+                kfree (cqr->data);
+        }
+        if (cqr->dstat) {
+                kfree (cqr->dstat);
+        }
+        if (cqr->cpaddr) {
+                release_cp (cqr->cplength, cqr->cpaddr);
+        }
+        /* presumably the "dead" magic marking a freed cqr -- confirm
+           against the definition of dasd_MAGIC in dasd_types.h */
+        cqr -> magic = dasd_MAGIC;
+        release_cq (cqr);
+        return rc;
+}
+
+/* -------------------------------------------------------------- */
+/* -------------------------------------------------------------- */
+/*
+ * dasd_chanq_enq
+ * Appends cqr to the tail of the per-device channel queue q and moves
+ * its status from FILLED to QUEUED (forcing it with a warning if the
+ * status was something else).  Caller must serialize queue access.
+ */
+void
+dasd_chanq_enq (dasd_chanq_t * q, cqr_t * cqr)
+{
+        if (q->head != NULL) {
+                q->tail->next = cqr;
+        } else
+                q->head = cqr;
+        cqr->next = NULL;
+        q->tail = cqr;
+        q->queued_requests ++;
+        if (atomic_compare_and_swap(CQR_STATUS_FILLED,
+                                    CQR_STATUS_QUEUED,
+                                    &cqr->status)) {
+                PRINT_WARN ("q_cqr: %p status changed %d\n",
+                            cqr,atomic_read(&cqr->status));
+                atomic_set(&cqr->status,CQR_STATUS_QUEUED);
+        }
+}
+
+/*
+ * dasd_chanq_deq
+ * Unlinks cqr from q and releases it via release_cqr -- the request
+ * is gone after a successful call.  Returns -ENOENT when cqr is NULL
+ * or not on the queue, otherwise the result of release_cqr.
+ * Caller must serialize queue access.
+ */
+int
+dasd_chanq_deq (dasd_chanq_t * q, cqr_t * cqr)
+{
+        cqr_t *prev;
+
+        if (cqr == NULL)
+                return -ENOENT;
+        if (cqr == (cqr_t *) q->head) {
+                q->head = cqr->next;
+                if (q->head == NULL)
+                        q->tail = NULL;
+        } else {
+                /* search the predecessor of cqr in the list */
+                prev = (cqr_t *) q->head;
+                while (prev && prev->next != cqr)
+                        prev = prev->next;
+                if (prev == NULL)
+                        return -ENOENT;
+                prev->next = cqr->next;
+                if (prev->next == NULL)
+                        q->tail = prev;
+        }
+        cqr->next = NULL;
+        q->queued_requests --;
+        return release_cqr(cqr);
+}
+
+/* -------------------------------------------------------------------------- */
+/* -------------------------------------------------------------------------- */
+/*
+ * cql_enq_head
+ * Marks a per-device channel queue active and links it at the head of
+ * the global list of active queues (cq_head).
+ * FIX: the ACTIVE-flag test is now performed under cq_lock; previously
+ * two racing callers could both pass the check and enqueue the same
+ * queue twice.
+ */
+void
+cql_enq_head (dasd_chanq_t * q)
+{
+        if (q == NULL) {
+                INTERNAL_ERROR ("NULL queue passed%s\n", "");
+                return;
+        }
+        spin_lock(&cq_lock);
+        if (atomic_read(&q->flags) & DASD_CHANQ_ACTIVE) {
+                PRINT_WARN("Queue already active");
+                spin_unlock(&cq_lock);
+                return;
+        }
+        atomic_set_mask(DASD_CHANQ_ACTIVE,&q->flags);
+        q->next_q = cq_head;
+        cq_head = q;
+        spin_unlock(&cq_lock);
+}
+
+/*
+ * cql_deq
+ * Unlinks a per-device channel queue from the global active list and
+ * clears its ACTIVE flag.
+ * FIX: the cq_head emptiness test is now performed under cq_lock;
+ * previously it raced with concurrent enqueues/dequeues.
+ */
+void
+cql_deq (dasd_chanq_t * q)
+{
+        dasd_chanq_t *c;
+
+        if (q == NULL) {
+                INTERNAL_ERROR ("NULL queue passed%s\n", "");
+                return;
+        }
+        spin_lock(&cq_lock);
+        if (cq_head == NULL) {
+                INTERNAL_ERROR ("Channel queue is empty%s\n", "");
+                spin_unlock(&cq_lock);
+                return;
+        }
+        if (! (atomic_read(&q->flags) & DASD_CHANQ_ACTIVE)) {
+                PRINT_WARN("Queue not active\n");
+        }
+        else if (cq_head == q) {
+                cq_head = q->next_q;
+        } else {
+                /* search the predecessor of q in the active list */
+                c = cq_head;
+                while (c->next_q && c->next_q != q)
+                        c = c->next_q;
+                if (c->next_q != q)
+                        INTERNAL_ERROR ("Entry not in queue%s\n", "");
+                else
+                        c->next_q = q->next_q;
+        }
+        q->next_q = NULL;
+        atomic_clear_mask(DASD_CHANQ_ACTIVE,&q->flags);
+        spin_unlock(&cq_lock);
+}
diff --git a/drivers/s390/block/dasd_ccwstuff.h b/drivers/s390/block/dasd_ccwstuff.h
new file mode 100644
index 000000000..611777e17
--- /dev/null
+++ b/drivers/s390/block/dasd_ccwstuff.h
@@ -0,0 +1,9 @@
+/* Interfaces exported by dasd_ccwstuff.c */
+extern atomic_t chanq_tasks;
+extern dasd_chanq_t *cq_head;
+
+cqr_t *request_cqr (int, int);
+int release_cqr (cqr_t *);
+/* FIX: dasd_chanq_enq is defined returning void in dasd_ccwstuff.c;
+   the declaration previously said int */
+void dasd_chanq_enq (dasd_chanq_t *, cqr_t *);
+int dasd_chanq_deq (dasd_chanq_t *, cqr_t *);
+void cql_enq_head (dasd_chanq_t * q);
+void cql_deq (dasd_chanq_t * q);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
new file mode 100644
index 000000000..30e41f815
--- /dev/null
+++ b/drivers/s390/block/dasd_eckd.c
@@ -0,0 +1,973 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_eckd.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+
+#ifdef MODULE
+#include <linux/module.h>
+#endif /* MODULE */
+
+#include <linux/malloc.h>
+#include <linux/dasd.h>
+#include <asm/io.h>
+
+#include <asm/irq.h>
+
+#include "dasd_types.h"
+#include "dasd_ccwstuff.h"
+
+
+#ifdef PRINTK_HEADER
+#undef PRINTK_HEADER
+#endif /* PRINTK_HEADER */
+#define PRINTK_HEADER "dasd(eckd):"
+
+#define ECKD_C0(i) (i->home_bytes)
+#define ECKD_F(i) (i -> formula)
+#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):(i->factors.f_0x02.f1))
+#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):(i->factors.f_0x02.f2))
+#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):(i->factors.f_0x02.f3))
+#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
+#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
+#define ECKD_F6(i) (i -> factor6)
+#define ECKD_F7(i) (i -> factor7)
+#define ECKD_F8(i) (i -> factor8)
+
+#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
+
+#define DASD_ECKD_CCW_READ_HOME_ADDRESS 0x0a
+#define DASD_ECKD_CCW_WRITE_HOME_ADDRESS 0x09
+
+#define DASD_ECKD_CCW_READ_RECORD_ZERO 0x16
+#define DASD_ECKD_CCW_WRITE_RECORD_ZERO 0x15
+
+#define DASD_ECKD_CCW_READ_COUNT 0x12
+#define DASD_ECKD_CCW_READ 0x06
+#define DASD_ECKD_CCW_READ_MT 0x86
+#define DASD_ECKD_CCW_WRITE 0x05
+#define DASD_ECKD_CCW_WRITE_MT 0x85
+#define DASD_ECKD_CCW_READ_CKD 0x1e
+#define DASD_ECKD_CCW_READ_CKD_MT 0x9e
+#define DASD_ECKD_CCW_WRITE_CKD 0x1d
+#define DASD_ECKD_CCW_WRITE_CKD_MT 0x9d
+
+/* cylinder/head track address as used in the ECKD CCW data below */
+typedef
+struct {
+        __u16 cyl;
+        __u16 head;
+} __attribute__ ((packed))
+
+ch_t;
+
+/* cylinder/head/sector address */
+typedef
+struct {
+        __u16 cyl;
+        __u16 head;
+        __u32 sector;
+} __attribute__ ((packed))
+
+chs_t;
+
+/* cylinder/head/record address (used as LOCATE RECORD search argument) */
+typedef
+struct {
+        __u16 cyl;
+        __u16 head;
+        __u8 record;
+} __attribute__ ((packed))
+
+chr_t;
+
+/* device geometry; not packed -- in-kernel use only */
+typedef
+struct {
+        __u16 cyl;
+        __u16 head;
+        __u32 sector;
+} geom_t;
+
+/*
+ * eckd_confdata_t
+ * Layout of the device configuration data: four node element
+ * descriptors (presumably device / DASD / storage control / general --
+ * confirm against the ECKD architecture documents), three unused NEDs
+ * and the node element qualifier.
+ */
+typedef struct {
+        struct {
+                struct {
+                        unsigned char identifier:2;
+                        unsigned char token_id:1;
+                        unsigned char sno_valid:1;
+                        unsigned char subst_sno:1;
+                        unsigned char recNED:1;
+                        unsigned char emuNED:1;
+                        unsigned char reserved:1;
+                } __attribute__ ((packed)) flags;
+                __u8 descriptor;
+                __u8 dev_class;
+                __u8 reserved;
+                unsigned char dev_type[6];
+                unsigned char dev_model[3];
+                unsigned char HDA_manufacturer[3];
+                unsigned char HDA_location[2];
+                unsigned char HDA_seqno[12];
+                __u16 ID;
+        } __attribute__ ((packed)) ned1;
+        struct {
+                struct {
+                        unsigned char identifier:2;
+                        unsigned char token_id:1;
+                        unsigned char sno_valid:1;
+                        unsigned char subst_sno:1;
+                        unsigned char recNED:1;
+                        unsigned char emuNED:1;
+                        unsigned char reserved:1;
+                } __attribute__ ((packed)) flags;
+                __u8 descriptor;
+                __u8 reserved[2];
+                unsigned char dev_type[6];
+                unsigned char dev_model[3];
+                unsigned char DASD_manufacturer[3];
+                unsigned char DASD_location[2];
+                unsigned char DASD_seqno[12];
+                __u16 ID;
+        } __attribute__ ((packed)) ned2;
+        struct {
+                struct {
+                        unsigned char identifier:2;
+                        unsigned char token_id:1;
+                        unsigned char sno_valid:1;
+                        unsigned char subst_sno:1;
+                        unsigned char recNED:1;
+                        unsigned char emuNED:1;
+                        unsigned char reserved:1;
+                } __attribute__ ((packed)) flags;
+                __u8 descriptor;
+                __u8 reserved[2];
+                unsigned char cont_type[6];
+                unsigned char cont_model[3];
+                unsigned char cont_manufacturer[3];
+                unsigned char cont_location[2];
+                unsigned char cont_seqno[12];
+                __u16 ID;
+        } __attribute__ ((packed)) ned3;
+        struct {
+                struct {
+                        unsigned char identifier:2;
+                        unsigned char token_id:1;
+                        unsigned char sno_valid:1;
+                        unsigned char subst_sno:1;
+                        unsigned char recNED:1;
+                        unsigned char emuNED:1;
+                        unsigned char reserved:1;
+                } __attribute__ ((packed)) flags;
+                __u8 descriptor;
+                __u8 reserved[2];
+                unsigned char cont_type[6];
+                unsigned char empty[3];
+                unsigned char cont_manufacturer[3];
+                unsigned char cont_location[2];
+                unsigned char cont_seqno[12];
+                __u16 ID;
+        } __attribute__ ((packed)) ned4;
+        unsigned char ned5[32];
+        unsigned char ned6[32];
+        unsigned char ned7[32];
+        /* node element qualifier */
+        struct {
+                struct {
+                        unsigned char identifier:2;
+                        unsigned char reserved:6;
+                } __attribute__ ((packed)) flags;
+                __u8 selector;
+                __u16 interfaceID;
+                __u32 reserved;
+                __u16 subsystemID;
+                struct {
+                        unsigned char sp0:1;
+                        unsigned char sp1:1;
+                        unsigned char reserved:5;
+                        unsigned char scluster:1;
+                } __attribute__ ((packed)) spathID;
+                __u8 unit_address;
+                __u8 dev_ID;
+                __u8 dev_address;
+                __u8 adapterID;
+                __u16 link_address;
+                struct {
+                        unsigned char parallel:1;
+                        unsigned char escon:1;
+                        unsigned char reserved:1;
+                        unsigned char ficon:1;
+                        unsigned char reserved2:4;
+                } __attribute__ ((packed)) protocol_type;
+                struct {
+                        unsigned char PID_in_236:1;
+                        unsigned char reserved:7;
+                } __attribute__ ((packed)) format_flags;
+                __u8 log_dev_address;
+                unsigned char reserved2[12];
+        } __attribute__ ((packed)) neq;
+
+} __attribute__ ((packed))
+
+eckd_confdata_t;
+
+/*
+ * DE_eckd_data_t
+ * Parameter block of the DEFINE EXTENT CCW (16 bytes transferred, see
+ * define_extent(); aligned to 32).
+ * FIX: "__u16 short blk_size" carried two type specifiers and is not
+ * valid C; the blocksize field is a plain 16-bit value.
+ */
+typedef
+struct {
+        struct {
+                unsigned char perm:2;        /* Permissions on this extent */
+                unsigned char reserved:1;
+                unsigned char seek:2;        /* Seek control */
+                unsigned char auth:2;        /* Access authorization */
+                unsigned char pci:1;        /* PCI Fetch mode */
+        } __attribute__ ((packed)) mask;
+        struct {
+                unsigned char mode:2;        /* Architecture mode */
+                unsigned char ckd:1;        /* CKD Conversion */
+                unsigned char operation:3;        /* Operation mode */
+                unsigned char cfw:1;        /* Cache fast write */
+                unsigned char dfw:1;        /* DASD fast write */
+        } __attribute__ ((packed)) attributes;
+        __u16 blk_size;        /* Blocksize */
+        __u16 fast_write_id;
+        __u8 unused;
+        __u8 reserved;
+        ch_t beg_ext;
+        ch_t end_ext;
+} __attribute__ ((packed, aligned (32)))
+
+DE_eckd_data_t;
+
+/*
+ * LO_eckd_data_t
+ * Parameter block of the LOCATE RECORD CCW (16 bytes transferred, see
+ * locate_record(); aligned to 32).
+ */
+typedef
+struct {
+        struct {
+                unsigned char orientation:2;
+                unsigned char operation:6;
+        } __attribute__ ((packed)) operation;
+        struct {
+                unsigned char last_bytes_used:1;
+                unsigned char reserved:6;
+                unsigned char read_count_suffix:1;
+        } __attribute__ ((packed)) auxiliary;
+        __u8 unused;
+        __u8 count;
+        ch_t seek_addr;
+        chr_t search_arg;
+        __u8 sector;
+        __u16 length;
+} __attribute__ ((packed, aligned (32)))
+
+LO_eckd_data_t;
+
+/* Stuff for handling home addresses */
+/* on-disk home address record layout */
+typedef struct {
+        __u8 skip_control[14];
+        __u16 cell_number;
+        __u8 physical_addr[3];
+        __u8 flag;
+        ch_t track_addr;
+        __u8 reserved;
+        __u8 key_length;
+        __u8 reserved2[2];
+} __attribute__ ((packed, aligned (32)))
+
+eckd_home_t;
+
+
+/*
+ * round_up_multiple
+ * Returns no rounded up to the next multiple of mult; a value that
+ * already is a multiple is returned unchanged.
+ */
+static unsigned int
+round_up_multiple (unsigned int no, unsigned int mult)
+{
+        unsigned int rest = no % mult;
+
+        if (rest == 0)
+                return no;
+        return no + (mult - rest);
+}
+
+/*
+ * ceil_quot
+ * Integer quotient d1 / d2, rounded towards plus infinity.
+ * (d1 + d2 - 1 may wrap for values near UINT_MAX; callers only pass
+ * small track-geometry figures.)
+ */
+static unsigned int
+ceil_quot (unsigned int d1, unsigned int d2)
+{
+        return (d1 + (d2 - 1)) / d2;
+}
+
+/*
+ * bytes_per_record
+ * Computes the track space in bytes occupied by one record of key
+ * length kl and data length dl, using capacity formula 1 or 2 from the
+ * read-device-characteristics (RDC) data.  Returns 0 for an unknown
+ * formula.
+ */
+static int
+bytes_per_record (dasd_eckd_characteristics_t * rdc,
+                  int kl,        /* key length */
+                  int dl /* data length */ )
+{
+        int bpr = 0;
+        switch (rdc->formula) {
+        case 0x01:{
+                        unsigned int fl1, fl2;
+                        /* data portion, then (optional) key portion,
+                           each rounded to the f1 granularity */
+                        fl1 = round_up_multiple (ECKD_F2 (rdc) + dl,
+                                                 ECKD_F1 (rdc));
+                        fl2 = round_up_multiple (kl ? ECKD_F2 (rdc) + kl : 0,
+                                                 ECKD_F1 (rdc));
+                        bpr = fl1 + fl2;
+                        break;
+                }
+        case 0x02:{
+                        unsigned int fl1, fl2, int1, int2;
+                        int1 = ceil_quot (dl + ECKD_F6 (rdc),
+                                          ECKD_F5 (rdc) << 1);
+                        int2 = ceil_quot (kl + ECKD_F6 (rdc),
+                                          ECKD_F5 (rdc) << 1);
+                        fl1 = round_up_multiple (ECKD_F1 (rdc) *
+                                                 ECKD_F2 (rdc) +
+                                                 (dl + ECKD_F6 (rdc) +
+                                                  ECKD_F4 (rdc) * int1),
+                                                 ECKD_F1 (rdc));
+                        fl2 = round_up_multiple (ECKD_F1 (rdc) *
+                                                 ECKD_F3 (rdc) +
+                                                 (kl + ECKD_F6 (rdc) +
+                                                  ECKD_F4 (rdc) * int2),
+                                                 ECKD_F1 (rdc));
+                        bpr = fl1 + fl2;
+                        break;
+                }
+        default:
+                INTERNAL_ERROR ("unknown formula%d\n", rdc->formula);
+        }
+        return bpr;
+}
+
+/*
+ * bytes_per_track
+ * Extracts the bytes-per-track value from the RDC data by loading four
+ * bytes and shifting out the low byte.
+ * NOTE(review): presumably byte_per_track is a 3-byte big-endian field
+ * followed by at least one addressable byte -- confirm against the
+ * dasd_eckd_characteristics_t layout in dasd_types.h.
+ */
+static inline unsigned int
+bytes_per_track (dasd_eckd_characteristics_t * rdc)
+{
+        return *(unsigned int *) (rdc->byte_per_track) >> 8;
+}
+
+/*
+ * recs_per_track
+ * Returns how many records of key length kl / data length dl fit on
+ * one track, using per-device-type capacity formulas for 3380, 3390
+ * and 9345 devices; returns 0 for any other device type.
+ */
+static unsigned int
+recs_per_track (dasd_eckd_characteristics_t * rdc,
+                unsigned int kl, unsigned int dl)
+{
+        int rpt = 0;
+        int dn;
+        switch ( rdc -> dev_type ) {
+        case 0x3380:
+                if (kl)
+                        return 1499 / (15 +
+                                       7 + ceil_quot (kl + 12, 32) +
+                                       ceil_quot (dl + 12, 32));
+                else
+                        return 1499 / (15 + ceil_quot (dl + 12, 32));
+        case 0x3390:
+                dn = ceil_quot (dl + 6, 232) + 1;
+                if (kl) {
+                        int kn = ceil_quot (kl + 6, 232) + 1;
+                        return 1729 / (10 +
+                                       9 + ceil_quot (kl + 6 * kn, 34) +
+                                       9 + ceil_quot (dl + 6 * dn, 34));
+                } else
+                        return 1729 / (10 +
+                                       9 + ceil_quot (dl + 6 * dn, 34));
+        case 0x9345:
+                dn = ceil_quot (dl + 6, 232) + 1;
+                if (kl) {
+                        int kn = ceil_quot (kl + 6, 232) + 1;
+                        return 1420 / (18 +
+                                       7 + ceil_quot (kl + 6 * kn, 34) +
+                                       ceil_quot (dl + 6 * dn, 34));
+                } else
+                        return 1420 / (18 +
+                                       7 + ceil_quot (dl + 6 * dn, 34));
+        }
+        return rpt;
+}
+
+/*
+ * define_extent
+ * Builds a DEFINE EXTENT CCW and fills its parameter block for an
+ * extent spanning tracks trk..totrk.  Permissions and caching
+ * attributes are chosen according to the command that will follow the
+ * define extent in the channel program.
+ */
+static
+void
+define_extent (ccw1_t * de_ccw,
+               DE_eckd_data_t * data,
+               int trk,
+               int totrk,
+               int cmd,
+               dasd_information_t * info)
+{
+        ch_t geo, beg, end;
+
+        /* convert linear track numbers to cylinder/head addresses */
+        geo.cyl = info->rdc_data->eckd.no_cyl;
+        geo.head = info->rdc_data->eckd.trk_per_cyl;
+        beg.cyl = trk / geo.head;
+        beg.head = trk % geo.head;
+        end.cyl = totrk / geo.head;
+        end.head = totrk % geo.head;
+
+        memset (de_ccw, 0, sizeof (ccw1_t));
+        de_ccw->cmd_code = CCW_DEFINE_EXTENT;
+        de_ccw->count = 16;
+        de_ccw->cda = (void *) virt_to_phys (data);
+
+        memset (data, 0, sizeof (DE_eckd_data_t));
+        switch (cmd) {
+        case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+        case DASD_ECKD_CCW_READ_RECORD_ZERO:
+        case DASD_ECKD_CCW_READ:
+        case DASD_ECKD_CCW_READ_MT:
+        case DASD_ECKD_CCW_READ_CKD:        /* Fallthrough */
+        case DASD_ECKD_CCW_READ_CKD_MT:
+        case DASD_ECKD_CCW_READ_COUNT:
+                data->mask.perm = 0x1;
+                data->attributes.operation = 0x3;        /* enable seq. caching */
+                break;
+        case DASD_ECKD_CCW_WRITE:
+        case DASD_ECKD_CCW_WRITE_MT:
+                data->attributes.operation = 0x3;        /* enable seq. caching */
+                break;
+        case DASD_ECKD_CCW_WRITE_CKD:
+        case DASD_ECKD_CCW_WRITE_CKD_MT:
+                data->attributes.operation = 0x1;        /* format through cache */
+                break;
+        case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+        case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+                data->mask.perm = 0x3;
+                data->mask.auth = 0x1;
+                data->attributes.operation = 0x1;        /* format through cache */
+                break;
+        default:
+                INTERNAL_ERROR ("unknown opcode 0x%x\n", cmd);
+                break;
+        }
+        data->attributes.mode = 0x3;
+        data->beg_ext.cyl = beg.cyl;
+        data->beg_ext.head = beg.head;
+        data->end_ext.cyl = end.cyl;
+        data->end_ext.head = end.head;
+}
+
+/*
+ * Build a Locate Record CCW and its 16-byte parameter block for
+ * no_rec records starting at record rec_on_trk of track trk.  The
+ * operation/orientation codes are derived from the channel command
+ * that will follow; HA/R0 variants account for the extra record in
+ * data->count.
+ */
+static inline void
+locate_record (ccw1_t * lo_ccw,
+               LO_eckd_data_t * data,
+               int trk,
+               int rec_on_trk,
+               int no_rec,
+               int cmd,
+               dasd_information_t * info)
+{
+        ch_t geo =
+        {info->rdc_data->eckd.no_cyl,
+         info->rdc_data->eckd.trk_per_cyl};
+        ch_t seek =
+        {trk / (geo.head), trk % (geo.head)};
+        int reclen = info->sizes.bp_block;
+        memset (lo_ccw, 0, sizeof (ccw1_t));
+        lo_ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
+        lo_ccw->count = 16;
+        lo_ccw->cda = (void *) virt_to_phys (data);
+
+        memset (data, 0, sizeof (LO_eckd_data_t));
+        switch (cmd) {
+        case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
+                data->operation.orientation = 0x3;
+                data->operation.operation = 0x03;
+                break;
+        case DASD_ECKD_CCW_READ_HOME_ADDRESS:
+                data->operation.orientation = 0x3;
+                data->operation.operation = 0x16;
+                break;
+        case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
+                data->operation.orientation = 0x3;
+                data->operation.operation = 0x03;
+                data->count++;  /* record zero adds one record */
+                break;
+        case DASD_ECKD_CCW_READ_RECORD_ZERO:
+                data->operation.orientation = 0x3;
+                data->operation.operation = 0x16;
+                data->count++;  /* record zero adds one record */
+                break;
+        case DASD_ECKD_CCW_WRITE:
+        case DASD_ECKD_CCW_WRITE_MT:
+                data->auxiliary.last_bytes_used = 0x1;
+                data->length = reclen;
+                data->operation.operation = 0x01;
+                break;
+        case DASD_ECKD_CCW_WRITE_CKD:
+        case DASD_ECKD_CCW_WRITE_CKD_MT:
+                data->auxiliary.last_bytes_used = 0x1;
+                data->length = reclen;
+                data->operation.operation = 0x03;
+                break;
+        case DASD_ECKD_CCW_READ:
+        case DASD_ECKD_CCW_READ_MT:
+                data->auxiliary.last_bytes_used = 0x1;
+                data->length = reclen;
+                data->operation.operation = 0x06;
+                break;
+        case DASD_ECKD_CCW_READ_CKD:
+        case DASD_ECKD_CCW_READ_CKD_MT:
+                data->auxiliary.last_bytes_used = 0x1;
+                data->length = reclen;
+                data->operation.operation = 0x16;
+                break;
+        case DASD_ECKD_CCW_READ_COUNT:
+                data->operation.operation = 0x06;
+                break;
+        default:
+                INTERNAL_ERROR ("unknown opcode 0x%x\n", cmd);
+        }
+        /* seek and search argument both address the target track */
+        memcpy (&(data->seek_addr), &seek, sizeof (ch_t));
+        memcpy (&(data->search_arg), &seek, sizeof (ch_t));
+        data->search_arg.record = rec_on_trk;
+        data->count += no_rec;
+}
+
+/*
+ * Dump the device/channel status of a failed I/O together with the
+ * 32 sense bytes, then decode whether the device delivered 24- or
+ * 32-byte sense data.
+ */
+void
+dasd_eckd_print_error (devstat_t * stat)
+{
+        int row, col;
+        char *sense = stat->ii.sense.data;
+
+        PRINT_WARN ("IRQ on devno %x: with intparm:%x DS:0x%02x CS:0x%02x\n",
+                    stat->devno, stat->intparm, stat->dstat, stat->cstat);
+        PRINT_WARN ("Failing CCW: %p\n", (ccw1_t *) stat->cpa);
+        /* print the 32 sense bytes as four rows of eight */
+        for (row = 0; row < 4; row++) {
+                PRINT_DEBUG ("Sense:");
+                for (col = 0; col < 8; col++)
+                        printk (" %2d:0x%02x",
+                                8 * row + col, sense[8 * row + col]);
+                printk ("\n");
+        }
+        if (sense[27] & 0x80) { /* 32 Byte Sense Data */
+                PRINT_INFO ("Sense Data is 32 Byte information\n");
+                PRINT_INFO ("Format: %x Exception class %x\n",
+                            sense[6] & 0x0f, sense[22] >> 4);
+        } else {                /* 24 Byte Sense Data */
+                PRINT_INFO ("Sense Data is 24 Byte information\n");
+                PRINT_INFO ("FMT: %x MSG %x, %s MSGb to SYSOP\n",
+                            sense[7] >> 4, sense[7] & 0x0f,
+                            sense[1] & 0x10 ? "" : "no");
+        }
+}
+
+/*
+ * Format one track with block size bs.  Builds a channel program
+ * (Define Extent, Locate Record, then one Write-CKD per record) and
+ * executes it synchronously, retrying up to 5 times.
+ * Returns 0 on success, -ENOMEM if no cqr could be allocated,
+ * -EINVAL for unsupported flags, -EIO when the I/O keeps failing.
+ *
+ * Fixes over the previous version: the cqr is released on the
+ * -EINVAL error paths (it was leaked before) and the request_cqr
+ * result is checked for NULL.
+ */
+int
+dasd_eckd_format_track (int di, int trk, int bs)
+{
+        int rc = 0;
+        int i;
+        int flags = 0x00;       /* FORMAT_R0 = 0x01, FORMAT_HA = 0x03 */
+        dasd_information_t * info=dasd_info[di];
+        cqr_t *fcp;
+        DE_eckd_data_t *DE_data;
+        LO_eckd_data_t *LO_data;
+        eckd_count_t *ct_data;
+        eckd_count_t *r0_data;
+        ccw1_t *last_ccw;
+        int retries = 5;
+
+        int rpt = recs_per_track (&(info->rdc_data->eckd), 0, bs);
+        int cyl = trk / info->rdc_data->eckd.trk_per_cyl;
+        int head = trk % info->rdc_data->eckd.trk_per_cyl;
+
+        fcp = request_cqr (2 + 1 + rpt,
+                           sizeof (DE_eckd_data_t) +
+                           sizeof (LO_eckd_data_t) +
+                           (rpt + 1) * sizeof (eckd_count_t));
+        if (!fcp)       /* allocation may fail */
+                return -ENOMEM;
+        fcp -> devindex=di;
+        /* data areas are laid out back to back: DE, LO, R0, counts */
+        DE_data = (DE_eckd_data_t *) fcp->data;
+        LO_data = (LO_eckd_data_t *) (((long) DE_data) +
+                                      sizeof (DE_eckd_data_t));
+        r0_data = (eckd_count_t *) (((long) LO_data) +
+                                    sizeof (LO_eckd_data_t));
+        ct_data = (eckd_count_t *) (((long) r0_data) +
+                                    sizeof (eckd_count_t));
+        last_ccw = fcp->cpaddr;
+        switch (flags) {
+        case 0x03:
+                define_extent (last_ccw, DE_data, trk, trk,
+                               DASD_ECKD_CCW_WRITE_HOME_ADDRESS, info);
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                locate_record (last_ccw, LO_data, trk, 0, rpt,
+                               DASD_ECKD_CCW_WRITE_HOME_ADDRESS, info);
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                break;
+        case 0x01:
+                define_extent (last_ccw, DE_data, trk, trk,
+                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, info);
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                locate_record (last_ccw, LO_data, trk, 0, rpt,
+                               DASD_ECKD_CCW_WRITE_RECORD_ZERO, info);
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                break;
+        case 0x00:
+                define_extent (last_ccw, DE_data, trk, trk,
+                               DASD_ECKD_CCW_WRITE_CKD, info);
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                locate_record (last_ccw, LO_data, trk, 0, rpt,
+                               DASD_ECKD_CCW_WRITE_CKD, info);
+                LO_data->length = bs;
+                last_ccw->flags = CCW_FLAG_CC;
+                last_ccw++;
+                break;
+        default:
+                PRINT_WARN ("Unknown format flags...%d\n", flags);
+                release_cqr (fcp);      /* was leaked before */
+                return -EINVAL;
+        }
+        if (flags & 0x02) {
+                PRINT_WARN ("Unsupported format flag...%d\n", flags);
+                release_cqr (fcp);      /* was leaked before */
+                return -EINVAL;
+        }
+        if (flags & 0x01) {     /* write record zero */
+                memset (r0_data, 0, sizeof (eckd_count_t));
+                r0_data->cyl = cyl;
+                r0_data->head = head;
+                r0_data->record = 0;
+                r0_data->kl = 0;
+                r0_data->dl = 8;
+                last_ccw->cmd_code = 0x03;
+                last_ccw->count = 8;
+                last_ccw->flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+                last_ccw->cda = (void *) virt_to_phys (r0_data);
+                last_ccw++;
+        }
+        /* write remaining records */
+        for (i = 0; i < rpt; i++, last_ccw++) {
+                memset (ct_data + i, 0, sizeof (eckd_count_t));
+                (ct_data + i)->cyl = cyl;
+                (ct_data + i)->head = head;
+                (ct_data + i)->record = i + 1;
+                (ct_data + i)->kl = 0;
+                (ct_data + i)->dl = bs;
+                last_ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
+                last_ccw->flags = CCW_FLAG_CC | CCW_FLAG_SLI;
+                last_ccw->count = 8;
+                last_ccw->cda = (void *)
+                    virt_to_phys (ct_data + i);
+        }
+        /* terminate the channel program at the last CCW */
+        (last_ccw - 1)->flags &= ~(CCW_FLAG_CC | CCW_FLAG_DC);
+        fcp -> devindex = di;
+        fcp -> flags = DASD_DO_IO_SLEEP;
+        /* start the request and sleep until it completes or fails */
+        do {
+                DECLARE_WAITQUEUE(wait, current);
+                unsigned long flags;
+                int irq;
+                int cs;
+
+                irq = dasd_info[fcp->devindex]->info.irq;
+                s390irq_spin_lock_irqsave (irq, flags);
+                atomic_set(&fcp->status,CQR_STATUS_QUEUED);
+                rc = dasd_start_IO ( fcp );
+                add_wait_queue (&dasd_waitq, &wait);
+                do {
+                        current->state = TASK_UNINTERRUPTIBLE;
+                        s390irq_spin_unlock_irqrestore (irq, flags);
+                        schedule ();
+                        s390irq_spin_lock_irqsave (irq, flags);
+                } while (((cs = atomic_read (&fcp->status)) !=
+                          CQR_STATUS_DONE) &&
+                         (cs != CQR_STATUS_ERROR));
+                remove_wait_queue (&dasd_waitq, &wait);
+                s390irq_spin_unlock_irqrestore (irq, flags);
+
+                retries --;
+        } while ( (rc || (atomic_read(&fcp->status) != CQR_STATUS_DONE)) &&
+                  retries);
+        if ((rc || (atomic_read(&fcp->status) != CQR_STATUS_DONE)))
+                rc = -EIO;
+        release_cqr (fcp);
+        return rc;
+}
+
+/*
+ * Sanity check of the detected device information.  Currently a
+ * no-op that accepts every device (always returns 0).
+ */
+int
+dasd_eckd_ck_devinfo (dev_info_t * info)
+{
+        return 0;
+}
+
+/*
+ * Translate a block layer request into an ECKD channel program:
+ * Define Extent over the first..last track touched, Locate Record
+ * for the block range, then one read/write CCW per block of every
+ * buffer head.  Returns the built cqr or NULL on error.
+ */
+cqr_t *
+dasd_eckd_build_req (int devindex,
+                     struct request * req)
+{
+        cqr_t *rw_cp = NULL;
+        ccw1_t *ccw;
+
+        DE_eckd_data_t *DE_data;
+        LO_eckd_data_t *LO_data;
+        struct buffer_head *bh;
+        int rw_cmd;
+        dasd_information_t *info = dasd_info[devindex];
+        int blk_per_trk = recs_per_track (&(info->rdc_data->eckd),
+                                          0, info->sizes.bp_block);
+        int byt_per_blk = info->sizes.bp_block;
+        /* number of device blocks, first and last track of the request */
+        int noblk = req-> nr_sectors >> info->sizes.s2b_shift;
+        int btrk = (req->sector >> info->sizes.s2b_shift) / blk_per_trk;
+        int etrk = ((req->sector + req->nr_sectors - 1) >>
+                    info->sizes.s2b_shift) / blk_per_trk;
+
+        if ( ! noblk ) {
+                PRINT_ERR("No blocks to write...returning\n");
+                return NULL;
+        }
+
+        if (req->cmd == READ) {
+                rw_cmd = DASD_ECKD_CCW_READ_MT;
+        } else
+#if DASD_PARANOIA > 2
+        if (req->cmd == WRITE)
+#endif /* DASD_PARANOIA */
+        {
+                rw_cmd = DASD_ECKD_CCW_WRITE_MT;
+        }
+#if DASD_PARANOIA > 2
+        else {
+                PRINT_ERR ("Unknown command %d\n", req->cmd);
+                return NULL;
+        }
+#endif /* DASD_PARANOIA */
+        /* Build the request */
+        rw_cp = request_cqr (2 + noblk,
+                             sizeof (DE_eckd_data_t) +
+                             sizeof (LO_eckd_data_t));
+        if ( ! rw_cp ) {
+                return NULL;
+        }
+        DE_data = rw_cp->data;
+        LO_data = rw_cp->data + sizeof (DE_eckd_data_t);
+        ccw = rw_cp->cpaddr;
+
+        define_extent (ccw, DE_data, btrk, etrk, rw_cmd, info);
+        ccw->flags = CCW_FLAG_CC;
+        ccw++;
+        locate_record (ccw, LO_data, btrk,
+                       (req->sector >> info->sizes.s2b_shift) %
+                       blk_per_trk + 1,
+                       req->nr_sectors >> info->sizes.s2b_shift,
+                       rw_cmd, info);
+        ccw->flags = CCW_FLAG_CC;
+        /* one data-transfer CCW per block of each buffer head */
+        for (bh = req->bh; bh; bh = bh->b_reqnext) {
+                long size;
+                for (size = 0; size < bh->b_size; size += byt_per_blk) {
+                        ccw++;
+                        ccw->flags = CCW_FLAG_CC;
+                        ccw->cmd_code = rw_cmd;
+                        ccw->count = byt_per_blk;
+                        ccw->cda = (void *) virt_to_phys (bh->b_data + size);
+                }
+        }
+        /* last CCW terminates the channel program */
+        ccw->flags &= ~(CCW_FLAG_DC | CCW_FLAG_CC);
+        return rw_cp;
+}
+
+/*
+ * Build a channel program to read or write the volume label
+ * (record 2 of track 0).  Returns the cqr or NULL on error.
+ *
+ * Fixes over the previous version: the LO data area is addressed at
+ * 'cqr->data + sizeof (DE_eckd_data_t)' -- the old 'cqr->data + 1'
+ * advanced only a single byte (void * arithmetic) and made
+ * locate_record clobber the DE data; the other channel program
+ * builders in this file already use the sizeof offset.  Also checks
+ * the request_cqr result.
+ */
+cqr_t *
+dasd_eckd_rw_label (int devindex, int rw, char *buffer)
+{
+        int cmd_code = 0x03;
+        dasd_information_t *info = dasd_info[devindex];
+        cqr_t *cqr;
+        ccw1_t *ccw;
+
+        switch (rw) {
+        case READ:
+                cmd_code = DASD_ECKD_CCW_READ;
+                break;
+        case WRITE:
+                cmd_code = DASD_ECKD_CCW_WRITE;
+                break;
+#if DASD_PARANOIA > 2
+        default:
+                INTERNAL_ERROR ("unknown cmd %d", rw);
+                return NULL;
+#endif /* DASD_PARANOIA */
+        }
+        cqr = request_cqr (3, sizeof (DE_eckd_data_t) +
+                           sizeof (LO_eckd_data_t));
+        if (!cqr)
+                return NULL;
+        ccw = cqr->cpaddr;
+        define_extent (ccw, cqr->data, 0, 0, cmd_code, info);
+        ccw->flags |= CCW_FLAG_CC;
+        ccw++;
+        locate_record (ccw, cqr->data + sizeof (DE_eckd_data_t),
+                       0, 2, 1, cmd_code, info);
+        ccw->flags |= CCW_FLAG_CC;
+        ccw++;
+        ccw->cmd_code = cmd_code;
+        ccw->flags |= CCW_FLAG_SLI;
+        ccw->count = sizeof (dasd_volume_label_t);
+        ccw->cda = (void *) virt_to_phys ((void *) buffer);
+        return cqr;
+
+}
+
+/*
+ * Print the device/control-unit type and geometry from the read
+ * device characteristics, plus capacity estimates for 1k records.
+ */
+void
+dasd_eckd_print_char (dasd_characteristics_t * i)
+{
+        dasd_eckd_characteristics_t * c =
+            (dasd_eckd_characteristics_t *)i;
+        PRINT_INFO ("%x/%x (%x/%x) Cyl: %d Head: %d Sec: %d \n",
+                    c->dev_type, c->dev_model,
+                    c->cu_type, c->cu_model.model,
+                    c->no_cyl, c->trk_per_cyl,
+                    c->sec_per_trk);
+        PRINT_INFO ("Estimate: %d Byte/trk %d byte/kByte %d kByte/trk \n",
+                    bytes_per_track (c),
+                    bytes_per_record (c, 0, 1024),
+                    recs_per_track (c, 0, 1024));
+}       /* stray ';' after the function body removed */
+
+/*
+ * Check the read device characteristics.  Prints them and accepts
+ * the device unconditionally (returns 0).
+ */
+int
+dasd_eckd_ck_char (dasd_characteristics_t * i)
+{
+        dasd_eckd_print_char (i);
+        return 0;
+}
+
+/*
+ * Format a track range of a device.  With fdata == NULL the whole
+ * device is formatted with 4k blocks; fd.stop_unit == -1 means "to
+ * the end of the disk" and fd.blksize == 0 defaults to 4096.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix over the previous version: the progress-message throttle
+ * computed 'i % (trk_per_cyl * (no_cyl / 20))', which is a modulus
+ * by zero on devices with fewer than 20 cylinders.
+ */
+int
+dasd_eckd_format (int devindex, format_data_t * fdata)
+{
+        int rc = 0;
+        int i;
+        dasd_information_t *info = dasd_info[devindex];
+        format_data_t fd;
+        int msg_interval;
+
+        if (!fdata) {
+                fd.start_unit = 0;
+                fd.stop_unit = info->rdc_data->eckd.no_cyl *
+                    info->rdc_data->eckd.trk_per_cyl - 1;
+                fd.blksize = 4096;
+        } else {
+                memcpy (&fd, fdata, sizeof (format_data_t));
+                if ( fd.stop_unit == -1 ) {
+                        fd.stop_unit = info->rdc_data->eckd.no_cyl *
+                            info->rdc_data->eckd.trk_per_cyl - 1;
+                }
+                if ( fd.blksize == 0 ) {
+                        fd.blksize = 4096;
+                }
+        }
+        PRINT_INFO("Formatting device %d from %d to %d with bs %d\n",
+                   devindex,fd.start_unit,fd.stop_unit,fd.blksize);
+        if ( fd.start_unit > fd.stop_unit ) {
+                PRINT_WARN ("start unit .gt. stop unit\n");
+                return -EINVAL;
+        }
+        if ( (fd.start_unit > info->rdc_data->eckd.no_cyl *
+              info->rdc_data->eckd.trk_per_cyl - 1) ) {
+                PRINT_WARN ("start unit beyond end of disk\n");
+                return -EINVAL;
+        }
+        if ( (fd.stop_unit > info->rdc_data->eckd.no_cyl *
+              info->rdc_data->eckd.trk_per_cyl - 1) ) {
+                PRINT_WARN ("stop unit beyond end of disk\n");
+                return -EINVAL;
+        }
+        switch (fd.blksize) {
+        case 512:
+        case 1024:
+        case 2048:
+        case 4096:
+                break;
+        default:
+                PRINT_WARN ("invalid blocksize\n");
+                return -EINVAL;
+        }
+        /* print at most 20 progress messages per disk; the interval
+         * is 0 for disks with fewer than 20 cylinders -- guard the
+         * modulus in that case */
+        msg_interval = info->rdc_data->eckd.trk_per_cyl *
+            (info->rdc_data->eckd.no_cyl / 20);
+        for (i = fd.start_unit; i <= fd.stop_unit; i++) {
+                if ( msg_interval && ! ( i % msg_interval ) ) {
+                        PRINT_INFO ("Format %d Cylinder: %d\n",devindex,
+                                    i/info->rdc_data->eckd.trk_per_cyl);
+                }
+                rc = dasd_eckd_format_track (devindex, i, fd.blksize);
+                if (rc) {
+                        PRINT_WARN ("Formatting of Track %d failed...exiting\n", i);
+                        break;
+                }
+        }
+        PRINT_INFO("Formated device %d from %d to %d with bs %d\n",
+                   devindex,fd.start_unit,fd.stop_unit,fd.blksize);
+        return rc;
+}
+
+/*
+ * First stage of size detection: build a channel program that reads
+ * the count field of record 1 on track 0 into the device's private
+ * area; dasd_eckd_fill_sizes_last evaluates it after completion.
+ *
+ * Fixes over the previous version: check the request_cqr result and
+ * use virt_to_phys for the CCW data address, consistent with every
+ * other channel program builder in this file (was __pa).
+ */
+cqr_t *
+dasd_eckd_fill_sizes_first (int di)
+{
+        cqr_t *rw_cp = NULL;
+        ccw1_t *ccw;
+        DE_eckd_data_t *DE_data;
+        LO_eckd_data_t *LO_data;
+        dasd_information_t *info = dasd_info[di];
+        eckd_count_t *count_data= &(info->private.eckd.count_data);
+        rw_cp = request_cqr (3,
+                             sizeof (DE_eckd_data_t) +
+                             sizeof (LO_eckd_data_t));
+        if (!rw_cp)
+                return NULL;
+        DE_data = rw_cp->data;
+        LO_data = rw_cp->data + sizeof (DE_eckd_data_t);
+        ccw = rw_cp->cpaddr;
+        define_extent (ccw, DE_data, 0, 0, DASD_ECKD_CCW_READ_COUNT, info);
+        ccw->flags = CCW_FLAG_CC;
+        ccw++;
+        locate_record (ccw, LO_data, 0, 1, 1, DASD_ECKD_CCW_READ_COUNT, info);
+        ccw->flags = CCW_FLAG_CC;
+        ccw++;
+        ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
+        ccw->count = 8;
+        ccw->cda = (void *) virt_to_phys (count_data);
+        rw_cp->devindex = di;
+        atomic_set(&rw_cp->status,CQR_STATUS_FILLED);
+        return rw_cp;
+}
+
+/*
+ * Second stage of size detection: derive block size, shift factors
+ * and total capacity from the count field read by
+ * dasd_eckd_fill_sizes_first.  Returns 0 on success or -EMEDIUMTYPE
+ * when the data length is unusable (unformatted device).
+ */
+int dasd_eckd_fill_sizes_last (int devindex)
+{
+        int sb;
+        dasd_information_t *in = dasd_info[devindex];
+        int bs = in->private.eckd.count_data.dl;
+        if (bs <= 0) {
+                PRINT_INFO("Cannot figure out blocksize. did you format the disk?\n");
+                memset (&(in -> sizes), 0, sizeof(dasd_sizes_t ));
+                return -EMEDIUMTYPE;
+        } else {
+                in->sizes.bp_block = bs;
+        }
+        in->sizes.bp_sector = in->sizes.bp_block;
+
+        in->sizes.b2k_shift = 0;        /* bits to shift a block to get 1k */
+        for (sb = 1024; sb < bs; sb = sb << 1)
+                in->sizes.b2k_shift++;
+
+        in->sizes.s2b_shift = 0;        /* bits to shift 512 to get a block */
+        for (sb = 512; sb < bs; sb = sb << 1)
+                in->sizes.s2b_shift++;
+
+        /* total capacity = cylinders * tracks/cyl * blocks/track */
+        in->sizes.blocks = in->rdc_data->eckd.no_cyl *
+            in->rdc_data->eckd.trk_per_cyl *
+            recs_per_track (&(in->rdc_data->eckd), 0, bs);
+        in->sizes.kbytes = in->sizes.blocks << in->sizes.b2k_shift;
+
+        PRINT_INFO ("Verified: %d B/trk %d B/Blk(%d B) %d Blks/trk %d kB/trk \n",
+                    bytes_per_track (&(in->rdc_data->eckd)),
+                    bytes_per_record (&(in->rdc_data->eckd), 0, in->sizes.bp_block),
+                    in->sizes.bp_block,
+                    recs_per_track (&(in->rdc_data->eckd), 0, in->sizes.bp_block),
+                    (recs_per_track (&(in->rdc_data->eckd), 0, in->sizes.bp_block) <<
+                     in->sizes.b2k_shift ));
+        return 0;
+}
+
+/* Discipline operations vector for ECKD devices (GNU labeled-field
+ * initializer syntax, as used by this kernel tree). */
+dasd_operations_t dasd_eckd_operations =
+{
+        ck_devinfo: dasd_eckd_ck_devinfo,
+        get_req_ccw: dasd_eckd_build_req,
+        rw_label: dasd_eckd_rw_label,
+        ck_characteristics: dasd_eckd_ck_char,
+        fill_sizes_first: dasd_eckd_fill_sizes_first,
+        fill_sizes_last: dasd_eckd_fill_sizes_last,
+        dasd_format: dasd_eckd_format,
+};
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * ---------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
new file mode 100644
index 000000000..9028b797f
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.c
@@ -0,0 +1,21 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_erp.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ */
+
+#include <asm/irq.h>
+#include <linux/dasd.h>
+#include "dasd_erp.h"
+#include "dasd_types.h"
+
+/*
+ * Examine a finished channel program: a request that ended with
+ * channel end + device end and a clean channel status needs no
+ * recovery; everything else is treated as fatal.
+ */
+dasd_era_t
+dasd_erp_examine ( cqr_t * cqr)
+{
+        devstat_t *dstat = cqr->dstat;
+        int clean_end = (dstat->cstat == 0x00) &&
+            (dstat->dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END));
+
+        return clean_end ? dasd_era_none : dasd_era_fatal;
+}
diff --git a/drivers/s390/block/dasd_erp.h b/drivers/s390/block/dasd_erp.h
new file mode 100644
index 000000000..ab92bd122
--- /dev/null
+++ b/drivers/s390/block/dasd_erp.h
@@ -0,0 +1,15 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_erp.h
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
+ */
+
+#include "dasd_types.h"
+
+typedef enum {
+ dasd_era_fatal = -1,
+ dasd_era_none = 0
+} dasd_era_t;
+
+dasd_era_t dasd_erp_examine ( cqr_t * );
diff --git a/drivers/s390/block/dasd_mdsk.c b/drivers/s390/block/dasd_mdsk.c
new file mode 100644
index 000000000..63de4a6b4
--- /dev/null
+++ b/drivers/s390/block/dasd_mdsk.c
@@ -0,0 +1,14 @@
+#include <linux/dasd.h>
+#include "dasd_types.h"
+#include "dasd_erp.h"
+
+/* Discipline operations vector for VM minidisks -- placeholder only,
+ * the mdsk entry points are not implemented yet. */
+dasd_operations_t dasd_mdsk_operations =
+{
+        NULL,
+        /*  dasd_mdsk_ck_devinfo,
+           dasd_mdsk_build_req,
+           dasd_mdsk_rw_label,
+           dasd_mdsk_ck_char,
+           dasd_mdsk_fill_sizes,
+           dasd_mdsk_format, */
+};
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
new file mode 100644
index 000000000..e757c435f
--- /dev/null
+++ b/drivers/s390/block/dasd_proc.c
@@ -0,0 +1,116 @@
+/*
+ Structure of the proc filesystem:
+ /proc/dasd/
+ /proc/dasd/devices # List of devices
+ /proc/dasd/ddabcd # Device node for devno abcd
+ /proc/dasd/ddabcd1 # Device node for partition abcd
+ /proc/dasd/abcd # Device information for devno abcd
+*/
+
+#include <linux/proc_fs.h>
+
+#include <linux/dasd.h>
+
+#include "dasd_types.h"
+
+int dasd_proc_read_devices ( char *, char **, off_t, int);
+#ifdef DASD_PROFILE
+extern int dasd_proc_read_statistics ( char *, char **, off_t, int);
+extern int dasd_proc_read_debug ( char *, char **, off_t, int);
+#endif /* DASD_PROFILE */
+
+/* /proc/dasd -- directory all other entries live in */
+struct proc_dir_entry dasd_proc_root_entry = {
+        0,
+        4,"dasd",
+        S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR | S_IWGRP,
+        1,0,0,
+        0,
+        NULL,
+};
+
+/* /proc/dasd/devices -- one line per configured device */
+struct proc_dir_entry dasd_proc_devices_entry = {
+        0,
+        7,"devices",
+        S_IFREG | S_IRUGO | S_IXUGO | S_IWUSR | S_IWGRP,
+        1,0,0,
+        0,
+        NULL,
+        &dasd_proc_read_devices,
+};
+
+#ifdef DASD_PROFILE
+/* /proc/dasd/statistics -- I/O timing histograms */
+struct proc_dir_entry dasd_proc_stats_entry = {
+        0,
+        10,"statistics",
+        S_IFREG | S_IRUGO | S_IXUGO | S_IWUSR | S_IWGRP,
+        1,0,0,
+        0,
+        NULL,
+        &dasd_proc_read_statistics,
+};
+
+/* /proc/dasd/debug -- raw trace buffer dump */
+struct proc_dir_entry dasd_proc_debug_entry = {
+        0,
+        5,"debug",
+        S_IFREG | S_IRUGO | S_IXUGO | S_IWUSR | S_IWGRP,
+        1,0,0,
+        0,
+        NULL,
+        &dasd_proc_read_debug,
+};
+#endif /* DASD_PROFILE */
+
+/* template for per-device block nodes; name is filled in per devno */
+struct proc_dir_entry dasd_proc_device_template = {
+        0,
+        6,"dd????",
+        S_IFBLK | S_IRUGO | S_IWUSR | S_IWGRP,
+        1,0,0,
+        0,
+        NULL,
+};
+
+/*
+ * Register the /proc/dasd directory and its static entries
+ * (statistics and debug only when profiling is compiled in).
+ */
+void
+dasd_proc_init ( void )
+{
+        proc_register( & proc_root, & dasd_proc_root_entry);
+        proc_register( & dasd_proc_root_entry, & dasd_proc_devices_entry);
+#ifdef DASD_PROFILE
+        proc_register( & dasd_proc_root_entry, & dasd_proc_stats_entry);
+        proc_register( & dasd_proc_root_entry, & dasd_proc_debug_entry);
+#endif /* DASD_PROFILE */
+}
+
+
+/*
+ * /proc/dasd/devices read handler: one line per configured device
+ * with devno, major/minor, node name and block size.
+ *
+ * Fixes over the previous version: the loop now stops after printing
+ * "terminated..." (it previously kept appending and could overrun
+ * the proc page), and the not-formatted test uses a bitmask instead
+ * of equality so it still matches when other flag bits are set.
+ */
+int
+dasd_proc_read_devices ( char * buf, char **start, off_t off, int len)
+{
+        int i;
+        len = sprintf ( buf, "dev# MAJ minor node Format\n");
+        for ( i = 0; i < DASD_MAX_DEVICES; i++ ) {
+                dasd_information_t *info = dasd_info[i];
+                if ( ! info )
+                        continue;
+                if ( len >= PAGE_SIZE - 80 ) {
+                        len += sprintf ( buf + len, "terminated...\n");
+                        break;  /* page nearly full -- stop writing */
+                }
+                len += sprintf ( buf + len,
+                                 "%04X %3d %5d /dev/dasd%c",
+                                 dasd_info[i]->info.devno,
+                                 DASD_MAJOR,
+                                 i << PARTN_BITS,
+                                 'a' + i );
+                if (info->flags & DASD_INFO_FLAGS_NOT_FORMATTED) {
+                        len += sprintf ( buf + len, "    n/a");
+                } else {
+                        len += sprintf ( buf + len, " %6d",
+                                         info->sizes.bp_block);
+                }
+                len += sprintf ( buf + len, "\n");
+        }
+        return len;
+}
+
+
+/*
+ * Create the /proc node for device index di.
+ * Not implemented yet -- intentionally empty stub.
+ */
+void
+dasd_proc_add_node (int di)
+{
+}
diff --git a/drivers/s390/block/dasd_profile.c b/drivers/s390/block/dasd_profile.c
new file mode 100644
index 000000000..7484f2be2
--- /dev/null
+++ b/drivers/s390/block/dasd_profile.c
@@ -0,0 +1,208 @@
+#include <linux/mm.h>
+
+#include <linux/dasd.h>
+
+#include "dasd_types.h"
+
+#define PRINTK_HEADER "dasd_profile:"
+
+/* 16-bucket logarithmic histograms, lazily zeroed on first request */
+static long dasd_io_reqs=0;     /* number of requests processed at all */
+static long dasd_io_secs[16];   /* histogram of request sizes */
+static long dasd_io_times[16];  /* histogram of request total times */
+static long dasd_io_timps[16];  /* histogram of request times per sector */
+static long dasd_io_time1[16];  /* histogram of time from build to start */
+static long dasd_io_time2[16];  /* histogram of time from start to irq */
+static long dasd_io_time2ps[16];        /* ... from start to irq, per sector */
+static long dasd_io_time3[16];  /* histogram of time from irq to end */
+
+/*
+ * Account one finished request into the logarithmic histograms.
+ * Times are taken from the cqr's TOD clock stamps (>> 12 converts to
+ * microseconds).  The arrays are zeroed when the first request is
+ * counted.
+ *
+ * Fix over the previous version: requests with nr_sectors == 0 are
+ * skipped -- they previously caused a division by zero in the
+ * per-sector calculations.
+ */
+void
+dasd_profile_add ( cqr_t *cqr )
+{
+        int ind;
+        long strtime,irqtime,endtime,tottime;
+        long tottimeps,sectors;
+        long help;
+        if ( ! cqr -> req )
+                return;
+        sectors = cqr -> req -> nr_sectors;
+        if ( ! sectors )        /* avoid division by zero below */
+                return;
+        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
+        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
+        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
+        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
+        tottimeps = tottime / sectors;
+
+        if (! dasd_io_reqs ++){
+                for ( ind = 0; ind < 16; ind ++) {
+                        dasd_io_secs[ind] = 0;
+                        dasd_io_times[ind]=0;
+                        dasd_io_timps[ind]=0;
+                        dasd_io_time1[ind]=0;
+                        dasd_io_time2[ind]=0;
+                        dasd_io_time2ps[ind]=0;
+                        dasd_io_time3[ind]=0;
+                }
+        };
+
+        /* bucket index = position of highest set bit above 8 */
+        for ( ind = 0, help = sectors >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_secs[ind] ++;
+
+        for ( ind = 0, help = tottime >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_times[ind] ++;
+
+        for ( ind = 0, help = tottimeps >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_timps[ind] ++;
+
+        for ( ind = 0, help = strtime >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_time1[ind] ++;
+
+        for ( ind = 0, help = irqtime >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_time2[ind] ++;
+
+        for ( ind = 0, help = (irqtime/sectors) >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_time2ps[ind] ++;
+
+        for ( ind = 0, help = endtime >> 3;
+              ind < 15 && help;
+              help = help >> 1,ind ++);
+        dasd_io_time3[ind] ++;
+}
+
+/* append one 16-bucket histogram (title line + scaled counters) */
+static int
+dasd_sprintf_hist (char *buf, const char *title, long *data, int shift)
+{
+        int i, len;
+
+        len = sprintf (buf, "%s\n", title);
+        for (i = 0; i < 16; i++)
+                len += sprintf (buf + len, "%4ld ", data[i] >> shift);
+        len += sprintf (buf + len, "\n");
+        return len;
+}
+
+/*
+ * /proc/dasd/statistics read handler: dump all profiling histograms,
+ * scaled down so the largest counter fits into four digits.
+ */
+int
+dasd_proc_read_statistics ( char * buf, char **start,
+                            off_t off, int len, int d)
+{
+        int shift, help;
+
+        /* common scale factor for all histograms */
+        for ( shift = 0, help = dasd_io_reqs;
+              help > 8192;
+              help = help >> 1,shift ++);
+        len = sprintf ( buf, "%ld dasd I/O requests\n", dasd_io_reqs);
+        len += sprintf ( buf+len, "__<4 ___8 __16 __32 __64 _128 _256 _512 __1k __2k __4k __8k _16k _32k _64k >64k\n");
+        len += dasd_sprintf_hist (buf+len, "Histogram of sizes (512B secs)",
+                                  dasd_io_secs, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O times",
+                                  dasd_io_times, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O times per sector",
+                                  dasd_io_timps, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O time till ssch",
+                                  dasd_io_time1, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O time between ssch and irq",
+                                  dasd_io_time2, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O time between ssch and irq per sector",
+                                  dasd_io_time2ps, shift);
+        len += dasd_sprintf_hist (buf+len, "Histogram of I/O time between irq and end",
+                                  dasd_io_time3, shift);
+        return len;
+}
+/* One trace record: a TOD clock stamp (with the CPU number folded
+ * into the low bits), the caller's address, and a user tag. */
+typedef
+struct {
+        union {
+                unsigned long long clock;
+                struct {
+                        unsigned int ts1;
+                        unsigned int ts2 : 20;
+                        unsigned int unused : 8;
+                        unsigned int cpu : 4;
+                } __attribute__ ((packed)) s;
+        } __attribute__ ((packed)) u;
+        unsigned long caller_address;
+        unsigned long tag;
+} __attribute__ ((packed)) dasd_debug_entry;
+
+/* one page of trace entries, allocated on first use; dasd_debug_actual
+ * is the next slot, debug_lock serializes writers */
+static dasd_debug_entry *dasd_debug_area = NULL;
+static dasd_debug_entry *dasd_debug_actual;
+static spinlock_t debug_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Append a trace entry tagged with 'tag' to the debug ring.  The
+ * one-page buffer is allocated on the first call; callers may be in
+ * interrupt context (GFP_ATOMIC).
+ */
+void
+dasd_debug ( unsigned long tag )
+{
+        long flags;
+        dasd_debug_entry *d;
+        /* initialize in first call ... */
+        if ( ! dasd_debug_area ) {
+                dasd_debug_actual = dasd_debug_area =
+                    (dasd_debug_entry *) get_free_page (GFP_ATOMIC);
+                if ( ! dasd_debug_area ) {
+                        PRINT_WARN("No debug area allocated\n");
+                        return;
+                }
+                memset (dasd_debug_area,0,PAGE_SIZE);
+        }
+        /* renormalize to page */
+        /* NOTE(review): the byte-offset modulo only wraps correctly if
+         * PAGE_SIZE is a multiple of sizeof(dasd_debug_entry); looks
+         * true for the 16-byte entry on 31-bit -- confirm for other
+         * layouts. */
+        spin_lock_irqsave(&debug_lock,flags);
+        dasd_debug_actual = (dasd_debug_entry *)
+            ( (unsigned long) dasd_debug_area +
+              ( ( (unsigned long)dasd_debug_actual -
+                  (unsigned long)dasd_debug_area ) % PAGE_SIZE ) );
+        d = dasd_debug_actual ++;
+        spin_unlock_irqrestore(&debug_lock,flags);
+        /* write CPUID to lowest 12 bits of clock... */
+        __asm__ __volatile__ ( "STCK %0"
+                               :"=m" (d->u.clock));
+        d->tag = tag;
+        d -> caller_address = (unsigned long) __builtin_return_address(0);
+        d->u.s.cpu = smp_processor_id();
+}
+
+/*
+ * /proc/dasd/debug read handler: dump trace entries (CPU, split TOD
+ * stamp, caller address, tag) until the output approaches one page.
+ * Tags whose first byte is 'D' are printed as 4-char ASCII, others
+ * as hex.
+ */
+int
+dasd_proc_read_debug ( char * buf, char **start,
+                       off_t off, int len, int dd)
+{
+        dasd_debug_entry *d;
+        char tag[9] = { 0, };
+        long flags;
+        spin_lock_irqsave(&debug_lock,flags);
+        len = 0;
+        for( d = dasd_debug_area;
+             len < 4068 ;
+             d ++ ) {
+                if ( *(char*)(&d->tag) == 'D' ) {
+                        memcpy(tag,&(d->tag),4);
+                        tag[4]=0;
+                }
+                else {
+                        sprintf(tag,"%08lx", d->tag);
+                        tag[8]=0;
+                }
+                len += sprintf ( buf+len,
+                                 "%x %08x%05x %08lx (%8s)\n",
+                                 d->u.s.cpu, d->u.s.ts1, d->u.s.ts2,
+                                 d->caller_address,tag);
+        }
+        spin_unlock_irqrestore(&debug_lock,flags);
+        return len;
+}
diff --git a/drivers/s390/block/dasd_types.h b/drivers/s390/block/dasd_types.h
new file mode 100644
index 000000000..b453bc2f8
--- /dev/null
+++ b/drivers/s390/block/dasd_types.h
@@ -0,0 +1,284 @@
+/*
+ * File...........: linux/drivers/s390/block/dasd_types.h
+ * Author.........: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Created........: 08/31/1999
+ * Last Modified..: 09/29/1999
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+
+ * List of Changes:
+ - Initial Release as of 09/29/1999
+
+ * Description
+
+ * Restrictions
+
+ * Known Bugs
+
+ * Todo-List
+
+ */
+
+#ifndef DASD_TYPES_H
+#define DASD_TYPES_H
+
+#include <linux/config.h>
+#include <linux/dasd.h>
+#include <linux/blkdev.h>
+
+#include <asm/irq.h>
+
+/* basic ECKD channel command codes */
+#define CCW_DEFINE_EXTENT 0x63
+#define CCW_LOCATE_RECORD 0x43
+#define CCW_READ_DEVICE_CHARACTERISTICS 0x64
+
+/* Discipline of a DASD device; members exist only for the
+ * disciplines configured into the kernel. */
+typedef
+enum {
+        dasd_none = -1,
+#ifdef CONFIG_DASD_ECKD
+        dasd_eckd,
+#endif /* CONFIG_DASD_ECKD */
+#ifdef CONFIG_DASD_MDSK
+        dasd_mdsk,
+#endif /* CONFIG_DASD_MDSK */
+#ifdef CONFIG_DASD_CKD
+        dasd_ckd,
+#endif /* CONFIG_DASD_CKD */
+        dasd_end
+} dasd_type_t;
+
+/* Read Device Characteristics data for ECKD devices.  The layout
+ * mirrors the hardware-defined RDC block byte for byte (hence
+ * packed); do not reorder or resize fields. */
+typedef
+struct {
+        __u16 cu_type;
+        struct {
+                unsigned char support:2;
+                unsigned char async:1;
+                unsigned char reserved:1;
+                unsigned char cache_info:1;
+                unsigned char model:3;
+        } __attribute__ ((packed)) cu_model;
+        __u16 dev_type;
+        __u8 dev_model;
+        struct {
+                unsigned char mult_burst:1;
+                unsigned char RT_in_LR:1;
+                unsigned char reserved1:1;
+                unsigned char RD_IN_LR:1;
+                unsigned char reserved2:4;
+                unsigned char reserved3:8;
+                unsigned char defect_wr:1;
+                unsigned char reserved4:2;
+                unsigned char striping:1;
+                unsigned char reserved5:4;
+                unsigned char cfw:1;
+                unsigned char reserved6:2;
+                unsigned char cache:1;
+                unsigned char dual_copy:1;
+                unsigned char dfw:1;
+                unsigned char reset_alleg:1;
+                unsigned char sense_down:1;
+        } __attribute__ ((packed)) facilities;
+        __u8 dev_class;
+        __u8 unit_type;
+        __u16 no_cyl;
+        __u16 trk_per_cyl;
+        __u8 sec_per_trk;
+        __u8 byte_per_track[3]; /* 3-byte big-endian track capacity */
+        __u16 home_bytes;
+        __u8 formula;           /* selects which capacity formula applies */
+        union {
+                struct {
+                        __u8 f1;
+                        __u16 f2;
+                        __u16 f3;
+                } __attribute__ ((packed)) f_0x01;
+                struct {
+                        __u8 f1;
+                        __u8 f2;
+                        __u8 f3;
+                        __u8 f4;
+                        __u8 f5;
+                } __attribute__ ((packed)) f_0x02;
+        } __attribute__ ((packed)) factors;
+        __u16 first_alt_trk;
+        __u16 no_alt_trk;
+        __u16 first_dia_trk;
+        __u16 no_dia_trk;
+        __u16 first_sup_trk;
+        __u16 no_sup_trk;
+        __u8 MDR_ID;
+        __u8 OBR_ID;
+        __u8 director;
+        __u8 rd_trk_set;
+        __u16 max_rec_zero;
+        __u8 reserved1;
+        __u8 RWANY_in_LR;
+        __u8 factor6;
+        __u8 factor7;
+        __u8 factor8;
+        __u8 reserved2[3];
+        __u8 reserved3[10];
+} __attribute__ ((packed, aligned (32)))
+
+dasd_eckd_characteristics_t;
+
+/* eckd count area: the count field of a CKD record -- cylinder,
+ * head, record number, key length and data length */
+typedef struct {
+        __u16 cyl;
+        __u16 head;
+        __u8 record;
+        __u8 kl;
+        __u16 dl;
+} __attribute__ ((packed))
+
+eckd_count_t;
+
+#ifdef CONFIG_DASD_CKD
+/* opaque 64-byte characteristics blob for plain CKD devices */
+struct dasd_ckd_characteristics {
+        char info[64];
+};
+
+#endif /* CONFIG_DASD_CKD */
+
+#ifdef CONFIG_DASD_ECKD
+/* raw view of the ECKD characteristics block */
+struct dasd_eckd_characteristics {
+        char info[64];
+};
+
+#endif /* CONFIG_DASD_ECKD */
+
+/* Discipline-independent view of the 64-byte device characteristics;
+ * the union member used depends on the device's dasd_type_t. */
+typedef
+union {
+        char __attribute__ ((aligned (32))) bytes[64];
+#ifdef CONFIG_DASD_CKD
+        struct dasd_ckd_characteristics ckd;
+#endif /* CONFIG_DASD_CKD */
+#ifdef CONFIG_DASD_ECKD
+        dasd_eckd_characteristics_t eckd;
+#endif /* CONFIG_DASD_ECKD */
+} __attribute__ ((aligned (32)))
+
+dasd_characteristics_t;
+
+/* life-cycle states of a channel queued request (single bits so a
+ * wait loop can test several states at once) */
+#define CQR_STATUS_EMPTY        0x00
+#define CQR_STATUS_FILLED       0x01
+#define CQR_STATUS_QUEUED       0x02
+#define CQR_STATUS_IN_IO        0x04
+#define CQR_STATUS_DONE         0x08
+#define CQR_STATUS_RETRY        0x10
+#define CQR_STATUS_ERROR        0x20
+#define CQR_STATUS_FAILED       0x40
+#define CQR_STATUS_SLEEP        0x80
+
+/* execution options of a channel queued request */
+#define CQR_FLAGS_SLEEP  0x01
+#define CQR_FLAGS_WAIT   0x02
+#define CQR_FLAGS_NOLOCK 0x04
+#define CQR_FLAGS_NORETRY 0x08
+
+/* Channel Queued Request: one channel program plus its bookkeeping
+ * (state, timing stamps, queue linkage). */
+typedef
+struct cqr_t {
+        unsigned int magic;     /* magic number should be "DASD" */
+        atomic_t status;        /* current status of request */
+        unsigned short retries; /* counter for retry in error case */
+        unsigned short cplength;/* Length of channel program (CP) */
+        unsigned short devindex;/* device number */
+        unsigned short flags;   /* Flags for execution */
+
+        void * data;            /* additional data area for CP */
+        ccw1_t *cpaddr;         /* Address of CP */
+        struct request *req;    /* backpointer to struct request */
+        struct cqr_t *next;     /* forward chain in chanq */
+        struct cqr_t *int4cqr;  /* which cqr ist the nect PCI for? */
+        /* TOD clock stamps taken at build/start/interrupt/end time */
+        unsigned long long buildclk;
+        unsigned long long startclk;
+        unsigned long long stopclk;
+        unsigned long long endclk;
+        devstat_t *dstat;       /* savearea for devstat */
+        spinlock_t lock;
+        int options;
+} __attribute__ ((packed))
+cqr_t;
+
+/* Derived size information of a device, filled by the discipline's
+ * fill_sizes_* callbacks. */
+typedef
+struct {
+        unsigned long int kbytes;
+        unsigned int bp_sector;
+        unsigned int bp_block;
+        unsigned int blocks;
+        unsigned int s2b_shift; /* log2(bp_block / 512) */
+        unsigned int b2k_shift; /* log2(bp_block / 1024) */
+        unsigned int label_block;
+} dasd_sizes_t;
+
+/* state bits of a channel queue */
+#define DASD_CHANQ_ACTIVE 0x01
+#define DASD_CHANQ_BUSY 0x02
+#define DASD_REQUEST_Q_BROKEN 0x04
+
+/* Per-device queue of channel queued requests. */
+typedef
+struct dasd_chanq_t {
+        volatile cqr_t *head;
+        volatile cqr_t *tail;
+        spinlock_t q_lock;      /* lock for queue operations */
+        spinlock_t f_lock;      /* lock for flag operations */
+        int queued_requests;
+        atomic_t flags;
+        atomic_t dirty_requests;
+        struct dasd_chanq_t *next_q;    /* pointer to next queue */
+} __attribute__ ((packed, aligned (16)))
+dasd_chanq_t;
+
+/* bits of dasd_information_t.flags */
+#define DASD_INFO_FLAGS_INITIALIZED 0x01
+#define DASD_INFO_FLAGS_NOT_FORMATTED 0x02
+#define DASD_INFO_FLAGS_PARTNS_DETECTED 0x04
+
+/* All per-device state of one DASD: hardware info, sizes, request
+ * queue, and discipline-private data. */
+typedef
+struct dasd_information_t {
+        devstat_t dev_status;
+        dasd_characteristics_t *rdc_data;       /* read device characteristics */
+        dasd_volume_label_t *label;
+        dasd_type_t type;       /* discipline of this device */
+        dev_info_t info;
+        dasd_sizes_t sizes;
+        dasd_chanq_t queue;
+        int open_count;
+        spinlock_t lock;
+        struct semaphore sem;
+        unsigned long flags;    /* DASD_INFO_FLAGS_* */
+        int irq;
+        struct proc_dir_entry *proc_device;
+        devfs_handle_t devfs_entry;
+        /* discipline-private area, selected by 'type' */
+        union {
+                struct {
+                        eckd_count_t count_data;
+                } eckd;
+                struct {
+                        char dummy;
+                } fba;
+                struct {
+                        char dummy;
+                } mdsk;
+                struct {
+                        char dummy;
+                } ckd;
+        } private;
+} dasd_information_t;
+
+/* Parameters of a format request: first/last track (stop_unit == -1
+ * means "to end of disk") and the block size (0 defaults to 4096). */
+typedef struct {
+        int start_unit;
+        int stop_unit;
+        int blksize;
+} format_data_t;
+
+/* Operations vector each discipline (ECKD, MDSK, ...) provides. */
+typedef
+struct {
+        int (*ck_devinfo) (dev_info_t *);       /* validate device info */
+        cqr_t *(*get_req_ccw) (int, struct request *);  /* build channel program */
+        cqr_t *(*rw_label) (int, int, char *);  /* read/write volume label */
+        int (*ck_characteristics) (dasd_characteristics_t *);
+        cqr_t *(*fill_sizes_first) (int);       /* start size detection */
+        int (*fill_sizes_last) (int);           /* evaluate size detection */
+        int (*dasd_format) (int, format_data_t *);
+} dasd_operations_t;
+
+extern dasd_information_t *dasd_info[];
+
+#endif /* DASD_TYPES_H */
diff --git a/drivers/s390/block/mdisk.c b/drivers/s390/block/mdisk.c
new file mode 100644
index 000000000..f485cb668
--- /dev/null
+++ b/drivers/s390/block/mdisk.c
@@ -0,0 +1,790 @@
+/*
+ * drivers/s390/block/mdisk.c
+ * VM minidisk device driver.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ */
+
+
+#ifndef __KERNEL__
+# define __KERNEL__
+#endif
+
+#define __NO_VERSION__
+#include <linux/config.h>
+#include <linux/version.h>
+
+char kernel_version [] = UTS_RELEASE;
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/malloc.h> /* kmalloc() */
+#include <linux/vmalloc.h> /* vmalloc() */
+#include <linux/fs.h> /* everything... */
+#include <linux/errno.h> /* error codes */
+#include <linux/timer.h>
+#include <linux/types.h> /* size_t */
+#include <linux/fcntl.h> /* O_ACCMODE */
+#include <linux/hdreg.h> /* HDIO_GETGEO */
+#include <linux/init.h> /* initfunc */
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+
+#include <asm/system.h> /* cli(), *_flags */
+#include <asm/uaccess.h> /* access_ok */
+#include <asm/io.h> /* virt_to_phys */
+
+ /* Added statement HSM 12/03/99 */
+#include <asm/irq.h>
+
+#define MAJOR_NR MDISK_MAJOR /* force definitions on in blk.h */
+
+#include <linux/blk.h>
+
+
+#include "mdisk.h" /* local definitions */
+
+/*
+ * structure for all device specific information
+ */
+
+/*
+ * Per-minidisk state; one entry per minor in mdisk_devices[].
+ */
+typedef struct mdisk_Dev {
+ u32 vdev; /* vdev of mindisk */
+ u32 size; /* size in blocks */
+ u32 status; /* status of last io operation */
+ u32 nr_bhs; /* number of buffer of last io operation */
+ u32 blksize; /* blksize from minidisk */
+ u32 blkmult; /* multiplier between blksize and 512 HARDSECT */
+ u32 blkshift; /* log2 of multiplier above */
+ /*
+ * each device has own iob and bio,
+ * it's possible to run io in parallel
+ * not used yet due to only one CURRENT per MAJOR
+ */
+
+ mdisk_rw_io_t* iob; /* each device has it own iob and bio */
+ mdisk_bio_t* bio;
+ /* Added statement HSM 12/03/99 */
+ devstat_t dev_status; /* Here we hold the I/O status */
+
+ int usage; /* usage counter */
+
+ struct tq_struct tqueue; /* per device task queue */
+} mdisk_Dev;
+
+
+/*
+ * appended to global structures in mdisk_init;
+ */
+
+static int mdisk_blksizes[MDISK_DEVS];
+static int mdisk_sizes[MDISK_DEVS] = { 0 };
+static int mdisk_hardsects[MDISK_DEVS];
+static int mdisk_maxsectors[MDISK_DEVS];
+
+/*
+ * structure hold device specific information
+ */
+
+static mdisk_Dev mdisk_devices[MDISK_DEVS];
+static mdisk_rw_io_t mdisk_iob[MDISK_DEVS] __attribute__ ((aligned(8)));
+static mdisk_bio_t mdisk_bio[MDISK_DEVS][256]__attribute__ ((aligned(8)));
+
+
+/*
+ * Parameter parsing
+ */
+struct {
+ long vdev[MDISK_DEVS];
+ long size[MDISK_DEVS];
+ long offset[MDISK_DEVS];
+ long blksize[MDISK_DEVS];
+} mdisk_setup_data;
+
+/*
+ * Parameter parsing function, called from init/main.c
+ * vdev : virtual device number
+ * size : size in kbyte
+ * offset : offset after which minidisk is available
+ * blksize : blocksize minidisk is formated
+ * Format is: mdisk=<vdev>:<size>:<offset>:<blksize>,<vdev>:<size>:<offset>...
+ * <vdev>:<size>:<offset>:<blksize> can be shortened to <vdev>:<size> with offset=0,blksize=512
+ */
+/*
+ * Parse the "mdisk=" kernel parameter into mdisk_setup_data.
+ * Accepts a comma-separated list of <vdev>[:<size>[:<offset>[:<blksize>]]]
+ * tuples, all in hexadecimal. Returns 1 on success (parameter consumed),
+ * 0 on a syntax error.
+ */
+int __init mdisk_setup(char *str)
+{
+ char *cur = str;
+ int vdev, size, offset=0,blksize;
+ static int i = 0; /* persists: slot index across multiple calls */
+ if (!i)
+ memset(&mdisk_setup_data,0,sizeof(mdisk_setup_data));
+
+ while (*cur != 0) {
+ /* defaults when fields are omitted: blksize=512, size/offset=0 */
+ blksize=MDISK_HARDSECT;
+ vdev = size = offset = 0;
+ if (!isxdigit(*cur)) goto syntax_error;
+ vdev = simple_strtoul(cur,&cur,16);
+ if (*cur != 0 && *cur != ',') {
+ if (*cur++ != ':') goto syntax_error;
+ if (!isxdigit(*cur)) goto syntax_error;
+ size = simple_strtoul(cur,&cur,16);
+ if (*cur == ':') { /* another colon -> offset specified */
+ cur++;
+ if (!isxdigit(*cur)) goto syntax_error;
+ offset = simple_strtoul(cur,&cur,16);
+ if (*cur == ':') { /* another colon -> blksize */
+ cur++;
+ if (!isxdigit(*cur)) goto syntax_error;
+ blksize = simple_strtoul(cur,&cur,16);
+ }
+ }
+ if (*cur != ',' && *cur != 0) goto syntax_error;
+ }
+ if (*cur == ',') cur++;
+ if (i >= MDISK_DEVS) {
+ printk(KERN_WARNING "mnd: too many devices\n");
+ return 1;
+ }
+ mdisk_setup_data.vdev[i] = vdev;
+ mdisk_setup_data.size[i] = size;
+ mdisk_setup_data.offset[i] = offset;
+ mdisk_setup_data.blksize[i] = blksize;
+
+ i++;
+ }
+
+ return 1;
+
+syntax_error:
+ printk(KERN_WARNING "mnd: syntax error in parameter string: %s\n", str);
+ return 0;
+}
+
+__setup("mdisk=", mdisk_setup);
+
+/*
+ * Open and close
+ */
+
+/*
+ * Open entry point: succeed only for configured minors and take a
+ * module plus per-device usage reference.
+ */
+static int mdisk_open (struct inode *inode, struct file *filp)
+{
+ int minor = MINOR(inode->i_rdev);
+
+ /* a size of 0 marks an unused slot (device not installed) */
+ if (minor >= MDISK_DEVS)
+ return -ENODEV;
+ if (mdisk_sizes[minor] == 0)
+ return -ENODEV;
+
+ MOD_INC_USE_COUNT;
+ mdisk_devices[minor].usage++;
+ return 0;
+}
+
+/*
+ * Release entry point: flush any dirty buffers for this device,
+ * then drop the usage and module references taken in mdisk_open.
+ */
+static int mdisk_release (struct inode *inode, struct file *filp)
+{
+ int minor = MINOR(inode->i_rdev);
+
+ fsync_dev(inode->i_rdev);
+
+ mdisk_devices[minor].usage--;
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+
+/*
+ * The mdisk() implementation
+ */
+
+/*
+ * ioctl entry point for the minidisk driver.
+ *
+ * Fixes over the previous version:
+ * - access_ok() returns non-zero when the access IS allowed; the old
+ * code did "if (err) return err;" which rejected every valid buffer
+ * (and returned a positive value instead of an errno).
+ * - BLKGETSIZE returned the residual byte count of copy_to_user()
+ * directly; a partial copy must yield -EFAULT.
+ */
+static int mdisk_ioctl (struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ int size=0;
+ struct hd_geometry *geo = (struct hd_geometry *)arg;
+ mdisk_Dev *dev = mdisk_devices + MINOR(inode->i_rdev);
+
+ switch(cmd) {
+
+ case BLKGETSIZE:
+ /* report device size in 512-byte sectors */
+ if (copy_to_user ((long *) arg, &dev->size, sizeof (long)))
+ return -EFAULT;
+ printk(KERN_WARNING "mnd: ioctl BLKGETSIZE %d\n",dev->size);
+ return 0;
+ case BLKFLSBUF: /* flush */
+ if (!suser()) return -EACCES; /* only root */
+ fsync_dev(inode->i_rdev);
+ invalidate_buffers(inode->i_rdev);
+ return 0;
+
+ case BLKRAGET: /* return the readahead value */
+ if (!arg) return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, (long *) arg, sizeof(long)))
+ return -EFAULT;
+ put_user(read_ahead[MAJOR(inode->i_rdev)],(long *) arg);
+ return 0;
+
+ case BLKRASET: /* set the readahead value */
+ if (!suser()) return -EACCES;
+ if (arg > 0xff) return -EINVAL; /* limit it */
+ read_ahead[MAJOR(inode->i_rdev)] = arg;
+ return 0;
+
+ case BLKRRPART: /* re-read partition table: can't do it */
+ return -EINVAL;
+
+ case HDIO_GETGEO:
+ /*
+ * get geometry of device -> linear
+ * (1 cylinder, 1 head, all blocks as sectors)
+ */
+ size = dev->size;
+ if (geo==NULL) return -EINVAL;
+ if (!access_ok(VERIFY_WRITE, geo, sizeof(*geo)))
+ return -EFAULT;
+ put_user(1, &geo->cylinders);
+ put_user(1, &geo->heads);
+ put_user(size, &geo->sectors);
+ put_user(0, &geo->start);
+ return 0;
+ }
+
+ return -EINVAL; /* unknown command */
+}
+
+/*
+ * The file operations
+ */
+
+static struct block_device_operations mdisk_fops = {
+ ioctl: mdisk_ioctl,
+ open: mdisk_open,
+ release: mdisk_release,
+};
+
+/*
+ * The 'low level' IO function
+ */
+
+
+/*
+ * Issue DIAGNOSE X'250' (VM block I/O) with the given parameter block
+ * and subcode. The iob virtual address is translated to a real address
+ * before being loaded into r2; the return code comes back in r3.
+ */
+static __inline__ int
+dia250(void* iob,int cmd)
+{
+ int rc;
+
+ iob = (void*) virt_to_phys(iob);
+
+ asm volatile (" lr 2,%1\n"
+ " lr 3,%2\n"
+ " .long 0x83230250\n" /* diag r2,r3,X'250' */
+ " lr %0,3"
+ : "=d" (rc)
+ : "d" (iob) , "d" (cmd)
+ : "2", "3" );
+ return rc;
+}
+/*
+ * Init of minidisk device
+ */
+
+/*
+ * Open the minidisk block I/O environment (diagnose 250, INIT_BIO)
+ * for the given block size, start offset and size (in blocks).
+ * Returns the diagnose return code (> 4 indicates failure here).
+ */
+static __inline__ int
+mdisk_init_io(mdisk_Dev *dev,int blocksize,int offset,int size)
+{
+ mdisk_init_io_t *iob = (mdisk_init_io_t*) dev->iob;
+ int rc;
+
+ memset(iob,0,sizeof(mdisk_init_io_t));
+
+ iob->dev_nr = dev->vdev;
+ iob->block_size = blocksize;
+ iob->offset = offset;
+ iob->start_block= 0;
+ iob->end_block = size;
+
+ rc = dia250(iob,INIT_BIO);
+
+ /*
+ * clear for following io once
+ * (the same buffer is reused as mdisk_rw_io_t, which is larger)
+ */
+
+ memset(iob,0,sizeof(mdisk_rw_io_t));
+
+ return rc;
+}
+
+/*
+ * release of minidisk device
+ */
+
+/*
+ * Close the minidisk block I/O environment again
+ * (diagnose 250, subcode TERM_BIO).
+ */
+static __inline__ int
+mdisk_term_io(mdisk_Dev *dev)
+{
+ mdisk_init_io_t *parm = (mdisk_init_io_t*) dev->iob;
+
+ /* a TERM_BIO parameter block only needs the device number */
+ memset(parm,0,sizeof(mdisk_init_io_t));
+ parm->dev_nr = dev->vdev;
+
+ return dia250(parm,TERM_BIO);
+}
+
+/*
+ * setup and start of minidisk io request
+ */
+
+/*
+ * Submit a (possibly clustered) read/write: 'length' mdisk_bio_t
+ * entries starting at bio_array are handed to diagnose 250 (RW_BIO).
+ * 'req' is passed back in the external interrupt as interrupt_params
+ * (the driver passes the mdisk_Dev pointer). 'sync' selects
+ * MDISK_SYNC or MDISK_ASYNC operation.
+ */
+static __inline__ int
+mdisk_rw_io_clustered (mdisk_Dev *dev,
+ mdisk_bio_t* bio_array,
+ int length,
+ int req,
+ int sync)
+{
+ int rc;
+ mdisk_rw_io_t *iob = dev->iob;
+
+ iob->dev_nr = dev->vdev;
+ iob->key = 0;
+ iob->flags = sync;
+
+ iob->block_count = length;
+ iob->interrupt_params = req;
+ /* hardware wants the real address of the bio list */
+ iob->bio_list = virt_to_phys(bio_array);
+
+ rc = dia250(iob,RW_BIO);
+ return rc;
+}
+
+
+
+/*
+ * The device characteristics function
+ */
+
+/*
+ * Issue DIAGNOSE X'210' (retrieve device information) on the given
+ * characteristics block. Returns the condition code (0 on success).
+ */
+static __inline__ int
+dia210(void* devchar)
+{
+ int rc;
+
+ devchar = (void*) virt_to_phys(devchar);
+
+ asm volatile (" lr 2,%1\n"
+ " .long 0x83200210\n" /* diag r2,r0,X'210' */
+ " ipm %0\n" /* condition code -> rc */
+ " srl %0,28"
+ : "=d" (rc)
+ : "d" (devchar)
+ : "2" );
+ return rc;
+}
+/*
+ * read the label of a minidisk and extract its characteristics
+ */
+
+/*
+ * Read the CMS label of minidisk 'dev' (setup slot 'i') and derive
+ * size, block size and reserved-area offset into mdisk_setup_data.
+ * The label lives in block 2 (FBA) or block 3 (CKD); since the disk's
+ * format is unknown, every block size 512..4096 is probed in turn.
+ * Returns 0 on success, non-zero error class otherwise.
+ */
+static __inline__ int
+mdisk_read_label (mdisk_Dev *dev, int i)
+{
+ static mdisk_dev_char_t devchar;
+ static long label[1024];
+ int block, b;
+ int rc;
+ mdisk_bio_t *bio;
+
+ devchar.dev_nr = dev -> vdev;
+ devchar.rdc_len = sizeof(mdisk_dev_char_t);
+
+ if (dia210(&devchar) == 0) {
+ if (devchar.vdev_class == DEV_CLASS_FBA) {
+ block = 2;
+ }
+ else {
+ block = 3;
+ }
+ bio = dev->bio;
+ /* probe block sizes 512, 1024, 2048, 4096 */
+ for (b=512;b<4097;b=b*2) {
+ rc = mdisk_init_io(dev, b, 0, 64);
+ if (rc > 4) {
+ continue; /* this blocksize did not work */
+ }
+ memset(&bio[0], 0, sizeof(mdisk_bio_t));
+ bio[0].type = MDISK_READ_REQ;
+ bio[0].block_number = block;
+ bio[0].buffer = virt_to_phys(&label);
+ dev->nr_bhs = 1;
+ if (mdisk_rw_io_clustered(dev,
+ &bio[0],
+ 1,
+ (unsigned long) dev,
+ MDISK_SYNC)
+ == 0 ) {
+ /* label[0] must hold "CMS1" in EBCDIC */
+ if (label[0] != 0xc3d4e2f1) { /* CMS1 */
+ printk ( KERN_WARNING "mnd: %4lX "
+ "is not CMS format\n",
+ mdisk_setup_data.vdev[i]);
+ rc = mdisk_term_io(dev);
+ return 1;
+ }
+ /* label[13]: number of reserved blocks */
+ if (label[13] == 0) {
+ printk ( KERN_WARNING "mnd: %4lX "
+ "is not reserved\n",
+ mdisk_setup_data.vdev[i]);
+ rc = mdisk_term_io(dev);
+ return 2;
+ }
+ /* size in KiB from total blocks minus label
+ * and reserved area; label[3] is blocksize */
+ mdisk_setup_data.size[i] =
+ (label[7] - 1 - label[13]) *
+ (label[3] >> 9) >> 1;
+ mdisk_setup_data.blksize[i] = label[3];
+ mdisk_setup_data.offset[i] = label[13] + 1;
+ rc = mdisk_term_io(dev);
+ return rc;
+ }
+ rc = mdisk_term_io(dev);
+ }
+ printk ( KERN_WARNING "mnd: Cannot read label of %4lX "
+ "- is it formatted?\n",
+ mdisk_setup_data.vdev[i]);
+ return 3;
+ }
+ return 4;
+}
+
+
+
+
+
+/*
+ * this handles a clustered request in success case
+ * all buffers are detach and marked uptodate to the kernel
+ * then CURRENT->bh is set to the last processed but not
+ * update buffer
+ */
+
+/*
+ * this handles a clustered request in success case
+ * all buffers are detach and marked uptodate to the kernel
+ * then CURRENT->bh is set to the last processed but not
+ * update buffer
+ *
+ * nr_bhs is the number of buffer heads the preceding transfer
+ * covered; the final one is completed via end_request(1), which
+ * also advances the request queue.
+ */
+static __inline__ void
+mdisk_end_request(int nr_bhs)
+{
+ int i;
+ struct buffer_head *bh;
+ struct request *req;
+
+ if (nr_bhs > 1) {
+ req = CURRENT;
+ bh = req->bh;
+
+ /* complete all but the last buffer head by hand */
+ for (i=0; i < nr_bhs-1; i++) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ bh->b_end_io(bh,1); /* 1 = uptodate */
+ bh = req->bh;
+ }
+
+ /*
+ * set CURRENT to last processed, not marked buffer
+ */
+ req->buffer = bh->b_data;
+ req->current_nr_sectors = bh->b_size >> 9;
+ CURRENT = req;
+ }
+ end_request(1);
+}
+
+
+
+/*
+ * Block-driver specific functions
+ */
+
+/*
+ * Block driver request function: drains the default queue of MAJOR_NR.
+ * Each request is translated into an array of mdisk_bio_t entries and
+ * submitted via diagnose 250; completion is either handled inline
+ * (synchronous builds) or from the bottom half (asynchronous).
+ *
+ * Fixes over the previous version:
+ * - minor bound check used '>' and so let index == MDISK_DEVS through,
+ * reading past mdisk_devices[]; must be '>='.
+ * - the "%c" printks were handed the raw minor number instead of
+ * 'a' + minor, printing control characters in the device name.
+ */
+void mdisk_request(request_queue_t *queue)
+{
+ mdisk_Dev *dev;
+ mdisk_bio_t *bio;
+ struct buffer_head *bh;
+ unsigned int sector, nr, offset;
+ int rc,rw,i;
+
+ i = 0;
+ while(CURRENT) {
+ INIT_REQUEST;
+
+ /* Check if the minor number is in range */
+ if (DEVICE_NR(CURRENT_DEV) >= MDISK_DEVS) {
+ static int count = 0;
+ if (count++ < 5) /* print the message at most five times */
+ printk(KERN_WARNING "mnd: request for minor %d out of range\n",
+ DEVICE_NR(CURRENT_DEV) ) ;
+ end_request(0);
+ continue;
+ }
+
+ /*
+ * Pointer to device structure, from the static array
+ */
+ dev = mdisk_devices + DEVICE_NR(CURRENT_DEV);
+
+ /*
+ * check, if operation is past end of devices
+ */
+ if (CURRENT->nr_sectors + CURRENT->sector > dev->size) {
+ static int count = 0;
+ if (count++ < 5)
+ printk(KERN_WARNING "mnd%c: request past end of device\n",
+ 'a' + DEVICE_NR(CURRENT_DEV));
+ end_request(0);
+ continue;
+ }
+
+ /*
+ * do command (read or write)
+ */
+ switch(CURRENT->cmd) {
+ case READ:
+ rw = MDISK_READ_REQ;
+ break;
+ case WRITE:
+ rw = MDISK_WRITE_REQ;
+ break;
+ default:
+ /* can't happen */
+ end_request(0);
+ continue;
+ }
+
+ /*
+ * put the clustered requests in mdisk_bio array
+ * nr_sectors is checked against max_sectors in make_request
+ * nr_sectors and sector are always blocks of 512
+ * but bh_size depends on the filesystems size
+ */
+ sector = CURRENT->sector>>dev->blkshift;
+ bh = CURRENT->bh;
+ bio = dev->bio;
+ dev->nr_bhs = 0;
+
+ /*
+ * sector is translated to block in minidisk context
+ *
+ */
+ offset = 0;
+
+
+
+ for (nr = 0,i = 0;
+ nr < CURRENT->nr_sectors && bh;
+ nr+=dev->blkmult, sector++,i++) {
+ memset(&bio[i], 0, sizeof(mdisk_bio_t));
+ bio[i].type = rw;
+ bio[i].block_number = sector;
+ bio[i].buffer = virt_to_phys(bh->b_data+offset);
+ offset += dev->blksize;
+ /* step to the next buffer head when this one is full */
+ if (bh->b_size <= offset) {
+ offset = 0;
+ bh = bh->b_reqnext;
+ dev->nr_bhs++;
+ }
+ }
+
+ if (( rc = mdisk_rw_io_clustered(dev, &bio[0], i,
+ (unsigned long) dev,
+#ifdef CONFIG_MDISK_SYNC
+ MDISK_SYNC
+#else
+ MDISK_ASYNC
+#endif
+ )) > 8 ) {
+ printk(KERN_WARNING "mnd%c: %s request failed rc %d"
+ " sector %ld nr_sectors %ld \n",
+ 'a' + DEVICE_NR(CURRENT_DEV),
+ rw == MDISK_READ_REQ ? "read" : "write",
+ rc, CURRENT->sector, CURRENT->nr_sectors);
+ end_request(0);
+ continue;
+ }
+ i = 0;
+ /*
+ * Synchron: looping to end of request (INIT_REQUEST has return)
+ * Asynchron: end_request done in bottom half
+ */
+#ifdef CONFIG_MDISK_SYNC
+ mdisk_end_request(dev->nr_bhs);
+#else
+ if (rc == 0)
+ mdisk_end_request(dev->nr_bhs);
+ else
+ return;
+#endif
+ }
+}
+
+
+/*
+ * mdisk interrupt handler called when read/write request finished
+ * queues and marks a bottom half.
+ *
+ */
+/*
+ * mdisk interrupt handler called when read/write request finished
+ * queues and marks a bottom half.
+ *
+ * Runs as the service-signal external interrupt handler; sub-code
+ * 0x03 identifies block I/O completions. The mdisk_Dev pointer we
+ * passed as interrupt_params comes back in the lowcore ext_params.
+ */
+void do_mdisk_interrupt(void)
+{
+ u16 code;
+ mdisk_Dev *dev;
+
+ code = S390_lowcore.cpu_addr;
+
+ if ((code >> 8) != 0x03) {
+ printk("mnd: wrong sub-interruption code %d",code>>8);
+ return;
+ }
+
+ /*
+ * pointer to devives structure given as external interruption
+ * parameter
+ */
+ dev = (mdisk_Dev*) S390_lowcore.ext_params;
+ dev->status = code & 0x00ff; /* low byte carries the I/O status */
+
+ /* defer real completion work to the immediate bottom half */
+ queue_task(&dev->tqueue, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+/*
+ * the bottom half checks the status of request
+ * on success it calls end_request and calls mdisk_request
+ * if more transfer to do
+ */
+
+/*
+ * the bottom half checks the status of request
+ * on success it calls end_request and calls mdisk_request
+ * if more transfer to do
+ *
+ * Runs with io_request_lock held, as the request function expects.
+ */
+static void
+do_mdisk_bh(void *data)
+{
+ mdisk_Dev *dev = (mdisk_Dev*) data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+ /*
+ * check for status of asynchronous rw
+ */
+ if (dev->status != 0x00) {
+ printk("mnd: status of async rw %d",dev->status);
+ end_request(0); /* fail the current request */
+ } else {
+ /*
+ * end request for clustered requests
+ */
+ if (CURRENT)
+ mdisk_end_request(dev->nr_bhs);
+ }
+
+ /*
+ * if more to do, call mdisk_request
+ */
+ if (CURRENT)
+ mdisk_request(NULL);
+ spin_unlock_irqrestore(&io_request_lock, flags);
+}
+
+/*
+ * Dummy I/O interrupt handler registered on the minidisk's subchannel
+ * (see mdisk_init); all real completions arrive as external
+ * interrupts, so an I/O interrupt here is unexpected.
+ */
+void /* Added fuction HSM 12/03/99 */
+mdisk_handler (int cpu, void *ds, struct pt_regs *regs)
+{
+ printk (KERN_ERR "mnd: received I/O interrupt... shouldn't happen\n");
+}
+
+/*
+ * Driver initialization: register the major, set up the request queue
+ * and the global size tables, then probe and open every minidisk
+ * configured via the "mdisk=" parameter. Finally enables the
+ * service-signal external interruption used for async completion.
+ *
+ * Fixes over the previous version:
+ * - blk_queue_headactive() was called with undefined 'major'
+ * (must be MAJOR_NR).
+ * - the queue was initialized a second time via
+ * blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ * the redundant call is dropped.
+ * - on registration failure a positive MAJOR_NR was returned;
+ * init functions must return a negative errno.
+ */
+int __init mdisk_init(void)
+{
+ int rc,i;
+ mdisk_Dev *dev;
+ request_queue_t *q;
+
+ /*
+ * register block device
+ */
+ if (register_blkdev(MAJOR_NR,"mnd",&mdisk_fops) < 0) {
+ printk("mnd: unable to get major %d for mini disk\n"
+ ,MAJOR_NR);
+ return -EBUSY;
+ }
+ q = BLK_DEFAULT_QUEUE(MAJOR_NR);
+ blk_init_queue(q, mdisk_request);
+ blk_queue_headactive(BLK_DEFAULT_QUEUE(MAJOR_NR), 0);
+
+ /*
+ * setup sizes for available devices
+ */
+ read_ahead[MAJOR_NR] = MDISK_RAHEAD; /* 8 sector (4kB) read-ahead */
+ blk_size[MAJOR_NR] = mdisk_sizes; /* size of reserved mdisk */
+ blksize_size[MAJOR_NR] = mdisk_blksizes; /* blksize of device */
+ hardsect_size[MAJOR_NR] = mdisk_hardsects;
+ max_sectors[MAJOR_NR] = mdisk_maxsectors;
+
+ for (i=0;i<MDISK_DEVS;i++) {
+ if (mdisk_setup_data.vdev[i] == 0) {
+ continue; /* slot not configured */
+ }
+ /* Added block HSM 12/03/99 */
+ if ( request_irq(get_irq_by_devno(mdisk_setup_data.vdev[i]),
+ mdisk_handler, 0, "mnd",
+ &(mdisk_devices[i].dev_status)) ){
+ printk ( KERN_WARNING "mnd: Cannot acquire I/O irq of"
+ " %4lX for paranoia reasons, skipping\n",
+ mdisk_setup_data.vdev[i]);
+ continue;
+ }
+ /*
+ * open VM minidisk low level device
+ */
+ dev = &mdisk_devices[i];
+ dev->bio=mdisk_bio[i];
+ dev->iob=&mdisk_iob[i];
+ dev->vdev = mdisk_setup_data.vdev[i];
+
+ /* no size given -> read it from the CMS label */
+ if ( mdisk_setup_data.size[i] == 0 )
+ rc = mdisk_read_label(dev, i);
+ dev->size = mdisk_setup_data.size[i] * 2; /* buffer 512 b */
+ dev->blksize = mdisk_setup_data.blksize[i];
+ dev->tqueue.routine = do_mdisk_bh;
+ dev->tqueue.data = dev;
+ dev->blkmult = dev->blksize/512;
+ dev->blkshift =
+ dev->blkmult==1?0:
+ dev->blkmult==2?1:
+ dev->blkmult==4?2:
+ dev->blkmult==8?3:-1;
+
+ mdisk_sizes[i] = mdisk_setup_data.size[i];
+ mdisk_blksizes[i] = mdisk_setup_data.blksize[i];
+ mdisk_hardsects[i] = mdisk_setup_data.blksize[i];
+
+ /*
+ * max sectors for one clustered req
+ */
+ mdisk_maxsectors[i] = MDISK_MAXSECTORS*dev->blkmult;
+
+ rc = mdisk_init_io(dev,
+ mdisk_setup_data.blksize[i],
+ mdisk_setup_data.offset[i],/* offset in vdev*/
+ dev->size>>dev->blkshift /* size in blocks */
+ );
+ if (rc > 4) {
+ printk("mnd%c: init failed (rc: %d)\n",'a'+i,rc);
+ mdisk_sizes[i] = 0; /* mark slot unusable */
+ continue;
+ }
+
+ /*
+ * set vdev in device structure for further rw access
+ * vdev and size given by linload
+ */
+ printk("mnd%c: register device at major %X with %d blocks %d blksize \n",
+ 'a' + i, MAJOR_NR, dev->size>>dev->blkshift,dev->blkmult*512);
+ }
+
+ /*
+ * enable service-signal external interruptions,
+ * Control Register 0 bit 22 := 1
+ * (besides PSW bit 7 must be set to 1 somewhere for external
+ * interruptions)
+ */
+ ctl_set_bit(0, 9);
+
+ return 0;
+}
diff --git a/drivers/s390/block/mdisk.h b/drivers/s390/block/mdisk.h
new file mode 100644
index 000000000..084293031
--- /dev/null
+++ b/drivers/s390/block/mdisk.h
@@ -0,0 +1,94 @@
+/*
+ * drivers/s390/block/mdisk.h
+ * VM minidisk device driver.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ */
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define MDISK_DEVS 8 /* for disks */
+#define MDISK_RAHEAD 8 /* read ahead */
+#define MDISK_BLKSIZE 1024 /* 1k blocks */
+#define MDISK_HARDSECT 512 /* FIXME -- 512 byte blocks */
+#define MDISK_MAXSECTORS 256 /* max sectors for one request */
+
+
+
+/*
+ * low level io defines for diagnose 250
+ */
+
+#define MDISK_WRITE_REQ 0x01
+#define MDISK_READ_REQ 0x02
+
+#define MDISK_SYNC 0x00
+#define MDISK_ASYNC 0x02
+#define INIT_BIO 0x00
+#define RW_BIO 0x01
+#define TERM_BIO 0x02
+
+/*
+ * This stucture is used for clustered request
+ * up to 256 different request can be handled with one invocation
+ */
+
+/* one entry of the diagnose 250 block I/O list (layout fixed by CP) */
+typedef struct {
+ u8 type; /* MDISK_READ_REQ or MDISK_WRITE_REQ */
+ u8 status; /* per-block status returned by CP */
+ u16 spare1;
+ u32 block_number; /* block on the minidisk */
+ u32 alet; /* access-list entry token (unused: 0) */
+ u32 buffer; /* real address of the data buffer */
+} mdisk_bio_t;
+
+/* parameter block for diagnose 250 INIT_BIO/TERM_BIO (layout fixed by CP) */
+typedef struct {
+ u16 dev_nr; /* virtual device number */
+ u16 spare1[11];
+ u32 block_size; /* block size the minidisk is formatted with */
+ u32 offset; /* first usable block */
+ u32 start_block;
+ u32 end_block;
+ u32 spare2[6];
+} mdisk_init_io_t;
+
+/* parameter block for diagnose 250 RW_BIO (layout fixed by CP) */
+typedef struct {
+ u16 dev_nr; /* virtual device number */
+ u16 spare1[11];
+ u8 key; /* storage key for the transfer */
+ u8 flags; /* MDISK_SYNC or MDISK_ASYNC */
+ u16 spare2;
+ u32 block_count; /* number of mdisk_bio_t entries */
+ u32 alet;
+ u32 bio_list; /* real address of the mdisk_bio_t array */
+ u32 interrupt_params; /* returned in ext_params on completion */
+ u32 spare3[5];
+} mdisk_rw_io_t;
+
+/*
+ * low level definitions for Diagnose 210
+ */
+
+#define DEV_CLASS_FBA 0x01
+
+/*
+ * Data structures for Diagnose 210
+ */
+
+/* device characteristics returned by diagnose 210 (layout fixed by CP) */
+typedef struct {
+ u16 dev_nr; /* in: virtual device number to query */
+ u16 rdc_len; /* in: length of this structure */
+ u8 vdev_class; /* virtual device class (DEV_CLASS_FBA, ...) */
+ u8 vdev_type;
+ u8 vdev_status;
+ u8 vdev_flags;
+ u8 rdev_class; /* underlying real device description */
+ u8 rdev_type;
+ u8 rdev_model;
+ u8 rdev_features;
+} mdisk_dev_char_t;
+
+
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
new file mode 100644
index 000000000..1cc89d053
--- /dev/null
+++ b/drivers/s390/char/Makefile
@@ -0,0 +1,16 @@
+all: s390-char.o
+
+# fixed typo: was "CFLAFS +=", which silently defined an unused variable
+CFLAGS +=
+O_TARGET := s390-char.o
+O_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_3215_CONSOLE),y)
+  O_OBJS += con3215.o
+endif
+
+ifeq ($(CONFIG_HWC),y)
+  O_OBJS += hwc_con.o hwc_rw.o hwc_tty.o
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
new file mode 100644
index 000000000..96e35aece
--- /dev/null
+++ b/drivers/s390/char/con3215.c
@@ -0,0 +1,1129 @@
+/*
+ * drivers/s390/char/con3215.c
+ * 3215 line mode terminal driver.
+ *
+ * S390 version
+ * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+
+#include <linux/malloc.h>
+#include <linux/bootmem.h>
+#include <asm/io.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+
+#include "../../../arch/s390/kernel/cpcmd.h"
+#include <asm/irq.h>
+
+#define NR_3215 1
+#define NR_3215_REQ (4*NR_3215)
+#define RAW3215_BUFFER_SIZE 65536 /* output buffer size */
+#define RAW3215_INBUF_SIZE 256 /* input buffer size */
+#define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */
+#define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */
+#define RAW3215_MAX_CCWLEN 3968 /* max. bytes to write with one ccw */
+#define RAW3215_NR_CCWS ((RAW3215_BUFFER_SIZE/RAW3215_MAX_CCWLEN)+2)
+#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
+
+#define RAW3215_FIXED 1 /* 3215 console device is not be freed */
+#define RAW3215_ACTIVE 2 /* set if the device is in use */
+#define RAW3215_WORKING 4 /* set if a request is being worked on */
+#define RAW3215_THROTTLED 8 /* set if reading is disabled */
+#define RAW3215_STOPPED 16 /* set if writing is disabled */
+#define RAW3215_CLOSING 32 /* set while in close process */
+#define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */
+#define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */
+#define RAW3215_BH_PENDING 256 /* indication for bh scheduling */
+
+struct _raw3215_info; /* forward declaration ... */
+
+int raw3215_condevice = -1; /* preset console device */
+
+/*
+ * Request types for a 3215 device
+ */
+typedef enum {
+ RAW3215_FREE, RAW3215_READ, RAW3215_WRITE
+} raw3215_type;
+
+/*
+ * Request structure for a 3215 device
+ * (8-byte aligned so the ccw chain inside is usable by the channel)
+ */
+typedef struct _raw3215_req {
+ raw3215_type type; /* type of the request */
+ int start, end; /* start/end index into output buffer */
+ int residual; /* residual count for read request */
+ ccw1_t ccws[RAW3215_NR_CCWS]; /* space for the channel program */
+ struct _raw3215_info *info; /* pointer to main structure */
+ struct _raw3215_req *next; /* pointer to next request */
+} raw3215_req __attribute__ ((aligned(8)));
+
+/* per-device state of a 3215 line mode terminal */
+typedef struct _raw3215_info {
+ int flags; /* state flags (RAW3215_* bits) */
+ int irq; /* interrupt number to do_IO */
+ char *buffer; /* pointer to output buffer */
+ char *inbuf; /* pointer to input buffer */
+ int head; /* first free byte in output buffer */
+ int count; /* number of bytes in output buffer */
+ devstat_t devstat; /* device status structure for do_IO */
+ struct tty_struct *tty; /* pointer to tty structure if present */
+ struct tq_struct tqueue; /* task queue to bottom half */
+ raw3215_req *queued_read; /* pointer to queued read requests */
+ raw3215_req *queued_write; /* pointer to queued write requests */
+ wait_queue_head_t empty_wait; /* wait queue for flushing */
+ struct timer_list timer; /* timer for delayed output */
+ char *message; /* pending message from raw3215_irq */
+ int msg_dstat; /* dstat for pending message */
+ int msg_cstat; /* cstat for pending message */
+} raw3215_info;
+
+static raw3215_info *raw3215[NR_3215]; /* array of 3215 devices structures */
+static raw3215_req *raw3215_freelist; /* list of free request structures */
+static spinlock_t raw3215_freelist_lock;/* spinlock to protect free list */
+
+static struct tty_driver tty3215_driver;
+static struct tty_struct *tty3215_table[NR_3215];
+static struct termios *tty3215_termios[NR_3215];
+static struct termios *tty3215_termios_locked[NR_3215];
+static int tty3215_refcount;
+
+#ifndef MIN
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+/*
+ * Parse the "condev=<hex devno>" kernel parameter that preselects
+ * the 3215 console device. Out-of-range values are ignored.
+ */
+static int __init con3215_setup(char *str)
+{
+ int devno;
+
+ devno = simple_strtoul(str,&str,16);
+ if (devno < 0 || devno >= 65536)
+ return 1;
+ raw3215_condevice = devno;
+ return 1;
+}
+
+__setup("condev=", con3215_setup);
+
+/*
+ * Get a request structure from the free list
+ */
+/*
+ * Get a request structure from the free list
+ * NOTE(review): no empty-list check — presumably NR_3215_REQ is sized
+ * so the list can never run dry; confirm before reuse elsewhere.
+ */
+extern inline raw3215_req *raw3215_alloc_req(void) {
+ raw3215_req *req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&raw3215_freelist_lock, flags);
+ req = raw3215_freelist;
+ raw3215_freelist = req->next;
+ spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+ return req;
+}
+
+/*
+ * Put a request structure back to the free list
+ */
+/*
+ * Put a request structure back to the free list
+ * (marking it RAW3215_FREE guards against double free)
+ */
+extern inline void raw3215_free_req(raw3215_req *req) {
+ unsigned long flags;
+
+ if (req->type == RAW3215_FREE)
+ return; /* don't free a free request */
+ req->type = RAW3215_FREE;
+ spin_lock_irqsave(&raw3215_freelist_lock, flags);
+ req->next = raw3215_freelist;
+ raw3215_freelist = req;
+ spin_unlock_irqrestore(&raw3215_freelist_lock, flags);
+}
+
+/*
+ * Get a write request structure. That is either a new or the last
+ * queued write request. The request structure is set up in
+ * raw3215_mk_write_ccw.
+ */
+/*
+ * Get a write request structure. That is either a new or the last
+ * queued write request. The request structure is set up in
+ * raw3215_mk_write_ccw.
+ */
+static raw3215_req *raw3215_mk_write_req(raw3215_info *raw)
+{
+ raw3215_req *req;
+
+ /* check if there is a queued write request */
+ req = raw->queued_write;
+ if (req == NULL) {
+ /* no queued write request, use new req structure */
+ req = raw3215_alloc_req();
+ req->type = RAW3215_WRITE;
+ req->info = raw;
+ req->start = raw->head; /* output starts at current head */
+ } else
+ raw->queued_write = NULL; /* reuse and dequeue it */
+ return req;
+}
+
+/*
+ * Get a read request structure. If there is a queued read request
+ * it is used, but that shouldn't happen because a 3215 terminal
+ * won't accept a new read before the old one is completed.
+ */
+/*
+ * Get a read request structure. If there is a queued read request
+ * it is used, but that shouldn't happen because a 3215 terminal
+ * won't accept a new read before the old one is completed.
+ */
+static raw3215_req *raw3215_mk_read_req(raw3215_info *raw)
+{
+ raw3215_req *req;
+
+ /* there can only be ONE read request at a time */
+ req = raw->queued_read;
+ if (req == NULL) {
+ /* no queued read request, use new req structure */
+ req = raw3215_alloc_req();
+ req->type = RAW3215_READ;
+ req->info = raw;
+ } else
+ raw->queued_read = NULL; /* reuse and dequeue it */
+ return req;
+}
+
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes everything in the output buffer
+ * to the 3215 device.
+ */
+/*
+ * Set up a write request with the information from the main structure.
+ * A ccw chain is created that writes everything in the output buffer
+ * to the 3215 device. The buffer is circular, so a request may need
+ * several ccws: one per RAW3215_MAX_CCWLEN chunk and one extra at the
+ * wrap point. Returns the number of bytes not covered (0 normally).
+ */
+static int raw3215_mk_write_ccw(raw3215_info *raw, raw3215_req *req)
+{
+ ccw1_t *ccw;
+ int len, count, ix;
+
+ ccw = req->ccws;
+ req->end = (raw->head - 1) & (RAW3215_BUFFER_SIZE - 1);
+ len = ((req->end - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1;
+ ix = req->start;
+ while (len > 0) {
+ if (ccw > req->ccws)
+ ccw[-1].flags |= 0x40; /* use command chaining */
+ ccw->cmd_code = 0x01; /* write, auto carrier return */
+ ccw->flags = 0x20; /* ignore incorrect length ind. */
+ ccw->cda =
+ (void *) virt_to_phys(raw->buffer + ix);
+ count = (len > RAW3215_MAX_CCWLEN) ?
+ RAW3215_MAX_CCWLEN : len;
+ /* don't run past the physical end of the ring buffer */
+ if (ix + count > RAW3215_BUFFER_SIZE)
+ count = RAW3215_BUFFER_SIZE-ix;
+ ccw->count = count;
+ len -= count;
+ ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1);
+ ccw++;
+ }
+ return len;
+}
+
+/*
+ * Set up a read request that reads up to 160 byte from the 3215 device.
+ */
+/*
+ * Set up a read request that reads up to 160 byte from the 3215 device.
+ * A single ccw suffices; the residual count reports the actual length.
+ */
+static void raw3215_mk_read_ccw(raw3215_info *raw, raw3215_req *req)
+{
+ ccw1_t *ccw;
+
+ ccw = req->ccws;
+ ccw->cmd_code = 0x0A; /* read inquiry */
+ ccw->flags = 0x20; /* ignore incorrect length */
+ ccw->count = 160;
+ ccw->cda = (void *) virt_to_phys(raw->inbuf);
+}
+
+/*
+ * Start a read or a write request
+ */
+/*
+ * Start a read or a write request
+ * Reads take priority; only one request can be in flight, guarded by
+ * RAW3215_WORKING. A failed do_IO leaves the request queued for retry.
+ * Caller must hold the s390irq lock for raw->irq.
+ */
+static void raw3215_start_io(raw3215_info *raw)
+{
+ raw3215_req *req;
+ int res;
+
+ req = raw->queued_read;
+ if (req != NULL &&
+ !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) {
+ /* dequeue request */
+ raw->queued_read = NULL;
+ res = do_IO(raw->irq, req->ccws, (__u32) req, 0, 0);
+ if (res != 0) {
+ /* do_IO failed, put request back to queue */
+ raw->queued_read = req;
+ } else {
+ raw->flags |= RAW3215_WORKING;
+ }
+ }
+ req = raw->queued_write;
+ if (req != NULL &&
+ !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) {
+ /* dequeue request */
+ raw->queued_write = NULL;
+ res = do_IO(raw->irq, req->ccws, (__u32) req, 0, 0);
+ if (res != 0) {
+ /* do_IO failed, put request back to queue */
+ raw->queued_write = req;
+ } else {
+ raw->flags |= RAW3215_WORKING;
+ }
+ }
+}
+
+/*
+ * Function to start a delayed output after RAW3215_TIMEOUT seconds
+ */
+/*
+ * Function to start a delayed output after RAW3215_TIMEOUT seconds
+ * (timer callback; serialized against the irq handler via the
+ * s390irq lock)
+ */
+static void raw3215_timeout(unsigned long __data)
+{
+ raw3215_info *raw = (raw3215_info *) __data;
+ unsigned long flags;
+
+ s390irq_spin_lock_irqsave(raw->irq, flags);
+ if (raw->flags & RAW3215_TIMER_RUNS) {
+ del_timer(&raw->timer);
+ raw->flags &= ~RAW3215_TIMER_RUNS;
+ raw3215_start_io(raw); /* flush the delayed output now */
+ }
+ s390irq_spin_unlock_irqrestore(raw->irq, flags);
+}
+
+/*
+ * Function to conditionally start an IO. A read is started immediatly,
+ * a write is only started immediatly if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediatly a timer is started with a delay of RAW3215_TIMEOUT.
+ */
+/*
+ * Function to conditionally start an IO. A read is started immediatly,
+ * a write is only started immediatly if the flush flag is on or the
+ * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not
+ * done immediatly a timer is started with a delay of RAW3215_TIMEOUT.
+ * Caller must hold the s390irq lock for raw->irq.
+ */
+extern inline void raw3215_try_io(raw3215_info *raw)
+{
+ if (!(raw->flags & RAW3215_ACTIVE))
+ return;
+ if (raw->queued_read != NULL)
+ raw3215_start_io(raw);
+ else if (raw->queued_write != NULL) {
+ if (raw->count >= RAW3215_MIN_WRITE ||
+ (raw->flags & RAW3215_FLUSHING)) {
+ /* execute write requests bigger than minimum size */
+ raw3215_start_io(raw);
+ if (raw->flags & RAW3215_TIMER_RUNS) {
+ del_timer(&raw->timer);
+ raw->flags &= ~RAW3215_TIMER_RUNS;
+ }
+ } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
+ /* delay small writes */
+ init_timer(&raw->timer);
+ raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+ raw->timer.data = (unsigned long) raw;
+ raw->timer.function = raw3215_timeout;
+ add_timer(&raw->timer);
+ raw->flags |= RAW3215_TIMER_RUNS;
+ }
+ }
+}
+
+/*
+ * The bottom half handler routine for 3215 devices. It tries to start
+ * the next IO and wakes up processes waiting on the tty.
+ */
+/*
+ * The bottom half handler routine for 3215 devices. It tries to start
+ * the next IO and wakes up processes waiting on the tty.
+ * Also prints any message the irq handler deferred (printk is unsafe
+ * from the irq context there).
+ */
+static void raw3215_softint(void *data)
+{
+ raw3215_info *raw;
+ struct tty_struct *tty;
+ unsigned long flags;
+
+ raw = (raw3215_info *) data;
+ s390irq_spin_lock_irqsave(raw->irq, flags);
+ raw3215_try_io((raw3215_info *) data);
+ raw->flags &= ~RAW3215_BH_PENDING;
+ s390irq_spin_unlock_irqrestore(raw->irq, flags);
+ /* Check for pending message from raw3215_irq */
+ if (raw->message != NULL) {
+ printk(raw->message, raw->irq, raw->msg_dstat, raw->msg_cstat);
+ raw->message = NULL;
+ }
+ tty = raw->tty;
+ /* wake writers once enough buffer space has drained */
+ if (tty != NULL &&
+ RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
+ if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ tty->ldisc.write_wakeup)
+ (tty->ldisc.write_wakeup)(tty);
+ wake_up_interruptible(&tty->write_wait);
+ }
+}
+
+/*
+ * Function to safely add raw3215_softint to tq_immediate.
+ * The s390irq spinlock must be held.
+ */
+/*
+ * Function to safely add raw3215_softint to tq_immediate.
+ * The s390irq spinlock must be held.
+ * RAW3215_BH_PENDING prevents queuing the same task twice.
+ */
+static inline void raw3215_sched_bh(raw3215_info *raw)
+{
+ if (raw->flags & RAW3215_BH_PENDING)
+ return; /* already pending */
+ raw->flags |= RAW3215_BH_PENDING;
+ raw->tqueue.routine = raw3215_softint;
+ raw->tqueue.data = raw;
+ queue_task(&raw->tqueue, &tq_immediate);
+ mark_bh(IMMEDIATE_BH);
+}
+
+/*
+ * Find the raw3215_info structure associated with irq
+ */
+/*
+ * Find the active raw3215_info structure associated with irq,
+ * or NULL if no active device uses that irq.
+ */
+static inline raw3215_info *raw3215_find_info(int irq) {
+ raw3215_info *p;
+ int ix;
+
+ for (ix = 0; ix < NR_3215; ix++) {
+ p = raw3215[ix];
+ if (p != NULL && p->irq == irq &&
+ (p->flags & RAW3215_ACTIVE))
+ return p;
+ }
+ return NULL;
+}
+
+/*
+ * Interrupt routine, called from Ingo's I/O layer
+ *
+ * Runs with the s390irq lock of the subchannel held.  Nothing is
+ * printk'ed directly from here; warnings are stashed in raw->message
+ * and emitted later by the bottom half (raw3215_softint), since the
+ * console device itself may be the one interrupting.
+ *
+ * Fixes vs. the original:
+ *  - the "^n" newline-suppression test used '||' between the two
+ *    strncmp()s; the two patterns can never both match, so the
+ *    condition was always true and the feature was dead code.
+ *  - removed an unreachable 'if (raw == NULL) break;' in the default
+ *    case (raw was already guarded against NULL just above).
+ *  - fixed the doubled "in in" in the spurious-interrupt message.
+ */
+static void raw3215_irq(int irq, void *int_parm, struct pt_regs *regs)
+{
+	raw3215_info *raw;
+	raw3215_req *req;
+	struct tty_struct *tty;
+	devstat_t *stat;
+	int cstat, dstat;
+	int count, slen;
+
+	stat = (devstat_t *) int_parm;
+	req = (raw3215_req *) stat->intparm;
+	cstat = stat->cstat;
+	dstat = stat->dstat;
+	if (cstat != 0) {
+		/* nonzero subchannel status: report it via the bottom half */
+		raw = raw3215_find_info(irq);
+		if (raw != NULL) {
+			raw->message = KERN_WARNING
+				"Got nonzero channel status in raw3215_irq "
+				"(dev %i, dev sts 0x%2x, sch sts 0x%2x)";
+			raw->msg_dstat = dstat;
+			raw->msg_cstat = cstat;
+			raw3215_sched_bh(raw);
+		}
+	}
+	if (dstat & 0x01) { /* we got a unit exception */
+		dstat &= ~0x01;	 /* we can ignore it */
+	}
+	switch (dstat) {
+	case 0x80:
+		if (cstat != 0)
+			break;
+		/* Attention interrupt, someone hit the enter key */
+		if ((raw = raw3215_find_info(irq)) == NULL)
+			return; /* That shouldn't happen ... */
+		/* Setup a read request */
+		req = raw3215_mk_read_req(raw);
+		raw3215_mk_read_ccw(raw, req);
+		raw->queued_read = req;
+		if (MACHINE_IS_P390)
+			memset(raw->inbuf, 0, RAW3215_INBUF_SIZE);
+		raw3215_sched_bh(raw);
+		break;
+	case 0x08:
+	case 0x0C:
+		/* Channel end interrupt. */
+		raw = req->info;
+		if (req->type == RAW3215_READ) {
+			/* store residual count, then wait for device end */
+			req->residual = stat->rescnt;
+		}
+		if (dstat == 0x08)
+			break;
+		/* dstat == 0x0C: device end too -- fall through */
+	case 0x04:
+		/* Device end interrupt. */
+		raw = req->info;
+		if (req->type == RAW3215_READ && raw->tty != NULL) {
+			tty = raw->tty;
+			count = 160 - req->residual;
+			if (MACHINE_IS_P390) {
+				slen = strnlen(raw->inbuf, RAW3215_INBUF_SIZE);
+				if (count > slen)
+					count = slen;
+			} else
+			if (count >= TTY_FLIPBUF_SIZE - tty->flip.count)
+				count = TTY_FLIPBUF_SIZE - tty->flip.count - 1;
+			EBCASC(raw->inbuf, count);
+			if (count == 2 && (
+			    /* hat is 0xb0 in codepage 037 (US etc.) and thus */
+			    /* converted to 0x5e in ascii ('^') */
+			    strncmp(raw->inbuf, "^c", 2) == 0 ||
+			    /* hat is 0xb0 in several other codepages (German,*/
+			    /* UK, ...) and thus converted to ascii octal 252 */
+			    strncmp(raw->inbuf, "\252c", 2) == 0) ) {
+				/* emulate a control C = break */
+				tty->flip.count++;
+				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+				*tty->flip.char_buf_ptr++ = INTR_CHAR(tty);
+				tty_flip_buffer_push(raw->tty);
+			} else if (count == 2 && (
+				   strncmp(raw->inbuf, "^d", 2) == 0 ||
+				   strncmp(raw->inbuf, "\252d", 2) == 0) ) {
+				/* emulate a control D = end of file */
+				tty->flip.count++;
+				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+				*tty->flip.char_buf_ptr++ = EOF_CHAR(tty);
+				tty_flip_buffer_push(raw->tty);
+			} else if (count == 2 && (
+				   strncmp(raw->inbuf, "^z", 2) == 0 ||
+				   strncmp(raw->inbuf, "\252z", 2) == 0) ) {
+				/* emulate a control Z = suspend */
+				tty->flip.count++;
+				*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+				*tty->flip.char_buf_ptr++ = SUSP_CHAR(tty);
+				tty_flip_buffer_push(raw->tty);
+			} else {
+				memcpy(tty->flip.char_buf_ptr,
+				       raw->inbuf, count);
+				if (count < 2 ||
+				    (strncmp(raw->inbuf+count-2, "^n", 2) &&
+				     strncmp(raw->inbuf+count-2, "\252n", 2)) ) {
+					/* line does NOT end in "^n":
+					 * add the automatic newline
+					 * (was '||', which is always true) */
+					tty->flip.char_buf_ptr[count] = '\n';
+					memset(tty->flip.flag_buf_ptr,
+					       TTY_NORMAL, count + 1);
+					count++;
+				} else
+					count-=2;	/* strip the "^n" suffix */
+				tty->flip.char_buf_ptr += count;
+				tty->flip.flag_buf_ptr += count;
+				tty->flip.count += count;
+				tty_flip_buffer_push(raw->tty);
+			}
+		} else if (req->type == RAW3215_WRITE) {
+			/* release the ring-buffer space of the finished write */
+			raw->count -= ((req->end - req->start) &
+				       (RAW3215_BUFFER_SIZE - 1)) + 1;
+		}
+		raw->flags &= ~RAW3215_WORKING;
+		raw3215_free_req(req);
+		/* check for empty wait */
+		if (waitqueue_active(&raw->empty_wait) &&
+		    raw->queued_write == NULL &&
+		    raw->queued_read == NULL) {
+			wake_up_interruptible(&raw->empty_wait);
+		}
+		raw3215_sched_bh(raw);
+		break;
+	default:
+		/* Strange interrupt, I'll do my best to clean up */
+		if ((raw = raw3215_find_info(irq)) == NULL)
+			return; /* That shouldn't happen ... */
+		if (req != NULL && req->type != RAW3215_FREE) {
+			if (req->type == RAW3215_WRITE)
+				raw->count -= ((req->end - req->start) &
+					       (RAW3215_BUFFER_SIZE-1))+1;
+			raw->flags &= ~RAW3215_WORKING;
+			raw3215_free_req(req);
+		}
+		raw->message = KERN_WARNING
+			"Spurious interrupt in raw3215_irq "
+			"(dev %i, dev sts 0x%2x, sch sts 0x%2x)";
+		raw->msg_dstat = dstat;
+		raw->msg_cstat = cstat;
+		raw3215_sched_bh(raw);
+	}
+	return;
+}
+
+/*
+ * String write routine for 3215 devices
+ *
+ * Copies 'length' bytes from 'str' (user space if 'from_user' is set)
+ * into the output ring buffer, converts them to EBCDIC and queues
+ * write requests.  Busy-waits via wait_cons_dev() while the ring
+ * buffer has too little free space.  Returns the number of bytes
+ * queued, or -EFAULT if the first user-space copy faults.
+ */
+static int
+raw3215_write(raw3215_info *raw, const char *str,
+	      int from_user, unsigned int length)
+{
+	raw3215_req *req;
+	unsigned long flags;
+	int ret, c;
+	int count;
+
+	ret = 0;
+	while (length > 0) {
+		s390irq_spin_lock_irqsave(raw->irq, flags);
+		/* handle at most one ring buffer's worth per iteration */
+		count = (length > RAW3215_BUFFER_SIZE) ?
+			RAW3215_BUFFER_SIZE : length;
+		length -= count;
+
+		while (RAW3215_BUFFER_SIZE - raw->count < count) {
+			/* there might be a request pending */
+			raw3215_try_io(raw);
+			if (wait_cons_dev(raw->irq) != 0) {
+				/* that shouldn't happen */
+				raw->count = 0;
+			}
+		}
+
+		req = raw3215_mk_write_req(raw);
+		/* copy string to output buffer and convert it to EBCDIC */
+		if (from_user) {
+			while (1) {
+				/* chunk limited by free space and ring wrap */
+				c = MIN(count,
+					MIN(RAW3215_BUFFER_SIZE - raw->count,
+					    RAW3215_BUFFER_SIZE - raw->head));
+				if (c <= 0)
+					break;
+				c -= copy_from_user(raw->buffer + raw->head,
+						    str, c);
+				if (c == 0) {
+					/* NOTE(review): -EFAULT is only set when
+					 * nothing has been queued yet; later
+					 * iterations keep adding to 'ret' --
+					 * confirm this mixed return is intended */
+					if (!ret)
+						ret = -EFAULT;
+					break;
+				}
+				ASCEBC(raw->buffer + raw->head, c);
+				raw->head = (raw->head + c) &
+					(RAW3215_BUFFER_SIZE - 1);
+				raw->count += c;
+				str += c;
+				count -= c;
+				ret += c;
+			}
+		} else {
+			while (1) {
+				/* same chunking as above, kernel-space source */
+				c = MIN(count,
+					MIN(RAW3215_BUFFER_SIZE - raw->count,
+					    RAW3215_BUFFER_SIZE - raw->head));
+				if (c <= 0)
+					break;
+				memcpy(raw->buffer + raw->head, str, c);
+				ASCEBC(raw->buffer + raw->head, c);
+				raw->head = (raw->head + c) &
+					(RAW3215_BUFFER_SIZE - 1);
+				raw->count += c;
+				str += c;
+				count -= c;
+				ret += c;
+			}
+		}
+		raw3215_mk_write_ccw(raw, req);
+		raw->queued_write = req;
+		/* start or queue request */
+		raw3215_try_io(raw);
+		s390irq_spin_unlock_irqrestore(raw->irq, flags);
+
+	}
+
+	return ret;
+}
+
+/*
+ * Put character routine for 3215 devices
+ *
+ * Converts one character to EBCDIC, appends it to the output ring
+ * buffer and queues a write request.  Busy-waits (wait_cons_dev)
+ * while the ring buffer is completely full.
+ */
+static void raw3215_putchar(raw3215_info *raw, unsigned char ch)
+{
+	raw3215_req *req;
+	unsigned long flags;
+
+	s390irq_spin_lock_irqsave(raw->irq, flags);
+	while (RAW3215_BUFFER_SIZE - raw->count < 1) {
+		/* there might be a request pending */
+		raw3215_try_io(raw);
+		if (wait_cons_dev(raw->irq) != 0) {
+			/* that shouldn't happen */
+			raw->count = 0;
+		}
+	}
+
+	req = raw3215_mk_write_req(raw);
+	raw->buffer[raw->head] = (char) _ascebc[(int) ch];	/* ASCII -> EBCDIC */
+	raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1);	/* ring wrap */
+	raw->count++;
+	raw3215_mk_write_ccw(raw, req);
+	raw->queued_write = req;
+	/* start or queue request */
+	raw3215_try_io(raw);
+	s390irq_spin_unlock_irqrestore(raw->irq, flags);
+}
+
+/*
+ * Flush routine: raise the flush flag, kick any pending I/O,
+ * then drop the flag again.
+ */
+static void raw3215_flush_buffer(raw3215_info *rp)
+{
+	unsigned long irqflags;
+
+	s390irq_spin_lock_irqsave(rp->irq, irqflags);
+	if (rp->count > 0) {
+		rp->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(rp);
+		rp->flags &= ~RAW3215_FLUSHING;
+	}
+	s390irq_spin_unlock_irqrestore(rp->irq, irqflags);
+}
+
+/*
+ * Fire up a 3215 device: request its interrupt, mark it active
+ * and start any pending I/O.  Returns 0 on success, -1 if the
+ * interrupt could not be requested.
+ */
+static int raw3215_startup(raw3215_info *rp)
+{
+	unsigned long irqflags;
+	int rc;
+
+	if (rp->flags & RAW3215_ACTIVE)
+		return 0;	/* already up */
+	rc = request_irq(rp->irq, raw3215_irq, SA_INTERRUPT,
+			 "3215 terminal driver", &rp->devstat);
+	if (rc != 0)
+		return -1;
+	rp->flags |= RAW3215_ACTIVE;
+	s390irq_spin_lock_irqsave(rp->irq, irqflags);
+	raw3215_try_io(rp);
+	s390irq_spin_unlock_irqrestore(rp->irq, irqflags);
+
+	return 0;
+}
+
+/*
+ * Shutdown a 3215 device.
+ *
+ * Waits (interruptibly) until all queued requests have completed,
+ * then releases the interrupt.  Devices marked RAW3215_FIXED (the
+ * boot console) are never shut down.
+ *
+ * NOTE(review): request_irq() was called with &raw->devstat as the
+ * dev_id, but free_irq() is passed NULL here -- confirm the s390
+ * free_irq ignores dev_id.
+ * NOTE(review): RAW3215_ACTIVE is only cleared on the path where
+ * requests were outstanding; otherwise the flag stays set although
+ * the irq is freed -- verify this is intended.
+ */
+static void raw3215_shutdown(raw3215_info *raw)
+{
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long flags;
+
+	if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED))
+		return;
+	/* Wait for outstanding requests, then free irq */
+	s390irq_spin_lock_irqsave(raw->irq, flags);
+	if ((raw->flags & RAW3215_WORKING) ||
+	    raw->queued_write != NULL ||
+	    raw->queued_read != NULL) {
+		raw->flags |= RAW3215_CLOSING;
+		add_wait_queue(&raw->empty_wait, &wait);
+		current->state = TASK_INTERRUPTIBLE;
+		s390irq_spin_unlock_irqrestore(raw->irq, flags);
+		schedule();	/* woken from raw3215_irq via empty_wait */
+		s390irq_spin_lock_irqsave(raw->irq, flags);
+		current->state = TASK_RUNNING;
+		raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING);
+	}
+	free_irq(raw->irq, NULL);
+	s390irq_spin_unlock_irqrestore(raw->irq, flags);
+}
+
+/*
+ * Walk all subchannels and return the irq of the (number+1)-th
+ * device that is either the known console device or has control
+ * unit type 0x3215.  Returns -1 if no such device exists.
+ */
+static int
+raw3215_find_dev(int number)
+{
+	dev_info_t dinfo;
+	int found = 0;
+	int irq;
+
+	for (irq = get_irq_first(); irq != -ENODEV; irq = get_irq_next(irq)) {
+		if (get_dev_info(irq, &dinfo) == -ENODEV)
+			break;
+		if (dinfo.devno == raw3215_condevice ||
+		    dinfo.sid_data.cu_type == 0x3215) {
+			found++;
+			if (found > number)
+				return irq;
+		}
+	}
+	return -1; /* console not found */
+}
+
+#ifdef CONFIG_3215_CONSOLE
+
+/*
+ * Try to request the console IRQ. Called from init/main.c
+ * Returns 0 when there is nothing to do or startup succeeded,
+ * -1 when no console device was found or startup failed.
+ */
+int con3215_activate(void)
+{
+	raw3215_info *raw;
+
+	if (!MACHINE_IS_VM && !MACHINE_IS_P390)
+		return 0;	/* 3215 console only exists under VM / on P390 */
+	raw = raw3215[0]; /* 3215 console is the first one */
+	if (raw->irq == -1) /* no console device found in con3215_init */
+		return -1;
+	return raw3215_startup(raw);
+}
+
+/*
+ * Write a string to the 3215 console (kernel-space, console slot 0).
+ */
+static void
+con3215_write(struct console *co, const char *str, unsigned int count)
+{
+	if (count <= 0)
+		return;
+	/* console 3215 is always the first device */
+	raw3215_write(raw3215[0], str, 0, count);
+}
+
+/* Map the console to its tty device number. */
+kdev_t con3215_device(struct console *c)
+{
+	return MKDEV(TTY_MAJOR, c->index);
+}
+
+/*
+ * panic() calls console_unblank before the system enters a
+ * disabled, endless loop.
+ *
+ * Synchronously drains the whole output ring buffer so the final
+ * messages actually reach the console.
+ */
+void con3215_unblank(void)
+{
+	raw3215_info *raw;
+	unsigned long flags;
+
+	raw = raw3215[0]; /* console 3215 is the first one */
+	s390irq_spin_lock_irqsave(raw->irq, flags);
+	while (raw->count > 0) {
+		/* there might be a request pending */
+		raw->flags |= RAW3215_FLUSHING;
+		raw3215_try_io(raw);
+		if (wait_cons_dev(raw->irq) != 0) {
+			/* that shouldn't happen */
+			raw->count = 0;
+		}
+		raw->flags &= ~RAW3215_FLUSHING;
+	}
+	s390irq_spin_unlock_irqrestore(raw->irq, flags);
+}
+
+/* Console setup hook: the 3215 console has no options to parse. */
+static int __init con3215_consetup(struct console *co, char *options)
+{
+	return 0;
+}
+
+/*
+ * The console structure for the 3215 console
+ * (positional initializers -- the field comments below follow the
+ * struct console layout of this kernel; confirm against
+ * linux/console.h)
+ */
+static struct console con3215 = {
+	"tty3215",		/* name */
+	con3215_write,		/* write */
+	NULL,			/* read */
+	con3215_device,		/* device */
+	NULL,			/* wait_key */
+	con3215_unblank,	/* unblank */
+	con3215_consetup,	/* setup */
+	CON_PRINTBUFFER,	/* flags */
+	0,			/* index */
+	0,			/* cflag */
+	NULL			/* next */
+};
+
+#endif
+
+/*
+ * tty3215_open
+ *
+ * This routine is called whenever a 3215 tty is opened.
+ * Allocates and initializes the raw3215_info for the line on first
+ * open, then starts the device.
+ *
+ * Fix vs. the original: the memset() clearing the structure was done
+ * AFTER assigning raw->irq and raw->inbuf, wiping both fields again.
+ * The structure is now cleared first.
+ */
+static int tty3215_open(struct tty_struct *tty, struct file * filp)
+{
+	raw3215_info *raw;
+	int retval, line;
+
+	line = MINOR(tty->device) - tty->driver.minor_start;
+	if ((line < 0) || (line >= NR_3215))
+		return -ENODEV;
+
+	raw = raw3215[line];
+	if (raw == NULL) {
+		/* input buffer lives directly behind the info structure */
+		raw = kmalloc(sizeof(raw3215_info) +
+			      RAW3215_INBUF_SIZE, GFP_KERNEL);
+		if (raw == NULL)
+			return -ENOMEM;
+		/* clear the structure BEFORE filling in any fields */
+		memset(raw, 0, sizeof(raw3215_info));
+		raw->irq = raw3215_find_dev(line);
+		if (raw->irq == -1) {
+			kfree(raw);
+			return -ENODEV;
+		}
+		raw->inbuf = (char *) raw + sizeof(raw3215_info);
+		raw->buffer = (char *) kmalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL);
+		if (raw->buffer == NULL) {
+			kfree_s(raw, sizeof(raw3215_info));
+			return -ENOMEM;
+		}
+		raw->tqueue.routine = raw3215_softint;
+		raw->tqueue.data = raw;
+		init_waitqueue_head(&raw->empty_wait);
+		raw3215[line] = raw;
+	}
+
+	tty->driver_data = raw;
+	raw->tty = tty;
+
+	tty->low_latency = 0;  /* don't use bottom half for pushing chars */
+	/*
+	 * Start up 3215 device
+	 */
+	retval = raw3215_startup(raw);
+	if (retval)
+		return retval;
+
+	return 0;
+}
+
+/*
+ * tty3215_close()
+ *
+ * Called when the 3215 tty is closed.  On the final close the
+ * terminal is shut down after all pending requests have finished.
+ */
+static void tty3215_close(struct tty_struct *tty, struct file * filp)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	if (rp == NULL || tty->count > 1)
+		return;	/* not the last reference */
+	tty->closing = 1;
+	raw3215_shutdown(rp);	/* waits for outstanding requests */
+	tty->closing = 0;
+	rp->tty = NULL;
+}
+
+/*
+ * Returns the amount of free space in the output buffer.
+ */
+static int tty3215_write_room(struct tty_struct *tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	return RAW3215_BUFFER_SIZE - rp->count;
+}
+
+/*
+ * String write routine for 3215 ttys: hand the buffer straight
+ * to the raw device write routine.
+ */
+static int tty3215_write(struct tty_struct * tty, int from_user,
+			 const unsigned char *buf, int count)
+{
+	if (tty == NULL)
+		return 0;
+	return raw3215_write((raw3215_info *) tty->driver_data,
+			     buf, from_user, count);
+}
+
+/*
+ * Put character routine for 3215 ttys
+ */
+static void tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	if (tty == NULL)
+		return;
+	raw3215_putchar((raw3215_info *) tty->driver_data, ch);
+}
+
+/*
+ * flush_chars callback: nothing to do here -- raw3215_write() and
+ * raw3215_putchar() already queue and start the I/O for every
+ * character as it is written.
+ */
+static void tty3215_flush_chars(struct tty_struct *tty)
+{
+}
+
+/*
+ * Returns the number of characters in the output buffer
+ */
+static int tty3215_chars_in_buffer(struct tty_struct *tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	return rp->count;
+}
+
+/*
+ * Flush the output ring buffer and wake up writers waiting for room.
+ */
+static void tty3215_flush_buffer(struct tty_struct *tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	raw3215_flush_buffer(rp);
+	wake_up_interruptible(&tty->write_wait);
+	if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+	    tty->ldisc.write_wakeup != NULL)
+		tty->ldisc.write_wakeup(tty);
+}
+
+/*
+ * Currently we don't have any io controls for 3215 ttys
+ */
+static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
+			 unsigned int cmd, unsigned long arg)
+{
+	if (tty->flags & (1 << TTY_IO_ERROR))
+		return -EIO;
+	/* no driver-specific ioctls are implemented */
+	return -ENOIOCTLCMD;
+}
+
+/*
+ * Disable reading from a 3215 tty
+ */
+static void tty3215_throttle(struct tty_struct * tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	rp->flags |= RAW3215_THROTTLED;
+}
+
+/*
+ * Enable reading from a 3215 tty again and restart any I/O that
+ * was held back while throttled.
+ */
+static void tty3215_unthrottle(struct tty_struct * tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+	unsigned long irqflags;
+
+	if (!(rp->flags & RAW3215_THROTTLED))
+		return;
+	s390irq_spin_lock_irqsave(rp->irq, irqflags);
+	rp->flags &= ~RAW3215_THROTTLED;
+	raw3215_try_io(rp);
+	s390irq_spin_unlock_irqrestore(rp->irq, irqflags);
+}
+
+/*
+ * Disable writing to a 3215 tty
+ */
+static void tty3215_stop(struct tty_struct *tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+
+	rp->flags |= RAW3215_STOPPED;
+}
+
+/*
+ * Enable writing to a 3215 tty again and restart output that was
+ * held back while stopped.
+ */
+static void tty3215_start(struct tty_struct *tty)
+{
+	raw3215_info *rp = (raw3215_info *) tty->driver_data;
+	unsigned long irqflags;
+
+	if (!(rp->flags & RAW3215_STOPPED))
+		return;
+	s390irq_spin_lock_irqsave(rp->irq, irqflags);
+	rp->flags &= ~RAW3215_STOPPED;
+	raw3215_try_io(rp);
+	s390irq_spin_unlock_irqrestore(rp->irq, irqflags);
+}
+
+/*
+ * 3215 console driver boottime initialization code.
+ * Register console. We can't request the IRQ here, because
+ * it's too early (kmalloc isn't working yet). We'll have to
+ * buffer all the console requests until we can request the
+ * irq. For this purpose we use some pages of fixed memory.
+ */
+void __init con3215_init(void)
+{
+	raw3215_info *raw;
+	raw3215_req *req;
+	int i;
+
+	/* a 3215 only exists under VM or on a P/390 */
+	if (!MACHINE_IS_VM && !MACHINE_IS_P390)
+		return;
+	if (MACHINE_IS_VM) {
+		/* put the VM terminal into 3215 mode, no automatic CR */
+		cpcmd("TERM CONMODE 3215", NULL, 0);
+		cpcmd("TERM AUTOCR OFF", NULL, 0);
+	}
+
+	/* allocate 3215 request structures */
+	raw3215_freelist = NULL;
+	spin_lock_init(&raw3215_freelist_lock);
+	for (i = 0; i < NR_3215_REQ; i++) {
+		req = (raw3215_req *) alloc_bootmem(sizeof(raw3215_req));
+		req->next = raw3215_freelist;
+		raw3215_freelist = req;
+	}
+
+#ifdef CONFIG_3215_CONSOLE
+	/* the console device is always raw3215[0]; its memory comes from
+	 * the bootmem allocator because kmalloc is not up yet */
+	raw3215[0] = raw = (raw3215_info *)
+		alloc_bootmem(sizeof(raw3215_info));
+	memset(raw, 0, sizeof(raw3215_info));
+	raw->buffer = (char *) alloc_bootmem(RAW3215_BUFFER_SIZE);
+	raw->inbuf = (char *) alloc_bootmem(RAW3215_INBUF_SIZE);
+	/* Find the first console */
+	raw->irq = raw3215_find_dev(0);
+	raw->flags |= RAW3215_FIXED;	/* bootmem-backed: never shut down */
+	raw->tqueue.routine = raw3215_softint;
+	raw->tqueue.data = raw;
+	init_waitqueue_head(&raw->empty_wait);
+
+	if (raw->irq != -1) {
+		register_console(&con3215);
+		s390irq_spin_lock(raw->irq);
+		set_cons_dev(raw->irq);
+		s390irq_spin_unlock(raw->irq);
+	} else {
+		/* no console device found: give the bootmem back */
+		free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
+		free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
+		free_bootmem((unsigned long) raw, sizeof(raw3215_info));
+		raw3215[0] = NULL;
+		printk("Couldn't find a 3215 console device\n");
+	}
+#endif
+
+	/*
+	 * Initialize the tty_driver structure
+	 * Entries in tty3215_driver that are NOT initialized:
+	 * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+	 */
+
+	memset(&tty3215_driver, 0, sizeof(struct tty_driver));
+	tty3215_driver.magic = TTY_DRIVER_MAGIC;
+	tty3215_driver.driver_name = "tty3215";
+	tty3215_driver.name = "ttyS";
+	tty3215_driver.name_base = 0;
+	tty3215_driver.major = TTY_MAJOR;
+	tty3215_driver.minor_start = 64;
+	tty3215_driver.num = NR_3215;
+	tty3215_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+	tty3215_driver.subtype = SYSTEM_TYPE_TTY;
+	tty3215_driver.init_termios = tty_std_termios;
+	tty3215_driver.init_termios.c_iflag = IGNBRK | IGNPAR;
+	tty3215_driver.init_termios.c_oflag = ONLCR;
+	tty3215_driver.init_termios.c_lflag = ISIG;
+	tty3215_driver.flags = TTY_DRIVER_REAL_RAW;
+	tty3215_driver.refcount = &tty3215_refcount;
+	tty3215_driver.table = tty3215_table;
+	tty3215_driver.termios = tty3215_termios;
+	tty3215_driver.termios_locked = tty3215_termios_locked;
+
+	tty3215_driver.open = tty3215_open;
+	tty3215_driver.close = tty3215_close;
+	tty3215_driver.write = tty3215_write;
+	tty3215_driver.put_char = tty3215_put_char;
+	tty3215_driver.flush_chars = tty3215_flush_chars;
+	tty3215_driver.write_room = tty3215_write_room;
+	tty3215_driver.chars_in_buffer = tty3215_chars_in_buffer;
+	tty3215_driver.flush_buffer = tty3215_flush_buffer;
+	tty3215_driver.ioctl = tty3215_ioctl;
+	tty3215_driver.throttle = tty3215_throttle;
+	tty3215_driver.unthrottle = tty3215_unthrottle;
+	tty3215_driver.send_xchar = NULL;
+	tty3215_driver.set_termios = NULL;
+	tty3215_driver.stop = tty3215_stop;
+	tty3215_driver.start = tty3215_start;
+	tty3215_driver.hangup = NULL;
+	tty3215_driver.break_ctl = NULL;
+	tty3215_driver.wait_until_sent = NULL;
+	tty3215_driver.read_proc = NULL;
+
+	if (tty_register_driver(&tty3215_driver))
+		panic("Couldn't register tty3215 driver\n");
+
+}
diff --git a/drivers/s390/char/hwc.h b/drivers/s390/char/hwc.h
new file mode 100644
index 000000000..5de3d1b6f
--- /dev/null
+++ b/drivers/s390/char/hwc.h
@@ -0,0 +1,249 @@
+/*
+ * drivers/s390/char/hwc.h
+ *
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <peschke@fh-brandenburg.de>
+ *
+ *
+ *
+ */
+
+#ifndef __HWC_H__
+#define __HWC_H__
+
+/* Event buffer types (evbuf_t.type) */
+#define ET_OpCmd 0x01
+#define ET_Msg 0x02
+#define ET_StateChange 0x08
+#define ET_PMsgCmd 0x09
+#define ET_CntlProgOpCmd 0x20
+
+/* Corresponding bits for the send/receive masks in init_hwcb_t */
+#define ET_OpCmd_Mask 0x80000000
+#define ET_Msg_Mask 0x40000000
+#define ET_StateChange_Mask 0x01000000
+#define ET_PMsgCmd_Mask 0x00800000
+#define ET_CtlProgOpCmd_Mask 0x00000001
+
+/* General message flags (go_t.general_msg_flags) */
+#define GMF_DOM 0x8000
+#define GMF_SndAlrm 0x4000
+#define GMF_HoldMsg 0x2000
+
+/* Line type flags (mto_t.line_type_flags) */
+#define LTF_CntlText 0x8000
+#define LTF_LabelText 0x4000
+#define LTF_DataText 0x2000
+#define LTF_EndText 0x1000
+#define LTF_PromptText 0x0800
+
+/* Condition codes returned by service_call() */
+#define HWC_COMMAND_INITIATED 0
+#define HWC_BUSY 2
+#define HWC_NOT_OPERATIONAL 3
+
+/* Service-call command words */
+#define HWC_CMDW_READDATA 0x00770005
+
+#define HWC_CMDW_WRITEDATA 0x00760005
+
+#define HWC_CMDW_WRITEMASK 0x00780005
+
+/* General data stream (GDS) vector ids */
+#define GDS_ID_MDSMU 0x1310
+
+#define GDS_ID_MDSRouteInfo 0x1311
+
+#define GDS_ID_AgUnWrkCorr 0x1549
+
+#define GDS_ID_SNACondReport 0x1532
+
+#define GDS_ID_CPMSU 0x1212
+
+#define GDS_ID_RoutTargInstr 0x154D
+
+#define GDS_ID_OpReq 0x8070
+
+#define GDS_ID_TextCmd 0x1320
+
+#define GDS_KEY_SelfDefTextMsg 0x31
+
+/* Common header of every HWC control block (HWCB) */
+#define _HWCB_HEADER u16 length; \
+ u8 function_code; \
+ u8 control_mask[3]; \
+ u16 response_code;
+
+/* Common header of every event buffer inside an HWCB */
+#define _EBUF_HEADER u16 length; \
+ u8 type; \
+ u8 flags; \
+ u16 _reserved;
+
+/* NOTE: throughout this header the typedef name follows the packed
+ * struct after a blank line -- unusual formatting, kept as is. */
+typedef struct {
+	_EBUF_HEADER
+} __attribute__ ((packed))
+
+evbuf_t;
+
+/* Header of a message data block (MDB) */
+#define _MDB_HEADER u16 length; \
+ u16 type; \
+ u32 tag; \
+ u32 revision_code;
+
+/* Header of a general object (GO) with time/date/origin info */
+#define _GO_HEADER u16 length; \
+ u16 type; \
+ u32 domid; \
+ u8 hhmmss_time[8]; \
+ u8 th_time[3]; \
+ u8 _reserved_0; \
+ u8 dddyyyy_date[7]; \
+ u8 _reserved_1; \
+ u16 general_msg_flags; \
+ u8 _reserved_2[10]; \
+ u8 originating_system_name[8]; \
+ u8 job_guest_name[8];
+
+/* Header of a message text object (MTO) */
+#define _MTO_HEADER u16 length; \
+ u16 type; \
+ u16 line_type_flags; \
+ u8 alarm_control; \
+ u8 _reserved[3];
+
+typedef struct {
+	_GO_HEADER
+} __attribute__ ((packed))
+
+go_t;
+
+typedef struct {
+	go_t go;
+} __attribute__ ((packed))
+
+mdb_body_t;
+
+typedef struct {
+	_MDB_HEADER
+	mdb_body_t mdb_body;
+} __attribute__ ((packed))
+
+mdb_t;
+
+typedef struct {
+	_EBUF_HEADER
+	mdb_t mdb;
+} __attribute__ ((packed))
+
+msgbuf_t;
+
+/* A complete write HWCB: HWCB header + message event buffer */
+typedef struct {
+	_HWCB_HEADER
+	msgbuf_t msgbuf;
+} __attribute__ ((packed))
+
+write_hwcb_t;
+
+typedef struct {
+	_MTO_HEADER
+} __attribute__ ((packed))
+
+mto_t;
+
+/* Template used to initialize every write HWCB; the nested
+ * positional initializers mirror the struct nesting above. */
+static write_hwcb_t write_hwcb_template =
+{
+	sizeof (write_hwcb_t),
+	0x00,
+	{
+		0x00,
+		0x00,
+		0x00
+	},
+	0x0000,
+	{
+		sizeof (msgbuf_t),
+		ET_Msg,
+		0x00,
+		0x0000,
+		{
+			sizeof (mdb_t),
+			0x0001,
+			0xD4C4C240,
+			0x00000001,
+			{
+				{
+					sizeof (go_t),
+					0x0001
+
+				}
+			}
+		}
+	}
+};
+
+/* Template for a message text object: one complete output line */
+static mto_t mto_template =
+{
+	sizeof (mto_t),
+	0x0004,
+	LTF_EndText,
+	0x00
+};
+
+typedef u32 _hwcb_mask_t;
+
+/* HWCB used with HWC_CMDW_WRITEMASK to set up event masks */
+typedef struct {
+	_HWCB_HEADER
+	u16 _reserved;
+	u16 mask_length;
+	_hwcb_mask_t cp_receive_mask;
+	_hwcb_mask_t cp_send_mask;
+	_hwcb_mask_t hwc_receive_mask;
+	_hwcb_mask_t hwc_send_mask;
+} __attribute__ ((packed))
+
+init_hwcb_t;
+
+static init_hwcb_t init_hwcb_template =
+{
+	sizeof (init_hwcb_t),
+	0x00,
+	{
+		0x00,
+		0x00,
+		0x00
+	},
+	0x0000,
+	0x0000,
+	sizeof (_hwcb_mask_t),
+	ET_OpCmd_Mask | ET_PMsgCmd_Mask,
+	ET_Msg_Mask
+};
+
+/* General data stream vector / subvector headers (read path) */
+#define _GDS_VECTOR_HEADER u16 length; \
+ u16 gds_id;
+
+#define _GDS_SUBVECTOR_HEADER u8 length; \
+ u8 key;
+
+typedef struct {
+	_GDS_VECTOR_HEADER
+} __attribute__ ((packed))
+
+gds_vector_t;
+
+typedef struct {
+	_GDS_SUBVECTOR_HEADER
+} __attribute__ ((packed))
+
+gds_subvector_t;
+
+typedef struct {
+	_HWCB_HEADER
+} __attribute__ ((packed))
+
+read_hwcb_t;
+
+/* Template for a read HWCB; length is the whole page so the HWC
+ * can fill in as much event data as fits. */
+static read_hwcb_t read_hwcb_template =
+{
+	PAGE_SIZE,
+	0x00,
+	{
+		0x00,
+		0x00,
+		0x80
+	}
+};
+
+#endif /* __HWC_H__ */
diff --git a/drivers/s390/char/hwc_con.c b/drivers/s390/char/hwc_con.c
new file mode 100644
index 000000000..16d5553e8
--- /dev/null
+++ b/drivers/s390/char/hwc_con.c
@@ -0,0 +1,99 @@
+/*
+ * drivers/s390/char/hwc_con.c
+ * HWC line mode console driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <peschke@fh-brandenburg.de>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/kdev_t.h>
+#include <linux/string.h>
+#include <linux/console.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+#include "hwc_rw.h"
+
+extern void hwc_tty_init (void);
+
+#ifdef CONFIG_HWC_CONSOLE
+
+#define hwc_console_major 4
+#define hwc_console_minor 0
+#define hwc_console_name "console"
+
+void hwc_console_write (struct console *, const char *, unsigned int);
+kdev_t hwc_console_device (struct console *);
+
+#define HWC_CON_PRINT_HEADER "hwc console driver: "
+
+/*
+ * Console descriptor for the HWC line-mode console.
+ * (positional initializers -- field comments follow the struct
+ * console layout of this kernel; confirm against linux/console.h)
+ */
+struct console hwc_console =
+{
+
+	hwc_console_name,	/* name */
+	hwc_console_write,	/* write */
+	NULL,			/* read */
+	hwc_console_device,	/* device */
+	NULL,			/* wait_key */
+	NULL,			/* unblank */
+	NULL,			/* setup */
+	CON_PRINTBUFFER,	/* flags */
+	0,			/* index */
+	0,			/* cflag */
+	NULL			/* next */
+};
+
+/*
+ * Console write callback: pass the message on to the low level
+ * HWC write routine.  Refuses to run for a console structure that
+ * does not map to our own device number.
+ */
+void
+hwc_console_write (
+	struct console *console,
+	const char *message,
+	unsigned int count)
+{
+	if (console->device (console) != hwc_console.device (&hwc_console)) {
+		hwc_printk (KERN_WARNING HWC_CON_PRINT_HEADER
+			    "hwc_console_write() called with wrong "
+			    "device number");
+		return;
+	}
+
+	hwc_write (0, message, count);
+}
+
+/* Map the HWC console to its device number (major 4, minor 0). */
+kdev_t
+hwc_console_device (struct console * c)
+{
+	return MKDEV (hwc_console_major, hwc_console_minor);
+}
+
+#endif
+
+/*
+ * Boottime initialization of the HWC console.
+ * Skipped when a 3215 console will be used instead (VM with
+ * CONFIG_3215, or a P/390).  Panics if the low-level HWC driver
+ * fails to initialize, since there would be no console at all.
+ */
+void __init
+hwc_console_init (void)
+{
+
+#ifdef CONFIG_3215
+	if (MACHINE_IS_VM)
+		return;	/* the 3215 driver provides the VM console */
+#endif
+	if (MACHINE_IS_P390)
+		return;
+
+	if (hwc_init () == 0) {
+
+#ifdef CONFIG_HWC_CONSOLE
+
+		register_console (&hwc_console);
+#endif
+
+		/* line-mode ttys on top of the HWC transport */
+		hwc_tty_init ();
+	} else
+		panic (HWC_CON_PRINT_HEADER "hwc initialisation failed !");
+
+	return;
+}
diff --git a/drivers/s390/char/hwc_rw.c b/drivers/s390/char/hwc_rw.c
new file mode 100644
index 000000000..6ffecb799
--- /dev/null
+++ b/drivers/s390/char/hwc_rw.c
@@ -0,0 +1,2016 @@
+/*
+ * drivers/s390/char/hwc_rw.c
+ * driver: reading from and writing to system console on S/390 via HWC
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <peschke@fh-brandenburg.de>
+ *
+ *
+ *
+ *
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/bootmem.h>
+
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/types.h>
+#include <asm/bitops.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+
+#ifndef MIN
+#define MIN(a,b) ((a<b) ? a : b)
+#endif
+
+#define HWC_RW_PRINT_HEADER "hwc low level driver: "
+
+#define USE_VM_DETECTION
+
+#define DEFAULT_CASE_DELIMITER '%'
+
+#define DUMP_HWC_INIT_ERROR
+
+#define DUMP_HWC_WRITE_ERROR
+
+#define DUMP_HWC_WRITE_LIST_ERROR
+
+#define DUMP_HWC_READ_ERROR
+
+#undef DUMP_HWCB_INPUT
+
+#undef BUFFER_STRESS_TEST
+
+/*
+ * Bookkeeping record kept in the last bytes of every HWCB page
+ * (accessed via the _LIST() macro): forward link of the HWCB list
+ * plus counters of buffered and dropped output records (MTOs).
+ */
+typedef struct {
+	unsigned char *next;			/* next HWCB page in list */
+	unsigned short int mto_char_sum;	/* chars buffered here */
+	unsigned char mto_number;		/* MTOs buffered here */
+	unsigned char times_lost;		/* times content dropped */
+	unsigned short int mto_number_lost;	/* MTOs dropped */
+	unsigned long int mto_char_sum_lost;	/* chars dropped */
+} __attribute__ ((packed))
+
+hwcb_list_t;
+
+#define MAX_HWCB_ROOM (PAGE_SIZE - sizeof(hwcb_list_t))
+
+#define MAX_MESSAGE_SIZE (MAX_HWCB_ROOM - sizeof(write_hwcb_t))
+
+#define BUF_HWCB hwc_data.hwcb_list_tail
+#define OUT_HWCB hwc_data.hwcb_list_head
+#define ALL_HWCB_MTO hwc_data.mto_number
+#define ALL_HWCB_CHAR hwc_data.mto_char_sum
+
+#define _LIST(hwcb) ((hwcb_list_t*)(&(hwcb)[PAGE_SIZE-sizeof(hwcb_list_t)]))
+
+#define _HWCB_CHAR(hwcb) (_LIST(hwcb)->mto_char_sum)
+
+#define _HWCB_MTO(hwcb) (_LIST(hwcb)->mto_number)
+
+#define _HWCB_CHAR_LOST(hwcb) (_LIST(hwcb)->mto_char_sum_lost)
+
+#define _HWCB_MTO_LOST(hwcb) (_LIST(hwcb)->mto_number_lost)
+
+#define _HWCB_TIMES_LOST(hwcb) (_LIST(hwcb)->times_lost)
+
+#define _HWCB_NEXT(hwcb) (_LIST(hwcb)->next)
+
+#define BUF_HWCB_CHAR _HWCB_CHAR(BUF_HWCB)
+
+#define BUF_HWCB_MTO _HWCB_MTO(BUF_HWCB)
+
+#define BUF_HWCB_NEXT _HWCB_NEXT(BUF_HWCB)
+
+#define OUT_HWCB_CHAR _HWCB_CHAR(OUT_HWCB)
+
+#define OUT_HWCB_MTO _HWCB_MTO(OUT_HWCB)
+
+#define OUT_HWCB_NEXT _HWCB_NEXT(OUT_HWCB)
+
+#define BUF_HWCB_CHAR_LOST _HWCB_CHAR_LOST(BUF_HWCB)
+
+#define BUF_HWCB_MTO_LOST _HWCB_MTO_LOST(BUF_HWCB)
+
+#define OUT_HWCB_CHAR_LOST _HWCB_CHAR_LOST(OUT_HWCB)
+
+#define OUT_HWCB_MTO_LOST _HWCB_MTO_LOST(OUT_HWCB)
+
+#define BUF_HWCB_TIMES_LOST _HWCB_TIMES_LOST(BUF_HWCB)
+
+#include "hwc.h"
+
+#define __HWC_RW_C__
+#include "hwc_rw.h"
+#undef __HWC_RW_C__
+
+static unsigned char _obuf[MAX_HWCB_ROOM];
+
+static unsigned char
+ _page[PAGE_SIZE] __attribute__ ((aligned (PAGE_SIZE)));
+
+typedef u32 kmem_pages_t;
+
+#define MAX_KMEM_PAGES (sizeof(kmem_pages_t) << 3)
+
+#define HWC_TIMER_RUNS 1
+#define FLUSH_HWCBS 2
+
+/* All module state of the low-level HWC driver, plus its
+ * compile-time initial values (positional initializers below). */
+static struct {
+
+	hwc_ioctls_t ioctls;		/* current settings */
+
+	hwc_ioctls_t init_ioctls;	/* defaults for reset */
+
+	unsigned char *hwcb_list_head;	/* oldest queued HWCB (OUT_HWCB) */
+
+	unsigned char *hwcb_list_tail;	/* HWCB being filled (BUF_HWCB) */
+
+	unsigned short int mto_number;	/* MTOs queued over all HWCBs */
+
+	unsigned int mto_char_sum;	/* chars queued over all HWCBs */
+
+	unsigned char hwcb_count;	/* HWCB pages in the list */
+
+	unsigned long kmem_start;	/* kmalloc'ed HWCB page pool */
+
+	unsigned long kmem_end;
+
+	kmem_pages_t kmem_pages;	/* bitmap of pool pages in use */
+
+	unsigned char *obuf;		/* line assembly output buffer */
+
+	unsigned short int obuf_cursor;
+
+	unsigned short int obuf_count;
+
+	unsigned short int obuf_start;
+
+	unsigned char *page;		/* page for read HWCBs */
+
+	u32 current_servc;		/* service call in flight */
+
+	unsigned char *current_hwcb;	/* HWCB owned by that call */
+
+	unsigned char write_nonprio:1;	/* accepted event types */
+	unsigned char write_prio:1;
+	unsigned char read_nonprio:1;
+	unsigned char read_prio:1;
+
+	unsigned char flags;		/* HWC_TIMER_RUNS / FLUSH_HWCBS */
+
+	spinlock_t lock;		/* protects everything above */
+
+	struct timer_list write_timer;	/* delayed-write timer */
+} hwc_data =
+{
+	{
+	},
+	{
+		/* init_ioctls: values map to the hwc_ioctls_t fields
+		 * declared in hwc_rw.h -- confirm order there */
+		8,
+		0,
+		80,
+		CODE_ASCII,
+		1,
+		50,
+		MAX_KMEM_PAGES,
+
+		0,
+
+		0x6c
+
+	},
+	NULL,		/* hwcb_list_head */
+	NULL,		/* hwcb_list_tail */
+	0,		/* mto_number */
+	0,		/* mto_char_sum */
+	0,		/* hwcb_count */
+	0,		/* kmem_start */
+	0,		/* kmem_end */
+	0,		/* kmem_pages */
+	_obuf,		/* obuf */
+	0,		/* obuf_cursor */
+	0,		/* obuf_count */
+	0,		/* obuf_start */
+	_page,		/* page */
+	0,		/* current_servc */
+	NULL,		/* current_hwcb */
+	0,		/* write_nonprio */
+	0,		/* write_prio */
+	0,		/* read_nonprio */
+	0,		/* read_prio */
+	0		/* flags */
+
+};
+
+#define DELAYED_WRITE 0
+#define IMMEDIATE_WRITE 1
+
+static signed int do_hwc_write (int from_user, unsigned char *,
+ unsigned int,
+ unsigned char,
+ unsigned char);
+
+/*
+ * printf-style helper for the driver's own messages.
+ * 'write_time' is DELAYED_WRITE or IMMEDIATE_WRITE.
+ * NOTE(review): formats into a fixed 512-byte stack buffer using
+ * unbounded vsprintf() -- callers must keep messages short.
+ */
+static asmlinkage int
+internal_print (char write_time, const char *fmt,...)
+{
+	va_list args;
+	int i;
+	unsigned char buf[512];
+
+	va_start (args, fmt);
+	i = vsprintf (buf, fmt, args);
+	va_end (args);
+	return do_hwc_write (0, buf, i, CODE_ASCII, write_time);
+}
+
+/*
+ * printf-style output through the HWC for other drivers; takes the
+ * driver lock and writes immediately.  Returns what do_hwc_write()
+ * returns.
+ *
+ * Fix vs. the original: va_start() was never called, so vsprintf()
+ * consumed an uninitialized va_list (undefined behavior); compare
+ * internal_print() above, which does it correctly.
+ */
+int
+hwc_printk (const char *fmt,...)
+{
+	va_list args;
+	int i;
+	unsigned char buf[512];
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave (&hwc_data.lock, flags);
+
+	va_start (args, fmt);
+	i = vsprintf (buf, fmt, args);
+	va_end (args);
+	retval = do_hwc_write (0, buf, i, CODE_ASCII, IMMEDIATE_WRITE);
+
+	spin_unlock_irqrestore (&hwc_data.lock, flags);
+
+	return retval;
+}
+
+#ifdef DUMP_HWCB_INPUT
+
+/*
+ * Hex-dump 'count' bytes at 'area' through the HWC, 16 bytes per
+ * row with the row address as prefix.  Only compiled with
+ * DUMP_HWCB_INPUT defined.
+ * NOTE(review): the pointer is printed with %8x/%x -- relies on
+ * pointers and ints having the same size; confirm for this target.
+ */
+static void
+dump_storage_area (unsigned char *area, unsigned short int count)
+{
+	unsigned short int index;
+	ioctl_nl_t old_final_nl;
+
+	if (!area || !count)
+		return;
+
+	/* force a trailing newline while dumping */
+	old_final_nl = hwc_data.ioctls.final_nl;
+	hwc_data.ioctls.final_nl = 1;
+
+	internal_print (DELAYED_WRITE, "\n%8x ", area);
+
+	for (index = 0; index < count; index++) {
+
+		/* zero-pad single-digit bytes by hand */
+		if (area[index] <= 0xF)
+			internal_print (DELAYED_WRITE, "0%x", area[index]);
+		else
+			internal_print (DELAYED_WRITE, "%x", area[index]);
+
+		if ((index & 0xF) == 0xF)
+			internal_print (DELAYED_WRITE, "\n%8x ",
+					&area[index + 1]);
+		else if ((index & 3) == 3)
+			internal_print (DELAYED_WRITE, " ");
+	}
+
+	internal_print (IMMEDIATE_WRITE, "\n");
+
+	hwc_data.ioctls.final_nl = old_final_nl;
+}
+#endif
+
+/*
+ * Issue a service call with the given command word and HWCB, and
+ * return the resulting condition code.
+ * NOTE(review): the .long 0xB2200012 is a hand-assembled opcode word
+ * -- presumably the SERVC instruction operating on r1/r2; confirm
+ * against the ESA/390 Principles of Operation.
+ */
+static inline u32
+service_call (
+	u32 hwc_command_word,
+	unsigned char hwcb[])
+{
+	unsigned int condition_code = 1;
+
+	__asm__ __volatile__ ("L 1, 0(0,%0) \n\t"	/* r1 = command word */
+			      "LRA 2, 0(0,%1) \n\t"	/* r2 = real addr of HWCB */
+			      ".long 0xB2200012 \n\t"
+			      :
+			      :"a" (&hwc_command_word), "a" (hwcb)
+			      :"1", "2", "memory");
+
+	/* extract the condition code from the program mask */
+	__asm__ __volatile__ ("IPM %0 \n\t"
+			      "SRL %0, 28 \n\t"
+			      :"=r" (condition_code));
+
+	return condition_code;
+}
+
+/*
+ * Fetch the 32-bit external-interrupt parameter from absolute
+ * address 128 and return it as a pointer.
+ * NOTE(review): offset 128 should be the external-interrupt
+ * parameter field in the S/390 low-core -- confirm against the
+ * prefix page layout.
+ */
+static inline unsigned char *
+ext_int_param (void)
+{
+	u32 param;
+
+	__asm__ __volatile__ ("L %0,128(0,0)\n\t"
+			      :"=r" (param));
+
+	return ((unsigned char *) param);
+}
+
+/*
+ * Reset the current buffer HWCB and stamp it with the write
+ * template; select the priority-message event type if only
+ * priority writes are enabled.  Returns -ENOMEM when no buffer
+ * HWCB exists.
+ */
+static int
+prepare_write_hwcb (void)
+{
+	write_hwcb_t *hwcb = (write_hwcb_t *) BUF_HWCB;
+
+	if (hwcb == NULL)
+		return -ENOMEM;
+
+	BUF_HWCB_MTO = 0;
+	BUF_HWCB_CHAR = 0;
+
+	memcpy (hwcb, &write_hwcb_template, sizeof (write_hwcb_t));
+
+	if (!hwc_data.write_nonprio && hwc_data.write_prio)
+		hwcb->msgbuf.type = ET_PMsgCmd;
+
+	return 0;
+}
+
+/*
+ * Sanity-check the HWCB output list.  Every HWCB must be page
+ * aligned; if the list head is not, the list is treated as
+ * corrupted and as much as possible is salvaged: nothing if the
+ * buffer HWCB is misaligned too, otherwise just the buffer HWCB.
+ * Returns -ENOMEM when there is no list at all, else 0.
+ */
+static int
+sane_write_hwcb (void)
+{
+	unsigned short int lost_msg;
+	unsigned int lost_char;
+	unsigned char lost_hwcb;
+	unsigned char *bad_addr;
+	unsigned long page;
+	int page_nr;
+
+	if (!OUT_HWCB)
+		return -ENOMEM;
+
+	if ((unsigned long) OUT_HWCB & 0xFFF) {
+
+		bad_addr = OUT_HWCB;
+
+#ifdef DUMP_HWC_WRITE_LIST_ERROR
+		/* debug stop: magic 0xe30 in r1, bad address in r2,
+		 * then branch-to-self (J .+0) so a dump shows them */
+		__asm__ ("LHI 1,0xe30\n\t"
+			 "LRA 2,0(0,%0) \n\t"
+			 "J .+0 \n\t"
+			 :
+			 : "a" (bad_addr)
+			 : "1", "2");
+#endif
+
+		hwc_data.kmem_pages = 0;
+		if ((unsigned long) BUF_HWCB & 0xFFF) {
+
+			/* buffer HWCB corrupt too: drop everything */
+			lost_hwcb = hwc_data.hwcb_count;
+			lost_msg = ALL_HWCB_MTO;
+			lost_char = ALL_HWCB_CHAR;
+
+			OUT_HWCB = NULL;
+			BUF_HWCB = NULL;
+			ALL_HWCB_MTO = 0;
+			ALL_HWCB_CHAR = 0;
+			hwc_data.hwcb_count = 0;
+		} else {
+
+			/* keep only the (still aligned) buffer HWCB */
+			lost_hwcb = hwc_data.hwcb_count - 1;
+			lost_msg = ALL_HWCB_MTO - BUF_HWCB_MTO;
+			lost_char = ALL_HWCB_CHAR - BUF_HWCB_CHAR;
+			OUT_HWCB = BUF_HWCB;
+			ALL_HWCB_MTO = BUF_HWCB_MTO;
+			ALL_HWCB_CHAR = BUF_HWCB_CHAR;
+			hwc_data.hwcb_count = 1;
+			page = (unsigned long) BUF_HWCB;
+
+			/* re-mark its page as used in the pool bitmap */
+			if (page >= hwc_data.kmem_start &&
+			    page < hwc_data.kmem_end) {
+
+				page_nr = (int)
+					((page - hwc_data.kmem_start) >> 12);
+				set_bit (page_nr, &hwc_data.kmem_pages);
+			}
+		}
+
+		internal_print (
+			DELAYED_WRITE,
+			HWC_RW_PRINT_HEADER
+			"found invalid HWCB at address 0x%x. List corrupted. "
+			"Lost %i HWCBs with %i characters within up to %i "
+			"messages. Saved %i HWCB with last %i characters i"
+			"within up to %i messages.\n",
+			(unsigned int) bad_addr,
+			lost_hwcb, lost_char, lost_msg,
+			hwc_data.hwcb_count,
+			ALL_HWCB_CHAR, ALL_HWCB_MTO);
+	}
+	return 0;
+}
+
/*
 * Out of memory for a new buffer HWCB: recycle the oldest queued HWCB
 * (dropping its content) and reuse it as the new buffer HWCB.  The
 * HWCB currently owned by the HWC (current_hwcb) is never recycled.
 * Lost lines/characters are accounted and reported.
 *
 * Returns the result of prepare_write_hwcb(), -EPERM if fewer than
 * two HWCBs exist (nothing can be recycled).
 */
static int
reuse_write_hwcb (void)
{
	int retval;

	if (hwc_data.hwcb_count < 2)
#ifdef DUMP_HWC_WRITE_LIST_ERROR
		/* debug build: marker + spin for a standalone dump */
		__asm__ ("LHI 1,0xe31\n\t"
			 "LRA 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "J .+0 \n\t"
			 :
			 : "a" (BUF_HWCB), "a" (OUT_HWCB)
			 : "1", "2", "3");
#else
		return -EPERM;
#endif

	if (hwc_data.current_hwcb == OUT_HWCB) {

		/* oldest HWCB is in flight; recycle its successor
		 * instead (only possible with more than two HWCBs) */
		if (hwc_data.hwcb_count > 2) {

			BUF_HWCB_NEXT = OUT_HWCB_NEXT;

			BUF_HWCB = OUT_HWCB_NEXT;

			OUT_HWCB_NEXT = BUF_HWCB_NEXT;

			BUF_HWCB_NEXT = NULL;
		}
	} else {

		/* unlink the oldest HWCB and append it at the tail */
		BUF_HWCB_NEXT = OUT_HWCB;

		BUF_HWCB = OUT_HWCB;

		OUT_HWCB = OUT_HWCB_NEXT;

		BUF_HWCB_NEXT = NULL;
	}

	/* account the discarded content of the recycled HWCB */
	BUF_HWCB_TIMES_LOST += 1;
	BUF_HWCB_CHAR_LOST += BUF_HWCB_CHAR;
	BUF_HWCB_MTO_LOST += BUF_HWCB_MTO;
	ALL_HWCB_MTO -= BUF_HWCB_MTO;
	ALL_HWCB_CHAR -= BUF_HWCB_CHAR;

	retval = prepare_write_hwcb ();

	if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb)
		internal_print (
				       DELAYED_WRITE,
				       HWC_RW_PRINT_HEADER
				       "reached my own limit of "
			    "allowed buffer space for output (%i HWCBs = %li "
			  "bytes), skipped content of oldest HWCB %i time(s) "
				       "(%i lines = %i characters)\n",
				       hwc_data.ioctls.max_hwcb,
				       hwc_data.ioctls.max_hwcb * PAGE_SIZE,
				       BUF_HWCB_TIMES_LOST,
				       BUF_HWCB_MTO_LOST,
				       BUF_HWCB_CHAR_LOST);
	else
		internal_print (
				       DELAYED_WRITE,
				       HWC_RW_PRINT_HEADER
				       "page allocation failed, "
			  "could not expand buffer for output (currently in "
			       "use: %i HWCBs = %li bytes), skipped content of "
			   "oldest HWCB %i time(s) (%i lines = %i characters)\n",
				       hwc_data.hwcb_count,
				       hwc_data.hwcb_count * PAGE_SIZE,
				       BUF_HWCB_TIMES_LOST,
				       BUF_HWCB_MTO_LOST,
				       BUF_HWCB_CHAR_LOST);

	return retval;
}
+
/*
 * Append a fresh page-sized HWCB to the output list.  Pages come from
 * the boot-time kmem pool first (tracked by the kmem_pages bitmap),
 * then from __get_free_page(GFP_ATOMIC).
 *
 * Returns 0 on success, -ENOMEM if the max_hwcb limit is reached or
 * no page is available.
 */
static int
allocate_write_hwcb (void)
{
	unsigned char *page;
	int page_nr;

	if (hwc_data.hwcb_count == hwc_data.ioctls.max_hwcb)
		return -ENOMEM;

	page_nr = find_first_zero_bit (&hwc_data.kmem_pages, MAX_KMEM_PAGES);
	if (page_nr < hwc_data.ioctls.kmem_hwcb) {

		/* free slot in the preallocated kmem area */
		page = (unsigned char *)
		    (hwc_data.kmem_start + (page_nr << 12));
		set_bit (page_nr, &hwc_data.kmem_pages);
	} else
		page = (unsigned char *) __get_free_page (GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	/* link the new page at the tail of the output list */
	if (!OUT_HWCB)
		OUT_HWCB = page;
	else
		BUF_HWCB_NEXT = page;

	BUF_HWCB = page;

	BUF_HWCB_NEXT = NULL;

	hwc_data.hwcb_count++;

	prepare_write_hwcb ();

	BUF_HWCB_TIMES_LOST = 0;
	BUF_HWCB_MTO_LOST = 0;
	BUF_HWCB_CHAR_LOST = 0;

#ifdef BUFFER_STRESS_TEST

	internal_print (
			       DELAYED_WRITE,
			       "*** " HWC_RW_PRINT_HEADER
			       "page #%i at 0x%x for buffering allocated. ***\n",
			       hwc_data.hwcb_count, page);

#endif

	return 0;
}
+
/*
 * Release the oldest HWCB (head of the output list).  With only one
 * HWCB left it is not freed but re-prepared as an empty buffer;
 * otherwise the page is returned to the kmem pool (and zeroed) or to
 * the page allocator.
 *
 * Returns 0 on success, -ENODATA if no HWCB exists.
 */
static int
release_write_hwcb (void)
{
	unsigned long page;
	int page_nr;

	if (!hwc_data.hwcb_count)
		return -ENODATA;

	if (hwc_data.hwcb_count == 1) {

		/* keep the last HWCB, just reset it */
		prepare_write_hwcb ();

		ALL_HWCB_CHAR = 0;
		ALL_HWCB_MTO = 0;
		BUF_HWCB_TIMES_LOST = 0;
		BUF_HWCB_MTO_LOST = 0;
		BUF_HWCB_CHAR_LOST = 0;
	} else {
		page = (unsigned long) OUT_HWCB;

		ALL_HWCB_MTO -= OUT_HWCB_MTO;
		ALL_HWCB_CHAR -= OUT_HWCB_CHAR;
		hwc_data.hwcb_count--;

		OUT_HWCB = OUT_HWCB_NEXT;

		if (page >= hwc_data.kmem_start &&
		    page < hwc_data.kmem_end) {

			/* boot-time kmem page: scrub and mark free */
			memset ((void *) page, 0, PAGE_SIZE);

			page_nr = (int) ((page - hwc_data.kmem_start) >> 12);
			clear_bit (page_nr, &hwc_data.kmem_pages);
		} else
			free_page (page);
#ifdef BUFFER_STRESS_TEST

		internal_print (
				       DELAYED_WRITE,
				       "*** " HWC_RW_PRINT_HEADER
			 "page at 0x%x released, %i pages still in use ***\n",
				       page, hwc_data.hwcb_count);

#endif
	}
	return 0;
}
+
/*
 * Append one message (MTO, "message to operator") of 'count'
 * characters to the current buffer HWCB, updating all length fields
 * and counters.
 *
 * Returns 'count' on success; -ENOMEM if there is no usable buffer
 * HWCB (none allocated, or it is currently owned by the HWC) or the
 * message does not fit into the remaining room.
 */
static int
add_mto (
		unsigned char *message,
		unsigned short int count)
{
	unsigned short int mto_size;
	write_hwcb_t *hwcb;
	mto_t *mto;
	void *dest;

	if (!BUF_HWCB)
		return -ENOMEM;

	/* must not touch an HWCB that is in flight */
	if (BUF_HWCB == hwc_data.current_hwcb)
		return -ENOMEM;

	mto_size = sizeof (mto_t) + count;

	hwcb = (write_hwcb_t *) BUF_HWCB;

	if ((MAX_HWCB_ROOM - hwcb->length) < mto_size)
		return -ENOMEM;

	/* place the MTO header directly behind the current content */
	mto = (mto_t *) (((unsigned long) hwcb) + hwcb->length);

	memcpy (mto, &mto_template, sizeof (mto_t));

	dest = (void *) (((unsigned long) mto) + sizeof (mto_t));

	memcpy (dest, message, count);

	mto->length += count;

	/* grow all enclosing structures by the same amount */
	hwcb->length += mto_size;
	hwcb->msgbuf.length += mto_size;
	hwcb->msgbuf.mdb.length += mto_size;

	BUF_HWCB_MTO++;
	ALL_HWCB_MTO++;
	BUF_HWCB_CHAR += count;
	ALL_HWCB_CHAR += count;

	return count;
}
+
/*
 * First half of an asynchronous write: hand the oldest HWCB with
 * content to the HWC via a WRITEDATA service call.  Completion is
 * signalled by an external interrupt and handled by
 * write_event_data_2().
 *
 * Returns the condition code on successful initiation, -EPERM if the
 * HWC accepts no write events, -EBUSY if a service call is already in
 * flight (or the HWC is busy), -ENODATA if nothing is queued, -EIO on
 * other failures.
 */
static int
write_event_data_1 (void)
{
	unsigned short int condition_code;
	int retval;

	if ((!hwc_data.write_prio) && (!hwc_data.write_nonprio))
		return -EPERM;

	if (hwc_data.current_servc)
		return -EBUSY;

	retval = sane_write_hwcb ();
	if (retval < 0)
		return retval;

	if (!OUT_HWCB_MTO)
		return -ENODATA;

	condition_code = service_call (HWC_CMDW_WRITEDATA, OUT_HWCB);

#ifdef DUMP_HWC_WRITE_ERROR
	/* debug build: marker + spin for a standalone dump */
	if (condition_code != HWC_COMMAND_INITIATED)
		__asm__ ("LHI 1,0xe20\n\t"
			 "L 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "J .+0 \n\t"
			 :
			 : "a" (&condition_code), "a" (OUT_HWCB)
			 : "1", "2", "3");
#endif

	switch (condition_code) {
	case HWC_COMMAND_INITIATED:
		hwc_data.current_servc = HWC_CMDW_WRITEDATA;
		hwc_data.current_hwcb = OUT_HWCB;
		retval = condition_code;
		break;
	case HWC_BUSY:
		retval = -EBUSY;
		break;
	default:
		retval = -EIO;
	}

	return retval;
}
+
/*
 * Discard all buffered output.  The loop frees every HWCB but the
 * last; the final release_write_hwcb() call does not free that one
 * but resets it to an empty buffer (see release_write_hwcb), so
 * exactly one empty HWCB remains.
 */
static void
flush_hwcbs (void)
{
	while (hwc_data.hwcb_count > 1)
		release_write_hwcb ();

	release_write_hwcb ();

	hwc_data.flags &= ~FLUSH_HWCBS;
}
+
/*
 * Second half of an asynchronous write, called from the external
 * interrupt handler: check the response code of the HWCB that was
 * handed to the HWC, release it on success, and perform a deferred
 * flush if one was requested while the write was in flight.
 *
 * Returns the number of characters written on success, -EINVAL if
 * the interrupt parameter does not match the HWCB in flight, -EIO if
 * the HWC rejected the HWCB.
 */
static int
write_event_data_2 (void)
{
	write_hwcb_t *hwcb;
	int retval;
	unsigned char *param;

	param = ext_int_param ();
	if (param != hwc_data.current_hwcb)
		return -EINVAL;

	hwcb = (write_hwcb_t *) OUT_HWCB;

#ifdef DUMP_HWC_WRITE_ERROR
#if 0
	if (((unsigned char *) hwcb) != param)
		__asm__ ("LHI 1,0xe22\n\t"
			 "LRA 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "LRA 4,0(0,%2)\n\t"
			 "LRA 5,0(0,%3)\n\t"
			 "J .+0 \n\t"
			 :
			 : "a" (OUT_HWCB),
			 "a" (hwc_data.current_hwcb),
			 "a" (BUF_HWCB),
			 "a" (param)
			 : "1", "2", "3", "4", "5");
#endif
	/* debug build: marker + spin on an unexpected response code */
	if (hwcb->response_code != 0x0020)
#if 0
		internal_print (DELAYED_WRITE, HWC_RW_PRINT_HEADER
		   "\n************************ error in write_event_data_2()\n"
				"OUT_HWCB: 0x%x\n"
				"BUF_HWCB: 0x%x\n"
				"response_code: 0x%x\n"
				"hwc_data.hwcb_count: %d\n"
				"hwc_data.kmem_pages: 0x%x\n"
				"hwc_data.ioctls.kmem_hwcb: %d\n"
				"hwc_data.ioctls.max_hwcb: %d\n"
				"hwc_data.kmem_start: 0x%x\n"
				"hwc_data.kmem_end: 0x%x\n"
		     "*****************************************************\n",
				OUT_HWCB,
				BUF_HWCB,
				hwcb->response_code,
				hwc_data.hwcb_count,
				hwc_data.kmem_pages,
				hwc_data.ioctls.kmem_hwcb,
				hwc_data.ioctls.max_hwcb,
				hwc_data.kmem_start,
				hwc_data.kmem_end);
#endif
		__asm__ ("LHI 1,0xe21\n\t"
			 "LRA 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "LRA 4,0(0,%2)\n\t"
			 "LH 5,0(0,%3)\n\t"
			 "SRL 5,8(0)\n\t"
			 "J .+0 \n\t"
			 :
			 : "a" (OUT_HWCB), "a" (hwc_data.current_hwcb),
			 "a" (BUF_HWCB),
			 "a" (&(hwc_data.hwcb_count))
			 : "1", "2", "3", "4", "5");
#endif

	/* 0x0020 is the "command complete" response code */
	if (hwcb->response_code == 0x0020) {

		retval = OUT_HWCB_CHAR;
		release_write_hwcb ();
	} else
		retval = -EIO;

	hwc_data.current_servc = 0;
	hwc_data.current_hwcb = NULL;

	/* honour a flush requested while the write was in flight */
	if (hwc_data.flags & FLUSH_HWCBS)
		flush_hwcbs ();

	return retval;
}
+
/*
 * Store one finished line as an MTO.  If the current buffer HWCB is
 * full, try to allocate a new HWCB; failing that, recycle the oldest
 * one; then retry once.  In debug builds a second failure stops the
 * machine for a dump.
 */
static void
do_put_line (
		    unsigned char *message,
		    unsigned short count)
{

	if (add_mto (message, count) != count) {

		if (allocate_write_hwcb () < 0)
			reuse_write_hwcb ();

#ifdef DUMP_HWC_WRITE_LIST_ERROR
		if (add_mto (message, count) != count)
			__asm__ ("LHI 1,0xe32\n\t"
				 "LRA 2,0(0,%0)\n\t"
				 "L 3,0(0,%1)\n\t"
				 "LRA 4,0(0,%2)\n\t"
				 "LRA 5,0(0,%3)\n\t"
				 "J .+0 \n\t"
				 :
				 : "a" (message), "a" (&hwc_data.kmem_pages),
				 "a" (BUF_HWCB), "a" (OUT_HWCB)
				 : "1", "2", "3", "4", "5");
#else
		add_mto (message, count);
#endif
	}
}
+
/*
 * Wrapper around do_put_line() used by the write path: when called
 * for regular output (obuf_start == 0) it cancels a pending delayed
 * write timer first.  The obuf_start adjustment marks the region of
 * obuf in use, apparently so nested internal_print() output does not
 * overwrite the caller's data -- TODO confirm against internal_print.
 */
static void
put_line (
		 unsigned char *message,
		 unsigned short count)
{

	if ((!hwc_data.obuf_start) && (hwc_data.flags & HWC_TIMER_RUNS)) {
		del_timer (&hwc_data.write_timer);
		hwc_data.flags &= ~HWC_TIMER_RUNS;
	}
	hwc_data.obuf_start += count;

	do_put_line (message, count);

	hwc_data.obuf_start -= count;
}
+
/*
 * Request an audible alarm ('\a'): set the send-alarm flag in the
 * buffer HWCB's general object, allocating a usable buffer HWCB first
 * if the current one is missing or in flight.
 */
static void
set_alarm (void)
{
	write_hwcb_t *hwcb;

	if ((!BUF_HWCB) || (BUF_HWCB == hwc_data.current_hwcb))
		allocate_write_hwcb ();

	hwcb = (write_hwcb_t *) BUF_HWCB;
	hwcb->msgbuf.mdb.mdb_body.go.general_msg_flags |= GMF_SndAlrm;
}
+
/*
 * Delayed-write timer callback: flush the partially filled line
 * buffer (obuf) that was held back waiting for a final newline, then
 * kick off a write.  Runs with the driver lock held against the rest
 * of the driver.
 */
static void
hwc_write_timeout (unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave (&hwc_data.lock, flags);

	/* mark the whole pending line as in use while emitting it */
	hwc_data.obuf_start = hwc_data.obuf_count;
	if (hwc_data.obuf_count)
		put_line (hwc_data.obuf, hwc_data.obuf_count);
	hwc_data.obuf_start = 0;

	hwc_data.obuf_cursor = 0;
	hwc_data.obuf_count = 0;

	write_event_data_1 ();

	spin_unlock_irqrestore (&hwc_data.lock, flags);
}
+
/*
 * Core output formatter: copy 'count' bytes from 'msg' (user or
 * kernel space, per 'from_user') into the line buffer, interpreting
 * control characters (NL, TAB, BEL, FF/VT, BS, CR, NUL) and wrapping
 * at the configured column width.  Finished lines are handed to
 * put_line(); an unfinished final line is either flushed, kept, or
 * armed with the delayed-write timer depending on the final_nl ioctl.
 * 'code' selects EBCDIC or ASCII interpretation of the input;
 * printable characters are stored in EBCDIC.
 *
 * Caller must hold hwc_data.lock.  Returns the number of input
 * characters processed.
 */
static int
do_hwc_write (
		     int from_user,
		     unsigned char *msg,
		     unsigned int count,
		     unsigned char code,
		     unsigned char write_time)
{
	unsigned int i_msg = 0;
	unsigned short int spaces = 0;
	unsigned int processed_characters = 0;
	unsigned char ch, orig_ch;
	unsigned short int obuf_count;
	unsigned short int obuf_cursor;
	unsigned short int obuf_columns;

	/* a nested call (obuf_start != 0, e.g. echo from the read
	 * path) works on the free tail of obuf with fresh counters */
	if (hwc_data.obuf_start) {
		obuf_cursor = 0;
		obuf_count = 0;
		obuf_columns = MIN (hwc_data.ioctls.columns,
				    MAX_MESSAGE_SIZE - hwc_data.obuf_start);
	} else {
		obuf_cursor = hwc_data.obuf_cursor;
		obuf_count = hwc_data.obuf_count;
		obuf_columns = hwc_data.ioctls.columns;
	}

	for (i_msg = 0; i_msg < count; i_msg++) {

		if (from_user)
			get_user (orig_ch, msg + i_msg);
		else
			orig_ch = msg[i_msg];
		/* work on an ASCII copy for classification */
		if (code == CODE_EBCDIC)
			ch = _ebcasc[orig_ch];
		else
			ch = orig_ch;

		processed_characters++;

		/* forced wrap at the column limit (NL/TAB handle the
		 * boundary themselves) */
		if ((obuf_cursor == obuf_columns) &&

		    (ch != '\n') &&

		    (ch != '\t')) {
			put_line (&hwc_data.obuf[hwc_data.obuf_start],
				  obuf_columns);
			obuf_cursor = 0;
			obuf_count = 0;
		}
		switch (ch) {

		case '\n':

			put_line (&hwc_data.obuf[hwc_data.obuf_start],
				  obuf_count);
			obuf_cursor = 0;
			obuf_count = 0;
			break;

		case '\a':

			/* protect the pending line while set_alarm()
			 * may queue into obuf */
			hwc_data.obuf_start += obuf_count;
			set_alarm ();
			hwc_data.obuf_start -= obuf_count;

			break;

		case '\t':

			/* pad with blanks to the next tab stop, but
			 * never past the line width */
			do {
				if (obuf_cursor < obuf_columns) {
					hwc_data.obuf[hwc_data.obuf_start +
						      obuf_cursor]
					    = 0x20;
					obuf_cursor++;
				} else
					break;
			} while (obuf_cursor % hwc_data.ioctls.width_htab);

			break;

		case '\f':
		case '\v':

			/* emit the line, then start the next one
			 * indented to the old cursor column */
			spaces = obuf_cursor;
			put_line (&hwc_data.obuf[hwc_data.obuf_start],
				  obuf_count);
			obuf_count = obuf_cursor;
			while (spaces) {
				hwc_data.obuf[hwc_data.obuf_start +
					      obuf_cursor - spaces]
				    = 0x20;
				spaces--;
			}

			break;

		case '\b':

			if (obuf_cursor)
				obuf_cursor--;
			break;

		case '\r':

			obuf_cursor = 0;
			break;

		case 0x00:

			/* NUL terminates the message early */
			put_line (&hwc_data.obuf[hwc_data.obuf_start],
				  obuf_count);
			obuf_cursor = 0;
			obuf_count = 0;
			goto out;

		default:

			/* store printable characters in EBCDIC */
			if (isprint (ch))
				hwc_data.obuf[hwc_data.obuf_start +
					      obuf_cursor++]
				    = (code == CODE_ASCII) ?
				    _ascebc[orig_ch] : orig_ch;
		}
		if (obuf_cursor > obuf_count)
			obuf_count = obuf_cursor;
	}

	/* handle a trailing line without final newline */
	if (obuf_cursor) {

		if (hwc_data.obuf_start ||
		    (hwc_data.ioctls.final_nl == 0)) {

			/* nested call, or "flush immediately" policy */
			put_line (&hwc_data.obuf[hwc_data.obuf_start],
				  obuf_count);
			obuf_cursor = 0;
			obuf_count = 0;
		} else {

			if (hwc_data.ioctls.final_nl > 0) {

				/* hold the line for final_nl tenths of
				 * a second before flushing it */
				if (hwc_data.flags & HWC_TIMER_RUNS) {

					hwc_data.write_timer.expires =
					    jiffies +
					    hwc_data.ioctls.final_nl * HZ / 10;
				} else {

					init_timer (&hwc_data.write_timer);
					hwc_data.write_timer.function =
					    hwc_write_timeout;
					hwc_data.write_timer.data =
					    (unsigned long) NULL;
					hwc_data.write_timer.expires =
					    jiffies +
					    hwc_data.ioctls.final_nl * HZ / 10;
					add_timer (&hwc_data.write_timer);
					hwc_data.flags |= HWC_TIMER_RUNS;
				}
			} else;

			/* final_nl < 0: keep the line buffered */
		}
	} else;

      out:

	/* nested calls leave the primary line buffer state alone */
	if (!hwc_data.obuf_start) {
		hwc_data.obuf_cursor = obuf_cursor;
		hwc_data.obuf_count = obuf_count;
	}
	if (write_time == IMMEDIATE_WRITE)
		write_event_data_1 ();

	return processed_characters;
}
+
/*
 * Public entry point: write a message to the HWC console and start
 * transmission immediately.  Serialized against the interrupt handler
 * via hwc_data.lock.
 *
 * Returns the number of characters consumed.
 */
signed int
hwc_write (int from_user, const unsigned char *msg, unsigned int count)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave (&hwc_data.lock, flags);

	/* NOTE(review): const qualifier is dropped here; do_hwc_write()
	 * takes a plain pointer but only reads the message */
	retval = do_hwc_write (from_user, msg, count, hwc_data.ioctls.code,
			       IMMEDIATE_WRITE);

	spin_unlock_irqrestore (&hwc_data.lock, flags);

	return retval;
}
+
+unsigned int
+hwc_chars_in_buffer (unsigned char flag)
+{
+ unsigned short int number = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave (&hwc_data.lock, flags);
+
+ if (flag & IN_HWCB)
+ number += ALL_HWCB_CHAR;
+
+ if (flag & IN_WRITE_BUF)
+ number += hwc_data.obuf_cursor;
+
+ spin_unlock_irqrestore (&hwc_data.lock, flags);
+
+ return number;
+}
+
+static inline int
+nr_setbits (kmem_pages_t arg)
+{
+ int i;
+ int nr = 0;
+
+ for (i = 0; i < (sizeof (arg) << 3); i++) {
+ if (arg & 1)
+ nr++;
+ arg >>= 1;
+ }
+
+ return nr;
+}
+
/*
 * Estimate how many more characters can be accepted.  IN_HWCB counts
 * the free room in the current buffer HWCB plus the room of the
 * still-unused preallocated kmem pages; IN_WRITE_BUF counts the free
 * tail of the line buffer.
 */
unsigned int
hwc_write_room (unsigned char flag)
{
	unsigned int number = 0;
	unsigned long flags;
	write_hwcb_t *hwcb;

	spin_lock_irqsave (&hwc_data.lock, flags);

	if (flag & IN_HWCB) {

		if (BUF_HWCB) {
			hwcb = (write_hwcb_t *) BUF_HWCB;
			number += MAX_HWCB_ROOM - hwcb->length;
		}
		/* each unused kmem page contributes a full HWCB minus
		 * its header and one MTO header */
		number += (hwc_data.ioctls.kmem_hwcb -
			   nr_setbits (hwc_data.kmem_pages)) *
		    (MAX_HWCB_ROOM -
		     (sizeof (write_hwcb_t) + sizeof (mto_t)));
	}
	if (flag & IN_WRITE_BUF)
		number += MAX_HWCB_ROOM - hwc_data.obuf_cursor;

	spin_unlock_irqrestore (&hwc_data.lock, flags);

	return number;
}
+
/*
 * Discard pending output.  IN_HWCB drops the queued HWCBs -- deferred
 * until write completion if one is currently in flight; IN_WRITE_BUF
 * clears the line buffer.
 */
void
hwc_flush_buffer (unsigned char flag)
{
	unsigned long flags;

	spin_lock_irqsave (&hwc_data.lock, flags);

	if (flag & IN_HWCB) {
		if (hwc_data.current_servc != HWC_CMDW_WRITEDATA)
			flush_hwcbs ();
		else
			hwc_data.flags |= FLUSH_HWCBS;
	}
	if (flag & IN_WRITE_BUF) {
		hwc_data.obuf_cursor = 0;
		hwc_data.obuf_count = 0;
	}
	spin_unlock_irqrestore (&hwc_data.lock, flags);
}
+
/*
 * In-place case-shift processing of EBCDIC input (VM convention): the
 * configured delimiter character toggles case inversion for the
 * following text, a doubled delimiter yields one literal delimiter.
 * When the tolower ioctl is set the shifted sections are upper-cased,
 * otherwise lower-cased.
 *
 * (Name "seperate" [sic] is the exported identifier and kept as is.)
 *
 * Returns the new (possibly smaller) length of 'buf'.
 */
unsigned short int
seperate_cases (unsigned char *buf, unsigned short int count)
{

	unsigned short int i_in;

	unsigned short int i_out = 0;

	unsigned char _case = 0;

	for (i_in = 0; i_in < count; i_in++) {

		if (buf[i_in] == hwc_data.ioctls.delim) {

			if ((i_in + 1 < count) &&
			    (buf[i_in + 1] == hwc_data.ioctls.delim)) {

				/* doubled delimiter: emit one literal copy */
				buf[i_out] = hwc_data.ioctls.delim;

				i_out++;

				i_in++;

			} else
				_case = ~_case;

		} else {

			if (_case) {

				/* inside a shifted section: invert the
				 * configured default case */
				if (hwc_data.ioctls.tolower)
					buf[i_out] = _ebc_toupper[buf[i_in]];

				else
					buf[i_out] = _ebc_tolower[buf[i_in]];

			} else
				buf[i_out] = buf[i_in];

			i_out++;
		}
	}

	return i_out;
}
+
+#ifdef DUMP_HWCB_INPUT
+
+static int
+gds_vector_name (u16 id, unsigned char name[])
+{
+ int retval = 0;
+
+ switch (id) {
+ case GDS_ID_MDSMU:
+ name = "Multiple Domain Support Message Unit";
+ break;
+ case GDS_ID_MDSRouteInfo:
+ name = "MDS Routing Information";
+ break;
+ case GDS_ID_AgUnWrkCorr:
+ name = "Agent Unit of Work Correlator";
+ break;
+ case GDS_ID_SNACondReport:
+ name = "SNA Condition Report";
+ break;
+ case GDS_ID_CPMSU:
+ name = "CP Management Services Unit";
+ break;
+ case GDS_ID_RoutTargInstr:
+ name = "Routing and Targeting Instructions";
+ break;
+ case GDS_ID_OpReq:
+ name = "Operate Request";
+ break;
+ case GDS_ID_TextCmd:
+ name = "Text Command";
+ break;
+
+ default:
+ name = "unknown GDS variable";
+ retval = -EINVAL;
+ }
+
+ return retval;
+}
+#endif
+
/*
 * Linear search for the first GDS vector with the given id in the
 * region [start, end).  Vectors are chained by their self-describing
 * length field.  In debug builds each hit is also logged with its
 * symbolic name.
 *
 * Returns a pointer to the vector, or NULL if none matches.
 */
inline static gds_vector_t *
find_gds_vector (
			gds_vector_t * start, void *end, u16 id)
{
	gds_vector_t *vec;
	gds_vector_t *retval = NULL;

	vec = start;

	while (((void *) vec) < end) {
		if (vec->gds_id == id) {

#ifdef DUMP_HWCB_INPUT
			int retval_name;
			unsigned char name[64];

			retval_name = gds_vector_name (id, name);
			internal_print (
					       DELAYED_WRITE,
					       HWC_RW_PRINT_HEADER
					       "%s at 0x%x up to 0x%x, length: %d",
					       name,
					       (unsigned long) vec,
					 ((unsigned long) vec) + vec->length - 1,
					       vec->length);
			if (retval_name < 0)
				internal_print (
						       IMMEDIATE_WRITE,
						       ", id: 0x%x\n",
						       vec->gds_id);
			else
				internal_print (
						       IMMEDIATE_WRITE,
						       "\n");
#endif

			retval = vec;
			break;
		}
		/* advance to the next vector in the chain */
		vec = (gds_vector_t *) (((unsigned long) vec) + vec->length);
	}

	return retval;
}
+
+inline static gds_subvector_t *
+find_gds_subvector (
+ gds_subvector_t * start, void *end, u8 key)
+{
+ gds_subvector_t *subvec;
+ gds_subvector_t *retval = NULL;
+
+ subvec = start;
+
+ while (((void *) subvec) < end) {
+ if (subvec->key == key) {
+ retval = subvec;
+ break;
+ }
+ subvec = (gds_subvector_t *)
+ (((unsigned long) subvec) + subvec->length);
+ }
+
+ return retval;
+}
+
/*
 * Process one span of raw operator input [start, end): apply the
 * configured case folding and delimiter handling (EBCDIC), echo it
 * back if requested, convert to ASCII if that is the configured code,
 * and hand the result to the tty layer via store_hwc_input().
 *
 * Returns the (possibly reduced) number of characters stored.
 */
inline static int
get_input (void *start, void *end)
{
	int count;

	count = ((unsigned long) end) - ((unsigned long) start);

	if (hwc_data.ioctls.tolower)
		EBC_TOLOWER (start, count);

	/* delimiter processing may shrink the span */
	if (hwc_data.ioctls.delim)
		count = seperate_cases (start, count);

	if (hwc_data.ioctls.echo)
		do_hwc_write (0, start, count, CODE_EBCDIC, IMMEDIATE_WRITE);

	if (hwc_data.ioctls.code == CODE_ASCII)
		EBCASC (start, count);

	store_hwc_input (start, count);

	return count;
}
+
/*
 * Walk all subvectors with key 0x30 (text data) inside a
 * self-defining text message and feed each one's payload to
 * get_input().
 *
 * Returns the total number of input characters processed.
 */
inline static int
eval_selfdeftextmsg (gds_subvector_t * start, void *end)
{
	gds_subvector_t *subvec;
	void *subvec_data;
	void *subvec_end;
	int retval = 0;

	subvec = start;

	while (((void *) subvec) < end) {
		subvec = find_gds_subvector (subvec, end, 0x30);
		if (!subvec)
			break;
		/* payload starts behind the subvector header */
		subvec_data = (void *)
		    (((unsigned long) subvec) +
		     sizeof (gds_subvector_t));
		subvec_end = (void *)
		    (((unsigned long) subvec) + subvec->length);
		retval += get_input (subvec_data, subvec_end);
		subvec = (gds_subvector_t *) subvec_end;
	}

	return retval;
}
+
/*
 * Walk all self-defining text message subvectors inside a text
 * command and evaluate each one.
 *
 * Returns the total number of input characters processed.
 */
inline static int
eval_textcmd (gds_subvector_t * start, void *end)
{
	gds_subvector_t *subvec;
	gds_subvector_t *subvec_data;
	void *subvec_end;
	int retval = 0;

	subvec = start;

	while (((void *) subvec) < end) {
		subvec = find_gds_subvector (
					subvec, end, GDS_KEY_SelfDefTextMsg);
		if (!subvec)
			break;
		subvec_data = (gds_subvector_t *)
		    (((unsigned long) subvec) +
		     sizeof (gds_subvector_t));
		subvec_end = (void *)
		    (((unsigned long) subvec) + subvec->length);
		retval += eval_selfdeftextmsg (subvec_data, subvec_end);
		subvec = (gds_subvector_t *) subvec_end;
	}

	return retval;
}
+
/*
 * Walk all text command vectors inside a CP Management Services Unit
 * and evaluate each one.
 *
 * Returns the total number of input characters processed.
 */
inline static int
eval_cpmsu (gds_vector_t * start, void *end)
{
	gds_vector_t *vec;
	gds_subvector_t *vec_data;
	void *vec_end;
	int retval = 0;

	vec = start;

	while (((void *) vec) < end) {
		vec = find_gds_vector (vec, end, GDS_ID_TextCmd);
		if (!vec)
			break;
		vec_data = (gds_subvector_t *)
		    (((unsigned long) vec) + sizeof (gds_vector_t));
		vec_end = (void *) (((unsigned long) vec) + vec->length);
		retval += eval_textcmd (vec_data, vec_end);
		vec = (gds_vector_t *) vec_end;
	}

	return retval;
}
+
+inline static int
+eval_mdsmu (gds_vector_t * start, void *end)
+{
+ gds_vector_t *vec;
+ gds_vector_t *vec_data;
+ void *vec_end;
+ int retval = 0;
+
+ vec = find_gds_vector (start, end, GDS_ID_CPMSU);
+ if (vec) {
+ vec_data = (gds_vector_t *)
+ (((unsigned long) vec) + sizeof (gds_vector_t));
+ vec_end = (void *) (((unsigned long) vec) + vec->length);
+ retval = eval_cpmsu (vec_data, vec_end);
+ }
+ return retval;
+}
+
/*
 * Evaluate one event buffer's GDS contents: find the MDS Message Unit
 * vector and process it.
 *
 * Returns the number of input characters processed (0 if no MDSMU
 * vector is present).
 */
inline static int
eval_evbuf (gds_vector_t * start, void *end)
{
	gds_vector_t *vec;
	gds_vector_t *vec_data;
	void *vec_end;
	int retval = 0;

	vec = find_gds_vector (start, end, GDS_ID_MDSMU);
	if (vec) {
		vec_data = (gds_vector_t *)
		    (((unsigned long) vec) + sizeof (gds_vector_t));
		vec_end = (void *) (((unsigned long) vec) + vec->length);
		retval = eval_mdsmu (vec_data, vec_end);
	}
	return retval;
}
+
/*
 * Walk the chain of event buffers in [start, end) returned by a read
 * service call.  Operator command buffers are evaluated for input;
 * state-change and unknown buffers are rejected.
 *
 * Returns the accumulated character count from command buffers, or
 * -ENOSYS if an unsupported buffer type was encountered (note: a
 * later unsupported buffer overwrites the count accumulated so far).
 */
static int
process_evbufs (void *start, void *end)
{
	int retval = 0;
	evbuf_t *evbuf;
	void *evbuf_end;
	gds_vector_t *evbuf_data;

	evbuf = (evbuf_t *) start;
	while (((void *) evbuf) < end) {
		evbuf_data = (gds_vector_t *)
		    (((unsigned long) evbuf) + sizeof (evbuf_t));
		evbuf_end = (void *) (((unsigned long) evbuf) + evbuf->length);
		switch (evbuf->type) {
		case ET_OpCmd:
		case ET_CntlProgOpCmd:
		case ET_PMsgCmd:
#ifdef DUMP_HWCB_INPUT

			internal_print (
					       DELAYED_WRITE,
					       HWC_RW_PRINT_HEADER
					       "event buffer "
					   "at 0x%x up to 0x%x, length: %d\n",
					       (unsigned long) evbuf,
					       (unsigned long) (evbuf_end - 1),
					       evbuf->length);
			dump_storage_area ((void *) evbuf, evbuf->length);
#endif
			retval += eval_evbuf (evbuf_data, evbuf_end);
			break;
		case ET_StateChange:

			/* state-change events are not implemented */
			retval = -ENOSYS;
			break;
		default:
			printk (
				       KERN_WARNING
				       HWC_RW_PRINT_HEADER
				       "unconditional read: "
				       "unknown event buffer found, "
				       "type 0x%x",
				       evbuf->type);
			retval = -ENOSYS;
		}
		evbuf = (evbuf_t *) evbuf_end;
	}
	return retval;
}
+
/*
 * First half of an unconditional read: prepare a read HWCB on the
 * static page and hand it to the HWC via a READDATA service call.
 * Completion is signalled by an external interrupt and handled by
 * unconditional_read_2().
 *
 * Returns the condition code on successful initiation, -EOPNOTSUPP if
 * the HWC sends no operator commands, -EBUSY if a service call is
 * already in flight (or the HWC is busy), -EIO otherwise.
 */
static int
unconditional_read_1 (void)
{
	unsigned short int condition_code;
	read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page;
	int retval;

	if ((!hwc_data.read_prio) && (!hwc_data.read_nonprio))
		return -EOPNOTSUPP;

	if (hwc_data.current_servc)
		return -EBUSY;

	memset (hwcb, 0x00, PAGE_SIZE);
	memcpy (hwcb, &read_hwcb_template, sizeof (read_hwcb_t));

	condition_code = service_call (HWC_CMDW_READDATA, hwc_data.page);

#ifdef DUMP_HWC_READ_ERROR
	/* debug build: marker + spin for a standalone dump */
	if (condition_code == HWC_NOT_OPERATIONAL)
		__asm__ ("LHI 1,0xe40\n\t"
			 "L 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "J .+0 \n\t"
			 :
			 : "a" (&condition_code), "a" (hwc_data.page)
			 : "1", "2", "3");
#endif

	switch (condition_code) {
	case HWC_COMMAND_INITIATED:
		hwc_data.current_servc = HWC_CMDW_READDATA;
		hwc_data.current_hwcb = hwc_data.page;
		retval = condition_code;
		break;
	case HWC_BUSY:
		retval = -EBUSY;
		break;
	default:
		retval = -EIO;
	}

	return retval;
}
+
/*
 * Second half of an unconditional read, called from the external
 * interrupt handler: interpret the response code of the read HWCB.
 * 0x0020/0x0220 carry event data which is processed; 0x60F0/0x62F0
 * mean no data; all other codes indicate a protocol error and are
 * reported.
 *
 * Returns the result of process_evbufs(), 0 for no data, -EIO on
 * protocol errors.
 */
static int
unconditional_read_2 (void)
{
	read_hwcb_t *hwcb = (read_hwcb_t *) hwc_data.page;

#ifdef DUMP_HWC_READ_ERROR
	/* debug build: marker + spin on an unexpected response code */
	if ((hwcb->response_code != 0x0020) &&
	    (hwcb->response_code != 0x0220) &&
	    (hwcb->response_code != 0x60F0) &&
	    (hwcb->response_code != 0x62F0))
		__asm__ ("LHI 1,0xe41\n\t"
			 "LRA 2,0(0,%0)\n\t"
			 "L 3,0(0,%1)\n\t"
			 "J .+0\n\t"
			 :
			 : "a" (hwc_data.page), "a" (&(hwcb->response_code))
			 : "1", "2", "3");
#endif

	hwc_data.current_servc = 0;
	hwc_data.current_hwcb = NULL;

	switch (hwcb->response_code) {

	case 0x0020:
	case 0x0220:
		/* event buffers follow the read HWCB header */
		return process_evbufs (
			 (void *) (((unsigned long) hwcb) + sizeof (read_hwcb_t)),
			    (void *) (((unsigned long) hwcb) + hwcb->length));

	case 0x60F0:
	case 0x62F0:
		return 0;

	case 0x0100:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
			  "unconditional read: HWCB boundary violation - this "
			 "must not occur in a correct driver, please contact "
				       "author\n");
		return -EIO;

	case 0x0300:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
				       "unconditional read: "
			 "insufficient HWCB length - this must not occur in a "
				       "correct driver, please contact author\n");
		return -EIO;

	case 0x01F0:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
				       "unconditional read: "
			   "invalid command - this must not occur in a correct "
				       "driver, please contact author\n");
		return -EIO;

	case 0x40F0:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
			       "unconditional read: invalid function code - this "
			 "must not occur in a correct driver, please contact "
				       "author\n");
		return -EIO;

	case 0x70F0:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
			      "unconditional read: invalid selection mask - this "
			 "must not occur in a correct driver, please contact "
				       "author\n");
		return -EIO;

	case 0x0040:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
			      "unconditional read: HWC equipment check - don't "
				       "know how to handle this case\n");
		return -EIO;

	default:
		internal_print (
				       IMMEDIATE_WRITE,
				       HWC_RW_PRINT_HEADER
			  "unconditional read: invalid response code %x - this "
			 "must not occur in a correct driver, please contact "
				       "author\n",
				       hwcb->response_code);
		return -EIO;
	}
}
+
/*
 * First half of driver initialization: send the WRITEMASK service
 * call announcing which event types we want to exchange with the HWC.
 * The response is evaluated in write_event_mask_2() when the external
 * interrupt arrives.
 *
 * Returns the condition code on successful initiation, -EBUSY if the
 * HWC is busy, -EIO otherwise.
 */
static int
write_event_mask_1 (void)
{
	unsigned int condition_code;
	int retval;

	memcpy (hwc_data.page, &init_hwcb_template, sizeof (init_hwcb_t));

	condition_code = service_call (HWC_CMDW_WRITEMASK, hwc_data.page);

#ifdef DUMP_HWC_INIT_ERROR

	/* debug build: marker + spin for a standalone dump */
	if (condition_code != HWC_COMMAND_INITIATED)
		__asm__ ("LHI 1,0xe10\n\t"
			 "L 2,0(0,%0)\n\t"
			 "LRA 3,0(0,%1)\n\t"
			 "J .+0\n\t"
			 :
			 : "a" (&condition_code), "a" (hwc_data.page)
			 : "1", "2", "3");
#endif

	switch (condition_code) {
	case HWC_COMMAND_INITIATED:
		hwc_data.current_servc = HWC_CMDW_WRITEMASK;
		hwc_data.current_hwcb = hwc_data.page;
		retval = condition_code;
		break;
	case HWC_BUSY:
		retval = -EBUSY;
		break;
	default:
		retval = -EIO;
	}

	return retval;
}
+
+static int
+write_event_mask_2 (void)
+{
+ init_hwcb_t *hwcb = (init_hwcb_t *) hwc_data.page;
+ int retval = 0;
+
+ if (hwcb->hwc_receive_mask & ET_Msg_Mask)
+ hwc_data.write_nonprio = 1;
+
+ if (hwcb->hwc_receive_mask & ET_PMsgCmd_Mask)
+ hwc_data.write_prio = 1;
+
+ if (hwcb->hwc_send_mask & ET_OpCmd_Mask)
+ hwc_data.read_nonprio = 1;
+
+ if (hwcb->hwc_send_mask & ET_PMsgCmd_Mask)
+ hwc_data.read_nonprio = 1;
+
+ if ((hwcb->response_code != 0x0020) ||
+ (!hwc_data.write_nonprio) ||
+ ((!hwc_data.read_nonprio) && (!hwc_data.read_prio)))
+#ifdef DUMP_HWC_INIT_ERROR
+ __asm__ ("LHI 1,0xe11\n\t"
+ "LRA 2,0(0,%0)\n\t"
+ "L 3,0(0,%1)\n\t"
+ "J .+0\n\t"
+ :
+ : "a" (hwcb), "a" (&(hwcb->response_code))
+ : "1", "2", "3");
+#else
+ retval = -EIO
+#endif
+
+ hwc_data.current_servc = 0;
+ hwc_data.current_hwcb = NULL;
+
+ return retval;
+}
+
+static int
+set_hwc_ioctls (hwc_ioctls_t * ioctls, char correct)
+{
+ int retval = 0;
+ hwc_ioctls_t tmp;
+
+ if (ioctls->width_htab > MAX_MESSAGE_SIZE) {
+ if (correct)
+ tmp.width_htab = MAX_MESSAGE_SIZE;
+ else
+ retval = -EINVAL;
+ } else
+ tmp.width_htab = ioctls->width_htab;
+
+ tmp.echo = ioctls->echo;
+
+ if (ioctls->columns > MAX_MESSAGE_SIZE) {
+ if (correct)
+ tmp.columns = MAX_MESSAGE_SIZE;
+ else
+ retval = -EINVAL;
+ } else
+ tmp.columns = ioctls->columns;
+
+ switch (ioctls->code) {
+ case CODE_EBCDIC:
+ case CODE_ASCII:
+ tmp.code = ioctls->code;
+ break;
+ default:
+ if (correct)
+ tmp.code = CODE_ASCII;
+ else
+ retval = -EINVAL;
+ }
+
+ tmp.final_nl = ioctls->final_nl;
+
+ if (ioctls->max_hwcb < 2) {
+ if (correct)
+ tmp.max_hwcb = 2;
+ else
+ retval = -EINVAL;
+ } else
+ tmp.max_hwcb = ioctls->max_hwcb;
+
+ tmp.tolower = ioctls->tolower;
+
+ if (ioctls->kmem_hwcb > ioctls->max_hwcb) {
+ if (correct)
+ tmp.kmem_hwcb = ioctls->max_hwcb;
+ else
+ retval = -EINVAL;
+ } else
+ tmp.kmem_hwcb = ioctls->kmem_hwcb;
+
+ if (ioctls->kmem_hwcb > MAX_KMEM_PAGES) {
+ if (correct)
+ ioctls->kmem_hwcb = MAX_KMEM_PAGES;
+ else
+ retval = -EINVAL;
+ }
+ if (ioctls->kmem_hwcb < 2) {
+ if (correct)
+ ioctls->kmem_hwcb = 2;
+ else
+ retval = -EINVAL;
+ }
+ tmp.delim = ioctls->delim;
+
+ if (!(retval < 0))
+ hwc_data.ioctls = tmp;
+
+ return retval;
+}
+
/*
 * One-time driver initialization: exchange event masks with the HWC,
 * apply the (possibly VM-adjusted) initial ioctl settings, reserve
 * the boot-time HWCB page pool, and enable service-signal external
 * interrupts (control register 0, bit 9).
 *
 * Returns 0 or a negative error from the mask exchange / settings.
 */
int
hwc_init (void)
{
	int retval;
#ifdef BUFFER_STRESS_TEST

	init_hwcb_t *hwcb;
	int i;

#endif

#ifdef CONFIG_3215
	/* under VM the 3215 console driver takes precedence */
	if (MACHINE_IS_VM)
		return 0;
#endif

	spin_lock_init (&hwc_data.lock);

	retval = write_event_mask_1 ();
	if (retval < 0)
		return retval;

#ifdef USE_VM_DETECTION

	/* VM console convention: narrower lines, lower-case folding,
	 * and a default case-shift delimiter */
	if (MACHINE_IS_VM) {

		if (hwc_data.init_ioctls.columns > 76)
			hwc_data.init_ioctls.columns = 76;
		hwc_data.init_ioctls.tolower = 1;
		if (!hwc_data.init_ioctls.delim)
			hwc_data.init_ioctls.delim = DEFAULT_CASE_DELIMITER;
	} else {
		hwc_data.init_ioctls.tolower = 0;
		hwc_data.init_ioctls.delim = 0;
	}
#endif
	retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1);

	/* preallocate the HWCB page pool from boot memory */
	hwc_data.kmem_start = (unsigned long)
	    alloc_bootmem_pages (hwc_data.ioctls.kmem_hwcb * PAGE_SIZE);
	hwc_data.kmem_end = hwc_data.kmem_start +
	    hwc_data.ioctls.kmem_hwcb * PAGE_SIZE - 1;

	/* enable service-signal external interruptions */
	ctl_set_bit (0, 9);

#ifdef BUFFER_STRESS_TEST

	internal_print (
			       DELAYED_WRITE,
			       HWC_RW_PRINT_HEADER
			       "use %i bytes for buffering.\n",
			       hwc_data.ioctls.kmem_hwcb * PAGE_SIZE);
	for (i = 0; i < 500; i++) {
		hwcb = (init_hwcb_t *) BUF_HWCB;
		internal_print (
				       DELAYED_WRITE,
				       HWC_RW_PRINT_HEADER
			  "This is stress test message #%i, free: %i bytes\n",
				       i,
			    MAX_HWCB_ROOM - (hwcb->length + sizeof (mto_t)));
	}

#endif

	return retval;
}
+
/*
 * Service-signal external interrupt handler: with no service call in
 * flight the interrupt announces pending operator input, so start an
 * unconditional read; otherwise dispatch on the completed command and
 * then try to push further queued output.  Finally wake the tty layer.
 */
void
do_hwc_interrupt (void)
{

	spin_lock (&hwc_data.lock);

	if (!hwc_data.current_servc) {

		/* unsolicited: the HWC has input for us */
		unconditional_read_1 ();

	} else {

		switch (hwc_data.current_servc) {

		case HWC_CMDW_WRITEMASK:

			write_event_mask_2 ();
			break;

		case HWC_CMDW_WRITEDATA:

			write_event_data_2 ();
			break;

		case HWC_CMDW_READDATA:

			unconditional_read_2 ();
			break;
		}

		/* start the next pending write, if any */
		write_event_data_1 ();
	}

	wake_up_hwc_tty ();

	spin_unlock (&hwc_data.lock);
}
+
/*
 * ioctl backend for the HWC tty: TIOCHWCS* calls copy a new value
 * from user space into a scratch copy of the current settings, which
 * is then validated and committed via set_hwc_ioctls() (strict mode,
 * no clamping); TIOCHWCG* calls return the current values.
 *
 * Returns 0, -EFAULT on user-copy failure, -ENOIOCTLCMD for unknown
 * commands, or a negative error from set_hwc_ioctls().
 */
int
hwc_ioctl (unsigned int cmd, unsigned long arg)
{
	hwc_ioctls_t tmp = hwc_data.ioctls;
	int retval = 0;
	unsigned long flags;
	unsigned int obuf;

	spin_lock_irqsave (&hwc_data.lock, flags);

	switch (cmd) {

	case TIOCHWCSHTAB:
		if (get_user (tmp.width_htab, (ioctl_htab_t *) arg))
			goto fault;
		break;

	case TIOCHWCSECHO:
		if (get_user (tmp.echo, (ioctl_echo_t *) arg))
			goto fault;
		break;

	case TIOCHWCSCOLS:
		if (get_user (tmp.columns, (ioctl_cols_t *) arg))
			goto fault;
		break;

	case TIOCHWCSCODE:
		if (get_user (tmp.code, (ioctl_code_t *) arg))
			goto fault;

		break;

	case TIOCHWCSNL:
		if (get_user (tmp.final_nl, (ioctl_nl_t *) arg))
			goto fault;
		break;

	case TIOCHWCSOBUF:
		/* buffer size is given in bytes; convert to pages,
		 * rounding up */
		if (get_user (obuf, (unsigned int *) arg))
			goto fault;
		if (obuf & 0xFFF)
			tmp.max_hwcb = (((obuf | 0xFFF) + 1) >> 12);
		else
			tmp.max_hwcb = (obuf >> 12);
		break;

	case TIOCHWCSCASE:
		if (get_user (tmp.tolower, (ioctl_case_t *) arg))
			goto fault;
		break;

	case TIOCHWCSDELIM:
		if (get_user (tmp.delim, (ioctl_delim_t *) arg))
			goto fault;
		break;

	case TIOCHWCSINIT:
		/* reset to the (clamped) initial settings */
		retval = set_hwc_ioctls (&hwc_data.init_ioctls, 1);
		break;

	case TIOCHWCGHTAB:
		if (put_user (tmp.width_htab, (ioctl_htab_t *) arg))
			goto fault;
		break;

	case TIOCHWCGECHO:
		if (put_user (tmp.echo, (ioctl_echo_t *) arg))
			goto fault;
		break;

	case TIOCHWCGCOLS:
		if (put_user (tmp.columns, (ioctl_cols_t *) arg))
			goto fault;
		break;

	case TIOCHWCGCODE:
		if (put_user (tmp.code, (ioctl_code_t *) arg))
			goto fault;

		break;

	case TIOCHWCGNL:
		if (put_user (tmp.final_nl, (ioctl_nl_t *) arg))
			goto fault;
		break;

	case TIOCHWCGOBUF:
		if (put_user (tmp.max_hwcb, (ioctl_obuf_t *) arg))
			goto fault;
		break;

	case TIOCHWCGKBUF:
		if (put_user (tmp.kmem_hwcb, (ioctl_obuf_t *) arg))
			goto fault;
		break;

	case TIOCHWCGCASE:
		if (put_user (tmp.tolower, (ioctl_case_t *) arg))
			goto fault;
		break;

	case TIOCHWCGDELIM:
		if (put_user (tmp.delim, (ioctl_delim_t *) arg))
			goto fault;
		break;
#if 0

	case TIOCHWCGINIT:
		if (put_user (&hwc_data.init_ioctls, (hwc_ioctls_t *) arg))
			goto fault;
		break;

	case TIOCHWCGCURR:
		if (put_user (&hwc_data.ioctls, (hwc_ioctls_t *) arg))
			goto fault;
		break;
#endif

	default:
		goto noioctlcmd;
	}

	/* commit set-type ioctls in strict (non-correcting) mode */
	if (_IOC_DIR (cmd) == _IOC_WRITE)
		retval = set_hwc_ioctls (&tmp, 0);

	goto out;

      fault:
	retval = -EFAULT;
	goto out;
      noioctlcmd:
	retval = -ENOIOCTLCMD;
      out:
	spin_unlock_irqrestore (&hwc_data.lock, flags);
	return retval;
}
diff --git a/drivers/s390/char/hwc_rw.h b/drivers/s390/char/hwc_rw.h
new file mode 100644
index 000000000..04c939cd8
--- /dev/null
+++ b/drivers/s390/char/hwc_rw.h
@@ -0,0 +1,113 @@
+/*
+ * drivers/s390/char/hwc_rw.h
+ * interface to the HWC-read/write driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <peschke@fh-brandenburg.de>
+ */
+
+#ifndef __HWC_RW_H__
+#define __HWC_RW_H__
+
+#include <linux/ioctl.h>
+
+#ifndef __HWC_RW_C__
+
+extern int hwc_init (void);
+
+extern int hwc_write (int from_user, const unsigned char *, unsigned int);
+
+extern unsigned int hwc_chars_in_buffer (unsigned char);
+
+extern unsigned int hwc_write_room (unsigned char);
+
+extern void hwc_flush_buffer (unsigned char);
+
+extern signed int hwc_ioctl (unsigned int, unsigned long);
+
+extern void do_hwc_interrupt (void);
+
+extern int hwc_printk (const char *,...);
+
+#else
+
+extern void store_hwc_input (unsigned char *, unsigned int);
+
+extern void wake_up_hwc_tty (void);
+
+#endif
+
+/* Buffer-selector flags for hwc_chars_in_buffer()/hwc_write_room(): */
+#define IN_HWCB 1
+#define IN_WRITE_BUF 2
+#define IN_BUFS_TOTAL (IN_HWCB | IN_WRITE_BUF)
+
+/* Scalar types of the individual tunables; the _IOR/_IOW macros below
+ * take sizeof() these fields to encode the ioctl numbers. */
+typedef unsigned short int ioctl_htab_t;
+typedef unsigned char ioctl_echo_t;
+typedef unsigned short int ioctl_cols_t;
+typedef unsigned char ioctl_code_t;
+typedef signed char ioctl_nl_t;
+typedef unsigned short int ioctl_obuf_t;
+typedef unsigned char ioctl_case_t;
+typedef unsigned char ioctl_delim_t;
+
+/* Complete set of HWC terminal settings as manipulated by the
+ * TIOCHWC* ioctls. */
+typedef struct {
+ ioctl_htab_t width_htab;
+ ioctl_echo_t echo;
+ ioctl_cols_t columns;
+ ioctl_code_t code;
+ ioctl_nl_t final_nl;
+ ioctl_obuf_t max_hwcb;
+ ioctl_obuf_t kmem_hwcb;
+ ioctl_case_t tolower;
+ ioctl_delim_t delim;
+} hwc_ioctls_t;
+
+/* Dummy instance used only so the ioctl macros below can reference
+ * field sizes; NOTE(review): being static in a header, every includer
+ * gets its own (unused) copy. */
+static hwc_ioctls_t _hwc_ioctls;
+
+#define HWC_IOCTL_LETTER 'B'
+
+#define TIOCHWCSHTAB _IOW(HWC_IOCTL_LETTER, 0, _hwc_ioctls.width_htab)
+
+#define TIOCHWCSECHO _IOW(HWC_IOCTL_LETTER, 1, _hwc_ioctls.echo)
+
+#define TIOCHWCSCOLS _IOW(HWC_IOCTL_LETTER, 2, _hwc_ioctls.columns)
+
+#define TIOCHWCSCODE _IOW(HWC_IOCTL_LETTER, 3, _hwc_ioctls.code)
+
+#define TIOCHWCSNL _IOW(HWC_IOCTL_LETTER, 4, _hwc_ioctls.final_nl)
+
+#define TIOCHWCSOBUF _IOW(HWC_IOCTL_LETTER, 5, _hwc_ioctls.max_hwcb)
+
+#define TIOCHWCSINIT _IO(HWC_IOCTL_LETTER, 6)
+
+#define TIOCHWCSCASE _IOW(HWC_IOCTL_LETTER, 7, _hwc_ioctls.tolower)
+
+#define TIOCHWCSDELIM _IOW(HWC_IOCTL_LETTER, 9, _hwc_ioctls.delim)
+
+#define TIOCHWCGHTAB _IOR(HWC_IOCTL_LETTER, 10, _hwc_ioctls.width_htab)
+
+#define TIOCHWCGECHO _IOR(HWC_IOCTL_LETTER, 11, _hwc_ioctls.echo)
+
+#define TIOCHWCGCOLS _IOR(HWC_IOCTL_LETTER, 12, _hwc_ioctls.columns)
+
+#define TIOCHWCGCODE _IOR(HWC_IOCTL_LETTER, 13, _hwc_ioctls.code)
+
+#define TIOCHWCGNL _IOR(HWC_IOCTL_LETTER, 14, _hwc_ioctls.final_nl)
+
+#define TIOCHWCGOBUF _IOR(HWC_IOCTL_LETTER, 15, _hwc_ioctls.max_hwcb)
+
+#define TIOCHWCGINIT _IOR(HWC_IOCTL_LETTER, 16, _hwc_ioctls)
+
+#define TIOCHWCGCASE _IOR(HWC_IOCTL_LETTER, 17, _hwc_ioctls.tolower)
+
+#define TIOCHWCGDELIM _IOR(HWC_IOCTL_LETTER, 19, _hwc_ioctls.delim)
+
+/* NOTE(review): the handler returns kmem_hwcb for this ioctl, but the
+ * number is encoded with max_hwcb; both are ioctl_obuf_t so the value
+ * is identical - confirm intent. */
+#define TIOCHWCGKBUF _IOR(HWC_IOCTL_LETTER, 20, _hwc_ioctls.max_hwcb)
+
+#define TIOCHWCGCURR _IOR(HWC_IOCTL_LETTER, 21, _hwc_ioctls)
+
+#define CODE_ASCII 0x0
+#define CODE_EBCDIC 0x1
+
+#endif
diff --git a/drivers/s390/char/hwc_tty.c b/drivers/s390/char/hwc_tty.c
new file mode 100644
index 000000000..d58504168
--- /dev/null
+++ b/drivers/s390/char/hwc_tty.c
@@ -0,0 +1,265 @@
+/*
+ * drivers/s390/char/hwc_tty.c
+ * HWC line mode terminal driver.
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Peschke <peschke@fh-brandenburg.de>
+ *
+ * Thanks to Martin Schwidefsky.
+ */
+
+#include <linux/major.h>
+#include <linux/termios.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+
+#include "hwc_rw.h"
+
+#define HWC_TTY_PRINT_HEADER "hwc tty driver: "
+
+#define HWC_TTY_BUF_SIZE 512
+
+typedef struct {
+
+ struct tty_struct *tty;
+
+ unsigned char buf[HWC_TTY_BUF_SIZE];
+
+ unsigned short int buf_count;
+
+ spinlock_t lock;
+} hwc_tty_data_struct;
+
+static hwc_tty_data_struct hwc_tty_data;
+static struct tty_driver hwc_tty_driver;
+static struct tty_struct *hwc_tty_table[1];
+static struct termios *hwc_tty_termios[1];
+static struct termios *hwc_tty_termios_locked[1];
+static int hwc_tty_refcount = 0;
+
+extern struct termios tty_std_termios;
+
+/* Open the (single) HWC tty: only the minor equal to minor_start
+ * exists. Records the tty in the driver-global hwc_tty_data so the
+ * interrupt-side code (store_hwc_input/wake_up_hwc_tty) can reach it. */
+static int
+hwc_tty_open (struct tty_struct *tty,
+ struct file *filp)
+{
+
+ /* only one device is provided (num = 1 in hwc_tty_init) */
+ if (MINOR (tty->device) - tty->driver.minor_start)
+ return -ENODEV;
+
+ tty->driver_data = &hwc_tty_data;
+ hwc_tty_data.buf_count = 0;
+ hwc_tty_data.tty = tty;
+ tty->low_latency = 0;
+
+ return 0;
+}
+
+/* Called from the HWC layer when output space becomes available:
+ * notify the line discipline (if it requested write wakeups) and any
+ * sleepers on the tty's write queue. No-op while the tty is closed. */
+void
+wake_up_hwc_tty (void)
+{
+ if (hwc_tty_data.tty == NULL)
+ return;
+ if ((hwc_tty_data.tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ hwc_tty_data.tty->ldisc.write_wakeup)
+ (hwc_tty_data.tty->ldisc.write_wakeup) (hwc_tty_data.tty);
+ wake_up_interruptible (&hwc_tty_data.tty->write_wait);
+}
+
+/* Drop the driver<->tty link on close. NOTE(review): currently dead
+ * code - hwc_tty_init() installs driver.close = NULL instead of this
+ * function. */
+static void
+hwc_tty_close (struct tty_struct *tty,
+ struct file *filp)
+{
+ if (MINOR (tty->device) != tty->driver.minor_start) {
+ printk (KERN_WARNING HWC_TTY_PRINT_HEADER
+ "do not close hwc tty because of wrong device number");
+ return;
+ }
+ hwc_tty_data.tty = NULL;
+}
+
+/* Report how much room is left for writes, counting both HWC buffer
+ * stages. */
+static int
+hwc_tty_write_room (struct tty_struct *tty)
+{
+	return hwc_write_room (IN_BUFS_TOTAL);
+}
+
+/* tty write entry point: first flush any single characters staged by
+ * hwc_tty_put_char, then hand the caller's buffer to the HWC layer.
+ * NOTE(review): the staged-buffer flush here is done without taking
+ * hwc_tty_data.lock, unlike put_char/flush_chars - confirm the tty
+ * layer serializes these calls. */
+static int
+hwc_tty_write (struct tty_struct *tty,
+ int from_user,
+ const unsigned char *buf,
+ int count)
+{
+ int retval;
+
+ if (hwc_tty_data.buf_count > 0) {
+ hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+ hwc_tty_data.buf_count = 0;
+ }
+ retval = hwc_write (from_user, buf, count);
+ return retval;
+}
+
+/* Stage one character; when the staging buffer is full it is flushed
+ * synchronously to the HWC layer first. The driver spinlock guards
+ * the shared buffer/count. */
+static void
+hwc_tty_put_char (struct tty_struct *tty,
+ unsigned char ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave (&hwc_tty_data.lock, flags);
+ if (hwc_tty_data.buf_count >= HWC_TTY_BUF_SIZE) {
+ hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+ hwc_tty_data.buf_count = 0;
+ }
+ hwc_tty_data.buf[hwc_tty_data.buf_count] = ch;
+ hwc_tty_data.buf_count++;
+ spin_unlock_irqrestore (&hwc_tty_data.lock, flags);
+}
+
+/* Push everything collected by hwc_tty_put_char out to the HWC layer. */
+static void
+hwc_tty_flush_chars (struct tty_struct *tty)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave (&hwc_tty_data.lock, flags);
+ hwc_write (0, hwc_tty_data.buf, hwc_tty_data.buf_count);
+ hwc_tty_data.buf_count = 0;
+ spin_unlock_irqrestore (&hwc_tty_data.lock, flags);
+}
+
+/* Report how many characters are still queued in the HWC buffers. */
+static int
+hwc_tty_chars_in_buffer (struct tty_struct *tty)
+{
+	return hwc_chars_in_buffer (IN_BUFS_TOTAL);
+}
+
+/* flush_buffer callback. NOTE(review): only wakes up writers; the
+ * staged output itself is not discarded here (hwc_flush_buffer from
+ * hwc_rw.h is not called) - confirm intent. */
+static void
+hwc_tty_flush_buffer (struct tty_struct *tty)
+{
+ wake_up_hwc_tty ();
+}
+
+/* Forward tty ioctls to the HWC layer (TIOCHWC*), unless the tty is
+ * in an I/O error state. */
+static int
+hwc_tty_ioctl (
+ struct tty_struct *tty,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ if (tty->flags & (1 << TTY_IO_ERROR))
+ return -EIO;
+
+ return hwc_ioctl (cmd, arg);
+}
+
+/*
+ * Push a block of input received from the HWC into the tty's flip
+ * buffer. Two-character control sequences are mapped onto the tty's
+ * control characters: "^c" -> INTR, "^d" -> EOF, "^z" -> SUSP. The
+ * caret may also arrive as 0xAA (octal 252) from codepages where the
+ * hat maps there instead of to ASCII '^'. Plain lines get a '\n'
+ * appended unless terminated with the "^n" (no-newline) delimiter,
+ * which is stripped instead.
+ * NOTE(review): no check that the flip buffer can hold count+1
+ * characters; assumed to hold for HWC line sizes - confirm.
+ */
+void
+store_hwc_input (unsigned char *buf, unsigned int count)
+{
+	struct tty_struct *tty = hwc_tty_data.tty;
+
+	if (tty != NULL) {
+
+		if (count == 2 && (
+			/* hat is 0xb0 in codepage 037 (US etc.) and thus */
+			/* converted to 0x5e in ascii ('^') */
+			strncmp (buf, "^c", 2) == 0 ||
+			/* hat is 0xb0 in several other codepages (German, */
+			/* UK, ...) and thus converted to ascii octal 252; */
+			/* the escape must be "\252" - the original string */
+			/* "\0252c" parsed as the chars '\025' '2' 'c' and */
+			/* so never matched */
+			strncmp (buf, "\252c", 2) == 0)) {
+			tty->flip.count++;
+			*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+			*tty->flip.char_buf_ptr++ = INTR_CHAR (tty);
+		} else if (count == 2 && (
+			strncmp (buf, "^d", 2) == 0 ||
+			strncmp (buf, "\252d", 2) == 0)) {
+			tty->flip.count++;
+			*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+			*tty->flip.char_buf_ptr++ = EOF_CHAR (tty);
+		} else if (count == 2 && (
+			strncmp (buf, "^z", 2) == 0 ||
+			strncmp (buf, "\252z", 2) == 0)) {
+			tty->flip.count++;
+			*tty->flip.flag_buf_ptr++ = TTY_NORMAL;
+			*tty->flip.char_buf_ptr++ = SUSP_CHAR (tty);
+		} else {
+
+			memcpy (tty->flip.char_buf_ptr, buf, count);
+			/* append '\n' unless the line ends in one of the */
+			/* "^n" delimiters (both strncmp's nonzero means */
+			/* neither matched); the original used ||, which */
+			/* is always true, so the delimiter was never */
+			/* stripped */
+			if (count < 2 || (
+				strncmp (buf + count - 2, "^n", 2) &&
+				strncmp (buf + count - 2, "\252n", 2))) {
+				tty->flip.char_buf_ptr[count] = '\n';
+				count++;
+			} else
+				count -= 2;
+			memset (tty->flip.flag_buf_ptr, TTY_NORMAL, count);
+			tty->flip.char_buf_ptr += count;
+			tty->flip.flag_buf_ptr += count;
+			tty->flip.count += count;
+		}
+		tty_flip_buffer_push (tty);
+		wake_up_hwc_tty ();
+	}
+}
+
+/*
+ * Register the HWC line-mode terminal as a single tty: major
+ * TTY_MAJOR, minor 64 (the first "ttyS" slot), num = 1, so all state
+ * lives in the file-static one-element tables above. Panics if
+ * registration fails, since this is the system console path.
+ */
+void
+hwc_tty_init (void)
+{
+ memset (&hwc_tty_driver, 0, sizeof (struct tty_driver));
+ memset (&hwc_tty_data, 0, sizeof (hwc_tty_data_struct));
+ hwc_tty_driver.magic = TTY_DRIVER_MAGIC;
+ hwc_tty_driver.driver_name = "tty_hwc";
+ hwc_tty_driver.name = "ttyS";
+ hwc_tty_driver.name_base = 0;
+ hwc_tty_driver.major = TTY_MAJOR;
+ hwc_tty_driver.minor_start = 64;
+ hwc_tty_driver.num = 1;
+ hwc_tty_driver.type = TTY_DRIVER_TYPE_SYSTEM;
+ hwc_tty_driver.subtype = SYSTEM_TYPE_TTY;
+ hwc_tty_driver.init_termios = tty_std_termios;
+ hwc_tty_driver.init_termios.c_iflag = IGNBRK | IGNPAR;
+ hwc_tty_driver.init_termios.c_oflag = ONLCR;
+ hwc_tty_driver.init_termios.c_lflag = ISIG | ECHO;
+ hwc_tty_driver.flags = TTY_DRIVER_REAL_RAW;
+ hwc_tty_driver.refcount = &hwc_tty_refcount;
+
+ hwc_tty_driver.table = hwc_tty_table;
+ hwc_tty_driver.termios = hwc_tty_termios;
+ hwc_tty_driver.termios_locked = hwc_tty_termios_locked;
+
+ hwc_tty_driver.open = hwc_tty_open;
+ /* close is intentionally disabled; hwc_tty_close() is dead code */
+ hwc_tty_driver.close = NULL /* hwc_tty_close */ ;
+ hwc_tty_driver.write = hwc_tty_write;
+ hwc_tty_driver.put_char = hwc_tty_put_char;
+ hwc_tty_driver.flush_chars = hwc_tty_flush_chars;
+ hwc_tty_driver.write_room = hwc_tty_write_room;
+ hwc_tty_driver.chars_in_buffer = hwc_tty_chars_in_buffer;
+ hwc_tty_driver.flush_buffer = hwc_tty_flush_buffer;
+ hwc_tty_driver.ioctl = hwc_tty_ioctl;
+
+ hwc_tty_driver.throttle = NULL;
+ hwc_tty_driver.unthrottle = NULL;
+ hwc_tty_driver.send_xchar = NULL;
+ hwc_tty_driver.set_termios = NULL;
+ hwc_tty_driver.set_ldisc = NULL;
+ hwc_tty_driver.stop = NULL;
+ hwc_tty_driver.start = NULL;
+ hwc_tty_driver.hangup = NULL;
+ hwc_tty_driver.break_ctl = NULL;
+ hwc_tty_driver.wait_until_sent = NULL;
+ hwc_tty_driver.read_proc = NULL;
+ hwc_tty_driver.write_proc = NULL;
+
+ if (tty_register_driver (&hwc_tty_driver))
+ panic ("Couldn't register hwc_tty driver\n");
+}
diff --git a/drivers/s390/misc/Makefile b/drivers/s390/misc/Makefile
new file mode 100644
index 000000000..3d8c93271
--- /dev/null
+++ b/drivers/s390/misc/Makefile
@@ -0,0 +1,13 @@
+all: s390-misc.o
+
+# was misspelled "CFLAFS", which make silently treated as an unrelated
+# (unused) variable
+CFLAGS +=
+O_TARGET := s390-misc.o
+O_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_CHANDEV),y)
+  O_OBJS += chandev.o
+endif
+
+include $(TOPDIR)/Rules.make
+
diff --git a/drivers/s390/misc/chandev.c b/drivers/s390/misc/chandev.c
new file mode 100644
index 000000000..4345f1211
--- /dev/null
+++ b/drivers/s390/misc/chandev.c
@@ -0,0 +1,759 @@
+/*
+ * drivers/s390/misc/chandev.c
+ * common channel device layer
+ *
+ * S390 version
+ * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <asm/queue.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <asm/irq.h>
+#include <linux/init.h>
+#include <linux/unistd.h>
+#include <asm/chandev.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+
+static chandev_model_info *chandev_models_head=NULL;
+static chandev *chandev_head=NULL;
+static chandev_noauto_range *chandev_noauto_head=NULL;
+static chandev_force *chandev_force_head=NULL;
+static chandev_probelist *chandev_probelist_head=NULL;
+static int use_devno_names=FALSE;
+static int chandev_conf_read=FALSE;
+
+/* Allocate a node of the given size and push it onto *listhead.
+ * Returns the new node or NULL on allocation failure. */
+static void *chandev_alloc_listmember(list **listhead,size_t size)
+{
+	/* kmalloc takes (size, flags); the original call had the two
+	 * arguments reversed, allocating GFP_KERNEL bytes instead of
+	 * the requested size */
+	void *newmember=kmalloc(size,GFP_KERNEL);
+	if(newmember)
+		add_to_list(listhead,newmember);
+	return(newmember);
+}
+
+/* Unlink member from *listhead and free it; logs (without crashing)
+ * if the node was not actually on the list. */
+void chandev_free_listmember(list **listhead,list *member)
+{
+ if(remove_from_list(listhead,member))
+ kfree(member);
+ else
+ printk(KERN_CRIT"chandev_free_listmember detected nonexistant"
+ "listmember listhead=%p member %p\n",listhead,member);
+}
+
+/* Release every node of the given list. */
+void chandev_free_all(list **listhead)
+{
+	while(*listhead!=NULL)
+		chandev_free_listmember(listhead,*listhead);
+}
+
+/* Register a control-unit type/model that auto-detection should treat
+ * as a channel device of the given chan_type bitfield; max_port_no is
+ * the highest relative adapter number the hardware supports. */
+void chandev_add_model(chandev_type chan_type,u16 cu_type,u8 cu_model,u8 max_port_no)
+{
+ chandev_model_info *newmodel;
+
+ if((newmodel=chandev_alloc_listmember(
+ (list **)&chandev_models_head,sizeof(chandev_model_info))))
+ {
+ newmodel->chan_type=chan_type;
+ newmodel->cu_type=cu_type;
+ newmodel->cu_model=cu_model;
+ newmodel->max_port_no=max_port_no;
+ }
+}
+
+
+/* Unlink and free one detected device node. */
+void chandev_remove(chandev *member)
+{
+ chandev_free_listmember((list **)&chandev_head,(list *)member);
+}
+
+
+/* Discard the entire list of detected (still unclaimed) devices. */
+void chandev_remove_all(void)
+{
+ chandev_free_all((list **)&chandev_head);
+}
+
+/* Remove a model and every detected device that matched it.
+ * chandev_remove() kfree()s the node, so the successor pointer is
+ * fetched before removal (the original read curr_chandev->next after
+ * the node had been freed). */
+void chandev_remove_model(chandev_model_info *model)
+{
+	chandev *curr_chandev,*next_chandev;
+
+	for(curr_chandev=chandev_head;curr_chandev!=NULL;
+	    curr_chandev=next_chandev)
+	{
+		next_chandev=curr_chandev->next;
+		if(curr_chandev->model_info==model)
+			chandev_remove(curr_chandev);
+	}
+	chandev_free_listmember((list **)&chandev_models_head,(list *)model);
+}
+
+/* Drop every registered model (and, via chandev_remove_model, every
+ * detected device that referenced one). */
+void chandev_remove_all_models(void)
+{
+ while(chandev_models_head)
+ chandev_remove_model(chandev_models_head);
+}
+
+/* Delete every model matching cu_type/cu_model.
+ * chandev_remove_model() kfree()s the node, so ->next is saved before
+ * removal (the original dereferenced the freed node to advance). */
+void chandev_del_model(u16 cu_type,u8 cu_model)
+{
+	chandev_model_info *curr_model,*next_model;
+
+	for(curr_model=chandev_models_head;curr_model!=NULL;
+	    curr_model=next_model)
+	{
+		next_model=curr_model->next;
+		if(curr_model->cu_type==cu_type&&curr_model->cu_model==cu_model)
+			chandev_remove_model(curr_model);
+	}
+}
+
+/* Seed the model table with the control units known at the time of
+ * writing (3172/2216 LCS & CTC variants, OSA-2, OSA-D). */
+static void chandev_init_default_models(void)
+{
+ /* P390/Planter 3172 emulation assume maximum 16 to be safe. */
+ chandev_add_model(lcs,0x3088,0x1,15);
+
+ /* 3172/2216 Parallel: the 2216 allows 16 ports per card, the */
+ /* original 3172 only allows 4; we will assume the max of 16 */
+ chandev_add_model(lcs|ctc,0x3088,0x8,15);
+
+ /* 3172/2216 Escon serial: the 2216 allows 16 ports per card, the */
+ /* original 3172 only allows 4; we will assume the max of 16 */
+ chandev_add_model(lcs|escon,0x3088,0x1F,15);
+
+ /* Only 2 ports allowed on OSA2 cards model 0x60 */
+ chandev_add_model(lcs,0x3088,0x60,1);
+
+ /* Osa-D we currently aren't too emotionally involved with this */
+ chandev_add_model(osad,0x3088,0x62,0);
+}
+
+/* Append a newly detected device (devno + irq) to the device list,
+ * tagged with the model entry that matched it. */
+void chandev_add(dev_info_t *newdevinfo,chandev_model_info *newmodelinfo)
+{
+ chandev *new_chandev;
+
+ if((new_chandev=chandev_alloc_listmember(
+ (list **)&chandev_head,sizeof(chandev))))
+ {
+ new_chandev->model_info=newmodelinfo;
+ new_chandev->devno=newdevinfo->devno;
+ new_chandev->irq=newdevinfo->irq;
+ }
+}
+
+
+/* Walk every subchannel irq, read its sense-id data, and append each
+ * unowned device whose cu_type/cu_model matches a registered model to
+ * the detected-device list for later probing. */
+void chandev_collect_devices(void)
+{
+ int curr_irq,loopcnt=0,err;
+ dev_info_t curr_devinfo;
+ chandev_model_info *curr_model;
+
+
+ for(curr_irq=get_irq_first();curr_irq>=0; curr_irq=get_irq_next(curr_irq))
+ {
+ /* check read chandev
+ * we had to do the cu_model check also because ctc devices
+ * have the same cutype & after asking some people
+ * the model numbers are given out pseudo randomly so
+ * we can't just take a range of them also the dev_type & models are 0
+ */
+ /* defensive guard against a broken get_irq_next iterator */
+ loopcnt++;
+ if(loopcnt>0x10000)
+ {
+ printk(KERN_ERR"chandev_collect_devices detected infinite loop bug in get_irq_next\n");
+ break;
+ }
+ if((err=get_dev_info_by_irq(curr_irq,&curr_devinfo)))
+ {
+ printk("chandev_collect_devices get_dev_info_by_irq reported err=%X on irq %d\n"
+ "should not happen\n",err,curr_irq);
+ continue;
+ }
+ for(curr_model=chandev_models_head;curr_model!=NULL;
+ curr_model=curr_model->next)
+ {
+ if((curr_model->cu_type==curr_devinfo.sid_data.cu_type)&&
+ (curr_model->cu_model==curr_devinfo.sid_data.cu_model)
+ &&((curr_devinfo.status&DEVSTAT_DEVICE_OWNED)==0))
+ chandev_add(&curr_devinfo,curr_model);
+ }
+ }
+}
+
+/* Record a user-forced device configuration: interface number
+ * (devif_num, -1 = pick automatically), read/write subchannel devnos,
+ * relative adapter port, and the checksumming/statistics flags. */
+void chandev_add_force(chandev_type chan_type,s32 devif_num,u16 read_devno,
+u16 write_devno,s16 port_no,u8 do_ip_checksumming,u8 use_hw_stats)
+
+{
+ chandev_force *new_chandev_force;
+
+ if((new_chandev_force=chandev_alloc_listmember(
+ (list **)&chandev_force_head,sizeof(chandev_force))))
+ {
+ new_chandev_force->chan_type=chan_type;
+ new_chandev_force->devif_num=devif_num;
+ new_chandev_force->read_devno=read_devno;
+ new_chandev_force->write_devno=write_devno;
+ new_chandev_force->port_no=port_no;
+ new_chandev_force->do_ip_checksumming=do_ip_checksumming;
+ new_chandev_force->use_hw_stats=use_hw_stats;
+ }
+}
+
+/* Delete every forced entry whose read devno matches.
+ * chandev_free_listmember() kfree()s the node, so its successor is
+ * remembered before removal (the original advanced through the freed
+ * node). */
+void chandev_del_force(u16 read_devno)
+{
+	chandev_force *curr_force,*next_force;
+
+	for(curr_force=chandev_force_head;curr_force!=NULL;
+	    curr_force=next_force)
+	{
+		next_force=curr_force->next;
+		if(curr_force->read_devno==read_devno)
+			chandev_free_listmember((list **)&chandev_force_head,
+						(list *)curr_force);
+	}
+}
+
+/* Squeeze all whitespace out of str in place, compacting the argument
+ * string before keyword parsing. */
+void chandev_pack_args(char *str)
+{
+	char *dst;
+
+	for(dst=str;*str;str++)
+		if(!isspace(*str))
+			*dst++=*str;
+	*dst=0;
+}
+
+/* Result flags of chandev_strcmp(): whether the keyword matched
+ * (isstr) and what immediately followed it (a number, a comma). */
+typedef enum
+{
+ isnull=0,
+ isstr=1,
+ isnum=2,
+ iscomma=4,
+} chandev_strval;
+
+/* If *str begins with teststr, advance *str past it, parse an
+ * optional trailing number into *endlong, and report what was found
+ * as chandev_strval flag bits (isnull when the keyword didn't match). */
+chandev_strval chandev_strcmp(char *teststr,char **str,long *endlong)
+{
+ char *cur=*str;
+ chandev_strval retval=isnull;
+
+ int len=strlen(teststr);
+ if(strncmp(teststr,*str,len)==0)
+ {
+ *str+=len;
+ retval=isstr;
+ *endlong=simple_strtol(cur,str,0);
+ if(cur!=*str)
+ retval|=isnum;
+ if(**str==',')
+ retval|=iscomma;
+ }
+ return(retval);
+}
+
+/* Keyword table indexed by chandev_str_enum - the order of these
+ * entries must match that enum exactly. The original was missing the
+ * commas after "use_devno_names", "add_model" and "del_model", so
+ * adjacent string literals were concatenated: the array had only 7
+ * entries while chandev_setup() indexes up to 9, reading past the end. */
+static char *argstrs[]=
+{
+	"noauto",
+	"lcs",
+	"ctc",
+	"escon",
+	"del_force",
+	"use_devno_names",
+	"dont_use_devno_names",
+	"add_model",
+	"del_model",
+	"del_all_models"
+};
+
+/* Indexes into argstrs[] - keep in the same order as that table.
+ * stridx_mult spreads each index so chandev_strval flag bits can be
+ * or-ed into the same switch value in chandev_setup(). */
+typedef enum
+{
+ stridx_mult=16,
+ first_stridx=0,
+ noauto_stridx=first_stridx,
+ lcs_stridx,
+ ctc_stridx,
+ escon_stridx,
+ del_force_stridx,
+ use_devno_names_stridx,
+ dont_use_devno_names_stridx,
+ add_model_stridx,
+ del_model_stridx,
+ del_all_models_stridx,
+ last_stridx,
+} chandev_str_enum;
+
+/* Exclude the devno range [lo_devno,hi_devno] from autodetection. */
+void chandev_add_noauto(u16 lo_devno,u16 hi_devno)
+{
+ chandev_noauto_range *new_range;
+
+ if((new_range=chandev_alloc_listmember(
+ (list **)&chandev_noauto_head,sizeof(chandev_noauto_range))))
+ {
+ new_range->lo_devno=lo_devno;
+ new_range->hi_devno=hi_devno;
+ }
+}
+
+/* Legend for the chan_type bitfield shown in help and /proc output.
+ * NOTE(review): "lcs=0x4" appears twice and claw is listed as 0x16 -
+ * verify both against the chandev_type definition. */
+static char chandev_keydescript[]=
+"chan_type key bitfield\nctc=0x1,escon=0x2,lcs=0x4,lcs=0x4,osad=0x8,claw=0x16\n";
+
+/* Print usage help for the chandev= parameter syntax; called whenever
+ * chandev_setup() rejects an argument string. */
+static void chandev_print_args(void)
+{
+ printk("valid chandev arguments are"
+ "<> indicate optional parameters | indicate a choice.\n");
+ printk("noauto,<lo_devno>,<hi_devno>\n"
+ "don't probe a range of device numbers for channel devices\n");
+ printk("lcs|ctc|escon<devif_num>,read_devno,write_devno,<port_no>,"
+ "<do_ip_checksumming>,<use_hw_stats>\n");
+ printk("e.g. ctc0,0x7c00,0x7c01,-1,0,0\n");
+ printk(" tells the channel layer to force ctc0 if detected to use\n"
+ " cuu's 7c00 & 7c01 port ( rel adapter no ) is invalid for\n"
+ " ctc's so use -1 don't do checksumming on received ip\n"
+ " packets & as ctc doesn't have hardware stats ignore this\n"
+ " parameter\n\n");
+ printk("del_force read_devno\n"
+ "delete a forced channel device from force list.\n");
+ printk("use_devno_names, tells the channel layer to assign device\n"
+ "names based on the read channel cuu number\n"
+ "e.g. a token ring read channel 0x7c00 would have an interface"
+ "called tr0x7c00 this avoids name collisions on devices.");
+ printk("add_model chan_type cu_model max_port no\n"
+ "tells the channel layer to probe for the device described\n");
+ printk("%s use max_port_no of 0 for devices where this field "
+ "is invalid.\n",chandev_keydescript);
+ printk("del_model cu_type cu_model\n");
+ printk("del_all_models\n");
+}
+
+
+/*
+ * Parse a single "chandev=" argument clause (from the kernel command
+ * line or /etc/chandev.conf). Whitespace is squeezed out, the leading
+ * keyword is identified via argstrs[], optional comma-separated
+ * numbers are read with get_options(), and the matching configuration
+ * action is taken. Returns 1 on success, 0 (after printing usage) on
+ * bad arguments.
+ */
+static int chandev_setup(char *str)
+{
+	chandev_strval val=isnull;
+	chandev_str_enum stridx;
+	long endlong;
+	chandev_type chan_type;
+#define CHANDEV_MAX_EXTRA_INTS 5
+	int ints[CHANDEV_MAX_EXTRA_INTS+1];
+	memset(ints,0,sizeof(ints));
+	chandev_pack_args(str);
+	for(stridx=first_stridx;stridx<last_stridx;stridx++)
+		if((val=chandev_strcmp(argstrs[stridx],&str,&endlong)))
+			break;
+	if(val)
+	{
+		if(val&iscomma)
+			get_options(str,CHANDEV_MAX_EXTRA_INTS,ints);
+		else
+			ints[0]=0;
+		/* fold the keyword index and the suffix-kind flags into
+		 * one value so a single switch can dispatch on both */
+		val=(((chandev_strval)stridx)*stridx_mult)+(val&~isstr);
+		switch(val)
+		{
+		case noauto_stridx*stridx_mult:
+		case (noauto_stridx*stridx_mult)|iscomma:
+			switch(ints[0])
+			{
+			case 0:
+				/* bare "noauto": block every devno */
+				chandev_free_all((list **)&chandev_noauto_head);
+				chandev_add_noauto(0,0xffff);
+				break;
+			case 1:
+				/* one devno given: degenerate range */
+				ints[2]=ints[1];
+				/* fallthrough */
+			case 2:
+				chandev_add_noauto(ints[1],ints[2]);
+
+			}
+			break;
+		case (ctc_stridx*stridx_mult)|isnum|iscomma:
+		case (escon_stridx*stridx_mult)|isnum|iscomma:
+		case (lcs_stridx*stridx_mult)|isnum|iscomma:
+			switch(val)
+			{
+			case (ctc_stridx*stridx_mult)|isnum|iscomma:
+				chan_type=ctc;
+				break;
+			case (escon_stridx*stridx_mult)|isnum|iscomma:
+				chan_type=escon;
+				break;
+			case (lcs_stridx*stridx_mult)|isnum|iscomma:
+				chan_type=lcs;
+				break;
+			default:
+				goto BadArgs;
+			}
+			chandev_add_force(chan_type,endlong,ints[1],ints[2],
+					  ints[3],ints[4],ints[5]);
+			break;
+		case (del_force_stridx*stridx_mult)|iscomma:
+			if(ints[0]!=1)
+				goto BadArgs;
+			chandev_del_force(ints[1]);
+			break;
+		case (use_devno_names_stridx*stridx_mult):
+			use_devno_names=1;
+			break;
+		case (dont_use_devno_names_stridx*stridx_mult):
+			use_devno_names=0;
+			/* this break was missing - control fell through
+			 * into the add_model checks and hit BadArgs */
+			break;
+		case (add_model_stridx*stridx_mult)|iscomma:
+			if(ints[0]<3)
+				goto BadArgs;
+			if(ints[0]==3)
+			{
+				/* max_port_no omitted: default to -1 */
+				ints[0]=4;
+				ints[4]=-1;
+			}
+			chandev_add_model(ints[1],ints[2],ints[3],ints[4]);
+			break;
+		case (del_model_stridx*stridx_mult)|iscomma:
+			if(ints[0]!=2)
+				goto BadArgs;
+			chandev_del_model(ints[1],ints[2]);
+			break;
+		case del_all_models_stridx*stridx_mult:
+			chandev_remove_all_models();
+			break;
+		default:
+			goto BadArgs;
+		}
+	}
+	return(1);
+ BadArgs:
+	chandev_print_args();
+	return(0);
+}
+
+__setup("chandev=",chandev_setup);
+
+/* Offer a read/write channel pair to every registered probe function
+ * whose chan_type mask matches the devices' model. force, when
+ * non-NULL, carries user-specified overrides (interface number, port,
+ * checksumming, stats). Returns 0 when a driver claimed the pair,
+ * -1 when the pair is inconsistent or nobody claimed it. */
+int chandev_doprobe(chandev_force *force,chandev *read_chandev,
+chandev *write_chandev)
+{
+ chandev_probelist *probe;
+ chandev_model_info *model_info;
+ chandev_probeinfo probeinfo;
+ int retval=-1,hint=-1;
+
+ model_info=read_chandev->model_info;
+ if(read_chandev->model_info!=write_chandev->model_info||
+ (force&&((force->chan_type&model_info->chan_type)==0)))
+ return(-1); /* inconsistent */
+ for(probe=chandev_probelist_head;
+ probe!=NULL;
+ probe=probe->next)
+ {
+ if(probe->chan_type&model_info->chan_type)
+ {
+ /* name the interface after the read devno if requested */
+ if(use_devno_names)
+ probeinfo.devif_num=read_chandev->devno;
+ else
+ probeinfo.devif_num=-1;
+ probeinfo.read_irq=read_chandev->irq;
+ probeinfo.write_irq=write_chandev->irq;
+
+ probeinfo.max_port_no=model_info->max_port_no;
+ if(force)
+ {
+ probeinfo.forced_port_no=force->port_no;
+ if(force->devif_num!=-1)
+ probeinfo.devif_num=force->devif_num;
+ probeinfo.do_ip_checksumming=force->do_ip_checksumming;
+ probeinfo.use_hw_stats=force->use_hw_stats;
+
+ }
+ else
+ {
+ probeinfo.forced_port_no=-1;
+ probeinfo.do_ip_checksumming=FALSE;
+ probeinfo.use_hw_stats=FALSE;
+ /* guess the lcs port from the devno's low bits */
+ if(probe->chan_type&lcs)
+ {
+ hint=(read_chandev->devno&0xFF)>>1;
+ if(hint>model_info->max_port_no)
+ {
+ /* The card is possibly emulated e.g P/390 */
+ /* or possibly configured to use a shared */
+ /* port configured by osa-sf. */
+ hint=0;
+ }
+ }
+ }
+ probeinfo.hint_port_no=hint;
+ retval=probe->probefunc(&probeinfo);
+ if(retval==0)
+ break;
+ }
+ }
+ return(retval);
+}
+
+/*
+ * Run one full probe pass: collect all candidate devices, satisfy the
+ * user-forced read/write pairs first, discard devices in "noauto"
+ * ranges, autoprobe adjacent devices sharing a model, and finally
+ * throw away whatever remained unclaimed.
+ */
+void chandev_probe(void)
+{
+	chandev *read_chandev,*write_chandev,*curr_chandev,*next_chandev;
+	chandev_force *curr_force;
+	chandev_noauto_range *curr_noauto;
+
+	chandev_collect_devices();
+	/* honour explicitly forced read/write devno pairs first */
+	for(curr_force=chandev_force_head;curr_force!=NULL;
+	    curr_force=curr_force->next)
+	{
+		for(read_chandev=chandev_head;
+		    read_chandev!=NULL;
+		    read_chandev=read_chandev->next)
+			if(read_chandev->devno==curr_force->read_devno)
+			{
+				for(write_chandev=chandev_head;
+				    write_chandev!=NULL;
+				    write_chandev=write_chandev->next)
+					if(write_chandev->devno==
+					   curr_force->write_devno)
+					{
+						if(chandev_doprobe(curr_force,
+								   read_chandev,
+								   write_chandev)==0)
+						{
+							chandev_remove(read_chandev);
+							chandev_remove(write_chandev);
+							goto chandev_probe_skip;
+						}
+					}
+			}
+	chandev_probe_skip: ;	/* a label must be followed by a statement */
+	}
+	/* drop devices inside "noauto" ranges; chandev_remove() frees
+	 * the node, so its successor is fetched beforehand (the
+	 * original walked ->next of the freed node) */
+	for(curr_chandev=chandev_head;
+	    curr_chandev!=NULL;
+	    curr_chandev=next_chandev)
+	{
+		next_chandev=curr_chandev->next;
+		for(curr_noauto=chandev_noauto_head;curr_noauto!=NULL;
+		    curr_noauto=curr_noauto->next)
+		{
+			if(curr_chandev->devno>=curr_noauto->lo_devno&&
+			   curr_chandev->devno<=curr_noauto->hi_devno)
+			{
+				chandev_remove(curr_chandev);
+				break;
+			}
+		}
+	}
+	/* autoprobe adjacent devices sharing the same model as a
+	 * read/write pair */
+	for(curr_chandev=chandev_head;curr_chandev!=NULL;
+	    curr_chandev=curr_chandev->next)
+	{
+		if(curr_chandev->next&&curr_chandev->model_info==
+		   curr_chandev->next->model_info)
+		{
+
+			chandev_doprobe(NULL,curr_chandev,curr_chandev->next);
+			curr_chandev=curr_chandev->next;
+		}
+	}
+	chandev_remove_all();
+}
+
+/* Split a configuration buffer into lines (NL/CR/NUL terminated),
+ * strip '#' comments, and feed each non-empty line to chandev_setup().
+ * Writes a terminating 0 at buff[size], so the buffer must be at least
+ * size+1 bytes. Returns the last chandev_setup() result (1 ok, 0 on
+ * the first parse error, which stops processing). */
+int chandev_do_setup(char *buff,int size)
+{
+ int curr,startline=0,comment=FALSE,newline=FALSE,oldnewline=TRUE;
+ int rc=1;
+
+ buff[size]=0;
+ for(curr=0;curr<=size;curr++)
+ {
+ if(buff[curr]=='#')
+ {
+ comment=TRUE;
+ newline=FALSE;
+ }
+ else if(buff[curr]==10||buff[curr]==13||buff[curr]==0)
+ {
+ buff[curr]=0;
+ comment=FALSE;
+ newline=TRUE;
+ }
+ /* a line starts where the previous char was a terminator */
+ if(comment==FALSE&&curr>startline
+ &&((oldnewline==TRUE&&newline==FALSE)||curr==size))
+ {
+ if((rc=chandev_setup(&buff[startline]))==0)
+ break;
+ startline=curr+1;
+ }
+ oldnewline=newline;
+ }
+ return(rc);
+}
+/* Read /etc/chandev.conf using in-kernel syscalls (hence the set_fs
+ * address-space switching) and run its contents through
+ * chandev_do_setup(). The file is optional; failures are ignored.
+ * NOTE(review): if open() or read() fails the (uninitialized) buffer
+ * is still passed to chandev_do_setup() with the full st_size -
+ * confirm this cannot parse garbage. */
+void chandev_read_conf(void)
+{
+#define CHANDEV_FILE "/etc/chandev.conf"
+ struct stat statbuf;
+ char *buff;
+ int curr,left,len,fd;
+
+ chandev_conf_read=TRUE;
+ set_fs(KERNEL_DS);
+ if(stat(CHANDEV_FILE,&statbuf)==0)
+ {
+ set_fs(USER_DS);
+ buff=vmalloc(statbuf.st_size+1);
+ if(buff)
+ {
+ set_fs(KERNEL_DS);
+ if((fd=open(CHANDEV_FILE,O_RDONLY,0))!=-1)
+ {
+ curr=0;
+ left=statbuf.st_size;
+ while((len=read(fd,&buff[curr],left))>0)
+ {
+ curr+=len;
+ left-=len;
+ }
+ close(fd);
+ }
+ set_fs(USER_DS);
+ chandev_do_setup(buff,statbuf.st_size);
+ vfree(buff);
+ }
+ }
+ set_fs(USER_DS);
+}
+
+/* Driver entry point: record a probe callback for the given channel
+ * types (reading /etc/chandev.conf the first time any driver
+ * registers) and immediately run a probe pass. */
+void chandev_register_and_probe(chandev_probefunc probefunc,chandev_type chan_type)
+{
+ chandev_probelist *new_probe;
+ if(!chandev_conf_read)
+ chandev_read_conf();
+ if((new_probe=chandev_alloc_listmember((list **)&
+ chandev_probelist_head,sizeof(chandev_probelist))))
+ {
+ new_probe->probefunc=probefunc;
+ new_probe->chan_type=chan_type;
+ chandev_probe();
+ }
+}
+
+/* Remove a driver's probe callback again.
+ * chandev_free_listmember() kfree()s the node, so its successor is
+ * fetched before removal (the original advanced through the freed
+ * node). */
+void chandev_unregister(chandev_probefunc probefunc)
+{
+	chandev_probelist *curr_probe,*next_probe;
+
+	for(curr_probe=chandev_probelist_head;curr_probe!=NULL;
+	    curr_probe=next_probe)
+	{
+		next_probe=curr_probe->next;
+		if(curr_probe->probefunc==probefunc)
+			chandev_free_listmember((list **)&chandev_probelist_head,
+						(list *)curr_probe);
+	}
+}
+
+
+#ifdef CONFIG_PROC_FS
+/* sprintf into the proc page while tracking offset/length so a read
+ * can resume across calls; jumps to the given label once the
+ * requested length has been produced. Relies on spbuff/spoffset/
+ * splen/currlen/offset/length being in scope at the expansion site. */
+#define chandev_printf(exitchan,args...) \
+splen=sprintf(spbuff,##args); \
+spoffset+=splen; \
+if(spoffset>offset) { \
+ spbuff+=splen; \
+ currlen+=splen; \
+} \
+if(currlen>=length) \
+ goto exitchan;
+
+
+
+/* /proc/chandev read handler: dump the model table, the noauto devno
+ * ranges and the forced-device list via the chandev_printf()
+ * pagination macro above. */
+static int chandev_read_proc(char *page, char **start, off_t offset,
+ int length, int *eof, void *data)
+{
+ char *spbuff=*start=page;
+ int currlen=0,splen;
+ off_t spoffset=0;
+ chandev_model_info *curr_model;
+ chandev_noauto_range *curr_noauto;
+ chandev_force *curr_force;
+
+
+ chandev_printf(chan_exit,"Channels enabled for detection\n");
+ chandev_printf(chan_exit,"chan_type cu_type cu_model max_port_no\n");
+ chandev_printf(chan_exit,"=================================================\n");
+ for(curr_model=chandev_models_head;curr_model!=NULL;
+ curr_model=curr_model->next)
+ {
+ chandev_printf(chan_exit,"0x%02x 0x%04x 0x%02x %d\n",
+ curr_model->chan_type,(int)curr_model->cu_type,
+ (int)curr_model->cu_model,(int)curr_model->max_port_no);
+ }
+
+ chandev_printf(chan_exit,"%s",chandev_keydescript);
+ chandev_printf(chan_exit,"No auto devno ranges\n");
+ chandev_printf(chan_exit," From To \n");
+ chandev_printf(chan_exit,"====================\n");
+ for(curr_noauto=chandev_noauto_head;curr_noauto!=NULL;
+ curr_noauto=curr_noauto->next)
+ {
+ chandev_printf(chan_exit,"0x%4x 0x%4x\n",
+ curr_noauto->lo_devno,
+ curr_noauto->hi_devno);
+ }
+ chandev_printf(chan_exit,"\nForced devices\n");
+ chandev_printf(chan_exit,"chan_type defif_num read_devno write_devno port_no ip_cksum hw_stats\n");
+ chandev_printf(chan_exit,"====================================================================\n");
+ for(curr_force=chandev_force_head;curr_force!=NULL;
+ curr_force=curr_force->next)
+ {
+ chandev_printf(chan_exit,"0x%2x %d 0x%4x 0x%4x %4d %1d %1d\n",
+ curr_force->chan_type,curr_force->devif_num,
+ curr_force->read_devno,curr_force->write_devno,
+ curr_force->port_no,curr_force->do_ip_checksumming,
+ curr_force->use_hw_stats);
+ }
+ *eof=TRUE;
+ chan_exit:
+ if(currlen>length) {
+ /* rewind to previous printf so that we are correctly
+ * aligned if we get called to print another page.
+ */
+ currlen-=splen;
+ }
+ return(currlen);
+}
+
+
+/*
+ * /proc/chandev write handler: copy the user buffer into kernel
+ * memory and feed it to chandev_do_setup() as if it were
+ * chandev.conf content. Returns count on success, -ENOMEM or
+ * -EFAULT on failure.
+ */
+static int chandev_write_proc(struct file *file, const char *buffer,
+			   unsigned long count, void *data)
+{
+	int rc;
+	char *buff;
+
+	buff=vmalloc(count+1);
+	if(!buff)
+		return -ENOMEM;
+	if(copy_from_user(buff,buffer,count))
+	{
+		/* copy_from_user returns the number of bytes NOT copied;
+		 * the original returned that positive value, which the
+		 * caller would mistake for a short successful write -
+		 * report -EFAULT instead */
+		rc=-EFAULT;
+		goto chandev_write_exit;
+	}
+	chandev_do_setup(buff,count);
+	rc=count;
+ chandev_write_exit:
+	vfree(buff);
+	return rc;
+}
+
+/* Create /proc/chandev (mode 0644) and attach the read/write
+ * handlers; silently does nothing if the entry cannot be created. */
+static void __init chandev_create_proc(void)
+{
+ struct proc_dir_entry *dir_entry=
+ create_proc_entry("chandev",0644,
+ &proc_root);
+ if(dir_entry)
+ {
+ dir_entry->read_proc=&chandev_read_proc;
+ dir_entry->write_proc=&chandev_write_proc;
+ }
+}
+
+
+#endif
+/* Boot-time initialization: seed the default model table and, when
+ * procfs is configured, create /proc/chandev. */
+static int __init chandev_init(void)
+{
+	chandev_init_default_models();
+#ifdef CONFIG_PROC_FS	/* was "#if": match the #ifdef guarding chandev_create_proc */
+	chandev_create_proc();
+#endif
+	return(0);
+}
+__initcall(chandev_init);
+
+
+
+
+
+
+
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000..2c2a182b6
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,16 @@
+all: s390-net.o
+
+# was misspelled "CFLAFS", which make silently treated as an unrelated
+# (unused) variable
+CFLAGS +=
+O_TARGET := s390-net.o
+O_OBJS :=
+M_OBJS :=
+
+ifeq ($(CONFIG_CTC),y)
+  O_OBJS += ctc.o
+endif
+
+ifeq ($(CONFIG_IUCV),y)
+  O_OBJS += iucv.o
+endif
+
+include $(TOPDIR)/Rules.make
diff --git a/drivers/s390/net/ctc.c b/drivers/s390/net/ctc.c
new file mode 100644
index 000000000..4acc6e543
--- /dev/null
+++ b/drivers/s390/net/ctc.c
@@ -0,0 +1,1581 @@
+/*
+ * drivers/s390/net/ctc.c
+ * CTC / ESCON network driver
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Dieter Wellerdiek (wel@de.ibm.com)
+ *
+ * 2.3 Updates Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ *
+ *
+ * Description of the Kernel Parameter
+ * Normally the CTC driver selects the channels in order (automatic channel
+ * selection). If your installation needs to use the channels in a different
+ * order or doesn't want to have automatic channel selection on, you can do
+ * this with the "ctc= kernel keyword".
+ *
+ * ctc=0,0xrrrr,0xwwww,dddd
+ *
+ * Where:
+ *
+ * "rrrr" is the read channel address
+ * "wwww" is the write channel address
+ * "dddd" is the network device (ctc0 to ctc7 for a parallel channel, escon0
+ * to escon7 for ESCON channels).
+ *
+ * To switch the automatic channel selection off use the ctc= keyword with
+ * parameter "noauto". This may be necessary if you have 3271 devices or other devices
+ * which use the ctc device type and model, but operate with a different protocol.
+ *
+ * ctc=noauto
+ *
+ * Change History
+ * 0.50 Initial release shipped
+ * 0.51 Bug fixes
+ * - CTC / ESCON network device can now handle up to 64 channels
+ *   - 3088-61 info message suppressed - CISCO 7206 - CLAW - ESCON
+ * - 3088-62 info message suppressed - OSA/D
+ * - channel: def ffffffed ... error message suppressed
+ * - CTC / ESCON device was not recoverable after a lost connection with
+ * IFCONFIG dev DOWN and IFCONFIG dev UP
+ * - Possibility to switch the automatic selection off
+ * - Minor bug fixes
+ */
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/bitops.h>
+
+#include <asm/irq.h>
+
+
+//#define DEBUG
+
+/* Redefine message level, so that all messages occur on 3215 console in DEBUG mode */
+#ifdef DEBUG
+ #undef KERN_INFO
+ #undef KERN_WARNING
+ #undef KERN_DEBUG
+ #define KERN_INFO KERN_EMERG
+ #define KERN_WARNING KERN_EMERG
+ #define KERN_DEBUG KERN_EMERG
+#endif
+//#undef DEBUG
+
+#define CCW_CMD_WRITE 0x01
+#define CCW_CMD_READ 0x02
+#define CCW_CMD_SET_EXTENDED 0xc3
+#define CCW_CMD_PREPARE 0xe3
+
+#define MAX_CHANNEL_DEVICES 64
+#define MAX_ADAPTERS 8
+#define CTC_DEFAULT_MTU_SIZE 1500
+#define READ 0
+#define WRITE 1
+#define CTC 0
+#define ESCON 1
+#define CHANNEL_MEDIA 2
+#define CTC_BLOCKS 8 /* 8 blocks * 2 times * 64k = 1M */
+
+#define TB_TX 0 /* sk buffer handling in process */
+#define TB_STOP 1 /* network device stop in process */
+#define TB_RETRY 2 /* retry in process */
+#define TB_NOBUFFER 3 /* no buffer on free queue */
+
+/* state machine codes used in ctc_irq_handler */
+#define CTC_STOP 0
+#define CTC_START_HALT_IO 1
+#define CTC_START_SET_X_MODE 2
+#define CTC_START_SELECT 4
+#define CTC_START_READ_TEST 32
+#define CTC_START_READ 33
+#define CTC_START_WRITE_TEST 64
+#define CTC_START_WRITE 65
+
+
+/* Result of probing a device's sense id; see channel_check_for_type(). */
+typedef enum {
+	channel_type_none,		/* Device is not a channel */
+	channel_type_undefined,	/* Device is a channel but we don't know anything about it */
+	channel_type_ctca,		/* Device is a CTC/A and we can deal with it */
+	channel_type_escon,		/* Device is a ESCON channel and we can deal with it */
+	channel_type_unsupported	/* Device is a unsupported model */
+} channel_type_t;
+
+
+
+/*
+ * Structures needed in the initial phase
+ *
+ */
+
+static int channel_tab_initialized = 0;	/* channel[] structure initialized */
+
+/* one detected channel device */
+struct devicelist {
+	unsigned int devno;
+	__u8 flag;
+#define CHANNEL_IN_USE 0x08	/* - Show that channel is in use */
+};
+
+/* per-media (parallel CTC vs. ESCON) table of detected channel devices */
+static struct {
+	struct devicelist list[MAX_CHANNEL_DEVICES];
+	int count;	/* valid entries in list[] */
+	int left;	/* entries not yet claimed by an interface */
+} channel[CHANNEL_MEDIA];
+
+
+
+/* set to 1 by the "ctc=" kernel parameter: disables automatic channel selection */
+static int ctc_no_auto = 0;
+
+/* read/write device numbers and protocol configured for one interface */
+struct adapterlist{
+	unsigned int devno[2];
+	__u16 protocol;
+};
+
+static struct adapterlist ctc_adapter[CHANNEL_MEDIA][MAX_ADAPTERS];	/* 0 = CTC / 1 = ESCON */
+
+
+/*
+ * Structure used after the initial phase
+ *
+ */
+
+/* descriptor for one data block, linked into the free/processing lists */
+struct buffer {
+	struct buffer *next;
+	int packets;		/* packets packed into this block */
+	struct block *block;	/* the DMA-capable data area */
+};
+
+/* compatibility shims so the same source builds on 2.2 and 2.3 kernels */
+#if LINUX_VERSION_CODE>=0x020300
+typedef struct net_device net_device;
+#else
+typedef struct device net_device;
+typedef struct wait_queue* wait_queue_head_t;
+#define DECLARE_WAITQUEUE(waitqname,waitqtask) struct wait_queue waitqname = {waitqtask, NULL }
+#define init_waitqueue_head(nothing)
+#endif
+
+
+/* per-subchannel state: one instance each for the read and write side */
+struct channel {
+	unsigned int devno;		/* subchannel device number */
+	int irq;			/* irq/subchannel index passed to do_IO */
+	unsigned long IO_active;	/* bit 0 set while a channel program runs */
+	ccw1_t ccw[3];			/* prepare / read-or-write / noop */
+	__u32 state;			/* CTC_* state machine value */
+	int buffer_count;		/* buffers allocated for this channel */
+	struct buffer *free_anchor;	/* idle buffers */
+	struct buffer *proc_anchor;	/* buffers being processed */
+	devstat_t *devstat;		/* device status area for the irq layer */
+	net_device *dev; /* backward pointer to the network device */
+	wait_queue_head_t wait;		/* ctc_open/ctc_release sleep here */
+	struct tq_struct tq;		/* bottom half (ctc_irq_bh) */
+	struct timer_list timer;	/* retry timer for lost connections */
+	unsigned long flag_a;    /* atomic flags */
+#define CTC_BH_ACTIVE 0
+	__u8 last_dstat;		/* device status of the last interrupt */
+	__u8 flag;
+#define CTC_WRITE 0x01	/* - Set if this is a write channel */
+#define CTC_TIMER 0x80	/* - Set if timer made the wake_up */
+};
+
+
+/* per-interface private data: statistics plus one read and one write channel */
+struct ctc_priv {
+	struct net_device_stats stats;
+#if LINUX_VERSION_CODE>=0x02032D
+	int tbusy;
+#endif
+	struct channel channel[2];
+	__u16 protocol;
+};
+
+/*
+ * This structure works as shuttle between two systems
+ *  - A block can contain one or more packets
+ */
+
+#define PACKET_HEADER_LENGTH 6
+struct packet {
+	__u16 length;	/* total packet length including this header */
+	__u16 type;
+	__u16 unused;
+	__u8 data;	/* first payload byte; the rest follows in memory */
+};
+
+#define BLOCK_HEADER_LENGTH 2
+struct block {
+	__u16 length;		/* used bytes in the block including header */
+	struct packet data;	/* first packet of the block */
+};
+
+/*
+ * tbusy handling differs between kernel generations: on >= 2.3.45 the
+ * busy flag lives in ctc_priv and is paired with netif_{start,stop}_queue
+ * (protected by the write channel's irq lock); on older kernels
+ * dev->tbusy is manipulated directly and the protect macros are no-ops.
+ */
+#if LINUX_VERSION_CODE>=0x02032D
+#define ctc_protect_busy(dev) \
+s390irq_spin_lock(((struct ctc_priv *)dev->priv)->channel[WRITE].irq)
+#define ctc_unprotect_busy(dev) \
+s390irq_spin_unlock(((struct ctc_priv *)dev->priv)->channel[WRITE].irq)
+
+#define ctc_protect_busy_irqsave(dev,flags) \
+s390irq_spin_lock_irqsave(((struct ctc_priv *)dev->priv)->channel[WRITE].irq,flags)
+#define ctc_unprotect_busy_irqrestore(dev,flags) \
+s390irq_spin_unlock_irqrestore(((struct ctc_priv *)dev->priv)->channel[WRITE].irq,flags)
+
+/* mark the whole transmit side busy and stop the queue */
+static __inline__ void ctc_set_busy(net_device *dev)
+{
+	((struct ctc_priv *)dev->priv)->tbusy=1;
+	netif_stop_queue(dev);
+}
+
+/* clear all busy bits and restart the queue */
+static __inline__ void ctc_clear_busy(net_device *dev)
+{
+	((struct ctc_priv *)dev->priv)->tbusy=0;
+	netif_start_queue(dev);
+}
+
+static __inline__ int ctc_check_busy(net_device *dev)
+{
+	eieio();
+	return(((struct ctc_priv *)dev->priv)->tbusy);
+}
+
+
+/* set one TB_* busy bit; the queue stays stopped while any bit is set */
+static __inline__ void ctc_setbit_busy(int nr,net_device *dev)
+{
+	set_bit(nr,&(((struct ctc_priv *)dev->priv)->tbusy));
+	netif_stop_queue(dev);
+}
+
+/* clear one TB_* busy bit; restart the queue only when none remain */
+static __inline__ void ctc_clearbit_busy(int nr,net_device *dev)
+{
+	clear_bit(nr,&(((struct ctc_priv *)dev->priv)->tbusy));
+	if(((struct ctc_priv *)dev->priv)->tbusy==0)
+		netif_start_queue(dev);
+}
+
+static __inline__ int ctc_test_and_setbit_busy(int nr,net_device *dev)
+{
+	netif_stop_queue(dev);
+	return(test_and_set_bit(nr,&((struct ctc_priv *)dev->priv)->tbusy));
+}
+#else
+
+#define ctc_protect_busy(dev)
+#define ctc_unprotect_busy(dev)
+#define ctc_protect_busy_irqsave(dev,flags)
+#define ctc_unprotect_busy_irqrestore(dev,flags)
+
+static __inline__ void ctc_set_busy(net_device *dev)
+{
+	dev->tbusy=1;
+	eieio();
+}
+
+static __inline__ void ctc_clear_busy(net_device *dev)
+{
+	dev->tbusy=0;
+	eieio();
+}
+
+static __inline__ int ctc_check_busy(net_device *dev)
+{
+	eieio();
+	return(dev->tbusy);
+}
+
+
+static __inline__ void ctc_setbit_busy(int nr,net_device *dev)
+{
+	set_bit(nr,(void *)&dev->tbusy);
+}
+
+static __inline__ void ctc_clearbit_busy(int nr,net_device *dev)
+{
+	clear_bit(nr,(void *)&dev->tbusy);
+}
+
+static __inline__ int ctc_test_and_setbit_busy(int nr,net_device *dev)
+{
+	return(test_and_set_bit(nr,(void *)&dev->tbusy));
+}
+#endif
+
+
+
+
+
+/* Interrupt handler */
+static void ctc_irq_handler(int irq, void *initparm, struct pt_regs *regs);
+static void ctc_irq_bh(struct channel *ctc);
+static void ctc_read_retry (struct channel *ctc);
+static void ctc_write_retry (struct channel *ctc);
+
+
+/* Functions for the DEV methods */
+int ctc_probe(net_device *dev);
+
+
+static int ctc_open(net_device *dev);
+static void ctc_timer (struct channel *ctc);
+static int ctc_release(net_device *dev);
+static int ctc_tx(struct sk_buff *skb, net_device *dev);
+static int ctc_change_mtu(net_device *dev, int new_mtu);
+struct net_device_stats* ctc_stats(net_device *dev);
+
+
+/*
+ * Channel Routines
+ *
+ */
+
+static void channel_init(void);
+static void channel_scan(void);
+static int channel_get(int media, int devno);
+static int channel_get_next(int media);
+static int channel_free(int media, int devno);
+static channel_type_t channel_check_for_type (senseid_t *id);
+static void channel_sort(struct devicelist list[], int n);
+
+
+/*
+ * initialize the channel[].list
+ * Runs exactly once (guarded by channel_tab_initialized): scans all irqs
+ * for channel devices, sorts the per-media lists by devno and reports
+ * what was found.
+ */
+static void channel_init(void)
+{
+	int m;
+#ifdef DEBUG
+	int c;
+#endif
+
+	/* atomic test-and-set guard: only the first caller performs the scan */
+	if (!test_and_set_bit(0, (void *)& channel_tab_initialized)){
+		channel_scan();
+		for (m = 0; m < CHANNEL_MEDIA; m++) {
+			channel_sort (channel[m].list, MAX_CHANNEL_DEVICES);
+			channel[m].left = channel[m].count;
+		}
+		if (channel[CTC].count == 0 && channel[ESCON].count == 0)
+			printk(KERN_INFO "channel: no Channel devices recognized\n");
+		else
+			printk(KERN_INFO "channel: %d Parallel channel found - %d ESCON channel found\n",
+			       channel[CTC].count, channel[ESCON].count);
+#ifdef DEBUG
+		for (m = 0; m < CHANNEL_MEDIA; m++) {
+			for (c = 0; c < MAX_CHANNEL_DEVICES; c++){
+				printk(KERN_DEBUG "channel: Adapter=%x Entry=%x devno=%04x\n",
+				       m, c, channel[m].list[c].devno);
+			}
+		}
+#endif
+	}
+}
+
+
+/*
+ * scan for all channels and put the device numbers into the channel[].list
+ * Every irq is probed with get_dev_info(); devices whose sense id
+ * identifies them as CTC/A or ESCON are recorded per media type.
+ */
+static void channel_scan(void)
+{
+	int m;
+	int c;
+	int irq;
+	dev_info_t temp;
+
+	/* mark every slot invalid before probing */
+	for (m = 0; m < CHANNEL_MEDIA; m++) {
+		for (c = 0; c < MAX_CHANNEL_DEVICES; c++){
+			channel[m].list[c].devno = -ENODEV;
+		}
+	}
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		/* CTC/A */
+		if (channel[CTC].count < MAX_CHANNEL_DEVICES ) {
+			if (get_dev_info(irq, &temp) == 0 &&
+			    channel_check_for_type(&temp.sid_data) == channel_type_ctca) {
+				channel[CTC].list[channel[CTC].count].devno = temp.devno;
+				channel[CTC].count++;
+			}
+		}
+
+		/* ESCON */
+		if (channel[ESCON].count < MAX_CHANNEL_DEVICES ) {
+			if (get_dev_info(irq, &temp) == 0 &&
+			    channel_check_for_type(&temp.sid_data) == channel_type_escon) {
+				channel[ESCON].list[channel[ESCON].count].devno = temp.devno;
+				channel[ESCON].count++;
+
+			}
+		}
+	}
+}
+
+
+/*
+ * Release a previously claimed channel device.
+ * Returns 0 on success, -ENODEV if devno is unknown or not in use.
+ */
+static int channel_free(int media, int devno)
+{
+	int idx;
+
+	for (idx = 0; idx < channel[media].count; idx++) {
+		struct devicelist *entry = &channel[media].list[idx];
+
+		if (entry->devno == devno && (entry->flag & CHANNEL_IN_USE)) {
+			entry->flag &= ~CHANNEL_IN_USE;
+			return 0;
+		}
+	}
+	printk(KERN_WARNING "channel: dev %04x is not a channel or in use\n", devno);
+	return -ENODEV;
+}
+
+
+/*
+ * Claim a specific channel device for exclusive use.
+ * Returns its device number on success, -ENODEV if unknown or already claimed.
+ */
+static int channel_get(int media, int devno)
+{
+	int idx;
+
+	for (idx = 0; idx < channel[media].count; idx++) {
+		struct devicelist *entry = &channel[media].list[idx];
+
+		if (entry->devno == devno && !(entry->flag & CHANNEL_IN_USE)) {
+			entry->flag |= CHANNEL_IN_USE;
+			return entry->devno;
+		}
+	}
+	printk(KERN_WARNING "channel: dev %04x is not a channel or in use\n", devno);
+	return -ENODEV;
+}
+
+
+/*
+ * Claim the next unused channel device of the given media type.
+ * Returns its device number, or -ENODEV when none is left.
+ */
+static int channel_get_next(int media)
+{
+	int idx;
+
+	for (idx = 0; idx < channel[media].count; idx++) {
+		struct devicelist *entry = &channel[media].list[idx];
+
+		if ((entry->flag & CHANNEL_IN_USE) == 0x00) {
+#ifdef DEBUG
+			printk(KERN_DEBUG "channel: picked=%04x\n", entry->devno);
+#endif
+			entry->flag |= CHANNEL_IN_USE;
+			return entry->devno;
+		}
+	}
+	return -ENODEV;
+}
+
+
+/*
+ * number of channel devices of this media type still unclaimed
+ * (the old comment said "picks the next free channel", which is what
+ * channel_get_next() does; this function only reports the counter)
+ */
+static int channel_left(int media)
+{
+	return channel[media].left;
+}
+
+
+/*
+ * Classify a device from its sense id. Only control unit type 0x3088 is
+ * a channel at all; the model digit decides whether it is a CTC/A, an
+ * ESCON channel, a known-unsupported device, or something unknown.
+ */
+static channel_type_t channel_check_for_type (senseid_t *id)
+{
+	channel_type_t result;
+
+	if (id->cu_type != 0x3088)
+		return channel_type_none;
+
+	switch (id->cu_model) {
+	case 0x08:		/* 3088-08 ==> CTCA */
+		result = channel_type_ctca;
+		break;
+
+	case 0x1F:		/* 3088-1F ==> ESCON channel */
+		result = channel_type_escon;
+		break;
+
+	case 0x01:		/* 3088-01 ==> P390 OSA emulation */
+	case 0x60:		/* 3088-60 ==> OSA/2 adapter */
+	case 0x61:		/* 3088-61 ==> CISCO 7206 CLAW protocol ESCON connected */
+	case 0x62:		/* 3088-62 ==> OSA/D device */
+		result = channel_type_unsupported;
+		break;
+
+	default:
+		result = channel_type_undefined;
+		printk(KERN_INFO "channel: Unknown model found 3088-%02x\n",id->cu_model);
+	}
+	return result;
+}
+
+
+/*
+ * Sort a devicelist array ascending by device number.
+ * Plain bubble sort; n is at most MAX_CHANNEL_DEVICES, so this is cheap.
+ */
+static void channel_sort(struct devicelist list[], int n)
+{
+	int swapped = 1;
+	int i;
+
+	while (swapped) {
+		swapped = 0;
+		for (i = 0; i + 1 < n; i++) {
+			if (list[i].devno > list[i+1].devno) {
+				struct devicelist tmp = list[i];
+
+				list[i] = list[i+1];
+				list[i+1] = tmp;
+				swapped = 1;
+			}
+		}
+	}
+}
+
+
+/*
+ * General routines
+ *
+ */
+
+/*
+ * Derive the adapter index from the interface name:
+ * "ctcN" -> N (digit at offset 3), "esconN" -> N (digit at offset 5).
+ */
+static int inline extract_channel_id(char *name)
+{
+	int pos = (name[0] == 'c') ? 3 : 5;
+
+	return name[pos] - '0';
+}
+
+
+/* "ctc*" names are parallel-channel devices (CTC); everything else is ESCON. */
+static int inline extract_channel_media(char *name)
+{
+	return (name[0] == 'c') ? CTC : ESCON;
+}
+
+
+/*
+ * One-time initialisation of the ctc_adapter table: mark every
+ * read/write slot as "no device assigned" (-ENODEV).
+ */
+static void ctc_tab_init(void)
+{
+	static int done;
+	int media;
+	int idx;
+
+	if (done)
+		return;
+	done = 1;
+
+	for (media = 0; media < CHANNEL_MEDIA; media++) {
+		for (idx = 0; idx < MAX_ADAPTERS; idx++) {
+			ctc_adapter[media][idx].devno[WRITE] = -ENODEV;
+			ctc_adapter[media][idx].devno[READ] = -ENODEV;
+		}
+	}
+}
+
+
+/*
+ * Allocate one transfer buffer (descriptor plus 2^4 pages of DMA-capable
+ * data) and append it to the channel's free list.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int ctc_buffer_alloc(struct channel *ctc) {
+
+	struct buffer *p;
+	struct buffer *q;
+
+	/* BUGFIX: was kmalloc(sizeof(p), ...), which only allocated room
+	 * for a pointer instead of a struct buffer. */
+	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	p->next = NULL;
+	p->packets = 0;
+	/* flags are a bitmask: use | rather than + (same value, clearer) */
+	p->block = (struct block *) __get_free_pages(GFP_KERNEL|GFP_DMA, 4);
+	if (p->block == NULL) {
+		kfree(p);
+		return -ENOMEM;
+	}
+
+	/* append at the tail of the free list */
+	if (ctc->free_anchor == NULL)
+		ctc->free_anchor = p;
+	else {
+		q = ctc->free_anchor;
+		while (q->next != NULL)
+			q = q->next;
+		q->next = p;
+	}
+	ctc->buffer_count++;
+	return 0;
+}
+
+
+/*
+ * Unlink the first buffer from the channel's free list and release its
+ * data pages and descriptor. Returns -ENOMEM if the list is empty.
+ */
+static int ctc_buffer_free(struct channel *ctc) {
+
+	struct buffer *head = ctc->free_anchor;
+
+	if (head == NULL)
+		return -ENOMEM;
+
+	ctc->free_anchor = head->next;
+	free_pages((__u32)head->block, 4);
+	kfree(head);
+
+	return 0;
+}
+
+
+/*
+ * Move the first buffer of one list to the tail of another.
+ * Returns -ENOMEM when the source list is empty, 0 otherwise.
+ */
+static int inline ctc_buffer_swap(struct buffer **from, struct buffer **to) {
+
+	struct buffer *moved;
+	struct buffer *tail;
+
+	moved = *from;
+	if (moved == NULL)
+		return -ENOMEM;
+
+	*from = moved->next;
+	moved->next = NULL;
+
+	if (*to == NULL) {
+		*to = moved;
+	} else {
+		for (tail = *to; tail->next != NULL; tail = tail->next)
+			;
+		tail->next = moved;
+	}
+	return 0;
+}
+
+
+/*
+ * ctc_setup function
+ * this function is called for each ctc= keyword passed into the kernel
+ *
+ * valid parameter are: ctc=n,0xnnnn,0xnnnn,ctcx
+ * where n is the channel protocol always 0
+ * 0xnnnn is the cu number read
+ * 0xnnnn is the cu number write
+ * ctcx can be ctc0 to ctc7 or escon0 to escon7
+ */
+#if LINUX_VERSION_CODE>=0x020300
+static int __init ctc_setup(char *dev_name)
+#else
+__initfunc(void ctc_setup(char *dev_name,int *ints))
+#endif
+{
+	struct adapterlist tmp;
+#if LINUX_VERSION_CODE>=0x020300
+	#define CTC_MAX_PARMS 4
+	int ints[CTC_MAX_PARMS+1];
+	get_options(dev_name,CTC_MAX_PARMS,ints);
+	#define ctc_setup_return return(1)
+#else
+	#define ctc_setup_return return
+#endif
+	ctc_tab_init();
+
+	/* any ctc= parameter disables automatic channel selection */
+	ctc_no_auto = 1;
+
+	if (!strcmp(dev_name,"noauto")) {
+		printk(KERN_INFO "ctc: automatic channel selection deactivated\n");
+		ctc_setup_return;
+	}
+
+	tmp.devno[WRITE] = -ENODEV;
+	tmp.devno[READ] = -ENODEV;
+
+	/* the cases cascade on purpose: each consumes one more parameter */
+	switch (ints[0]) {
+
+		case 3: /* write channel passed */
+			tmp.devno[WRITE] = ints[3];
+			/* fallthrough */
+
+		case 2: /* read channel passed */
+			tmp.devno[READ] = ints[2];
+			if (tmp.devno[WRITE] == -ENODEV)
+				tmp.devno[WRITE] = tmp.devno[READ] + 1;
+			/* fallthrough */
+
+		case 1: /* protocol type passed */
+			tmp.protocol  = ints[1];
+			if (tmp.protocol == 0) {
+				break;
+			} else {
+				printk(KERN_WARNING "%s: wrong Channel protocol type passed\n", dev_name);
+				ctc_setup_return;
+			}
+			break;
+
+		default:
+			printk(KERN_WARNING "ctc: wrong number of parameter passed\n");
+			ctc_setup_return;
+	}
+	ctc_adapter[extract_channel_media(dev_name)][extract_channel_id(dev_name)] = tmp;
+#ifdef DEBUG
+	/* BUGFIX: was printk(DEBUG ...) - DEBUG expands to nothing, the
+	 * intended log level is KERN_DEBUG. */
+	printk(KERN_DEBUG "%s: protocol=%x read=%04x write=%04x\n",
+	       dev_name, tmp.protocol, tmp.devno[READ], tmp.devno[WRITE]);
+#endif
+	ctc_setup_return;
+
+}
+#if LINUX_VERSION_CODE>=0x020300
+__setup("ctc=", ctc_setup);
+#endif
+
+/*
+ * ctc_probe
+ * 	this function is called for each channel network device,
+ * 	which is defined in the /init/main.c
+ *
+ * BUGFIX: the error-cleanup paths in the channel loop used the adapter
+ * index "i" where the channel index "c" was meant, freeing the wrong
+ * devstat and the irq of the *failed* device number instead of the
+ * already-registered READ channel's.
+ * NOTE(review): the request_irq failure path still leaks dev->priv and
+ * the READ channel resources - confirm intended behavior before
+ * tightening further.
+ */
+int ctc_probe(net_device *dev)
+{
+	int rc;
+	int c;
+	int i;
+	int m;
+
+	struct ctc_priv *privptr;
+
+	/* Only the first time the ctc_probe gets control */
+	if (channel_tab_initialized == 0) {
+		channel_init();
+	}
+
+	ctc_tab_init();
+
+	m = extract_channel_media(dev->name);
+	i = extract_channel_id(dev->name);
+
+	/* a device needs one read and one write channel */
+	if (channel_left(m) <= 1)
+		return -ENODEV;
+
+	if (ctc_no_auto == 1 && (ctc_adapter[m][i].devno[READ] == -ENODEV || ctc_adapter[m][i].devno[WRITE] == -ENODEV))
+		return -ENODEV;
+
+	dev->priv = kmalloc(sizeof(struct ctc_priv), GFP_KERNEL);
+	if (dev->priv == NULL)
+		return -ENOMEM;
+	memset(dev->priv, 0, sizeof(struct ctc_priv));
+	privptr = (struct ctc_priv *) (dev->priv);
+
+
+	for (c = 0; c < 2; c++) {
+
+		privptr->channel[c].devstat = kmalloc(sizeof(devstat_t), GFP_KERNEL);
+		if (privptr->channel[c].devstat == NULL){
+			if (c == WRITE)		/* BUGFIX: was "i == WRITE" */
+				kfree(privptr->channel[READ].devstat);
+			return -ENOMEM;
+		}
+		memset(privptr->channel[c].devstat, 0, sizeof(devstat_t));
+
+		if (ctc_no_auto == 0)
+			ctc_adapter[m][i].devno[c] = channel_get_next(m);
+		else
+			ctc_adapter[m][i].devno[c] = channel_get(m, ctc_adapter[m][i].devno[c]);
+
+		if ( ctc_adapter[m][i].devno[c] != -ENODEV){
+			rc = request_irq(get_irq_by_devno(ctc_adapter[m][i].devno[c]),
+					 (void *)ctc_irq_handler, SA_INTERRUPT, dev->name,
+					 privptr->channel[c].devstat);
+			if (rc) {
+				printk(KERN_WARNING "%s: requested device busy %02x\n", dev->name, rc);
+				return -EBUSY;
+			}
+		} else {
+			if (c == WRITE) {	/* BUGFIX: was "i == WRITE" */
+				/* undo the READ channel setup done in the
+				 * previous loop pass (the old code freed the
+				 * irq of the failed WRITE device number) */
+				free_irq(get_irq_by_devno(ctc_adapter[m][i].devno[READ]), privptr->channel[READ].devstat);
+				channel_free(m, ctc_adapter[m][i].devno[READ]);
+				kfree(privptr->channel[READ].devstat);
+			}
+			kfree(privptr->channel[c].devstat);	/* BUGFIX: was channel[i] */
+			return -ENODEV;
+		}
+	}
+
+	privptr->channel[READ].devno = ctc_adapter[m][i].devno[READ];
+	privptr->channel[READ].irq = get_irq_by_devno(ctc_adapter[m][i].devno[READ]);
+	privptr->channel[WRITE].devno = ctc_adapter[m][i].devno[WRITE];
+	privptr->channel[WRITE].irq = get_irq_by_devno(ctc_adapter[m][i].devno[WRITE]);
+	privptr->protocol = ctc_adapter[m][i].protocol;
+	channel[m].left = channel[m].left - 2;
+
+	printk(KERN_INFO "%s: read dev: %04x irq: %04x - write dev: %04x irq: %04x \n",
+	       dev->name, privptr->channel[READ].devno, privptr->channel[READ].irq,
+	       privptr->channel[WRITE].devno, privptr->channel[WRITE].irq);
+
+	dev->mtu = CTC_DEFAULT_MTU_SIZE;
+	dev->hard_start_xmit = ctc_tx;
+	dev->open = ctc_open;
+	dev->stop = ctc_release;
+	dev->get_stats = ctc_stats;
+	dev->change_mtu = ctc_change_mtu;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->type = ARPHRD_SLIP;
+	dev->tx_queue_len = 100;
+	dev_init_buffers(dev);
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+	return 0;
+}
+
+
+/*
+ * Interrupt processing
+ *
+ */
+
+/* Log a failed do_IO/halt_IO return code in human-readable form. */
+static void inline ccw_check_return_code (net_device *dev, int return_code)
+{
+	switch (return_code) {
+	case 0:
+		break;		/* success - nothing to report */
+	case -EBUSY:
+		printk(KERN_INFO "%s: Busy !\n", dev->name);
+		break;
+	case -ENODEV:
+		printk(KERN_EMERG "%s: Invalid device called for IO\n", dev->name);
+		break;
+	case -EIO:
+		printk(KERN_EMERG "%s: Status pending... \n", dev->name);
+		break;
+	default:
+		printk(KERN_EMERG "%s: Unknown error in Do_IO %04x\n",
+		       dev->name, return_code);
+	}
+}
+
+
+/* Decode the first sense byte of a unit check and log the likely cause. */
+static void inline ccw_check_unit_check (net_device *dev, char sense)
+{
+#ifdef DEBUG
+	printk(KERN_INFO "%s: Unit Check with sense code: %02x\n",
+	       dev->name, sense);
+#endif
+
+	if (sense & 0x40) {
+#ifdef DEBUG
+		if (sense & 0x01)
+			printk(KERN_DEBUG "%s: Interface disconnect or Selective reset occurred (remote side)\n", dev->name);
+		else
+			printk(KERN_DEBUG "%s: System reset occured (remote side)\n", dev->name);
+#endif
+	} else if (sense & 0x20) {
+		if (sense & 0x04)
+			printk(KERN_WARNING "%s: Data-streaming timeout)\n", dev->name);
+		else
+			printk(KERN_WARNING "%s: Data-transfer parity error\n", dev->name);
+	} else if (sense & 0x10) {
+		/* NOTE(review): this branch is only reached when (sense & 0x20)
+		 * is clear, so the test below can never be true and the
+		 * "Hardware malfunction" message is unreachable - a different
+		 * sense bit was probably intended; confirm against the CTC
+		 * sense byte layout before changing. */
+		if (sense & 0x20)
+			printk(KERN_WARNING "%s: Hardware malfunction (remote side)\n", dev->name);
+		else
+			printk(KERN_WARNING "%s: Read-data parity error (remote side)\n", dev->name);
+	}
+
+}
+
+
+/*
+ * Main interrupt handler for both the read and the write subchannel.
+ * Drives the start/stop state machine (ctc->state) and re-issues the
+ * channel programs for the hot read and write paths.
+ *
+ * BUGFIX: every test of the form "!devstat->flag & DEVSTAT_FINAL_STATUS"
+ * (and "!ctc->flag & CTC_WRITE") parsed as "(!flag) & MASK" because !
+ * binds tighter than &, so the intended "not final status" early return
+ * never fired. All occurrences are now correctly parenthesized.
+ */
+static void ctc_irq_handler (int irq, void *initparm, struct pt_regs *regs)
+{
+	int rc = 0;
+	__u32 parm;
+	__u8 flags = 0x00;
+	struct channel *ctc = NULL;
+	struct ctc_priv *privptr = NULL;
+	net_device *dev = NULL;
+
+	ccw1_t ccw_set_x_mode[2] = {{CCW_CMD_SET_EXTENDED, CCW_FLAG_SLI | CCW_FLAG_CC, 0, NULL},
+				    {CCW_CMD_NOOP, CCW_FLAG_SLI, 0, NULL}};
+
+	devstat_t *devstat = ((devstat_t *)initparm);
+
+	/* Bypass all unsolicited interrupts */
+	if (devstat->intparm == 0) {
+#ifdef DEBUG
+		printk(KERN_DEBUG "ctc: unsolited interrupt for device: %04x received c-%02x d-%02x f-%02x\n",
+		       devstat->devno, devstat->cstat, devstat->dstat, devstat->flag);
+#endif
+		/* FIXME - find the related intparm!!! No IO outstanding!!!! */
+		return;
+	}
+
+	ctc = (struct channel *) (devstat->intparm);
+	dev = (net_device *) ctc->dev;
+	privptr = dev->priv;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: interrupt for device: %04x received c-%02x d-%02x f-%02x state-%02x\n",
+	       dev->name, ctc->devno, devstat->cstat, devstat->dstat, devstat->flag, ctc->state);
+#endif
+
+	/* Check for good subchannel return code, otherwise error message */
+	if (devstat->cstat) {
+		printk(KERN_WARNING "%s: subchannel check for device: %04x - %02x\n",
+		       dev->name, ctc->devno, devstat->cstat);
+		return;
+	}
+
+
+	/* Check the reason-code of a unit check */
+	if (devstat->dstat & DEV_STAT_UNIT_CHECK)
+		ccw_check_unit_check(dev, devstat->ii.sense.data[0]);
+
+
+	/* State machine to bring the connection up / down and to restart */
+
+	ctc->last_dstat = devstat->dstat;
+
+	switch (ctc->state) {
+
+	case CTC_STOP:		/* HALT_IO issued by ctc_release (halt sequence) */
+		if (!(devstat->flag & DEVSTAT_FINAL_STATUS))	/* BUGFIX: precedence */
+			return;
+		wake_up(&ctc->wait);	/* wake up ctc_release */
+		return;
+
+
+	case CTC_START_HALT_IO:	/* HALT_IO issued by ctc_open (start sequence) */
+		if (!(devstat->flag & DEVSTAT_FINAL_STATUS))	/* BUGFIX: precedence */
+			return;
+
+		ctc->state = CTC_START_SET_X_MODE;
+		parm = (__u32) ctc;
+		rc = do_IO (ctc->irq, &ccw_set_x_mode[0], parm, 0xff, flags);
+		if (rc != 0)
+			ccw_check_return_code(dev, rc);
+		return;
+
+
+	case CTC_START_SET_X_MODE:
+		if (devstat->dstat & DEV_STAT_UNIT_CHECK) {
+			if ((devstat->ii.sense.data[0] & 0x41) != 0x41 ||
+			    (devstat->ii.sense.data[0] & 0x40) != 0x40) {
+				wake_up(&ctc->wait);	/* wake up ctc_open (READ or WRITE) */
+				return;
+			}
+		}
+		if (!(devstat->flag & DEVSTAT_FINAL_STATUS))	/* BUGFIX: precedence */
+			return;
+		ctc->state = CTC_START_SELECT;
+		/* fallthrough */
+
+	case CTC_START_SELECT:
+		if (!(ctc->flag & CTC_WRITE)) {	/* BUGFIX: precedence */
+			ctc->state = CTC_START_READ_TEST;
+			ctc->ccw[1].cda = (char *)virt_to_phys(ctc->free_anchor->block);
+			parm = (__u32) ctc;
+			rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+			if (rc != 0)
+				ccw_check_return_code(dev, rc);
+			wake_up(&ctc->wait);	/* wake up ctc_open (READ) */
+
+		} else {
+			ctc->state = CTC_START_WRITE_TEST;
+			/* ADD HERE THE RIGHT PACKET TO ISSUE A ROUND TRIP - PART 1 */
+			ctc->ccw[1].count = 0;
+			ctc->ccw[1].cda = (char *)virt_to_phys(ctc->free_anchor->block);
+			parm = (__u32) ctc;
+			rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags);
+			if (rc != 0)
+				ccw_check_return_code(dev, rc);
+		}
+		return;
+
+
+	case CTC_START_READ_TEST:
+		if (devstat->dstat & DEV_STAT_UNIT_CHECK) {
+			if ((devstat->ii.sense.data[0] & 0x41) == 0x41 ||
+			    (devstat->ii.sense.data[0] & 0x40) == 0x40 ||
+			    devstat->ii.sense.data[0] == 0 ) {
+				init_timer(&ctc->timer);
+				ctc->timer.function = (void *)ctc_read_retry;
+				ctc->timer.data = (__u32)ctc;
+				ctc->timer.expires = jiffies + 10*HZ;
+				add_timer(&ctc->timer);
+#ifdef DEBUG
+				printk(KERN_DEBUG "%s: read connection restarted\n",dev->name);
+#endif
+			}
+			return;
+		}
+
+		if ((devstat->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
+			if ((devstat->dstat & DEV_STAT_ATTENTION) &&
+			    (devstat->dstat & DEV_STAT_BUSY)) {
+				printk(KERN_WARNING "%s: read channel is connected with the remote side read channel\n", dev->name);
+			}
+			wake_up(&privptr->channel[WRITE].wait);	/* wake up ctc_open (WRITE) */
+			return;
+		}
+
+		ctc->state = CTC_START_READ;
+		set_bit(0, (void *)&ctc->IO_active);
+
+		/* ADD HERE THE RIGHT PACKET TO ISSUE A ROUND TRIP - PART 2 */
+		/* wake_up(&privptr->channel[WRITE].wait);*/ /* wake up ctc_open (WRITE) */
+		/* fallthrough */
+
+	case CTC_START_READ:
+		if (devstat->dstat & DEV_STAT_UNIT_CHECK) {
+			if ((devstat->ii.sense.data[0] & 0x41) == 0x41 ||
+			    (devstat->ii.sense.data[0] & 0x40) == 0x40 ||
+			    devstat->ii.sense.data[0] == 0 ) {
+				privptr->stats.rx_errors++;
+				/* Need protection here cos we are in the read irq */
+				/*  handler the tbusy is for the write subchannel */
+				ctc_protect_busy(dev);
+				ctc_setbit_busy(TB_RETRY,dev);
+				ctc_unprotect_busy(dev);
+				init_timer(&ctc->timer);
+				ctc->timer.function = (void *)ctc_read_retry;
+				ctc->timer.data = (__u32)ctc;
+				ctc->timer.expires = jiffies + 30*HZ;
+				add_timer(&ctc->timer);
+				printk(KERN_INFO "%s: connection restarted!! problem on remote side\n",dev->name);
+			}
+			return;
+		}
+
+		if (!(devstat->flag & DEVSTAT_FINAL_STATUS))	/* BUGFIX: precedence */
+			return;
+		ctc_protect_busy(dev);
+		ctc_clearbit_busy(TB_RETRY,dev);
+		ctc_unprotect_busy(dev);
+		ctc_buffer_swap(&ctc->free_anchor, &ctc->proc_anchor);
+
+		if (ctc->free_anchor != NULL) {
+			ctc->ccw[1].cda = (char *)virt_to_phys(ctc->free_anchor->block);
+			parm = (__u32) ctc;
+			rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+			if (rc != 0)
+				ccw_check_return_code(dev, rc);
+		} else {
+			clear_bit(0, (void *)&ctc->IO_active);
+#ifdef DEBUG
+			printk(KERN_DEBUG "%s: No HOT READ started in IRQ\n",dev->name);
+#endif
+		}
+
+		if (test_and_set_bit(CTC_BH_ACTIVE, (void *)&ctc->flag_a) == 0) {
+			queue_task(&ctc->tq, &tq_immediate);
+			mark_bh(IMMEDIATE_BH);
+		}
+		return;
+
+
+	case CTC_START_WRITE_TEST:
+		if (devstat->dstat & DEV_STAT_UNIT_CHECK) {
+			if ((devstat->ii.sense.data[0] & 0x41) == 0x41 ||
+			    (devstat->ii.sense.data[0] & 0x40) == 0x40 ||
+			    devstat->ii.sense.data[0] == 0 ) {
+				init_timer(&ctc->timer);
+				ctc->timer.function = (void *)ctc_write_retry;
+				ctc->timer.data = (__u32)ctc;
+				ctc->timer.expires = jiffies + 10*HZ;
+				add_timer(&ctc->timer);
+#ifdef DEBUG
+				printk(KERN_DEBUG "%s: write connection restarted\n",dev->name);
+#endif
+			}
+			return;
+		}
+
+		ctc->state = CTC_START_WRITE;
+		wake_up(&ctc->wait);	/* wake up ctc_open (WRITE) */
+		return;
+
+
+	case CTC_START_WRITE:
+		if (devstat->dstat & DEV_STAT_UNIT_CHECK) {
+			privptr->stats.tx_errors += ctc->proc_anchor->packets;
+#ifdef DEBUG
+			printk(KERN_DEBUG "%s: Unit Check on write channel\n",dev->name);
+#endif
+		} else {
+			if (!(devstat->flag & DEVSTAT_FINAL_STATUS))	/* BUGFIX: precedence */
+				return;
+			privptr->stats.tx_packets += ctc->proc_anchor->packets;
+		}
+
+		ctc->proc_anchor->block->length = 0;
+		ctc_buffer_swap(&ctc->proc_anchor, &ctc->free_anchor);
+		ctc_clearbit_busy(TB_NOBUFFER,dev);
+		if (ctc->proc_anchor != NULL) {
+#ifdef DEBUG
+			printk(KERN_DEBUG "%s: IRQ early swap buffer\n",dev->name);
+#endif
+			ctc->ccw[1].count = ctc->proc_anchor->block->length;
+			ctc->ccw[1].cda = (char *)virt_to_phys(ctc->proc_anchor->block);
+			parm = (__u32) ctc;
+			rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+			if (rc != 0)
+				ccw_check_return_code(dev, rc);
+			dev->trans_start = jiffies;
+			return;
+
+		}
+
+		if (ctc->free_anchor->block->length != 0) {
+			if (ctc_test_and_setbit_busy(TB_TX,dev) == 0) {
+				/* set transmission to busy */
+				ctc_buffer_swap(&ctc->free_anchor, &ctc->proc_anchor);
+				ctc_clearbit_busy(TB_TX,dev);
+#ifdef DEBUG
+				printk(KERN_DEBUG "%s: last buffer move in IRQ\n",dev->name);
+#endif
+				ctc->ccw[1].count = ctc->proc_anchor->block->length;
+				ctc->ccw[1].cda = (char *)virt_to_phys(ctc->proc_anchor->block);
+				parm = (__u32) ctc;
+				rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+				if (rc != 0)
+					ccw_check_return_code(dev, rc);
+				dev->trans_start = jiffies;
+				return;
+			}
+		}
+
+		clear_bit(0, (void *)&ctc->IO_active);	/* set by ctc_tx or ctc_bh */
+		return;
+
+
+	default:
+		printk(KERN_WARNING "%s: wrong selection code - irq\n",dev->name);
+		return;
+	}
+}
+
+
+/*
+ * Bottom half for the read channel: unpack every packet of each processed
+ * block into an sk_buff, hand it to the network stack, return the block
+ * to the free list, and restart the hot read when it had stalled.
+ *
+ * FIX: "(__u8 *)lp += lp->length" used the cast-as-lvalue GCC extension
+ * (later removed from the compiler); rewritten as an explicit pointer
+ * recomputation with identical semantics.
+ */
+static void ctc_irq_bh (struct channel *ctc)
+{
+	int rc = 0;
+	__u16 data_len;
+	__u32 parm;
+
+	__u8 flags = 0x00;
+	__u32 saveflags;
+	net_device *dev;
+	struct ctc_priv *privptr;
+	struct packet *lp;
+	struct sk_buff *skb;
+
+	dev = (net_device *) ctc->dev;
+	privptr = (struct ctc_priv *) dev->priv;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: bh routine - state-%02x\n" ,dev->name, ctc->state);
+#endif
+
+	while (ctc->proc_anchor != NULL) {
+
+		lp = &ctc->proc_anchor->block->data;
+
+		/* walk all packets inside the block (block->length includes
+		 * the block header, so the end pointer starts at &length) */
+		while ((__u8 *) lp < (__u8 *) &ctc->proc_anchor->block->length + ctc->proc_anchor->block->length) {
+			data_len = lp->length - PACKET_HEADER_LENGTH;
+			skb = dev_alloc_skb(data_len);
+			if (skb) {
+				memcpy(skb_put(skb, data_len),&lp->data, data_len);
+				skb->mac.raw = skb->data;
+				skb->dev = dev;
+				skb->protocol = htons(ETH_P_IP);
+				skb->ip_summed = CHECKSUM_UNNECESSARY; /* no UC happened!!! */
+				netif_rx(skb);
+				privptr->stats.rx_packets++;
+			} else {
+				privptr->stats.rx_dropped++;
+				printk(KERN_WARNING "%s: is low on memory\n",dev->name);
+			}
+			/* advance to the next packet header in the block */
+			lp = (struct packet *)((__u8 *) lp + lp->length);
+		}
+
+		s390irq_spin_lock_irqsave(ctc->irq, saveflags);
+		ctc_buffer_swap(&ctc->proc_anchor, &ctc->free_anchor);
+
+		if (test_and_set_bit(0, (void *)&ctc->IO_active) == 0) {
+#ifdef DEBUG
+			printk(KERN_DEBUG "%s: HOT READ started in bh routine\n" ,dev->name);
+#endif
+			ctc->ccw[1].cda = (char *)virt_to_phys(ctc->free_anchor->block);
+			parm = (__u32) ctc;
+			rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+			if (rc != 0)
+				ccw_check_return_code(dev, rc);
+		}
+		s390irq_spin_unlock_irqrestore(ctc->irq, saveflags);
+	}
+	clear_bit(CTC_BH_ACTIVE, (void *)&ctc->flag_a);
+	return;
+}
+
+
+/*
+ * Timer callback: re-issue the read channel program after a unit check
+ * signalled a lost connection. do_IO runs under the channel's irq lock.
+ */
+static void ctc_read_retry (struct channel *ctc)
+{
+	net_device *dev = (net_device *) ctc->dev;
+	__u32 lockflags;
+	__u32 parm;
+	__u8 flags = 0x00;
+	int rc;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: read retry - state-%02x\n" ,dev->name, ctc->state);
+#endif
+	s390irq_spin_lock_irqsave(ctc->irq, lockflags);
+	ctc->ccw[1].cda = (char *)virt_to_phys(ctc->free_anchor->block);
+	parm = (__u32) ctc;
+	rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+	s390irq_spin_unlock_irqrestore(ctc->irq, lockflags);
+	if (rc != 0)
+		ccw_check_return_code(dev, rc);
+}
+
+
+/*
+ * Timer callback: re-issue the (empty, count 0) write channel program
+ * after a unit check. do_IO runs under the channel's irq lock.
+ */
+static void ctc_write_retry (struct channel *ctc)
+{
+	net_device *dev = (net_device *) ctc->dev;
+	__u32 lockflags;
+	__u32 parm;
+	__u8 flags = 0x00;
+	int rc;
+
+#ifdef DEBUG
+	printk(KERN_DEBUG "%s: write retry - state-%02x\n" ,dev->name, ctc->state);
+#endif
+	s390irq_spin_lock_irqsave(ctc->irq, lockflags);
+	ctc->ccw[1].count = 0;
+	ctc->ccw[1].cda = (char *)virt_to_phys(ctc->proc_anchor->block);
+	parm = (__u32) ctc;
+	rc = do_IO (ctc->irq, &ctc->ccw[0], parm, 0xff, flags );
+	s390irq_spin_unlock_irqrestore(ctc->irq, lockflags);
+	if (rc != 0)
+		ccw_check_return_code(dev, rc);
+}
+
+
+
+/*
+ * ctc_open
+ *
+ */
+static int ctc_open(net_device *dev)
+{
+ int rc;
+ int i;
+ int j;
+ __u8 flags = 0x00;
+ __u32 saveflags;
+ __u32 parm;
+ struct ctc_priv *privptr;
+ DECLARE_WAITQUEUE(wait, current);
+ struct timer_list timer;
+
+
+ /* mark the interface busy for the whole open handshake */
+ ctc_set_busy(dev);
+
+ privptr = (struct ctc_priv *) (dev->priv);
+
+ privptr->channel[READ].flag = 0x00;
+ privptr->channel[WRITE].flag = CTC_WRITE;
+
+ /* per-channel setup: buffers, bottom half, CCW chain, and an
+ initial HALT_IO to bring the channel into a known state */
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < CTC_BLOCKS; j++) {
+ rc = ctc_buffer_alloc(&privptr->channel[i]);
+ if (rc != 0)
+ /* NOTE(review): buffers allocated on earlier
+ iterations leak here and the device is left
+ marked busy - confirm intended */
+ return -ENOMEM;
+ }
+ init_waitqueue_head(&privptr->channel[i].wait);
+ privptr->channel[i].tq.next = NULL;
+ privptr->channel[i].tq.sync = 0;
+ privptr->channel[i].tq.routine = (void *)(void *)ctc_irq_bh;
+ privptr->channel[i].tq.data = &privptr->channel[i];
+
+ privptr->channel[i].dev = dev;
+
+ privptr->channel[i].flag_a = 0;
+ privptr->channel[i].IO_active = 0;
+
+ /* CCW chain: PREPARE -> READ or WRITE -> NOOP */
+ privptr->channel[i].ccw[0].cmd_code = CCW_CMD_PREPARE;
+ privptr->channel[i].ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ privptr->channel[i].ccw[0].count = 0;
+ privptr->channel[i].ccw[0].cda = NULL;
+ if (i == READ) {
+ privptr->channel[i].ccw[1].cmd_code = CCW_CMD_READ;
+ privptr->channel[i].ccw[1].flags = CCW_FLAG_SLI;
+ privptr->channel[i].ccw[1].count = 0xffff; /* MAX size */
+ privptr->channel[i].ccw[1].cda = NULL;
+ } else {
+ privptr->channel[i].ccw[1].cmd_code = CCW_CMD_WRITE;
+ privptr->channel[i].ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ privptr->channel[i].ccw[1].count = 0;
+ privptr->channel[i].ccw[1].cda = NULL;
+ }
+ privptr->channel[i].ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE+DE */
+ privptr->channel[i].ccw[2].flags = CCW_FLAG_SLI;
+ privptr->channel[i].ccw[2].count = 0;
+ privptr->channel[i].ccw[2].cda = NULL;
+
+ /* guard timer: ctc_timer() sets CTC_TIMER and wakes us if the
+ remote side does not answer within 150 seconds */
+ privptr->channel[i].flag &= ~CTC_TIMER;
+ init_timer(&timer);
+ timer.function = (void *)ctc_timer;
+ timer.data = (__u32)&privptr->channel[i];
+ timer.expires = jiffies + 150*HZ; /* time to connect with the remote side */
+ add_timer(&timer);
+
+ /* issue HALT_IO, then sleep until the irq handler (or the
+ timer above) wakes this task */
+ s390irq_spin_lock_irqsave(privptr->channel[i].irq, saveflags);
+ parm = (unsigned long) &privptr->channel[i];
+ privptr->channel[i].state = CTC_START_HALT_IO;
+ rc = halt_IO(privptr->channel[i].irq, parm, flags);
+ add_wait_queue(&privptr->channel[i].wait, &wait);
+ current->state = TASK_INTERRUPTIBLE;
+ s390irq_spin_unlock_irqrestore(privptr->channel[i].irq, saveflags);
+ schedule();
+ remove_wait_queue(&privptr->channel[i].wait, &wait);
+ if(rc != 0)
+ ccw_check_return_code(dev, rc);
+ /* only delete the timer if it did not fire */
+ if((privptr->channel[i].flag & CTC_TIMER) == 0x00)
+ del_timer(&timer);
+ }
+
+ /* fail if either channel saw status beyond CE+DE, or timed out */
+ if ((((privptr->channel[READ].last_dstat | privptr->channel[WRITE].last_dstat) &
+ ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
+ (((privptr->channel[READ].flag | privptr->channel[WRITE].flag) & CTC_TIMER) != 0x00)) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: channel problems during open - read: %02x - write: %02x\n",
+ dev->name, privptr->channel[READ].last_dstat, privptr->channel[WRITE].last_dstat);
+#endif
+ printk(KERN_INFO "%s: remote side is currently not ready\n", dev->name);
+
+ /* wind both channels down and release all buffers */
+ for (i = 0; i < 2; i++) {
+ s390irq_spin_lock_irqsave(privptr->channel[i].irq, saveflags);
+ parm = (unsigned long) &privptr->channel[i];
+ privptr->channel[i].state = CTC_STOP;
+ rc = halt_IO(privptr->channel[i].irq, parm, flags);
+ s390irq_spin_unlock_irqrestore(privptr->channel[i].irq, saveflags);
+ if (rc != 0)
+ ccw_check_return_code(dev, rc);
+ for (j = 0; j < CTC_BLOCKS; j++)
+ ctc_buffer_free(&privptr->channel[i]);
+ }
+ /* NOTE(review): device remains flagged busy on this path -
+ confirm whether ctc_clear_busy() is intended here */
+ return -EIO;
+ }
+
+ printk(KERN_INFO "%s: connected with remote side\n",dev->name);
+ ctc_clear_busy(dev);
+ return 0;
+}
+
+
+/*
+ * ctc_timer
+ *
+ * Connect-timeout callback armed in ctc_open(): flags the channel
+ * with CTC_TIMER and wakes the task sleeping on the channel's wait
+ * queue so the open handshake can give up.
+ */
+static void ctc_timer (struct channel *ctc)
+{
+#ifdef DEBUG
+ net_device *dev;
+
+ dev = (net_device *) ctc->dev;
+ printk(KERN_DEBUG "%s: timer return\n" ,dev->name);
+#endif
+ ctc->flag |= CTC_TIMER;
+ wake_up(&ctc->wait);
+ return;
+}
+
+/*
+ * ctc_release
+ *
+ */
+static int ctc_release(net_device *dev)
+{
+ int rc;
+ int i;
+ int j;
+ __u8 flags = 0x00;
+ __u32 saveflags;
+ __u32 parm;
+ struct ctc_priv *privptr;
+ DECLARE_WAITQUEUE(wait, current);
+
+ privptr = (struct ctc_priv *) dev->priv;
+
+ /* block further transmits while the device is shut down */
+ ctc_protect_busy_irqsave(dev,saveflags);
+ ctc_setbit_busy(TB_STOP,dev);
+ /* NOTE(review): restores with 'flags' (always 0) instead of the
+ 'saveflags' captured above - looks like a typo, confirm */
+ ctc_unprotect_busy_irqrestore(dev,flags);
+ /* halt both channels, waiting for the irq handler, then free all
+ buffers (swap moves any in-process blocks back to the free chain) */
+ for (i = 0; i < 2; i++) {
+ s390irq_spin_lock_irqsave(privptr->channel[i].irq, saveflags);
+ privptr->channel[i].state = CTC_STOP;
+ parm = (__u32) &privptr->channel[i];
+ rc = halt_IO (privptr->channel[i].irq, parm, flags );
+ add_wait_queue(&privptr->channel[i].wait, &wait);
+ current->state = TASK_INTERRUPTIBLE;
+ s390irq_spin_unlock_irqrestore(privptr->channel[i].irq, saveflags);
+ schedule();
+ remove_wait_queue(&privptr->channel[i].wait, &wait);
+ if (rc != 0) {
+ ccw_check_return_code(dev, rc);
+ }
+
+ for (j = 0; j < CTC_BLOCKS; j++) {
+ ctc_buffer_swap(&privptr->channel[i].proc_anchor, &privptr->channel[i].free_anchor);
+ ctc_buffer_free(&privptr->channel[i]);
+ }
+ }
+
+ /* report any device status beyond the expected CE+DE */
+ if (((privptr->channel[READ].last_dstat | privptr->channel[WRITE].last_dstat) &
+ ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
+ printk(KERN_WARNING "%s: channel problems during close - read: %02x - write: %02x\n",
+ dev->name, privptr->channel[READ].last_dstat, privptr->channel[WRITE].last_dstat);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+/*
+ * ctc_tx
+ *
+ *
+ */
+static int ctc_tx(struct sk_buff *skb, net_device *dev)
+{
+ int rc=0,rc2;
+ __u32 parm;
+ __u8 flags = 0x00;
+ __u32 saveflags;
+ struct ctc_priv *privptr;
+ struct packet *lp;
+
+
+ privptr = (struct ctc_priv *) (dev->priv);
+
+ if (skb == NULL) {
+ printk(KERN_WARNING "%s: NULL pointer as sk_buffer passed\n", dev->name);
+ privptr->stats.tx_dropped++;
+ return -EIO;
+ }
+
+ /* everything below runs under the WRITE channel irq lock; packets
+ are packed back to back into the current free block and sent as
+ one channel program when the channel is idle */
+ s390irq_spin_lock_irqsave(privptr->channel[WRITE].irq, saveflags);
+ if (ctc_check_busy(dev)) {
+ rc=-EBUSY;
+ goto Done;
+ }
+
+ if (ctc_test_and_setbit_busy(TB_TX,dev)) { /* set transmission to busy */
+ rc=-EBUSY;
+ goto Done;
+ }
+
+ /* would this packet overflow the 64K block? swap to a fresh block */
+ if (65535 - privptr->channel[WRITE].free_anchor->block->length - PACKET_HEADER_LENGTH <= skb->len + PACKET_HEADER_LENGTH + 2) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: early swap\n", dev->name);
+#endif
+
+ ctc_buffer_swap(&privptr->channel[WRITE].free_anchor, &privptr->channel[WRITE].proc_anchor);
+ if (privptr->channel[WRITE].free_anchor == NULL){
+ ctc_setbit_busy(TB_NOBUFFER,dev);
+ rc=-EBUSY;
+ goto Done2;
+ }
+ }
+
+ /* first packet in an empty block: reserve room for the block header */
+ if (privptr->channel[WRITE].free_anchor->block->length == 0) {
+ privptr->channel[WRITE].free_anchor->block->length = BLOCK_HEADER_LENGTH;
+ privptr->channel[WRITE].free_anchor->packets = 0;
+ }
+
+
+ /* append packet header + payload at the block's current end;
+ the casts on the left of += are a GNU C lvalue-cast extension */
+ (__u8 *)lp = (__u8 *) &privptr->channel[WRITE].free_anchor->block->length + privptr->channel[WRITE].free_anchor->block->length;
+ privptr->channel[WRITE].free_anchor->block->length += skb->len + PACKET_HEADER_LENGTH;
+ lp->length = skb->len + PACKET_HEADER_LENGTH;
+ lp->type = 0x0800;
+ lp->unused = 0;
+ memcpy(&lp->data, skb->data, skb->len);
+ /* terminate the block with a zero-length packet marker */
+ (__u8 *) lp += lp->length;
+ lp->length = 0;
+ dev_kfree_skb(skb);
+ privptr->channel[WRITE].free_anchor->packets++;
+
+ /* channel idle: kick off the write I/O for the filled block */
+ if (test_and_set_bit(0, (void *)&privptr->channel[WRITE].IO_active) == 0) {
+ ctc_buffer_swap(&privptr->channel[WRITE].free_anchor,&privptr->channel[WRITE].proc_anchor);
+ privptr->channel[WRITE].ccw[1].count = privptr->channel[WRITE].proc_anchor->block->length;
+ privptr->channel[WRITE].ccw[1].cda = (char *)virt_to_phys(privptr->channel[WRITE].proc_anchor->block);
+ parm = (__u32) &privptr->channel[WRITE];
+ rc2 = do_IO (privptr->channel[WRITE].irq, &privptr->channel[WRITE].ccw[0], parm, 0xff, flags );
+ if (rc2 != 0)
+ ccw_check_return_code(dev, rc2);
+ dev->trans_start = jiffies;
+ }
+ if (privptr->channel[WRITE].free_anchor == NULL)
+ ctc_setbit_busy(TB_NOBUFFER,dev);
+Done2:
+ ctc_clearbit_busy(TB_TX,dev);
+Done:
+ s390irq_spin_unlock_irqrestore(privptr->channel[WRITE].irq, saveflags);
+ return(rc);
+}
+
+
+/*
+ * ctc_change_mtu
+ *
+ * S/390 can handle MTU sizes from 576 to 32760 for VM, VSE
+ * 576 to 65527 for OS/390
+ *
+ */
+static int ctc_change_mtu(net_device *dev, int new_mtu)
+{
+ /* NOTE(review): the header comment above documents 65527 as the
+ OS/390 maximum, but this check accepts up to 65528 - confirm
+ which bound is intended */
+ if ((new_mtu < 576) || (new_mtu > 65528))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+/*
+ * ctc_stats
+ *
+ */
+/* Return the interface statistics kept in the device's private area. */
+struct net_device_stats *ctc_stats(net_device *dev)
+{
+ struct ctc_priv *priv = (struct ctc_priv *) dev->priv;
+
+ return &priv->stats;
+}
+
+
+/* Module code goes here */
+
+/*
+ free_irq(privptr->channel[i].irq, privptr->channel[i].devstat);
+ kfree(privptr->channel[i].devstat);
+
+*/
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
new file mode 100644
index 000000000..b8b365753
--- /dev/null
+++ b/drivers/s390/net/iucv.c
@@ -0,0 +1,1178 @@
+/*
+ * drivers/s390/net/iucv.c
+ * Network driver for VM using iucv
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Stefan Hegewald <hegewald@de.ibm.com>
+ * Hartmut Penner <hpenner@de.ibm.com>
+ *
+ * 2.3 Updates Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *
+
+ */
+
+#ifndef __KERNEL__
+#define __KERNEL__
+#endif
+
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/malloc.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h> /* mark_bh */
+#include <linux/netdevice.h> /* struct net_device, and other headers */
+#include <linux/inetdevice.h> /* struct net_device, and other headers */
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/ip.h> /* struct iphdr */
+#include <linux/tcp.h> /* struct tcphdr */
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/string.h>
+
+#include "iucv.h"
+
+
+
+
+#define DEBUG123 /* intentionally NOT "DEBUG": debug output is compiled out */
+#define MAX_DEVICES 10 /* max number of iucv interfaces */
+
+extern char _ascebc[]; /* ASCII -> EBCDIC translate table */
+
+/*
+ * global structures
+ */
+static char iucv_userid[MAX_DEVICES][8]; /* peer userids, EBCDIC, blank padded */
+static char iucv_ascii_userid[MAX_DEVICES][8]; /* same userids, ASCII (for messages) */
+static int iucv_pathid[MAX_DEVICES] = {0}; /* IUCV path id per device */
+static unsigned char iucv_ext_int_buffer[40] __attribute__((aligned (8))) ={0}; /* single shared external-interrupt buffer */
+static unsigned char glob_command_buffer[40] __attribute__((aligned (8))); /* parameter list used from interrupt context */
+
+#if LINUX_VERSION_CODE>=0x20300
+typedef struct net_device net_device;
+#else
+typedef struct device net_device;
+#endif
+/* defined later in this file; declared here without a size */
+net_device iucv_devs[];
+
+
+/* This structure is private to each device. It is used to pass */
+/* packets in and out, so there is place for a packet */
+struct iucv_priv {
+ struct net_device_stats stats;
+ int packetlen;
+ int status;
+ u8 *packetdata;
+ int pathid; /* used device */
+ unsigned char command_buffer[40] __attribute__((aligned (8)));
+ unsigned char ext_int_buffer[40] __attribute__((aligned (8)));
+ u8* receive_buffer;
+ int receive_buffer_len;
+ u8* send_buffer;
+ int send_buffer_len;
+ char * new_send_buf; /* send buffer ptr */
+ unsigned char recv_buf[2048]; /* size is just a guess */
+ unsigned char userid[8]; /* peer userid, EBCDIC */
+};
+
+/* per-packet framing header: halfword offset to the next packet */
+struct iucv_header {
+ short len;
+};
+
+
+
+/* Nonzero while the transmit path is flagged busy.
+ NOTE(review): the >=2.3.45 branch tests __LINK_STATE_XOFF in
+ dev->flags; the link-state bits normally live in dev->state -
+ confirm against the target kernel headers. */
+static __inline__ int netif_is_busy(net_device *dev)
+{
+#if LINUX_VERSION_CODE<0x02032D
+ return(dev->tbusy);
+#else
+ return(test_bit(__LINK_STATE_XOFF,&dev->flags));
+#endif
+}
+
+
+
+/* Compatibility shims: on pre-2.3.45 kernels emulate the netif_*
+ queue interface with the old tbusy/start/interrupt fields; on newer
+ kernels the queue functions exist and the extra hooks are no-ops. */
+#if LINUX_VERSION_CODE<0x02032D
+#define netif_enter_interrupt(dev) dev->interrupt=1
+#define netif_exit_interrupt(dev) dev->interrupt=0
+#define netif_start(dev) dev->start=1
+#define netif_stop(dev) dev->start=0
+
+static __inline__ void netif_stop_queue(net_device *dev)
+{
+ dev->tbusy=1;
+}
+
+static __inline__ void netif_start_queue(net_device *dev)
+{
+ dev->tbusy=0;
+}
+
+static __inline__ void netif_wake_queue(net_device *dev)
+{
+ dev->tbusy=0;
+ mark_bh(NET_BH);
+}
+
+#else
+#define netif_enter_interrupt(dev)
+#define netif_exit_interrupt(dev)
+#define netif_start(dev)
+#define netif_stop(dev)
+#endif
+
+
+
+/*
+ * Following the iucv primitives
+ */
+
+
+/*
+ * b2f0: issue the IUCV instruction (opcode 0xB2F0) with the request
+ * code in GR0 and the address of the 40-byte parameter list in GR1.
+ * Results come back in the parameter list (iprcode etc.).
+ */
+extern inline void b2f0(int code,void* parm)
+{
+ asm volatile ("LR 1,%1\n\tLR 0,%0\n\t.long 0xb2f01000" ::
+ "d" (code) ,"a" (parm) :"0", "1");
+}
+
+/*
+ * iucv_enable
+ *
+ * Enable IUCV external interrupts: set the interrupt mask (SETMASK)
+ * and the control mask (SETCMASK).  Returns the IUCV return code of
+ * the last operation (0 == success).
+ */
+int iucv_enable(void *parms)
+{
+ MASK_T *parm = parms;
+ /* BUGFIX: was memset(parms,0,sizeof(parm)), which clears only
+ sizeof(void *) bytes of the 40-byte parameter list */
+ memset(parm,0,sizeof(*parm));
+ parm->ipmask = 0xF8;
+ b2f0(SETMASK,parm);
+ memset(parm,0,sizeof(*parm));
+ parm->ipmask = 0xF8;
+ b2f0(SETCMASK,parm);
+ return parm->iprcode;
+}
+
+
+/*
+ * iucv_declare_buffer
+ *
+ * Register 'buffer' (real address) as the external interrupt buffer
+ * for this VM.  Returns the IUCV return code (0 == success).
+ */
+int iucv_declare_buffer(void *parms, DCLBFR_T *buffer)
+{
+ DCLBFR_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0,sizeof(*parm));
+ parm->ipflags1= 0x00;
+ parm->ipbfadr1 = virt_to_phys(buffer);
+ b2f0(DECLARE_BUFFER, parm);
+ return parm->iprcode;
+}
+
+
+/*
+ * iucv_retrieve_buffer
+ *
+ * Withdraw the previously declared external interrupt buffer.
+ * Returns the IUCV return code (0 == success).
+ */
+int iucv_retrieve_buffer(void *parms)
+{
+ DCLBFR_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0x0,sizeof(*parm));
+ parm->iprcode = 0x0;
+ b2f0(RETRIEVE_BUFFER, parm);
+ return parm->iprcode;
+}
+
+
+/*
+ * iucv_connect
+ *
+ * Open an IUCV path to 'userid' on node 'host' (both EBCDIC, blank
+ * padded), optionally passing 16 bytes of connect data in 'ipusr'.
+ * On return *used_pathid holds the path id assigned by CP.
+ * Returns the IUCV return code (0 == success).
+ */
+int iucv_connect(void *parms,
+ const char *userid,
+ const char *host,
+ const char *ipusr,
+ unsigned short * used_pathid)
+{
+ CONNECT_T *parm = parms; /* ipflags was 0x60*/
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0x0,sizeof(*parm));
+ parm->ipflags1 = 0x80;
+ parm->ipmsglim = 0x0a;
+ memcpy(parm->ipvmid,userid,8);
+ if (ipusr)
+ memcpy(parm->ipuser,ipusr,16);
+ memcpy(parm->iptarget,host,8);
+ b2f0(CONNECT, parm);
+ *used_pathid = parm->ippathid;
+ return parm->iprcode;
+}
+
+
+
+/*
+ * iucv_accept
+ *
+ * Accept a pending IUCV connection on 'pathid' (message limit 10).
+ * Returns the IUCV return code (0 == success).
+ */
+int iucv_accept(void *parms,int pathid)
+{
+#ifdef DEBUG
+ int i=0;
+#endif
+ ACCEPT_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0,sizeof(*parm));
+ parm->ippathid = pathid;
+ parm->ipflags1 = 0x80;
+ parm->ipmsglim = 0x0a;
+#ifdef DEBUG
+ printk("iucv: iucv_accept input.\n");
+ for (i=0;i<40; i++)
+ {
+ printk("%02x ",((char *)parms)[i]);
+ }
+ printk("\n");
+#endif
+ b2f0(ACCEPT, parm);
+ return parm->iprcode;
+}
+
+
+
+/*
+ * iucv_receive
+ *
+ * Receive a pending message into 'bufferarray' (up to 'len' bytes).
+ * Returns the IUCV return code (0 == success).
+ * NOTE(review): the residual length is copied into the local 'len'
+ * only, so the caller never learns the received length - confirm.
+ */
+int iucv_receive(void *parms,void *bufferarray,int len)
+{
+#ifdef DEBUG
+ int i=0;
+#endif
+ RECEIVE_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0x0,sizeof(*parm));
+ /*parm->ipflags1 = 0x42;*/
+ parm->ipflags1 = 0x0;
+ parm->ipmsgid = 0x0;
+ parm->iptrgcls = 0x0;
+ parm->ipbfadr1 = (ULONG) virt_to_phys(bufferarray);
+ parm->ipbfln1f = len;
+ parm->ipbfln2f = 0x0;
+ b2f0(RECEIVE, parm);
+ if (parm->iprcode == 0)
+ len = parm->ipbfln1f;
+// len = len-parm->ipbfln1f;
+#ifdef DEBUG
+ printk("iucv: iucv_receive command input:\n");
+ for (i=0;i<40;i++) /* show iucv buffer before send */
+ {
+ printk("%02x ",((char *)parms)[i]);
+ }
+ printk("\n");
+
+ printk("iucv: iucv_receive data buffer:\n");
+ for (i=0;i<len;i++) /* show data received */
+ {
+ printk("%02x ",((char *)bufferarray)[i]);
+ }
+ printk("\n");
+ printk("received length: %02x ",len);
+
+#endif
+ return parm->iprcode;
+}
+
+
+/*
+ * iucv_send
+ *
+ * Send 'len' bytes from 'bufferarray' on path 'pathid' (two-way
+ * send: an answer may arrive in recv_buf/recv_len).
+ * Returns the IUCV return code (0 == success).
+ */
+int iucv_send(void *parms,int pathid,void *bufferarray,int len,
+ void *recv_buf, int recv_len)
+{
+#ifdef DEBUG
+ int i=0;
+#endif
+ SEND_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0x0,sizeof(*parm));
+ /* parm->ipflags1 = 0x48; ??*/
+ parm->ippathid = pathid;
+ parm->ipflags1 = 0x14; /* any options ?? */
+ parm->ipmsgid = 0x0;
+ parm->iptrgcls = 0x0;
+ parm->ipbfadr1 = virt_to_phys(bufferarray);
+ parm->ipbfln1f = len;
+ parm->ipsrccls = 0x0;
+ parm->ipmsgtag = 0x0;
+ parm->ipbfadr2 = virt_to_phys(recv_buf);
+ parm->ipbfln2f = recv_len;
+
+
+#ifdef DEBUG
+ printk("iucv: iucv_send command input:\n");
+ for (i=0;i<40;i++) /* show iucv buffer before send */
+ {
+ printk("%02x ",((char *)parms)[i]);
+ }
+ printk("\n");
+
+ printk("iucv: iucv_send data buffer:\n");
+ for (i=0;i<len;i++) /* show send data before send */
+ {
+ printk("%02x ",((char *)bufferarray)[i]);
+ }
+ printk("\n");
+#endif
+
+ b2f0(SEND, parm);
+
+#ifdef DEBUGXX
+ printk("iucv: iucv_send buffer after send:\n");
+ for (i=0;i<len;i++) /* show send buffer after send */
+ {
+ printk("%1x",((char *)bufferarray)[i]);
+ }
+ printk("\n");
+#endif
+
+ return parm->iprcode;
+}
+
+
+
+/*
+ * iucv_sever
+ *
+ * Sever (close) the IUCV path described by the parameter list.
+ * Returns the IUCV return code (0 == success).
+ */
+int iucv_sever(void *parms)
+{
+ SEVER_T *parm = parms;
+ /* BUGFIX: was sizeof(parm) - cleared only pointer-size bytes */
+ memset(parm,0x0,sizeof(*parm));
+ parm->ippathid = 0x0;
+ parm->ipflags1 = 0x0;
+ parm->iprcode = 0xF;
+ memset(parm->ipuser,0,16);
+ b2f0(SEVER, parm);
+ return parm->iprcode;
+}
+
+
+#ifdef DEBUG
+/*--------------------------*/
+/* Dump buffer formatted */
+/*--------------------------*/
+/* Hex-dump 'len' bytes: 16 bytes per line, grouped in fours. */
+static void dumpit(char* buf, int len)
+{
+ int i;
+ for (i=0;i<len;i++) {
+ if (!(i%16)&&i!=0)
+ printk("\n");
+ else if (!(i%4)&&i!=0)
+ printk(" ");
+ /* BUGFIX: cast to unsigned char - plain char may be signed
+ and would sign-extend to FFFFFFxx under %02X */
+ printk( "%02X",(unsigned char)buf[i]);
+ }
+ if (len%16)
+ printk( "\n");
+}
+#endif
+
+
+
+/*--------------------------*/
+/* Get device from pathid */
+/*--------------------------*/
+/*--------------------------*/
+/* Get device from pathid */
+/*--------------------------*/
+/* Map an IUCV path id back to its net device; returns 0 if unknown. */
+net_device * get_device_from_pathid(int pathid)
+{
+ int i;
+ /* BUGFIX: loop ran to i<=MAX_DEVICES, one element past the end
+ of iucv_pathid[] and iucv_devs[] (MAX_DEVICES entries) */
+ for (i=0;i<MAX_DEVICES;i++)
+ {
+ if (iucv_pathid[i] == pathid)
+ return &iucv_devs[i];
+ }
+ printk("iucv: get_device_from_pathid: no device for pathid %X\n",pathid);
+ return 0;
+}
+
+
+
+/*--------------------------*/
+/* Get device from userid */
+/*--------------------------*/
+/*--------------------------*/
+/* Get device from userid */
+/*--------------------------*/
+/* Map an 8-byte EBCDIC userid to its net device; returns 0 if unknown. */
+net_device * get_device_from_userid(char * userid)
+{
+ int i;
+ net_device * dev;
+ struct iucv_priv *privptr;
+ /* BUGFIX: loop ran to i<=MAX_DEVICES, one element past the end
+ of iucv_devs[] (MAX_DEVICES entries) */
+ for (i=0;i<MAX_DEVICES;i++)
+ {
+ dev = &iucv_devs[i];
+ privptr = (struct iucv_priv *)(dev->priv);
+ if (memcmp(privptr->userid,userid,8)==0)
+ return &iucv_devs[i];
+ }
+ printk("iucv: get_device_from_uid: no device for userid %s\n",userid);
+ return 0;
+}
+
+
+/*--------------------------*/
+/* Open iucv Device Driver */
+/*--------------------------*/
+int iucv_open(net_device *dev)
+{
+ int rc;
+ unsigned short iucv_used_pathid;
+ struct iucv_priv *privptr;
+ char iucv_host[8] ={0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
+ /* 16 bytes of connect data: EBCDIC "0" followed by blanks, twice
+ - presumably a protocol version marker; confirm with peer */
+ char vmident[16] ={0xf0,0x40,0x40,0x40,0x40,0x40,0x40,0x40,
+ 0xf0,0x40,0x40,0x40,0x40,0x40,0x40,0x40};
+
+#ifdef DEBUG
+ printk( "iucv: iucv_open, device: %s\n",dev->name);
+#endif
+
+ privptr = (struct iucv_priv *)(dev->priv);
+ /* pathid != -1 means a path already exists (e.g. the peer
+ connected to us first): just start the queue */
+ if(privptr->pathid != -1) {
+ netif_start(dev);
+ netif_start_queue(dev);
+ return 0;
+ }
+ /* otherwise actively connect to the configured peer userid */
+ if ((rc = iucv_connect(privptr->command_buffer,
+ privptr->userid,
+ iucv_host,
+ vmident,
+ &iucv_used_pathid))!=0) {
+ printk( "iucv: iucv connect failed with rc %X\n",rc);
+ iucv_retrieve_buffer(privptr->command_buffer);
+ return -ENODEV;
+ }
+
+ privptr->pathid = iucv_used_pathid;
+ iucv_pathid[dev-iucv_devs]=privptr->pathid;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_connect ended with rc: %X\n",rc);
+ printk( "iucv[%d] pathid %X \n",(int)(dev-iucv_devs),privptr->pathid);
+#endif
+ netif_start(dev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+
+/*-----------------------------------------------------------------------*/
+/* Receive a packet: retrieve, encapsulate and pass over to upper levels */
+/*-----------------------------------------------------------------------*/
+/* Deliver one received packet to the network stack: strip the 2-byte
+ IUCV framing header, copy into a fresh skb and hand it upstream. */
+void iucv_rx(net_device *dev, int len, unsigned char *buf)
+{
+
+ struct sk_buff *skb;
+ struct iucv_priv *privptr = (struct iucv_priv *)dev->priv;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_rx len: %X, device %s\n",len,dev->name);
+ printk( "iucv rx: received orig:\n");
+ dumpit(buf,len);
+#endif
+
+ /* strip iucv header now */
+ len = len - 2; /* short header */
+ buf = buf + 2; /* short header */
+
+ skb = dev_alloc_skb(len+2); /* why +2 ? alignment ? */
+ if (!skb) {
+ /* drop silently; rx_dropped is not counted here */
+ printk( "iucv rx: low on mem, returning...\n");
+ return;
+ }
+ skb_reserve(skb, 2); /* align IP on 16B boundary*/
+ memcpy(skb_put(skb, len), buf, len);
+#ifdef DEBUG
+ printk( "iucv rx: data before netif_rx()\n");
+ dumpit(buf,len);
+#endif
+
+ /* Write metadata, and then pass to the receive level */
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_IP);
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it*/
+ privptr->stats.rx_packets++;
+ netif_rx(skb);
+
+ return;
+} /* end iucv_rx() */
+
+
+
+
+/*----------------------------*/
+/* handle interrupts */
+/*----------------------------*/
+/* IUCV external-interrupt dispatcher: decodes the interrupt type from
+ the single shared external-interrupt buffer and handles connection
+ setup, send completion and message-pending (receive) events. */
+void do_iucv_interrupt(void)
+{
+ int rc;
+ struct in_device *indev;
+ struct in_ifaddr *inaddr;
+ unsigned long len=0;
+ net_device *dev=0;
+ struct iucv_priv *privptr;
+ INTERRUPT_T * extern_int_buffer;
+ unsigned short iucv_data_len=0;
+ unsigned short iucv_next=0;
+ unsigned char * rcvptr;
+
+ /* get own buffer: */
+ extern_int_buffer = (INTERRUPT_T*) iucv_ext_int_buffer;
+
+ /* NOTE(review): dev is still NULL here; on >=2.3.45 this macro is
+ empty, but on older kernels it dereferences dev - confirm */
+ netif_enter_interrupt(dev); /* lock ! */
+
+#ifdef DEBUG
+ printk( "iucv: do_iucv_interrupt %x received; pathid: %02X\n",
+ extern_int_buffer->iptype,extern_int_buffer->ippathid);
+ printk( "iucv: extern_int_buffer:\n");
+ dumpit((char *)&extern_int_buffer[0],40);
+#endif
+
+ switch (extern_int_buffer->iptype)
+ {
+ case 0x01: /* connection pending ext interrrupt */
+#ifdef DEBUG
+ printk( "iucv: connection pending IRQ.\n");
+#endif
+
+ rc = iucv_accept(glob_command_buffer,
+ extern_int_buffer->ippathid);
+ if (rc != 0) {
+ printk( "iucv: iucv_accept failed with rc: %X\n",rc);
+ iucv_retrieve_buffer(glob_command_buffer);
+ break;
+ }
+#ifdef DEBUG
+ dumpit(&((char *)extern_int_buffer)[8],8);
+#endif
+ /* bytes 8..15 of the interrupt buffer carry the peer userid */
+ dev = get_device_from_userid(&((char*)extern_int_buffer)[8]);
+ privptr = (struct iucv_priv *)(dev->priv);
+ privptr->pathid = extern_int_buffer->ippathid;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_accept ended with rc: %X\n",rc);
+ printk( "iucv: device %s found.\n",dev->name);
+#endif
+ break;
+
+ case 0x02: /* connection completed ext interrrupt */
+ /* set own global IP address */
+ /* & set global routing addr */
+#ifdef DEBUG
+ printk( "connection completed.\n");
+#endif
+
+ if( extern_int_buffer->ipmsgtag !=0)
+ {
+ /* get ptr's to kernel struct with local & broadcast address */
+ dev = get_device_from_pathid(extern_int_buffer->ippathid);
+ privptr = (struct iucv_priv *)(dev->priv);
+ indev = dev->ip_ptr;
+ inaddr = (struct in_ifaddr*) indev->ifa_list;
+ }
+ break;
+
+
+ case 0x03: /* connection severed ext interrrupt */
+ /* we do not handle this one at this time */
+#ifdef DEBUG
+ printk( "connection severed.\n");
+#endif
+ break;
+
+
+ case 0x04: /* connection quiesced ext interrrupt */
+ /* we do not handle this one at this time */
+#ifdef DEBUG
+ printk( "connection quiesced.\n");
+#endif
+ break;
+
+
+ case 0x05: /* connection resumed ext interrrupt */
+ /* we do not handle this one at this time */
+#ifdef DEBUG
+ printk( "connection resumed.\n");
+#endif
+ break;
+
+
+ case 0x06: /* priority message complete ext interrupt */
+ case 0x07: /* non priority message complete ext interrupt */
+ /* send it to iucv_rx for handling */
+#ifdef DEBUG
+ printk( "message completed.\n");
+#endif
+
+ if (extern_int_buffer->ipaudit ==0) /* ok case */
+ {
+#ifdef DEBUG
+ printk( "iucv: msg complete interrupt successful, rc: %X\n",
+ (unsigned int)extern_int_buffer->ipaudit);
+#endif
+ ;
+ }
+ else
+ {
+ printk( "iucv: msg complete interrupt error, rc: %X\n",
+ (unsigned int)extern_int_buffer->ipaudit);
+ }
+ /* a transmission is over: tell we are no more busy */
+ dev = get_device_from_pathid(extern_int_buffer->ippathid);
+ privptr = (struct iucv_priv *)(dev->priv);
+ privptr->stats.tx_packets++;
+ netif_wake_queue(dev); /* transmission is no longer busy*/
+ break;
+
+
+ case 0x08: /* priority message pending */
+ case 0x09: /* non priority message pending */
+#ifdef DEBUG
+ printk( "message pending.\n");
+#endif
+ dev = get_device_from_pathid(extern_int_buffer->ippathid);
+ privptr = (struct iucv_priv *)(dev->priv);
+ rcvptr = &privptr->receive_buffer[0];
+
+ /* re-set receive buffer */
+ memset(privptr->receive_buffer,0,privptr->receive_buffer_len);
+ len = privptr->receive_buffer_len;
+
+ /* get data now */
+ if (extern_int_buffer->ipflags1 & 0x80)
+ { /* data is in the message */
+#ifdef DEBUG
+ printk( "iucv: iucv_receive data is in header!\n");
+#endif
+ /* short message: payload travels in the parameter list */
+ memcpy(privptr->receive_buffer,
+ (char *)extern_int_buffer->iprmmsg1,
+ (unsigned long)(extern_int_buffer->iprmmsg2));
+ }
+ else /* data is in buffer, do a receive */
+ {
+ rc = iucv_receive(privptr->command_buffer,rcvptr,len);
+ if (rc != 0 || len == 0)
+ {
+ printk( "iucv: iucv_receive failed with rc: %X, length: %lX\n",rc,len);
+ iucv_retrieve_buffer(privptr->command_buffer);
+ break;
+ }
+ } /* end else */
+
+ /* walk the received block: each packet is preceded by a
+ halfword offset to the next one, terminated by zero */
+ iucv_next = 0;
+ /* get next packet offset */
+ iucv_data_len= *((unsigned short*)rcvptr);
+ do{ /* until receive buffer is empty, i.e. iucv_next == 0 ! */
+
+ /* get data length: */
+ iucv_data_len= iucv_data_len - iucv_next;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_receive: len is %02X, last: %02X\n",
+ iucv_data_len,iucv_next);
+#endif
+ /* transmit upstairs */
+ iucv_rx(dev,(iucv_data_len),rcvptr);
+
+#ifdef DEBUG
+ printk( "iucv: transaction complete now.\n");
+#endif
+ iucv_next = *((unsigned short*)rcvptr);
+ rcvptr = rcvptr + iucv_data_len;
+ /* get next packet offset */
+ iucv_data_len= *((unsigned short*)rcvptr);
+
+ } while (iucv_data_len != 0);
+ netif_start_queue(dev); /* transmission is no longer busy*/
+ break;
+
+ default:
+ printk( "unknown iucv interrupt \n");
+ break;
+
+ } /* end switch */
+ netif_exit_interrupt(dev); /* release lock*/
+
+#ifdef DEBUG
+ printk( "iucv: leaving do_iucv_interrupt.\n");
+#endif
+
+} /* end do_iucv_interrupt() */
+
+
+
+/*-------------------------------------------*/
+/* Transmit a packet (low level interface) */
+/*-------------------------------------------*/
+/* Frame one IP packet with the IUCV halfword-offset header and send
+ it on this device's path.  Returns the IUCV return code (0 == ok). */
+int iucv_hw_tx(char *send_buf, int len,net_device *dev)
+{
+ /* This function deals with hw details. */
+ /* This interface strips off the ethernet header details. */
+ /* In other words, this function implements the iucv behaviour,*/
+ /* while all other procedures are rather device-independent */
+ struct iucv_priv *privptr;
+ int rc, recv_len=2000;
+
+ privptr = (struct iucv_priv *)(dev->priv);
+
+#ifdef DEBUG
+ printk( "iucv: iucv_hw_tx, device %s\n",dev->name);
+ printk( "iucv: hw_TX_data len: %X\n",len);
+ dumpit(send_buf,len);
+#endif
+
+ /* I am paranoid. Ain't I? */
+ if (len < sizeof(struct iphdr))
+ {
+ printk( "iucv: Hmm... packet too short (%i octets)\n",len);
+ return -EINVAL;
+ }
+
+ /*
+ * build IUCV header (preceeding halfword offset)
+ * works as follows: Each packet is preceded by the
+ * halfword offset to the next one.
+ * The last packet is followed by an offset of zero.
+ * E.g., AL2(12),10-byte packet, AL2(34), 32-byte packet, AL2(0)
+ */
+
+ /* BUGFIX: copy exactly len bytes; the old code copied len+2 and
+ read two bytes past the end of send_buf. The terminating zero
+ halfword is written explicitly below. */
+ memcpy(&privptr->send_buffer[2],send_buf,len);
+ privptr->send_buffer[len+2] = 0;
+ privptr->send_buffer[len+3] = 0;
+ /* leading halfword: offset to the (terminating) next packet */
+ *((unsigned short*) &privptr->send_buffer[0]) = len + 2;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_hw_tx, device %s\n",dev->name);
+ printk( "iucv: send len: %X\n",len+4);
+ dumpit(privptr->send_buffer,len+4);
+#endif
+
+ /* Ok, now the packet is ready for transmission: send it. */
+ if ((rc = iucv_send(privptr->command_buffer,
+ privptr->pathid,
+ &privptr->send_buffer[0],len+4,
+ privptr->recv_buf,recv_len))!=0) {
+ printk( "iucv: send_iucv failed, rc: %X\n",rc);
+ iucv_retrieve_buffer(privptr->command_buffer);
+ }
+#ifdef DEBUG
+ printk( "iucv: send_iucv ended, rc: %X\n",rc);
+#endif
+ return rc;
+} /* end iucv_hw_tx() */
+
+
+
+
+
+
+/*------------------------------------------*/
+/* Transmit a packet (called by the kernel) */
+/*------------------------------------------*/
+/* hard_start_xmit entry: validate the skb, mark the queue busy and
+ hand the payload to iucv_hw_tx().  Returns 0 when the skb has been
+ consumed, nonzero on transmit failure. */
+int iucv_tx(struct sk_buff *skb, net_device *dev)
+{
+ int retval=0;
+
+ struct iucv_priv *privptr;
+
+ if (dev == NULL)
+ {
+ printk("iucv: NULL dev passed\n");
+ return 0;
+ }
+
+ privptr = (struct iucv_priv *) (dev->priv);
+
+ if (skb == NULL)
+ {
+ printk("iucv: %s: NULL buffer passed\n", dev->name);
+ privptr->stats.tx_errors++;
+ return 0;
+ }
+
+#ifdef DEBUG
+ printk( "iucv: enter iucv_tx, using %s\n",dev->name);
+#endif
+
+ if (netif_is_busy(dev)) /* shouldn't happen */
+ {
+ privptr->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ printk("iucv: %s: transmit access conflict ! leaving iucv_tx.\n", dev->name);
+ /* BUGFIX: without this return the freed skb was read by
+ iucv_hw_tx() below and then freed a second time */
+ return 0;
+ }
+
+ netif_stop_queue(dev); /* transmission is busy*/
+ dev->trans_start = jiffies; /* save the timestamp*/
+
+ /* actual deliver of data is device-specific, and not shown here */
+ retval = iucv_hw_tx(skb->data, skb->len, dev);
+
+ dev_kfree_skb(skb); /* release it*/
+
+#ifdef DEBUG
+ printk( "iucv:leaving iucv_tx, device %s\n",dev->name);
+#endif
+
+ return retval; /* zero == done; nonzero == fail*/
+} /* end iucv_tx( struct sk_buff *skb, struct device *dev) */
+
+
+
+
+
+
+/*---------------*/
+/* iucv_release */
+/*---------------*/
+/* Stop the interface: halt the queue and sever the IUCV path.
+ Returns the sever return code (0 == success). */
+int iucv_release(net_device *dev)
+{
+ int rc =0;
+ struct iucv_priv *privptr;
+ privptr = (struct iucv_priv *) (dev->priv);
+
+ netif_stop(dev);
+ netif_stop_queue(dev); /* can't transmit any more*/
+ rc = iucv_sever(privptr->command_buffer);
+ if (rc!=0)
+ {
+ printk("iucv: %s: iucv_release pending...rc:%02x\n",dev->name,rc);
+ }
+
+#ifdef DEBUG
+ printk("iucv: iucv_sever ended with rc: %X\n",rc);
+#endif
+
+ return rc;
+} /* end iucv_release() */
+
+
+
+
+
+/*-----------------------------------------------*/
+/* Configuration changes (passed on by ifconfig) */
+/*-----------------------------------------------*/
+int iucv_config(net_device *dev, struct ifmap *map)
+{
+ /* Refuse reconfiguration while the interface is up; the ifmap
+ contents are otherwise ignored. */
+ return (dev->flags & IFF_UP) ? -EBUSY : 0;
+}
+/* end iucv_config() */
+
+
+
+
+
+/*----------------*/
+/* Ioctl commands */
+/*----------------*/
+int iucv_ioctl(net_device *dev, struct ifreq *rq, int cmd)
+{
+ /* No private ioctls: accept and ignore every request. */
+#ifdef DEBUG
+ printk( "iucv: device %s; iucv_ioctl\n",dev->name);
+#endif
+ return 0;
+}
+
+/*---------------------------------*/
+/* Return statistics to the caller */
+/*---------------------------------*/
+struct net_device_stats *iucv_stats(net_device *dev)
+{
+ /* Hand back the statistics block kept in the private area. */
+ struct iucv_priv *privptr = (struct iucv_priv *)dev->priv;
+
+#ifdef DEBUG
+ printk( "iucv: device %s; iucv_stats\n",dev->name);
+#endif
+ return &privptr->stats;
+}
+
+
+/*
+ * iucv_change_mtu
+ * IUCV can handle MTU sizes from 576 to approx. 32000
+ */
+
+static int iucv_change_mtu(net_device *dev, int new_mtu)
+{
+#ifdef DEBUG
+ printk( "iucv: device %s; iucv_change_mtu\n",dev->name);
+#endif
+ /* NOTE(review): the comment above documents a 576..~32000 range,
+ but the lower bound enforced here is 64 - confirm intent */
+ if ((new_mtu < 64) || (new_mtu > 32000))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+
+
+
+/*--------------------------------------------*/
+/* The init function (sometimes called probe).*/
+/* It is invoked by register_netdev() */
+/*--------------------------------------------*/
+/*
+ * iucv_init
+ *
+ * Device init hook (invoked by register_netdev): wires up the device
+ * methods, allocates the private area plus send/receive buffers
+ * (256KB each, below 16MB for real addressing), declares the shared
+ * external interrupt buffer and enables IUCV interrupts.
+ * Returns 0 on success, -ENOMEM/-ENODEV on failure.
+ * BUGFIX: all failure paths now release what was already allocated
+ * (the old code leaked dev->priv and the buffer pages).
+ */
+int iucv_init(net_device *dev)
+{
+ int rc;
+ struct iucv_priv *privptr;
+
+#ifdef DEBUG
+ printk( "iucv: iucv_init, device: %s\n",dev->name);
+#endif
+
+ dev->open = iucv_open;
+ dev->stop = iucv_release;
+ dev->set_config = iucv_config;
+ dev->hard_start_xmit = iucv_tx;
+ dev->do_ioctl = iucv_ioctl;
+ dev->get_stats = iucv_stats;
+ dev->change_mtu = iucv_change_mtu;
+
+ /* keep the default flags, just add NOARP */
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_SLIP;
+ dev->tx_queue_len = 100;
+ dev->flags = IFF_NOARP|IFF_POINTOPOINT;
+ dev->mtu = 4092;
+
+ dev_init_buffers(dev);
+
+ /* Then, allocate the priv field. This encloses the statistics */
+ /* and a few private fields.*/
+ dev->priv = kmalloc(sizeof(struct iucv_priv), GFP_KERNEL);
+ if (dev->priv == NULL){
+ printk( "iucv: no memory for dev->priv.\n");
+ return -ENOMEM;
+ }
+ memset(dev->priv, 0, sizeof(struct iucv_priv));
+ privptr = (struct iucv_priv *)(dev->priv);
+
+
+ privptr->send_buffer = (u8*) __get_free_pages(GFP_KERNEL+GFP_DMA,8);
+ if (privptr->send_buffer == NULL) {
+ printk(KERN_INFO "%s: could not get pages for send buffer\n",
+ dev->name);
+ rc = -ENOMEM;
+ goto out_free_priv;
+ }
+ memset(privptr->send_buffer, 0, 8*PAGE_SIZE);
+ privptr->send_buffer_len=8*PAGE_SIZE;
+
+ privptr->receive_buffer = (u8*) __get_free_pages(GFP_KERNEL+GFP_DMA,8);
+ if (privptr->receive_buffer == NULL) {
+ printk(KERN_INFO "%s: could not get pages for receive buffer\n",
+ dev->name);
+ rc = -ENOMEM;
+ goto out_free_send;
+ }
+ memset(privptr->receive_buffer, 0, 8*PAGE_SIZE);
+ privptr->receive_buffer_len=8*PAGE_SIZE;
+
+ /* now use the private fields ... */
+ /* init pathid to -1: no path established yet */
+ privptr->pathid = -1;
+
+ /* init private userid from global userid */
+ memcpy(privptr->userid,iucv_userid[dev-iucv_devs],8);
+
+
+ /* we can use only ONE buffer for external interrupt ! */
+ rc=iucv_declare_buffer(privptr->command_buffer,
+ (DCLBFR_T *)iucv_ext_int_buffer);
+ if (rc!=0 && rc!=19) /* ignore existing buffer */
+ {
+ printk( "iucv:iucv_declare failed, rc: %X\n",rc);
+ rc = -ENODEV;
+ goto out_free_receive;
+ }
+
+ rc = iucv_enable(privptr->command_buffer);
+ if (rc!=0)
+ {
+ printk( "iucv:iucv_enable failed, rc: %x\n",rc);
+ iucv_retrieve_buffer(privptr->command_buffer);
+ rc = -ENODEV;
+ goto out_free_receive;
+ }
+#ifdef DEBUG
+ printk( "iucv: iucv_init endend OK for device %s.\n",dev->name);
+#endif
+ return 0;
+
+out_free_receive:
+ free_pages((unsigned long)privptr->receive_buffer,8);
+out_free_send:
+ free_pages((unsigned long)privptr->send_buffer,8);
+out_free_priv:
+ kfree(dev->priv);
+ dev->priv = NULL;
+ return rc;
+}
+
+
+/*
+ * setup iucv devices
+ *
+ * string passed: iucv=userid1,...,useridn
+ */
+#if LINUX_VERSION_CODE>=0x020300
+static int __init iucv_setup(char *str)
+#else
+__initfunc(void iucv_setup(char *str,int *ints))
+#endif
+{
+	/*
+	 * Parse the "iucv=userid1,...,useridn" boot option: fill the
+	 * global iucv_userid/iucv_ascii_userid tables (EBCDIC-blank
+	 * padded to 8 chars), name the devices iucv0..iucvN and
+	 * register them.  Fixes vs. original: j could reach MAX_DEVICES
+	 * and index one row past the userid tables, and a userid longer
+	 * than 8 characters overflowed its row.
+	 */
+	int result=0, i=0, j=0, k=0, device_present=0;
+	char *s = str;
+	net_device *dev = NULL;
+
+#ifdef DEBUG
+	printk( "iucv: start registering device(s)... \n");
+#endif
+
+	/*
+	 * Scan the comma-separated device userids (terminated by a
+	 * blank or NUL).
+	 */
+	while (*s != 0x20 && *s != '\0') {
+		if (*s == ',') {
+			/* pad current userid up to 8 chars */
+			for (k = i; k < 8; k++)
+				iucv_userid[j][k] = 0x40;
+			/* start a new device */
+			j++;
+			s++;	/* skip the ',' */
+			i = 0;
+			if (j >= MAX_DEVICES) {
+				/* Off-by-one fix: j == MAX_DEVICES would
+				 * write one row past iucv_userid[]. */
+				printk("iucv: setup devices: max devices %d reached.\n",
+				       MAX_DEVICES);
+				j = MAX_DEVICES - 1;	/* last valid unit */
+				i = 8;			/* it is already fully padded */
+				break;
+			}
+			continue;
+		}
+		/* Bounds fix: silently ignore userid chars beyond 8. */
+		if (i < 8) {
+			iucv_ascii_userid[j][i] = (int)*s;
+			iucv_userid[j][i] = _ascebc[(int)*s];
+			i++;
+		}
+		s++;
+	}
+
+	/*
+	 * fill last userid up to 8 chars
+	 */
+	for (k = i; k < 8; k++)
+		iucv_userid[j][k] = 0x40;
+
+	/*
+	 * set device name (iucv0..iucvN) and register
+	 */
+	for (k = 0; k <= j; k++) {
+		memcpy(iucv_devs[k].name, "iucv0", 4);
+		dev = &iucv_devs[k];
+		dev->name[4] = k + '0';
+
+#ifdef DEBUGX
+		printk("iucv: (ASCII- )Userid:%s\n",&iucv_ascii_userid[k][0]);
+		printk("iucv: (ASCII-)Userid: ");
+		for (i=0;i<8;i++) {
+			printk( "%02X ",(int)iucv_ascii_userid[k][i]);
+		}
+		printk("\n");
+		printk("iucv: (EBCDIC-)Userid: ");
+		for (i=0;i<8;i++) {
+			printk( "%02X ",(int)iucv_userid[k][i]);
+		}
+		printk("\n");
+		printk("iucv: device name :%s\n",iucv_devs[k].name);
+#endif
+
+		if ( (result = register_netdev(iucv_devs + k)) )
+			printk("iucv: error %i registering device \"%s\"\n",
+			       result, iucv_devs[k].name);
+		else
+			device_present++;
+	}
+
+#ifdef DEBUG
+	printk( "iucv: end register devices, %d devices present\n",device_present);
+#endif
+	/* return device_present ? 0 : -ENODEV; */
+#if LINUX_VERSION_CODE>=0x020300
+	return 1;	/* non-zero: boot option consumed */
+#else
+	return;
+#endif
+}
+
+#if LINUX_VERSION_CODE>=0x020300
+__setup("iucv=", iucv_setup);
+#endif
+
+
+/*-------------*/
+/* The devices */
+/*-------------*/
+/*
+ * Static device table.  Names live in iucv_names (8 bytes per unit)
+ * and are filled in at boot by iucv_setup(); register_netdev() then
+ * calls iucv_init() on each entry.  The initializers are positional
+ * for the 2.2-era struct net_device (name, shmem addresses, ioport,
+ * irq, flag words, next pointer, init function); all remaining
+ * members default to zero.
+ * NOTE(review): the table provides exactly 10 initializers -- this
+ * assumes MAX_DEVICES == 10; confirm against the driver's #define.
+ */
+char iucv_names[MAX_DEVICES*8]; /* MAX_DEVICES eight-byte buffers */
+net_device iucv_devs[MAX_DEVICES] = {
+ {
+ iucv_names, /* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+8,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+16,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+24,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+32,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+40,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+48,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+56,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+64,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ },
+ {
+ iucv_names+72,/* name -- set at load time */
+ 0, 0, 0, 0, /* shmem addresses */
+ 0x000, /* ioport */
+ 0, /* irq line */
+ 0, 0, 0, /* various flags: init to 0 */
+ NULL, /* next ptr */
+ iucv_init, /* init function, fill other fields with NULL's */
+ }
+};
+
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
new file mode 100644
index 000000000..7905fb49a
--- /dev/null
+++ b/drivers/s390/net/iucv.h
@@ -0,0 +1,146 @@
+/*
+ * drivers/s390/net/iucv.h
+ * Network driver for VM using iucv
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Stefan Hegewald <hegewald@de.ibm.com>
+ * Hartmut Penner <hpenner@de.ibm.com>
+ */
+
+#ifndef _IUCV_H
+#define _IUCV_H
+
+
+/* Shorthand type names used by the parameter-list structs below. */
+#define UCHAR unsigned char
+#define USHORT unsigned short
+#define ULONG unsigned long
+
+/* Driver-wide buffer/length defaults. */
+#define DEFAULT_BUFFERSIZE 2048
+#define DEFAULT_FN_LENGTH 27
+#define TRANSFERLENGTH 10
+
+
+
+/* function ID's */
+/* IUCV function codes passed to the CP interface (see the DCLBFR_T,
+ * CONNECT_T, ... parameter lists below that match them). */
+#define RETRIEVE_BUFFER 2
+#define REPLY 3
+#define SEND 4
+#define RECEIVE 5
+#define ACCEPT 10
+#define CONNECT 11
+#define DECLARE_BUFFER 12
+#define SEVER 15
+#define SETMASK 16
+#define SETCMASK 17
+#define PURGE 9999 /* NOTE(review): out of line with the real codes above -- driver-internal sentinel? confirm */
+
+/* structures */
+/* DECLARE BUFFER parameter list; ipbfadr1 is the buffer address.
+ * NOTE(review): these structs mirror fixed VM IUCV parameter-list
+ * layouts -- field order, types and padding must not be changed. */
+typedef struct {
+ USHORT res0;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ ULONG res1;
+ ULONG res2;
+ ULONG ipbfadr1;
+ ULONG res[6];
+} DCLBFR_T;
+
+/* CONNECT parameter list: target VM userid, user data, path id out. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ USHORT ipmsglim;
+ USHORT res1;
+ UCHAR ipvmid[8];
+ UCHAR ipuser[16];
+ UCHAR iptarget[8];
+} CONNECT_T;
+
+/* ACCEPT parameter list: accept a pending connection on ippathid. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ USHORT ipmsglim;
+ USHORT res1;
+ UCHAR res2[8];
+ UCHAR ipuser[16];
+ UCHAR res3[8];
+} ACCEPT_T;
+
+/* SEND parameter list: one-way or two-way message transfer. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ ULONG ipmsgid;
+ ULONG iptrgcls;
+ ULONG ipbfadr1;
+ ULONG ipbfln1f;
+ ULONG ipsrccls;
+ ULONG ipmsgtag;
+ ULONG ipbfadr2;
+ ULONG ipbfln2f;
+ ULONG res;
+} SEND_T;
+
+/* REPLY parameter list: answer a two-way message. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ ULONG ipmsgid;
+ ULONG iptrgcls;
+ ULONG iprmmsg1;
+ ULONG iprmmsg2;
+ ULONG res1[2];
+ ULONG ipbfadr2;
+ ULONG ipbfln2f;
+ ULONG res2;
+} REPLY_T;
+
+/* RECEIVE parameter list: fetch a pending message into a buffer. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ ULONG ipmsgid;
+ ULONG iptrgcls;
+ ULONG ipbfadr1;
+ ULONG ipbfln1f;
+ ULONG res1[3];
+ ULONG ipbfln2f;
+ ULONG res2;
+} RECEIVE_T;
+
+/* SEVER parameter list: terminate the path identified by ippathid. */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iprcode;
+ ULONG res1[3];
+ UCHAR ipuser[16];
+ ULONG res2[2];
+} SEVER_T;
+
+/* SETMASK/SETCMASK parameter list: interrupt-class mask byte. */
+typedef struct {
+ UCHAR ipmask;
+ UCHAR res1[2];
+ UCHAR iprcode;
+ ULONG res2[9];
+} MASK_T;
+
+/* External-interrupt buffer layout: iptype identifies the interrupt
+ * (connection pending/complete, message pending/complete, sever...). */
+typedef struct {
+ USHORT ippathid;
+ UCHAR ipflags1;
+ UCHAR iptype;
+ ULONG ipmsgid;
+ ULONG ipaudit;
+ ULONG iprmmsg1;
+ ULONG iprmmsg2;
+ ULONG ipsrccls;
+ ULONG ipmsgtag;
+ ULONG ipbfadr2;
+ ULONG ipbfln2f;
+ UCHAR ippollfg;
+ UCHAR res2[3];
+} INTERRUPT_T;
+
+
+#endif