summaryrefslogtreecommitdiffstats
path: root/drivers/net/hamradio/pciscc4.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/hamradio/pciscc4.c')
-rw-r--r--drivers/net/hamradio/pciscc4.c2494
1 files changed, 2494 insertions, 0 deletions
diff --git a/drivers/net/hamradio/pciscc4.c b/drivers/net/hamradio/pciscc4.c
new file mode 100644
index 000000000..c167bebb7
--- /dev/null
+++ b/drivers/net/hamradio/pciscc4.c
@@ -0,0 +1,2494 @@
+/*****************************************************************************
+ *
+ * pciscc4.c This is the device driver for the PCISCC-4 card or any other
+ * board based on the Siemens PEB-20534H (DSCC-4) communication
+ * controller. The PCISCC-4 is a four-channel medium-speed (up
+ * to 10 respectively 52 Mbps/channel) synchronous serial
+ * interface controller with HDLC protocol processor and
+ * busmaster-DMA facilities.
+ *
+ * Info: http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4
+ *
+ * Authors: (c) 1999 Jens David <dg1kjd@afthd.tu-darmstadt.de>
+ *
+ * Policy: Please contact me before making structural changes.
+ * Before applying changes to the communication with
+ * the DSCC-4 please read:
+ * - Data Sheet 09/98 PEB-20534 Version 2.0
+ * - Delta Sheet Chip Rev. 2.0-2.1
+ * - Addendum/Corrections to Data Sheet 09/98 as of 01/99
+ * - DSCC-4 Errata Sheet (Book?) 03/99 Chip Rev. 2.1
+ * - Sample driver source code as of 07/27/99
+ * All these documents are available from Infineon on
+ * request or from http://www.infineon.de/... .
+ * At least the current version of this beast likes to be
+ * treated _very_ carefully. If you don't do this, it crashes
+ * itself or the system. I have made comments on these common
+ * traps where appropriate. No, there isn't such thing as a
+ * "master reset".
+ *
+ * CVS: $Id: pciscc4.c,v 1.60 2000/02/13 19:18:41 dg1kjd Exp $
+ *
+ * Changelog: Please log any changes here.
+ * | 08/23/99 Initial version Jens
+ * | 08/25/99 Reworked buffer concept to use last-mode Jens
+ * | policy and implemented Siemens' workarounds
+ * | 08/27/99 Reworked transmitter to use internal timers Jens
+ * | for better resolution at txdelay/txtail
+ * | 09/01/99 Ioctl()s implemented Jens
+ * | 09/10/99 Descriptor chain reworked. RX hold condition Jens
+ * | can't occur any more. TX descriptors are not Jens
+ * | re-initialized after transmit any more.
+ * | 09/12/99 TX reworked. TX-Timeout added. Jens
+ * | 09/13/99 TX timeout fixed Jens
+ * | 10/09/99 Cosmetic fixes and comments added Jens
+ * | 10/16/99 Cosmetic stuff and non-module mode fixed Jens
+ * | 10/21/99 TX-skbs are not freed in xmit()-statemachine Jens
+ * | 10/25/99 Default configuration more sensible now Jens
+ * | 02/13/00 Converted to new driver interface Jens
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *----------------------------------------------------------------------------
+ *
+ * | Please note that the GPL allows you to use the driver, NOT the radio. |
+ * | In order to use the radio, you need a license from the communications |
+ * | authority of your country. |
+ *
+ *----------------------------------------------------------------------------
+ *
+ *****************************************************************************
+ *
+ * Concept of Operation
+ * --------------------
+ *
+ * I. SCCs
+ * We use all SCC cores in HDLC mode. Asynchronous and BiSync operation is not
+ * supported and probably never will. We do not make use of the built-in
+ * LAPB/LAPD protocol processor features (Auto Mode). Moreover can't use
+ * automatic address recognition either because it is implemented in a way
+ * which allows only operation with fixed header sizes and address fields on
+ * static positions. Thus we use HDLC address mode 0. As far as the clock modes
+ * are concerned we make use of mode 0a (for DF9IC-like modems, RX and TX clock
+ * from pin header), 0b (G3RUH-"like", external RX clock, internal TX clock
+ * from BRG but unfortunately without oversampling), 6b (for TCM-3105-like simple
+ * modems, using on-chip DPLL for RX clock recovery. Internal TX clock from BRG
+ * which can optionaly be provided on TxClk and/or RTS pins. No oversampling.)
+ * and 4 (external RX and TX clock like DF9IC, but with clock gapping
+ * function, see Data Book 09/98 pp. 141 ff). Channel coding is user-selectable
+ * on a per-channel basis. DF9IC-type modems like NRZ (conversion NRZ->NRZI
+ * done internally), TCM-3105-like modems are usually used with NRZI coding.
+ * Moreover manchester, FM0 and FM1 can be selected (untested).
+ * The internal SCC-DMAC interface seem to obey the KISS-concept. The drawback
+ * of this fact is, that the chip fills our data buffers in memory completely
+ * sequential. If at the end of a frame the SCC realizes, that the FCS failed,
+ * it does not "discard" the frame. That is, it requests an interrupt and
+ * uses up a packet buffer as if the frame was valid. The frame is, however,
+ * marked invalid, but of cause the host processor still needs to clean the
+ * mess up, which costs time. Now consider that every six ones in series on a
+ * empty channel will cause an interrupt and work to the handler. The only
+ * way around this is to gate the receive data with the DCD signal. Of cause
+ * the modem's DCD needs to be very fast to accomplish this. The standard-DCD
+ * on DF9IC-type modems currently isn't. As far as modem handshake is concerned
+ * we program the DCD input of each channel as general purpose input and read
+ * its state whenever L2 requests us to do so. TX handshake can be used in two
+ * modes I called "hard" and "soft". Hard mode is reserved for future
+ * (smart) modems which tell the controller when they are ready to transmit
+ * using the CTS (clear to send) signal. In soft mode we use each channel's
+ * internal timer to generate txdelay and txtail. The advantage of this concept
+ * is, that we have a resolution of one bit since the timers are clocked with
+ * the effective TxClk, which also allows us to probe the TX-bitrate in external
+ * clock modes (L2 likes this information). The SCC cores have some other
+ * goodies, as preample transmission, one insertion after 7 consecutive zeros
+ * and stuff like this which we make user selectable.
+ *
+ * II. DMA Controller and IRQs
+ * For maximum performance and least host processor load, the design of the
+ * DMA controller is descriptor orientated. For both, RX and TX channels
+ * descriptor "queues" are set up on device initialization. Each descriptor
+ * contains a link to its subsequent descriptor, a pointer to the buffer
+ * associated with it and the buffer's size. The buffer itself is _not_ part
+ * of the descriptor, but can be located anywhere else in address space.
+ * Thus, in TX case all we have to do when a packet to be sent arrives from
+ * L2, is painting out a descriptor (pointer to the sk_buf's data buffer,
+ * length of the frame and so on) and telling the DMAC to process it. We do
+ * not have to move the data around in memory. When the descriptor is finished
+ * (i.e. packet sent out completely or at least data completely in FIFO), the
+ * DMAC sets a flag (C) in the descriptor and issues an IRQ. We check the flag
+ * and if it is set, we can skb_free up the packet. Both descriptor queues (RX
+ * and TX) are organized circular with a user setable size and allocated
+ * statically at device initialization. As far as the RX queue ("ring") is
+ * concerned we also already allocate the sk_buffs associated with them.
+ * Whenever the DMAC processed a RX descriptor (thus "filled" the buffer
+ * associated with it) we release the buffer to L2 and allocate a new one.
+ * No copying. The structure of the RX descriptor chain never changes either.
+ * It stays the same since inititalization on device initialization and
+ * descriptor memory itself is only freed when the device destructor is called.
+ * The fact that both descriptor queues are kept statically has two advantages:
+ * It is save, because the DMAC can not "escape" due to a race condition and
+ * mess up our memory and it works around a hardware bug in the DSCC-4.
+ * A few words on linux mm:
+ * When a device driver allocates memory using functions like malloc() or
+ * alloc_skb(), the returned address pointers are pointers to virtual memory.
+ * In case of access to this memory, the MMU, as part of the CPU translates
+ * the virtual addresses to physical ones, which are e.g. used to drive the
+ * RAM address bus lines. If a PCI bus master accesses the same memory, it
+ * needs to know the right address from _its_ point of view, the so-called
+ * "bus" address. On most architectures this is the same as the physical
+ * address. We use the functions virt_to_bus() and bus_to_virt() to translate
+ * them. The descriptor structures contain both types, just to make the code
+ * faster and more readable. The symbol names for "bus"-pointers end on
+ * "addr", for example rx_desc_t.next --(virt-to-bus)--> rx_desc_t.nextptr.
+ * When we accessed "common" memory (i.e. descriptor or buffer memory) we
+ * issue a flush_cache_all() due to the fact that some architectures (not PC)
+ * don't keep memory caches consistent on DMAs. Where it isn't apropriate gcc
+ * will optimize it away for us.
+ * Another word on IRQ management:
+ * The DMAC is not only responsible for moving around network data from/to
+ * the SCC cores, but also maintains 10 so-called "interrupt queues" (IQs).
+ * These are intended to help speeding up operation and minimize load on the
+ * host CPU. There is the configuration queue (IQCFG) which is responsible
+ * for answers to DMAC configuration commands, the peripheral queue (IQPER),
+ * which cares about interrupt sources on the local bus, SSC (not SCC!) or GPP
+ * if enabled, and one TX and one RX queue per channel (IQRX and IQTX
+ * respectively), which are responsible for RX and TX paths and not only
+ * indicate DMAC exceptions (packet finished etc.) but also SCC exceptions
+ * (FIFO over/underrun, frame length exceeded etc.). Each element in the
+ * queues is a dword. The queues are "naturally" organized circular as well.
+ * Naturally means, that there is no such thing as a "next pointer" as in
+ * the frame descriptors, but that we tell the DMAC the length of each queue
+ * (user configurable in 32 dword-steps) and it does the wrap-around
+ * automagically. Whenever an element is added to a queue an IRQ is issued.
+ * The IRQ handler acks all interrupts by writing back the global status
+ * register (GSTAR) and starts processing ALL queues, independent of who
+ * caused the IRQ.
+ *
+ * III. General Purpose Port (GPP)
+ * The general purpose port is used for status LED connection. We support
+ * only 2 LEDs per port. These can be controlled with an ioctl(). We do not
+ * care about it, this ought to be done by a user-space daemon. The SSC port
+ * is not used. The LBI can be configured with the global settings and
+ * controlled by an own ioctl(). We don't care any more.
+ *
+ * IV. Configuration
+ * We divide configuration into global (i.e. concerning all ports, chipwide)
+ * and local. We have one template for each, chipcfg_default and devcfg_default
+ * which is hardcoded and never changes. On module load it is copied for each
+ * chip and each device respectively (chipctl_t.cfg and devctl_t.cfg). The
+ * silicon is initialized with these values only in chip_open() and
+ * device_open() and the structures themselves can only be changed when the
+ * corresponding interface (or all interfaces for global config) is down.
+ * Changes take effect when the interface is brought up the next time.
+ *
+ * V. Initialization
+ * When module_init is called, the PCI driver already took care of assigning
+ * two pieces of memory space and an IRQ to each board. On module load we do
+ * nothing else than building up our internal structures (devctl_t and
+ * chipctl_t), grabbing the interface names and registering them with the
+ * network subsystem. Chip_open() and device_open() are called only upon uping
+ * a device and perform IRQ grabbing, memory mapping and allocation and
+ * hardware initialization.
+ *
+ * VI. RX Handling
+ * When a frame is received completely, the C flag in the corresponding
+ * descriptor is set by the DSCC-4, an interrupt vector transfered to the
+ * channel's RX IQ, and the IRQ line asserted. The IRQ handler takes control
+ * of the system. The first step it performs is reading the global status
+ * register and writing it back, thus ack'ing the IRQ. Then it is analyzed
+ * bit-by-bit to find out where it originated from. The channel's RX IQ
+ * is examined and the function pciscc_isr_receiver() called for the
+ * corresponding port. This functions processes the rx descriptor queue
+ * starting with the element (devctl_t.dq_rx_next) following the last element
+ * processed the last time the function was called. All descriptors with the
+ * C-flag ("C"omplete) set are processed. And at the end the dq_rx_next pointer
+ * is updated to the next to last element processed. During "processing" at
+ * first two pieces of information are examined: The status field of the
+ * descriptor, mainly containing the length of the received frame and a flag
+ * telling us whether the frame reception was aborted or not (RA), which
+ * was written by the DMAC and the so-called Receive Data Section Status Byte
+ * (RSTA) which was appended to the end of the data buffer by the channel's
+ * SCC core. Both are checked and if they yield a valid frame and we success-
+ * fully allocated a new skb we remove the old one from the descriptor and
+ * hand it off to pciscc_rx_skb() which paints out some of the skb's members
+ * and fires it up to the MAC layer. The descriptor's fields are re-initialized
+ * anyway to prepare it for the next reception.
+ * After all complete descriptors were processed we must tell the DMAC that the
+ * last ready-to-fill descriptor (LRDA, Last Receive Descriptor Address) is the
+ * one pre-previous to the last one processed. In fact experiments show that
+ * normally this is not neccessary since we get an interrupt for _every_
+ * received frame so we can re-prepare the descriptor then. This is just to
+ * prevent the DMAC from "running around" uncontrolled in the circular
+ * structure, eventually losing sync with the devctl_t.dq_rx_next pointer in
+ * case of _very_ high bus/interrupt latency on heavy system load conditions.
+ *
+ * VII. TX Handling
+ * We assume we are in half duplex mode with software txdelay since everything
+ * else is a lot easier. The current TX state is kept track of in the
+ * devctl_t.txstate variable. When L2 hands us a frame, the first thing we
+ * do is check whether there is a free TX descriptor ready in the device's
+ * ring. The element dq_tx_last is a pointer to the last descriptor which
+ * is currently to be sent or already is sent. Thus, the element next to this
+ * element is the one we would like to fill. The variable dq_tx_cleanup
+ * of the device control structure tells us the next element to be "cleaned
+ * up" (free skb etc.) after transmission. If it points not to the element
+ * we like to fill, the element we like to fill is free. If it does, we must
+ * discard the new frame due to the full descriptor queue. Now that we are
+ * sure to have found our descriptor candidate will can paint it out, but
+ * we can't start transmission yet. We check what state the TX is in. If
+ * it is idle, we load the timer with the txdelay value and start it. Of cause
+ * we also need to manually assert the RTS line. If we were already
+ * transmitting, or sending trailing flags we immediately schedule the
+ * descriptor for process by the DMAC by pointing LTDA (Last Transmit
+ * Descriptor Address) to it. In the first case, the txdelay-timer will run
+ * out after the txdelay period is over, causing an IRQ. The interrupt handler
+ * can now schedule the transmission. During transmission we use the timer
+ * as some kind of software watchdog. When transmission finishes, we again
+ * program and start the timer, this time with the txtail value. The txstate
+ * variable is set to TX_TAIL and as soon as the timer runs out we can
+ * deassert the RTS line and reset txstate to TX_IDLE. The frame(s) were
+ * (hopefully) transmitted successfully. Anyway, each transmitted frame
+ * causes a ALLS TX IRQ. The only thing we need to do then, is to call
+ * tx_cleanup() which walks through the tx descriptor ring, cleaning up
+ * all finished entries (freeing up the skbs and things like this).
+ *
+ *****************************************************************************
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/ioctl.h>
+#include <linux/delay.h>
+#include <net/ax25.h>
+#include <net/ax25dev.h>
+#include <linux/pciscc4.h>
+
+/* ------------------------------------------------------------------------- */
+/* user serviceable area or module parameter */
+
+static int xtal = 29491200;	/* crystal oscillator frequency in Hz (29.4912 MHz) */
+static int probebit = 9600;	/* number of bit-clock cycles sampled for the TX bitrate probe */
+static int txtimeout= 30;	/* TX watchdog - timeout in seconds */
+#define PCISCC_DEBUG		/* enable debugging (regdump/queuedump helpers, extra printk) */
+#undef PCISCC_VDEBUG		/* enable verbose buffer debugging */
+#define FULLDUP_PTT		/* pull PTT on full duplex */
+
+/* ------------------------------------------------------------------------- */
+/* global variables */
+
+static const char PCISCC_VERSION[] = "$Id: pciscc4.c,v 1.60 2000/02/13 19:18:41 dg1kjd Exp $";
+
+static int chipcnt;			/* count of chips handled by the driver (maintained outside this chunk) */
+static struct devctl_t *devctl[64];	/* per-device (channel) control structures - 16 chips x 4 channels */
+static struct chipctl_t *chipctl[16];	/* per-chip control structures */
+static struct tq_struct txto_task;	/* task queue entry - presumably the TX timeout bottom half (cf. txtimeout); used outside this chunk */
+static unsigned char *dummybuf;		/* scratch buffer (allocated/used outside this chunk) */
+
+/*
+ * Template for the global (chip-wide) configuration.  One copy per chip
+ * is made into chipctl_t.cfg on driver init (see section IV in the file
+ * header); the silicon is programmed from that copy in chip_open().
+ */
+static struct chipcfg_t chipcfg_default = {
+	0,	/* LBI mode */
+	1,	/* oscillator power (0 sets OSCPD in GMODE, cf. chip_open()) */
+	16,	/* number of RX descriptors and buffers per channel */
+	128,	/* number of TX descriptors per channel */
+	32,	/* interrupt queue length in dwords - programmed in iqlen/32 steps, cf. chip_open() */
+	-1,	/* priority channel, -1 = none (SPRI bit in GMODE) */
+	16,	/* RX main fifo DMA threshold (written to FIFOCR3) */
+	0	/* endian swap for data buffers (ENDIAN bit in GMODE) */
+};
+
+/*
+ * Template for the per-channel configuration.  One copy per device is
+ * made into devctl_t.cfg on driver init (see section IV in the file
+ * header); applied to the hardware when the interface is brought up.
+ */
+static struct devcfg_t devcfg_default = {
+	CFG_CHCODE_NRZ,		/* channel coding */
+	CFG_CM_DF9IC,		/* clocking mode */
+	CFG_DUPLEX_HALF,	/* duplex mode */
+	0,			/* DPLL */
+	10,			/* BRG "M" */
+	0,			/* BRG "N" (N+1)*2^M; M=10, N=0 => 9600 baud */
+	0,			/* clock-out enable */
+	0,			/* data inversion */
+	CFG_TXDDRIVE_TP,	/* TxD driver type */
+	0,			/* carrier detect inversion */
+	0,			/* test loop */
+	32,			/* TX main fifo share */
+	24,			/* TX main fifo refill threshold in dw */
+	20,			/* TX main fifo forward */
+	24,			/* RX channel fifo forward threshold */
+	CFG_TXDEL_SOFT,		/* TX-delay mode */
+	2000,			/* software TX-delay in bitclk cycles => ~208 ms @ 9600 baud */
+	400,			/* TX-tail in bitclk cycles, see above */
+	1,			/* shared flags */
+	0,			/* CRC mode */
+	0,			/* preamble repetitions */
+	0,			/* preamble */
+	0			/* HDLC extensions */
+};
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef PCISCC_DEBUG
+/* dump DMAC's register to syslog */
+/*
+ * Dump the per-channel DMAC descriptor registers of all four channels
+ * to syslog: BRDA/LRDA/FRDA (RX descriptor addresses), BTDA/LTDA/FTDA
+ * (TX descriptor addresses) and the channel CFG register.  The "B0x"
+ * prefix marks bus addresses (cf. the virt_to_bus() discussion in the
+ * file header).  Debug aid, compiled in only with PCISCC_DEBUG.
+ */
+static void pciscc_dmac_regdump(struct chipctl_t *cctlp)
+{
+	printk(KERN_INFO "PCISCC: ---------- begin DMAC register dump ----------\n");
+	printk(KERN_INFO "CH BRDA LRDA FRDA BTDA LTDA FTDA CFG\n");
+	printk(KERN_INFO " 0 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n",
+	       (unsigned long) readl(cctlp->io_base+CH0BRDA),
+	       (unsigned long) readl(cctlp->io_base+CH0LRDA),
+	       (unsigned long) readl(cctlp->io_base+CH0FRDA),
+	       (unsigned long) readl(cctlp->io_base+CH0BTDA),
+	       (unsigned long) readl(cctlp->io_base+CH0LTDA),
+	       (unsigned long) readl(cctlp->io_base+CH0FTDA),
+	       (unsigned long) readl(cctlp->io_base+CH0CFG));
+	printk(KERN_INFO " 1 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n",
+	       (unsigned long) readl(cctlp->io_base+CH1BRDA),
+	       (unsigned long) readl(cctlp->io_base+CH1LRDA),
+	       (unsigned long) readl(cctlp->io_base+CH1FRDA),
+	       (unsigned long) readl(cctlp->io_base+CH1BTDA),
+	       (unsigned long) readl(cctlp->io_base+CH1LTDA),
+	       (unsigned long) readl(cctlp->io_base+CH1FTDA),
+	       (unsigned long) readl(cctlp->io_base+CH1CFG));
+	printk(KERN_INFO " 2 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n",
+	       (unsigned long) readl(cctlp->io_base+CH2BRDA),
+	       (unsigned long) readl(cctlp->io_base+CH2LRDA),
+	       (unsigned long) readl(cctlp->io_base+CH2FRDA),
+	       (unsigned long) readl(cctlp->io_base+CH2BTDA),
+	       (unsigned long) readl(cctlp->io_base+CH2LTDA),
+	       (unsigned long) readl(cctlp->io_base+CH2FTDA),
+	       (unsigned long) readl(cctlp->io_base+CH2CFG));
+	printk(KERN_INFO " 3 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n",
+	       (unsigned long) readl(cctlp->io_base+CH3BRDA),
+	       (unsigned long) readl(cctlp->io_base+CH3LRDA),
+	       (unsigned long) readl(cctlp->io_base+CH3FRDA),
+	       (unsigned long) readl(cctlp->io_base+CH3BTDA),
+	       (unsigned long) readl(cctlp->io_base+CH3LTDA),
+	       (unsigned long) readl(cctlp->io_base+CH3FTDA),
+	       (unsigned long) readl(cctlp->io_base+CH3CFG));
+	printk(KERN_INFO "PCISCC: ---------- end DMAC register dump ----------\n");
+	return;
+}
+#endif /* PCISCC_DEBUG */
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef PCISCC_DEBUG
+/* dump descriptor rings to syslog */
+/*
+ * Dump the RX and TX descriptor rings of one device to syslog.
+ * "V0x" marks virtual addresses, "B0x" bus addresses (cf. the
+ * virt_to_bus() discussion in the file header).  Both walks start at
+ * the ring head and stop when they loop back to it; the i<256 guard
+ * bounds the walk in case a corrupted chain never returns to its head.
+ * Debug aid, compiled in only with PCISCC_DEBUG.
+ */
+static void pciscc_queuedump(struct devctl_t *dctlp)
+{
+	unsigned int i;
+	struct rx_desc_t *rdp;
+	struct tx_desc_t *tdp;
+
+	if (!dctlp) return;
+	printk(KERN_INFO "PCISCC: ---------- begin queue dump RX iface %s ----------\n", dctlp->name);
+	printk(KERN_INFO "# &desc &next &prev &nextptr &dataptr &skb &feptr flags result\n");
+	/* circular walk: terminate on wrap-around to dq_rx, NULL link, or 256 entries */
+	for (rdp=dctlp->dq_rx,i=0; ((rdp!=dctlp->dq_rx) || (i==0)) && (i<256); rdp=rdp->next,i++) {
+		if (!rdp) break;
+		printk(KERN_INFO "%3u V0x%08lx V0x%08lx V0x%08lx B0x%08lx B0x%08lx V0x%08lx B0x%08lx 0x%08lx 0x%08lx\n",
+		       i,
+		       (unsigned long) rdp,
+		       (unsigned long) rdp->next,
+		       (unsigned long) rdp->prev,
+		       (unsigned long) rdp->nextptr,
+		       (unsigned long) rdp->dataptr,
+		       (unsigned long) rdp->skb,
+		       (unsigned long) rdp->feptr,
+		       (unsigned long) rdp->flags,
+		       (unsigned long) rdp->result);
+	}
+	printk(KERN_INFO "PCISCC: ---------- end queue dump RX iface %s ----------\n", dctlp->name);
+	printk(KERN_INFO "PCISCC: ---------- begin queue dump TX iface %s ----------\n", dctlp->name);
+	printk(KERN_INFO "%s->dq_tx=V0x%08lx %s->dq_tx_cleanup=V0x%08lx %s->dq_tx_last=V0x%08lx.\n",
+	       dctlp->name, (unsigned long) dctlp->dq_tx,
+	       dctlp->name, (unsigned long) dctlp->dq_tx_cleanup,
+	       dctlp->name, (unsigned long) dctlp->dq_tx_last);
+	printk(KERN_INFO "# &desc &next &prev &nextptr &dataptr &skb flags result\n");
+	/* same bounded circular walk for the TX ring */
+	for (tdp=dctlp->dq_tx,i=0; ((tdp!=dctlp->dq_tx) || (i==0)) && (i<256); tdp=tdp->next,i++) {
+		if (!tdp) break;
+		printk(KERN_INFO "%3u V0x%08lx V0x%08lx V0x%08lx B0x%08lx B0x%08lx V0x%08lx 0x%08lx 0x%08lx\n",
+		       i,
+		       (unsigned long) tdp,
+		       (unsigned long) tdp->next,
+		       (unsigned long) tdp->prev,
+		       (unsigned long) tdp->nextptr,
+		       (unsigned long) tdp->dataptr,
+		       (unsigned long) tdp->skb,
+		       (unsigned long) tdp->flags,
+		       (unsigned long) tdp->result);
+	}
+	printk(KERN_INFO "PCISCC: ---------- end queue dump TX iface %s ----------\n", dctlp->name);
+	return;
+}
+#endif /* PCISCC_DEBUG */
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * initialize chip, called when first interface of a chip is brought up
+ * action sequency was carefully chosen, don't mess with it
+ */
+/*
+ * Initialize one DSCC-4 chip; called when the first interface of the
+ * chip is brought up.  Checks the requested TX main-FIFO partitioning,
+ * maps the register and LBI windows, enables bus mastering, grabs the
+ * (shared) IRQ, allocates the peripheral and configuration interrupt
+ * queues and programs the global registers.  The register write
+ * sequence was carefully chosen - do not reorder it.
+ * Returns 0 on success or a negative errno; on any failure every
+ * resource acquired so far is released again via pciscc_chip_close().
+ */
+static int pciscc_chip_open(struct chipctl_t *cctlp)
+{
+	unsigned long start_time;
+	volatile unsigned long gcc_optimizer_safe;	/* volatile keeps the mailbox busy-wait below from being optimized away */
+	int i;
+	int fifo_sum;
+	unsigned char pci_latency;	/* u8 - matches pcibios_read_config_byte() and avoids signed-char surprises in printk */
+
+	if (cctlp->initialized) return 0;	/* chip already up - nothing to do */
+#ifdef PCISCC_DEBUG
+	printk(KERN_INFO "PCISCC: chip_open().\n");
+#endif
+	/*
+	 * make sure FIFO space requested by all devices is not larger than
+	 * what's available on the silicon (128 dwords of TX main fifo).
+	 */
+	cli();
+	for (i=0, fifo_sum=0; i<4; i++) {
+		fifo_sum += cctlp->device[i]->cfg.mfifo_tx_p;
+	}
+	sti();
+	if (fifo_sum > 128) {
+		printk(KERN_ERR "PCISCC: chip_open(): TX main_fifo misconfiguration.\n");
+		return -EAGAIN;
+	}
+	/* map control and LBI memory */
+	cctlp->io_base=ioremap_nocache(cctlp->pcidev->base_address[0], 2*1024);
+	cctlp->lbi_base=ioremap_nocache(cctlp->pcidev->base_address[1], 64*1024);
+	/* enable bus mastering */
+	pci_set_master(cctlp->pcidev);
+	/* tweak latency */
+	pcibios_read_config_byte(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_LATENCY_TIMER, &pci_latency);
+#ifdef PCISCC_DEBUG
+	printk(KERN_INFO "PCISCC: chip_open(): Old PCI latency timer: %u.\n", (unsigned int) pci_latency);
+#endif
+	pcibios_write_config_byte(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_LATENCY_TIMER, 255);
+	/* request IRQ (shared - SA_SHIRQ) */
+	if (request_irq(cctlp->pcidev->irq, pciscc_isr, SA_SHIRQ, "pciscc", (void *) cctlp)) {
+		printk(KERN_ERR "PCISCC: chip_open(): Could not get IRQ #%u.\n", cctlp->pcidev->irq);
+		pciscc_chip_close(cctlp);
+		return -EAGAIN;
+	}
+	cctlp->irq=cctlp->pcidev->irq;
+	/* allocate and initialize peripheral queue */
+	if (!(cctlp->iq_per = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) {
+		printk(KERN_ERR "PCISCC: chip_open(): Out of memory allocating peripheral interrupt queue.\n");
+		pciscc_chip_close(cctlp);	/* undo IRQ grab and ioremaps before bailing out */
+		return -ENOMEM;
+	}
+	memset(cctlp->iq_per, 0, cctlp->cfg.iqlen*4);
+	cctlp->iq_per_next = cctlp->iq_per;
+	/* configuration interrupt queue */
+	if (!(cctlp->iq_cfg = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) {
+		printk(KERN_ERR "PCISCC: chip_open(): Out of memory allocating configuration interrupt queue.\n");
+		pciscc_chip_close(cctlp);	/* also frees iq_per allocated above */
+		return -ENOMEM;
+	}
+	memset(cctlp->iq_cfg, 0, cctlp->cfg.iqlen*4);
+	cctlp->iq_cfg_next = cctlp->iq_cfg;
+	/* global hardware initialization */
+	cli();
+	writel((cctlp->cfg.endianswap ? ENDIAN : 0)
+	       | (cctlp->cfg.prichan != -1 ? (SPRI | (cctlp->cfg.prichan * CHN)) : 0)
+	       | (4 * PERCFG)
+	       | (3 * LCD)
+	       | CMODE
+	       | (cctlp->cfg.oscpwr ? 0 : OSCPD), cctlp->io_base+GMODE);
+	writel(virt_to_bus(cctlp->iq_per), cctlp->io_base+IQPBAR);
+	writel(virt_to_bus(cctlp->iq_cfg), cctlp->io_base+IQCFGBAR);
+	writel((((cctlp->cfg.iqlen/32)-1)*IQCFGLEN) | (((cctlp->cfg.iqlen/32)-1)*IQPLEN), cctlp->io_base+IQLENR2);
+	/* distribute the TX main fifo among the four channels */
+	writel(((cctlp->device[0]->cfg.mfifo_tx_p/4)*TFSIZE0)
+	       | ((cctlp->device[1]->cfg.mfifo_tx_p/4)*TFSIZE1)
+	       | ((cctlp->device[2]->cfg.mfifo_tx_p/4)*TFSIZE2)
+	       | ((cctlp->device[3]->cfg.mfifo_tx_p/4)*TFSIZE3), cctlp->io_base+FIFOCR1);
+	writel(((cctlp->device[0]->cfg.mfifo_tx_r/4)*TFRTHRES0)
+	       | ((cctlp->device[1]->cfg.mfifo_tx_r/4)*TFRTHRES1)
+	       | ((cctlp->device[2]->cfg.mfifo_tx_r/4)*TFRTHRES2)
+	       | ((cctlp->device[3]->cfg.mfifo_tx_r/4)*TFRTHRES3)
+	       | M4_0 | M4_1 | M4_2 | M4_3, cctlp->io_base+FIFOCR2);
+	writel(cctlp->cfg.mfifo_rx_t, cctlp->io_base+FIFOCR3);
+	writel(((cctlp->device[0]->cfg.mfifo_tx_f)*TFFTHRES0)
+	       | ((cctlp->device[1]->cfg.mfifo_tx_f)*TFFTHRES1)
+	       | ((cctlp->device[2]->cfg.mfifo_tx_f)*TFFTHRES2)
+	       | ((cctlp->device[3]->cfg.mfifo_tx_f)*TFFTHRES3), cctlp->io_base+FIFOCR4);
+	/* mask out all DMAC interrupts */
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH0CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH1CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH2CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH3CFG);
+	/* all SCC cores in reset state */
+	writel(0x00000000, cctlp->io_base+SCCBASE[0]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[1]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[2]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[3]+CCR0);
+	/* mask out all SCC interrupts */
+	writel(0xffffffff, cctlp->io_base+SCCBASE[0]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[1]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[2]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[3]+IMR);
+	/* peripheral configuration */
+	writel((BTYP*3), cctlp->io_base+LCONF);
+	writel(0x00000000, cctlp->io_base+SSCCON);
+	writel(0x00000000, cctlp->io_base+SSCIM);
+	writel(0x000000ff, cctlp->io_base+GPDIR);
+	writel(0x00000000, cctlp->io_base+GPDATA);
+	writel(0x00000000, cctlp->io_base+GPIM);
+	sti();
+	/* initialize configuration and peripheral IQs; the ISR reports the outcome via cctlp->mailbox */
+	start_time = jiffies;
+	cctlp->mailbox = MAILBOX_NONE;
+	writel(CFGIQCFG | CFGIQP | AR, cctlp->io_base+GCMDR);
+	do {
+		gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
+	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
+	switch (cctlp->mailbox) {	/* mailbox was written by isr */
+	case MAILBOX_OK:
+#ifdef PCISCC_DEBUG
+		printk(KERN_INFO "PCISCC: chip_open(): Success on IQ config request.\n");
+#endif
+		break;
+	case MAILBOX_NONE:
+		printk(KERN_ERR "PCISCC: chip_open(): Timeout on IQ config request. Sync HDDs and hardware-reset NOW!\n");
+		pciscc_chip_close(cctlp);
+		return -EIO;
+	case MAILBOX_FAILURE:
+		printk(KERN_ERR "PCISCC: chip_open(): Failure on IQ config request. Sync HDDs and hardware-reset NOW!\n");
+		pciscc_chip_close(cctlp);
+		return -EIO;
+	}
+	cctlp->initialized=1;
+	return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * close chip, called when last device (channel) of a chip was closed.
+ * don't mess up.
+ */
+/*
+ * Shut one DSCC-4 chip down; called when the last device (channel) of
+ * the chip was closed, and also used as the error-unwind path from
+ * chip_open() (every free below is guarded, so a partially initialized
+ * chip is handled too).  Masks all interrupt sources, puts the SCC
+ * cores into reset, then releases the IQs, the IRQ and the register
+ * mappings.  Refuses to run while any channel is still up.
+ * The register write sequence is order-sensitive - don't mess up.
+ */
+static int pciscc_chip_close(struct chipctl_t *cctlp)
+{
+	if (cctlp->usecnt) {
+		printk(KERN_ERR "PCISCC: chip_close() called while channels active.\n");
+		return -EBUSY;
+	}
+#ifdef PCISCC_DEBUG
+	printk(KERN_INFO "PCISCC: chip_close().\n");
+#endif
+	/* global configuration to reset state (OSCPD set: oscillator powered down) */
+	cli();
+	writel((cctlp->cfg.endianswap ? ENDIAN : 0)
+	       | (4 * PERCFG)
+	       | (3 * LCD)
+	       | CMODE
+	       | OSCPD, cctlp->io_base+GMODE);
+	/* mask all DMAC interrupts */
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH0CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH1CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH2CFG);
+	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH3CFG);
+	/* SCC cores to reset state */
+	writel(0x00000000, cctlp->io_base+SCCBASE[0]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[1]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[2]+CCR0);
+	writel(0x00000000, cctlp->io_base+SCCBASE[3]+CCR0);
+	/* mask all SCC interrupts */
+	writel(0xffffffff, cctlp->io_base+SCCBASE[0]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[1]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[2]+IMR);
+	writel(0xffffffff, cctlp->io_base+SCCBASE[3]+IMR);
+	sti();
+	/* free IQs, free IRQ, unmap control address space; each pointer is
+	 * cleared after release so a repeated call is harmless */
+	if (cctlp->iq_per) {
+		kfree(cctlp->iq_per);
+		cctlp->iq_per=0;
+		cctlp->iq_per_next=0;
+	}
+	if (cctlp->iq_cfg) {
+		kfree(cctlp->iq_cfg);
+		cctlp->iq_cfg=0;
+		cctlp->iq_cfg_next=0;
+	}
+	if (cctlp->irq) {
+		free_irq(cctlp->irq, (void *) cctlp);
+		cctlp->irq=0;
+	}
+	if (cctlp->io_base) {
+		iounmap(cctlp->io_base);
+		cctlp->io_base=0;
+	}
+	if (cctlp->lbi_base) {
+		iounmap(cctlp->lbi_base);
+		cctlp->lbi_base=0;
+	}
+	cctlp->initialized=0;
+	return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * open one channel, chip must have been initialized by chip_open() before.
+ * the sequence of actions done here was carefully chosen, don't mess with
+ * it unless you know exactly what you are doing...
+ */
+static int pciscc_channel_open(struct devctl_t *dctlp)
+{
+ struct chipctl_t *cctlp = dctlp->chip;
+ struct net_device *dev = &dctlp->dev;
+ int channel = dctlp->channel;
+ struct rx_desc_t *rdp, *last_rdp;
+ struct tx_desc_t *tdp, *last_tdp;
+ unsigned long l;
+ unsigned long start_time;
+ volatile unsigned long gcc_optimizer_safe;
+ int i;
+ unsigned char *data;
+
+ if (dctlp->dev.start) return 0;
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open().\n");
+#endif
+ /* allocate and initialize RX and TX IQs */
+ if (!(dctlp->iq_rx = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) {
+ printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating rx interrupt queue.\n");
+ return -ENOMEM;
+ }
+ memset(dctlp->iq_rx, 0, cctlp->cfg.iqlen*4);
+ dctlp->iq_rx_next = dctlp->iq_rx;
+ if (!(dctlp->iq_tx = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) {
+ printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating tx interrupt queue.\n");
+ return -ENOMEM;
+ }
+ memset(dctlp->iq_tx, 0, cctlp->cfg.iqlen*4);
+ dctlp->iq_tx_next = dctlp->iq_tx;
+ cli();
+ writel(0, cctlp->io_base+SCCBASE[channel]+CCR1); /* stop SCC */
+ writel(0, cctlp->io_base+SCCBASE[channel]+CCR2);
+ writel(0, cctlp->io_base+SCCBASE[channel]+CCR0);
+ writel(0, cctlp->io_base+SCCBASE[channel]+TIMR);
+ /* set IQ lengths and base addresses */
+ l = readl(cctlp->io_base+IQLENR1);
+ switch (channel) {
+ case 0: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG);
+ writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC0RXBAR);
+ writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC0TXBAR);
+ l &= 0x0fff0fff;
+ l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC0RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC0TXLEN);
+ break;
+ case 1: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG);
+ writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC1RXBAR);
+ writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC1TXBAR);
+ l &= 0xf0fff0ff;
+ l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC1RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC1TXLEN);
+ break;
+ case 2: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG);
+ writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC2RXBAR);
+ writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC2TXBAR);
+ l &= 0xff0fff0f;
+ l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC2RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC2TXLEN);
+ break;
+ case 3: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG);
+ writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC3RXBAR);
+ writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC3TXBAR);
+ l &= 0xfff0fff0;
+ l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC3RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC3TXLEN);
+ break;
+ }
+ writel(l, cctlp->io_base+IQLENR1);
+ sti();
+ start_time = jiffies;
+ cctlp->mailbox = MAILBOX_NONE;
+ writel(AR /* Action Request */
+ | (channel == 0 ? (CFGIQSCC0RX | CFGIQSCC0TX) : 0)
+ | (channel == 1 ? (CFGIQSCC1RX | CFGIQSCC1TX) : 0)
+ | (channel == 2 ? (CFGIQSCC2RX | CFGIQSCC2TX) : 0)
+ | (channel == 3 ? (CFGIQSCC3RX | CFGIQSCC3TX) : 0), cctlp->io_base+GCMDR);
+ do {
+ gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
+ } while (gcc_optimizer_safe); /* timeout 20 jiffies */
+ switch (cctlp->mailbox) { /* mailbox was written by isr */
+ case MAILBOX_OK:
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open(): Success on IQSCC config request.\n");
+#endif
+ break;
+ case MAILBOX_NONE:
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout on IQSCC config request. Sync HDDs and hardware-reset NOW!\n");
+ pciscc_channel_close(dctlp);
+ return -EIO;
+ case MAILBOX_FAILURE:
+ printk(KERN_ERR "PCISCC: channel_open(): Failure on IQSCC config request. Sync HDDs and hardware-reset NOW!\n");
+ pciscc_channel_close(dctlp);
+ return -EIO;
+ }
+ /* initialize channel's SCC core */
+ cli();
+ l = PU
+ | ((dctlp->cfg.coding == CFG_CHCODE_NRZ) ? 0*SC : 0)
+ | ((dctlp->cfg.coding == CFG_CHCODE_NRZI) ? 2*SC : 0)
+ | ((dctlp->cfg.coding == CFG_CHCODE_FM0) ? 4*SC : 0)
+ | ((dctlp->cfg.coding == CFG_CHCODE_FM1) ? 5*SC : 0)
+ | ((dctlp->cfg.coding == CFG_CHCODE_MANCH) ? 6*SC : 0)
+ | VIS
+ | ((dctlp->cfg.dpll & CFG_DPLL_PS) ? 0 : PSD)
+ | ((dctlp->cfg.clkout & CFG_TXTXCLK) ? TOE : 0)
+ | ((dctlp->cfg.clockmode == CFG_CM_G3RUH) ? SSEL : 0)
+ | ((dctlp->cfg.clockmode == CFG_CM_TCM3105) ? SSEL : 0)
+ | ((dctlp->cfg.clockmode == CFG_CM_HS) ? HS : 0)
+ | ((dctlp->cfg.clockmode == CFG_CM_DF9IC) ? 0*CM : 0) /* clockmode 0a */
+ | ((dctlp->cfg.clockmode == CFG_CM_G3RUH) ? 0*CM : 0) /* clockmode 0b */
+ | ((dctlp->cfg.clockmode == CFG_CM_TCM3105) ? 6*CM : 0) /* clockmode 6b */
+ | ((dctlp->cfg.clockmode == CFG_CM_HS) ? 4*CM : 0); /* clockmode 4 */
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR0);
+ l = (dctlp->cfg.datainv ? DIV : 0)
+ | ((dctlp->cfg.txddrive & CFG_TXDDRIVE_TP) ? ODS : 0)
+ | (dctlp->cfg.cdinv ? 0 : ICD)
+ | ((dctlp->cfg.clkout & CFG_TXRTS) ? TCLKO : 0)
+ | ((dctlp->cfg.txdelmode == CFG_TXDEL_HARD) ? 0 : FCTS)
+ | MDS1
+ | (dctlp->cfg.testloop ? TLP : 0)
+ | (dctlp->cfg.sharedflg ? SFLAG : 0)
+ | ((dctlp->cfg.crcmode & CFG_CRCMODE_RESET_0000) ? CRL : 0)
+ | ((dctlp->cfg.crcmode & CFG_CRCMODE_CRC32) ? C32 : 0);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ l = RAC
+ | ((dctlp->cfg.crcmode & CFG_CRCMODE_RXCD) ? DRCRC : 0)
+ | ((dctlp->cfg.crcmode & CFG_CRCMODE_RXCRCFWD) ? RCRC : 0)
+ | ((dctlp->cfg.crcmode & CFG_CRCMODE_TXNOCRC) ? XCRC : 0)
+ /* 1 and 2 dwords somehow don't work */
+ | ((dctlp->cfg.cfifo_rx_t == 1) ? 1*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 2) ? 1*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 4) ? 1*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 16) ? 2*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 24) ? 3*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 32) ? 4*RFTH : 0)
+ | ((dctlp->cfg.cfifo_rx_t == 60) ? 5*RFTH : 0)
+ | (dctlp->cfg.preamble*PRE)
+ | (dctlp->cfg.preamb_rpt ? EPT : 0)
+ | ((dctlp->cfg.preamb_rpt == 2) ? PRE0 : 0)
+ | ((dctlp->cfg.preamb_rpt == 4) ? PRE1 : 0)
+ | ((dctlp->cfg.preamb_rpt == 8) ? PRE0|PRE1 : 0)
+ | ((dctlp->cfg.hdlcext & CFG_HDLCEXT_ONEFILL) ? 0 : ITF)
+ | ((dctlp->cfg.hdlcext & CFG_HDLCEXT_ONEINS) ? OIN : 0);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR2);
+ writel((dctlp->cfg.brate_m*BRM) | (dctlp->cfg.brate_n*BRN), cctlp->io_base+SCCBASE[channel]+BRR);
+ writel(RCE | (dctlp->dev.mtu*RL), cctlp->io_base+SCCBASE[channel]+RLCR);
+ /*
+ * all sent | tx device underrun | timer int | tx message repeat |
+ * tx pool ready | rx device overflow | receive FIFO overflow |
+ * frame length exceeded => interrupt mask register
+ */
+ writel(~(ALLS | XDU | TIN | XMR | XPR | RDO | RFO | FLEX), cctlp->io_base+SCCBASE[channel]+IMR);
+ sti();
+ /* wait until command_executing (CEC) is clear */
+ start_time=jiffies;
+ do {
+ l=readl(cctlp->io_base+SCCBASE[channel]+STAR);
+ gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC);
+ } while (gcc_optimizer_safe);
+ if (l & CEC) {
+ /* not ready, but we will execute reset anyway */
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout waiting for SCC being ready for reset.\n");
+ }
+ /* execute channel's SCC core RX and TX reset */
+ writel(RRES | XRES, cctlp->io_base+SCCBASE[channel]+CMDR);
+ start_time = jiffies;
+ dctlp->tx_mailbox = 0xffffffff;
+ do {
+ gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff);
+ } while (gcc_optimizer_safe); /* timeout 20 jiffies */
+ switch (dctlp->tx_mailbox & 0x03ffffff) { /* mailbox was written by isr */
+ case 0x02001000: /* SCC XPR interrupt received */
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open(): Success on SCC reset.\n");
+#endif
+ break;
+ case 0xffffffff:
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout on SCC reset. Clocking problem?\n");
+ break;
+ default:
+ printk(KERN_ERR "PCISCC: channel_open(): Failure on SCC reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox);
+ break;
+ }
+ if (!(dctlp->tx_bitrate=pciscc_probe_txrate(dctlp))) dctlp->tx_bitrate=9600;
+ /*
+ * Prepare circular RX and TX descriptor queues ("FIFO" rings)
+ * Attention:
+ * This beast gets _very_ angry if you try to hand it a
+ * descriptor with a data length of 0. In fact it crashes
+ * the system by asserting /SERR or something.
+ */
+ cli();
+ rdp = last_rdp = NULL;
+ for (i=0; i<cctlp->cfg.rxbufcnt; i++, last_rdp=rdp) {
+ if (!(rdp=kmalloc(sizeof(struct rx_desc_t), GFP_DMA | GFP_KERNEL))) {
+ printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating rx descriptor chain.\n");
+ sti();
+ pciscc_channel_close(dctlp);
+ return -ENOMEM;
+ }
+ memset(rdp, 0, sizeof(struct rx_desc_t));
+ if (i==0) {
+ dctlp->dq_rx=rdp; /* queue (ring) "head" */
+ } else {
+ rdp->prev=last_rdp;
+ last_rdp->next=rdp;
+ last_rdp->nextptr=(void *) virt_to_bus(rdp);
+ }
+ if (!(rdp->skb=alloc_skb(dctlp->dev.mtu+10+SKB_HEADROOM, GFP_DMA | GFP_KERNEL))) {
+ printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating socket buffers.\n");
+ sti();
+ pciscc_channel_close(dctlp);
+ return -ENOMEM;
+ }
+ skb_reserve(rdp->skb, SKB_HEADROOM);
+ rdp->dataptr=(void *) virt_to_bus(data=skb_put(rdp->skb, dctlp->dev.mtu+10)); /* we will skb_trim() it after */
+ rdp->flags=(dctlp->dev.mtu*NO); /* reception when we know frame length */
+ }
+ rdp->next=dctlp->dq_rx; /* close ring structure */
+ rdp->nextptr=(void *) virt_to_bus(dctlp->dq_rx);
+ dctlp->dq_rx->prev=rdp;
+ dctlp->dq_rx_next=dctlp->dq_rx; /* first descriptor to be processed = "first" descriptor in chain */
+ /* TX queue */
+ tdp = last_tdp = NULL;
+ for (i=0; i<cctlp->cfg.txbufcnt; i++, last_tdp=tdp) {
+ if (!(tdp=kmalloc(sizeof(struct tx_desc_t), GFP_DMA | GFP_KERNEL))) {
+ printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating tx descriptor chain.\n");
+ sti();
+ pciscc_channel_close(dctlp);
+ return -ENOMEM;
+ }
+ memset(tdp, 0, sizeof(struct tx_desc_t));
+ if (i==0) {
+ dctlp->dq_tx=tdp;
+ } else {
+ tdp->prev=last_tdp;
+ last_tdp->next=tdp;
+ last_tdp->nextptr=(void *) virt_to_bus(tdp);
+ }
+ tdp->skb=NULL;
+ tdp->dataptr=(void *) virt_to_bus(dummybuf);
+ tdp->flags=(8*NO) | FE;
+ }
+ tdp->next=dctlp->dq_tx; /* close ring structure */
+ tdp->nextptr=(void *) virt_to_bus(dctlp->dq_tx);
+ dctlp->dq_tx->prev=tdp;
+ dctlp->dq_tx_last=dctlp->dq_tx; /* last descriptor to be transmitted */
+ dctlp->dq_tx_cleanup=dctlp->dq_tx; /* first descriptor to be cleaned up after transmission */
+ flush_cache_all();
+ /* initialize DMAC channel's RX */
+ switch (channel) {
+ case 0: writel(IDR, cctlp->io_base+CH0CFG);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH0BRDA);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH0FRDA);
+ writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH0LRDA);
+ break;
+ case 1: writel(IDR, cctlp->io_base+CH1CFG);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH1BRDA);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH1FRDA);
+ writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH1LRDA);
+ break;
+ case 2: writel(IDR, cctlp->io_base+CH2CFG);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH2BRDA);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH2FRDA);
+ writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH2LRDA);
+ break;
+ case 3: writel(IDR, cctlp->io_base+CH3CFG);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH3BRDA);
+ writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH3FRDA);
+ writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH3LRDA);
+ break;
+ }
+ sti();
+ start_time=jiffies;
+ cctlp->mailbox=MAILBOX_NONE;
+ writel(AR, cctlp->io_base+GCMDR);
+ do {
+ gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
+ } while (gcc_optimizer_safe);
+ switch (cctlp->mailbox) { /* mailbox was written by isr */
+ case MAILBOX_OK:
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open(): Success on DMAC-RX config request.\n");
+#endif
+ dctlp->dmac_rx=DMAC_RX_INIT;
+ break;
+ case MAILBOX_NONE:
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout on DMAC-RX config request. Sync HDDs and hardware-reset NOW!\n");
+ break;
+ case MAILBOX_FAILURE:
+ printk(KERN_ERR "PCISCC: channel_open(): Failure on DMAC-RX config request. Sync HDDs and hardware-reset NOW!\n");
+ break;
+ }
+ /* mask all DMAC interrupts (needed) */
+ switch (channel) {
+ case 0: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG);
+ break;
+ case 1: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG);
+ break;
+ case 2: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG);
+ break;
+ case 3: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG);
+ break;
+ }
+ /* SCC core TX reset (again) */
+ start_time=jiffies;
+ do {
+ l=readl(cctlp->io_base+SCCBASE[channel]+STAR);
+ gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC);
+ } while (gcc_optimizer_safe);
+ if (l & CEC) {
+ /* not ready, but we will execute reset anyway */
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout waiting for SCC being ready for TX-reset.\n");
+ }
+ writel(XRES, cctlp->io_base+SCCBASE[channel]+CMDR);
+ start_time = jiffies;
+ dctlp->tx_mailbox = 0xffffffff;
+ do {
+ gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff);
+ } while (gcc_optimizer_safe); /* timeout 20 jiffies */
+ switch (dctlp->tx_mailbox & 0x03ffffff) { /* mailbox was written by isr */
+ case 0x02001000: /* SCC XPR interrupt received */
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open(): Success on SCC TX-reset.\n");
+#endif
+ break;
+ case 0xffffffff:
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout on SCC TX-reset. Clocking problem?\n");
+ break;
+ default:
+ printk(KERN_ERR "PCISCC: channel_open(): Failure on SCC TX-reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox);
+ break;
+ }
+ /*
+ * initialize DMAC's TX channel, FI must stay masked all the time
+ * even during operation, see device errata 03/99
+ */
+ switch (channel) {
+ case 0: writel(IDT | MTFI, cctlp->io_base+CH0CFG);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0BTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0FTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0LTDA);
+ break;
+ case 1: writel(IDT | MTFI, cctlp->io_base+CH1CFG);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1BTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1FTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1LTDA);
+ break;
+ case 2: writel(IDT | MTFI, cctlp->io_base+CH2CFG);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2BTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2FTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2LTDA);
+ break;
+ case 3: writel(IDT | MTFI, cctlp->io_base+CH3CFG);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3BTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3FTDA);
+ writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3LTDA);
+ break;
+ }
+ start_time=jiffies;
+ cctlp->mailbox=MAILBOX_NONE;
+ writel(AR, cctlp->io_base+GCMDR);
+ do {
+ gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
+ } while (gcc_optimizer_safe);
+ switch (cctlp->mailbox) { /* mailbox was written by isr */
+ case MAILBOX_OK:
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: channel_open(): Success on DMAC-TX config request.\n");
+#endif
+ dctlp->txstate=TX_IDLE;
+ break;
+ case MAILBOX_NONE:
+ printk(KERN_ERR "PCISCC: channel_open(): Timeout on DMAC-TX config request. Sync HDDs and hardware-reset NOW!\n");
+ break;
+ case MAILBOX_FAILURE:
+ printk(KERN_ERR "PCISCC: channel_open(): Failure on DMAC-TX config request. Sync HDDs and hardware-reset NOW!\n");
+ break;
+ }
+#ifdef FULLDUP_PTT
+ if (dctlp->cfg.duplex == CFG_DUPLEX_FULL) {
+ l = readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l |= RTS;
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ }
+#endif
+ flush_cache_all();
+#ifdef PCISCC_DEBUG
+ pciscc_dmac_regdump(cctlp);
+ pciscc_queuedump(dctlp);
+#endif
+ dctlp->chip->usecnt++;
+ dctlp->dev.start=1;
+ /* clear statistics */
+ mdelay(10);
+ memset(&dctlp->stats, 0, sizeof(struct net_device_stats));
+ /* some housekeeping */
+ pciscc_update_values(dev);
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* close one channel - don't mess with it either */
/*
 * Shut a single channel down: stop its timer, reset the SCC core,
 * reset the DMAC channel (observing the Rev. <= 2.1 errata), detach
 * its interrupt queues and free every descriptor, skb and IQ buffer.
 * Also called from channel_open() error paths, so every free below is
 * guarded and leaves the pointers NULL for idempotence.
 */
static void pciscc_channel_close(struct devctl_t *dctlp)
{
	struct chipctl_t *cctlp = dctlp->chip;
	int channel = dctlp->channel;
	struct rx_desc_t *rdp, *last_rdp;
	struct tx_desc_t *tdp, *last_tdp;
	unsigned long l;
	unsigned long start_time;
	volatile unsigned long gcc_optimizer_safe;	/* keeps busy-wait loops from being optimized away */

#ifdef PCISCC_DEBUG
	pciscc_dmac_regdump(cctlp);
	pciscc_queuedump(dctlp);
#endif
	/* at first stop timer */
	writel(0, cctlp->io_base+SCCBASE[channel]+TIMR);
	/* wait until command_executing (CEC) is clear, timeout 20 jiffies */
	start_time=jiffies;
	do {
		l=readl(cctlp->io_base+SCCBASE[channel]+STAR);
		gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC);
	} while (gcc_optimizer_safe);
	if (l & CEC) {
		/* not ready, but we will execute reset anyway */
		printk(KERN_ERR "PCISCC: channel_close(): Timeout waiting for SCC being ready for reset.\n");
	}
	/* RX and TX SCC reset; the isr posts the SCC's interrupt vector
	 * into tx_mailbox, which we poll to see whether the reset took */
	writel(RRES | XRES, cctlp->io_base+SCCBASE[channel]+CMDR);
	start_time = jiffies;
	dctlp->tx_mailbox = 0xffffffff;
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff);
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (dctlp->tx_mailbox & 0x03ffffff) {	/* mailbox was written by isr */
		case 0x02001000:	/* SCC XPR interrupt received */
#ifdef PCISCC_DEBUG
			printk(KERN_INFO "PCISCC: channel_close(): Success on SCC reset.\n");
#endif
			break;
		case 0xffffffff:	/* mailbox never written: no interrupt arrived */
			printk(KERN_ERR "PCISCC: channel_close(): Timeout on SCC reset.\n");
			break;
		default:
			printk(KERN_ERR "PCISCC: channel_close(): Failure on SCC reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox);
			break;
	}
	/* stop SCC core */
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR1);
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR2);
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR0);
	/*
	 * Give the isr some time to "refill" the rx-dq
	 * we _MUST_ guarantee that the DMAC-RX is _NOT_ in
	 * hold state when issuing the RESET command, otherwise the DMAC
	 * will crash. (DSCC-4 Rev. <= 2.1)
	 * In addition to that we may only issue a RESET if channel is
	 * currently really initialized, otherwise something horrible will
	 * result.
	 */
	start_time=jiffies;
	do {
		gcc_optimizer_safe=(jiffies-start_time)<5;
	} while (gcc_optimizer_safe);
	/* OK, now we should be ready to put the DMAC into reset state */
	switch (channel) {
		case 0:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG);
			break;
		case 1:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG);
			break;
		case 2:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG);
			break;
		case 3:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG);
			break;
	}
	/* fire the action request and wait for the isr to report success
	 * or failure via cctlp->mailbox */
	start_time = jiffies;
	cctlp->mailbox = MAILBOX_NONE;
	writel(AR, cctlp->io_base+GCMDR);
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (cctlp->mailbox) {	/* mailbox was written by isr */
		case MAILBOX_OK:
#ifdef PCISCC_DEBUG
			printk(KERN_INFO "PCISCC: channel_close(): Success on DMAC reset channel %u.\n", channel);
#endif
			dctlp->dmac_rx=DMAC_RX_RESET;
			dctlp->txstate=TX_RESET;
			break;
		case MAILBOX_NONE:
			printk(KERN_ERR "PCISCC: channel_close(): Timeout on DMAC reset channel %u. Sync HDDs and hardware-reset NOW!\n", channel);
			break;
		case MAILBOX_FAILURE:
			printk(KERN_ERR "PCISCC: channel_close(): Failure on DMAC reset channel %u. Sync HDDs and hardware-reset NOW!\n", channel);
			break;
	}
	/* clear IQs: zero this channel's length nibbles in IQLENR1 and
	 * park the IQ base registers on the harmless dummy buffer */
	l = readl(cctlp->io_base+IQLENR1);
	switch (channel) {
		case 0:	l &= 0x0fff0fff;
			writel(l, cctlp->io_base+IQLENR1);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC0RXBAR);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC0TXBAR);
			break;
		case 1:	l &= 0xf0fff0ff;
			writel(l, cctlp->io_base+IQLENR1);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC1RXBAR);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC1TXBAR);
			break;
		case 2:	l &= 0xff0fff0f;
			writel(l, cctlp->io_base+IQLENR1);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC2RXBAR);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC2TXBAR);
			break;
		case 3:	l &= 0xfff0fff0;
			writel(l, cctlp->io_base+IQLENR1);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC3RXBAR);
			writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC3TXBAR);
			break;
	}
	start_time = jiffies;
	cctlp->mailbox = MAILBOX_NONE;
	writel(AR
	       | (channel == 0 ? (CFGIQSCC0RX | CFGIQSCC0TX) : 0)
	       | (channel == 1 ? (CFGIQSCC1RX | CFGIQSCC1TX) : 0)
	       | (channel == 2 ? (CFGIQSCC2RX | CFGIQSCC2TX) : 0)
	       | (channel == 3 ? (CFGIQSCC3RX | CFGIQSCC3TX) : 0), cctlp->io_base+GCMDR);
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (cctlp->mailbox) {	/* mailbox was written by isr */
		case MAILBOX_OK:
#ifdef PCISCC_DEBUG
			printk(KERN_INFO "PCISCC: channel_close(): Success on IQSCC config request.\n");
#endif
			break;
		case MAILBOX_NONE:
			printk(KERN_ERR "PCISCC: channel_close(): Timeout on IQSCC config request. Sync HDDs and hardware-reset NOW!\n");
			break;
		case MAILBOX_FAILURE:
			printk(KERN_ERR "PCISCC: channel_close(): Failure on IQSCC config request. Sync HDDs and hardware-reset NOW!\n");
			break;
	}
	/* walk the circular descriptor rings once, freeing the skb (if
	 * any) and then the descriptor itself */
	if (dctlp->dq_rx) {
		rdp=dctlp->dq_rx;	/* free descriptor chains and buffers */
		do {
			if (rdp->skb) {
				kfree_skb(rdp->skb);
				rdp->skb=NULL;
			}
			last_rdp=rdp;
			rdp=rdp->next;
			kfree(last_rdp);
		} while (rdp!=dctlp->dq_rx);
		dctlp->dq_rx=NULL;
	}
	dctlp->dq_rx_next=NULL;
	if (dctlp->dq_tx) {
		tdp=dctlp->dq_tx;
		do {
			if (tdp->skb) {
				kfree_skb(tdp->skb);
				tdp->skb=NULL;
			}
			last_tdp=tdp;
			tdp=tdp->next;
			kfree(last_tdp);
		} while (tdp!=dctlp->dq_tx);
		dctlp->dq_tx=NULL;
	}
	dctlp->dq_tx_cleanup=NULL;
	dctlp->dq_tx_last=NULL;
	if (dctlp->iq_rx) {	/* free IQs */
		kfree(dctlp->iq_rx);
		dctlp->iq_rx=NULL;
	}
	if (dctlp->iq_tx) {
		kfree(dctlp->iq_tx);
		dctlp->iq_tx=NULL;
	}
	/* mark the device down and release our reference on the chip */
	dctlp->dev.start=0;
	dctlp->chip->usecnt--;
	return;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* interrupt handler root */
+static void pciscc_isr(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct chipctl_t *cctlp = (struct chipctl_t *) dev_id;
+ struct devctl_t *dctlp;
+ unsigned long flags;
+ unsigned long status;
+ unsigned long l;
+ int channel;
+ unsigned long *iqp;
+ int processed;
+
+ status = readl(cctlp->io_base+GSTAR);
+ writel(status, cctlp->io_base+GSTAR); /* ack' irq */
+ if (!status) return;
+ /* do not disturb... */
+ save_flags(flags);
+ cli();
+ if (status & (IIPGPP | IIPLBI | IIPSSC)) {
+ /* process peripheral queue */
+ processed = 0;
+ for (iqp=cctlp->iq_per_next; *iqp!=0; iqp=((iqp==(cctlp->iq_per+cctlp->cfg.iqlen-1)) ? cctlp->iq_per : iqp+1)) { /* note wrap-arround */
+ /* I just love raping for-statements */
+ printk(KERN_INFO "PCISCC: isr: IQPER vector: 0x%0lx.\n", *iqp);
+ processed++;
+ *iqp=0;
+ }
+ cctlp->iq_per_next=iqp;
+ }
+ if (status & IICFG) {
+ /* process configuration queue */
+ cctlp->mailbox = MAILBOX_NONE;
+ processed = 0;
+ /* I love raping for-statements... */
+ for (iqp=cctlp->iq_cfg_next; *iqp!=0; iqp=((iqp==(cctlp->iq_cfg+cctlp->cfg.iqlen-1)) ? cctlp->iq_cfg : iqp+1)) { /* note wrap-arround */
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: isr: IQCFG vector: 0x%0lx.\n", *iqp);
+#endif
+ if ((*iqp) & ARACK) {
+ cctlp->mailbox = MAILBOX_OK;
+ processed++;
+ }
+ if ((*iqp) & ARF) {
+ cctlp->mailbox = MAILBOX_FAILURE;
+ processed++;
+ }
+ *iqp=0;
+ }
+ cctlp->iq_cfg_next=iqp;
+ if (processed != 1) {
+ printk(KERN_ERR "PCISCC: isr: Something weird going on... IICFG:processed=%u.\n", processed);
+ }
+ }
+ for (channel=0; channel<4; channel++) if (status & (1<<(24+channel))) {
+ /* process TX queue */
+ dctlp=cctlp->device[channel];
+ if (!dctlp->iq_tx || !dctlp->iq_tx_next) continue;
+ processed = 0;
+ for (iqp=dctlp->iq_tx_next; *iqp!=0; iqp=((iqp==(dctlp->iq_tx+cctlp->cfg.iqlen-1)) ? dctlp->iq_tx : iqp+1)) { /* note wrap-arround */
+ if (*iqp & TIN) {
+ /* timer interrupt */
+ writel(0, cctlp->io_base+SCCBASE[channel]+TIMR);
+ /* now, which state are we in? */
+ switch (dctlp->txstate) {
+ case TX_DELAY:
+ /* data transmit */
+ dctlp->txstate=TX_XMIT;
+ switch (channel) {
+ case 0: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA);
+ break;
+ case 1: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA);
+ break;
+ case 2: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA);
+ break;
+ case 3: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA);
+ break;
+ }
+ writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
+ writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
+ break;
+ case TX_TAIL:
+ /* transmitting tail */
+ dctlp->txstate=TX_IDLE;
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ break;
+ case TX_PROBE:
+ /* tx bitrate test going on */
+ do_gettimeofday(&dctlp->tv);
+ dctlp->txstate=TX_RESET;
+ dctlp->tx_mailbox=1;
+ break;
+ case TX_CAL:
+ /* we are (i.e. were) calibrating */
+ if (dctlp->dq_tx_last != dctlp->dq_tx_cleanup) {
+ /* we have something in the tx queue */
+ dctlp->txstate = TX_XMIT;
+ switch (channel) {
+ case 0: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA);
+ break;
+ case 1: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA);
+ break;
+ case 2: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA);
+ break;
+ case 3: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA);
+ break;
+ }
+ writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
+ writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
+ } else {
+ dctlp->txstate=TX_IDLE;
+#ifdef FULLDUP_PTT
+ if (dctlp->cfg.duplex == CFG_DUPLEX_HALF) {
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ }
+#else
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+#endif
+ }
+ break;
+ case TX_XMIT:
+ /* watchdog just ran out */
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ dctlp->txstate=TX_IDLE;
+ txto_task.routine=pciscc_bh_txto;
+ txto_task.data=(void *) dctlp;
+ queue_task(&txto_task, &tq_scheduler);
+ break;
+ default:
+ printk(KERN_ERR "PCISCC: isr: Timer interrupt while txstate=%u.\n", dctlp->txstate);
+ dctlp->txstate=TX_IDLE;
+ }
+ }
+ if (*iqp & ALLS) {
+ /* a TX frame was just completed */
+ pciscc_isr_txcleanup(dctlp);
+ if (dctlp->dq_tx_cleanup == dctlp->dq_tx_last) {
+ /* complete TX-queue sent out */
+ if (dctlp->cfg.duplex == CFG_DUPLEX_FULL) {
+ /* just update txstate */
+ dctlp->txstate=TX_IDLE;
+#ifndef FULLDUP_PTT
+ /* release PTT */
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+#endif
+ } else if (dctlp->cfg.txdelmode == CFG_TXDEL_SOFT) {
+ /* start txtail */
+ dctlp->txstate=TX_TAIL;
+ writel(dctlp->cfg.txtailval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
+ writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
+ } else if (dctlp->cfg.txdelmode == CFG_TXDEL_HARD) {
+ /* deassert RTS immediately */
+ dctlp->txstate=TX_IDLE;
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ }
+ }
+ }
+ if (*iqp & XDU) {
+ /*
+ * TX stall - now we are _really_ in trouble.
+ * We must reset the SCC core and re-init DMAC-TX.
+ * This includes delay loops and we are in interrupt
+ * context, with all interrupts disabled...
+ */
+#ifdef PCISCC_DEBUG
+ printk(KERN_INFO "PCISCC: isr: TX data underrun occured iface=%s.\n", dctlp->name);
+#endif
+ dctlp->stats.tx_fifo_errors++;
+ /* reset TX-DMAC */
+ switch (channel) {
+ case 0: writel(MRFI | MTFI | MRERR | MTERR | RDT, cctlp->io_base+CH0CFG);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA);
+ case 1: writel(MRFI | MTFI | MRERR | MTERR | RDT, cctlp->io_base+CH1CFG);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA);
+ case 2: writel(MRFI | MTFI | MRERR | MTERR | RDT, cctlp->io_base+CH2CFG);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA);
+ case 3: writel(MRFI | MTFI | MRERR | MTERR | RDT, cctlp->io_base+CH3CFG);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA);
+ }
+ writel(AR, cctlp->io_base+GCMDR);
+ mdelay(10);
+ /* reset SCC core TX */
+ writel(XRES, cctlp->io_base+SCCBASE[channel]+CMDR);
+ mdelay(30);
+ /* re-init TX-DMAC */
+ switch (channel) {
+ case 0: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA);
+ writel(MTFI | IDT, cctlp->io_base+CH0CFG);
+ case 1: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA);
+ writel(MTFI | IDT, cctlp->io_base+CH1CFG);
+ case 2: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA);
+ writel(MTFI | IDT, cctlp->io_base+CH2CFG);
+ case 3: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3BTDA);
+ writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA);
+ writel(MTFI | IDT, cctlp->io_base+CH3CFG);
+ }
+ writel(AR, cctlp->io_base+GCMDR);
+ mdelay(10);
+ /* We can't do anything more since we are still
+ * in the interrupt handler. We can only hope that
+ * the reset succeeded. */
+ }
+ if (*iqp & XMR) {
+ /*
+ * TX message repeat - not critical, since
+ * resolved automagically by abort sequence
+ * and retransmit.
+ */
+#ifdef PCISCC_VDEBUG
+ printk(KERN_INFO "PCISCC: isr: TX message repeat interrupt iface=%s.\n", dctlp->name);
+#endif
+ }
+ dctlp->tx_mailbox=*iqp;
+ *iqp=0;
+ processed++;
+ }
+ dctlp->iq_tx_next=iqp;
+ }
+ for (channel=0; channel<4; channel++) if (status & (1<<(28+channel))) {
+ dctlp=cctlp->device[channel];
+ /* process RX queue */
+ if (!dctlp->iq_rx || !dctlp->iq_rx_next) {
+ printk(KERN_ERR "PCISCC: isr: IQCHAN%uRX interrupt for non-initialized queue!\n", channel);
+ continue;
+ }
+ processed = 0;
+ for (iqp=dctlp->iq_rx_next; *iqp!=0; iqp=((iqp==(dctlp->iq_rx+cctlp->cfg.iqlen-1)) ? dctlp->iq_rx : iqp+1)) { /* note wrap-arround */
+ /* statistics only */
+ if ((*iqp & SCCIV_SCC) && (*iqp & SCCIV_RDO)) {
+ dctlp->stats.rx_fifo_errors++;
+ }
+ if ((*iqp & SCCIV_SCC) && (*iqp & SCCIV_RFO)) {
+ dctlp->stats.rx_over_errors++;
+ }
+ if ((*iqp & SCCIV_SCC) && (*iqp & SCCIV_FLEX)) {
+ dctlp->stats.rx_length_errors++;
+ }
+ if (!(*iqp & SCCIV_SCC) && (*iqp & SCCIV_ERR)) {
+ dctlp->stats.rx_errors++;
+ }
+ if (!(*iqp & SCCIV_SCC) && (*iqp & SCCIV_HI)) {
+ printk(KERN_ERR "PCISCC: isr: Weird... received HI interrupt.\n");
+ }
+ if (!(*iqp & SCCIV_SCC) && (*iqp & SCCIV_FI)) {
+ }
+ dctlp->rx_mailbox=*iqp;
+ *iqp=0;
+ processed++;
+ }
+ /* in any case check RX descriptor queue for received frames */
+ if (dctlp->dev.start) pciscc_isr_receiver(dctlp);
+ dctlp->iq_rx_next=iqp;
+ }
+ restore_flags(flags);
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* called by interrupt handler root when RX interrupt occurred */
static __inline__ void pciscc_isr_receiver(struct devctl_t *dctlp)
{
	struct chipctl_t *cctlp = dctlp->chip;
	int channel = dctlp->channel;
	struct rx_desc_t *rdp;			/* descriptor currently processed */
	long status;				/* copy of descriptor result word */
	unsigned char rdsb;	/* receive data status byte, generated by DMAC at buffer end */
	int bno;				/* received byte count from the result word */
	int valid;				/* frame passed all error checks */
	struct sk_buff *new_skb;		/* replacement buffer for an skb handed upstream */
	int processed;				/* frames handled in this call (debug only) */

#ifdef PCISCC_DEBUG
	if (!dctlp->dev.start) {
		printk(KERN_INFO "PCISCC: isr_receiver: frame received while !dev->start.\n");
	}
#endif
	/* walk the RX descriptor ring from the oldest unprocessed entry;
	 * the DMAC sets (C)omplete once a frame has been stored */
	for (rdp=dctlp->dq_rx_next, processed=0; (rdp->result & C); rdp=rdp->next, processed++) {
#ifdef PCISCC_DEBUG
		/* paranoia: bus addresses in the descriptor must still match
		 * the virtual ring/buffer pointers */
		if ((rdp->nextptr != (void *) virt_to_bus(rdp->next)) || (rdp->dataptr != (void *) virt_to_bus(rdp->skb->data))) {
			panic("PCISCC: isr_receiver(): mm fucked with our buffers");
		}
#endif
		status = rdp->result;
		bno = (status >> 16) & 0x1fff;	/* 13-bit byte count field */
		valid = 1; /* we assume frame valid */
		/* reject on: receive abort, implausible length, missing
		 * frame-end, or frame-end pointer not naming this descriptor */
		if ((status & RA) || (bno <= 0) || (bno > dctlp->dev.mtu) || !(status & FE) || (rdp->feptr != (void *) virt_to_bus(rdp))) {
			/* aborted or invalid length */
			valid = 0;
		} else {
			/* last buffer byte is the status byte appended by the DMAC */
			rdsb = rdp->skb->data[bno-1];
			if (!(rdsb & SB_VFR)) { /* incorrect bit length */
				valid = 0;
				dctlp->stats.rx_frame_errors++;
			}
			if (rdsb & SB_RDO) { /* data overflow */
				valid = 0; /* areadly counted */
			}
			if (!(rdsb & SB_CRC) && !(dctlp->cfg.crcmode & CFG_CRCMODE_RXCD)) {
				/* CRC error (and RX CRC checking not disabled by config) */
				valid = 0;
				dctlp->stats.rx_crc_errors++;
			}
			if (rdsb & SB_RAB) { /* receive message aborted */
				valid = 0;
			}
		}
		/* OK, this is a little bit tricky. We have to make sure
		 * that every descriptor has a buffer assigned. Thus we
		 * can only release a buffer to the link layer if we get
		 * a new one in turn from mm before. */
		if (valid) {
			if ((new_skb = alloc_skb(dctlp->dev.mtu+10+SKB_HEADROOM, GFP_DMA | GFP_ATOMIC))) {
				skb_reserve(new_skb, SKB_HEADROOM);
				skb_trim(rdp->skb, bno-1);	/* strip trailing status byte */
				pciscc_rx_skb(rdp->skb, dctlp);	/* hand frame up to L2 */
				rdp->skb = new_skb;
				rdp->dataptr=(void *) virt_to_bus(skb_put(rdp->skb, dctlp->dev.mtu+10));
			} else {
				/* out of memory: keep the old buffer, drop this frame */
#ifdef PCISCC_DEBUG
				printk(KERN_INFO "PCISCC: isr_receiver: Out of memory allocating new skb!\n");
#endif
			}
		}
		rdp->flags=dctlp->dev.mtu*NO; /* prepare descriptor for next time */
		rdp->result=0;
		rdp->feptr=0;
		flush_cache_all();	/* make descriptor changes visible to the DMAC */
	}
#ifdef PCISCC_VDEBUG
	printk(KERN_INFO "PCISCC: isr_receiver: Processed %u frames at once.\n", processed);
#endif
	dctlp->dq_rx_next = rdp;
	/*
	 * tell DMAC last available descriptor - keep up one
	 * descriptor space for security (paranoia) (...->prev->prev)
	 */
	switch (channel) {
		case 0:	writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH0LRDA);
			break;
		case 1:	writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH1LRDA);
			break;
		case 2:	writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH2LRDA);
			break;
		case 3:	writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH3LRDA);
			break;
	}
	return;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* called by IRQ handler root when a TX descriptor was completed */
static __inline__ void pciscc_isr_txcleanup(struct devctl_t *dctlp)
{
	struct tx_desc_t *tdp;		/* descriptor currently being reaped */
	int processed;			/* completed descriptors seen this call */

	processed=0;
	tdp=dctlp->dq_tx_cleanup;
	/* advance from the oldest un-reaped descriptor up to (excluding)
	 * the most recently queued one, releasing (C)omplete entries */
	while ((tdp != dctlp->dq_tx_last) && (tdp->result & C)) {
		/* clean up all (C)omplete descriptors */
		if (tdp->skb) {
#ifdef PCISCC_DEBUG
			if ((tdp->nextptr != (void *) virt_to_bus(tdp->next)) || (tdp->dataptr != (void *) virt_to_bus(tdp->skb->data))) {
				/*
				 * paranoia check -
				 * this should _never_ever_occur_ .
				 * if it does, the memory subsystem moved around
				 * our buffers in address space, and it's the
				 * last you will see.
				 */
				printk(KERN_ERR "PCISCC: isr_txcleanup(): mm fucked with our buffers");
			}
#endif
			dctlp->stats.tx_packets++;
			dctlp->stats.tx_bytes += tdp->skb->len;
			kfree_skb(tdp->skb);	/* frame is on the wire - release it */
			tdp->skb = NULL;
		}
		tdp->flags = (FE | (NO*8)); /* dummy */
		tdp->dataptr = (void *) virt_to_bus(dummybuf); /* paranoia */
		tdp->result = 0;
		tdp = tdp->next;
		processed++;
#ifdef PCISCC_DEBUG
		if (processed>100) {
			/* emergency brake against looping forever inside the ISR */
			printk(KERN_ERR "PCISCC: trouble in isr_txcleanup or please reduce bit rate by 20 dB.\n");
			dctlp->dev.start=0;
			break;
		}
#endif
	}
	dctlp->dq_tx_cleanup = tdp;
	flush_cache_all();	/* make descriptor changes visible to the DMAC */
#ifdef PCISCC_VDEBUG
	printk(KERN_INFO "PCISCC: isr_txcleanup: Processed %u frames.\n", processed);
#endif
	return;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* called by TIN ISR as bh when TX timeout has occured (watchdog) */
+static void pciscc_bh_txto(void *arg)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) arg;
+
+ printk(KERN_ERR "PCISCC: Taking interface %s down due to TX hang. Clocking problem?\n", dctlp->name);
+ dev_close(&dctlp->dev);
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* *REALLY* ugly work-around for timer race */
+static __inline__ void pciscc_clear_timer(struct devctl_t *dctlp)
+{
+ struct chipctl_t *cctlp = dctlp->chip;
+ unsigned long *iqp;
+
+ /* walk through TX queue eliminating TINs FIXME */
+ if (!dctlp->iq_tx || !dctlp->iq_tx_next) return;
+ for (iqp=dctlp->iq_tx_next; *iqp!=0; iqp=((iqp==(dctlp->iq_tx+cctlp->cfg.iqlen-1)) ? dctlp->iq_tx : iqp+1)) { /* note wrap-arround */
+ if (*iqp & TIN) *iqp = SCCIV_IGNORE;
+ }
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * probe TX bitrate of a channel, called from device_open()
+ * idea behind it:
+ * load the channels timer with a known value and measure how long it
+ * takes to reach zero, using the system timer
+ */
+static long pciscc_probe_txrate(struct devctl_t *dctlp)
+{
+ struct chipctl_t *cctlp = dctlp->chip;
+ int channel = dctlp->channel;
+ struct timeval tv_start;
+ volatile unsigned long gcc_optimizer_safe;
+ unsigned long start_time;
+ long delta_us;
+ unsigned long long tx_bitrate;
+ unsigned long l;
+
+ dctlp->txstate = TX_PROBE;
+ dctlp->tx_mailbox = 0;
+ start_time = jiffies;
+ /* assert RTS line */
+ l=readl(cctlp->io_base+SCCBASE[dctlp->channel]+CCR1);
+ l |= RTS;
+ writel(l, cctlp->io_base+SCCBASE[dctlp->channel]+CCR1);
+ writel((probebit*TVALUE), cctlp->io_base+SCCBASE[dctlp->channel]+TIMR);
+ do_gettimeofday(&tv_start);
+ writel(STI, cctlp->io_base+SCCBASE[dctlp->channel]+CMDR);
+ do {
+ gcc_optimizer_safe = !dctlp->tx_mailbox && ((jiffies-start_time)<1000);
+ } while (gcc_optimizer_safe);
+ dctlp->txstate = TX_RESET;
+ if (!dctlp->tx_mailbox) {
+ printk(KERN_ERR "PCISCC: probe_txrate(): Timeout probing %s-TxClk. Clocking problem?\n", dctlp->dev.name);
+ return 0;
+
+ } else {
+ delta_us = (dctlp->tv.tv_sec - tv_start.tv_sec)*1000000+(dctlp->tv.tv_usec - tv_start.tv_usec);
+ }
+ tx_bitrate = 10000*probebit/(delta_us/100);
+ printk(KERN_INFO "PCISCC: probe_txrate(): tx_bitrate=%ld.\n", (long) tx_bitrate);
+ if (dctlp->cfg.duplex == CFG_DUPLEX_HALF) {
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+ } else {
+#ifndef FULLDUP_PTT
+ l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
+ l=l & (~RTS);
+ writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
+#endif
+ }
+ return tx_bitrate;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* DDI support: immediately assert RTS, downcall from MAC layer */
static void pciscc_setptt(struct net_device *dev)
{
#ifdef AX25_ARBITER_NOT_BUGGY
	struct devctl_t *dp = (struct devctl_t *) dev->priv;
	struct chipctl_t *cp = dp->chip;
	unsigned long ccr1;

	/* set the RTS bit in the channel's CCR1 register */
	ccr1 = readl(cp->io_base+SCCBASE[dp->channel]+CCR1);
	writel(ccr1 | RTS, cp->io_base+SCCBASE[dp->channel]+CCR1);
#endif
}
+
+/* ------------------------------------------------------------------------- */
+
+/* report DCD state to DDI layer */
+static unsigned int pciscc_getdcd(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+ struct chipctl_t *cctlp = dctlp->chip;
+ unsigned long l;
+ unsigned int dcd;
+
+ l = readl(cctlp->io_base+SCCBASE[dctlp->channel]+STAR);
+ dcd = dctlp->cfg.cdinv ? !!(l & CD) : !(l & CD);
+ return dcd;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* return CTS state to DDI layer */
+static unsigned int pciscc_getcts(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+ struct chipctl_t *cctlp = dctlp->chip;
+ unsigned int cts;
+
+ cts = !!(readl(cctlp->io_base+SCCBASE[dctlp->channel]+STAR) & CTS);
+ return cts;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* report PTT state to DDI layer */
+static unsigned int pciscc_getptt(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+ struct chipctl_t *cctlp = dctlp->chip;
+ unsigned int ptt;
+
+ ptt = !(readl(cctlp->io_base+SCCBASE[dctlp->channel]+STAR) & CTS);
+ ptt = (dctlp->cfg.cdinv ? ptt : !ptt);
+ return ptt;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * this function is called by DDI layer whenever a media parameter change
+ * was suggested by either proc/sysctl or MAC/LAP internal cause
+ */
static void pciscc_parameter_notify(struct net_device *dev, int valueno, int old, int new)
{
	struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
	int m,n;		/* baud rate generator divider factors */
	int br, bits;		/* TX bitrate [bps]; delay converted to bit times */

	/* NOTE(review): KERN_ERR for a routine notification looks like debug leftover */
	printk(KERN_ERR "PCISCC: pciscc_parameter_notify(%s, %u).\n", dev->name, valueno);
	switch (valueno) {
		case AX25_VALUES_MEDIA_DUPLEX:
			if (dev->start) goto reject;	/* only changeable while down */
			dctlp->cfg.duplex = new;
			break;
		case AX25_VALUES_MEDIA_TXBITRATE:
		case AX25_VALUES_MEDIA_RXBITRATE:
			if (dev->start) goto reject;	/* only changeable while down */
			/* translate the requested rate into the closest BRG M/N pair */
			pciscc_rate2brg(new, &m, &n);
			dctlp->cfg.brate_m = m;
			dctlp->cfg.brate_n = n;
			break;
		case AX25_VALUES_MEDIA_TXDELAY:
		case AX25_VALUES_MEDIA_TXTAIL:
			/* convert the delay (presumably ms - confirm) into bit
			 * times at the current TX bitrate */
			br = ax25_dev_get_value(dev, AX25_VALUES_MEDIA_TXBITRATE);
			if (br == 0) goto reject;
			bits = (br*new)/1000;
			if (bits == 0) bits=16; /* two flags */
			if (valueno == AX25_VALUES_MEDIA_TXDELAY) {
				dctlp->cfg.txdelval = bits;
			} else {
				dctlp->cfg.txtailval = bits;
			}
			break;
		case AX25_VALUES_MEDIA_SLOTTIME:
		case AX25_VALUES_MEDIA_PPERSISTENCE:
		case AX25_VALUES_MEDIA_AUTO_ADJUST:
		default:
			/*
			 * We do not care about changes of
			 * those - they are somebody else's problem (now).
			 */
			return;
	}

reject:
	/*
	 * Reached both after a successful change (fall-through from the
	 * break cases) and on rejection:
	 * (Re)store values from our (changed)
	 * configuration information
	 */
	pciscc_update_values(dev);
	return;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* convert BRG N and M into bitrate in bps */
+static int pciscc_brg2rate(int m, int n)
+{
+ int br = xtal / ((n+1) * (1<<m));
+
+ if (br == 0) br=1;
+ return br;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* find best BRG N and M match for given bitrate */
+static void pciscc_rate2brg(int rate, int *m, int *n)
+{
+ int ratio = xtal/rate;
+ int brg_best_n = 0;
+ int brg_best_m = 0;
+ int brg_tmp_n = 0;
+ int brg_tmp = 0;
+ int brg_best = 0;
+ int i;
+
+ *m = *n = 0;
+ if (ratio > 2097152) return;
+ for (i=0; i<16; i++) {
+ brg_tmp_n = (ratio/(1<<i))-1;
+ if (brg_tmp_n > 63 || brg_tmp_n < 0) continue;
+ brg_tmp = (brg_tmp_n+1)*(1<<i);
+ if (abs(brg_best-ratio) < abs(brg_tmp-ratio)) continue;
+ brg_best = brg_tmp;
+ brg_best_n = brg_tmp_n;
+ brg_best_m = i;
+ }
+ *m = brg_best_m;
+ *n = brg_best_n;
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+
static void pciscc_update_values(struct net_device *dev)
{
	struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
	int rx_br, tx_br;	/* bitrates reported to the AX.25 layer [bps] */
	int clk_ext;		/* clock mode whose rate follows the BRG config
				 * (G3RUH/TCM3105 modem modes - confirm semantics) */

	AX25_PTR(dev)->hw.fast = 0;
	tx_br = rx_br = 0;
	clk_ext = (dctlp->cfg.clockmode == CFG_CM_G3RUH
	           || dctlp->cfg.clockmode == CFG_CM_TCM3105);
	if (clk_ext) {
		/* derive the rate from the configured BRG dividers */
		tx_br = rx_br = pciscc_brg2rate(dctlp->cfg.brate_m, dctlp->cfg.brate_n);
	}
	if (dev->start) {
		tx_br = dctlp->tx_bitrate;	/* measured rate wins while running */
		if (clk_ext) rx_br = tx_br; /* assumption; we have no way to find out */
	}
	if (tx_br > 0) {
		/* publish current media parameters to the AX.25 layer;
		 * txdelval/txtailval are stored in bit times, hence /tx_br */
		ax25_dev_set_value(dev, AX25_VALUES_MEDIA_DUPLEX, dctlp->cfg.duplex);
		ax25_dev_set_value(dev, AX25_VALUES_MEDIA_TXBITRATE, tx_br);
		ax25_dev_set_value(dev, AX25_VALUES_MEDIA_RXBITRATE, rx_br);
		ax25_dev_set_value(dev, AX25_VALUES_MEDIA_TXDELAY, dctlp->cfg.txdelval/tx_br);
		ax25_dev_set_value(dev, AX25_VALUES_MEDIA_TXTAIL, dctlp->cfg.txtailval/tx_br);
	}
	return;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* netdevice UP -> DOWN routine */
+static int pciscc_dev_close(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+
+ if (dctlp->dev.start) {
+ pciscc_channel_close(dctlp);
+ }
+ dctlp->dev.start=0;
+ pciscc_update_values(dev);
+ if (dctlp->chip->initialized && !dctlp->chip->usecnt) {
+ pciscc_chip_close(dctlp->chip);
+ }
+#ifdef MODULE
+ MOD_DEC_USE_COUNT;
+#endif
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* netdevice DOWN -> UP routine */
+static int pciscc_dev_open(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+ int res;
+
+ if (!dctlp->chip->initialized) {
+ if ((res=pciscc_chip_open(dctlp->chip))) return res;
+ }
+ if (!dctlp->dev.start) {
+ if ((res=pciscc_channel_open(dctlp))) return res;
+ pciscc_update_values(dev);
+ }
+#ifdef MODULE
+ MOD_INC_USE_COUNT;
+#endif
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* netdevice change MTU request */
+static int pciscc_change_mtu(struct net_device *dev, int new_mtu)
+{
+ dev->mtu=new_mtu;
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* netdevice get statistics request */
+static struct net_device_stats *pciscc_get_stats(struct net_device *dev)
+{
+ struct devctl_t *dctlp;
+
+ if (!dev || !dev->priv) return NULL; /* paranoia */
+ dctlp = (struct devctl_t *) dev->priv;
+ return &dctlp->stats;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* netdevice register - finish painting netdev structure */
+static int pciscc_dev_init(struct net_device *dev)
+{
+ struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
+ int br;
+
+ dev->mtu = 1500;
+ dev->hard_start_xmit = pciscc_xmit;
+ dev->open = pciscc_dev_open;
+ dev->stop = pciscc_dev_close;
+ dev->get_stats = pciscc_get_stats;
+ dev->change_mtu = pciscc_change_mtu;
+ dev->do_ioctl = pciscc_dev_ioctl;
+ dev->set_mac_address = pciscc_dev_set_mac_address;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
+ dev->type = ARPHRD_AX25;
+ dev->tx_queue_len = 10;
+ dev->flags = (IFF_BROADCAST | IFF_MULTICAST);
+ AX25_PTR(dev) = &((struct devctl_t *) dev->priv)->ax25dev;
+ memset(AX25_PTR(dev), 0, sizeof(struct ax25_dev));
+ AX25_PTR(dev)->hw.dcd = pciscc_getdcd;
+ AX25_PTR(dev)->hw.ptt = pciscc_getptt;
+ AX25_PTR(dev)->hw.rts = pciscc_setptt;
+ AX25_PTR(dev)->hw.cts = pciscc_getcts;
+ AX25_PTR(dev)->hw.parameter_change_notify = pciscc_parameter_notify;
+ dev_init_buffers(dev);
+ memcpy(&dctlp->cfg, &devcfg_default, sizeof(struct devcfg_t));
+ br = pciscc_brg2rate(dctlp->cfg.brate_m, dctlp->cfg.brate_n);
+ pciscc_update_values(dev);
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* set device's L2 address */
+static int pciscc_dev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct sockaddr *sa = addr;
+
+ memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN);
+ return 0;
+}
+
+/* ------------------------------------------------------------------------- */
+/*
+ * IOCTLs:
+ *
+ * SIOCPCISCCGCCFG PCISCC Get Chip ConFiG
+ * SIOCPCISCCSCCFG PCISCC Set Chip ConFiG
+ * SIOCPCISCCGDCFG PCISCC Get Device ConFiG
+ * SIOCPCISCCSDCFG PCISCC Set Device ConFiG
+ * SIOCPCISCCSLED PCISCC Set LEDs
+ * SIOCPCISCCGDSTAT PCISCC Get Device STATus
+ * SIOCPCISCCDCAL PCISCC Device CALibrate
+ * SIOCPCISCCLBI PCISCC DSCC-4 Local Bus Interface transaction
+ */
+
static int pciscc_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
	struct chipctl_t *cctlp = dctlp->chip;
	struct devcfg_t dcfg;		/* kernel copy of user-supplied device config */
	struct chipcfg_t ccfg;		/* kernel copy of user-supplied chip config */
	struct lbi_xfer lbi;		/* local bus transaction descriptor */
	int channel = dctlp->channel;
	int i;
	unsigned long l;
	unsigned long status;		/* status word assembled for SIOCPCISCCGDSTAT */
	unsigned long time;		/* timer value for SIOCPCISCCDCAL */

	switch (cmd) {
		case SIOCPCISCCGCCFG:
			/* return cctlp->cfg structure in user-provided buffer */
			if (copy_to_user(ifr->ifr_data, &cctlp->cfg, sizeof(struct chipcfg_t))) {
				return -EFAULT;
			}
			return 0;
		case SIOCPCISCCSCCFG:
			/* copy user-provided data buffer to cctlp->cfg */
			if (!suser()) return -EPERM;
			/* refuse while any channel of this chip is up */
			for (i=0; i<4; i++) {
				if (cctlp->device[i]->dev.start) return -EBUSY;
			}
			if (copy_from_user(&ccfg, ifr->ifr_data, sizeof(struct chipcfg_t))) {
				return -EFAULT;
			}
			/* range-check every field before accepting */
			if ((ccfg.rxbufcnt < 4) || (ccfg.rxbufcnt > 128)) return -EINVAL;
			if ((ccfg.txbufcnt < 4) || (ccfg.txbufcnt > 256)) return -EINVAL;
			if ((ccfg.iqlen < 32) || (ccfg.iqlen > 512) || (ccfg.iqlen % 32 != 0)) return -EINVAL;
			if ((ccfg.prichan > 3) || (ccfg.prichan < -1)) return -EINVAL;
			if ((ccfg.mfifo_rx_t > 124) || (ccfg.mfifo_rx_t < 4)) return -EINVAL;
			memcpy((unsigned char *) &cctlp->cfg, (unsigned char *) &ccfg, sizeof(struct chipcfg_t));
			return 0;
		case SIOCPCISCCGDCFG:
			/* return dctlp->cfg structure in user-provided buffer */
			if (copy_to_user(ifr->ifr_data, &dctlp->cfg, sizeof(struct devcfg_t))) {
				return -EFAULT;
			}
			return 0;
		case SIOCPCISCCSDCFG:
			/* copy user-provided data buffer to dctlp->cfg */
			if (!suser()) return -EPERM;
			/* refuse while any channel of this chip is up */
			for (i=0; i<4; i++) {
				if (cctlp->device[i]->dev.start) return -EBUSY;
			}
			if (copy_from_user(&dcfg, ifr->ifr_data, sizeof(struct devcfg_t))) {
				return -EFAULT;
			}
			/* validate every field against its legal range */
			if ((dcfg.coding < CFG_CHCODE_MIN) || (dcfg.coding > CFG_CHCODE_MAX)) return -EINVAL;
			if ((dcfg.clockmode < CFG_CM_MIN) || (dcfg.clockmode > CFG_CM_MAX)) return -EINVAL;
			if ((dcfg.duplex < CFG_DUPLEX_MIN) || (dcfg.duplex > CFG_DUPLEX_MAX)) return -EINVAL;
			if (dcfg.brate_m > CFG_BRATEM_MAX) return -EINVAL;
			if (dcfg.brate_n > CFG_BRATEN_MAX) return -EINVAL;
			if ((dcfg.txddrive < CFG_TXDDRIVE_MIN) || (dcfg.txddrive > CFG_TXDDRIVE_MAX)) return -EINVAL;
			if ((dcfg.mfifo_tx_p < 4) || (dcfg.mfifo_tx_p > 124) || (dcfg.mfifo_tx_p % 4 != 0)) return -EINVAL;
			if ((dcfg.mfifo_tx_r < 1) || (dcfg.mfifo_tx_r > dcfg.mfifo_tx_p) || (dcfg.mfifo_tx_r % 4 != 0)) return -EINVAL;
			if (dcfg.mfifo_tx_f > (dcfg.mfifo_tx_p-1)) return -EINVAL;
			if ((dcfg.cfifo_rx_t != 1) && (dcfg.cfifo_rx_t != 2) && (dcfg.cfifo_rx_t != 4)
			    && (dcfg.cfifo_rx_t != 16) && (dcfg.cfifo_rx_t != 24) && (dcfg.cfifo_rx_t != 32)
			    && (dcfg.cfifo_rx_t != 60)) return -EINVAL;
			if ((dcfg.txdelmode < CFG_TXDEL_MIN) || (dcfg.txdelmode > CFG_TXDEL_MAX)) return -EINVAL;
			if ((dcfg.preamb_rpt!=0) && (dcfg.preamb_rpt!=1) && (dcfg.preamb_rpt!=2) && (dcfg.preamb_rpt!=4) && (dcfg.preamb_rpt!=8)) return -EINVAL;
			memcpy((unsigned char *) &dctlp->cfg, (unsigned char *) &dcfg, sizeof(struct devcfg_t));
			pciscc_update_values(dev);
			return 0;
		case SIOCPCISCCSLED:
			/* set channel LEDs - two GPIO bits per channel,
			 * the two low bits of ifr_data carry the LED state */
			if (!suser()) return -EPERM;
			l = readl(cctlp->io_base+GPDATA);
			switch (channel) {
				case 0:	l &= ~((1<<0) | (1<<1));
					l |= (((unsigned long) ifr->ifr_data & 3) << 0);
					break;
				case 1:	l &= ~((1<<2) | (1<<3));
					l |= (((unsigned long) ifr->ifr_data & 3) << 2);
					break;
				case 2:	l &= ~((1<<4) | (1<<5));
					l |= (((unsigned long) ifr->ifr_data & 3) << 4);
					break;
				case 3:	l &= ~((1<<6) | (1<<7));
					l |= (((unsigned long) ifr->ifr_data & 3) << 6);
					break;
			}
			writel(l, cctlp->io_base+GPDATA);
			return 0;
		case SIOCPCISCCGDSTAT:
			/* get channel status - packed into ifr_data itself,
			 * not copied to a user buffer */
			status = (dctlp->txstate & 0x0f);
			l = readl(cctlp->io_base+SCCBASE[channel]+STAR);
			if (l & DPLA) status |= STATUS_DPLA;
			if (l & RLI) status |= STATUS_RLI;
			if (dctlp->cfg.cdinv) {
				if (l & CD) status |= STATUS_CD;
			} else {
				if (!(l & CD)) status |= STATUS_CD;
			}
			if (l & CTS) status |= STATUS_CTS;
			if (readl(cctlp->io_base+SCCBASE[dctlp->channel]+CCR1) & RTS) status |= STATUS_RTS;
			ifr->ifr_data = (void *) status;
			return 0;
		case SIOCPCISCCDCAL:
			/* calibrate: key the transmitter for "time" timer ticks,
			 * or un-key immediately if time == 0 */
			if (!suser()) return -EPERM;
			if (!dev->start) return -EAGAIN;
			/* NOTE(review): both operands test TX_IDLE - the second
			 * was probably meant to be another state (TX_CAL?);
			 * confirm intent before changing */
			if ((dctlp->txstate != TX_IDLE) && (dctlp->txstate != TX_IDLE)) return -EAGAIN;
			time = (unsigned long) ifr->ifr_data;
			if (time > 0xffffff) return -EINVAL;	/* 24-bit timer value */
			writel((time*TVALUE), cctlp->io_base+SCCBASE[channel]+TIMR);
			if (time == 0) {
				/* stop calibration: drop RTS (unless full duplex
				 * keeps it asserted via FULLDUP_PTT) */
				dctlp->txstate = TX_IDLE;
				if (dctlp->cfg.duplex == CFG_DUPLEX_HALF) {
					l = readl(cctlp->io_base+SCCBASE[channel]+CCR1);
					l &= ~RTS;
					writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
				} else {
					/* duplex */
#ifndef FULLDUP_PTT
					l = readl(cctlp->io_base+SCCBASE[channel]+CCR1);
					l &= ~RTS;
					writel(l, cctlp->io_base+SCCBASE[channel]+CCR1);
#endif
				}
			} else {
				/* start calibration: assert RTS and start the timer */
				dctlp->txstate = TX_CAL;
				l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
				writel(l | RTS, cctlp->io_base+SCCBASE[channel]+CCR1);
				writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
			}
			return 0;
		case SIOCPCISCCLBI:
			/* local bus transaction */
			if (!suser()) return -EPERM;
			if (copy_from_user(&lbi, ifr->ifr_data, sizeof(struct lbi_xfer))) {
				return -EFAULT;
			}
			if (lbi.mode == LBI_WRITE) {
				writew(lbi.data, cctlp->lbi_base+lbi.addr);
			} else {
				/* NOTE(review): write path uses writew but read uses
				 * readl - verify 16- vs 32-bit access is intended */
				lbi.data = readl(cctlp->lbi_base+lbi.addr);
				if (copy_to_user(ifr->ifr_data, &lbi, sizeof(struct lbi_xfer)))
					return -EFAULT;
			}
			return 0;
		default:
			return -EINVAL;
	}
	return 0;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* transmit frame, downcall from MAC layer */
static int pciscc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct devctl_t *dctlp = (struct devctl_t *) dev->priv;
	struct chipctl_t *cctlp = (struct chipctl_t *) dctlp->chip;
	int channel = dctlp->channel;
	unsigned long flags;
	struct tx_desc_t *txdp;		/* descriptor the frame gets queued on */
	unsigned long l;

	/* NOTE(review): the NULL-skb check below should come first -
	 * kfree_skb(skb) here would oops on a NULL skb */
	if (!dev->start) {
		printk(KERN_ERR "PCISCC: xmit(): Call when iface %s is down\n", dev->name);
		kfree_skb(skb);
		return 0;
	}
	if (!skb) {
		printk(KERN_ERR "PCISCC: xmit(): L2 handed us a NULL skb!\n");
		return 0;
	}
	if (!skb->len) {
		printk(KERN_ERR "PCISCC: xmit(): L2 tried to trick us into sending a skb of len 0!\n");
		kfree_skb(skb);
		return 0;
	}
	save_flags(flags);
	cli();
	txdp=dctlp->dq_tx_last->next;
	/* drop the frame if the next or over-next slot is still owned by
	 * the cleanup side or not yet completed */
	if ((txdp == dctlp->dq_tx_cleanup) || (txdp->next == dctlp->dq_tx_cleanup) || (txdp->result & C) || (txdp->next->result & C)) {
		/* desriptor chain "full" */
#ifdef PCISCC_VDEBUG
		printk(KERN_INFO "PCISCC: xmit(): Dropping frame due to full TX queue interface %s.\n", dev->name);
#endif
		dctlp->stats.tx_dropped++;
		kfree_skb(skb);
		restore_flags(flags);
		return 0;
	}
	/* prepare TX descriptor */
	txdp->result=0;
	txdp->skb=skb;
	txdp->flags=FE | (BNO*skb->len);
	txdp->dataptr=(void *) virt_to_bus(skb->data);
	dctlp->dq_tx_last=txdp;
	flush_cache_all();	/* make descriptor visible to the DMAC */
	if (dctlp->cfg.duplex == CFG_DUPLEX_FULL) {
		/* in full duplex mode we can start frame transmit at once */
		dctlp->txstate=TX_XMIT;
		l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
		writel(l | RTS, cctlp->io_base+SCCBASE[channel]+CCR1);
		/* hand the new last-descriptor address to the DMAC */
		switch (channel) {
			case 0:	writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA);
				break;
			case 1:	writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA);
				break;
			case 2:	writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA);
				break;
			case 3:	writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA);
				break;
		}
	} else if ((dctlp->cfg.txdelmode == CFG_TXDEL_HARD) || !dctlp->cfg.txdelval) {
		/* Hardware TX-delay control using RTS/CTS or zero TX-delay */
		if (dctlp->txstate == TX_IDLE) {
			/* arm the TX watchdog timer before starting */
			writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
			writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
		}
		dctlp->txstate=TX_XMIT;
		if (dctlp->cfg.txdelmode == CFG_TXDEL_SOFT) {
			l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
			writel(l | RTS, cctlp->io_base+SCCBASE[channel]+CCR1);
		}
		switch (channel) {
			case 0:	writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA);
				break;
			case 1:	writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA);
				break;
			case 2:	writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA);
				break;
			case 3:	writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA);
				break;
		}
	} else {
		/* half duplex, software txdelay */
		switch (dctlp->txstate) {
			case TX_RESET:
				/* TX not initialized */
				/* NOTE(review): no break here - control falls through
				 * to TX_IDLE and keys up anyway; confirm a break is
				 * not missing */
				printk(KERN_INFO "PCISCC: xmit(): %s: Cannot transmit frame since TX is not inititalized!\n", dev->name);
			case TX_IDLE:
				/* TX is idle, key up and start txdelay */
				dctlp->txstate=TX_DELAY;
				l=readl(cctlp->io_base+SCCBASE[channel]+CCR1);
				writel(l | RTS, cctlp->io_base+SCCBASE[channel]+CCR1);
				writel(dctlp->cfg.txdelval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
				writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
				break;
			case TX_DELAY:
				/* tx is already keyed but not yet ready */
				break;
			case TX_TAIL:
				/* tx is currently transmitting closing txtail sequence;
				 * re-arm the watchdog and discard stale timer interrupts */
				writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR);
				pciscc_clear_timer(dctlp);
				writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR);
			case TX_XMIT:	/* note fall-through */
				/* tx is already transmitting preamble or data */
				dctlp->txstate=TX_XMIT;
				switch (channel) {
					case 0:	writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA);
						break;
					case 1:	writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA);
						break;
					case 2:	writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA);
						break;
					case 3:	writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA);
						break;
				}
				break;
			case TX_PROBE:
			case TX_CAL:
				/* we are busy with diagnostic stuff */
				break;
			default:
				/* should not occur */
				printk(KERN_ERR "PCISCC: Unhandled txstate in xmit() iface=%s.\n", dev->name);
		}
	}
	/* skb will be kfree()d by isr_txcleanup after transmission */
	restore_flags(flags);
	return 0;
}
+
+/* ------------------------------------------------------------------------- */
+
+/* called by receiver function - prepare received skb and fire up to L2 */
+static __inline__ void pciscc_rx_skb(struct sk_buff *skb, struct devctl_t *dctlp)
+{
+ if (!skb) {
+ printk(KERN_ERR "PCISCC: rx_skb(): Received NULL skb iface=%s.\n", dctlp->name);
+ return;
+ }
+ dctlp->stats.rx_packets++;
+ dctlp->stats.rx_bytes += skb->len;
+ skb->protocol = htons(ETH_P_AX25);
+ skb->dev = &dctlp->dev;
+ skb->mac.raw = skb->data;
+ skb->pkt_type = PACKET_HOST;
+ netif_rx(skb);
+ return;
+}
+
+/* ------------------------------------------------------------------------- */
+
+/* Initialize pciscc control device */
#ifdef MODULE
int pciscc_init(void)
#else /* !MODULE */
__initfunc(int pciscc_init(struct net_device *dummy))
#endif /* !MODULE */
{
	int i,j;		/* i: channel index; j: netdev name attempts */
	int devnum;		/* global device index: chip*4+channel */
	struct pci_dev *pcidev = NULL;

	printk(KERN_ERR "PCISCC: version %s\n", PCISCC_VERSION);
	/* shared DMA target used by idle TX descriptors (see isr_txcleanup) */
	if (!(dummybuf = kmalloc(256, GFP_DMA | GFP_KERNEL))) {
		printk(KERN_ERR "PCISCC: init: Could not get memory for dummybuf.\n");
		return -ENOMEM;
	}
	chipcnt=0;
	j=0;
	/* enumerate all PEB-20534H controllers on the PCI bus */
	while ((pcidev = pci_find_device(PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_PEB20534H, pcidev))) {
		if (!(chipctl[chipcnt]=kmalloc(sizeof(struct chipctl_t), GFP_KERNEL))) {
			printk(KERN_ERR "PCISCC: Out of memory allocating chipctl-structure\n");
#ifdef MODULE
			cleanup_module();	/* tears down everything set up so far */
#endif
			return -ENOMEM;
		}
		memset(chipctl[chipcnt], 0, sizeof(struct chipctl_t));
		chipctl[chipcnt]->pcidev=pcidev;
		memcpy(&chipctl[chipcnt]->cfg, &chipcfg_default, sizeof(struct chipcfg_t));
		/* create one netdevice per channel (four per chip) */
		for (i=0;i<4;i++) {
			devnum = chipcnt*4+i;
			if (!(devctl[devnum]=kmalloc(sizeof(struct devctl_t), GFP_KERNEL))) {
				printk(KERN_ERR "PCISCC: Out of memory allocating devctl-structure.\n");
#ifdef MODULE
				cleanup_module();
#endif
				return -ENOMEM;
			}
			memset(devctl[devnum], 0, sizeof(struct devctl_t));
			/* note: j counts naming attempts across ALL devices,
			 * capping the total search at 60 tries */
			do {
				sprintf(devctl[devnum]->name, "dscc%u", devnum);
				j++;
			} while (dev_get(devctl[devnum]->name) && j<60); /* find free device name */
			if (j>=60) { /* none left */
				printk(KERN_ERR "PCISCC: Could not find free netdev name.\n");
#ifdef MODULE
				cleanup_module();
#endif
				return -EEXIST;
			}
			/* wire the devctl, chipctl and netdevice together */
			devctl[devnum]->dev.priv = (void *) devctl[devnum];
			devctl[devnum]->dev.name = devctl[devnum]->name;
			devctl[devnum]->dev.init = pciscc_dev_init;
			devctl[devnum]->chip = chipctl[chipcnt];
			devctl[devnum]->channel = i;
			chipctl[chipcnt]->device[i] = devctl[devnum];
			register_netdev(&devctl[devnum]->dev);
		}
		chipcnt++;
	}
	printk(KERN_ERR "PCISCC: %u controller(s) found.\n", chipcnt);
	return 0;
}
+
+/* ------------------------------------------------------------------------- */
+
+#ifndef MODULE
/* kernel command-line hook: no boot options are supported (yet) */
__initfunc(void pciscc_setup(char *str, int *ints))
{
}
+#endif
+
+/* ------------------------------------------------------------------------- */
+
+
+/*****************************************************************************
+ * Module stuff. *
+ *****************************************************************************/
+
+#ifdef MODULE
+MODULE_AUTHOR("Jens David, DG1KJD <dg1kjd@afthd.tu-darmstadt.de>");
+MODULE_DESCRIPTION("AX.25 Device Driver for Siemens PEB-20534H (DSCC-4) based SCC cards");
+MODULE_SUPPORTED_DEVICE("pciscc");
+MODULE_PARM(xtal, "i");
+MODULE_PARM(probebit, "i");
+MODULE_PARM(txtimeout, "i");
+
/* module entry point - delegates to the common initialization routine */
int init_module(void)
{
	return pciscc_init();
}
+
+void cleanup_module(void)
+{
+ int i;
+ struct chipctl_t *cctlp;
+ struct devctl_t *dctlp;
+
+ for (i=0; i<4*chipcnt; i++) {
+ dctlp=devctl[i];
+ pciscc_dev_close(&dctlp->dev);
+ if (dctlp) {
+ unregister_netdev(&dctlp->dev);
+ kfree(dctlp);
+ devctl[i]=NULL;
+ }
+ }
+ for (i=0; i<chipcnt; i++) {
+ cctlp=chipctl[i];
+ if (cctlp) {
+ if (cctlp->irq) {
+ free_irq(cctlp->irq, (void *) cctlp);
+ cctlp->irq=0;
+ }
+ if (cctlp->io_base) {
+ iounmap(cctlp->io_base);
+ cctlp->io_base=0;
+ }
+ if (cctlp->lbi_base) {
+ iounmap(cctlp->lbi_base);
+ cctlp->lbi_base=0;
+ }
+ kfree(cctlp);
+ chipctl[i]=NULL;
+ }
+ }
+ if (dummybuf) {
+ kfree(dummybuf);
+ }
+ return;
+}
+#endif /* MODULE */