Diffstat (limited to 'drivers/ap1000')
-rw-r--r--   drivers/ap1000/Makefile         29
-rw-r--r--   drivers/ap1000/am79c830.h      276
-rw-r--r--   drivers/ap1000/am79c864.h      162
-rw-r--r--   drivers/ap1000/ap.c            318
-rw-r--r--   drivers/ap1000/apfddi-reg.h     14
-rw-r--r--   drivers/ap1000/apfddi.c        702
-rw-r--r--   drivers/ap1000/apfddi.h        142
-rw-r--r--   drivers/ap1000/bif.c           289
-rw-r--r--   drivers/ap1000/ddv.c          1008
-rw-r--r--   drivers/ap1000/ddv_util.c      116
-rw-r--r--   drivers/ap1000/mac.c          1177
-rw-r--r--   drivers/ap1000/mac.h            82
-rw-r--r--   drivers/ap1000/plc.c           393
-rw-r--r--   drivers/ap1000/plc.h            53
-rw-r--r--   drivers/ap1000/ringbuf.c       327
-rw-r--r--   drivers/ap1000/smt-types.h     167
16 files changed, 5255 insertions, 0 deletions
diff --git a/drivers/ap1000/Makefile b/drivers/ap1000/Makefile
new file mode 100644
index 000000000..47f37720a
--- /dev/null
+++ b/drivers/ap1000/Makefile
@@ -0,0 +1,29 @@
+# File: drivers/ap1000/Makefile
+#
+# Makefile for the AP1000 drivers
+#
+
+L_TARGET := ap1000.a
+L_OBJS := bif.o apfddi.o mac.o plc.o ringbuf.o
+
+ifeq ($(CONFIG_APBLOCK),y)
+L_OBJS += ap.o
+else
+ ifeq ($(CONFIG_APBLOCK),m)
+ M_OBJS += ap.o
+ endif
+endif
+
+ifeq ($(CONFIG_DDV),y)
+L_OBJS += ddv.o ddv_util.o
+else
+ ifeq ($(CONFIG_DDV),m)
+ M_OBJS += ddv.o ddv_util.o
+ endif
+endif
+
+include $(TOPDIR)/Rules.make
+
+clean:
+ rm -f core *.o *.a *.s
+
diff --git a/drivers/ap1000/am79c830.h b/drivers/ap1000/am79c830.h
new file mode 100644
index 000000000..f9ba50910
--- /dev/null
+++ b/drivers/ap1000/am79c830.h
@@ -0,0 +1,276 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Definitions for the AM79C830 FORMAC (Fiber Optic Ring MAC) chip.
+ */
+
+typedef int formac_reg;
+
+struct formac {
+ formac_reg cmdreg1; /* command register 1 */
+ formac_reg cmdreg2; /* command register 2 */
+#define st1u cmdreg1 /* status reg 1, upper */
+#define st1l cmdreg2 /* status reg 1, lower */
+ formac_reg st2u; /* status reg 2, upper */
+ formac_reg st2l; /* status reg 2, lower */
+ formac_reg imsk1u; /* interrupt mask 1, upper */
+ formac_reg imsk1l; /* interrupt mask 1, lower */
+ formac_reg imsk2u; /* interrupt mask 2, upper */
+ formac_reg imsk2l; /* interrupt mask 2, lower */
+ formac_reg said; /* short address, individual */
+ formac_reg laim; /* long adrs, indiv, MS word */
+ formac_reg laic; /* long adrs, indiv, middle word */
+ formac_reg lail; /* long adrs, indiv, LS word */
+ formac_reg sagp; /* short address, group */
+ formac_reg lagm; /* long adrs, group, MS word */
+ formac_reg lagc; /* long adrs, group, middle word */
+ formac_reg lagl; /* long adrs, group, LS word */
+ formac_reg mdreg1; /* mode reg 1 */
+ formac_reg stmchn; /* state machine reg */
+ formac_reg mir1; /* MAC information reg, upper */
+ formac_reg mir0; /* MAC information reg, lower */
+ formac_reg tmax; /* TMax value (2's-comp) */
+ formac_reg tvx; /* TVX value (2's-comp) */
+ formac_reg trt; /* TRT timer value */
+ formac_reg tht; /* THT timer value */
+ formac_reg tneg; /* current TNeg (2's-comp) */
+ formac_reg tmrs; /* extra bits of tneg, trt, tht; late count */
+ formac_reg treq0; /* our TReq (2's-comp), lower */
+ formac_reg treq1; /* our TReq (2's-comp), upper */
+ formac_reg pri0; /* priority reg for async queue 0 */
+ formac_reg pri1; /* priority reg for async queue 1 */
+ formac_reg pri2; /* priority reg for async queue 2 */
+ formac_reg tsync; /* TSync value (2's-comp) */
+ formac_reg mdreg2; /* mode reg 2 */
+ formac_reg frmthr; /* frame threshold reg */
+ formac_reg eacb; /* end address of claim/beacon area */
+ formac_reg earv; /* end address of receive area */
+ formac_reg eas; /* end address of sync queue */
+ formac_reg eaa0; /* end address of async queue 0 */
+ formac_reg eaa1; /* end address of async queue 1 */
+ formac_reg eaa2; /* end address of async queue 2 */
+ formac_reg sacl; /* start address of claim frame */
+ formac_reg sabc; /* start address of beacon frame */
+ formac_reg wpxsf; /* write pointer, special frames */
+ formac_reg rpxsf; /* read pointer, special frames */
+ formac_reg dummy1; /* not used */
+ formac_reg rpr; /* read pointer, receive */
+ formac_reg wpr; /* write pointer, receive */
+ formac_reg swpr; /* shadow write pointer, receive */
+ formac_reg wpxs; /* write pointer, sync queue */
+ formac_reg wpxa0; /* write pointer, async queue 0 */
+ formac_reg wpxa1; /* write pointer, async queue 1 */
+ formac_reg wpxa2; /* write pointer, async queue 2 */
+ formac_reg swpxs; /* shadow write pointer, sync queue */
+ formac_reg swpxa0; /* shadow write pointer, async queue 0 */
+ formac_reg swpxa1; /* shadow write pointer, async queue 1 */
+ formac_reg swpxa2; /* shadow write pointer, async queue 2 */
+ formac_reg rpxs; /* read pointer, sync queue */
+ formac_reg rpxa0; /* read pointer, async queue 0 */
+ formac_reg rpxa1; /* read pointer, async queue 1 */
+ formac_reg rpxa2; /* read pointer, async queue 2 */
+ formac_reg marr; /* memory address for random reads */
+ formac_reg marw; /* memory address for random writes */
+ formac_reg mdru; /* memory data register, upper */
+ formac_reg mdrl; /* memory data register, lower */
+ formac_reg tmsync; /* TSync timer value */
+ formac_reg fcntr; /* frame counter */
+ formac_reg lcntr; /* lost counter */
+ formac_reg ecntr; /* error counter */
+};
+
+/* Values for cmdreg1 */
+#define C1_SOFTWARE_RESET 1
+#define C1_IRMEMWI 2
+#define C1_IRMEMWO 3
+#define C1_IDLE_LISTEN 4
+#define C1_CLAIM_LISTEN 5
+#define C1_BEACON_LISTEN 6
+#define C1_LOAD_TVX 7
+#define C1_SEND_NR_TOKEN 0x0c
+#define C1_SEND_R_TOKEN 0x0d
+#define C1_ENTER_SI_MODE 0x0e
+#define C1_EXIT_SI_MODE 0x0f
+#define C1_CLR_SYNCQ_LOCK 0x11
+#define C1_CLR_ASYNCQ0_LOCK 0x12
+#define C1_CLR_ASYNCQ1_LOCK 0x14
+#define C1_CLR_ASYNCQ2_LOCK 0x18
+#define C1_CLR_RECVQ_LOCK 0x20
+#define C1_CLR_ALL_LOCKS 0x3f
+
+/* Values for cmdreg2 */
+#define C2_XMIT_SYNCQ 1
+#define C2_XMIT_ASYNCQ0 2
+#define C2_XMIT_ASYNCQ1 4
+#define C2_XMIT_ASYNCQ2 8
+#define C2_ABORT_XMIT 0x10
+#define C2_RESET_XMITQS 0x20
+#define C2_SET_TAG 0x30
+#define C2_EN_RECV_FRAME 0x40
+
+/* Bits in (st1u << 16) + st1l (and (imsk1u << 16) + imsk1l) */
+#define S1_XMIT_ABORT 0x80000000
+#define S1_XABORT_ASYNC2 0x40000000
+#define S1_XABORT_ASYNC1 0x20000000
+#define S1_XABORT_ASYNC0 0x10000000
+#define S1_XABORT_SYNC 0x08000000
+#define S1_XBUF_FULL_SYNC 0x04000000
+#define S1_XBUF_FULL_ASYNC 0x02000000
+#define S1_XDONE_SYNC 0x01000000
+#define S1_END_CHAIN_ASYNC2 0x00800000
+#define S1_END_CHAIN_ASYNC1 0x00400000
+#define S1_END_CHAIN_ASYNC0 0x00200000
+#define S1_END_CHAIN_SYNC 0x00100000
+#define S1_END_FRAME_ASYNC2 0x00080000
+#define S1_END_FRAME_ASYNC1 0x00040000
+#define S1_END_FRAME_ASYNC0 0x00020000
+#define S1_END_FRAME_SYNC 0x00010000
+#define S1_BUF_UNDERRUN_ASYNC2 0x00008000
+#define S1_BUF_UNDERRUN_ASYNC1 0x00004000
+#define S1_BUF_UNDERRUN_ASYNC0 0x00002000
+#define S1_BUF_UNDERRUN_SYNC 0x00001000
+#define S1_PAR_ERROR_ASYNC2 0x00000800
+#define S1_PAR_ERROR_ASYNC1 0x00000400
+#define S1_PAR_ERROR_ASYNC0 0x00000200
+#define S1_PAR_ERROR_SYNC 0x00000100
+#define S1_XINSTR_FULL_ASYNC2 0x00000080
+#define S1_XINSTR_FULL_ASYNC1 0x00000040
+#define S1_XINSTR_FULL_ASYNC0 0x00000020
+#define S1_XINSTR_FULL_SYNC 0x00000010
+#define S1_QUEUE_LOCK_ASYNC2 0x00000008
+#define S1_QUEUE_LOCK_ASYNC1 0x00000004
+#define S1_QUEUE_LOCK_ASYNC0 0x00000002
+#define S1_QUEUE_LOCK_SYNC 0x00000001
+
+/* Bits in (st2u << 16) + st2l (and (imsk2u << 16) + imsk2l) */
+#define S2_RECV_COMPLETE 0x80000000
+#define S2_RECV_BUF_EMPTY 0x40000000
+#define S2_RECV_ABORT 0x20000000
+#define S2_RECV_BUF_FULL 0x10000000
+#define S2_RECV_FIFO_OVF 0x08000000
+#define S2_RECV_FRAME 0x04000000
+#define S2_RECV_FRCT_OVF 0x02000000
+#define S2_NP_SIMULT_LOAD 0x01000000
+#define S2_ERR_SPECIAL_FR 0x00800000
+#define S2_CLAIM_STATE 0x00400000
+#define S2_MY_CLAIM 0x00200000
+#define S2_HIGHER_CLAIM 0x00100000
+#define S2_LOWER_CLAIM 0x00080000
+#define S2_BEACON_STATE 0x00040000
+#define S2_MY_BEACON 0x00020000
+#define S2_OTHER_BEACON 0x00010000
+#define S2_RING_OP 0x00008000
+#define S2_MULTIPLE_DA 0x00004000
+#define S2_TOKEN_ERR 0x00002000
+#define S2_TOKEN_ISSUED 0x00001000
+#define S2_TVX_EXP 0x00000800
+#define S2_TRT_EXP 0x00000400
+#define S2_MISSED_FRAME 0x00000200
+#define S2_ADDRESS_DET 0x00000100
+#define S2_PHY_INVALID 0x00000080
+#define S2_LOST_CTR_OVF 0x00000040
+#define S2_ERR_CTR_OVF 0x00000020
+#define S2_FRAME_CTR_OVF 0x00000010
+#define S2_SHORT_IFG 0x00000008
+#define S2_DUPL_CLAIM 0x00000004
+#define S2_TRT_EXP_RECOV 0x00000002
+
+/* Bits in mdreg1 */
+#define M1_SINGLE_FRAME 0x8000
+#define M1_MODE 0x7000
+#define M1_MODE_INITIALIZE 0x0000
+#define M1_MODE_MEMORY 0x1000
+#define M1_MODE_ONLINE_SP 0x2000
+#define M1_MODE_ONLINE 0x3000
+#define M1_MODE_INT_LOOP 0x4000
+#define M1_MODE_EXT_LOOP 0x7000
+#define M1_SHORT_ADRS 0x0800
+#define M1_ADDET 0x0700
+#define M1_ADDET_NORM 0x0000
+#define M1_ADDET_METOO 0x0100
+#define M1_ADDET_NSA_NOTME 0x0200
+#define M1_ADDET_NSA 0x0300
+#define M1_ADDET_DISABLE_RECV 0x0400
+#define M1_ADDET_LIM_PROMISC 0x0600
+#define M1_ADDET_PROMISC 0x0700
+#define M1_SELECT_RA 0x0080
+#define M1_DISABLE_CARRY 0x0040
+#define M1_EXT_GRP 0x0030
+#define M1_EXT_GRP_MYGRP 0x0000
+#define M1_EXT_GRP_SOFT 0x0010
+#define M1_EXT_GRP_UPPER24 0x0020
+#define M1_EXT_GRP_UPPER16 0x0030
+#define M1_LOCK_XMIT_QS 0x0008
+#define M1_FULL_DUPLEX 0x0004
+#define M1_XMTINH_PIN 0x0002
+
+/* Bits in mdreg2 */
+#define M2_TAGMODE 0x8000
+#define M2_STRIP_FCS 0x4000
+#define M2_CHECK_PARITY 0x2000
+#define M2_EVEN_PARITY 0x1000
+#define M2_LSB_FIRST 0x0800
+#define M2_RCV_BYTE_BDRY_MASK 0x0600
+#define M2_RCV_BYTE_BDRY 0x0200
+#define M2_ENABLE_HSREQ 0x0100
+#define M2_ENABLE_NPDMA 0x0080
+#define M2_SYNC_NPDMA 0x0040
+#define M2_SYMBOL_CTRL 0x0020
+#define M2_RECV_BAD_FRAMES 0x0010
+#define M2_AFULL_MASK 0x000f
+#define M2_AFULL 0x0001
+
+/* Bits in stmchn */
+#define SM_REV_MASK 0xe000
+#define SM_REV 0x2000
+#define SM_SEND_IMM_MODE 0x1000
+#define SM_TOKEN_MODE 0x0c00
+#define SM_TOKEN_MODE_NR 0x0000
+#define SM_TOKEN_MODE_ENTER_R 0x0400
+#define SM_TOKEN_MODE_ENTER_NR 0x0800
+#define SM_TOKEN_MODE_R 0x0c00
+#define SM_RCV_STATE 0x0380
+#define SM_XMIT_STATE 0x0070
+#define SM_MDR_PENDING 0x0008
+#define SM_MDR_TAG 0x0004
+
+/* Bits in transmit descriptor */
+#define TD_MORE 0x80000000
+#define TD_MAGIC 0x40000000
+#define TD_BYTE_BDRY_MASK 0x18000000
+#define TD_BYTE_BDRY_1 0x08000000
+#define TD_XMIT_DONE 0x04000000
+#define TD_NO_FCS 0x02000000
+#define TD_XMIT_ABORT 0x01000000
+#define TD_BYTE_BDRY_LG 27
+
+/* Bits in pointer in buffer memory (nontag mode) */
+#define PT_MAGIC 0xa0000000
+
+/* Bits in receive status word */
+#define RS_VALID 0x80000000
+#define RS_ABORTED 0x40000000
+#define RS_SRC_ROUTE 0x10000000
+#define RS_E_INDIC 0x08000000
+#define RS_A_INDIC 0x04000000
+#define RS_C_INDIC 0x02000000
+#define RS_ERROR 0x01000000
+#define RS_ADDR_MATCH 0x00800000
+#define RS_FRAME_TYPE 0x00700000
+#define RS_FT_SMT 0x00000000
+#define RS_FT_LLC 0x00100000
+#define RS_FT_IMPL 0x00200000
+#define RS_FT_MAC 0x00400000
+#define RS_FT_LLC_SYNC 0x00500000
+#define RS_FT_IMPL_SYNC 0x00600000
+#define RS_BYTE_BDRY_MASK 0x00030000
+#define RS_BYTE_BDRY 0x00010000
+#define RS_BYTE_BDRY_LG 16
+
+#define RS_LENGTH 0x0000ffff
+
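
The S1_ and S2_ masks above apply to the 32-bit value formed from the upper/lower register pairs, as the comments note. Below is a minimal sketch of that combination, assuming each 16-bit status register reads back in the low half of the word and using the memory-mapped mac pointer declared in apfddi-reg.h (added later in this patch); the helper names are illustrative, not part of the driver.

    #include "am79c830.h"
    #include "apfddi-reg.h"

    /* Combine the split 16-bit status registers into the 32-bit words the
       S1_ and S2_ masks are defined against. */
    static inline unsigned int formac_status1(void)
    {
            return ((unsigned int)(mac->st1u & 0xffff) << 16) |
                   (mac->st1l & 0xffff);
    }

    static inline unsigned int formac_status2(void)
    {
            return ((unsigned int)(mac->st2u & 0xffff) << 16) |
                   (mac->st2l & 0xffff);
    }

    /* Example test against the combined word. */
    static inline int formac_ring_operational(void)
    {
            return (formac_status2() & S2_RING_OP) != 0;
    }
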
diff --git a/drivers/ap1000/am79c864.h b/drivers/ap1000/am79c864.h
new file mode 100644
index 000000000..0fef95791
--- /dev/null
+++ b/drivers/ap1000/am79c864.h
@@ -0,0 +1,162 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Definitions for Am79c864 PLC (Physical Layer Controller)
+ */
+
+typedef int plc_reg;
+
+struct plc {
+ plc_reg ctrl_a;
+ plc_reg ctrl_b;
+ plc_reg intr_mask;
+ plc_reg xmit_vector;
+ plc_reg vec_length;
+ plc_reg le_threshold;
+ plc_reg c_min;
+ plc_reg tl_min;
+ plc_reg tb_min;
+ plc_reg t_out;
+ plc_reg dummy1;
+ plc_reg lc_length;
+ plc_reg t_scrub;
+ plc_reg ns_max;
+ plc_reg tpc_load;
+ plc_reg tne_load;
+ plc_reg status_a;
+ plc_reg status_b;
+ plc_reg tpc;
+ plc_reg tne;
+ plc_reg clk_div;
+ plc_reg bist_sig;
+ plc_reg rcv_vector;
+ plc_reg intr_event;
+ plc_reg viol_sym_ct;
+ plc_reg min_idle_ct;
+ plc_reg link_err_ct;
+};
+
+/* Bits in ctrl_a */
+#define CA_NOISE_TIMER 0x4000
+#define CA_TNE_16BIT 0x2000
+#define CA_TPC_16BIT 0x1000
+#define CA_REQ_SCRUB 0x0800
+#define CA_VSYM_INTR_MODE 0x0200
+#define CA_MINI_INTR_MODE 0x0100
+#define CA_LOOPBACK 0x0080
+#define CA_FOT_OFF 0x0040
+#define CA_EB_LOOP 0x0020
+#define CA_LM_LOOP 0x0010
+#define CA_BYPASS 0x0008
+#define CA_REM_LOOP 0x0004
+#define CA_RF_DISABLE 0x0002
+#define CA_RUN_BIST 0x0001
+
+/* Bits in ctrl_b */
+#define CB_CONFIG_CTRL 0x8000
+#define CB_MATCH_LS 0x7800
+#define CB_MATCH_LS_ANY 0x0000
+#define CB_MATCH_LS_QLS 0x4000
+#define CB_MATCH_LS_MLS 0x2000
+#define CB_MATCH_LS_HLS 0x1000
+#define CB_MATCH_LS_ILS 0x0800
+#define CB_MAINT_LS 0x0700
+#define CB_MAINT_LS_QLS 0x0000
+#define CB_MAINT_LS_ILS 0x0100
+#define CB_MAINT_LS_HLS 0x0200
+#define CB_MAINT_LS_MLS 0x0300
+#define CB_MAINT_LS_PDR 0x0600
+#define CB_CLASS_S 0x0080
+#define CB_PC_LCT 0x0060
+#define CB_PC_LCT_NONE 0x0000
+#define CB_PC_LCT_PDR 0x0020
+#define CB_PC_LCT_IDLE 0x0040
+#define CB_PC_LCT_LOOP 0x0060
+#define CB_PC_JOIN 0x0010
+#define CB_LONG_LCT 0x0008
+#define CB_PC_MAINT 0x0004
+#define CB_PCM_CTRL 0x0003
+#define CB_PC_START 0x0001
+#define CB_PC_TRACE 0x0002
+#define CB_PC_STOP 0x0003
+
+/* Bits in status_a */
+#define SA_SIG_DETECT 0x0400
+#define SA_PREV_LS 0x0300
+#define SA_PREV_LS_QLS 0x0000
+#define SA_PREV_LS_MLS 0x0100
+#define SA_PREV_LS_HLS 0x0200
+#define SA_PREV_LS_ILS 0x0300
+#define SA_LINE_ST 0x00e0
+#define SA_LINE_ST_NLS 0x0000
+#define SA_LINE_ST_ALS 0x0020
+#define SA_LINE_ST_ILS4 0x0060
+#define SA_LINE_ST_QLS 0x0080
+#define SA_LINE_ST_MLS 0x00a0
+#define SA_LINE_ST_HLS 0x00c0
+#define SA_LINE_ST_ILS 0x00e0
+#define SA_LSM_STATE 0x0010
+#define SA_UNKN_LINE_ST 0x0008
+#define SA_SYM_PAIR_CTR 0x0007
+
+/* Bits in status_b */
+#define SB_RF_STATE 0xc000
+#define SB_RF_STATE_REPEAT 0x0000
+#define SB_RF_STATE_IDLE 0x4000
+#define SB_RF_STATE_HALT1 0x8000
+#define SB_RF_STATE_HALT2 0xc000
+#define SB_PCI_STATE 0x3000
+#define SB_PCI_STATE_REMOVED 0x0000
+#define SB_PCI_STATE_INS_SCR 0x1000
+#define SB_PCI_STATE_REM_SCR 0x2000
+#define SB_PCI_STATE_INSERTED 0x3000
+#define SB_PCI_SCRUB 0x0800
+#define SB_PCM_STATE 0x0780
+#define SB_PCM_STATE_OFF 0x0000
+#define SB_PCM_STATE_BREAK 0x0080
+#define SB_PCM_STATE_TRACE 0x0100
+#define SB_PCM_STATE_CONNECT 0x0180
+#define SB_PCM_STATE_NEXT 0x0200
+#define SB_PCM_STATE_SIGNAL 0x0280
+#define SB_PCM_STATE_JOIN 0x0300
+#define SB_PCM_STATE_VERIFY 0x0380
+#define SB_PCM_STATE_ACTIVE 0x0400
+#define SB_PCM_STATE_MAIN 0x0480
+#define SB_PCM_SIGNALING 0x0040
+#define SB_LSF 0x0020
+#define SB_RCF 0x0010
+#define SB_TCF 0x0008
+#define SB_BREAK_REASON 0x0007
+#define SB_BREAK_REASON_NONE 0x0000
+#define SB_BREAK_REASON_START 0x0001
+#define SB_BREAK_REASON_T_OUT 0x0002
+#define SB_BREAK_REASON_NS_MAX 0x0003
+#define SB_BREAK_REASON_QLS 0x0004
+#define SB_BREAK_REASON_ILS 0x0005
+#define SB_BREAK_REASON_HLS 0x0006
+
+/* Bits in intr_event and intr_mask */
+#define IE_NP_ERROR 0x8000
+#define IE_SIGNAL_OFF 0x4000
+#define IE_LE_CTR 0x2000
+#define IE_MINI_CTR 0x1000
+#define IE_VSYM_CTR 0x0800
+#define IE_PHY_INVALID 0x0400
+#define IE_EBUF_ERR 0x0200
+#define IE_TNE_EXP 0x0100
+#define IE_TPC_EXP 0x0080
+#define IE_PCM_ENABLED 0x0040
+#define IE_PCM_BREAK 0x0020
+#define IE_SELF_TEST 0x0010
+#define IE_TRACE_PROP 0x0008
+#define IE_PCM_CODE 0x0004
+#define IE_LS_MATCH 0x0002
+#define IE_PARITY_ERR 0x0001
+
+/* Correct value for BIST signature */
+#define BIST_CORRECT 0x6ecd
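
SB_PCM_STATE above is a four-bit field (bits 7-10 of status_b) encoding the PCM connection state. The following decode of that field with the masks above is a sketch for illustration only; the function and its name table are not part of the driver.

    #include "am79c864.h"

    /* Map the PCM state field of status_b to a printable name. */
    static const char *plc_pcm_state_name(int status_b)
    {
            switch (status_b & SB_PCM_STATE) {
            case SB_PCM_STATE_OFF:          return "off";
            case SB_PCM_STATE_BREAK:        return "break";
            case SB_PCM_STATE_TRACE:        return "trace";
            case SB_PCM_STATE_CONNECT:      return "connect";
            case SB_PCM_STATE_NEXT:         return "next";
            case SB_PCM_STATE_SIGNAL:       return "signal";
            case SB_PCM_STATE_JOIN:         return "join";
            case SB_PCM_STATE_VERIFY:       return "verify";
            case SB_PCM_STATE_ACTIVE:       return "active";
            case SB_PCM_STATE_MAIN:         return "maintenance";
            default:                        return "?";
            }
    }
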
diff --git a/drivers/ap1000/ap.c b/drivers/ap1000/ap.c
new file mode 100644
index 000000000..ae2071cf9
--- /dev/null
+++ b/drivers/ap1000/ap.c
@@ -0,0 +1,318 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * ap.c - Single AP1000 block driver.
+ *
+ * (C) dwalsh, Pious project, DCS, ANU 1996
+ *
+ * This block driver is designed simply to perform
+ * I/O operations to the host's file system.
+ *
+ * Heavily modified by tridge
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/ext2_fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/ap1000/apservice.h>
+
+#define AP_DEBUG 0
+
+#define MAJOR_NR APBLOCK_MAJOR
+#define AP_DRIVER 1
+#include <linux/blk.h>
+
+#define NUM_APDEVS 8
+#define MAX_REQUESTS 1
+
+static struct wait_queue * busy_wait = NULL;
+
+static int ap_blocksizes[NUM_APDEVS];
+static int ap_length[NUM_APDEVS];
+static int ap_fds[NUM_APDEVS];
+
+#define SECTOR_BLOCK_SHIFT 9
+#define AP_BLOCK_SHIFT 12 /* 4k blocks */
+#define AP_BLOCK_SIZE (1<<AP_BLOCK_SHIFT)
+
+static volatile int request_count = 0;
+
+#ifdef MODULE
+static void ap_release(struct inode * inode, struct file * filp)
+{
+ MOD_DEC_USE_COUNT;
+}
+#endif
+
+static void ap_request(void)
+{
+ struct cap_request creq;
+ unsigned int minor;
+ int offset, len;
+ struct request *req;
+
+ if (request_count >= MAX_REQUESTS) return;
+
+repeat:
+
+ if (!CURRENT) {
+ return;
+ }
+
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) {
+ panic(DEVICE_NAME ": request list destroyed");
+ }
+ if (CURRENT->bh) {
+ if (!buffer_locked(CURRENT->bh)) {
+ panic(DEVICE_NAME ": block not locked");
+ }
+ }
+
+ req = CURRENT;
+
+ minor = MINOR(req->rq_dev);
+
+ if (minor >= NUM_APDEVS) {
+ printk("apblock: request for invalid minor %d\n",minor);
+ end_request(0);
+ goto repeat;
+ }
+
+ offset = req->sector;
+ len = req->current_nr_sectors;
+
+ if ((offset + len) > ap_length[minor]) {
+ printk("apblock: request for invalid sectors %d -> %d\n",
+ offset,offset+len);
+ end_request(0);
+ goto repeat;
+ }
+
+ if (ap_fds[minor] == -1) {
+ printk("apblock: minor %d not open\n",minor);
+ end_request(0);
+ goto repeat;
+ }
+
+ /* convert to our units */
+ offset <<= SECTOR_BLOCK_SHIFT;
+ len <<= SECTOR_BLOCK_SHIFT;
+
+ /* setup a request for the host */
+ creq.cid = mpp_cid();
+ creq.size = sizeof(creq);
+ creq.header = 0;
+ creq.data[0] = (int)(req);
+ creq.data[1] = ap_fds[minor];
+ creq.data[2] = offset;
+ creq.data[3] = len;
+
+ switch (req->cmd) {
+ case READ:
+#if AP_DEBUG
+ printk("apblock: read req=0x%x len=%d offset=%d\n",
+ req,len,offset);
+#endif
+ creq.type = REQ_BREAD;
+ if (bif_queue(&creq,0,0)) {
+ return;
+ }
+ break;
+
+ case WRITE:
+#if AP_DEBUG
+ printk("apblock: write req=0x%x len=%d offset=%d\n",
+ req,len,offset);
+#endif
+ creq.type = REQ_BWRITE;
+ creq.size += len;
+ if (bif_queue_nocopy(&creq,req->buffer,creq.size - sizeof(creq))) {
+ return;
+ }
+ break;
+
+ default:
+ printk("apblock: unknown ap op %d\n",req->cmd);
+ end_request(0);
+ return;
+ }
+
+ if (++request_count < MAX_REQUESTS)
+ goto repeat;
+}
+
+/* this is called by ap1000/bif.c when a read/write has completed */
+void ap_complete(struct cap_request *creq)
+{
+#if AP_DEBUG
+ struct request *req = (struct request *)(creq->data[0]);
+
+ printk("request 0x%x complete\n",req);
+#endif
+ end_request(1);
+ request_count--;
+ ap_request();
+}
+
+
+/* this is called by ap1000/bif.c to find a buffer to put a BREAD into
+ using DMA */
+char *ap_buffer(struct cap_request *creq)
+{
+ struct request *req = (struct request *)(creq->data[0]);
+
+ return(req->buffer);
+}
+
+
+static int ap_open(struct inode * inode, struct file * filp)
+{
+ struct cap_request creq;
+ int minor;
+ minor = DEVICE_NR(inode->i_rdev);
+
+#if AP_DEBUG
+ printk("ap_open: minor=%x\n", minor);
+#endif
+
+ if (minor >= NUM_APDEVS)
+ return -ENODEV;
+
+ /* if its already open then don't do anything */
+ if (ap_fds[minor] != -1)
+ return 0;
+
+ /* send the open request to the front end */
+ creq.cid = mpp_cid();
+ creq.type = REQ_BOPEN;
+ creq.header = 0;
+ creq.size = sizeof(creq);
+ creq.data[0] = minor;
+
+ bif_queue(&creq,0,0);
+
+ /* wait for the reply */
+ while (ap_fds[minor] == -1)
+ sleep_on(&busy_wait);
+
+ return 0;
+}
+
+
+static int ap_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ if (!inode || !inode->i_rdev)
+ return -EINVAL;
+
+ switch (cmd) {
+ case BLKGETSIZE: /* Return device size */
+ if (put_user(ap_length[MINOR(inode->i_rdev)],(long *) arg))
+ return -EFAULT;
+ return 0;
+
+ default:
+ break;
+ };
+
+ return 0;
+}
+
+
+/* this is called by ap1000/bif.c when a open reply comes in */
+void ap_open_reply(struct cap_request *creq)
+{
+ int minor = creq->data[0];
+
+ ap_fds[minor] = creq->data[1];
+ ap_length[minor] = creq->data[2] >> SECTOR_BLOCK_SHIFT;
+
+#if AP_DEBUG
+ printk("ap opened minor %d length=%d fd=%d\n",
+ minor,ap_length[minor],ap_fds[minor]);
+#endif
+
+ wake_up(&busy_wait);
+}
+
+static struct file_operations ap_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read - general block-dev read */
+ block_write, /* write - general block-dev write */
+ NULL, /* readdir - bad */
+ NULL, /* poll */
+ ap_ioctl, /* ioctl */
+ NULL, /* mmap */
+ ap_open, /* open */
+#ifndef MODULE
+ NULL, /* no special release code... */
+#else
+ ap_release, /* module needs to decrement use count */
+#endif
+ block_fsync, /* fsync */
+};
+
+
+int ap_init(void)
+{
+ int i;
+ static int done = 0;
+
+ if (done) return(1);
+
+ if (register_blkdev(MAJOR_NR,"apblock",&ap_fops)) {
+ printk("ap: unable to get major %d for ap block dev\n",MAJOR_NR);
+ return -1;
+ }
+ printk("ap_init: register dev %d\n", MAJOR_NR);
+ blk_dev[MAJOR_NR].request_fn = &ap_request;
+
+ for (i=0;i<NUM_APDEVS;i++) {
+ ap_blocksizes[i] = AP_BLOCK_SIZE;
+ ap_length[i] = 0;
+ ap_fds[i] = -1;
+ }
+
+ blksize_size[MAJOR_NR] = ap_blocksizes;
+
+ read_ahead[MAJOR_NR] = 32; /* 16k read ahead */
+
+ return(0);
+}
+
+/* loadable module support */
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ int error = ap_init();
+ if (!error)
+ printk(KERN_INFO "APBLOCK: Loaded as module.\n");
+ return error;
+}
+
+/* Before freeing the module, invalidate all of the protected buffers! */
+void cleanup_module(void)
+{
+ int i;
+
+ for (i = 0 ; i < NUM_APDEVS; i++)
+ invalidate_buffers(MKDEV(MAJOR_NR, i));
+
+ unregister_blkdev( MAJOR_NR, "apblock" );
+ blk_dev[MAJOR_NR].request_fn = 0;
+}
+
+#endif /* MODULE */
diff --git a/drivers/ap1000/apfddi-reg.h b/drivers/ap1000/apfddi-reg.h
new file mode 100644
index 000000000..b3f25fb2a
--- /dev/null
+++ b/drivers/ap1000/apfddi-reg.h
@@ -0,0 +1,14 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/* FDDI register pointers */
+extern volatile struct formac *mac;
+extern volatile struct plc *plc;
+extern volatile int *csr0;
+extern volatile int *csr1;
+extern volatile int *buffer_mem;
+extern volatile int *fifo;
diff --git a/drivers/ap1000/apfddi.c b/drivers/ap1000/apfddi.c
new file mode 100644
index 000000000..21f609ea2
--- /dev/null
+++ b/drivers/ap1000/apfddi.c
@@ -0,0 +1,702 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * $Id: apfddi.c,v 1.6 1996/12/18 01:45:51 tridge Exp $
+ *
+ * Network interface definitions for AP1000 fddi device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#include <asm/ap1000/apservice.h>
+#include <asm/ap1000/apreg.h>
+#include <asm/irq.h>
+
+#include <net/arp.h>
+
+#include "apfddi.h"
+#include "smt-types.h"
+#include "mac.h"
+#include "plc.h"
+#include "am79c830.h"
+#include "apfddi-reg.h"
+
+volatile struct formac *mac;
+volatile struct plc *plc;
+volatile int *csr0;
+volatile int *csr1;
+volatile int *buffer_mem;
+volatile int *fifo;
+
+#define APFDDI_DEBUG 0
+
+#define APFDDI_IRQ 7
+
+#define T(x) (-SECS_TO_FDDI_TIME(x))
+
+struct plc_info plc_info = {
+ pt_s, /* port_type */
+ T(1.6e-3), /* c_min */
+ T(50e-6), /* tl_min */
+ T(5e-3), /* tb_min */
+ T(100e-3), /* t_out */
+ T(50e-3), /* lc_short */
+ T(500e-3), /* lc_medium */
+ T(5.0), /* lc_long */
+ T(50.0), /* lc_extended */
+ T(3.5e-3), /* t_scrub */
+ T(1.3e-3), /* ns_max */
+};
+
+struct mac_info mac_info = {
+ T(165e-3), /* tmax */
+ T(3.5e-3), /* tvx */
+ T(20e-3), /* treq */
+ { 0x42, 0x59 }, /* s_address */
+ { 0x42, 0x59, 0x10, 0x76, 0x88, 0x82 }, /* l_address */
+ { 0 }, /* s_group_adrs */
+ { 0 }, /* l_group_adrs */
+ 0, /* rcv_own_frames */
+ 1, /* only_good_frames */
+};
+
+u_char fddi_bitrev[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+
+/* XXX our hardware address, canonical bit order */
+static u_char apfddi_saddr[6] = { 0x42, 0x9a, 0x08, 0x6e, 0x11, 0x41 };
+
+struct device *apfddi_device = NULL;
+struct net_device_stats *apfddi_stats = NULL;
+
+volatile struct apfddi_queue *apfddi_queue_top = NULL;
+
+void map_regs(void)
+{
+ unsigned long reg_base_addr = 0xfbf00000;
+
+ mac = (volatile struct formac *) (reg_base_addr + FORMAC);
+ plc = (volatile struct plc *) (reg_base_addr + PLC);
+ csr0 = (volatile int *) (reg_base_addr + CSR0);
+ csr1 = (volatile int *) (reg_base_addr + CSR1);
+ buffer_mem = (volatile int *) (reg_base_addr + BUFFER_MEM);
+ fifo = (volatile int *) (reg_base_addr + FIFO);
+}
+
+int ring_op;
+
+void apfddi_startup(void)
+{
+ int reason;
+
+#if APFDDI_DEBUG
+ printk("In apfddi_startup\n");
+#endif
+
+ *csr0 = CS0_LED0;
+ ring_op = 0;
+ if (*csr1 & 0xf078) {
+ *csr1 = CS1_RESET_MAC | CS1_RESET_FIFO;
+ *csr1 = 0;
+ reason = 1;
+ printk("resetting after power-on\n");
+ } else {
+ *csr1 = CS1_RESET_FIFO;
+ *csr1 = 0;
+ reason = plc_inited(&plc_info);
+ if (reason)
+ printk("resetting: plc reason %d\n", reason);
+ }
+ if (reason) {
+#if APFDDI_DEBUG
+ printk("Calling plc_init\n");
+#endif
+ plc_init(&plc_info);
+#if APFDDI_DEBUG
+ printk("Calling mac_init\n");
+#endif
+ mac_init(&mac_info);
+ *csr0 |= CS0_LED1;
+ pc_start(loop_none);
+
+ } else {
+ *csr0 |= CS0_LED2 | CS0_LED1;
+ reason = mac_inited(&mac_info);
+ if (reason) {
+ printk("resetting mac: reason %d\n", reason);
+ mac_init(&mac_info);
+ mac_reset(loop_none);
+ mac_claim();
+ } else {
+ ring_op = 1;
+ *csr0 &= ~(CS0_LED0 | CS0_LED1 | CS0_LED2);
+ }
+ }
+}
+
+void apfddi_off(void)
+{
+ *csr0 &= ~CS0_LED1;
+ pc_stop();
+}
+
+void apfddi_sleep(void)
+{
+ mac_sleep();
+ plc_sleep();
+}
+
+void apfddi_poll(void)
+{
+ if (*csr0 & CS0_PHY_IRQ)
+ plc_poll();
+ if (*csr0 & CS0_MAC_IRQ)
+ mac_poll();
+}
+
+void set_cf_join(int on)
+{
+ if (on) {
+#if APFDDI_DEBUG
+ printk("apfddi: joined the ring!\n");
+#endif
+ mac_reset(loop_none);
+ *csr0 |= CS0_LED2;
+ mac_claim();
+ } else {
+ mac_disable();
+ ring_op = 0;
+ *csr0 = (*csr0 & ~CS0_LED2) | CS0_LED1 | CS0_LED0;
+ }
+}
+
+void set_ring_op(int up)
+{
+ ring_op = up;
+ if (up) {
+#if APFDDI_DEBUG
+ printk("apfddi: ring operational!\n");
+#endif
+ *csr0 &= ~(CS0_LED2 | CS0_LED1 | CS0_LED0);
+ } else
+ *csr0 |= CS0_LED2 | CS0_LED1 | CS0_LED0;
+}
+
+void rmt_event(int st)
+{
+ if (st & (S2_BEACON_STATE|S2_MULTIPLE_DA|S2_TOKEN_ERR
+ |S2_DUPL_CLAIM|S2_TRT_EXP_RECOV)) {
+ printk("st2 = %x\n", st);
+ }
+}
+
+
+int apfddi_init(struct device *dev);
+static void apfddi_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int apfddi_xmit(struct sk_buff *skb, struct device *dev);
+int apfddi_rx(struct mac_buf *mbuf);
+static struct net_device_stats *apfddi_get_stats(struct device *dev);
+#if APFDDI_DEBUG
+void dump_packet(char *action, char *buf, int len, int seq);
+#endif
+
+/*
+ * Create FDDI header for an arbitrary protocol layer
+ *
+ * saddr=NULL means use device source address (always will anyway)
+ * daddr=NULL means leave destination address (eg unresolved arp)
+ */
+static int apfddi_hard_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len)
+{
+ struct fddi_header *fh;
+ struct llc_header *lh;
+ u_char *base_header;
+ u_char *fd_daddr = (u_char *)daddr;
+ int i;
+
+#if APFDDI_DEBUG
+ printk("In apfddi_hard_header\n");
+#endif
+
+ if (skb == NULL) {
+ printk("Null skb in apfddi_hard_header... returning...\n");
+ return 0;
+ }
+
+ switch(type) {
+ case ETH_P_IP:
+#if APFDDI_DEBUG
+ printk("apfddi_hard_header: Processing IP packet\n");
+#endif
+ break;
+ case ETH_P_ARP:
+#if APFDDI_DEBUG
+ printk("apfddi_hard_header: Processing ARP packet\n");
+#endif
+ break;
+ case ETH_P_RARP:
+#if APFDDI_DEBUG
+ printk("apfddi_hard_header: Processing RARP packet\n");
+#endif
+ break;
+ default:
+ printk("apfddi_hard_header: I don't understand protocol %d (0x%x)\n",
+ type, type);
+ apfddi_stats->tx_errors++;
+ return 0;
+ }
+
+ base_header = (u_char *)skb_push(skb, FDDI_HARDHDR_LEN-4);
+ if (base_header == NULL) {
+ printk("apfddi_hard_header: Memory squeeze, dropping packet.\n");
+ apfddi_stats->tx_dropped++;
+ return 0;
+ }
+ fh = (struct fddi_header *)(base_header + 3);
+ lh = (struct llc_header *)((char *)fh + FDDI_HDRLEN);
+
+ lh->llc_dsap = lh->llc_ssap = LLC_SNAP_LSAP;
+ lh->snap_control = LLC_UI;
+ lh->snap_org_code[0] = 0;
+ lh->snap_org_code[1] = 0;
+ lh->snap_org_code[2] = 0;
+ lh->snap_ether_type = htons(type);
+
+#if APFDDI_DEBUG
+ printk("snap_ether_type is %d (0x%x)\n", lh->snap_ether_type,
+ lh->snap_ether_type);
+#endif
+
+ fh->fddi_fc = FDDI_FC_LLC;
+
+ /*
+ * Fill in the source address.
+ */
+ for (i = 0; i < 6; i++)
+ fh->fddi_shost[i] = fddi_bitrev[apfddi_saddr[i]];
+
+ /*
+ * Fill in the destination address.
+ */
+ if (daddr) {
+#if APFDDI_DEBUG
+ printk("daddr is: ");
+#endif
+ for (i = 0; i < 6; i++) {
+ fh->fddi_dhost[i] = fddi_bitrev[fd_daddr[i]];
+#if APFDDI_DEBUG
+ printk("%x(%x):",fh->fddi_dhost[i], fd_daddr[i]);
+#endif
+ }
+#if APFDDI_DEBUG
+ printk("\n");
+#endif
+ return(FDDI_HARDHDR_LEN-4);
+ }
+ else {
+#if APFDDI_DEBUG
+ printk("apfddi_hard_header, daddr was NULL\n");
+#endif
+ return -(FDDI_HARDHDR_LEN-4);
+ }
+}
+
+/*
+ * Rebuild the FDDI header. This is called after an ARP (or in future
+ * other address resolution) has completed on this sk_buff. We now let
+ * ARP fill in the other fields.
+ */
+static int apfddi_rebuild_header(void *buff, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb)
+{
+ int i, status;
+ struct fddi_header *fh = (struct fddi_header *)(buff+3);
+
+#if APFDDI_DEBUG
+ printk("In apfddi_rebuild_header, dev is %x apfddi_device is %x\n", dev,
+ apfddi_device);
+ printk("rebuild header for fc 0x%x\n", fh->fddi_fc);
+ printk("dest address is:\n");
+ for (i = 0; i < 6; i++) printk("%x:", fh->fddi_dhost[i]);
+#endif
+ status = arp_find(raddr, skb) ? 1 : 0;
+
+ if (!status) {
+#if APFDDI_DEBUG
+ printk("dest address is now:\n");
+ for (i = 0; i < 6; i++) printk("%x:", fh->fddi_dhost[i]);
+ printk("status is %d\n", status);
+#endif
+ /*
+ * Bit reverse the dest_address.
+ */
+ for (i = 0; i < 6; i++)
+ fh->fddi_dhost[i] = fddi_bitrev[fh->fddi_dhost[i]];
+ }
+#if APFDDI_DEBUG
+ printk("\n");
+#endif
+ return(status);
+}
+
+static int apfddi_set_mac_address(struct device *dev, void *addr)
+{
+#if APFDDI_DEBUG
+ printk("In apfddi_set_mac_address\n");
+#endif
+ return (0);
+}
+
+static void apfddi_set_multicast_list(struct device *dev)
+{
+#if APFDDI_DEBUG
+ printk("In apfddi_set_multicast_list\n");
+#endif
+}
+
+static int apfddi_do_ioctl(struct device *dev, struct ifreq *ifr, int cmd)
+{
+#if APFDDI_DEBUG
+ printk("In apfddi_do_ioctl\n");
+#endif
+ return (0);
+}
+
+static int apfddi_set_config(struct device *dev, struct ifmap *map)
+{
+#if APFDDI_DEBUG
+ printk("In apfddi_set_config\n");
+#endif
+ return (0);
+}
+
+/*
+ * Opening the fddi device through ifconfig.
+ */
+int apfddi_open(struct device *dev)
+{
+ static int already_run = 0;
+ unsigned flags;
+ int res;
+
+ if (already_run) {
+ apfddi_startup();
+ *csr0 |= CS0_INT_ENABLE;
+ return 0;
+ }
+ already_run = 1;
+
+ map_regs();
+ apfddi_startup();
+
+ save_flags(flags); cli();
+ if ((res = request_irq(APFDDI_IRQ, apfddi_interrupt, SA_INTERRUPT,
+ "apfddi", dev))) {
+ printk("Failed to install apfddi handler error=%d\n", res);
+ restore_flags(flags);
+ return(0);
+ }
+ enable_irq(APFDDI_IRQ);
+ restore_flags(flags);
+
+#if APFDDI_DEBUG
+ printk("Installed apfddi interrupt handler\n");
+#endif
+ *csr0 |= CS0_INT_ENABLE;
+#if APFDDI_DEBUG
+ printk("Enabled fddi interrupts\n");
+#endif
+
+ return 0;
+}
+
+/*
+ * Stop the fddi device through ifconfig.
+ */
+int apfddi_stop(struct device *dev)
+{
+ *csr0 &= ~CS0_INT_ENABLE;
+ apfddi_sleep();
+ return 0;
+}
+
+
+/*
+ * Initialise fddi network interface.
+ */
+int apfddi_init(struct device *dev)
+{
+ int i;
+ printk("apfddi_init(): Initialising fddi interface\n");
+
+ apfddi_device = dev;
+
+ dev->open = apfddi_open;
+ dev->stop = apfddi_stop;
+ dev->hard_start_xmit = apfddi_xmit;
+ dev->get_stats = apfddi_get_stats;
+ dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_ATOMIC);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_device_stats));
+ apfddi_stats = (struct net_device_stats *)apfddi_device->priv;
+
+ /* Initialise the fddi device structure */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->hard_header = apfddi_hard_header;
+ dev->rebuild_header = apfddi_rebuild_header;
+ dev->set_mac_address = apfddi_set_mac_address;
+ dev->header_cache_update = NULL;
+ dev->do_ioctl = apfddi_do_ioctl;
+ dev->set_config = apfddi_set_config;
+ dev->set_multicast_list = apfddi_set_multicast_list;
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = FDDI_HARDHDR_LEN;
+ dev->mtu = FDDIMTU;
+ dev->addr_len = 6;
+ memcpy(dev->dev_addr, apfddi_saddr, sizeof(apfddi_saddr));
+ dev->tx_queue_len = 100; /* XXX What should this be? */
+ dev->irq = APFDDI_IRQ;
+
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
+
+ dev->family = AF_INET;
+ dev->pa_addr = in_aton("150.203.142.28"); /* hibana-f */
+ dev->pa_mask = in_aton("255.255.255.0");
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ dev->pa_alen = 4;
+
+ return(0);
+}
+
+static void apfddi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+#if APFDDI_DEBUG
+ static int times = 0;
+#endif
+ unsigned flags;
+ save_flags(flags); cli();
+
+#if APFDDI_DEBUG
+ printk("In apfddi_interrupt irq %d dev_id %p times %d\n",
+ irq, dev_id, ++times);
+#endif
+
+ apfddi_poll();
+ restore_flags(flags);
+}
+
+#if APFDDI_DEBUG
+static char *flagbits[8] = {
+ "fin", "syn", "rst", "push", "ack", "urg", "6", "7"
+};
+
+void dump_packet(action, buf, len, seq)
+ char *action, *buf;
+ int len, seq;
+{
+ int i, flags;
+ char *sep;
+
+ printk("%s packet %d of %d bytes at %d:\n", action, seq,
+ len, jiffies);
+ printk(" from %x to %x pktid=%d ttl=%d pcol=%d len=%d\n",
+ *(long *)(buf+12), *(long *)(buf+16), *(u_short *)(buf+4),
+ *(unsigned char *)(buf+8), buf[9], *(u_short *)(buf+2));
+ if( buf[9] == 6 || buf[9] == 17 ){
+ /* TCP or UDP */
+ printk(" sport=%d dport=%d",
+ *(u_short *)(buf+20), *(u_short *)(buf+22));
+ if( buf[9] == 6 ){
+ printk(" seq=%d ack=%d win=%d flags=<",
+ *(long *)(buf+24), *(long *)(buf+28),
+ *(unsigned short *)(buf+34));
+ flags = buf[33];
+ sep = "";
+ for (i = 7; i >= 0; --i) {
+ if (flags & (1 << i)) {
+ printk("%s%s", sep, flagbits[i]);
+ sep = "+";
+ }
+ }
+ printk(">");
+ }
+ printk("\n");
+ }
+}
+#endif
+
+#if APFDDI_DEBUG
+static void apfddi_print_frame(struct sk_buff *skb)
+{
+ int i;
+ struct llc_header *lh;
+ static int seq = 0;
+
+#if 0
+ printk("skb->len is %d\n", skb->len);
+ printk("fc is 0x%x\n", *(u_char *)(skb->data+3));
+ printk("dest address is:\n");
+ for (i = 0; i < 6; i++) {
+ printk("%x:", fddi_bitrev[*(u_char *)(skb->data+4+i)]);
+ }
+ printk("\n");
+ printk("source address is:\n");
+ for (i = 0; i < 6; i++) {
+ printk("%x:", fddi_bitrev[*(u_char *)(skb->data+10+i)]);
+ }
+ printk("\n");
+#endif
+ lh = (struct llc_header *)(skb->data+16);
+#if 0
+ printk("llc_dsp %d llc_ssap %d snap_control %d org_code [0]=%d [1]=%d [2]=%d ether_type=%d\n",
+ lh->llc_dsap, lh->llc_ssap, lh->snap_control,
+ lh->snap_org_code[0], lh->snap_org_code[1], lh->snap_org_code[2],
+ lh->snap_ether_type);
+#endif
+ if (lh->snap_ether_type == ETH_P_IP)
+ dump_packet("apfddi_xmit:", skb->data+24, skb->len-24, seq++);
+}
+#endif
+
+/*
+ * Transmitting packet over FDDI.
+ */
+static int apfddi_xmit(struct sk_buff *skb, struct device *dev)
+{
+ unsigned long flags;
+
+#if APFDDI_DEBUG
+ printk("In apfddi_xmit\n");
+#endif
+
+ /*
+ * Check there is some work to do.
+ */
+ if (skb == NULL || dev == NULL)
+ return(0);
+
+#if APFDDI_DEBUG
+ printk("skb address is for apfddi 0x%x\n", skb);
+#endif
+
+ /*
+ * Check lock variable.
+ */
+ save_flags(flags); cli();
+ if (dev->tbusy != 0) {
+ restore_flags(flags);
+ printk("apfddi_xmit: device busy\n");
+ apfddi_stats->tx_errors++;
+ return 1;
+ }
+ restore_flags(flags);
+ dev->tbusy = 1;
+
+ dev->trans_start = jiffies;
+
+ skb->mac.raw = skb->data;
+
+ /*
+ * Append packet onto send queue.
+ */
+ if (mac_queue_append(skb)) {
+ /*
+ * No memory.
+ */
+ return 1;
+ }
+
+ /*
+ * Process packet queue.
+ */
+ mac_process();
+
+ apfddi_stats->tx_packets++;
+ dev->tbusy = 0;
+ return 0;
+}
+
+#if APFDDI_DEBUG
+void print_mbuf(struct mac_buf *mbuf)
+{
+ printk("mac %p length=%d ptr=%p wraplen=%d wrapptr=%x fr_start=%d fr_end=%d\n",
+ mbuf, mbuf->length, mbuf->ptr, mbuf->wraplen, mbuf->wrapptr,
+ mbuf->fr_start, mbuf->fr_end);
+}
+#endif
+
+/*
+ * Return statistics of fddi driver.
+ */
+static struct net_device_stats *apfddi_get_stats(struct device *dev)
+{
+ return((struct net_device_stats *)dev->priv);
+}
+
+
+
+
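
The fddi_bitrev[] table in apfddi.c above is a byte-wise bit reversal: FDDI transmits address bits MSB-first, so apfddi_hard_header() reverses each byte of the canonical addresses before they go on the wire. Here is a sketch of the equivalent computation; bitrev8() and fddi_addr_to_wire() are local helper names for illustration, not a kernel API of this era.

    /* Reverse the bit order within one byte; bitrev8(b) == fddi_bitrev[b]. */
    static unsigned char bitrev8(unsigned char b)
    {
            unsigned char r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (b & (1 << i))
                            r |= 0x80 >> i;
            return r;
    }

    /* Convert a canonical 6-byte address to FDDI wire order, as
       apfddi_hard_header() does with the table. */
    static void fddi_addr_to_wire(unsigned char wire[6], const unsigned char canon[6])
    {
            int i;

            for (i = 0; i < 6; i++)
                    wire[i] = bitrev8(canon[i]);
    }
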
diff --git a/drivers/ap1000/apfddi.h b/drivers/ap1000/apfddi.h
new file mode 100644
index 000000000..33230186e
--- /dev/null
+++ b/drivers/ap1000/apfddi.h
@@ -0,0 +1,142 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+#define BUFFER_MEM 0x40000
+#define CSR0 0x60000
+#define CSR1 0x60004
+#define PLC 0x60080
+#define FORMAC 0x60200
+#define FIFO 0x68000
+
+/* Size of buffer memory */
+#define BUFFER_SIZE 32768 /* words; 128kB */
+
+/* Bits in CSR0 */
+#define CS0_INT_REQ 0x8000 /* board interrupt request asserted */
+#define CS0_MAC_IRQ 0x4000 /* FORMAC is requesting interrupt */
+#define CS0_PHY_IRQ 0x2000 /* PLC is requesting interrupt */
+#define CS0_LED2 0x1000 /* turn on led 2 */
+#define CS0_DO_IRQ 0x0200 /* request interrupt */
+#define CS0_INT_ENABLE 0x0100 /* enable interrupt requests */
+#define CS0_DMA_ENABLE 0x0080 /* enable DMA requests */
+#define CS0_DMA_RECV 0x0040 /* DMA requests are in receive dirn. */
+#define CS0_LED1 0x0010 /* turn on led 1 */
+#define CS0_LED0 0x0008 /* turn on led 0 (red) */
+#define CS0_HREQ 0x0007 /* host request to FORMAC */
+#define CS0_HREQ_WSPEC 0x0002 /* write special frames */
+#define CS0_HREQ_RECV 0x0003 /* read receive queue */
+#define CS0_HREQ_WS 0x0004 /* write synchronous queue */
+#define CS0_HREQ_WA0 0x0005 /* write async queue 0 */
+#define CS0_HREQ_WA1 0x0006 /* write async queue 1 */
+#define CS0_HREQ_WA2 0x0007 /* write async queue 2 */
+
+/* Bits in CSR1 */
+#define CS1_THIS_QAF 0x0800 /* this queue almost full */
+#define CS1_FIFO_TAG 0x0400 /* tag of word at head of fifo */
+#define CS1_BUF_RD_TAG 0x0200 /* tag of last word read from buffer */
+#define CS1_BUF_WR_TAG 0x0100 /* tag to write to buffer */
+#define CS1_TAGMODE 0x0080 /* enable tag mode */
+#define CS1_RESET_MAC 0x0040 /* reset FORMAC and PLC */
+#define CS1_RESET_FIFO 0x0020 /* reset FIFO */
+#define CS1_CLEAR_QAF 0x0010 /* clear queue-almost-full bits */
+#define CS1_FIFO_LEVEL 0x0007 /* # words in FIFO (0 - 4) */
+
+/*
+ * FDDI Frame Control values.
+ */
+#define FDDI_SMT 0x41
+#define FDDI_SMT_NSA 0x4f
+#define FDDI_FC_LLC 0x50
+#define FDDI_FC_LLC_MASK 0xf0
+
+/*
+ * Unnumbered LLC format commands
+ */
+#define LLC_UI 0x3
+#define LLC_UI_P 0x13
+#define LLC_DISC 0x43
+#define LLC_DISC_P 0x53
+#define LLC_UA 0x63
+#define LLC_UA_P 0x73
+#define LLC_TEST 0xe3
+#define LLC_TEST_P 0xf3
+#define LLC_FRMR 0x87
+#define LLC_FRMR_P 0x97
+#define LLC_DM 0x0f
+#define LLC_DM_P 0x1f
+#define LLC_XID 0xaf
+#define LLC_XID_P 0xbf
+#define LLC_SABME 0x6f
+#define LLC_SABME_P 0x7f
+
+/*
+ * Supervisory LLC commands
+ */
+#define LLC_RR 0x01
+#define LLC_RNR 0x05
+#define LLC_REJ 0x09
+
+/*
+ * Info format - dummy only
+ */
+#define LLC_INFO 0x00
+
+/*
+ * ISO PDTR 10178 contains among others
+ */
+#define LLC_X25_LSAP 0x7e
+#define LLC_SNAP_LSAP 0xaa
+#define LLC_ISO_LSAP 0xfe
+
+/*
+ * Structure of the FDDI MAC header.
+ */
+struct fddi_header {
+ u_char fddi_fc; /* frame control field */
+ u_char fddi_dhost[6]; /* destination address */
+ u_char fddi_shost[6]; /* source address */
+};
+
+/*
+ * Structure of LLC/SNAP header.
+ */
+struct llc_header {
+ u_char llc_dsap;
+ u_char llc_ssap;
+ u_char snap_control;
+ u_char snap_org_code[3];
+ u_short snap_ether_type;
+};
+
+#define FDDI_HDRLEN 13 /* sizeof(struct fddi_header) */
+#define LLC_SNAPLEN 8 /* bytes for LLC/SNAP header */
+#define FDDI_HARDHDR_LEN 28 /* Hard header size */
+
+#define FDDIMTU 4352
+
+
+/* Types of loopback we can do. */
+typedef enum {
+ loop_none,
+ loop_formac,
+ loop_plc_lm,
+ loop_plc_eb,
+ loop_pdx
+} LoopbackType;
+
+/* Offset from fifo for writing word with tag. */
+#define FIFO_TAG 0x80
+
+#define MAX_FRAME_LEN 4500
+
+void set_ring_op(int up);
+void rmt_event(int st);
+void set_cf_join(int on);
+
+extern struct device *apfddi_device;
+extern struct net_device_stats *apfddi_stats;
+
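
The offsets used in apfddi.c follow from the constants above: apfddi_hard_header() pushes FDDI_HARDHDR_LEN - 4 = 24 bytes, laid out as 3 pad bytes (apparently so the 16-bit SNAP type field ends up aligned), the 13-byte FDDI MAC header, then the 8-byte LLC/SNAP header, which is why the LLC header sits at offset 16 of an outgoing frame. A sketch of that arithmetic follows; the two macro names are hypothetical.

    #include "apfddi.h"

    /* Layout built in front of the IP data by apfddi_hard_header():
         0..2    padding
         3       frame control (fddi_fc)
         4..15   destination + source addresses
         16..23  LLC/SNAP header
       3 + FDDI_HDRLEN + LLC_SNAPLEN == FDDI_HARDHDR_LEN - 4 == 24 bytes. */
    #define APFDDI_PAD_LEN  3
    #define APFDDI_PUSH_LEN (APFDDI_PAD_LEN + FDDI_HDRLEN + LLC_SNAPLEN)
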
diff --git a/drivers/ap1000/bif.c b/drivers/ap1000/bif.c
new file mode 100644
index 000000000..45ae418f4
--- /dev/null
+++ b/drivers/ap1000/bif.c
@@ -0,0 +1,289 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * $Id: bif.c,v 1.13 1996/12/18 01:45:52 tridge Exp $
+ *
+ * Network interface definitions for bif device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/in.h>
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_BIF */
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#include <asm/ap1000/apservice.h>
+#include <asm/ap1000/apreg.h>
+
+#define BIF_DEBUG 0
+#if BIF_DEBUG
+static int seq = 0;
+#endif
+
+#define BIF_MTU 10240
+
+static struct device *bif_device = 0;
+static struct net_device_stats *bif_stats = 0;
+
+int bif_init(struct device *dev);
+int bif_open(struct device *dev);
+static int bif_xmit(struct sk_buff *skb, struct device *dev);
+int bif_rx(struct sk_buff *skb);
+int bif_stop(struct device *dev);
+static struct net_device_stats *bif_get_stats(struct device *dev);
+
+static int bif_hard_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len)
+{
+#if BIF_DEBUG
+ printk("bif_hard_header()\n");
+#endif
+
+ skb_push(skb,dev->hard_header_len);
+
+ if (daddr) skb->arp = 1;
+
+ /* tell IP how much space we took */
+ return (dev->hard_header_len);
+}
+
+static int bif_rebuild_header(void *buff, struct device *dev,
+ unsigned long raddr, struct sk_buff *skb)
+{
+ /* this would normally be used to fill in hardware addresses after
+ an ARP */
+#if BIF_DEBUG
+ printk("bif_rebuild_header()\n");
+#endif
+ if (skb) skb->arp = 1;
+ return(0);
+}
+
+static int bif_set_mac_address(struct device *dev, void *addr)
+{
+ printk("BIF: set_mac_address called\n");
+ return (0);
+}
+
+static void bif_set_multicast_list(struct device *dev)
+{
+ return;
+}
+
+static int bif_do_ioctl(struct device *dev, struct ifreq *ifr, int cmd)
+{
+ printk("BIF: Called do_ioctl\n");
+ return (0);
+}
+
+static int bif_set_config(struct device *dev, struct ifmap *map)
+{
+ printk("BIF: Called bif_set_config\n");
+ return (0);
+}
+
+/*
+ * Initialise bif network interface.
+ */
+int bif_init(struct device *dev)
+{
+ int i;
+
+ printk("bif_init(): Initialising bif interface\n");
+ bif_device = dev;
+
+ dev->mtu = BIF_MTU;
+ dev->tbusy = 0;
+ dev->hard_start_xmit = bif_xmit;
+ dev->hard_header = bif_hard_header;
+ dev->hard_header_len = sizeof(struct cap_request);
+ dev->addr_len = 0;
+ dev->tx_queue_len = 50000; /* no limit (almost!) */
+ dev->type = ARPHRD_BIF;
+ dev->rebuild_header = bif_rebuild_header;
+ dev->open = bif_open;
+ dev->flags = IFF_NOARP; /* Don't use ARP on this device */
+ dev->family = AF_INET;
+ dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_device_stats));
+ bif_stats = (struct net_device_stats *)bif_device->priv;
+
+
+ dev->stop = bif_stop;
+ dev->get_stats = bif_get_stats;
+
+ /* Initialise the bif device structure */
+ for (i = 0; i < DEV_NUMBUFFS; i++)
+ skb_queue_head_init(&dev->buffs[i]);
+
+ dev->set_mac_address = bif_set_mac_address;
+ dev->header_cache_update = NULL;
+ dev->do_ioctl = bif_do_ioctl;
+ dev->set_config = bif_set_config;
+ dev->set_multicast_list = bif_set_multicast_list;
+
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
+
+ dev->pa_addr = 0;
+ dev->pa_brdaddr = 0;
+ dev->pa_mask = 0;
+ dev->pa_alen = 4;
+
+ return(0);
+}
+
+int bif_open(struct device *dev)
+{
+ printk("In bif_open\n");
+ dev->tbusy = 0;
+ dev->start = 1;
+ return 0;
+}
+
+#if BIF_DEBUG
+static void dump_packet(char *action, char *buf, int len, int seq)
+{
+ int flags;
+ char *sep;
+
+ printk("%s packet %d of %d bytes at %d:\n", action, seq,
+ len, (int)jiffies);
+ printk(" from %x to %x pktid=%d ttl=%d pcol=%d len=%d\n",
+ *(long *)(buf+12), *(long *)(buf+16), *(u_short *)(buf+4),
+ *(unsigned char *)(buf+8), buf[9], *(u_short *)(buf+2));
+ if( buf[9] == 6 || buf[9] == 17 ){
+ /* TCP or UDP */
+ printk(" sport=%d dport=%d",
+ *(u_short *)(buf+20), *(u_short *)(buf+22));
+ if( buf[9] == 6 ){
+ printk(" seq=%d ack=%d win=%d flags=<",
+ *(long *)(buf+24), *(long *)(buf+28),
+ *(unsigned short *)(buf+34));
+ flags = buf[33];
+ sep = "";
+ printk(">");
+ }
+ printk("\n");
+ }
+ else {
+ printk(" protocol = %d\n", buf[9]);
+ }
+}
+#endif
+
+
+static int bif_xmit(struct sk_buff *skb, struct device *dev)
+{
+ extern int bif_send_ip(int cid,struct sk_buff *skb);
+ extern int tnet_send_ip(int cid,struct sk_buff *skb);
+ extern int msc_blocked, tnet_ip_enabled;
+ u_long destip;
+ int cid;
+
+ if (skb == NULL || dev == NULL)
+ return(0);
+
+ destip = *(u_long *)(skb->data+sizeof(struct cap_request)+16);
+ cid = ap_ip_to_cid(destip);
+
+ skb->dev = dev;
+ skb->mac.raw = skb->data;
+
+ if (cid != -1 && tnet_ip_enabled && !msc_blocked) {
+ tnet_send_ip(cid,skb);
+ } else {
+ bif_send_ip(cid, skb);
+ }
+
+ dev->tbusy = 0;
+
+ bif_stats->tx_packets++;
+
+ mark_bh(NET_BH);
+
+ return 0;
+}
+
+
+/*
+ * Receive a packet from the BIF - called from interrupt handler.
+ */
+int bif_rx(struct sk_buff *skb)
+{
+#if BIF_DEBUG
+ dump_packet("bif_rx:", skb->data, skb->len, seq++);
+#endif
+
+ if (bif_device == NULL) {
+ printk("bif: bif_device is NULL in bif_rx\n");
+ dev_kfree_skb(skb, FREE_WRITE);
+ return 0;
+ }
+ skb->dev = bif_device;
+ skb->protocol = ETH_P_IP;
+
+#if 1
+ /* try disabling checksums on receive */
+ if (ap_ip_to_cid(*(u_long *)(((char *)skb->data)+12)) != -1)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+#endif
+
+ /*
+ * Inform the network layer of the new packet.
+ */
+ skb->mac.raw = skb->data;
+ netif_rx(skb);
+
+ if (bif_stats == NULL) {
+ printk("bif: bif_stats is NULL is bif_rx\n");
+ return 0;
+ }
+ bif_stats->rx_packets++;
+
+ return 0;
+}
+
+int bif_stop(struct device *dev)
+{
+ printk("in bif_close\n");
+
+ dev->tbusy = 1;
+ dev->start = 0;
+
+ return 0;
+}
+
+/*
+ * Return statistics of bif driver.
+ */
+static struct net_device_stats *bif_get_stats(struct device *dev)
+{
+ return((struct net_device_stats *)dev->priv);
+}
+
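
bif_xmit() above reads the destination IP address at skb->data + sizeof(struct cap_request) + 16: the cap_request header reserved by bif_hard_header() sits in front of the IP header, and the destination-address field begins 16 bytes into an IPv4 header. The same extraction written against struct iphdr, as an illustrative sketch (bif_dest_ip() is not part of the driver):

    #include <linux/ip.h>
    #include <linux/skbuff.h>

    /* Equivalent to *(u_long *)(skb->data + sizeof(struct cap_request) + 16)
       in bif_xmit(): skip the reserved link-level header, then read the
       IPv4 destination address field. */
    static unsigned long bif_dest_ip(struct sk_buff *skb, unsigned int hard_len)
    {
            struct iphdr *iph = (struct iphdr *)(skb->data + hard_len);

            return iph->daddr;
    }
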
diff --git a/drivers/ap1000/ddv.c b/drivers/ap1000/ddv.c
new file mode 100644
index 000000000..7cfdafdc9
--- /dev/null
+++ b/drivers/ap1000/ddv.c
@@ -0,0 +1,1008 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * ddv.c - Single AP1000 block driver.
+ *
+ * This block driver performs io operations to the ddv option
+ * board. (Hopefully:)
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/ext2_fs.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+#include <asm/ap1000/apreg.h>
+#include <asm/ap1000/DdvReqTable.h>
+
+#define MAJOR_NR DDV_MAJOR
+
+#include <linux/blk.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+
+#define DDV_DEBUG 0
+#define AIR_DISK 1
+
+#define SECTOR_SIZE 512
+
+/* we can have lots of partitions */
+#define PARTN_BITS 6
+#define NUM_DDVDEVS (1<<PARTN_BITS)
+
+#define PARDISK_BASE (1<<5) /* partitions above this number are
+ striped across all the cells */
+#define STRIPE_SHIFT 6
+#define STRIPE_SECTORS (1<<STRIPE_SHIFT) /* number of sectors per stripe */
+
+#define MAX_BNUM 16
+#define MAX_REQUEST (TABLE_SIZE - 2)
+#define REQUEST_LOW 16
+#define REQUEST_HIGH 4
+
+
+/* we fake up a block size larger than the physical block size to try
+ to make things a bit more efficient */
+#define SECTOR_BLOCK_SHIFT 9
+
+/* try to read ahead a bit */
+#define DDV_READ_AHEAD 64
+
+static int have_ddv_board = 1;
+static unsigned num_options = 0;
+static unsigned this_option = 0;
+
+extern int ddv_get_mlist(unsigned mptr[],int bnum);
+extern int ddv_set_request(struct request *req,
+ int request_type,int bnum,int mlist,int len,int offset);
+extern void ddv_load_kernel(char *opcodep);
+extern int ddv_restart_cpu(void);
+extern int ddv_mlist_available(void);
+static int ddv_revalidate(kdev_t dev, struct gendisk *gdev);
+static void ddv_geninit(struct gendisk *ignored);
+static void ddv_release(struct inode * inode, struct file * filp);
+static void ddv_request1(void);
+
+
+static char *ddv_opcodep = NULL;
+static struct request *next_request = NULL;
+
+static struct wait_queue * busy_wait = NULL;
+
+static int ddv_blocksizes[NUM_DDVDEVS]; /* in bytes */
+int ddv_sect_length[NUM_DDVDEVS]; /* in sectors */
+int ddv_blk_length[NUM_DDVDEVS]; /* in blocks */
+
+/* these are used by the ddv_daemon, which services remote disk requests */
+static struct remote_request *rem_queue = NULL;
+static struct remote_request *rem_queue_end;
+static struct wait_queue *ddv_daemon_wait = NULL;
+
+static int opiu_kernel_loaded = 0;
+
+static struct {
+ unsigned reads, writes, blocks, rq_started, rq_finished, errors;
+ unsigned sectors_read, sectors_written;
+} ddv_stats;
+
+static struct hd_struct partition_tables[NUM_DDVDEVS];
+
+static struct gendisk ddv_gendisk = {
+ MAJOR_NR, /* Major number */
+ DEVICE_NAME, /* Major name */
+ PARTN_BITS, /* Bits to shift to get real from partition */
+ 1 << PARTN_BITS, /* Number of partitions per real */
+ 1, /* maximum number of real */
+#ifdef MODULE
+ NULL, /* called from init_module */
+#else
+ ddv_geninit, /* init function */
+#endif
+ partition_tables,/* hd struct */
+ ddv_blk_length, /* block sizes */
+ 1, /* number */
+ (void *) NULL, /* internal */
+ NULL /* next */
+};
+
+
+struct ddv_geometry {
+ unsigned char heads;
+ unsigned char sectors;
+ unsigned short cylinders;
+ unsigned long start;
+};
+
+static struct ddv_geometry ddv_geometry;
+
+
+struct remote_request {
+ union {
+ struct remote_request *next;
+ void (*fn)(void);
+ } u;
+ unsigned bnum; /* how many blocks does this contain */
+ struct request *reqp; /* pointer to the request on the original cell */
+ unsigned cell; /* what cell is the request from */
+ struct request req; /* details of the request */
+};
+
+
+static void ddv_set_optadr(void)
+{
+ unsigned addr = 0x11000000;
+ OPT_IO(OBASE) = addr;
+ MSC_IO(MSC_OPTADR) =
+ ((addr & 0xff000000)>>16) |
+ ((OPTION_BASE & 0xf0000000)>>24) |
+ ((OPTION_BASE + 0x10000000)>>28);
+ OPT_IO(PRST) = 0;
+}
+
+extern struct RequestTable *RTable;
+extern struct OPrintBufArray *PrintBufs;
+extern struct OAlignBufArray *AlignBufs;
+extern struct DiskInfo *DiskInfo;
+
+static void ddv_release(struct inode * inode, struct file * filp)
+{
+#if DEBUG
+ printk("ddv_release started\n");
+#endif
+ sync_dev(inode->i_rdev);
+#if DEBUG
+ printk("ddv_release done\n");
+#endif
+}
+
+
+static unsigned in_request = 0;
+static unsigned req_queued = 0;
+
+static void ddv_end_request(int uptodate,struct request *req)
+{
+ struct buffer_head * bh;
+
+ ddv_stats.rq_finished++;
+
+/* printk("ddv_end_request(%d,%p)\n",uptodate,req); */
+
+ req->errors = 0;
+ if (!uptodate) {
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+ req->nr_sectors--;
+ req->nr_sectors &= ~SECTOR_MASK;
+ req->sector += (BLOCK_SIZE / SECTOR_SIZE);
+ req->sector &= ~SECTOR_MASK;
+ ddv_stats.errors++;
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ printk("WARNING: ddv: more sectors!\n");
+ ddv_stats.errors++;
+ return;
+ }
+ }
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+
+
+/* check that a request is all OK to process */
+static int request_ok(struct request *req)
+{
+ int minor;
+ if (!req) return 0;
+
+ if (MAJOR(req->rq_dev) != MAJOR_NR)
+ panic(DEVICE_NAME ": bad major number\n");
+ if (!buffer_locked(req->bh))
+ panic(DEVICE_NAME ": block not locked");
+
+ minor = MINOR(req->rq_dev);
+ if (minor >= NUM_DDVDEVS) {
+ printk("ddv_request: Invalid minor (%d)\n", minor);
+ return 0;
+ }
+
+ if ((req->sector + req->current_nr_sectors) > ddv_sect_length[minor]) {
+ printk("ddv: out of range minor=%d offset=%d len=%d sect_length=%d\n",
+ minor,(int)req->sector,(int)req->current_nr_sectors,
+ ddv_sect_length[minor]);
+ return 0;
+ }
+
+ if (req->cmd != READ && req->cmd != WRITE) {
+ printk("unknown request type %d\n",req->cmd);
+ return 0;
+ }
+
+ /* it seems to be OK */
+ return 1;
+}
+
+
+static void complete_request(struct request *req,int bnum)
+{
+ while (bnum--) {
+ ddv_end_request(1,req);
+ req = req->next;
+ }
+}
+
+
+static int completion_pointer = 0;
+
+static void check_completion(void)
+{
+ int i,bnum;
+ struct request *req;
+
+ if (!RTable) return;
+
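+	/* Chase the option processor's ddv_pointer around the async_info
+	   ring from our completion_pointer, finishing every slot it has
+	   marked DDV_REQ_FREE. */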
+ for (;
+ (i=completion_pointer) != RTable->ddv_pointer &&
+ RTable->async_info[i].status == DDV_REQ_FREE;
+ completion_pointer = INC_T(completion_pointer))
+ {
+ req = (struct request *)RTable->async_info[i].argv[7];
+ bnum = RTable->async_info[i].bnum;
+ if (!req || !bnum) {
+ printk("%s(%d)\n",__FILE__,__LINE__);
+ ddv_stats.errors++;
+ continue;
+ }
+
+ RTable->async_info[i].status = 0;
+ RTable->async_info[i].argv[7] = 0;
+
+ complete_request(req,bnum);
+ in_request--;
+ }
+}
+
+
+static struct request *get_request_queue(struct request *oldq)
+{
+ struct request *req,*req2;
+
+ /* skip any non-active or bad requests */
+ skip1:
+ if (!(req = CURRENT))
+ return oldq;
+
+ if (req->rq_status != RQ_ACTIVE) {
+ CURRENT = req->next;
+ goto skip1;
+ }
+
+ if (!request_ok(req)) {
+ ddv_end_request(0,req);
+ CURRENT = req->next;
+ goto skip1;
+ }
+
+ /* now grab as many as we can */
+ req_queued++;
+
+ for (req2 = req;
+ req2->next &&
+ req2->next->rq_status == RQ_ACTIVE &&
+ request_ok(req2->next);
+ req2 = req2->next)
+ req_queued++;
+
+	/* leave CURRENT pointing at the first request we didn't take */
+ CURRENT = req2->next;
+
+ /* chop our list at that point */
+ req2->next = NULL;
+
+ if (!oldq)
+ return req;
+
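+	/* otherwise append the new chain to the tail of the existing queue */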
+ for (req2=oldq;req2->next;req2=req2->next) ;
+
+ req2->next = req;
+
+ return oldq;
+}
+
+
+static void ddv_rem_complete(struct remote_request *rem)
+{
+ unsigned flags;
+ int bnum = rem->bnum;
+ struct request *req = rem->reqp;
+
+ complete_request(req,bnum);
+ in_request--;
+
+ save_flags(flags); cli();
+ ddv_request1();
+ restore_flags(flags);
+}
+
+
+/*
+ * The background ddv daemon. This receives remote disk requests
+ * and processes them via the normal block operations
+ */
+static int ddv_daemon(void *unused)
+{
+ current->session = 1;
+ current->pgrp = 1;
+ sprintf(current->comm, "ddv_daemon");
+ current->blocked = ~0UL; /* block all signals */
+
+ /* Give it a realtime priority. */
+ current->policy = SCHED_FIFO;
+ current->priority = 32; /* Fixme --- we need to standardise our
+ namings for POSIX.4 realtime scheduling
+ priorities. */
+
+ printk("Started ddv_daemon\n");
+
+ while (1) {
+ struct remote_request *rem;
+ unsigned flags;
+ struct buffer_head *bhlist[MAX_BNUM*4];
+ int i,j,minor,len,shift,offset;
+
+ save_flags(flags); cli();
+
+ while (!rem_queue) {
+ current->signal = 0;
+ interruptible_sleep_on(&ddv_daemon_wait);
+ }
+
+ rem = rem_queue;
+ rem_queue = rem->u.next;
+ restore_flags(flags);
+
+
+ minor = MINOR(rem->req.rq_dev);
+ len = rem->req.current_nr_sectors;
+ offset = rem->req.sector;
+
+ /* work out the conversion to the local block size from
+ sectors */
+ for (shift=0;
+ (SECTOR_SIZE<<shift) != ddv_blocksizes[minor];
+ shift++) ;
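+		/* e.g. with the usual 512-byte sectors and a 1024-byte local
+		   block size this gives shift == 1, i.e. two sectors per block */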
+
+ /* do the request */
+ for (i=0; len; i++) {
+ bhlist[i] = getblk(rem->req.rq_dev,
+ offset >> shift,
+ ddv_blocksizes[minor]);
+ if (!buffer_uptodate(bhlist[i]))
+ ll_rw_block(READ,1,&bhlist[i]);
+ offset += 1<<shift;
+ len -= 1<<shift;
+ }
+
+ for (j=0;j<i;j++)
+ if (!buffer_uptodate(bhlist[j]))
+ wait_on_buffer(bhlist[j]);
+
+
+ /* put() the data */
+
+
+ /* release the buffers */
+ for (j=0;j<i;j++)
+ brelse(bhlist[j]);
+
+		/* tell the originator that it's done */
+ rem->u.fn = ddv_rem_complete;
+ tnet_rpc(rem->cell,rem,sizeof(int)*3,1);
+ }
+}
+
+
+/* receive a remote disk request */
+static void ddv_rem_queue(char *data,unsigned size)
+{
+ unsigned flags;
+ struct remote_request *rem = (struct remote_request *)
+ kmalloc(size,GFP_ATOMIC);
+
+ if (!rem) {
+ /* oh bugger! */
+ ddv_stats.errors++;
+ return;
+ }
+
+ memcpy(rem,data,size);
+ rem->u.next = NULL;
+
+ save_flags(flags); cli();
+
+ /* add it to our remote request queue */
+ if (!rem_queue)
+ rem_queue = rem;
+ else
+ rem_queue_end->u.next = rem;
+ rem_queue_end = rem;
+
+ restore_flags(flags);
+
+ wake_up(&ddv_daemon_wait);
+}
+
+
+/* which disk should this request go to */
+static inline unsigned pardisk_num(struct request *req)
+{
+ int minor = MINOR(req->rq_dev);
+ unsigned stripe;
+ unsigned cell;
+
+ if (minor < PARDISK_BASE)
+ return this_option;
+
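+	/* parallel-disk minors are striped across the option cells in chunks
+	   of 2^STRIPE_SHIFT sectors; the stripe index modulo the number of
+	   options picks the owning cell */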
+ stripe = req->sector >> STRIPE_SHIFT;
+ cell = stripe % num_options;
+
+ return cell;
+}
+
+
+/* check if a 2nd request can be tacked onto the first */
+static inline int contiguous(struct request *req1,struct request *req2)
+{
+ if (req2->cmd != req1->cmd ||
+ req2->rq_dev != req1->rq_dev ||
+ req2->sector != req1->sector + req1->current_nr_sectors ||
+ req2->current_nr_sectors != req1->current_nr_sectors)
+ return 0;
+ if (pardisk_num(req1) != pardisk_num(req2))
+ return 0;
+ return 1;
+}
+
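+/*
+ * Main request dispatcher: pull runs of contiguous requests off the block
+ * queue and hand each run either to a remote cell (via tnet_rpc) or to the
+ * local option processor through the shared RequestTable.
+ */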
+static void ddv_request1(void)
+{
+ struct request *req,*req1,*req2;
+ unsigned offset,len,req_num,mlist,bnum,available=0;
+ static unsigned mptrs[MAX_BNUM];
+ unsigned cell;
+
+ if (in_request > REQUEST_HIGH)
+ return;
+
+ next_request = get_request_queue(next_request);
+
+ while ((req = next_request)) {
+ int minor;
+
+ if (in_request >= MAX_REQUEST)
+ return;
+
+ if (in_request>1 && req_queued<REQUEST_LOW)
+ return;
+
+ /* make sure we have room for a request */
+ available = ddv_mlist_available();
+ if (available < 1) return;
+ if (available > MAX_BNUM)
+ available = MAX_BNUM;
+
+ offset = req->sector;
+ len = req->current_nr_sectors;
+ minor = MINOR(req->rq_dev);
+
+ mptrs[0] = (int)req->buffer;
+
+ for (bnum=1,req1=req,req2=req->next;
+ req2 && bnum<available && contiguous(req1,req2);
+ req1=req2,req2=req2->next) {
+ mptrs[bnum++] = (int)req2->buffer;
+ }
+
+ next_request = req2;
+
+
+ req_queued -= bnum;
+ ddv_stats.blocks += bnum;
+ ddv_stats.rq_started += bnum;
+
+ if (req->cmd == READ) {
+ ddv_stats.reads++;
+ ddv_stats.sectors_read += len*bnum;
+ } else {
+ ddv_stats.writes++;
+ ddv_stats.sectors_written += len*bnum;
+ }
+
+ if (minor >= PARDISK_BASE) {
+ /* translate the request to the normal partition */
+ unsigned stripe;
+ minor -= PARDISK_BASE;
+
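+			/* each cell holds every num_options'th stripe, so
+			   dividing the stripe index by num_options gives its
+			   position within the local partition */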
+ stripe = offset >> STRIPE_SHIFT;
+ stripe /= num_options;
+ offset = (stripe << STRIPE_SHIFT) +
+ (offset & ((1<<STRIPE_SHIFT)-1));
+#if AIR_DISK
+ /* like an air-guitar :-) */
+ complete_request(req,bnum);
+ continue;
+#endif
+ }
+
+ if ((cell=pardisk_num(req)) != this_option) {
+			/* it's a remote request */
+ struct remote_request *rem;
+ unsigned *remlist;
+ unsigned size = sizeof(*rem) + sizeof(int)*bnum;
+
+ rem = (struct remote_request *)kmalloc(size,GFP_ATOMIC);
+ if (!rem) {
+ /* hopefully we can get it on the next go */
+ return;
+ }
+ remlist = (unsigned *)(rem+1);
+
+ rem->u.fn = ddv_rem_queue;
+ rem->cell = this_option;
+ rem->bnum = bnum;
+ rem->req = *req;
+ rem->reqp = req;
+ rem->req.rq_dev = MKDEV(MAJOR_NR,minor);
+ rem->req.sector = offset;
+ memcpy(remlist,mptrs,sizeof(mptrs[0])*bnum);
+
+ if (tnet_rpc(cell,rem,size,1) != 0) {
+ kfree_s(rem,size);
+ return;
+ }
+ } else {
+			/* it's a local request */
+ if ((mlist = ddv_get_mlist(mptrs,bnum)) == -1) {
+ ddv_stats.errors++;
+ panic("ddv: mlist corrupted");
+ }
+
+ req_num = RTable->cell_pointer;
+ RTable->async_info[req_num].status =
+ req->cmd==READ?DDV_RAWREAD_REQ:DDV_RAWWRITE_REQ;
+ RTable->async_info[req_num].bnum = bnum;
+ RTable->async_info[req_num].argv[0] = mlist;
+ RTable->async_info[req_num].argv[1] = len;
+ RTable->async_info[req_num].argv[2] = offset +
+ partition_tables[minor].start_sect;
+ RTable->async_info[req_num].argv[3] = bnum;
+ RTable->async_info[req_num].argv[7] = (unsigned)req;
+ RTable->cell_pointer = INC_T(RTable->cell_pointer);
+
+ }
+
+ in_request++;
+ }
+}
+
+
+static void ddv_request(void)
+{
+ cli();
+ ddv_request1();
+ sti();
+}
+
+
+static void check_printbufs(void)
+{
+ int i;
+
+ if (!PrintBufs) return;
+
+ while (PrintBufs->option_counter != PrintBufs->cell_counter) {
+ i = PrintBufs->cell_counter;
+ printk("opiu (%d): ",i);
+ if (((unsigned)PrintBufs->bufs[i].fmt) > 0x100000)
+ printk("Error: bad format in printk at %p\n",
+ PrintBufs->bufs[i].fmt);
+ else
+ printk(PrintBufs->bufs[i].fmt + OPIBUS_BASE,
+ PrintBufs->bufs[i].args[0],
+ PrintBufs->bufs[i].args[1],
+ PrintBufs->bufs[i].args[2],
+ PrintBufs->bufs[i].args[3],
+ PrintBufs->bufs[i].args[4],
+ PrintBufs->bufs[i].args[5]);
+ if (++PrintBufs->cell_counter == PRINT_BUFS)
+ PrintBufs->cell_counter = 0;
+ }
+}
+
+static void ddv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ OPT_IO(IRC1) = 0x80000000;
+
+ check_printbufs();
+ check_completion();
+
+ ddv_request1();
+ restore_flags(flags);
+}
+
+static int ddv_open(struct inode * inode, struct file * filp)
+{
+ int minor = MINOR(inode->i_rdev);
+
+ if (!have_ddv_board || minor >= NUM_DDVDEVS)
+ return -ENODEV;
+
+ if (minor >= PARDISK_BASE) {
+ ddv_sect_length[minor] = ddv_sect_length[minor - PARDISK_BASE];
+ ddv_blk_length[minor] = ddv_blk_length[minor - PARDISK_BASE];
+ }
+
+ return 0;
+}
+
+
+static void ddv_open_reply(struct cap_request *creq)
+{
+ int size = creq->size - sizeof(*creq);
+ ddv_opcodep = (char *)kmalloc(size,GFP_ATOMIC);
+ read_bif(ddv_opcodep, size);
+#if DEBUG
+ printk("received opiu kernel of size %d\n",size);
+#endif
+ if (size == 0)
+ have_ddv_board = 0;
+ wake_up(&busy_wait);
+}
+
+
+static void ddv_load_opiu(void)
+{
+ int i;
+ struct cap_request creq;
+
+ /* if the opiu kernel is already loaded then we don't do anything */
+ if (!have_ddv_board || opiu_kernel_loaded)
+ return;
+
+ bif_register_request(REQ_DDVOPEN,ddv_open_reply);
+
+ /* send the open request to the front end */
+ creq.cid = mpp_cid();
+ creq.type = REQ_DDVOPEN;
+ creq.header = 0;
+ creq.size = sizeof(creq);
+
+ bif_queue(&creq,0,0);
+
+ ddv_set_optadr();
+
+ while (!ddv_opcodep)
+ sleep_on(&busy_wait);
+
+ if (!have_ddv_board)
+ return;
+
+ ddv_load_kernel(ddv_opcodep);
+
+ kfree(ddv_opcodep);
+ ddv_opcodep = NULL;
+
+ if (ddv_restart_cpu())
+ return;
+
+ ddv_sect_length[0] = DiskInfo->blocks;
+ ddv_blk_length[0] = DiskInfo->blocks >> 1;
+ ddv_blocksizes[0] = BLOCK_SIZE;
+
+ ddv_geometry.cylinders = ddv_sect_length[0] /
+ (ddv_geometry.heads*ddv_geometry.sectors);
+
+ ddv_gendisk.part[0].start_sect = 0;
+ ddv_gendisk.part[0].nr_sects = ddv_sect_length[0];
+
+ resetup_one_dev(&ddv_gendisk, 0);
+
+ for (i=0;i<PARDISK_BASE;i++) {
+ ddv_sect_length[i] = ddv_gendisk.part[i].nr_sects;
+ ddv_blk_length[i] = ddv_gendisk.part[i].nr_sects >> 1;
+ }
+
+ /* setup the parallel partitions by multiplying the normal
+ partition by the number of options */
+ for (;i<NUM_DDVDEVS;i++) {
+ ddv_sect_length[i] = ddv_sect_length[i-PARDISK_BASE]*num_options;
+ ddv_blk_length[i] = ddv_blk_length[i-PARDISK_BASE]*num_options;
+ ddv_gendisk.part[i].start_sect = ddv_gendisk.part[i-PARDISK_BASE].start_sect;
+ ddv_gendisk.part[i].nr_sects = ddv_sect_length[i];
+ }
+
+
+ opiu_kernel_loaded = 1;
+}
+
+
+/*
+ * This routine is called to flush all partitions and partition tables
+ * for a changed disk, and then re-read the new partition table.
+ */
+static int ddv_revalidate(kdev_t dev, struct gendisk *gdev)
+{
+ int target;
+ int max_p;
+ int start;
+ int i;
+
+ target = DEVICE_NR(dev);
+
+ max_p = gdev->max_p;
+ start = target << gdev->minor_shift;
+
+ printk("ddv_revalidate dev=%d target=%d max_p=%d start=%d\n",
+ dev,target,max_p,start);
+
+ for (i=max_p - 1; i >=0 ; i--) {
+ int minor = start + i;
+ kdev_t devi = MKDEV(gdev->major, minor);
+ sync_dev(devi);
+ invalidate_inodes(devi);
+ invalidate_buffers(devi);
+ gdev->part[minor].start_sect = 0;
+ gdev->part[minor].nr_sects = 0;
+ };
+
+ ddv_sect_length[start] = DiskInfo->blocks;
+ ddv_blk_length[start] = DiskInfo->blocks >> 1;
+
+ gdev->part[start].nr_sects = ddv_sect_length[start];
+ resetup_one_dev(gdev, target);
+
+ printk("sect_length[%d]=%d blk_length[%d]=%d\n",
+ start,ddv_sect_length[start],
+ start,ddv_blk_length[start]);
+
+ for (i=0;i<max_p;i++) {
+ ddv_sect_length[start+i] = gdev->part[start+i].nr_sects;
+ ddv_blk_length[start+i] = gdev->part[start+i].nr_sects >> 1;
+ if (gdev->part[start+i].nr_sects)
+ printk("partition[%d] start=%d length=%d\n",i,
+ (int)gdev->part[start+i].start_sect,
+ (int)gdev->part[start+i].nr_sects);
+ }
+
+ return 0;
+}
+
+
+
+
+static int ddv_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err;
+ struct ddv_geometry *loc = (struct ddv_geometry *) arg;
+ int dev;
+ int minor = MINOR(inode->i_rdev);
+
+ if ((!inode) || !(inode->i_rdev))
+ return -EINVAL;
+ dev = DEVICE_NR(inode->i_rdev);
+#if DEBUG
+ printk("ddv_ioctl: cmd=%x dev=%x minor=%d\n", cmd, dev, minor);
+#endif
+ switch (cmd) {
+ case HDIO_GETGEO:
+ printk("\tHDIO_GETGEO\n");
+ if (!loc) return -EINVAL;
+ if (put_user(ddv_geometry.heads, (char *) &loc->heads)) return -EFAULT;
+ if (put_user(ddv_geometry.sectors, (char *) &loc->sectors)) return -EFAULT;
+ if (put_user(ddv_geometry.cylinders, (short *) &loc->cylinders)) return -EFAULT;
+ if (put_user(ddv_geometry.start, (long *) &loc->start)) return -EFAULT;
+ return 0;
+
+ case HDIO_GET_MULTCOUNT :
+ printk("\tHDIO_GET_MULTCOUNT\n");
+ return -EINVAL;
+
+ case HDIO_GET_IDENTITY :
+ printk("\tHDIO_GET_IDENTITY\n");
+ return -EINVAL;
+
+ case HDIO_GET_NOWERR :
+ printk("\tHDIO_GET_NOWERR\n");
+ return -EINVAL;
+
+ case HDIO_SET_NOWERR :
+ printk("\tHDIO_SET_NOWERR\n");
+ return -EINVAL;
+
+ case BLKRRPART:
+ printk("\tBLKRRPART\n");
+ return ddv_revalidate(inode->i_rdev,&ddv_gendisk);
+
+ case BLKGETSIZE: /* Return device size */
+ if (put_user(ddv_sect_length[minor],(long *) arg)) return -EFAULT;
+#if DEBUG
+ printk("BLKGETSIZE gave %d\n",ddv_sect_length[minor]);
+#endif
+ return 0;
+
+ default:
+ printk("ddv_ioctl: Invalid cmd=%d(0x%x)\n", cmd, cmd);
+ return -EINVAL;
+ };
+}
+
+static struct file_operations ddv_fops = {
+ NULL, /* lseek - default */
+ block_read, /* read */
+ block_write, /* write */
+ NULL, /* readdir - bad */
+ NULL, /* poll */
+ ddv_ioctl, /* ioctl */
+ NULL, /* mmap */
+ ddv_open, /* open */
+ ddv_release,
+ block_fsync /* fsync */
+};
+
+
+static void ddv_status(void)
+{
+ if (!have_ddv_board) {
+ printk("no ddv board\n");
+ return;
+ }
+
+	printk("\n"
+	       "in_request %u req_queued %u\n"
+	       "MTable: start=%u end=%u\n"
+	       "Requests: started=%u finished=%u\n"
+	       "Requests: completion_pointer=%u ddv_pointer=%u cell_pointer=%u\n"
+	       "PrintBufs: option_counter=%u cell_counter=%u\n"
+	       "ddv_stats: reads=%u writes=%u blocks=%u\n"
+	       "ddv_stats: sectors_read=%u sectors_written=%u\n"
+	       "CURRENT=%p next_request=%p errors=%u\n",
+ in_request,req_queued,
+ RTable->start_mtable,RTable->end_mtable,
+ ddv_stats.rq_started,ddv_stats.rq_finished,
+ completion_pointer,RTable->ddv_pointer,RTable->cell_pointer,
+ PrintBufs->option_counter,PrintBufs->cell_counter,
+ ddv_stats.reads,ddv_stats.writes,ddv_stats.blocks,
+ ddv_stats.sectors_read,ddv_stats.sectors_written,
+ CURRENT,next_request,
+ ddv_stats.errors);
+}
+
+
+int ddv_init(void)
+{
+ int cid;
+
+ cid = mpp_cid();
+
+ if (register_blkdev(MAJOR_NR,DEVICE_NAME,&ddv_fops)) {
+ printk("ap: unable to get major %d for ap block dev\n",
+ MAJOR_NR);
+ return -1;
+ }
+
+ printk("ddv_init: register dev %d\n", MAJOR_NR);
+ blk_dev[MAJOR_NR].request_fn = DEVICE_REQUEST;
+ read_ahead[MAJOR_NR] = DDV_READ_AHEAD;
+
+ bif_add_debug_key('d',ddv_status,"DDV status");
+ ddv_gendisk.next = gendisk_head;
+ gendisk_head = &ddv_gendisk;
+
+ num_options = mpp_num_cells();
+ this_option = mpp_cid();
+
+ kernel_thread(ddv_daemon, NULL, 0);
+
+ return(0);
+}
+
+
+static void ddv_geninit(struct gendisk *ignored)
+{
+ int i;
+ static int done = 0;
+
+ if (done)
+ printk("ddv_geninit already done!\n");
+
+ done = 1;
+
+ printk("ddv_geninit\n");
+
+ /* request interrupt line 2 */
+ if (request_irq(APOPT0_IRQ,ddv_interrupt,SA_INTERRUPT,"apddv",NULL)) {
+ printk("Failed to install ddv interrupt handler\n");
+ }
+
+ for (i=0;i<NUM_DDVDEVS;i++) {
+ ddv_blocksizes[i] = BLOCK_SIZE;
+ ddv_sect_length[i] = 0;
+ ddv_blk_length[i] = 0;
+ }
+
+ ddv_geometry.heads = 32;
+ ddv_geometry.sectors = 32;
+ ddv_geometry.cylinders = 1;
+ ddv_geometry.start = 0;
+
+ blksize_size[MAJOR_NR] = ddv_blocksizes;
+
+ ddv_load_opiu();
+}
+
+
+/* loadable module support */
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ int error = ddv_init();
+ ddv_geninit(&(struct gendisk) { 0,0,0,0,0,0,0,0,0,0,0 });
+ if (!error)
+ printk(KERN_INFO "DDV: Loaded as module.\n");
+ return error;
+}
+
+/* Before freeing the module, invalidate all of the protected buffers! */
+void cleanup_module(void)
+{
+ int i;
+
+ for (i = 0 ; i < NUM_DDVDEVS; i++)
+ invalidate_buffers(MKDEV(MAJOR_NR, i));
+
+ /* reset the opiu */
+ OPT_IO(OPIU_OP) = OPIU_RESET;
+ OPT_IO(PRST) = PRST_IRST;
+
+ unregister_blkdev( MAJOR_NR, DEVICE_NAME );
+ free_irq(APOPT0_IRQ, NULL);
+ blk_dev[MAJOR_NR].request_fn = 0;
+}
+
+#endif /* MODULE */
+
+
diff --git a/drivers/ap1000/ddv_util.c b/drivers/ap1000/ddv_util.c
new file mode 100644
index 000000000..b853a3216
--- /dev/null
+++ b/drivers/ap1000/ddv_util.c
@@ -0,0 +1,116 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+#define __NO_VERSION__
+#include <linux/module.h>
+#include <linux/blk.h>
+#include <linux/genhd.h>
+#include <asm/pgtable.h>
+#include <asm/ap1000/apreg.h>
+#include <asm/ap1000/DdvReqTable.h>
+
+
+#define GENDISK_STRUCT ddv_gendisk
+
+struct RequestTable *RTable=NULL;
+struct OPrintBufArray *PrintBufs=NULL;
+struct OAlignBufArray *AlignBufs=NULL;
+struct DiskInfo *DiskInfo=NULL;
+
+extern int ddv_length[];
+
+int ddv_mlist_available(void)
+{
+ int start = RTable->start_mtable;
+ int end = RTable->end_mtable;
+
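+	/* how many mtable slots can be filled contiguously starting at
+	   start_mtable, bounded by the consumer's end pointer or the
+	   physical end of the table */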
+ if (start >= end)
+ return (MTABLE_SIZE - start);
+ return (end+1) - start;
+}
+
+
+int ddv_get_mlist(unsigned mptr[],int bnum)
+{
+ int available = ddv_mlist_available();
+ int i;
+ int start = RTable->start_mtable;
+
+ if (available < bnum) {
+ return -1;
+ }
+
+ for (i = 0; i < bnum; i++) {
+ unsigned phys = (unsigned)mmu_v2p((unsigned)mptr[i]);
+ if (phys == -1)
+ panic("bad address %x in ddv_get_mlist\n",mptr[i]);
+ RTable->mtable[RTable->start_mtable] = phys;
+ RTable->start_mtable = INC_ML(RTable->start_mtable);
+ }
+
+ return start;
+}
+
+
+
+void ddv_load_kernel(char *opcodep)
+{
+ int tsize;
+ char *p;
+ struct exec *mhead;
+
+ mhead = (struct exec *)opcodep;
+ p = opcodep + sizeof(*mhead);
+
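+	/* The option-processor kernel is a plain a.out image: copy the text
+	   to its entry address in option-bus space, place the data on the
+	   next page boundary after it, then zero the BSS. */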
+ tsize = (mhead->a_text + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
+ memcpy((char *)OPIBUS_BASE+mhead->a_entry,p,mhead->a_text);
+ memcpy((char *)OPIBUS_BASE+mhead->a_entry+tsize,
+ p+mhead->a_text,mhead->a_data);
+ memset((char *)OPIBUS_BASE+mhead->a_entry+tsize+mhead->a_data,0,
+ mhead->a_bss+PAGE_SIZE);
+
+#ifdef DDV_DEBUG
+ printk("CELL(%d) loaded opiu kernel of size %ld %ld %ld (%ld)\n",
+ ap_getcid(),
+ mhead->a_text,mhead->a_data,mhead->a_bss,mhead->a_entry);
+#endif
+}
+
+
+int ddv_restart_cpu(void)
+{
+ unsigned long timeout;
+
+ OPT_IO(OPIU_OP) = OPIU_RESET;
+ OPT_IO(PRST) = PRST_IRST;
+ if (OPT_IO(PRST) != PRST_IRST) {
+ printk("_iu_load reset release error.\n");
+ return(-1);
+ }
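+	/* the option kernel signals that it has started by writing the
+	   option-bus address of its DiskInfo structure into PBUF0 */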
+ for (timeout=jiffies + 10;
+	     (jiffies < timeout) && (OPT_IO(PBUF0) == 0);
+ ) /* wait */ ;
+ if (OPT_IO(PBUF0) == 0) {
+		printk("WARNING: option kernel didn't start up\n");
+ return(-1);
+ } else {
+ printk("option kernel IU running\n");
+ DiskInfo = (struct DiskInfo *)(OPT_IO(PBUF0) + OPIBUS_BASE);
+ RTable = (struct RequestTable *)(DiskInfo->ptrs[0]+OPIBUS_BASE);
+ PrintBufs = (struct OPrintBufArray *)(DiskInfo->ptrs[1]+OPIBUS_BASE);
+ AlignBufs = (struct OAlignBufArray *)(DiskInfo->ptrs[2]+OPIBUS_BASE);
+
+ printk("Disk capacity: %d blocks of size %d\n",
+ (int)DiskInfo->blocks,(int)DiskInfo->blk_size);
+
+ OPT_IO(PBUF0) = 0;
+ }
+ return(0);
+}
+
+
+
diff --git a/drivers/ap1000/mac.c b/drivers/ap1000/mac.c
new file mode 100644
index 000000000..c17b4eec7
--- /dev/null
+++ b/drivers/ap1000/mac.c
@@ -0,0 +1,1177 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Routines for controlling the FORMAC+
+ */
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/inet.h>
+#include <net/sock.h>
+
+#include <asm/ap1000/apreg.h>
+#include <asm/ap1000/apservice.h>
+#include <asm/pgtable.h>
+
+#include "apfddi.h"
+#include "smt-types.h"
+#include "am79c830.h"
+#include "mac.h"
+#include "plc.h"
+#include "apfddi-reg.h"
+
+#define MAC_DEBUG 0
+
+/* Values for dma_state */
+#define IDLE 0
+#define XMITTING 1
+#define RECVING 2
+
+/*
+ * Messages larger than this many bytes are transferred to the FDDI send
+ * buffer using DMA; smaller ones are copied with memcpy().
+ */
+#define DMA_XMIT_THRESHOLD 64
+#define DMA_RECV_THRESHOLD 64
+
+/*
+ * If the FDDI receive buffer holds fewer than this many bytes, then
+ * sending takes priority over receiving.
+ */
+#define RECV_THRESHOLD (20*1024)
+
+#define DMA_RESET_MASKS ((AP_CLR_INTR_MASK<<DMA_INTR_NORMAL_SH) | \
+ (AP_CLR_INTR_MASK<<DMA_INTR_ERROR_SH))
+
+#define DMA_INTR_REQS ((AP_INTR_REQ<<DMA_INTR_NORMAL_SH) | \
+ (AP_INTR_REQ<<DMA_INTR_ERROR_SH))
+
+static void mac_print_state(void);
+
+typedef unsigned int mac_status_t;
+
+static volatile struct mac_queue *mac_queue_top = NULL;
+static volatile struct mac_queue *mac_queue_bottom = NULL;
+
+struct formac_state {
+ LoopbackType loopback;
+ int ring_op;
+ int recv_ptr;
+ int recv_empty;
+ int recv_ovf;
+ int xmit_ptr;
+ int xmit_free;
+ int xmit_start;
+ int xmit_chains;
+ int xmit_more_ptr;
+ int frames_xmitted;
+ int xmit_chain_start[3];
+ int frames_recvd;
+ int recv_aborted;
+ int xmit_aborted;
+ int wrong_bb;
+ int recv_error;
+ volatile struct mac_queue *cur_macq; /* Current queue el for send DMA */
+ volatile struct mac_buf cur_mbuf; /* Current mac_buf for send DMA */
+ struct sk_buff *cur_skb; /* skb for received packets by DMA */
+ int dma_state;
+};
+
+#define SPFRAMES_SIZE 64 /* # words for special frames area */
+#define RECV_BUF_START SPFRAMES_SIZE
+#define RECV_BUF_END (BUFFER_SIZE / 2 + 2048)
+#define RECV_BUF_SIZE (RECV_BUF_END - RECV_BUF_START)
+#define XMIT_BUF_START RECV_BUF_END
+#define XMIT_BUF_END BUFFER_SIZE
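+/*
+ * Layout of the FORMAC buffer memory (in words): the special-frame area
+ * (claim/beacon frames) occupies the first SPFRAMES_SIZE words, the receive
+ * buffer runs from there to just past the half-way mark, and the transmit
+ * buffer takes the remainder.
+ */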
+
+#define S2_RMT_EVENTS (S2_CLAIM_STATE | S2_MY_CLAIM | S2_HIGHER_CLAIM | \
+ S2_LOWER_CLAIM | S2_BEACON_STATE | S2_MY_BEACON | \
+ S2_OTHER_BEACON | S2_RING_OP | S2_MULTIPLE_DA | \
+ S2_TOKEN_ERR | S2_DUPL_CLAIM | S2_TRT_EXP_RECOV)
+
+struct mac_info *this_mac_info;
+struct formac_state this_mac_state;
+
+int
+mac_init(struct mac_info *mip)
+{
+ struct formac_state *msp = &this_mac_state;
+
+ bif_add_debug_key('f',mac_print_state,"show FDDI mac state");
+
+ this_mac_info = mip;
+
+ mac->cmdreg1 = C1_SOFTWARE_RESET;
+ mac->said = (mip->s_address[0] << 8) + mip->s_address[1];
+ mac->laim = (mip->l_address[0] << 8) + mip->l_address[1];
+ mac->laic = (mip->l_address[2] << 8) + mip->l_address[3];
+ mac->lail = (mip->l_address[4] << 8) + mip->l_address[5];
+ mac->sagp = (mip->s_group_adrs[0] << 8) + mip->s_group_adrs[1];
+ mac->lagm = (mip->l_group_adrs[0] << 8) + mip->l_group_adrs[1];
+ mac->lagc = (mip->l_group_adrs[2] << 8) + mip->l_group_adrs[3];
+ mac->lagl = (mip->l_group_adrs[4] << 8) + mip->l_group_adrs[5];
+ mac->tmax = mip->tmax >> 5;
+ mac->tvx = (mip->tvx - 254) / 255; /* it's -ve, round downwards */
+ mac->treq0 = mip->treq;
+ mac->treq1 = mip->treq >> 16;
+ mac->pri0 = ~0;
+ mac->pri1 = ~0;
+ mac->pri2 = ~0;
+ mac->mdreg2 = /*M2_STRIP_FCS +*/ M2_CHECK_PARITY + M2_EVEN_PARITY
+ + 3 * M2_RCV_BYTE_BDRY + M2_ENABLE_HSREQ
+ + M2_ENABLE_NPDMA + M2_SYNC_NPDMA + M2_RECV_BAD_FRAMES;
+ mac->eacb = RECV_BUF_START - 1;
+ mac->earv = XMIT_BUF_START - 1;
+ mac->eas = mac->earv;
+ mac->eaa0 = BUFFER_SIZE - 1;
+ mac->eaa1 = mac->eaa0;
+ mac->eaa2 = mac->eaa1;
+ mac->wpxsf = 0;
+ mac->rpr = RECV_BUF_START;
+ mac->wpr = RECV_BUF_START + 1;
+ mac->swpr = RECV_BUF_START;
+ mac->wpxs = mac->eas;
+ mac->swpxs = mac->eas;
+ mac->rpxs = mac->eas;
+ mac->wpxa0 = XMIT_BUF_START;
+ mac->rpxa0 = XMIT_BUF_START;
+
+ memset(msp, 0, sizeof(*msp));
+ msp->recv_ptr = RECV_BUF_START;
+ msp->recv_empty = 1;
+ msp->xmit_ptr = XMIT_BUF_START;
+ msp->xmit_free = XMIT_BUF_START + 1;
+ msp->xmit_start = XMIT_BUF_START;
+ msp->xmit_chains = 0;
+ msp->frames_xmitted = 0;
+ msp->frames_recvd = 0;
+ msp->recv_aborted = 0;
+
+ mac->mdreg1 = M1_MODE_MEMORY;
+
+ mac_make_spframes();
+
+ return 0;
+}
+
+int
+mac_inited(struct mac_info *mip)
+{
+ struct formac_state *msp = &this_mac_state;
+ mac_status_t st1, st2;
+
+ if (mac->said != (mip->s_address[0] << 8) + mip->s_address[1]
+ || mac->laim != (mip->l_address[0] << 8) + mip->l_address[1]
+ || mac->laic != (mip->l_address[2] << 8) + mip->l_address[3]
+ || mac->lail != (mip->l_address[4] << 8) + mip->l_address[5]
+ || mac->sagp != (mip->s_group_adrs[0] << 8) + mip->s_group_adrs[1]
+ || mac->lagm != (mip->l_group_adrs[0] << 8) + mip->l_group_adrs[1]
+ || mac->lagc != (mip->l_group_adrs[2] << 8) + mip->l_group_adrs[3]
+ || mac->lagl != (mip->l_group_adrs[4] << 8) + mip->l_group_adrs[5])
+ return 1;
+ if ((mac->mdreg1 & ~M1_ADDET) != (M1_MODE_ONLINE | M1_SELECT_RA
+ | M1_FULL_DUPLEX))
+ return 3;
+ if (mac->treq0 != (mip->treq & 0xffff)
+ || mac->treq1 != ((unsigned)mip->treq >> 16))
+ return 4;
+
+ st1 = (mac->st1u << 16) + mac->st1l;
+ st2 = (mac->st2u << 16) + mac->st2l;
+ if ((st2 & S2_RING_OP) == 0)
+ return 5;
+
+ /* It's probably OK, reset some things to be safe. */
+ this_mac_info = mip;
+ *csr0 &= ~CS0_HREQ;
+ mac->tmax = mip->tmax >> 5;
+ mac->tvx = (mip->tvx - 254) / 255; /* it's -ve, round downwards */
+ mac->pri0 = ~0;
+ mac->pri1 = ~0;
+ mac->pri2 = ~0;
+ mac->mdreg2 = /*M2_STRIP_FCS +*/ M2_CHECK_PARITY + M2_EVEN_PARITY
+ + 3 * M2_RCV_BYTE_BDRY + M2_ENABLE_HSREQ
+ + M2_ENABLE_NPDMA + M2_SYNC_NPDMA + M2_RECV_BAD_FRAMES;
+
+ /* clear out the receive queue */
+ mac->mdreg1 = (mac->mdreg1 & ~M1_ADDET) | M1_ADDET_DISABLE_RECV;
+ mac->rpr = RECV_BUF_START;
+ mac->wpr = RECV_BUF_START + 1;
+ mac->swpr = RECV_BUF_START;
+
+ memset(msp, 0, sizeof(*msp));
+ msp->recv_ptr = RECV_BUF_START;
+ msp->recv_empty = 1;
+
+ /* XXX reset transmit pointers */
+ mac->cmdreg2 = C2_ABORT_XMIT;
+ mac->cmdreg2 = C2_RESET_XMITQS;
+ mac->wpxa0 = XMIT_BUF_START;
+ mac->rpxa0 = XMIT_BUF_START;
+ msp->xmit_ptr = XMIT_BUF_START;
+ msp->xmit_free = XMIT_BUF_START + 1;
+ msp->xmit_start = XMIT_BUF_START;
+ msp->xmit_chains = 0;
+
+ mac_make_spframes();
+ mac->cmdreg1 = C1_CLR_ALL_LOCKS;
+
+ msp->frames_xmitted = 0;
+ msp->frames_recvd = 0;
+ msp->recv_aborted = 0;
+ msp->ring_op = 1;
+
+ mac->mdreg1 = (mac->mdreg1 & ~M1_ADDET) | M1_ADDET_NSA;
+ mac->imsk1u = ~(S1_XMIT_ABORT | S1_END_FRAME_ASYNC0) >> 16;
+ mac->imsk1l = ~(S1_PAR_ERROR_ASYNC0 | S1_QUEUE_LOCK_ASYNC0);
+ mac->imsk2u = ~(S2_RECV_COMPLETE | S2_RECV_BUF_FULL | S2_RECV_FIFO_OVF
+ | S2_ERR_SPECIAL_FR | S2_RMT_EVENTS
+ | S2_NP_SIMULT_LOAD) >> 16;
+ mac->imsk2l = ~(S2_RMT_EVENTS | S2_MISSED_FRAME);
+
+ return 0;
+}
+
+void mac_make_spframes(void)
+{
+ volatile int *bp;
+ struct mac_info *mip = this_mac_info;
+ int sa;
+ struct formac_state *msp = &this_mac_state;
+
+ /* initialize memory to avoid parity errors */
+ *csr0 &= ~CS0_HREQ;
+ *csr1 &= ~CS1_BUF_WR_TAG;
+ for (bp = &buffer_mem[BUFFER_SIZE]; bp > &buffer_mem[XMIT_BUF_START];)
+ *--bp = 0xdeadbeef;
+ for (; bp > buffer_mem;)
+ *--bp = 0xfeedf00d;
+ buffer_mem[msp->recv_ptr] = 0;
+
+ bp = buffer_mem;
+ *bp++ = 0; /* auto-void frame pointer (not used) */
+
+ /* make claim frame */
+ sa = bp - buffer_mem;
+ *bp++ = 0xd8000011; /* claim frame descr. + length */
+ *bp++ = 0xc3; /* FC value for claim frame, long addr */
+ *bp++ = (mip->l_address[0] << 24) + (mip->l_address[1] << 16)
+ + (mip->l_address[2] << 8) + mip->l_address[3];
+ *bp++ = (mip->l_address[4] << 24) + (mip->l_address[5] << 16)
+ + (mip->l_address[0] << 8) + mip->l_address[1];
+ *bp++ = (mip->l_address[2] << 24) + (mip->l_address[3] << 16)
+ + (mip->l_address[4] << 8) + mip->l_address[5];
+ *bp++ = mip->treq;
+ mac->sacl = bp - buffer_mem; /* points to pointer to claim frame */
+ *bp++ = 0xa0000000 + sa; /* pointer to start of claim frame */
+
+ /* make beacon frame */
+ sa = bp - buffer_mem;
+ *bp++ = 0xd8000011; /* beacon frame descr. + length */
+ *bp++ = 0xc2; /* FC value for beacon frame, long addr */
+ *bp++ = 0; /* DA = 0 */
+ *bp++ = (mip->l_address[0] << 8) + mip->l_address[1];
+ *bp++ = (mip->l_address[2] << 24) + (mip->l_address[3] << 16)
+ + (mip->l_address[4] << 8) + mip->l_address[5];
+ *bp++ = 0; /* beacon reason = failed claim */
+ mac->sabc = bp - buffer_mem;
+ *bp++ = 0xa0000000 + sa; /* pointer to start of beacon frame */
+}
+
+void mac_reset(LoopbackType loopback)
+{
+ int mode;
+ struct formac_state *msp = &this_mac_state;
+
+ msp->loopback = loopback;
+ switch (loopback) {
+ case loop_none:
+ mode = M1_MODE_ONLINE;
+ break;
+ case loop_formac:
+ mode = M1_MODE_INT_LOOP;
+ break;
+ default:
+ mode = M1_MODE_EXT_LOOP;
+ break;
+ }
+ mac->mdreg1 = mode | M1_ADDET_NSA | M1_SELECT_RA | M1_FULL_DUPLEX;
+ mac->cmdreg1 = C1_IDLE_LISTEN;
+ mac->cmdreg1 = C1_CLR_ALL_LOCKS;
+ mac->imsk1u = ~(S1_XMIT_ABORT | S1_END_FRAME_ASYNC0) >> 16;
+ mac->imsk1l = ~(S1_PAR_ERROR_ASYNC0 | S1_QUEUE_LOCK_ASYNC0);
+ mac->imsk2u = ~(S2_RECV_COMPLETE | S2_RECV_BUF_FULL | S2_RECV_FIFO_OVF
+ | S2_ERR_SPECIAL_FR | S2_RMT_EVENTS
+ | S2_NP_SIMULT_LOAD) >> 16;
+ mac->imsk2l = ~(S2_RMT_EVENTS | S2_MISSED_FRAME);
+}
+
+void mac_claim(void)
+{
+ mac->cmdreg1 = C1_CLAIM_LISTEN;
+}
+
+void mac_disable(void)
+{
+ mac->mdreg1 = M1_MODE_MEMORY;
+ mac->imsk1u = ~0;
+ mac->imsk1l = ~0;
+ mac->imsk2u = ~0;
+ mac->imsk2l = ~0;
+ mac->wpr = mac->swpr + 1;
+ if (mac->wpr > mac->earv)
+ mac->wpr = mac->eacb + 1;
+ buffer_mem[mac->swpr] = 0;
+}
+
+void mac_stats(void)
+{
+ struct formac_state *msp = &this_mac_state;
+
+ if (msp->recv_ovf)
+ printk("%d receive buffer overflows\n", msp->recv_ovf);
+ if (msp->wrong_bb)
+ printk("%d frames on wrong byte bdry\n", msp->wrong_bb);
+ printk("%d frames transmitted, %d aborted\n", msp->frames_xmitted,
+ msp->xmit_aborted);
+ printk("%d frames received, %d aborted\n", msp->frames_recvd,
+ msp->recv_aborted);
+ printk("%d frames received with errors\n", msp->recv_error);
+}
+
+void mac_sleep(void)
+{
+ /* disable the receiver */
+ mac->mdreg1 = (mac->mdreg1 & ~M1_ADDET) | M1_ADDET_DISABLE_RECV;
+}
+
+void mac_poll(void)
+{
+ mac_status_t st1, st2;
+ struct formac_state *msp = &this_mac_state;
+ int up, f, d, l, r, e, i;
+
+ st1 = (mac->st1u << 16) + mac->st1l;
+ st2 = (mac->st2u << 16) + mac->st2l;
+
+ if (st2 & S2_NP_SIMULT_LOAD)
+ panic("NP/formac simultaneous load!!!");
+
+ up = (st2 & S2_RING_OP) != 0;
+ if (up != msp->ring_op) {
+ /* ring has come up or down */
+ msp->ring_op = up;
+ printk("mac: ring %s\n", up? "up": "down");
+ set_ring_op(up);
+ }
+
+ if (up) {
+ if (st1 & S1_XMIT_ABORT) {
+ ++msp->xmit_aborted;
+ if (st1 & S1_QUEUE_LOCK_ASYNC0) {
+ printk("mac: xmit queue locked, resetting xmit buffer\n");
+ mac->cmdreg2 = C2_RESET_XMITQS; /* XXX bit gross */
+ mac->rpxa0 = XMIT_BUF_START;
+ buffer_mem[XMIT_BUF_START] = 0;
+ msp->xmit_ptr = XMIT_BUF_START;
+ msp->xmit_start = XMIT_BUF_START;
+ msp->xmit_chains = 0;
+ mac->cmdreg1 = C1_CLR_ASYNCQ0_LOCK;
+ st1 &= ~(S1_END_CHAIN_ASYNC0 | S1_END_FRAME_ASYNC0
+ | S1_XINSTR_FULL_ASYNC0);
+ } else
+ st1 |= S1_END_FRAME_ASYNC0;
+ } else if (st1 & S1_QUEUE_LOCK_ASYNC0) {
+ printk("mac: xmit queue locked, why?\n");
+ mac->cmdreg1 = C1_CLR_ASYNCQ0_LOCK;
+ }
+
+ if (st1 & S1_END_FRAME_ASYNC0) {
+ /* advance xmit_start */
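+			/* Each queued frame is laid out as: a pointer word,
+			   a descriptor (byte boundary + length), the data
+			   words, and a trailing pointer.  Step over frames
+			   the FORMAC has finished with until we reach its
+			   read pointer. */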
+ e = msp->xmit_start;
+ while (e != msp->xmit_ptr) {
+ /* find the end of the current frame */
+ f = buffer_mem[e]; /* read pointer */
+ if (f == 0)
+ break; /* huh?? */
+ f &= 0xffff;
+ d = buffer_mem[f]; /* read descriptor */
+ l = ((d & 0xffff) + ((d >> TD_BYTE_BDRY_LG) & 3) + 3) >> 2;
+ e = f + 1 + l; /* index of ptr at end of frame */
+ r = mac->rpxa0;
+ if ((r <= msp->xmit_ptr && r < e && e <= msp->xmit_ptr)
+ || (r > msp->xmit_ptr && (r < e || e <= msp->xmit_ptr)))
+ break; /* up to current frame */
+ /* printk("frame @ %x done\n", msp->xmit_start); */
+ msp->xmit_start = e;
+ if ((st1 & S1_XMIT_ABORT) == 0)
+ ++msp->frames_xmitted;
+ if ((msp->xmit_chains == 1 && e == msp->xmit_ptr) ||
+ (msp->xmit_chains > 1 && e == msp->xmit_chain_start[1])) {
+ /* we've finished chain 0 */
+ --msp->xmit_chains;
+ for (i = 0; i < msp->xmit_chains; ++i)
+ msp->xmit_chain_start[i] = msp->xmit_chain_start[i+1];
+ if (msp->xmit_chains >= 2) {
+ mac->cmdreg2 = C2_XMIT_ASYNCQ0;
+ /* printk("mac_poll: xmit chain\n"); */
+ }
+ if (msp->xmit_chains == 0)
+ *csr0 &= ~CS0_LED1;
+ }
+ }
+ /*
+ * Now that we have a bit more space in the transmit buffer,
+ * see if we want to put another frame in.
+ */
+#if MAC_DEBUG
+ printk("Removed space in transmit buffer.\n");
+#endif
+ mac_process();
+ }
+ }
+
+ if (st2 & S2_RMT_EVENTS) {
+ rmt_event(st2);
+ }
+
+ if (st2 & S2_RECV_COMPLETE) {
+ /*
+ * A frame has just finished arriving in the receive buffer.
+ */
+ *csr0 |= CS0_LED2;
+ msp->recv_empty = 0;
+#if MAC_DEBUG
+ printk("Frame has just trickled in...\n");
+#endif
+ mac_process();
+ }
+
+ if (st2 & S2_RECV_BUF_FULL) {
+ /*
+ * receive buffer overflow: reset and unlock the receive buffer.
+ */
+/* printk("mac: receive buffer full\n"); */
+ mac->rpr = RECV_BUF_START;
+ mac->wpr = RECV_BUF_START + 1;
+ mac->swpr = RECV_BUF_START;
+ msp->recv_ptr = RECV_BUF_START;
+ msp->recv_empty = 1;
+ buffer_mem[RECV_BUF_START] = 0;
+ mac->cmdreg1 = C1_CLR_RECVQ_LOCK;
+ ++msp->recv_ovf;
+
+#if 0
+ } else if (st2 & S2_RECV_FIFO_OVF) {
+ printk("mac: receive FIFO overflow\n");
+ /* any further action required here? */
+
+ } else if (st2 & S2_MISSED_FRAME) {
+ printk("mac: missed frame\n");
+#endif
+ }
+
+ if (st2 & S2_ERR_SPECIAL_FR) {
+ printk("mac: bug: error in special frame\n");
+ mac_disable();
+ }
+}
+
+void
+mac_xmit_alloc(struct mac_buf *sp, int bb)
+{
+ int nwords;
+
+ nwords = (sp->length + bb + 3) >> 2;
+ sp->fr_start = mac_xalloc(nwords + 2);
+ sp->fr_end = sp->fr_start + nwords + 1;
+ sp->ptr = (char *) &buffer_mem[sp->fr_start + 1] + bb;
+ buffer_mem[sp->fr_start] = TD_MAGIC + (bb << TD_BYTE_BDRY_LG) + sp->length;
+}
+
+void
+mac_queue_frame(struct mac_buf *sp)
+{
+ struct formac_state *msp = &this_mac_state;
+
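+	/* Up to three transmit chains are tracked.  The transmit command for
+	   a new chain is issued here for the first two and from mac_poll()
+	   once an earlier chain completes; when three chains are already
+	   outstanding the frame is appended to the last one by setting
+	   TD_MORE in the previous frame's descriptor. */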
+ buffer_mem[sp->fr_end] = 0; /* null pointer at end of frame */
+ buffer_mem[msp->xmit_ptr] = PT_MAGIC + sp->fr_start;
+ if (msp->xmit_chains <= 2) {
+ msp->xmit_chain_start[msp->xmit_chains] = msp->xmit_ptr;
+ if (msp->xmit_chains < 2)
+ mac->cmdreg2 = C2_XMIT_ASYNCQ0;
+ ++msp->xmit_chains;
+ } else {
+ buffer_mem[msp->xmit_more_ptr] |= TD_MORE;
+ }
+ msp->xmit_ptr = sp->fr_end;
+ msp->xmit_more_ptr = sp->fr_start;
+ *csr0 |= CS0_LED1;
+}
+
+int
+mac_xalloc(int nwords)
+{
+ int fr_start;
+ struct formac_state *msp = &this_mac_state;
+
+ /*
+ * Find some room in the transmit buffer.
+ */
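+	/* The transmit area is managed as a circular region between
+	   XMIT_BUF_START and XMIT_BUF_END: xmit_free is the next free word
+	   and xmit_start the start of the oldest unfinished frame, so an
+	   allocation wraps to the front when it would run off the end. */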
+ fr_start = msp->xmit_free;
+ if (fr_start > msp->xmit_start) {
+ if (fr_start + nwords > XMIT_BUF_END) {
+ /* no space at end - see if we can start again from the front */
+ fr_start = XMIT_BUF_START;
+ if (fr_start + nwords > msp->xmit_start)
+ panic("no space in xmit buffer (1)");
+ }
+ } else {
+ if (fr_start + nwords > msp->xmit_start)
+ panic("no space in xmit buffer (2)");
+ }
+
+ msp->xmit_free = fr_start + nwords;
+
+ return fr_start;
+}
+
+int
+mac_recv_frame(struct mac_buf *sp)
+{
+ struct formac_state *msp = &this_mac_state;
+ int status, bb, orig_recv_ptr;
+
+ orig_recv_ptr = msp->recv_ptr;
+ for (;;) {
+ status = buffer_mem[msp->recv_ptr];
+ if ((status & RS_VALID) == 0) {
+ if (status != 0) {
+ printk("recv buf out of sync: recv_ptr=%x status=%x\n",
+ msp->recv_ptr, status);
+ printk(" rpr=%x swpr=%x, buf[rpr]=%x\n", mac->rpr, mac->swpr,
+ buffer_mem[mac->rpr]);
+ msp->recv_ptr = mac->swpr;
+ }
+ *csr0 &= ~CS0_LED2;
+ msp->recv_empty = 1;
+ if (mac->rpr == orig_recv_ptr)
+ mac->rpr = msp->recv_ptr;
+ return 0;
+ }
+ if (status & RS_ABORTED)
+ ++msp->recv_aborted;
+ else {
+ bb = (status >> RS_BYTE_BDRY_LG) & 3;
+ if (bb != 3) {
+ ++msp->wrong_bb;
+ bb = 3;
+ }
+ if ((status & RS_ERROR) == 0)
+ break;
+ ++msp->recv_error;
+ msp->recv_ptr += NWORDS((status & RS_LENGTH) + bb);
+ }
+ if (++msp->recv_ptr >= RECV_BUF_END)
+ msp->recv_ptr -= RECV_BUF_SIZE;
+ }
+ ++msp->frames_recvd;
+ if (mac->rpr == orig_recv_ptr)
+ mac->rpr = msp->recv_ptr;
+
+ sp->fr_start = msp->recv_ptr;
+ sp->length = (status & RS_LENGTH) + bb; /* + 4 (status) - 4 (FCS) */
+ sp->ptr = (void *) &buffer_mem[sp->fr_start];
+ if ((msp->recv_ptr += NWORDS(sp->length) + 1) >= RECV_BUF_END)
+ msp->recv_ptr -= RECV_BUF_SIZE;
+ sp->fr_end = msp->recv_ptr;
+ sp->wraplen = (RECV_BUF_END - sp->fr_start) * 4;
+ sp->wrapptr = (void *) &buffer_mem[RECV_BUF_START];
+
+ return 1;
+}
+
+void
+mac_discard_frame(struct mac_buf *sp)
+{
+ mac->rpr = sp->fr_end;
+}
+
+/*
+ * Return the number of bytes free in the async 0 transmit queue.
+ */
+int
+mac_xmit_space(void)
+{
+ struct formac_state *msp = &this_mac_state;
+ int nw;
+
+ if (msp->xmit_free > msp->xmit_start) {
+ nw = XMIT_BUF_END - msp->xmit_free;
+ if (nw < msp->xmit_start - XMIT_BUF_START)
+ nw = msp->xmit_start - XMIT_BUF_START;
+ } else
+ nw = msp->xmit_start - msp->xmit_free;
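+	/* two words per frame are reserved for the descriptor and the
+	   trailing pointer; the remainder is converted from words to bytes */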
+ return nw <= 2? 0: (nw - 2) << 2;
+}
+
+/*
+ * Return the number of bytes of frames available in the receive queue.
+ */
+int
+mac_recv_level(void)
+{
+ int nw;
+
+ nw = mac->swpr - mac->rpr;
+ if (nw < 0)
+ nw += mac->earv - mac->eacb;
+ return nw << 2;
+}
+
+/*
+ * Return 1 iff all transmission has been completed, 0 otherwise.
+ */
+int mac_xmit_done(void)
+{
+ struct formac_state *msp = &this_mac_state;
+
+ return msp->xmit_chains == 0;
+}
+
+/*
+ * Append skbuff packet to queue.
+ */
+int mac_queue_append (struct sk_buff *skb)
+{
+ struct mac_queue *el;
+ unsigned flags;
+ save_flags(flags); cli();
+
+#if MAC_DEBUG
+ printk("Appending queue element skb 0x%x\n", skb);
+#endif
+
+ if ((el = (struct mac_queue *)kmalloc(sizeof(*el), GFP_ATOMIC)) == NULL) {
+ restore_flags(flags);
+ return 1;
+ }
+ el->next = NULL;
+ el->skb = skb;
+
+ if (mac_queue_top == NULL) {
+ mac_queue_top = mac_queue_bottom = el;
+ }
+ else {
+ mac_queue_bottom->next = el;
+ mac_queue_bottom = el;
+ }
+ restore_flags(flags);
+ return 0;
+}
+
+/*
+ * If the packet originated from the same FDDI subnet as we are on,
+ * there is no need to perform checksumming as FDDI does this for us.
+ */
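+/*
+ * (The 32-bit words at byte offsets 12 and 16 of the IP header are the
+ * source and destination addresses; the macro compares them under the
+ * configured netmask to detect an on-subnet peer.)
+ */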
+#define CHECK_IF_CHECKSUM_REQUIRED(skb) \
+ if ((skb)->protocol == ETH_P_IP) { \
+ extern struct cap_init cap_init; \
+ int *from_ip = (int *)((skb)->data+12); \
+ int *to_ip = (int *)((skb)->data+16); \
+ if ((*from_ip & cap_init.netmask) == (*to_ip & cap_init.netmask)) \
+ (skb)->ip_summed = CHECKSUM_UNNECESSARY; \
+ }
+
+/*
+ * Try to send and/or recv frames.
+ */
+void mac_process(void)
+{
+ volatile struct dma_chan *dma = (volatile struct dma_chan *) DMA3;
+ struct formac_state *msp = &this_mac_state;
+ struct mac_queue *el;
+ int nw=0, mrl = 0, fstart, send_buffer_full = 0;
+ unsigned flags;
+
+ save_flags(flags); cli();
+
+#if MAC_DEBUG
+ printk("In mac_process()\n");
+#endif
+
+ /*
+ * Check if the DMA is being used.
+ */
+ if (msp->dma_state != IDLE) {
+ restore_flags(flags);
+ return;
+ }
+
+ while (mac_queue_top != NULL || /* Something to transmit */
+ (mrl = mac_recv_level()) > 0) { /* Frames in receive buffer */
+ send_buffer_full = 0;
+#if MAC_DEBUG
+ printk("mac_process(): something to do... mqt %x mrl is %d\n",
+ mac_queue_top, mrl);
+#endif
+ if (mac_queue_top != NULL && mrl < RECV_THRESHOLD) {
+ el = (struct mac_queue *)mac_queue_top;
+
+ /*
+ * Check there is enough space in the FDDI send buffer.
+ */
+ if (mac_xmit_space() < el->skb->len) {
+#if MAC_DEBUG
+ printk("process_queue(): FDDI send buffer is full\n");
+#endif
+ send_buffer_full = 1;
+ }
+ else {
+#if MAC_DEBUG
+ printk("mac_process(): sending a frame\n");
+#endif
+ /*
+ * Update mac_queue_top.
+ */
+ mac_queue_top = mac_queue_top->next;
+
+ /*
+ * Allocate space in the FDDI send buffer.
+ */
+ msp->cur_mbuf.length = el->skb->len-3;
+ mac_xmit_alloc((struct mac_buf *)&msp->cur_mbuf, 3);
+
+ /*
+ * If message size is greater than DMA_XMIT_THRESHOLD, send
+ * using DMA, otherwise use memcpy().
+ */
+ if (el->skb->len > DMA_XMIT_THRESHOLD) {
+ /*
+ * Start the DMA.
+ */
+#if MAC_DEBUG
+ printk("mac_process(): Starting send DMA...\n");
+#endif
+ nw = msp->cur_mbuf.fr_end - msp->cur_mbuf.fr_start + 1;
+ mac->wpxa0 = msp->cur_mbuf.fr_start + 1;
+
+ *csr0 |= CS0_HREQ_WA0;
+
+ msp->cur_macq = el;
+ msp->dma_state = XMITTING;
+ dma->st = DMA_DMST_RST;
+ dma->st = DMA_RESET_MASKS;
+ dma->hskip = 1; /* skip = 0, count = 1 */
+ dma->vskip = 1; /* skip = 0, count = 1 */
+ dma->maddr = (u_char *)
+ mmu_v2p((unsigned long)el->skb->data);
+ dma->cmd = DMA_DCMD_ST + DMA_DCMD_TYP_AUTO +
+ DMA_DCMD_TD_MD + nw;
+ *csr0 &= ~CS0_DMA_RECV;
+ *csr0 |= CS0_DMA_ENABLE;
+
+ /*
+ * Don't process any more packets since the DMA is
+ * being used.
+ */
+ break;
+ }
+ else { /* el->skb->len <= DMA_XMIT_THRESHOLD */
+ /*
+ * Copy the data directly into the FDDI buffer.
+ */
+#if MAC_DEBUG
+				printk("mac_process(): Copying send data...\n");
+#endif
+ memcpy(msp->cur_mbuf.ptr - 3, el->skb->data,
+ ROUND4(el->skb->len));
+ mac_queue_frame((struct mac_buf *)&msp->cur_mbuf);
+ dev_kfree_skb(el->skb, FREE_WRITE);
+ kfree_s(el, sizeof(*el));
+ continue;
+ }
+ }
+
+ /*
+ * We have reached here if there is not enough space in the
+ * send buffer. Try to receive some packets instead.
+ */
+ }
+
+ if (mac_recv_frame((struct mac_buf *)&msp->cur_mbuf)) {
+ volatile int fc, llc_header_word2;
+ int pkt_len = 0;
+
+#if MAC_DEBUG
+ printk("mac_process(): Receiving frames...\n");
+#endif
+ /*
+ * Get the fc, note only word accesses are allowed from the
+ * FDDI buffers.
+ */
+ if (msp->cur_mbuf.wraplen > 4) {
+ fc = *(int *)(msp->cur_mbuf.ptr+4);
+ }
+ else {
+ /*
+ * fc_word must be at the start of the FDDI buffer.
+ */
+#if MAC_DEBUG
+ printk("Grabbed fc_word from wrapptr, wraplen %d\n",
+ msp->cur_mbuf.wraplen);
+#endif
+ fc = *(int *)msp->cur_mbuf.wrapptr;
+ }
+ fc &= 0xff;
+
+#if MAC_DEBUG
+ printk("fc is 0x%x\n", fc);
+#endif
+ if (fc < 0x50 || fc > 0x57) {
+ mac_discard_frame((struct mac_buf *)&msp->cur_mbuf);
+ continue;
+ }
+
+ /*
+ * Determine the size of the packet data and allocate a socket
+ * buffer.
+ */
+ pkt_len = msp->cur_mbuf.length - FDDI_HARDHDR_LEN;
+#if MAC_DEBUG
+ printk("Packet of length %d\n", pkt_len);
+#endif
+ msp->cur_skb = dev_alloc_skb(ROUND4(pkt_len));
+
+ if (msp->cur_skb == NULL) {
+ printk("mac_process(): Memory squeeze, dropping packet.\n");
+ apfddi_stats->rx_dropped++;
+ restore_flags(flags);
+ return;
+ }
+ msp->cur_skb->dev = apfddi_device;
+
+ /*
+ * Hardware header isn't copied to skbuff.
+ */
+ msp->cur_skb->mac.raw = msp->cur_skb->data;
+ apfddi_stats->rx_packets++;
+
+ /*
+ * Determine protocol from llc header.
+ */
+ if (msp->cur_mbuf.wraplen < FDDI_HARDHDR_LEN) {
+ llc_header_word2 = *(int *)(msp->cur_mbuf.wrapptr +
+ (FDDI_HARDHDR_LEN -
+ msp->cur_mbuf.wraplen - 4));
+ }
+ else {
+ llc_header_word2 = *(int *)(msp->cur_mbuf.ptr +
+ FDDI_HARDHDR_LEN - 4);
+ }
+ msp->cur_skb->protocol = llc_header_word2 & 0xFFFF;
+#if MAC_DEBUG
+ printk("Got protocol 0x%x\n", msp->cur_skb->protocol);
+#endif
+
+ /*
+ * Copy data into socket buffer, which may be wrapped around the
+ * FDDI buffer. Use memcpy if the size of the data is less
+ * than DMA_RECV_THRESHOLD. Note if DMA is used, then wrap-
+ * arounds are handled automatically.
+ */
+ if (pkt_len < DMA_RECV_THRESHOLD) {
+ if (msp->cur_mbuf.length < msp->cur_mbuf.wraplen) {
+ memcpy(skb_put(msp->cur_skb, ROUND4(pkt_len)),
+ msp->cur_mbuf.ptr + FDDI_HARDHDR_LEN,
+ ROUND4(pkt_len));
+ }
+ else if (msp->cur_mbuf.wraplen < FDDI_HARDHDR_LEN) {
+#if MAC_DEBUG
+ printk("Wrap case 2\n");
+#endif
+ memcpy(skb_put(msp->cur_skb, ROUND4(pkt_len)),
+ msp->cur_mbuf.wrapptr +
+ (FDDI_HARDHDR_LEN - msp->cur_mbuf.wraplen),
+ ROUND4(pkt_len));
+ }
+ else {
+#if MAC_DEBUG
+ printk("wrap case 3\n");
+#endif
+ memcpy(skb_put(msp->cur_skb,
+ ROUND4(msp->cur_mbuf.wraplen-
+ FDDI_HARDHDR_LEN)),
+ msp->cur_mbuf.ptr + FDDI_HARDHDR_LEN,
+ ROUND4(msp->cur_mbuf.wraplen - FDDI_HARDHDR_LEN));
+ memcpy(skb_put(msp->cur_skb,
+ ROUND4(msp->cur_mbuf.length -
+ msp->cur_mbuf.wraplen)),
+ msp->cur_mbuf.wrapptr,
+ ROUND4(msp->cur_mbuf.length -
+ msp->cur_mbuf.wraplen));
+ }
+
+#if MAC_DEBUG
+ if (msp->cur_skb->protocol == ETH_P_IP) {
+ dump_packet("apfddi_rx:", msp->cur_skb->data, pkt_len, 0);
+ }
+ else if (msp->cur_skb->protocol == ETH_P_ARP) {
+ struct arphdr *arp = (struct arphdr *)msp->cur_skb->data;
+ printk("arp->ar_op is 0x%x ar_hrd %d ar_pro 0x%x ar_hln %d ar_ln %d\n",
+			printk("arp->ar_op is 0x%x ar_hrd %d ar_pro 0x%x ar_hln %d ar_pln %d\n",
+ arp->ar_pln);
+ printk("sender hardware address: %x:%x:%x:%x:%x:%x\n",
+ *((u_char *)msp->cur_skb->data+8),
+ *((u_char *)msp->cur_skb->data+9),
+ *((u_char *)msp->cur_skb->data+10),
+ *((u_char *)msp->cur_skb->data+11),
+ *((u_char *)msp->cur_skb->data+12),
+ *((u_char *)msp->cur_skb->data+13));
+ printk("sender IP number %d.%d.%d.%d\n",
+ *((u_char *)msp->cur_skb->data+14),
+ *((u_char *)msp->cur_skb->data+15),
+ *((u_char *)msp->cur_skb->data+16),
+ *((u_char *)msp->cur_skb->data+17));
+ printk("receiver hardware address: %x:%x:%x:%x:%x:%x\n",
+ *((u_char *)msp->cur_skb->data+18),
+ *((u_char *)msp->cur_skb->data+19),
+ *((u_char *)msp->cur_skb->data+20),
+ *((u_char *)msp->cur_skb->data+21),
+ *((u_char *)msp->cur_skb->data+22),
+ *((u_char *)msp->cur_skb->data+23));
+ printk("receiver IP number %d.%d.%d.%d\n",
+ *((u_char *)msp->cur_skb->data+24),
+ *((u_char *)msp->cur_skb->data+25),
+ *((u_char *)msp->cur_skb->data+26),
+ *((u_char *)msp->cur_skb->data+27));
+ }
+#endif
+ CHECK_IF_CHECKSUM_REQUIRED(msp->cur_skb);
+
+ /*
+ * Inform the network layer of the new packet.
+ */
+#if MAC_DEBUG
+ printk("Calling netif_rx()\n");
+#endif
+ netif_rx(msp->cur_skb);
+
+ /*
+ * Remove frame from FDDI buffer.
+ */
+ mac_discard_frame((struct mac_buf *)&msp->cur_mbuf);
+ continue;
+ }
+ else {
+ /*
+ * Set up dma and break.
+ */
+#if MAC_DEBUG
+ printk("mac_process(): Starting receive DMA...\n");
+#endif
+ nw = NWORDS(pkt_len);
+ msp->dma_state = RECVING;
+ *csr0 &= ~(CS0_HREQ | CS0_DMA_ENABLE);
+/* *csr1 |= CS1_RESET_FIFO;
+ *csr1 &= ~CS1_RESET_FIFO; */
+ if ((*csr1 & CS1_FIFO_LEVEL) != 0) {
+ int x;
+ printk("fifo not empty! (csr1 = 0x%x) emptying...", *csr1);
+ do {
+ x = *fifo;
+ } while ((*csr1 & CS1_FIFO_LEVEL) != 0);
+ printk("done\n");
+ }
+ fstart = msp->cur_mbuf.fr_start + NWORDS(FDDI_HARDHDR_LEN);
+ if (fstart >= RECV_BUF_END)
+ fstart -= RECV_BUF_SIZE;
+ mac->rpr = fstart;
+#if MAC_DEBUG
+ printk("rpr=0x%x, nw=0x%x, stat=0x%x\n",
+ mac->rpr, nw, buffer_mem[msp->cur_mbuf.fr_start]);
+#endif
+ dma->st = DMA_DMST_RST;
+ dma->st = DMA_RESET_MASKS;
+ dma->hskip = 1; /* skip = 0, count = 1 */
+ dma->vskip = 1; /* skip = 0, count = 1 */
+ dma->maddr = (u_char *)
+ mmu_v2p((unsigned long)
+ skb_put(msp->cur_skb, ROUND4(pkt_len)));
+ dma->cmd = DMA_DCMD_ST + DMA_DCMD_TYP_AUTO + DMA_DCMD_TD_DM
+ + nw - 4;
+ *csr0 |= CS0_HREQ_RECV | CS0_DMA_RECV;
+ *csr0 |= CS0_DMA_ENABLE;
+#if MAC_DEBUG
+ printk("mac_process(): DMA is away!\n");
+#endif
+ break;
+ }
+ }
+ else {
+#if MAC_DEBUG
+ printk("mac_recv_frame failed\n");
+#endif
+ if (msp->recv_empty && send_buffer_full)
+ break;
+ }
+ }
+ /*
+ * Update mac_queue_bottom.
+ */
+ if (mac_queue_top == NULL)
+ mac_queue_bottom = NULL;
+
+#if MAC_DEBUG
+ printk("End of mac_process()\n");
+#endif
+ restore_flags(flags);
+}
+
+
+#define DMA_IN(reg) (*(volatile unsigned *)(reg))
+#define DMA_OUT(reg,v) (*(volatile unsigned *)(reg) = (v))
+
+/*
+ * DMA completion handler.
+ */
+void mac_dma_complete(void)
+{
+ volatile struct dma_chan *dma;
+ struct formac_state *msp = &this_mac_state;
+ unsigned a;
+
+ a = DMA_IN(DMA3_DMST);
+ if (!(a & DMA_INTR_REQS)) {
+ if (msp->dma_state != IDLE && (a & DMA_DMST_AC) == 0) {
+ printk("dma completed but no interrupt!\n");
+ msp->dma_state = IDLE;
+ }
+ return;
+ }
+
+ DMA_OUT(DMA3_DMST,AP_CLR_INTR_REQ<<DMA_INTR_NORMAL_SH);
+ DMA_OUT(DMA3_DMST,AP_CLR_INTR_REQ<<DMA_INTR_ERROR_SH);
+
+ dma = (volatile struct dma_chan *) DMA3;
+
+#if MAC_DEBUG
+ printk("In mac_dma_complete\n");
+#endif
+
+ if (msp->dma_state == XMITTING && ((dma->st & DMA_DMST_AC) == 0)) {
+ /*
+ * Transmit DMA finished.
+ */
+ int i = 20;
+#if MAC_DEBUG
+ printk("In mac_dma_complete for transmit complete\n");
+#endif
+ while (*csr1 & CS1_FIFO_LEVEL) {
+ if (--i <= 0) {
+ printk("csr0=0x%x csr1=0x%x: fifo not emptying\n", *csr0,
+ *csr1);
+ return;
+ }
+ }
+ *csr0 &= ~(CS0_HREQ | CS0_DMA_ENABLE);
+ msp->dma_state = IDLE;
+#if MAC_DEBUG
+ printk("mac_dma_complete(): Calling mac_queue_frame\n");
+#endif
+ mac_queue_frame((struct mac_buf *)&msp->cur_mbuf);
+ dev_kfree_skb(msp->cur_macq->skb, FREE_WRITE);
+ kfree_s((struct mac_buf *)msp->cur_macq, sizeof(*(msp->cur_macq)));
+ msp->cur_macq = NULL;
+#if MAC_DEBUG
+ printk("mac_dma_complete(): Calling mac_process()\n");
+#endif
+ mac_process();
+#if MAC_DEBUG
+ printk("End of mac_dma_complete transmitting\n");
+#endif
+ }
+ else if (msp->dma_state == RECVING && ((dma->st & DMA_DMST_AC) == 0)) {
+ /*
+ * Receive DMA finished. Copy the last four words from the
+ * fifo into the buffer, after turning off the host requests.
+ * We do this to avoid reading past the end of frame.
+ */
+ int *ip, i;
+
+#if MAC_DEBUG
+ printk("In mac_dma_complete for receive complete\n");
+#endif
+ msp->dma_state = IDLE;
+ ip = (int *)mmu_p2v((unsigned long)dma->cmaddr);
+
+#if MAC_DEBUG
+ printk("ip is 0x%x, skb->data is 0x%x\n", ip, msp->cur_skb->data);
+#endif
+
+ *csr0 &= ~(CS0_DMA_ENABLE | CS0_HREQ);
+
+ for (i = 0; (*csr1 & CS1_FIFO_LEVEL); ++i)
+ ip[i] = *fifo;
+ if (i != 4)
+ printk("mac_dma_complete(): not four words remaining in fifo?\n");
+#if MAC_DEBUG
+ printk("Copied last four words out of fifo\n");
+#endif
+
+ /*
+ * Remove the frame from the FDDI receive buffer.
+ */
+ mac_discard_frame((struct mac_buf *)&msp->cur_mbuf);
+
+ CHECK_IF_CHECKSUM_REQUIRED(msp->cur_skb);
+
+ /*
+ * Now inject the packet into the network system.
+ */
+ netif_rx(msp->cur_skb);
+
+#if MAC_DEBUG
+ dump_packet("mac_dma_complete:", msp->cur_skb->data, 0, 0);
+#endif
+
+ /*
+ * Check if any more frames can be processed.
+ */
+ mac_process();
+
+#if MAC_DEBUG
+ printk("End of mac_dma_complete receiving\n");
+#endif
+ }
+#if MAC_DEBUG
+ printk("End of mac_dma_complete()\n");
+#endif
+}
+
+static void mac_print_state(void)
+{
+ struct formac_state *msp = &this_mac_state;
+
+ printk("DMA3_DMST is 0x%x dma_state is %d\n", DMA_IN(DMA3_DMST),
+ msp->dma_state);
+ printk("csr0 = 0x%x, csr1 = 0x%x\n", *csr0, *csr1);
+}
+
+
diff --git a/drivers/ap1000/mac.h b/drivers/ap1000/mac.h
new file mode 100644
index 000000000..85f02b4a3
--- /dev/null
+++ b/drivers/ap1000/mac.h
@@ -0,0 +1,82 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Definitions of MAC state structures etc.
+ */
+
+struct mac_info {
+ TimerTwosComplement tmax;
+ TimerTwosComplement tvx;
+ TimerTwosComplement treq;
+ ShortAddressType s_address;
+ LongAddressType l_address;
+ ShortAddressType s_group_adrs;
+ LongAddressType l_group_adrs;
+ int rcv_own_frames;
+ int only_good_frames;
+};
+
+
+struct mac_buf {
+ struct mac_buf *next;
+ int ack;
+ int length;
+ void *ptr;
+ int wraplen;
+ void *wrapptr;
+ int fr_start;
+ int fr_end;
+};
+
+int mac_xmit_space(void);
+void mac_xmit_alloc(struct mac_buf *, int);
+void mac_queue_frame(struct mac_buf *);
+int mac_recv_frame(struct mac_buf *);
+void mac_discard_frame(struct mac_buf *);
+int mac_init(struct mac_info *mip);
+int mac_inited(struct mac_info *mip);
+void mac_reset(LoopbackType loopback);
+void mac_claim(void);
+void mac_sleep(void);
+void mac_poll(void);
+void mac_disable(void);
+void mac_make_spframes(void);
+int mac_xalloc(int nwords);
+int mac_xmit_dma(struct sk_buff *skb);
+void mac_dma_complete(void);
+void mac_process(void);
+int mac_queue_append(struct sk_buff *skb);
+
+struct dma_chan {
+ int cmd; /* cmd << 16 + size */
+ int st; /* status << 16 + current size */
+ int hskip; /* hskip << 16 + hcnt */
+ int vskip; /* vskip << 16 + vcnt */
+ unsigned char *maddr; /* memory address */
+ unsigned char *cmaddr; /* current memory address */
+ int ccount; /* h_count << 16 + v_count */
+ int *tblp; /* table pointer */
+ int *ctblp; /* current table pointer */
+ unsigned char *hdptr; /* header pointer */
+};
+
+#define ROUND4(x) (((x) + 3) & -4)
+#define ROUND8(x) (((x) + 7) & -8)
+#define ROUND16(x) (((x) + 15) & -16)
+#define ROUNDLINE(x) ROUND16(x)
+
+#define NWORDS(x) (((x) + 3) >> 2)
+#define NLINES(x) (((x) + 15) >> 4)
+
+/*
+ * Queue element used to queue transmit requests on the FDDI.
+ */
+struct mac_queue {
+ volatile struct mac_queue *next;
+ struct sk_buff *skb;
+};
diff --git a/drivers/ap1000/plc.c b/drivers/ap1000/plc.c
new file mode 100644
index 000000000..b29b1a4c2
--- /dev/null
+++ b/drivers/ap1000/plc.c
@@ -0,0 +1,393 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Routines for controlling the Am79c864 physical layer controller.
+ *
+ * This chip implements some parts of the FDDI SMT standard
+ * (PCM: physical connection management, LEM: link error monitor, etc.)
+ * as well as the FDDI PHY standard.
+ */
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include "apfddi.h"
+#include "smt-types.h"
+#include "am79c864.h"
+#include "plc.h"
+#include "apfddi-reg.h"
+
+typedef enum {
+ off,
+ signalling,
+ doing_lct,
+ joining,
+ active
+} PlcPhase;
+
+struct plc_state {
+ LoopbackType loopback;
+ char t_val[16];
+ char r_val[16];
+ int n;
+ PortType peer_type;
+ PlcPhase phase;
+};
+
+struct plc_info *this_plc_info;
+struct plc_state this_plc_state;
+
+void plc_init(struct plc_info *pip)
+{
+ int class, x;
+ struct plc_state *psp = &this_plc_state;
+
+ this_plc_info = pip;
+
+ /* first turn it off, clear registers */
+ class = pip->port_type == pt_s? CB_CLASS_S: 0;
+ plc->ctrl_b = CB_PC_STOP + class;
+ plc->intr_mask = IE_NP_ERROR;
+	x = plc->intr_event;	/* these registers clear when read */
+ x = plc->viol_sym_ct;
+ x = plc->min_idle_ct;
+ x = plc->link_err_ct;
+
+ /* initialize registers */
+ plc->ctrl_a = 0;
+ plc->ctrl_b = class;
+ plc->c_min = pip->c_min >> 8;
+ plc->tl_min = pip->tl_min >> 8;
+ plc->tb_min = pip->tb_min >> 8;
+ plc->t_out = pip->t_out >> 8;
+ plc->t_scrub = pip->t_scrub >> 8;
+ plc->ns_max = pip->ns_max >> 2;
+
+ psp->phase = off;
+}
+
+int
+plc_inited(struct plc_info *pip)
+{
+ int class, x;
+ struct plc_state *psp = &this_plc_state;
+
+ class = pip->port_type == pt_s? CB_CLASS_S: 0;
+ if ((plc->ctrl_a & (CA_LOOPBACK|CA_FOT_OFF|CA_EB_LOOP|CA_LM_LOOP)) != 0)
+ return 1;
+ if ((plc->ctrl_b & (CB_CONFIG_CTRL|CB_CLASS_S|CB_PC_MAINT)) != class)
+ return 2;
+ if (plc->status_a & SA_SIG_DETECT)
+ return 3;
+ if ((plc->status_b & (SB_PCI_STATE|SB_PCM_STATE))
+ != (SB_PCI_STATE_INSERTED|SB_PCM_STATE_ACTIVE))
+ return 4;
+
+ /* all seems OK, reset the timers and counters just to be sure */
+ plc->intr_mask = IE_NP_ERROR;
+	x = plc->intr_event;	/* these registers clear when read */
+ x = plc->viol_sym_ct;
+ x = plc->min_idle_ct;
+ x = plc->link_err_ct;
+
+ plc->c_min = pip->c_min >> 8;
+ plc->tl_min = pip->tl_min >> 8;
+ plc->tb_min = pip->tb_min >> 8;
+ plc->t_out = pip->t_out >> 8;
+ plc->t_scrub = pip->t_scrub >> 8;
+ plc->ns_max = pip->ns_max >> 2;
+
+ psp->phase = active;
+ /* XXX should initialize other fields of this_plc_state */
+
+ return 0;
+}
+
+void plc_sleep(void)
+{
+}
+
+void pc_start(LoopbackType loopback)
+{
+ int x;
+ struct plc_info *pip = this_plc_info;
+ struct plc_state *psp = &this_plc_state;
+
+ /* make sure it's off */
+ plc->ctrl_b &= ~CB_PCM_CTRL;
+ plc->ctrl_b |= CB_PC_STOP;
+
+ /* set up loopback required */
+ psp->loopback = loopback;
+ x = 0;
+ switch (loopback) {
+ case loop_plc_lm:
+ x = CA_LM_LOOP;
+ break;
+ case loop_plc_eb:
+ x = CA_EB_LOOP;
+ break;
+ case loop_pdx:
+ x = CA_LOOPBACK;
+ break;
+ default:
+ x = 0;
+ }
+ plc->ctrl_a = x;
+
+ /* set up bits to be exchanged */
+ psp->t_val[0] = 0;
+ psp->t_val[1] = ((int) pip->port_type >> 1) & 1;
+ psp->t_val[2] = (int) pip->port_type & 1;
+ psp->t_val[4] = 0; /* XXX assume we want short LCT */
+ psp->t_val[5] = 0;
+ psp->t_val[6] = 0; /* XXX too lazy to fire up my MAC for LCT */
+	psp->t_val[8] = 0;	/* XXX we don't want a local loop */
+	psp->t_val[9] = 1;	/* we have a MAC on the port output */
+
+ pc_restart();
+}
+
+void pc_restart(void)
+{
+ struct plc_state *psp = &this_plc_state;
+
+ if (psp->phase != off)
+ printk("restarting pcm\n");
+ if (psp->phase == active)
+ set_cf_join(0); /* we're down :-( */
+
+ psp->n = 0;
+ plc->vec_length = 3 - 1;
+ plc->xmit_vector = psp->t_val[0] + (psp->t_val[1] << 1)
+ + (psp->t_val[2] << 2);
+
+ plc->intr_mask = IE_NP_ERROR | IE_PCM_BREAK | IE_PCM_CODE;
+ plc->ctrl_b &= ~CB_PCM_CTRL;
+ plc->ctrl_b |= CB_PC_START; /* light blue paper and stand clear */
+
+ psp->phase = signalling;
+}
+
+void pc_stop(void)
+{
+ struct plc_state *psp = &this_plc_state;
+
+ if (psp->phase == active)
+ set_cf_join(0);
+ plc->ctrl_b &= ~CB_PCM_CTRL;
+ plc->ctrl_b |= CB_PC_STOP;
+ plc->intr_mask = IE_NP_ERROR;
+ psp->phase = off;
+}
+
+void plc_poll(void)
+{
+ struct plc_state *psp = &this_plc_state;
+ int events, i;
+
+ if ((*csr0 & CS0_PHY_IRQ) == 0)
+ return;
+ events = plc->intr_event & plc->intr_mask;
+ if (events & IE_NP_ERROR) {
+ printk("plc: NP error!\n");
+ }
+ if (events & IE_PCM_BREAK) {
+ i = plc->status_b & SB_BREAK_REASON;
+ if (i > SB_BREAK_REASON_START) {
+ if (psp->phase == signalling || psp->phase == doing_lct)
+ pcm_dump_rtcodes();
+ printk("pcm: break reason %d\n", i);
+ if (psp->phase != off)
+ pc_restart();
+ /* XXX need to check for trace? */
+ }
+ }
+ if (events & IE_PCM_CODE) {
+ if (psp->phase == signalling)
+ pcm_pseudo_code();
+ else if (psp->phase == doing_lct)
+ pcm_lct_done();
+ else
+ printk("XXX pcm_code interrupt in phase %d?\n", psp->phase);
+ }
+ if (events & IE_PCM_ENABLED) {
+ if (psp->phase == joining)
+ pcm_enabled();
+ else
+ printk("XXX pcm_enabled interrupt in phase %d?\n", psp->phase);
+ }
+ if (events & IE_TRACE_PROP) {
+ if (psp->phase == active)
+ pcm_trace_prop();
+ else
+ printk("XXX trace_prop interrupt in phase %d\n", psp->phase);
+ }
+}
+
+void pcm_pseudo_code(void)
+{
+ struct plc_info *pip = this_plc_info;
+ struct plc_state *psp = &this_plc_state;
+ int i, nb, lct, hislct;
+
+ /* unpack the bits from the peer */
+ nb = plc->vec_length + 1;
+ i = plc->rcv_vector;
+ do {
+ psp->r_val[psp->n++] = i & 1;
+ i >>= 1;
+ } while (--nb > 0);
+
+ /* send some more, do LCT, whatever */
+ switch (psp->n) {
+ case 3:
+ /*
+ * Got escape flag, port type; send compatibility,
+ * LCT duration, MAC for LCT flag.
+ */
+ if (psp->r_val[0]) {
+ /* help! what do I do now? */
+ pcm_dump_rtcodes();
+ pc_restart();
+ break;
+ }
+ psp->peer_type = (PortType) ((psp->r_val[1] << 1) + psp->r_val[2]);
+ /* XXX we're type S, we talk to anybody */
+ psp->t_val[3] = 1;
+
+ plc->vec_length = 4 - 1;
+ plc->xmit_vector = psp->t_val[3] + (psp->t_val[4] << 1)
+ + (psp->t_val[5] << 2) + (psp->t_val[6] << 3);
+ break;
+
+ case 7:
+ /*
+ * Got compatibility, LCT duration, MAC for LCT flag;
+ * time to do the LCT.
+ */
+ lct = (psp->t_val[4] << 1) + psp->t_val[5];
+ hislct = (psp->r_val[4] << 1) + psp->r_val[5];
+ if (hislct > lct)
+ lct = hislct;
+
+ /* set LCT duration */
+ switch (lct) {
+ case 0:
+ plc->lc_length = pip->lc_short >> 8;
+ plc->ctrl_b &= ~CB_LONG_LCT;
+ break;
+ case 1:
+ plc->lc_length = pip->lc_medium >> 8;
+ plc->ctrl_b &= ~CB_LONG_LCT;
+ break;
+ case 2:
+ plc->ctrl_b |= CB_LONG_LCT;
+ /* XXX set up a timeout for pip->lc_long */
+ break;
+ case 3:
+ plc->ctrl_b |= CB_LONG_LCT;
+ /* XXX set up a timeout for pip->lc_extended */
+ break;
+ }
+
+ /* start the LCT */
+ i = plc->link_err_ct; /* clear the register */
+ plc->ctrl_b &= ~CB_PC_LCT;
+	/* XXX assume we're not using the MAC for LCT;
+	   if the peer has a MAC, loop its output back, otherwise send idle. */
+ if (psp->r_val[6])
+ plc->ctrl_b |= CB_PC_LCT_LOOP;
+ else
+ plc->ctrl_b |= CB_PC_LCT_IDLE;
+ psp->phase = doing_lct;
+ break;
+
+ case 8:
+ /*
+ * Got LCT result, send MAC for local loop and MAC on port
+ * output flags.
+ */
+ if (psp->t_val[7] || psp->r_val[7]) {
+ printk("LCT failed, restarting.\n");
+ /* LCT failed - do at least a medium length test next time. */
+ if (psp->t_val[4] == 0 && psp->t_val[5] == 0)
+ psp->t_val[5] = 1;
+ pcm_dump_rtcodes();
+ pc_restart();
+ break;
+ }
+ plc->vec_length = 2 - 1;
+ plc->xmit_vector = psp->t_val[8] + (psp->t_val[9] << 1);
+ break;
+
+ case 10:
+ /*
+ * Got MAC for local loop and MAC on port output flags.
+ * Let's join.
+ */
+ plc->intr_mask = IE_NP_ERROR | IE_PCM_BREAK | IE_PCM_ENABLED;
+ plc->ctrl_b |= CB_PC_JOIN;
+ psp->phase = joining;
+ /* printk("pcm: joining\n"); */
+ break;
+
+ default:
+ printk("pcm_pseudo_code bug: n = %d\n", psp->n);
+ }
+}
+
+void pcm_lct_done(void)
+{
+ struct plc_state *psp = &this_plc_state;
+ int i;
+
+ i = plc->link_err_ct;
+ psp->t_val[7] = i > 0;
+ printk("pcm: lct %s (%d errors)\n", psp->t_val[7]? "failed": "passed", i);
+ plc->ctrl_b &= ~(CB_PC_LCT | CB_LONG_LCT);
+ plc->vec_length = 1 - 1;
+ plc->xmit_vector = psp->t_val[7];
+ psp->phase = signalling;
+}
+
+void pcm_dump_rtcodes(void)
+{
+ struct plc_state *psp = &this_plc_state;
+ int i;
+
+ if (psp->n > 0) {
+ printk("pcm signalling interrupted after %d bits:\nt_val:", psp->n);
+ for (i = 0; i < psp->n; ++i)
+ printk(" %d", psp->t_val[i]);
+ printk("\nr_val:");
+ for (i = 0; i < psp->n; ++i)
+ printk(" %d", psp->r_val[i]);
+ printk("\n");
+ }
+}
+
+void pcm_enabled(void)
+{
+ struct plc_state *psp = &this_plc_state;
+ int i;
+
+ printk("pcm: enabled\n");
+ psp->phase = active;
+ i = plc->link_err_ct; /* clear the register */
+ /* XXX should set up LEM here */
+ /* XXX do we want to count violation symbols, minimum idle gaps,
+ or elasticity buffer errors? */
+ plc->intr_mask = IE_NP_ERROR | IE_PCM_BREAK | IE_TRACE_PROP;
+ set_cf_join(1); /* we're up :-) */
+}
+
+void pcm_trace_prop(void)
+{
+ /* XXX help! what do I do now? */
+ pc_stop();
+}
diff --git a/drivers/ap1000/plc.h b/drivers/ap1000/plc.h
new file mode 100644
index 000000000..f87783f57
--- /dev/null
+++ b/drivers/ap1000/plc.h
@@ -0,0 +1,53 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Definitions for PLC state structures etc.
+ */
+
+struct plc_info {
+ PortType port_type;
+ TimerTwosComplement c_min;
+ TimerTwosComplement tl_min;
+ TimerTwosComplement tb_min;
+ TimerTwosComplement t_out;
+ TimerTwosComplement lc_short;
+ TimerTwosComplement lc_medium;
+ TimerTwosComplement lc_long;
+ TimerTwosComplement lc_extended;
+ TimerTwosComplement t_scrub;
+ TimerTwosComplement ns_max;
+ Counter link_errors;
+ Counter viol_syms;
+ Counter mini_occur;
+ int min_idle_gap;
+ double link_error_rate;
+};
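+
+/*
+ * Note: the timer fields above are Time values in the 80ns units from
+ * smt-types.h; plc_init()/plc_inited() load them into the PLC registers
+ * shifted right by 8 (by 2 for ns_max) to match the coarser register
+ * units.
+ */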
+
+void plc_init(struct plc_info *pip);
+int plc_inited(struct plc_info *pip);
+void pc_start(LoopbackType loopback);
+void plc_sleep(void);
+void plc_poll(void);
+void pc_stop(void);
+void pc_restart(void);
+void pcm_dump_rtcodes(void);
+void pcm_pseudo_code(void);
+void pcm_lct_done(void);
+void pcm_enabled(void);
+void pcm_trace_prop(void);
+
+
+
+
+
+
+
+
+
+
+
diff --git a/drivers/ap1000/ringbuf.c b/drivers/ap1000/ringbuf.c
new file mode 100644
index 000000000..b8bcbb541
--- /dev/null
+++ b/drivers/ap1000/ringbuf.c
@@ -0,0 +1,327 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * linux/drivers/ap1000/ringbuf.c
+ *
+ * This provides the /proc/XX/ringbuf interface to the Tnet ring buffer
+ */
+#define _APLIB_
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/segment.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+
+#include <asm/ap1000/pgtapmmu.h>
+#include <asm/ap1000/apreg.h>
+#include <asm/ap1000/apservice.h>
+
+
+/* we have a small number of reserved ring buffers to ensure that at
+ least one parallel program can always run */
+#define RBUF_RESERVED 4
+#define RBUF_RESERVED_ORDER 5
+static struct {
+ char *rb_ptr;
+ char *shared_ptr;
+ int used;
+} reserved_ringbuf[RBUF_RESERVED];
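+
+/*
+ * Assuming 4kB pages, each reserved buffer is
+ * PAGE_SIZE << RBUF_RESERVED_ORDER = 128kB plus one shared page;
+ * cap_map() hands these out for order-5 requests before falling back
+ * to the page allocator, so at least one parallel program can always
+ * get its ring buffer.
+ */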
+
+
+void ap_ringbuf_init(void)
+{
+ int i,j;
+ char *rb_ptr, *shared_ptr;
+ int rb_size = PAGE_SIZE * (1<<RBUF_RESERVED_ORDER);
+
+ /* preallocate some ringbuffers */
+ for (i=0;i<RBUF_RESERVED;i++) {
+ if (!(rb_ptr = (char *)__get_free_pages(GFP_ATOMIC,RBUF_RESERVED_ORDER,0))) {
+ printk("failed to preallocate ringbuf %d\n",i);
+ return;
+ }
+ for (j = MAP_NR(rb_ptr); j <= MAP_NR(rb_ptr+rb_size-1); j++) {
+ set_bit(PG_reserved,&mem_map[j].flags);
+ }
+
+ if (!(shared_ptr = (char *)__get_free_page(GFP_ATOMIC))) {
+ printk("failed to preallocate shared ptr %d\n",i);
+ return;
+ }
+ set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
+
+ reserved_ringbuf[i].used = 0;
+ reserved_ringbuf[i].rb_ptr = rb_ptr;
+ reserved_ringbuf[i].shared_ptr = shared_ptr;
+ }
+}
+
+
+
+void exit_ringbuf(struct task_struct *tsk)
+{
+ int i;
+
+ if (!tsk->ringbuf) return;
+
+ if (tsk->ringbuf->ringbuf) {
+ char *rb_ptr = tsk->ringbuf->ringbuf;
+ char *shared_ptr = tsk->ringbuf->shared;
+ int order = tsk->ringbuf->order;
+ int rb_size = PAGE_SIZE * (1<<order);
+
+ for (i=0;i<RBUF_RESERVED;i++)
+ if (rb_ptr == reserved_ringbuf[i].rb_ptr) break;
+
+ if (i < RBUF_RESERVED) {
+ reserved_ringbuf[i].used = 0;
+ } else {
+ for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
+ clear_bit(PG_reserved, &mem_map[i].flags);
+ }
+ free_pages((unsigned)rb_ptr,order);
+
+ i = MAP_NR(shared_ptr);
+			clear_bit(PG_reserved, &mem_map[i].flags);
+ free_page((unsigned)shared_ptr);
+ }
+ }
+
+ kfree_s(tsk->ringbuf,sizeof(*(tsk->ringbuf)));
+ tsk->ringbuf = NULL;
+}
+
+
+/*
+ * map the ring buffer into the user's memory
+ */
+static int cap_map(int rb_size)
+{
+ struct task_struct *tsk=current;
+ int i;
+ char *rb_ptr=NULL;
+ char *shared_ptr=NULL;
+ int order = 0;
+ int error,old_uid;
+
+ error = verify_area(VERIFY_WRITE,(char *)RBUF_VBASE,rb_size);
+ if (error) return error;
+
+ if (!MPP_IS_PAR_TASK(tsk->taskid)) {
+ printk("ringbuf_mmap called from non-parallel task\n");
+ return -EINVAL;
+ }
+
+
+ if (tsk->ringbuf) return -EINVAL;
+
+ rb_size -= RBUF_RING_BUFFER_OFFSET;
+ rb_size >>= 1;
+
+ switch (rb_size/1024) {
+ case 128:
+ order = 5;
+ break;
+ case 512:
+ order = 7;
+ break;
+ case 2048:
+ order = 9;
+ break;
+ case 8192:
+ order = 11;
+ break;
+ default:
+ printk("ringbuf_mmap with invalid size %d\n",rb_size);
+ return -EINVAL;
+ }
+
+ if (order == RBUF_RESERVED_ORDER) {
+ for (i=0;i<RBUF_RESERVED;i++)
+ if (!reserved_ringbuf[i].used) {
+ rb_ptr = reserved_ringbuf[i].rb_ptr;
+ shared_ptr = reserved_ringbuf[i].shared_ptr;
+ reserved_ringbuf[i].used = 1;
+ break;
+ }
+ }
+
+ if (!rb_ptr) {
+ rb_ptr = (char *)__get_free_pages(GFP_USER,order,0);
+ if (!rb_ptr) return -ENOMEM;
+
+ for (i = MAP_NR(rb_ptr); i <= MAP_NR(rb_ptr+rb_size-1); i++) {
+ set_bit(PG_reserved,&mem_map[i].flags);
+ }
+
+ shared_ptr = (char *)__get_free_page(GFP_USER);
+ if (!shared_ptr)
+ return -ENOMEM;
+ set_bit(PG_reserved,&mem_map[MAP_NR(shared_ptr)].flags);
+ }
+
+ if (!rb_ptr)
+ return -ENOMEM;
+
+ memset(rb_ptr,0,rb_size);
+ memset(shared_ptr,0,PAGE_SIZE);
+
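+	/*
+	 * Map the same physical ring buffer pages twice, back to back,
+	 * in the user's address space; apparently so that a message
+	 * which wraps past the end of the ring can still be accessed
+	 * as one contiguous block through the second mapping.
+	 */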
+ if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET,
+ mmu_v2p((unsigned)rb_ptr),
+ rb_size,APMMU_PAGE_SHARED))
+ return -EAGAIN;
+
+ if (remap_page_range(RBUF_VBASE + RBUF_RING_BUFFER_OFFSET + rb_size,
+ mmu_v2p((unsigned)rb_ptr),
+ rb_size,APMMU_PAGE_SHARED))
+ return -EAGAIN;
+
+ /* the shared area */
+ if (remap_page_range(RBUF_VBASE + RBUF_SHARED_PAGE_OFF,
+ mmu_v2p((unsigned)shared_ptr),
+ PAGE_SIZE,APMMU_PAGE_SHARED))
+ return -EAGAIN;
+
+#if 0
+ /* lock the ringbuffer in memory */
+ old_uid = current->euid;
+ current->euid = 0;
+ error = sys_mlock(RBUF_VBASE,2*rb_size+RBUF_RING_BUFFER_OFFSET);
+ current->euid = old_uid;
+ if (error) {
+ printk("ringbuffer mlock failed\n");
+ return error;
+ }
+#endif
+
+ /* the queue pages */
+#define MAP_QUEUE(offset,phys) \
+ io_remap_page_range(RBUF_VBASE + offset, \
+ phys<<PAGE_SHIFT,PAGE_SIZE,APMMU_PAGE_SHARED,0xa)
+
+ MAP_QUEUE(RBUF_PUT_QUEUE, 0x00000);
+ MAP_QUEUE(RBUF_GET_QUEUE, 0x00001);
+ MAP_QUEUE(RBUF_SEND_QUEUE, 0x00040);
+
+ MAP_QUEUE(RBUF_XY_QUEUE, 0x00640);
+ MAP_QUEUE(RBUF_X_QUEUE, 0x00240);
+ MAP_QUEUE(RBUF_Y_QUEUE, 0x00440);
+ MAP_QUEUE(RBUF_XYG_QUEUE, 0x00600);
+ MAP_QUEUE(RBUF_XG_QUEUE, 0x00200);
+ MAP_QUEUE(RBUF_YG_QUEUE, 0x00400);
+ MAP_QUEUE(RBUF_CSI_QUEUE, 0x02004);
+ MAP_QUEUE(RBUF_FOP_QUEUE, 0x02005);
+
+#undef MAP_QUEUE
+
+ if (!tsk->ringbuf) {
+ tsk->ringbuf = (void *)kmalloc(sizeof(*(tsk->ringbuf)),GFP_ATOMIC);
+ if (!tsk->ringbuf)
+ return -ENOMEM;
+ }
+
+ memset(tsk->ringbuf,0,sizeof(*tsk->ringbuf));
+ tsk->ringbuf->ringbuf = rb_ptr;
+ tsk->ringbuf->shared = shared_ptr;
+ tsk->ringbuf->order = order;
+ tsk->ringbuf->write_ptr = mmu_v2p((unsigned)rb_ptr)<<1;
+ tsk->ringbuf->vaddr = RBUF_VBASE;
+
+ memset(tsk->ringbuf->vaddr+RBUF_SHARED_PAGE_OFF,0,PAGE_SIZE);
+ {
+ struct _kernel_cap_shared *_kernel =
+ (struct _kernel_cap_shared *)tsk->ringbuf->vaddr;
+ _kernel->rbuf_read_ptr = (rb_size>>5) - 1;
+ }
+
+ return 0;
+}
+
+
+static int
+ringbuf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int numcells, *phys_cells;
+ extern struct cap_init cap_init;
+
+ switch (cmd) {
+ case CAP_GETINIT:
+ if (copy_to_user((char *)arg,(char *)&cap_init,sizeof(cap_init)))
+ return -EFAULT;
+ break;
+
+ case CAP_SYNC:
+ if (verify_area(VERIFY_READ, (void *) arg, sizeof(int)*2))
+ return -EFAULT;
+ if (get_user(numcells,(int *)arg)) return -EFAULT;
+		if (get_user(phys_cells,
+			     (int **)(((int *)arg)+1))) return -EFAULT;
+ if (verify_area(VERIFY_READ,phys_cells,sizeof(int)*numcells))
+ return -EFAULT;
+ return ap_sync(numcells,phys_cells);
+ break;
+
+ case CAP_SETGANG:
+ {
+ int v;
+ if (get_user(v,(int *)arg)) return -EFAULT;
+ mpp_set_gang_factor(v);
+ break;
+ }
+
+ case CAP_MAP:
+ return cap_map(arg);
+
+ default:
+ printk("unknown ringbuf ioctl %d\n",cmd);
+ return -EINVAL;
+ }
+ return 0;
+}
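+
+/*
+ * Sketch of user-space usage (illustrative only; the CAP_* ioctl
+ * numbers and struct cap_init come from the ap1000/aplib headers, and
+ * /proc/XX/ringbuf is the node mentioned at the top of this file):
+ *
+ *	int fd = open("/proc/XX/ringbuf", O_RDWR);
+ *	struct cap_init ci;
+ *	ioctl(fd, CAP_GETINIT, &ci);
+ *	ioctl(fd, CAP_MAP, RBUF_RING_BUFFER_OFFSET + 2*128*1024);
+ *
+ * CAP_MAP is given the total window size; cap_map() subtracts
+ * RBUF_RING_BUFFER_OFFSET and halves the rest to get the ring size,
+ * which must be 128kB, 512kB, 2MB or 8MB.
+ */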
+
+
+static struct file_operations proc_ringbuf_operations = {
+	NULL,			/* lseek */
+	NULL,			/* read */
+	NULL,			/* write */
+ NULL, /* readdir */
+ NULL, /* poll */
+ ringbuf_ioctl, /* ioctl */
+ NULL, /* mmap */
+ NULL, /* no special open code */
+ NULL, /* no special release code */
+ NULL /* can't fsync */
+};
+
+struct inode_operations proc_ringbuf_inode_operations = {
+ &proc_ringbuf_operations, /* default base directory file-ops */
+ NULL, /* create */
+ NULL, /* lookup */
+ NULL, /* link */
+ NULL, /* unlink */
+ NULL, /* symlink */
+ NULL, /* mkdir */
+ NULL, /* rmdir */
+ NULL, /* mknod */
+ NULL, /* rename */
+ NULL, /* readlink */
+ NULL, /* follow_link */
+ NULL, /* readpage */
+ NULL, /* writepage */
+ NULL, /* bmap */
+ NULL, /* truncate */
+ NULL /* permission */
+};
diff --git a/drivers/ap1000/smt-types.h b/drivers/ap1000/smt-types.h
new file mode 100644
index 000000000..b17c83176
--- /dev/null
+++ b/drivers/ap1000/smt-types.h
@@ -0,0 +1,167 @@
+ /*
+ * Copyright 1996 The Australian National University.
+ * Copyright 1996 Fujitsu Laboratories Limited
+ *
+ * This software may be distributed under the terms of the Gnu
+ * Public License version 2 or later
+ */
+/*
+ * Definitions for FDDI Station Management.
+ */
+
+/*
+ * FDDI-COMMON types.
+ */
+
+typedef unsigned int Counter; /* 32-bit event counter */
+
+typedef enum {
+ cp_isolated,
+ cp_local,
+ cp_secondary,
+ cp_primary,
+ cp_concatenated,
+ cp_thru
+} CurrentPath;
+
+typedef char Flag;
+
+typedef unsigned char LongAddressType[6];
+
+typedef enum {
+ pt_a,
+ pt_b,
+ pt_s,
+ pt_m,
+ pt_none
+} PortType;
+
+typedef unsigned short ResourceId;
+
+typedef int Time; /* time in 80ns units */
+#define FDDI_TIME_UNIT 80e-9 /* 80 nanoseconds */
+#define SECS_TO_FDDI_TIME(s) ((int)((s)/FDDI_TIME_UNIT+0.99))
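+/* e.g. SECS_TO_FDDI_TIME(1.0e-3) == 12500: 1ms is 12500 80ns units */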
+
+typedef int TimerTwosComplement;
+
+/*
+ * FDDI-SMT types.
+ */
+typedef enum {
+ ec_Out,
+ ec_In,
+ ec_Trace,
+ ec_Leave,
+ ec_Path_Test,
+ ec_Insert,
+ ec_Check,
+ ec_Deinsert
+} ECMState;
+
+/*
+ * FDDI-MAC types.
+ */
+typedef enum {
+ dat_none,
+ dat_pass,
+ dat_fail
+} DupAddressTest;
+
+typedef unsigned short DupCondition;
+#define DC_MYDUP 1
+#define DC_UNADUP 2
+
+typedef unsigned short FS_Functions;
+#define FSF_FS_REPEATING 1
+#define FSF_FS_SETTING 2
+#define FSF_FS_CLEARING 4
+
+typedef unsigned char NACondition;
+#define NAC_UNACHANGE 1
+#define NAC_DNACHANGE 2
+
+typedef enum {
+ rmt_Isolated,
+ rmt_Non_Op,
+ rmt_Ring_Op,
+ rmt_Detect,
+ rmt_Non_Op_Dup,
+ rmt_Ring_Op_Dup,
+ rmt_Directed,
+ rmt_Trace
+} RMTState;
+
+typedef unsigned char ShortAddressType[2];
+
+/*
+ * FDDI-PATH types.
+ */
+typedef unsigned short TraceStatus;
+#define TS_TRACEINITIATED 1
+#define TS_TRACEPROPAGATED 2
+#define TS_TRACETERMINATED 4
+#define TS_TRACETIMEOUT 8
+
+/*
+ * FDDI-PORT types.
+ */
+typedef enum {
+ PC_Maint,
+ PC_Enable,
+ PC_Disable,
+ PC_Start,
+ PC_Stop
+} ActionType;
+
+typedef unsigned char ConnectionPolicies;
+#define PC_MAC_LCT 1
+#define PC_MAC_LOOP 2
+
+typedef enum {
+ cs_disabled,
+ cs_connecting,
+ cs_standby,
+ cs_active
+} ConnectState;
+
+typedef enum {
+ ls_qls,
+ ls_ils,
+ ls_mls,
+ ls_hls,
+ ls_pdr,
+ ls_lsu,
+ ls_nls
+} LineState;
+
+typedef enum {
+ pc_Off,
+ pc_Break,
+ pc_Trace,
+ pc_Connect,
+ pc_Next,
+ pc_Signal,
+ pc_Join,
+ pc_Verify,
+ pc_Active,
+ pc_Maint
+} PCMState;
+
+typedef enum {
+ pcw_none,
+ pcw_mm,
+ pcw_otherincompatible,
+ pcw_pathnotavailable
+} PC_Withhold;
+
+typedef enum {
+ pmd_multimode,
+ pmd_single_mode1,
+ pmd_single_mode2,
+ pmd_sonet,
+ pmd_low_cost_fiber,
+ pmd_twisted_pair,
+ pmd_unknown,
+ pmd_unspecified
+} PMDClass;
+