path: root/arch/ia64/sn
author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /arch/ia64/sn
tag        v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/ia64/sn')
-rw-r--r--  arch/ia64/sn/Makefile                            |  14
-rw-r--r--  arch/ia64/sn/include/ioerror.h                   |  81
-rw-r--r--  arch/ia64/sn/include/pci/pcibr_provider.h        | 149
-rw-r--r--  arch/ia64/sn/include/pci/pcibus_provider_defs.h  |  43
-rw-r--r--  arch/ia64/sn/include/pci/pcidev.h                |  54
-rw-r--r--  arch/ia64/sn/include/pci/pic.h                   | 261
-rw-r--r--  arch/ia64/sn/include/pci/tiocp.h                 | 256
-rw-r--r--  arch/ia64/sn/include/tio.h                       |  37
-rw-r--r--  arch/ia64/sn/include/xtalk/hubdev.h              |  67
-rw-r--r--  arch/ia64/sn/include/xtalk/xbow.h                | 291
-rw-r--r--  arch/ia64/sn/include/xtalk/xwidgetdev.h          |  70
-rw-r--r--  arch/ia64/sn/kernel/Makefile                     |  12
-rw-r--r--  arch/ia64/sn/kernel/bte.c                        | 453
-rw-r--r--  arch/ia64/sn/kernel/bte_error.c                  | 198
-rw-r--r--  arch/ia64/sn/kernel/huberror.c                   | 201
-rw-r--r--  arch/ia64/sn/kernel/idle.c                       |  30
-rw-r--r--  arch/ia64/sn/kernel/io_init.c                    | 411
-rw-r--r--  arch/ia64/sn/kernel/iomv.c                       |  70
-rw-r--r--  arch/ia64/sn/kernel/irq.c                        | 431
-rw-r--r--  arch/ia64/sn/kernel/klconflib.c                  | 108
-rw-r--r--  arch/ia64/sn/kernel/machvec.c                    |  11
-rw-r--r--  arch/ia64/sn/kernel/mca.c                        | 135
-rw-r--r--  arch/ia64/sn/kernel/setup.c                      | 621
-rw-r--r--  arch/ia64/sn/kernel/sn2/Makefile                 |  13
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c                  |  34
-rw-r--r--  arch/ia64/sn/kernel/sn2/io.c                     | 101
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c          | 279
-rw-r--r--  arch/ia64/sn/kernel/sn2/ptc_deadlock.S           |  82
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c                | 295
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c              | 690
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c             | 149
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c                  |  36
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer_interrupt.c        |  63
-rw-r--r--  arch/ia64/sn/pci/Makefile                        |  10
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c                       | 363
-rw-r--r--  arch/ia64/sn/pci/pcibr/Makefile                  |  11
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_ate.c               | 188
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c               | 379
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c          | 170
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_reg.c               | 282
40 files changed, 7149 insertions, 0 deletions
diff --git a/arch/ia64/sn/Makefile b/arch/ia64/sn/Makefile
new file mode 100644
index 000000000000..a269f6d84c29
--- /dev/null
+++ b/arch/ia64/sn/Makefile
@@ -0,0 +1,14 @@
+# arch/ia64/sn/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn ia64 subplatform
+#
+
+CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
+
+obj-y += kernel/ pci/
diff --git a/arch/ia64/sn/include/ioerror.h b/arch/ia64/sn/include/ioerror.h
new file mode 100644
index 000000000000..e68f2b0789a7
--- /dev/null
+++ b/arch/ia64/sn/include/ioerror.h
@@ -0,0 +1,81 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_IOERROR_H
+#define _ASM_IA64_SN_IOERROR_H
+
+/*
+ * IO error structure.
+ *
+ * This structure would expand to hold the information retrieved from
+ * all IO related error registers.
+ *
+ * This structure is defined to hold all system specific
+ * information related to a single error.
+ *
+ * This serves a couple of purposes.
+ * - Error handling often involves translating one form of address to
+ * another. So, instead of having different data structures at each level,
+ * we have a single structure, and the appropriate fields get filled in
+ * at each layer.
+ * - This provides a way to dump all error related information in any layer
+ * of error handling (debugging aid).
+ *
+ * A second possibility is to allow each layer to define its own error
+ * data structure, and fill in the proper fields. This has the advantage
+ * of isolating the layers.
+ * A big concern is the potential stack usage (and overflow), if each layer
+ * defines these structures on the stack (assuming we don't want to use
+ * kmalloc).
+ *
+ * Any layer wishing to pass extra information to the next layer in the
+ * error handling hierarchy can do so as a separate parameter.
+ */
+
+typedef struct io_error_s {
+ /* Bit fields indicating which structure fields are valid */
+ union {
+ struct {
+ unsigned ievb_errortype:1;
+ unsigned ievb_widgetnum:1;
+ unsigned ievb_widgetdev:1;
+ unsigned ievb_srccpu:1;
+ unsigned ievb_srcnode:1;
+ unsigned ievb_errnode:1;
+ unsigned ievb_sysioaddr:1;
+ unsigned ievb_xtalkaddr:1;
+ unsigned ievb_busspace:1;
+ unsigned ievb_busaddr:1;
+ unsigned ievb_vaddr:1;
+ unsigned ievb_memaddr:1;
+ unsigned ievb_epc:1;
+ unsigned ievb_ef:1;
+ unsigned ievb_tnum:1;
+ } iev_b;
+ unsigned iev_a;
+ } ie_v;
+
+ short ie_errortype; /* error type: extra info about error */
+ short ie_widgetnum; /* Widget number that's in error */
+ short ie_widgetdev; /* Device within widget in error */
+ cpuid_t ie_srccpu; /* CPU on srcnode generating error */
+ cnodeid_t ie_srcnode; /* Node which caused the error */
+ cnodeid_t ie_errnode; /* Node where error was noticed */
+ iopaddr_t ie_sysioaddr; /* Sys specific IO address */
+ iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
+ iopaddr_t ie_busspace; /* Bus specific address space */
+ iopaddr_t ie_busaddr; /* Bus specific address */
+ caddr_t ie_vaddr; /* Virtual address of error */
+ iopaddr_t ie_memaddr; /* Physical memory address */
+ caddr_t ie_epc; /* pc when error reported */
+ caddr_t ie_ef; /* eframe when error reported */
+ short ie_tnum; /* Xtalk TNUM field */
+} ioerror_t;
+
+#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
+#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
+
+#endif /* _ASM_IA64_SN_IOERROR_H */
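The IOERROR_INIT/IOERROR_SETVALUE pair keeps the valid-bit union in lockstep with the value fields: clearing the overlapping iev_a word resets every valid bit in one store, and the token-pasting setter always updates a value and its valid bit together. A minimal standalone sketch of the same pattern, trimmed to two fields (the demo struct and main() are illustrative, not part of this patch):

    #include <stdio.h>

    /* Trimmed re-creation of ioerror_t: one valid bit per value field. */
    typedef struct io_error_demo {
            union {
                    struct {
                            unsigned ievb_errortype:1;
                            unsigned ievb_widgetnum:1;
                    } iev_b;
                    unsigned iev_a;     /* writing 0 clears all valid bits */
            } ie_v;
            short ie_errortype;
            short ie_widgetnum;
    } ioerror_demo_t;

    #define IOERROR_INIT(e)         do { (e)->ie_v.iev_a = 0; } while (0)
    #define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); \
                                         (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)

    int main(void)
    {
            ioerror_demo_t e;

            IOERROR_INIT(&e);
            IOERROR_SETVALUE(&e, widgetnum, 9);  /* value + valid bit together */
            printf("widgetnum: valid=%u value=%d; errortype: valid=%u\n",
                   e.ie_v.iev_b.ievb_widgetnum, e.ie_widgetnum,
                   e.ie_v.iev_b.ievb_errortype);
            return 0;
    }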
diff --git a/arch/ia64/sn/include/pci/pcibr_provider.h b/arch/ia64/sn/include/pci/pcibr_provider.h
new file mode 100644
index 000000000000..b1f05ffec70b
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcibr_provider.h
@@ -0,0 +1,149 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
+
+/* Workarounds */
+#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
+
+#define BUSTYPE_MASK 0x1
+
+/* Macros given a pcibus structure */
+#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
+#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
+ asic == PCIIO_ASIC_TYPE_TIOCP)
+#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+
+
+/*
+ * The different PCI Bridge types supported on the SGI Altix platforms
+ */
+#define PCIBR_BRIDGETYPE_UNKNOWN -1
+#define PCIBR_BRIDGETYPE_PIC 2
+#define PCIBR_BRIDGETYPE_TIOCP 3
+
+/*
+ * Bridge 64bit Direct Map Attributes
+ */
+#define PCI64_ATTR_PREF (1ull << 59)
+#define PCI64_ATTR_PREC (1ull << 58)
+#define PCI64_ATTR_VIRTUAL (1ull << 57)
+#define PCI64_ATTR_BAR (1ull << 56)
+#define PCI64_ATTR_SWAP (1ull << 55)
+#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
+
+#define PCI32_LOCAL_BASE 0
+#define PCI32_MAPPED_BASE 0x40000000
+#define PCI32_DIRECT_BASE 0x80000000
+
+#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \
+ (uint64_t)(x) >= PCI32_MAPPED_BASE)
+#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE)
+
+
+/*
+ * Bridge PMU Address Translation Entry Attributes
+ */
+#define PCI32_ATE_V (0x1 << 0)
+#define PCI32_ATE_CO (0x1 << 1)
+#define PCI32_ATE_PREC (0x1 << 2)
+#define PCI32_ATE_PREF (0x1 << 3)
+#define PCI32_ATE_BAR (0x1 << 4)
+#define PCI32_ATE_ADDR_SHFT 12
+
+#define MINIMAL_ATES_REQUIRED(addr, size) \
+ (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
+
+#define MINIMAL_ATE_FLAG(addr, size) \
+ (MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0)
+
+/* bit 29 of the pci address is the SWAP bit */
+#define ATE_SWAPSHIFT 29
+#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
+#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
+
+/*
+ * I/O page size
+ */
+#if PAGE_SIZE < 16384
+#define IOPFNSHIFT 12 /* 4K per mapped page */
+#else
+#define IOPFNSHIFT 14 /* 16K per mapped page */
+#endif
+
+#define IOPGSIZE (1 << IOPFNSHIFT)
+#define IOPG(x) ((x) >> IOPFNSHIFT)
+#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
+
+#define PCIBR_DEV_SWAP_DIR (1ull << 19)
+#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
+
+/*
+ * PMU resources.
+ */
+struct ate_resource{
+ uint64_t *ate;
+ uint64_t num_ate;
+ uint64_t lowest_free_index;
+};
+
+struct pcibus_info {
+ struct pcibus_bussoft pbi_buscommon; /* common header */
+ uint32_t pbi_moduleid;
+ short pbi_bridge_type;
+ short pbi_bridge_mode;
+
+ struct ate_resource pbi_int_ate_resource;
+ uint64_t pbi_int_ate_size;
+
+ uint64_t pbi_dir_xbase;
+ char pbi_hub_xid;
+
+ uint64_t pbi_devreg[8];
+ spinlock_t pbi_lock;
+
+ uint32_t pbi_valid_devices;
+ uint32_t pbi_enabled_devices;
+};
+
+/*
+ * pcibus_info structure locking macros
+ */
+inline static unsigned long
+pcibr_lock(struct pcibus_info *pcibus_info)
+{
+ unsigned long flag;
+ spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
+ return(flag);
+}
+#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
+
+extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
+extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
+extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
+
+/*
+ * prototypes for the bridge asic register access routines in pcibr_reg.c
+ */
+extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t);
+extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t);
+extern uint64_t pcireg_tflush_get(struct pcibus_info *);
+extern uint64_t pcireg_intr_status_get(struct pcibus_info *);
+extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t);
+extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t);
+extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t);
+extern void pcireg_force_intr_set(struct pcibus_info *, int);
+extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int);
+extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t);
+extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int);
+extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
+extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
+extern int pcibr_ate_alloc(struct pcibus_info *, int);
+extern void pcibr_ate_free(struct pcibus_info *, int);
+extern void ate_write(struct pcibus_info *, int, int, uint64_t);
+#endif
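pcibr_lock()/pcibr_unlock() are thin wrappers around spin_lock_irqsave()/spin_unlock_irqrestore() on the per-bus pbi_lock. A sketch of the intended usage, assuming kernel context; the helper name and the devreg update are hypothetical:

    /* Hypothetical helper showing the pbi_lock discipline. */
    static void demo_devreg_set(struct pcibus_info *pcibus_info,
                                int device, uint64_t bits)
    {
            unsigned long flag;

            flag = pcibr_lock(pcibus_info);          /* irqs off, lock held */
            pcibus_info->pbi_devreg[device] |= bits; /* update shadow register */
            pcibr_unlock(pcibus_info, flag);         /* lock dropped, irqs restored */
    }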
diff --git a/arch/ia64/sn/include/pci/pcibus_provider_defs.h b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
new file mode 100644
index 000000000000..07065615bbea
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcibus_provider_defs.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
+
+/*
+ * SN pci asic types. Do not ever renumber these or reuse values. The
+ * values must agree with what prom thinks they are.
+ */
+
+#define PCIIO_ASIC_TYPE_UNKNOWN 0
+#define PCIIO_ASIC_TYPE_PPB 1
+#define PCIIO_ASIC_TYPE_PIC 2
+#define PCIIO_ASIC_TYPE_TIOCP 3
+
+/*
+ * Common pciio bus provider data. There should be one of these as the
+ * first field in any pciio based provider soft structure (e.g. pcibr_soft,
+ * tioca_soft, etc.).
+ */
+
+struct pcibus_bussoft {
+ uint32_t bs_asic_type; /* chipset type */
+ uint32_t bs_xid; /* xwidget id */
+ uint64_t bs_persist_busnum; /* Persistent Bus Number */
+ uint64_t bs_legacy_io; /* legacy io pio addr */
+ uint64_t bs_legacy_mem; /* legacy mem pio addr */
+ uint64_t bs_base; /* widget base */
+ struct xwidget_info *bs_xwidget_info;
+};
+
+/*
+ * DMA mapping flags
+ */
+
+#define SN_PCIDMA_CONSISTENT 0x0001
+
+#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
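The "first field" rule above leans on the C guarantee that a pointer to a structure also points to its initial member, so a generic struct pcibus_bussoft * handed around by common code can be cast back to the provider's own soft structure. A sketch with a hypothetical provider type (tioca_soft_demo is illustrative only):

    /* Hypothetical provider soft structure; the common header stays first. */
    struct tioca_soft_demo {
            struct pcibus_bussoft ca_common;   /* must remain the first field */
            int ca_private_state;
    };

    static inline struct tioca_soft_demo *
    to_tioca_soft(struct pcibus_bussoft *bussoft)
    {
            /* valid because ca_common sits at offset 0 */
            return (struct tioca_soft_demo *)bussoft;
    }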
diff --git a/arch/ia64/sn/include/pci/pcidev.h b/arch/ia64/sn/include/pci/pcidev.h
new file mode 100644
index 000000000000..81eb95d3bf47
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pcidev.h
@@ -0,0 +1,54 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
+#define _ASM_IA64_SN_PCI_PCIDEV_H
+
+#include <linux/pci.h>
+
+extern struct sn_irq_info **sn_irq;
+
+#define SN_PCIDEV_INFO(pci_dev) \
+ ((struct pcidev_info *)(pci_dev)->sysdata)
+
+/*
+ * Given a pci_bus, return the sn pcibus_bussoft struct. Note that
+ * this only works for root busses, not for busses represented by PPB's.
+ */
+
+#define SN_PCIBUS_BUSSOFT(pci_bus) \
+ ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
+
+/*
+ * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
+ * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
+ * to possible PPBs in the path.
+ */
+
+#define SN_PCIDEV_BUSSOFT(pci_dev) \
+ (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
+
+#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
+#define PCIIO_SLOT_NONE 255
+#define PCIIO_FUNC_NONE 255
+#define PCIIO_VENDOR_ID_NONE (-1)
+
+struct pcidev_info {
+ uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
+ uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
+
+ struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
+ struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
+ struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
+
+ struct sn_irq_info *pdi_sn_irq_info;
+};
+
+extern void sn_irq_fixup(struct pci_dev *pci_dev,
+ struct sn_irq_info *sn_irq_info);
+
+#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
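Expanded, SN_PCIDEV_BUSSOFT() is a two-hop walk: pci_dev->sysdata carries the pcidev_info, and the host pcidev_info (rather than pci_dev->bus) supplies the bus soft structure, which is what lets it cope with PPBs in the path. The same walk spelled out, assuming kernel context (the helper name is hypothetical):

    /* Equivalent to SN_PCIDEV_BUSSOFT(pdev), one hop per line. */
    static struct pcibus_bussoft *demo_bussoft(struct pci_dev *pdev)
    {
            struct pcidev_info *info = SN_PCIDEV_INFO(pdev); /* pdev->sysdata */

            /* the host pcidev_info is the root-bus device, past any PPBs */
            return info->pdi_host_pcidev_info->pdi_pcibus_info;
    }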
diff --git a/arch/ia64/sn/include/pci/pic.h b/arch/ia64/sn/include/pci/pic.h
new file mode 100644
index 000000000000..fd18acecb1e6
--- /dev/null
+++ b/arch/ia64/sn/include/pci/pic.h
@@ -0,0 +1,261 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_PIC_H
+#define _ASM_IA64_SN_PCI_PIC_H
+
+/*
+ * PIC AS DEVICE ZERO
+ * ------------------
+ *
+ * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
+ * be designated as 'device 0'. That is a departure from earlier SGI
+ * PCI bridges. Because of that we use config space 1 to access the
+ * config space of the first actual PCI device on the bus.
+ * Here's what the PIC manual says:
+ *
+ * The current PCI-X bus specification now defines that the parent
+ * host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
+ * reduced the total number of devices from 8 to 4 and removed the
+ * device registers and windows, now only supporting devices 0, 1, 2, and
+ * 3. PIC did leave all 8 configuration space windows. The reason was
+ * there was nothing to gain by removing them. Herein lies the problem.
+ * The device numbering we do using 0 through 3 is unrelated to the device
+ * numbering which PCI-X requires in configuration space. In the past we
+ * correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
+ * PCI-X requires we start at 1, not 0, and currently the PX brick
+ * does associate our:
+ *
+ * device 0 with configuration space window 1,
+ * device 1 with configuration space window 2,
+ * device 2 with configuration space window 3,
+ * device 3 with configuration space window 4.
+ *
+ * The net effect is that all config space accesses are off-by-one with
+ * relation to other per-slot accesses on the PIC.
+ * Here is a table that shows some of that:
+ *
+ * Internal Slot#
+ * |
+ * | 0 1 2 3
+ * ----------|---------------------------------------
+ * config | 0x21000 0x22000 0x23000 0x24000
+ * |
+ * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
+ * |
+ * odd rrb | n/a 0[1] n/a 1[1]
+ * |
+ * int dev | 00 01 10 11
+ * |
+ * ext slot# | 1 2 3 4
+ * ----------|---------------------------------------
+ */
+
+#define PIC_ATE_TARGETID_SHFT 8
+#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL
+#define PIC_PCI64_ATTR_TARG_SHFT 60
+
+
+/*****************************************************************************
+ *********************** PIC MMR structure mapping ***************************
+ *****************************************************************************/
+
+/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
+ * of a 64-bit register. When writing PIC registers, always write the
+ * entire 64 bits.
+ */
+
+struct pic {
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- Standard Widget Configuration */
+ uint64_t p_wid_id; /* 0x000000 */
+ uint64_t p_wid_stat; /* 0x000008 */
+ uint64_t p_wid_err_upper; /* 0x000010 */
+ uint64_t p_wid_err_lower; /* 0x000018 */
+ #define p_wid_err p_wid_err_lower
+ uint64_t p_wid_control; /* 0x000020 */
+ uint64_t p_wid_req_timeout; /* 0x000028 */
+ uint64_t p_wid_int_upper; /* 0x000030 */
+ uint64_t p_wid_int_lower; /* 0x000038 */
+ #define p_wid_int p_wid_int_lower
+ uint64_t p_wid_err_cmdword; /* 0x000040 */
+ uint64_t p_wid_llp; /* 0x000048 */
+ uint64_t p_wid_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
+ uint64_t p_wid_aux_err; /* 0x000058 */
+ uint64_t p_wid_resp_upper; /* 0x000060 */
+ uint64_t p_wid_resp_lower; /* 0x000068 */
+ #define p_wid_resp p_wid_resp_lower
+ uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */
+ uint64_t p_wid_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ uint64_t p_dir_map; /* 0x000080 */
+ uint64_t _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ uint64_t p_map_fault; /* 0x000090 */
+ uint64_t _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ uint64_t p_arb; /* 0x0000A0 */
+ uint64_t _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ uint64_t p_ate_parity_err; /* 0x0000B0 */
+ uint64_t _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ uint64_t p_bus_timeout; /* 0x0000C0 */
+ uint64_t p_pci_cfg; /* 0x0000C8 */
+ uint64_t p_pci_err_upper; /* 0x0000D0 */
+ uint64_t p_pci_err_lower; /* 0x0000D8 */
+ #define p_pci_err p_pci_err_lower
+ uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ uint64_t p_int_status; /* 0x000100 */
+ uint64_t p_int_enable; /* 0x000108 */
+ uint64_t p_int_rst_stat; /* 0x000110 */
+ uint64_t p_int_mode; /* 0x000118 */
+ uint64_t p_int_device; /* 0x000120 */
+ uint64_t p_int_host_err; /* 0x000128 */
+ uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */
+ uint64_t p_err_int_view; /* 0x000170 */
+ uint64_t p_mult_int; /* 0x000178 */
+ uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */
+ uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ uint64_t p_device[4]; /* 0x0002{00,,,18} */
+ uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
+ uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
+ uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define p_even_resp p_rrb_map[0] /* 0x000280 */
+ #define p_odd_resp p_rrb_map[1] /* 0x000288 */
+ uint64_t p_resp_status; /* 0x000290 */
+ uint64_t p_resp_clear; /* 0x000298 */
+
+ uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ uint64_t upper; /* 0x0003{00,,,F0} */
+ uint64_t lower; /* 0x0003{08,,,F8} */
+ } p_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
+ uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
+ uint64_t inflight; /* 0x000{410,,,5D0} */
+ uint64_t prefetch; /* 0x000{418,,,5D8} */
+ uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
+ uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
+ uint64_t max_latency; /* 0x000{430,,,5F0} */
+ uint64_t clear_all; /* 0x000{438,,,5F8} */
+ } p_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ uint64_t p_pcix_bus_err_addr; /* 0x000600 */
+ uint64_t p_pcix_bus_err_attr; /* 0x000608 */
+ uint64_t p_pcix_bus_err_data; /* 0x000610 */
+ uint64_t p_pcix_pio_split_addr; /* 0x000618 */
+ uint64_t p_pcix_pio_split_attr; /* 0x000620 */
+ uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */
+ uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */
+ uint64_t p_pcix_timeout; /* 0x000638 */
+
+ uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */
+ uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */
+ } p_pcix_read_buf_64[16];
+
+ struct {
+ uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */
+ uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */
+ uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */
+ uint64_t __pad1; /* 0x000{B18,,,BF8} */
+ } p_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
+ uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */
+
+ /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
+ uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
+
+ char _pad_014000[0x18000 - 0x014000];
+
+ /* 0x18000-0x197F8 -- PIC Write Request Ram */
+ uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x20000 - 0x019800];
+
+ /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
+ uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
+ uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
+ uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
+ union {
+ uint8_t c[0x100 / 1];
+ uint16_t s[0x100 / 2];
+ uint32_t l[0x100 / 4];
+ uint64_t d[0x100 / 8];
+ } f[8];
+ } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
+ uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
+ uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
+ uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ uint8_t c[0x100 / 1];
+ uint16_t s[0x100 / 2];
+ uint32_t l[0x100 / 4];
+ uint64_t d[0x100 / 8];
+ } f[8];
+ } p_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ uint8_t c[8 / 1];
+ uint16_t s[8 / 2];
+ uint32_t l[8 / 4];
+ uint64_t d[8 / 8];
+ } p_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+ /* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ uint8_t c[8 / 1];
+ uint16_t s[8 / 2];
+ uint32_t l[8 / 4];
+ uint64_t d[8 / 8];
+ } p_pcix_cycle; /* 0x040000-0x040007 */
+};
+
+#endif /* _ASM_IA64_SN_PCI_PIC_H */
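The off-by-one mapping described at the top of this header reduces to simple arithmetic: internal slot n is reached through configuration window n + 1, i.e. PIO offset 0x20000 + (n + 1) * 0x1000 into p_type0_cfg_dev[]. A standalone check of that arithmetic (illustration only):

    #include <stdio.h>

    int main(void)
    {
            /* internal slot -> config window -> offset within the PIC */
            for (int slot = 0; slot < 4; slot++)
                    printf("slot %d -> window %d at offset 0x%05x\n",
                           slot, slot + 1, 0x20000 + (slot + 1) * 0x1000);
            return 0;   /* 0x21000, 0x22000, 0x23000, 0x24000, as in the table */
    }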
diff --git a/arch/ia64/sn/include/pci/tiocp.h b/arch/ia64/sn/include/pci/tiocp.h
new file mode 100644
index 000000000000..f07c83b2bf6e
--- /dev/null
+++ b/arch/ia64/sn/include/pci/tiocp.h
@@ -0,0 +1,256 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PCI_TIOCP_H
+#define _ASM_IA64_SN_PCI_TIOCP_H
+
+#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFFUL
+#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
+
+
+/*****************************************************************************
+ ********************** TIOCP MMR structure mapping **************************
+ *****************************************************************************/
+
+struct tiocp{
+
+ /* 0x000000-0x00FFFF -- Local Registers */
+
+ /* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
+ uint64_t cp_id; /* 0x000000 */
+ uint64_t cp_stat; /* 0x000008 */
+ uint64_t cp_err_upper; /* 0x000010 */
+ uint64_t cp_err_lower; /* 0x000018 */
+ #define cp_err cp_err_lower
+ uint64_t cp_control; /* 0x000020 */
+ uint64_t cp_req_timeout; /* 0x000028 */
+ uint64_t cp_intr_upper; /* 0x000030 */
+ uint64_t cp_intr_lower; /* 0x000038 */
+ #define cp_intr cp_intr_lower
+ uint64_t cp_err_cmdword; /* 0x000040 */
+ uint64_t _pad_000048; /* 0x000048 */
+ uint64_t cp_tflush; /* 0x000050 */
+
+ /* 0x000058-0x00007F -- Bridge-specific Configuration */
+ uint64_t cp_aux_err; /* 0x000058 */
+ uint64_t cp_resp_upper; /* 0x000060 */
+ uint64_t cp_resp_lower; /* 0x000068 */
+ #define cp_resp cp_resp_lower
+ uint64_t cp_tst_pin_ctrl; /* 0x000070 */
+ uint64_t cp_addr_lkerr; /* 0x000078 */
+
+ /* 0x000080-0x00008F -- PMU & MAP */
+ uint64_t cp_dir_map; /* 0x000080 */
+ uint64_t _pad_000088; /* 0x000088 */
+
+ /* 0x000090-0x00009F -- SSRAM */
+ uint64_t cp_map_fault; /* 0x000090 */
+ uint64_t _pad_000098; /* 0x000098 */
+
+ /* 0x0000A0-0x0000AF -- Arbitration */
+ uint64_t cp_arb; /* 0x0000A0 */
+ uint64_t _pad_0000A8; /* 0x0000A8 */
+
+ /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
+ uint64_t cp_ate_parity_err; /* 0x0000B0 */
+ uint64_t _pad_0000B8; /* 0x0000B8 */
+
+ /* 0x0000C0-0x0000FF -- PCI/GIO */
+ uint64_t cp_bus_timeout; /* 0x0000C0 */
+ uint64_t cp_pci_cfg; /* 0x0000C8 */
+ uint64_t cp_pci_err_upper; /* 0x0000D0 */
+ uint64_t cp_pci_err_lower; /* 0x0000D8 */
+ #define cp_pci_err cp_pci_err_lower
+ uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
+
+ /* 0x000100-0x0001FF -- Interrupt */
+ uint64_t cp_int_status; /* 0x000100 */
+ uint64_t cp_int_enable; /* 0x000108 */
+ uint64_t cp_int_rst_stat; /* 0x000110 */
+ uint64_t cp_int_mode; /* 0x000118 */
+ uint64_t cp_int_device; /* 0x000120 */
+ uint64_t cp_int_host_err; /* 0x000128 */
+ uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */
+ uint64_t cp_err_int_view; /* 0x000170 */
+ uint64_t cp_mult_int; /* 0x000178 */
+ uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */
+ uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */
+
+ /* 0x000200-0x000298 -- Device */
+ uint64_t cp_device[4]; /* 0x0002{00,,,18} */
+ uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
+ uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
+ uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
+ uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */
+ #define cp_even_resp cp_rrb_map[0] /* 0x000280 */
+ #define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
+ uint64_t cp_resp_status; /* 0x000290 */
+ uint64_t cp_resp_clear; /* 0x000298 */
+
+ uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
+
+ /* 0x000300-0x0003F8 -- Buffer Address Match Registers */
+ struct {
+ uint64_t upper; /* 0x0003{00,,,F0} */
+ uint64_t lower; /* 0x0003{08,,,F8} */
+ } cp_buf_addr_match[16];
+
+ /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
+ struct {
+ uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
+ uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
+ uint64_t inflight; /* 0x000{410,,,5D0} */
+ uint64_t prefetch; /* 0x000{418,,,5D8} */
+ uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
+ uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
+ uint64_t max_latency; /* 0x000{430,,,5F0} */
+ uint64_t clear_all; /* 0x000{438,,,5F8} */
+ } cp_buf_count[8];
+
+
+ /* 0x000600-0x0009FF -- PCI/X registers */
+ uint64_t cp_pcix_bus_err_addr; /* 0x000600 */
+ uint64_t cp_pcix_bus_err_attr; /* 0x000608 */
+ uint64_t cp_pcix_bus_err_data; /* 0x000610 */
+ uint64_t cp_pcix_pio_split_addr; /* 0x000618 */
+ uint64_t cp_pcix_pio_split_attr; /* 0x000620 */
+ uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */
+ uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */
+ uint64_t cp_pcix_timeout; /* 0x000638 */
+
+ uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */
+
+ /* 0x000700-0x000737 -- Debug Registers */
+ uint64_t cp_ct_debug_ctl; /* 0x000700 */
+ uint64_t cp_br_debug_ctl; /* 0x000708 */
+ uint64_t cp_mux3_debug_ctl; /* 0x000710 */
+ uint64_t cp_mux4_debug_ctl; /* 0x000718 */
+ uint64_t cp_mux5_debug_ctl; /* 0x000720 */
+ uint64_t cp_mux6_debug_ctl; /* 0x000728 */
+ uint64_t cp_mux7_debug_ctl; /* 0x000730 */
+
+ uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */
+
+ /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
+ struct {
+ uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */
+ uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */
+ } cp_pcix_read_buf_64[16];
+
+ struct {
+ uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */
+ uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */
+ uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */
+ uint64_t __pad1; /* 0x000{B18,,,BF8} */
+ } cp_pcix_write_buf_64[8];
+
+ /* End of Local Registers -- Start of Address Map space */
+
+ char _pad_000c00[0x010000 - 0x000c00];
+
+ /* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
+ uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
+
+ char _pad_012000[0x14000 - 0x012000];
+
+ /* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
+ uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
+
+ char _pad_016000[0x18000 - 0x016000];
+
+ /* 0x18000-0x197F8 -- TIOCP Write Request Ram */
+ uint64_t cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
+ uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
+ uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
+
+ char _pad_019800[0x1C000 - 0x019800];
+
+ /* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
+ uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
+ uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
+ uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
+
+ char _pad_01F000[0x20000 - 0x01F000];
+
+ /* 0x020000-0x020FFF -- Host Device (CP) Configuration Space (not used) */
+ char _pad_020000[0x021000 - 0x20000];
+
+ /* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
+ union {
+ uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
+ uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
+ uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
+ uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
+ union {
+ uint8_t c[0x100 / 1];
+ uint16_t s[0x100 / 2];
+ uint32_t l[0x100 / 4];
+ uint64_t d[0x100 / 8];
+ } f[8];
+ } cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
+
+ /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
+ union {
+ uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
+ uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
+ uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
+ uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
+ union {
+ uint8_t c[0x100 / 1];
+ uint16_t s[0x100 / 2];
+ uint32_t l[0x100 / 4];
+ uint64_t d[0x100 / 8];
+ } f[8];
+ } cp_type1_cfg; /* 0x028000-0x029000 */
+
+ char _pad_029000[0x030000-0x029000];
+
+ /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
+ union {
+ uint8_t c[8 / 1];
+ uint16_t s[8 / 2];
+ uint32_t l[8 / 4];
+ uint64_t d[8 / 8];
+ } cp_pci_iack; /* 0x030000-0x030007 */
+
+ char _pad_030007[0x040000-0x030008];
+
+ /* 0x040000-0x040007 -- PCIX Special Cycle */
+ union {
+ uint8_t c[8 / 1];
+ uint16_t s[8 / 2];
+ uint32_t l[8 / 4];
+ uint64_t d[8 / 8];
+ } cp_pcix_cycle; /* 0x040000-0x040007 */
+
+ char _pad_040007[0x200000-0x040008];
+
+ /* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
+ union {
+ uint8_t c[0x100000 / 1];
+ uint16_t s[0x100000 / 2];
+ uint32_t l[0x100000 / 4];
+ uint64_t d[0x100000 / 8];
+ } cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
+
+ #define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
+
+ char _pad_800000[0xA00000-0x800000];
+
+ /* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
+ union {
+ uint8_t c[0x100000 / 1];
+ uint16_t s[0x100000 / 2];
+ uint32_t l[0x100000 / 4];
+ uint64_t d[0x100000 / 8];
+ } cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
+
+ #define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
+
+};
+
+#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
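The cp_devio(n) selector maps the four external device numbers onto the six raw 1 MB devio windows non-linearly (indices 0, 2, 4, 5), presumably so that devices 0 and 1 keep a pair of consecutive windows each. A standalone demo of the index math (illustration only):

    #include <stdio.h>

    #define CP_DEVIO_IDX(n) (((n) < 2) ? (n) * 2 : (n) + 2)

    int main(void)
    {
            for (int n = 0; n < 4; n++)   /* prints indices 0, 2, 4, 5 */
                    printf("device %d -> cp_devio_raw[%d]\n", n, CP_DEVIO_IDX(n));
            return 0;
    }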
diff --git a/arch/ia64/sn/include/tio.h b/arch/ia64/sn/include/tio.h
new file mode 100644
index 000000000000..0139124dd54a
--- /dev/null
+++ b/arch/ia64/sn/include/tio.h
@@ -0,0 +1,37 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_TIO_H
+#define _ASM_IA64_SN_TIO_H
+
+#define TIO_MMR_ADDR_MOD
+
+#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
+
+#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
+#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
+
+#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
+#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
+#define TIO_ITTE_OFFSET_SHIFT 0
+
+#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
+#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
+#define TIO_ITTE_WIDGET_SHIFT 12
+#define TIO_ITTE_VALID_MASK 0x1
+#define TIO_ITTE_VALID_SHIFT 16
+
+
+#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
+ REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
+ (((((addr) >> TIO_BWIN_SIZE_BITS) & \
+ TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
+ (((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
+ (( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
+
+#endif /* _ASM_IA64_SN_TIO_H */
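TIO_ITTE_PUT packs three fields (big-window offset, widget, valid) into one word before REMOTE_HUB_S stores it. A standalone sketch of just the packing arithmetic; TIO_BWIN_SIZE_BITS is defined in another header, so the value used below (30) is an assumption for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_BWIN_SIZE_BITS  30     /* assumed stand-in for TIO_BWIN_SIZE_BITS */
    #define OFFSET_MASK          0xff   /* TIO_ITTE_OFFSET_MASK, shift 0 */
    #define WIDGET_MASK          0x3    /* TIO_ITTE_WIDGET_MASK, shift 12 */
    #define VALID_MASK           0x1    /* TIO_ITTE_VALID_MASK, shift 16 */

    static uint64_t itte_pack(uint64_t addr, unsigned widget, unsigned valid)
    {
            return ((addr >> DEMO_BWIN_SIZE_BITS) & OFFSET_MASK)
                 | ((uint64_t)(widget & WIDGET_MASK) << 12)
                 | ((uint64_t)(valid & VALID_MASK) << 16);
    }

    int main(void)
    {
            /* big window pointed at widget 2, marked valid */
            printf("ITTE = 0x%llx\n",
                   (unsigned long long)itte_pack(0x80000000ull, 2, 1));
            return 0;   /* 0x12002: offset 2, widget 2, valid 1 */
    }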
diff --git a/arch/ia64/sn/include/xtalk/hubdev.h b/arch/ia64/sn/include/xtalk/hubdev.h
new file mode 100644
index 000000000000..868e7ecae84b
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/hubdev.h
@@ -0,0 +1,67 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
+#define _ASM_IA64_SN_XTALK_HUBDEV_H
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define DEV_PER_WIDGET (2*2*8)
+#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
+#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
+#define IIO_ITTE_WIDGET_SHIFT 8
+
+/*
+ * Use the top big window as a surrogate for the first small window
+ */
+#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+struct sn_flush_device_list {
+ int sfdl_bus;
+ int sfdl_slot;
+ int sfdl_pin;
+ struct bar_list {
+ unsigned long start;
+ unsigned long end;
+ } sfdl_bar_list[6];
+ unsigned long sfdl_force_int_addr;
+ unsigned long sfdl_flush_value;
+ volatile unsigned long *sfdl_flush_addr;
+ uint64_t sfdl_persistent_busnum;
+ struct pcibus_info *sfdl_pcibus_info;
+ spinlock_t sfdl_flush_lock;
+};
+
+/*
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
+ */
+struct sn_flush_nasid_entry {
+ struct sn_flush_device_list **widget_p; /* Used as an array of wid_num */
+ uint64_t iio_itte[8];
+};
+
+struct hubdev_info {
+ geoid_t hdi_geoid;
+ short hdi_nasid;
+ short hdi_peer_nasid; /* Dual Porting Peer */
+
+ struct sn_flush_nasid_entry hdi_flush_nasid_list;
+ struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
+
+
+ void *hdi_nodepda;
+ void *hdi_node_vertex;
+ void *hdi_xtalk_vertex;
+};
+
+extern void hubdev_init_node(nodepda_t *, cnodeid_t);
+extern void hub_error_init(struct hubdev_info *);
+extern void ice_error_init(struct hubdev_info *);
+
+
+#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
diff --git a/arch/ia64/sn/include/xtalk/xbow.h b/arch/ia64/sn/include/xtalk/xbow.h
new file mode 100644
index 000000000000..ec56b3432f17
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/xbow.h
@@ -0,0 +1,291 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XBOW_H
+#define _ASM_IA64_SN_XTALK_XBOW_H
+
+#define XBOW_PORT_8 0x8
+#define XBOW_PORT_C 0xc
+#define XBOW_PORT_F 0xf
+
+#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
+#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
+
+#define XBOW_CREDIT 4
+
+#define MAX_XBOW_NAME 16
+
+/* Register set for each xbow link */
+typedef volatile struct xb_linkregs_s {
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
+ * That's why we put the register first and filler second.
+ */
+ uint32_t link_ibf;
+ uint32_t filler0; /* filler for proper alignment */
+ uint32_t link_control;
+ uint32_t filler1;
+ uint32_t link_status;
+ uint32_t filler2;
+ uint32_t link_arb_upper;
+ uint32_t filler3;
+ uint32_t link_arb_lower;
+ uint32_t filler4;
+ uint32_t link_status_clr;
+ uint32_t filler5;
+ uint32_t link_reset;
+ uint32_t filler6;
+ uint32_t link_aux_status;
+ uint32_t filler7;
+} xb_linkregs_t;
+
+typedef volatile struct xbow_s {
+ /* standard widget configuration 0x000000-0x000057 */
+ struct widget_cfg xb_widget; /* 0x000000 */
+
+ /* helper fieldnames for accessing bridge widget */
+
+#define xb_wid_id xb_widget.w_id
+#define xb_wid_stat xb_widget.w_status
+#define xb_wid_err_upper xb_widget.w_err_upper_addr
+#define xb_wid_err_lower xb_widget.w_err_lower_addr
+#define xb_wid_control xb_widget.w_control
+#define xb_wid_req_timeout xb_widget.w_req_timeout
+#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
+#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
+#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
+#define xb_wid_llp xb_widget.w_llp_cfg
+#define xb_wid_stat_clr xb_widget.w_tflush
+
+/*
+ * we access these through synergy unswizzled space, so the address
+ * gets twiddled (i.e. references to 0x4 actually go to 0x0 and vice versa).
+ * That's why we put the register first and filler second.
+ */
+ /* xbow-specific widget configuration 0x000058-0x0000FF */
+ uint32_t xb_wid_arb_reload; /* 0x00005C */
+ uint32_t _pad_000058;
+ uint32_t xb_perf_ctr_a; /* 0x000064 */
+ uint32_t _pad_000060;
+ uint32_t xb_perf_ctr_b; /* 0x00006c */
+ uint32_t _pad_000068;
+ uint32_t xb_nic; /* 0x000074 */
+ uint32_t _pad_000070;
+
+ /* Xbridge only */
+ uint32_t xb_w0_rst_fnc; /* 0x00007C */
+ uint32_t _pad_000078;
+ uint32_t xb_l8_rst_fnc; /* 0x000084 */
+ uint32_t _pad_000080;
+ uint32_t xb_l9_rst_fnc; /* 0x00008c */
+ uint32_t _pad_000088;
+ uint32_t xb_la_rst_fnc; /* 0x000094 */
+ uint32_t _pad_000090;
+ uint32_t xb_lb_rst_fnc; /* 0x00009c */
+ uint32_t _pad_000098;
+ uint32_t xb_lc_rst_fnc; /* 0x0000a4 */
+ uint32_t _pad_0000a0;
+ uint32_t xb_ld_rst_fnc; /* 0x0000ac */
+ uint32_t _pad_0000a8;
+ uint32_t xb_le_rst_fnc; /* 0x0000b4 */
+ uint32_t _pad_0000b0;
+ uint32_t xb_lf_rst_fnc; /* 0x0000bc */
+ uint32_t _pad_0000b8;
+ uint32_t xb_lock; /* 0x0000c4 */
+ uint32_t _pad_0000c0;
+ uint32_t xb_lock_clr; /* 0x0000cc */
+ uint32_t _pad_0000c8;
+ /* end of Xbridge only */
+ uint32_t _pad_0000d0[12];
+
+ /* Link Specific Registers, port 8..15 0x000100-0x000300 */
+ xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
+#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
+
+} xbow_t;
+
+#define XB_FLAGS_EXISTS 0x1 /* device exists */
+#define XB_FLAGS_MASTER 0x2
+#define XB_FLAGS_SLAVE 0x0
+#define XB_FLAGS_GBR 0x4
+#define XB_FLAGS_16BIT 0x8
+#define XB_FLAGS_8BIT 0x0
+
+/* is widget port number valid? (based on version 7.0 of xbow spec) */
+#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
+
+/* whether to use upper or lower arbitration register, given source widget id */
+#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
+#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
+
+/* offset of arbitration register, given source widget id */
+#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
+
+#define XBOW_WID_ID WIDGET_ID
+#define XBOW_WID_STAT WIDGET_STATUS
+#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
+#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
+#define XBOW_WID_CONTROL WIDGET_CONTROL
+#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
+#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
+#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
+#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
+#define XBOW_WID_LLP WIDGET_LLP_CFG
+#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
+#define XBOW_WID_ARB_RELOAD 0x5c
+#define XBOW_WID_PERF_CTR_A 0x64
+#define XBOW_WID_PERF_CTR_B 0x6c
+#define XBOW_WID_NIC 0x74
+
+/* Xbridge only */
+#define XBOW_W0_RST_FNC 0x00007C
+#define XBOW_L8_RST_FNC 0x000084
+#define XBOW_L9_RST_FNC 0x00008c
+#define XBOW_LA_RST_FNC 0x000094
+#define XBOW_LB_RST_FNC 0x00009c
+#define XBOW_LC_RST_FNC 0x0000a4
+#define XBOW_LD_RST_FNC 0x0000ac
+#define XBOW_LE_RST_FNC 0x0000b4
+#define XBOW_LF_RST_FNC 0x0000bc
+#define XBOW_RESET_FENCE(x) ((x) > 7 && (x) < 16) ? \
+ (XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
+ ((x) == 0) ? XBOW_W0_RST_FNC : 0
+#define XBOW_LOCK 0x0000c4
+#define XBOW_LOCK_CLR 0x0000cc
+/* End of Xbridge only */
+
+/* used only in ide, but defined here within the reserved portion */
+/* of the widget0 address space (before 0xf4) */
+#define XBOW_WID_UNDEF 0xe4
+
+/* xbow link register set base, legal value for x is 0x8..0xf */
+#define XB_LINK_BASE 0x100
+#define XB_LINK_OFFSET 0x40
+#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
+
+#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
+#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
+#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
+#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
+#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
+#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
+#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
+#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
+
+/* link_control(x) */
+#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
+ /* reserved: 0x40000000 */
+#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
+#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */
+#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */
+#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */
+#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */
+#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */
+#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */
+#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */
+ /* reserved: 0x0000fe00 */
+#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
+#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
+#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
+#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
+#define XB_CTRL_RCV_IE 0x00000010 /* receive */
+#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
+ /* reserved: 0x00000004 */
+#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */
+#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
+
+/* link_status(x) */
+#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
+ /* reserved: 0x7ff80000 */
+#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
+#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
+#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
+#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
+#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
+#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
+#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
+#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
+#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
+ /* reserved: 0x00000004 */
+#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
+#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
+
+/* link_aux_status(x) */
+#define XB_AUX_STAT_RCV_CNT 0xff000000
+#define XB_AUX_STAT_XMT_CNT 0x00ff0000
+#define XB_AUX_STAT_TOUT_DST 0x0000ff00
+#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
+#define XB_AUX_STAT_PRESENT 0x00000020
+#define XB_AUX_STAT_PORT_WIDTH 0x00000010
+ /* reserved: 0x0000000f */
+
+/*
+ * link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
+ * register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
+ */
+#define XB_ARB_GBR_MSK 0x1f
+#define XB_ARB_RR_MSK 0x7
+#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
+#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
+#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
+#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
+
+/* XBOW_WID_STAT */
+#define XB_WID_STAT_LINK_INTR_SHFT (24)
+#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
+#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
+#define XB_WID_STAT_WIDGET0_INTR 0x00800000
+#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
+#define XB_WID_STAT_REG_ACC_ERR 0x00000020
+#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
+#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
+#define XB_WID_STAT_XTALK_ERR 0x00000004
+#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
+#define XB_WID_STAT_MULTI_ERR 0x00000001
+
+#define XB_WID_STAT_SRCID_SHFT 6
+
+/* XBOW_WID_CONTROL */
+#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
+#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
+#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
+#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
+
+/* XBOW_WID_INT_UPPER */
+/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
+
+/* XBOW WIDGET part number, in the ID register */
+#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
+#define XBOW_WIDGET_MFGR_NUM 0x0
+#define XXBOW_WIDGET_MFGR_NUM 0x0
+#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
+
+#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
+#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
+#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
+#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
+#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
+
+#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
+#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
+
+/* XBOW_WID_ARB_RELOAD */
+#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
+
+#define IS_XBRIDGE_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define IS_PIC_XBOW(wid) \
+ (XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
+ XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
+
+#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & pv)
+
+#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
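Each external port (0x8..0xf) owns a 0x40-byte link register block starting at offset 0x100, so XB_LINK_REG_BASE() masks the port number down to 0..7 and scales it; the per-register macros then add fixed offsets. A standalone check of the address math (illustration only):

    #include <stdio.h>

    #define MAX_XBOW_PORTS  8
    #define XB_LINK_BASE    0x100
    #define XB_LINK_OFFSET  0x40
    #define XB_LINK_REG_BASE(x) \
            (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
    #define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)

    int main(void)
    {
            for (int port = 0x8; port <= 0xf; port++)
                    printf("port 0x%x: base 0x%03x, link_control 0x%03x\n",
                           port, XB_LINK_REG_BASE(port), XB_LINK_CTRL(port));
            return 0;   /* port 0x8 -> base 0x100 ... port 0xf -> base 0x2c0 */
    }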
diff --git a/arch/ia64/sn/include/xtalk/xwidgetdev.h b/arch/ia64/sn/include/xtalk/xwidgetdev.h
new file mode 100644
index 000000000000..c5f4bc5cc033
--- /dev/null
+++ b/arch/ia64/sn/include/xtalk/xwidgetdev.h
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
+#define _ASM_IA64_SN_XTALK_XWIDGET_H
+
+/* WIDGET_ID */
+#define WIDGET_REV_NUM 0xf0000000
+#define WIDGET_PART_NUM 0x0ffff000
+#define WIDGET_MFG_NUM 0x00000ffe
+#define WIDGET_REV_NUM_SHFT 28
+#define WIDGET_PART_NUM_SHFT 12
+#define WIDGET_MFG_NUM_SHFT 1
+
+#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
+#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
+#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
+#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
+ XWIDGET_REV_NUM(widgetid))
+#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
+
+/* widget configuration registers */
+struct widget_cfg{
+ uint32_t w_id; /* 0x04 */
+ uint32_t w_pad_0; /* 0x00 */
+ uint32_t w_status; /* 0x0c */
+ uint32_t w_pad_1; /* 0x08 */
+ uint32_t w_err_upper_addr; /* 0x14 */
+ uint32_t w_pad_2; /* 0x10 */
+ uint32_t w_err_lower_addr; /* 0x1c */
+ uint32_t w_pad_3; /* 0x18 */
+ uint32_t w_control; /* 0x24 */
+ uint32_t w_pad_4; /* 0x20 */
+ uint32_t w_req_timeout; /* 0x2c */
+ uint32_t w_pad_5; /* 0x28 */
+ uint32_t w_intdest_upper_addr; /* 0x34 */
+ uint32_t w_pad_6; /* 0x30 */
+ uint32_t w_intdest_lower_addr; /* 0x3c */
+ uint32_t w_pad_7; /* 0x38 */
+ uint32_t w_err_cmd_word; /* 0x44 */
+ uint32_t w_pad_8; /* 0x40 */
+ uint32_t w_llp_cfg; /* 0x4c */
+ uint32_t w_pad_9; /* 0x48 */
+ uint32_t w_tflush; /* 0x54 */
+ uint32_t w_pad_10; /* 0x50 */
+};
+
+/*
+ * Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
+ */
+struct xwidget_hwid{
+ int mfg_num;
+ int rev_num;
+ int part_num;
+};
+
+struct xwidget_info{
+
+ struct xwidget_hwid xwi_hwid; /* Widget Identification */
+ char xwi_masterxid; /* Hub's Widget Port Number */
+ void *xwi_hubinfo; /* Hub's provider private info */
+ uint64_t *xwi_hub_provider; /* prom provider functions */
+ void *xwi_vertex;
+};
+
+#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
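The WIDGET_ID register packs revision, part and manufacturer numbers into a single 32-bit word, and the XWIDGET_* macros mask and shift them back out. A standalone decode; the sample ID is made up (an Xbridge-style part number at revision 2):

    #include <stdio.h>
    #include <stdint.h>

    #define WIDGET_REV_NUM        0xf0000000
    #define WIDGET_PART_NUM       0x0ffff000
    #define WIDGET_MFG_NUM        0x00000ffe
    #define WIDGET_REV_NUM_SHFT   28
    #define WIDGET_PART_NUM_SHFT  12
    #define WIDGET_MFG_NUM_SHFT   1

    #define XWIDGET_PART_NUM(w) (((w) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
    #define XWIDGET_REV_NUM(w)  (((w) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
    #define XWIDGET_MFG_NUM(w)  (((w) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)

    int main(void)
    {
            /* hypothetical ID: part 0xd000 (Xbridge), rev 2, mfg 0 */
            uint32_t wid = (2u << WIDGET_REV_NUM_SHFT) |
                           (0xd000u << WIDGET_PART_NUM_SHFT);

            printf("part 0x%x rev 0x%x mfg 0x%x\n",
                   XWIDGET_PART_NUM(wid), XWIDGET_REV_NUM(wid),
                   XWIDGET_MFG_NUM(wid));
            return 0;   /* part 0xd000 rev 0x2 mfg 0x0 */
    }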
diff --git a/arch/ia64/sn/kernel/Makefile b/arch/ia64/sn/kernel/Makefile
new file mode 100644
index 000000000000..6c7f4d9e8ea0
--- /dev/null
+++ b/arch/ia64/sn/kernel/Makefile
@@ -0,0 +1,12 @@
+# arch/ia64/sn/kernel/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
+#
+
+obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
+ huberror.o io_init.o iomv.o klconflib.o sn2/
+obj-$(CONFIG_IA64_GENERIC) += machvec.o
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
new file mode 100644
index 000000000000..ce0bc4085eae
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte.c
@@ -0,0 +1,453 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/shubio.h>
+#include <asm/nodedata.h>
+#include <asm/delay.h>
+
+#include <linux/bootmem.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+
+#include <asm/sn/bte.h>
+
+#ifndef L1_CACHE_MASK
+#define L1_CACHE_MASK (L1_CACHE_BYTES - 1)
+#endif
+
+/* two interfaces on two btes */
+#define MAX_INTERFACES_TO_TRY 4
+
+static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
+{
+ nodepda_t *tmp_nodepda;
+
+ tmp_nodepda = NODEPDA(nasid_to_cnodeid(nasid));
+ return &tmp_nodepda->bte_if[interface];
+
+}
+
+/************************************************************************
+ * Block Transfer Engine copy related functions.
+ *
+ ***********************************************************************/
+
+/*
+ * bte_copy(src, dest, len, mode, notification)
+ *
+ * Use the block transfer engine to move kernel memory from src to dest
+ * using the assigned mode.
+ *
+ * Parameters:
+ * src - physical address of the transfer source.
+ * dest - physical address of the transfer destination.
+ * len - number of bytes to transfer from source to dest.
+ * mode - hardware defined. See reference information
+ * for IBCT0/1 in the SHUB Programmers Reference
+ * notification - kernel virtual address of the notification cache
+ * line. If NULL, the default is used and
+ * the bte_copy is synchronous.
+ *
+ * NOTE: This function requires src, dest, and len to
+ * be cacheline aligned.
+ */
+bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
+{
+ u64 transfer_size;
+ u64 transfer_stat;
+ struct bteinfo_s *bte;
+ bte_result_t bte_status;
+ unsigned long irq_flags;
+ unsigned long itc_end = 0;
+ struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
+ int bte_if_index;
+ int bte_pri, bte_sec;
+
+ BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
+ src, dest, len, mode, notification));
+
+ if (len == 0) {
+ return BTE_SUCCESS;
+ }
+
+ BUG_ON((len & L1_CACHE_MASK) ||
+ (src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
+ BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
+
+ /* CPU 0 (per node) tries bte0 first, CPU 1 tries bte1 first */
+ if (cpuid_to_subnode(smp_processor_id()) == 0) {
+ bte_pri = 0;
+ bte_sec = 1;
+ } else {
+ bte_pri = 1;
+ bte_sec = 0;
+ }
+
+ if (mode & BTE_USE_DEST) {
+ /* try remote then local */
+ btes_to_try[0] = bte_if_on_node(NASID_GET(dest), bte_pri);
+ btes_to_try[1] = bte_if_on_node(NASID_GET(dest), bte_sec);
+ if (mode & BTE_USE_ANY) {
+ btes_to_try[2] = bte_if_on_node(get_nasid(), bte_pri);
+ btes_to_try[3] = bte_if_on_node(get_nasid(), bte_sec);
+ } else {
+ btes_to_try[2] = NULL;
+ btes_to_try[3] = NULL;
+ }
+ } else {
+ /* try local then remote */
+ btes_to_try[0] = bte_if_on_node(get_nasid(), bte_pri);
+ btes_to_try[1] = bte_if_on_node(get_nasid(), bte_sec);
+ if (mode & BTE_USE_ANY) {
+ btes_to_try[2] = bte_if_on_node(NASID_GET(dest), bte_pri);
+ btes_to_try[3] = bte_if_on_node(NASID_GET(dest), bte_sec);
+ } else {
+ btes_to_try[2] = NULL;
+ btes_to_try[3] = NULL;
+ }
+ }
+
+retry_bteop:
+ do {
+ local_irq_save(irq_flags);
+
+ bte_if_index = 0;
+
+ /* Attempt to lock one of the BTE interfaces. */
+ while (bte_if_index < MAX_INTERFACES_TO_TRY) {
+ bte = btes_to_try[bte_if_index++];
+
+ if (bte == NULL) {
+ continue;
+ }
+
+ if (spin_trylock(&bte->spinlock)) {
+ if (!(*bte->most_rcnt_na & BTE_WORD_AVAILABLE) ||
+ (BTE_LNSTAT_LOAD(bte) & BTE_ACTIVE)) {
+ /* Got the lock but BTE still busy */
+ spin_unlock(&bte->spinlock);
+ } else {
+ /* we got the lock and it's not busy */
+ break;
+ }
+ }
+ bte = NULL;
+ }
+
+ if (bte != NULL) {
+ break;
+ }
+
+ local_irq_restore(irq_flags);
+
+ if (!(mode & BTE_WACQUIRE)) {
+ return BTEFAIL_NOTAVAIL;
+ }
+ } while (1);
+
+ if (notification == NULL) {
+ /* User does not want to be notified. */
+ bte->most_rcnt_na = &bte->notify;
+ } else {
+ bte->most_rcnt_na = notification;
+ }
+
+ /* Calculate the number of cache lines to transfer. */
+ transfer_size = ((len >> L1_CACHE_SHIFT) & BTE_LEN_MASK);
+
+ /* Initialize the notification to a known value. */
+ *bte->most_rcnt_na = BTE_WORD_BUSY;
+
+ /* Set the status reg busy bit and transfer length */
+ BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
+ BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
+
+ /* Set the source and destination registers */
+ BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
+ BTE_SRC_STORE(bte, TO_PHYS(src));
+ BTE_PRINTKV(("IBDA = 0x%lx)\n", (TO_PHYS(dest))));
+ BTE_DEST_STORE(bte, TO_PHYS(dest));
+
+ /* Set the notification register */
+ BTE_PRINTKV(("IBNA = 0x%lx)\n",
+ TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
+ BTE_NOTIF_STORE(bte,
+ TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
+
+ /* Initiate the transfer */
+ BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
+ BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+
+ itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);
+
+ spin_unlock_irqrestore(&bte->spinlock, irq_flags);
+
+ if (notification != NULL) {
+ return BTE_SUCCESS;
+ }
+
+ while ((transfer_stat = *bte->most_rcnt_na) == BTE_WORD_BUSY) {
+ if (ia64_get_itc() > itc_end) {
+ BTE_PRINTK(("BTE timeout nasid 0x%x bte%d IBLS = 0x%lx na 0x%lx\n",
+ NASID_GET(bte->bte_base_addr), bte->bte_num,
+ BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
+ bte->bte_error_count++;
+ bte->bh_error = IBLS_ERROR;
+ bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
+ *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
+ goto retry_bteop;
+ }
+ }
+
+ BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
+ BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
+
+ if (transfer_stat & IBLS_ERROR) {
+ bte_status = transfer_stat & ~IBLS_ERROR;
+ } else {
+ bte_status = BTE_SUCCESS;
+ }
+ *bte->most_rcnt_na = BTE_WORD_AVAILABLE;
+
+ BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
+ BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
+
+ return bte_status;
+}
+
+EXPORT_SYMBOL(bte_copy);
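+
+/*
+ * Illustrative sketch, not part of the original patch: an asynchronous
+ * use of bte_copy().  With a non-NULL notification pointer the call
+ * returns as soon as the transfer has been started; the hardware later
+ * overwrites the notification word, setting IBLS_ERROR in it on
+ * failure.  The caller must pass a notify-enabled mode (BTE_NORMAL
+ * from <asm/sn/bte.h>) and obey the same alignment/length restrictions
+ * BUG_ON'd at the top of bte_copy().  The function-local static notify
+ * word keeps the sketch short but is not re-entrant.
+ */
+static inline bte_result_t
+bte_copy_async_sketch(u64 src, u64 dest, u64 len, u64 mode)
+{
+	static volatile u64 notify ____cacheline_aligned;
+	bte_result_t rv;
+	u64 stat;
+
+	rv = bte_copy(src, dest, len, mode, (void *)&notify);
+	if (rv != BTE_SUCCESS)
+		return rv;
+
+	/* ... unrelated work can overlap the transfer here ... */
+
+	while ((stat = notify) == BTE_WORD_BUSY)
+		cpu_relax();
+
+	/* Mirror the status decoding done by the blocking path above. */
+	return (stat & IBLS_ERROR) ? (bte_result_t)(stat & ~IBLS_ERROR)
+	    : BTE_SUCCESS;
+}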
+
+/*
+ * bte_unaligned_copy(src, dest, len, mode)
+ *
+ * Use the block transfer engine to move kernel
+ * memory from src to dest using the assigned mode.
+ *
+ * Parameters:
+ * src - physical address of the transfer source.
+ * dest - physical address of the transfer destination.
+ * len - number of bytes to transfer from source to dest.
+ * mode - hardware defined. See reference information
+ * for IBCT0/1 in the SGI documentation.
+ *
+ * NOTE: If the source, dest, and len are all cache line aligned,
+ *       then it would be _FAR_ preferable to use bte_copy instead.
+ */
+bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
+{
+ int destFirstCacheOffset;
+ u64 headBteSource;
+ u64 headBteLen;
+ u64 headBcopySrcOffset;
+ u64 headBcopyDest;
+ u64 headBcopyLen;
+ u64 footBteSource;
+ u64 footBteLen;
+ u64 footBcopyDest;
+ u64 footBcopyLen;
+ bte_result_t rv;
+ char *bteBlock, *bteBlock_unaligned;
+
+ if (len == 0) {
+ return BTE_SUCCESS;
+ }
+
+ /* temporary buffer used during unaligned transfers */
+ bteBlock_unaligned = kmalloc(len + 3 * L1_CACHE_BYTES,
+ GFP_KERNEL | GFP_DMA);
+ if (bteBlock_unaligned == NULL) {
+ return BTEFAIL_NOTAVAIL;
+ }
+ bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
+
+ headBcopySrcOffset = src & L1_CACHE_MASK;
+ destFirstCacheOffset = dest & L1_CACHE_MASK;
+
+ /*
+ * At this point, the transfer is broken into
+ * (up to) three sections. The first section is
+ * from the start address to the first physical
+ * cache line, the second is from the first physical
+ * cache line to the last complete cache line,
+ * and the third is from the last cache line to the
+ * end of the buffer. The first and third sections
+ * are handled by bte copying into a temporary buffer
+ * and then bcopy'ing the necessary section into the
+ * final location. The middle section is handled with
+ * a standard bte copy.
+ *
+ * One nasty exception to the above rule is when the
+	 * source and destination are not symmetrically
+ * mis-aligned. If the source offset from the first
+ * cache line is different from the destination offset,
+ * we make the first section be the entire transfer
+	 * and then bcopy the entire block into place.
+ */
+ if (headBcopySrcOffset == destFirstCacheOffset) {
+
+ /*
+ * Both the source and destination are the same
+ * distance from a cache line boundary so we can
+ * use the bte to transfer the bulk of the
+ * data.
+ */
+ headBteSource = src & ~L1_CACHE_MASK;
+ headBcopyDest = dest;
+ if (headBcopySrcOffset) {
+			headBcopyLen =
+			    (len > (L1_CACHE_BYTES - headBcopySrcOffset)) ?
+			    (L1_CACHE_BYTES - headBcopySrcOffset) : len;
+ headBteLen = L1_CACHE_BYTES;
+ } else {
+ headBcopyLen = 0;
+ headBteLen = 0;
+ }
+
+ if (len > headBcopyLen) {
+ footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
+ footBteLen = L1_CACHE_BYTES;
+
+ footBteSource = src + len - footBcopyLen;
+ footBcopyDest = dest + len - footBcopyLen;
+
+ if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
+ /*
+				 * We have two contiguous bcopy
+ * blocks. Merge them.
+ */
+ headBcopyLen += footBcopyLen;
+ headBteLen += footBteLen;
+ } else if (footBcopyLen > 0) {
+ rv = bte_copy(footBteSource,
+ ia64_tpa((unsigned long)bteBlock),
+ footBteLen, mode, NULL);
+ if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
+ return rv;
+ }
+
+ memcpy(__va(footBcopyDest),
+ (char *)bteBlock, footBcopyLen);
+ }
+ } else {
+ footBcopyLen = 0;
+ footBteLen = 0;
+ }
+
+ if (len > (headBcopyLen + footBcopyLen)) {
+ /* now transfer the middle. */
+		rv = bte_copy((src + headBcopyLen),
+			      (dest + headBcopyLen),
+			      (len - headBcopyLen - footBcopyLen),
+			      mode, NULL);
+ if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
+ return rv;
+ }
+
+ }
+ } else {
+
+ /*
+		 * The transfer is not symmetric; we will
+		 * allocate a buffer large enough for all the
+		 * data, bte_copy into that buffer, and then
+ * bcopy to the destination.
+ */
+
+ /* Add the leader from source */
+ headBteLen = len + (src & L1_CACHE_MASK);
+ /* Add the trailing bytes from footer. */
+ headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
+ headBteSource = src & ~L1_CACHE_MASK;
+ headBcopySrcOffset = src & L1_CACHE_MASK;
+ headBcopyDest = dest;
+ headBcopyLen = len;
+ }
+
+ if (headBcopyLen > 0) {
+ rv = bte_copy(headBteSource,
+ ia64_tpa((unsigned long)bteBlock), headBteLen,
+ mode, NULL);
+ if (rv != BTE_SUCCESS) {
+ kfree(bteBlock_unaligned);
+ return rv;
+ }
+
+ memcpy(__va(headBcopyDest), ((char *)bteBlock +
+ headBcopySrcOffset), headBcopyLen);
+ }
+ kfree(bteBlock_unaligned);
+ return BTE_SUCCESS;
+}
+
+EXPORT_SYMBOL(bte_unaligned_copy);
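+
+/*
+ * Worked example of the three-section split above (hypothetical
+ * addresses, assuming 128-byte L1 cache lines): for src = 0x1040,
+ * dest = 0x2040, len = 0x300, both addresses sit 0x40 past a line
+ * boundary, so the offsets match.  Head: the line 0x1000-0x107f is
+ * bte-copied into the bounce buffer and bytes 0x1040-0x107f are
+ * memcpy'd to 0x2040.  Middle: 0x1080-0x12ff is bte-copied straight
+ * to 0x2080.  Foot: the line starting at 0x1300 is bte-copied into
+ * the bounce buffer and bytes 0x1300-0x133f are memcpy'd to 0x2300.
+ * Had the two offsets differed, all 0x300 bytes would have gone
+ * through the bounce buffer instead.
+ */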
+
+/************************************************************************
+ * Block Transfer Engine initialization functions.
+ *
+ ***********************************************************************/
+
+/*
+ * bte_init_node(nodepda, cnode)
+ *
+ * Initialize the nodepda structure with BTE base addresses and
+ * spinlocks.
+ */
+void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
+{
+ int i;
+
+ /*
+ * Indicate that all the block transfer engines on this node
+ * are available.
+ */
+
+ /*
+ * Allocate one bte_recover_t structure per node. It holds
+ * the recovery lock for node. All the bte interface structures
+ * will point at this one bte_recover structure to get the lock.
+ */
+ spin_lock_init(&mynodepda->bte_recovery_lock);
+ init_timer(&mynodepda->bte_recovery_timer);
+ mynodepda->bte_recovery_timer.function = bte_error_handler;
+ mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
+
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ /* Which link status register should we use? */
+ unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
+ mynodepda->bte_if[i].bte_base_addr = (u64 *)
+ REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+
+ /*
+ * Initialize the notification and spinlock
+ * so the first transfer can occur.
+ */
+ mynodepda->bte_if[i].most_rcnt_na =
+ &(mynodepda->bte_if[i].notify);
+ mynodepda->bte_if[i].notify = BTE_WORD_AVAILABLE;
+ spin_lock_init(&mynodepda->bte_if[i].spinlock);
+
+ mynodepda->bte_if[i].bte_cnode = cnode;
+ mynodepda->bte_if[i].bte_error_count = 0;
+ mynodepda->bte_if[i].bte_num = i;
+ mynodepda->bte_if[i].cleanup_active = 0;
+ mynodepda->bte_if[i].bh_error = 0;
+ }
+
+}
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
new file mode 100644
index 000000000000..fd104312c6bd
--- /dev/null
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -0,0 +1,198 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sn_sal.h>
+#include "ioerror.h"
+#include <asm/sn/addrs.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/bte.h>
+#include <asm/param.h>
+
+/*
+ * Bte error handling is done in two parts. The first captures
+ * any crb related errors. Since there can be multiple crbs per
+ * interface and multiple interfaces active, we need to wait until
+ * all active crbs are completed. This is the first job of the
+ * second part error handler. When all bte related CRBs are cleanly
+ * completed, it resets the interfaces and gets them ready for new
+ * transfers to be queued.
+ */
+
+void bte_error_handler(unsigned long);
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+ struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+ spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+ struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+ nasid_t nasid;
+ int i;
+ int valid_crbs;
+ unsigned long irq_flags;
+ volatile u64 *notify;
+ bte_result_t bh_error;
+ ii_imem_u_t imem; /* II IMEM Register */
+ ii_icrb0_d_u_t icrbd; /* II CRB Register D */
+ ii_ibcr_u_t ibcr;
+ ii_icmr_u_t icmr;
+ ii_ieclr_u_t ieclr;
+
+ BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+ smp_processor_id()));
+
+ spin_lock_irqsave(recovery_lock, irq_flags);
+
+ if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
+ (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
+ BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
+ smp_processor_id()));
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+ return;
+ }
+ /*
+ * Lock all interfaces on this node to prevent new transfers
+ * from being queued.
+ */
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ if (err_nodepda->bte_if[i].cleanup_active) {
+ continue;
+ }
+ spin_lock(&err_nodepda->bte_if[i].spinlock);
+ BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+ smp_processor_id(), i));
+ err_nodepda->bte_if[i].cleanup_active = 1;
+ }
+
+ /* Determine information about our hub */
+ nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+ /*
+ * A BTE transfer can use multiple CRBs. We need to make sure
+ * that all the BTE CRBs are complete (or timed out) before
+ * attempting to clean up the error. Resetting the BTE while
+ * there are still BTE CRBs active will hang the BTE.
+ * We should look at all the CRBs to see if they are allocated
+ * to the BTE and see if they are still active. When none
+ * are active, we can continue with the cleanup.
+ *
+ * We also want to make sure that the local NI port is up.
+	 * When a router resets, the NI port can go down while it
+	 * goes through the LLP handshake, but it then comes back up.
+ */
+ icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
+ if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
+ /*
+ * There are errors which still need to be cleaned up by
+ * hubiio_crb_error_handler
+ */
+		mod_timer(recovery_timer, jiffies + (HZ * 5));
+ BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+ smp_processor_id()));
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+ return;
+ }
+ if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
+
+ valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
+
+ for (i = 0; i < IIO_NUM_CRBS; i++) {
+ if (!((1 << i) & valid_crbs)) {
+ /* This crb was not marked as valid, ignore */
+ continue;
+ }
+ icrbd.ii_icrb0_d_regval =
+ REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+ if (icrbd.d_bteop) {
+				mod_timer(recovery_timer, jiffies + (HZ * 5));
+ BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
+ err_nodepda, smp_processor_id(),
+ i));
+ spin_unlock_irqrestore(recovery_lock,
+ irq_flags);
+ return;
+ }
+ }
+ }
+
+ BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
+ /* Reenable both bte interfaces */
+ imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
+ imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
+ REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
+
+ /* Clear BTE0/1 error bits */
+ ieclr.ii_ieclr_regval = 0;
+ if (err_nodepda->bte_if[0].bh_error != BTE_SUCCESS)
+ ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
+ if (err_nodepda->bte_if[1].bh_error != BTE_SUCCESS)
+ ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
+ REMOTE_HUB_S(nasid, IIO_IECLR, ieclr.ii_ieclr_regval);
+
+ /* Reinitialize both BTE state machines. */
+ ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
+ ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
+ REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
+
+ for (i = 0; i < BTES_PER_NODE; i++) {
+ bh_error = err_nodepda->bte_if[i].bh_error;
+ if (bh_error != BTE_SUCCESS) {
+ /* There is an error which needs to be notified */
+ notify = err_nodepda->bte_if[i].most_rcnt_na;
+ BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
+ err_nodepda->bte_if[i].bte_cnode,
+ err_nodepda->bte_if[i].bte_num,
+ IBLS_ERROR | (u64) bh_error));
+ *notify = IBLS_ERROR | bh_error;
+ err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
+ }
+
+ err_nodepda->bte_if[i].cleanup_active = 0;
+ BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
+ smp_processor_id(), i));
+ spin_unlock(&err_nodepda->bte_if[i].spinlock);
+ }
+
+ del_timer(recovery_timer);
+
+ spin_unlock_irqrestore(recovery_lock, irq_flags);
+}
+
+/*
+ * First part error handler. This is called whenever any error CRB interrupt
+ * is generated by the II.
+ */
+void
+bte_crb_error_handler(cnodeid_t cnode, int btenum,
+ int crbnum, ioerror_t * ioe, int bteop)
+{
+ struct bteinfo_s *bte;
+
+
+ bte = &(NODEPDA(cnode)->bte_if[btenum]);
+
+ /*
+ * The caller has already figured out the error type, we save that
+	 * in the bte handle structure for the thread exercising the
+ * interface to consume.
+ */
+ bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
+ bte->bte_error_count++;
+
+ BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
+ bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
+ bte_error_handler((unsigned long) NODEPDA(cnode));
+}
+
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
new file mode 100644
index 000000000000..2bdf684c5066
--- /dev/null
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -0,0 +1,201 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/delay.h>
+#include <asm/sn/sn_sal.h>
+#include "ioerror.h"
+#include <asm/sn/addrs.h>
+#include <asm/sn/shubio.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/bte.h>
+
+void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
+extern void bte_crb_error_handler(cnodeid_t, int, int, ioerror_t *,
+ int);
+static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
+{
+ struct hubdev_info *hubdev_info;
+ struct ia64_sal_retval ret_stuff;
+ nasid_t nasid;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ hubdev_info = (struct hubdev_info *)arg;
+ nasid = hubdev_info->hdi_nasid;
+ SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+ (u64) nasid, 0, 0, 0, 0, 0, 0);
+
+ if ((int)ret_stuff.v0)
+ panic("hubii_eint_handler(): Fatal TIO Error");
+
+ if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
+ (void)hubiio_crb_error_handler(hubdev_info);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Free the hub CRB "crbnum" which encountered an error.
+ * Assumption is, error handling was successfully done,
+ * and we now want to return the CRB back to Hub for normal usage.
+ *
+ * In order to free the CRB, all that's needed is to de-allocate it
+ *
+ * Assumption:
+ * No other processor is mucking around with the hub control register.
+ * So, upper layer has to single thread this.
+ */
+void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
+{
+ ii_icrb0_b_u_t icrbb;
+
+ /*
+ * The hardware does NOT clear the mark bit, so it must get cleared
+ * here to be sure the error is not processed twice.
+ */
+ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
+ IIO_ICRB_B(crbnum));
+ icrbb.b_mark = 0;
+ REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
+ icrbb.ii_icrb0_b_regval);
+ /*
+	 * Deallocate the register and wait until the hub indicates it's done.
+ */
+ REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
+ while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
+ udelay(1);
+
+}
+
+/*
+ * hubiio_crb_error_handler
+ *
+ * This routine gets invoked when a hub gets an error
+ * interrupt. So, the routine is running in interrupt context
+ * at error interrupt level.
+ * Action:
+ * It's responsible for identifying ALL the CRBs that are marked
+ * with error, and process them.
+ *
+ * If you find the CRB that's marked with error, map this to the
+ * reason it caused error, and invoke appropriate error handler.
+ *
+ * XXX Be aware of the information in the context register.
+ *
+ * NOTE:
+ * Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt
+ * handler can be run on any node. (not necessarily the node
+ * corresponding to the hub that encountered error).
+ */
+
+void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
+{
+ nasid_t nasid;
+ ii_icrb0_a_u_t icrba; /* II CRB Register A */
+ ii_icrb0_b_u_t icrbb; /* II CRB Register B */
+ ii_icrb0_c_u_t icrbc; /* II CRB Register C */
+ ii_icrb0_d_u_t icrbd; /* II CRB Register D */
+	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
+ int i;
+ int num_errors = 0; /* Num of errors handled */
+ ioerror_t ioerror;
+
+ nasid = hubdev_info->hdi_nasid;
+
+ /*
+ * XXX - Add locking for any recovery actions
+ */
+ /*
+ * Scan through all CRBs in the Hub, and handle the errors
+ * in any of the CRBs marked.
+ */
+ for (i = 0; i < IIO_NUM_CRBS; i++) {
+ /* Check this crb entry to see if it is in error. */
+ icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
+
+ if (icrbb.b_mark == 0) {
+ continue;
+ }
+
+ icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
+
+ IOERROR_INIT(&ioerror);
+
+ /* read other CRB error registers. */
+ icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
+ icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
+ icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
+
+ IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
+
+ /* Check if this error is due to BTE operation,
+ * and handle it separately.
+ */
+ if (icrbd.d_bteop ||
+ ((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
+ icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
+ (icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
+ icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
+
+ int bte_num;
+
+ if (icrbd.d_bteop)
+ bte_num = icrbc.c_btenum;
+ else /* b_initiator bit 2 gives BTE number */
+ bte_num = (icrbb.b_initiator & 0x4) >> 2;
+
+ hubiio_crb_free(hubdev_info, i);
+
+ bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
+ i, &ioerror, icrbd.d_bteop);
+ num_errors++;
+ continue;
+ }
+ }
+}
+
+/*
+ * Function : hub_error_init
+ * Purpose : initialize the error handling requirements for a given hub.
+ * Parameters : cnode, the compact nodeid.
+ * Assumptions	: Called only once per hub, either by a local cpu, or by a
+ *		  remote cpu when this hub is headless (cpuless).
+ * Returns : None
+ */
+void hub_error_init(struct hubdev_info *hubdev_info)
+{
+ if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
+ "SN_hub_error", (void *)hubdev_info))
+		printk(KERN_ERR "hub_error_init: Failed to request_irq for 0x%p\n",
+ hubdev_info);
+ return;
+}
+
+
+/*
+ * Function : ice_error_init
+ * Purpose : initialize the error handling requirements for a given tio.
+ * Parameters : cnode, the compact nodeid.
+ * Assumptions : Called only once per tio.
+ * Returns : None
+ */
+void ice_error_init(struct hubdev_info *hubdev_info)
+{
+ if (request_irq
+ (SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
+ (void *)hubdev_info))
+ printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
+ hubdev_info);
+ return;
+}
+
diff --git a/arch/ia64/sn/kernel/idle.c b/arch/ia64/sn/kernel/idle.c
new file mode 100644
index 000000000000..49d178f022b5
--- /dev/null
+++ b/arch/ia64/sn/kernel/idle.c
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/leds.h>
+
+void snidle(int state)
+{
+ if (state) {
+ if (pda->idle_flag == 0) {
+ /*
+ * Turn the activity LED off.
+ */
+ set_led_bits(0, LED_CPU_ACTIVITY);
+ }
+
+ pda->idle_flag = 1;
+ } else {
+ /*
+ * Turn the activity LED on.
+ */
+ set_led_bits(LED_CPU_ACTIVITY, LED_CPU_ACTIVITY);
+
+ pda->idle_flag = 0;
+ }
+}
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
new file mode 100644
index 000000000000..001880812b7c
--- /dev/null
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -0,0 +1,411 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/nodemask.h>
+#include <asm/sn/types.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include "xtalk/xwidgetdev.h"
+#include <asm/sn/geo.h>
+#include "xtalk/hubdev.h"
+#include <asm/sn/io.h>
+#include <asm/sn/simulator.h>
+
+char master_baseio_wid;
+nasid_t master_nasid = INVALID_NASID; /* Partition Master */
+
+struct slab_info {
+ struct hubdev_info hubdev;
+};
+
+struct brick {
+ moduleid_t id; /* Module ID of this module */
+ struct slab_info slab_info[MAX_SLABS + 1];
+};
+
+int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
+
+/*
+ * Retrieve the DMA Flush List given nasid. This list is needed
+ * to implement the WAR - Flush DMA data on PIO Reads.
+ */
+static inline uint64_t
+sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
+{
+
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
+ (u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
+ 0);
+ return ret_stuff.v0;
+
+}
+
+/*
+ * Retrieve the hub device info structure for the given nasid.
+ */
+static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
+{
+
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
+ (u64) handle, (u64) address, 0, 0, 0, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci bus information given the bus number.
+ */
+static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+{
+
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
+ (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline uint64_t
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+ u64 sn_irq_info)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+ (u64) segment, (u64) bus_number, (u64) devfn,
+ (u64) pci_dev,
+ sn_irq_info, 0, 0);
+ return ret_stuff.v0;
+}
+
+/*
+ * sn_alloc_pci_sysdata() - This routine allocates a pci controller
+ * which is expected as the pci_dev and pci_bus sysdata by the Linux
+ * PCI infrastructure.
+ */
+static inline struct pci_controller *sn_alloc_pci_sysdata(void)
+{
+ struct pci_controller *pci_sysdata;
+
+ pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
+ if (!pci_sysdata)
+ BUG();
+
+ memset(pci_sysdata, 0, sizeof(*pci_sysdata));
+ return pci_sysdata;
+}
+
+/*
+ * sn_fixup_ionodes() - This routine initializes the HUB data structure for
+ * each node in the system.
+ */
+static void sn_fixup_ionodes(void)
+{
+
+ struct sn_flush_device_list *sn_flush_device_list;
+ struct hubdev_info *hubdev;
+ uint64_t status;
+ uint64_t nasid;
+ int i, widget;
+
+ for (i = 0; i < numionodes; i++) {
+ hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
+ nasid = cnodeid_to_nasid(i);
+ status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
+ if (status)
+ continue;
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
+ hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
+
+ if (!hubdev->hdi_flush_nasid_list.widget_p)
+ continue;
+
+ hubdev->hdi_flush_nasid_list.widget_p =
+ kmalloc((HUB_WIDGET_ID_MAX + 1) *
+ sizeof(struct sn_flush_device_list *), GFP_KERNEL);
+
+ memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
+ (HUB_WIDGET_ID_MAX + 1) *
+ sizeof(struct sn_flush_device_list *));
+
+ for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
+			sn_flush_device_list =
+			    kmalloc(DEV_PER_WIDGET *
+				    sizeof(struct sn_flush_device_list),
+				    GFP_KERNEL);
+ memset(sn_flush_device_list, 0x0,
+ DEV_PER_WIDGET *
+ sizeof(struct sn_flush_device_list));
+
+			status =
+			    sal_get_widget_dmaflush_list(nasid, widget,
+				(uint64_t)__pa(sn_flush_device_list));
+ if (status) {
+ kfree(sn_flush_device_list);
+ continue;
+ }
+
+ hubdev->hdi_flush_nasid_list.widget_p[widget] =
+ sn_flush_device_list;
+ }
+
+ if (!(i & 1))
+ hub_error_init(hubdev);
+ else
+ ice_error_init(hubdev);
+ }
+
+}
+
+/*
+ * sn_pci_fixup_slot() - This routine sets up a slot's resources
+ * consistent with the Linux PCI abstraction layer. Resources acquired
+ * from our PCI provider include PIO maps to BAR space and interrupt
+ * objects.
+ */
+static void sn_pci_fixup_slot(struct pci_dev *dev)
+{
+ int idx;
+ int segment = 0;
+ uint64_t size;
+ struct sn_irq_info *sn_irq_info;
+ struct pci_dev *host_pci_dev;
+ int status = 0;
+
+ dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+	if (!SN_PCIDEV_INFO(dev))
+ BUG(); /* Cannot afford to run out of memory */
+ memset(SN_PCIDEV_INFO(dev), 0, sizeof(struct pcidev_info));
+
+ sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+	if (!sn_irq_info)
+ BUG(); /* Cannot afford to run out of memory */
+ memset(sn_irq_info, 0, sizeof(struct sn_irq_info));
+
+ /* Call to retrieve pci device information needed by kernel. */
+ status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
+ dev->devfn,
+ (u64) __pa(SN_PCIDEV_INFO(dev)),
+ (u64) __pa(sn_irq_info));
+ if (status)
+		BUG();	/* Cannot get platform pci device information */
+
+ /* Copy over PIO Mapped Addresses */
+ for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
+ unsigned long start, end, addr;
+
+ if (!SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx])
+ continue;
+
+ start = dev->resource[idx].start;
+ end = dev->resource[idx].end;
+ size = end - start;
+ addr = SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx];
+ addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+ dev->resource[idx].start = addr;
+ dev->resource[idx].end = addr + size;
+ if (dev->resource[idx].flags & IORESOURCE_IO)
+ dev->resource[idx].parent = &ioport_resource;
+ else
+ dev->resource[idx].parent = &iomem_resource;
+ }
+
+ /* set up host bus linkages */
+ host_pci_dev =
+ pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
+ SN_PCIDEV_INFO(dev)->
+ pdi_slot_host_handle & 0xffffffff);
+ SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
+ SN_PCIDEV_INFO(host_pci_dev);
+ SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
+ SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
+
+ /* Only set up IRQ stuff if this device has a host bus context */
+ if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
+ SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
+ dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
+ sn_irq_fixup(dev, sn_irq_info);
+ }
+}
+
+/*
+ * sn_pci_controller_fixup() - This routine sets up a bus's resources
+ * consistent with the Linux PCI abstraction layer.
+ */
+static void sn_pci_controller_fixup(int segment, int busnum)
+{
+ int status = 0;
+ int nasid, cnode;
+ struct pci_bus *bus;
+ struct pci_controller *controller;
+ struct pcibus_bussoft *prom_bussoft_ptr;
+ struct hubdev_info *hubdev_info;
+ void *provider_soft;
+
+ status =
+ sal_get_pcibus_info((u64) segment, (u64) busnum,
+ (u64) ia64_tpa(&prom_bussoft_ptr));
+ if (status > 0) {
+ return; /* bus # does not exist */
+ }
+
+ prom_bussoft_ptr = __va(prom_bussoft_ptr);
+ controller = sn_alloc_pci_sysdata();
+	/* sn_alloc_pci_sysdata() BUGs on failure, so controller is non-NULL */
+
+ bus = pci_scan_bus(busnum, &pci_root_ops, controller);
+ if (bus == NULL) {
+ return; /* error, or bus already scanned */
+ }
+
+ /*
+ * Per-provider fixup. Copies the contents from prom to local
+ * area and links SN_PCIBUS_BUSSOFT().
+ *
+ * Note: Provider is responsible for ensuring that prom_bussoft_ptr
+ * represents an asic-type that it can handle.
+ */
+
+ if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
+ return; /* no further fixup necessary */
+ }
+
+ provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
+ if (provider_soft == NULL) {
+ return; /* fixup failed or not applicable */
+ }
+
+ /*
+ * Generic bus fixup goes here. Don't reference prom_bussoft_ptr
+ * after this point.
+ */
+
+ bus->sysdata = controller;
+ PCI_CONTROLLER(bus)->platform_data = provider_soft;
+
+ nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
+ &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
+}
+
+/*
+ * Ugly hack to get PCI setup until we have a proper ACPI namespace.
+ */
+
+#define PCI_BUSES_TO_SCAN 256
+
+static int __init sn_pci_init(void)
+{
+ int i = 0;
+ struct pci_dev *pci_dev = NULL;
+ extern void sn_init_cpei_timer(void);
+#ifdef CONFIG_PROC_FS
+ extern void register_sn_procfs(void);
+#endif
+
+ if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
+ return 0;
+
+ /*
+ * This is needed to avoid bounce limit checks in the blk layer
+ */
+ ia64_max_iommu_merge_mask = ~PAGE_MASK;
+ sn_fixup_ionodes();
+ sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
+	if (!sn_irq)
+		BUG();	/* Cannot afford to run out of memory. */
+ memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
+
+ sn_init_cpei_timer();
+
+#ifdef CONFIG_PROC_FS
+ register_sn_procfs();
+#endif
+
+ for (i = 0; i < PCI_BUSES_TO_SCAN; i++) {
+ sn_pci_controller_fixup(0, i);
+ }
+
+ /*
+	 * The generic Linux PCI layer has created the pci_bus and pci_dev
+	 * structures - time for us to add our SN platform-specific
+ * information.
+ */
+
+ while ((pci_dev =
+ pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
+ sn_pci_fixup_slot(pci_dev);
+ }
+
+ sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
+
+ return 0;
+}
+
+/*
+ * hubdev_init_node() - Creates the HUB data structure and links it to its
+ * own NODE-specific data area.
+ */
+void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+{
+
+ struct hubdev_info *hubdev_info;
+
+ if (node >= num_online_nodes()) /* Headless/memless IO nodes */
+		hubdev_info = (struct hubdev_info *)
+		    alloc_bootmem_node(NODE_DATA(0),
+				       sizeof(struct hubdev_info));
+	else
+		hubdev_info = (struct hubdev_info *)
+		    alloc_bootmem_node(NODE_DATA(node),
+				       sizeof(struct hubdev_info));
+ npda->pdinfo = (void *)hubdev_info;
+
+}
+
+geoid_t
+cnodeid_get_geoid(cnodeid_t cnode)
+{
+
+ struct hubdev_info *hubdev;
+
+ hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+ return hubdev->hdi_geoid;
+
+}
+
+subsys_initcall(sn_pci_init);
diff --git a/arch/ia64/sn/kernel/iomv.c b/arch/ia64/sn/kernel/iomv.c
new file mode 100644
index 000000000000..fec6d8b8237b
--- /dev/null
+++ b/arch/ia64/sn/kernel/iomv.c
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/shub_mmr.h>
+
+/**
+ * sn_io_addr - convert an in/out port to an i/o address
+ * @port: port to convert
+ *
+ * Legacy in/out instructions are converted to ld/st instructions
+ * on IA64. This routine will convert a port number into a valid
+ * SN i/o address. Used by sn_in*() and sn_out*().
+ */
+void *sn_io_addr(unsigned long port)
+{
+ if (!IS_RUNNING_ON_SIMULATOR()) {
+ /* On sn2, legacy I/O ports don't point at anything */
+ if (port < (64 * 1024))
+ return NULL;
+ return ((void *)(port | __IA64_UNCACHED_OFFSET));
+ } else {
+ /* but the simulator uses them... */
+ unsigned long addr;
+
+ /*
+ * word align port, but need more than 10 bits
+ * for accessing registers in bedrock local block
+ * (so we don't do port&0xfff)
+ */
+ addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12);
+ if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7)
+ addr |= port;
+ return (void *)addr;
+ }
+}
+
+EXPORT_SYMBOL(sn_io_addr);
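+
+/*
+ * Illustrative sketch, not part of the original file: the sn_in*()
+ * accessors mentioned above reduce to an sn_io_addr() conversion
+ * followed by an uncached load.  A port that maps to nothing reads
+ * back as all ones, as on a real bus with no responder.
+ */
+static inline unsigned char sn_inb_sketch(unsigned long port)
+{
+	volatile unsigned char *addr = sn_io_addr(port);
+
+	return addr ? *addr : 0xff;
+}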
+
+/**
+ * __sn_mmiowb - I/O space memory barrier
+ *
+ * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl
+ * for details.
+ *
+ * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
+ * See PV 871084 for details about the WAR about zero value.
+ *
+ */
+void __sn_mmiowb(void)
+{
+ volatile unsigned long *adr = pda->pio_write_status_addr;
+ unsigned long val = pda->pio_write_status_val;
+
+ while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val)
+ cpu_relax();
+}
+
+EXPORT_SYMBOL(__sn_mmiowb);
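+
+/*
+ * Illustrative usage, a sketch only ("dev_lock" and "reg" are
+ * hypothetical): the canonical pattern from the deviceiobook
+ * documentation cited above.  mmiowb() - which expands to
+ * __sn_mmiowb() on sn2 - keeps a posted PIO write from being
+ * reordered past the unlock as seen through another SHub.
+ */
+static inline void sn_mmiowb_sketch(spinlock_t *dev_lock,
+				    void __iomem *reg, u32 val)
+{
+	spin_lock(dev_lock);
+	writel(val, reg);	/* posted PIO write */
+	mmiowb();		/* drain it before the unlock is visible */
+	spin_unlock(dev_lock);
+}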
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
new file mode 100644
index 000000000000..3be44724f6c8
--- /dev/null
+++ b/arch/ia64/sn/kernel/irq.c
@@ -0,0 +1,431 @@
+/*
+ * Platform dependent support for SGI SN
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/irq.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/arch.h>
+#include "xtalk/xwidgetdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/sn_sal.h>
+
+static void force_interrupt(int irq);
+static void register_intr_pda(struct sn_irq_info *sn_irq_info);
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
+
+extern int sn_force_interrupt_flag;
+extern int sn_ioif_inited;
+struct sn_irq_info **sn_irq;
+
+static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ u64 sn_irq_info,
+ int req_irq, nasid_t req_nasid,
+ int req_slice)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_ALLOC, (u64) local_nasid,
+ (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) req_nasid, (u64) req_slice);
+ return ret_stuff.status;
+}
+
+static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info)
+{
+ struct ia64_sal_retval ret_stuff;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+ (u64) SAL_INTR_FREE, (u64) local_nasid,
+ (u64) local_widget, (u64) sn_irq_info->irq_irq,
+ (u64) sn_irq_info->irq_cookie, 0, 0);
+}
+
+static unsigned int sn_startup_irq(unsigned int irq)
+{
+ return 0;
+}
+
+static void sn_shutdown_irq(unsigned int irq)
+{
+}
+
+static void sn_disable_irq(unsigned int irq)
+{
+}
+
+static void sn_enable_irq(unsigned int irq)
+{
+}
+
+static void sn_ack_irq(unsigned int irq)
+{
+ uint64_t event_occurred, mask = 0;
+ int nasid;
+
+ irq = irq & 0xff;
+ nasid = get_nasid();
+ event_occurred =
+ HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
+ if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+ mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
+ }
+ if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
+ mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
+ }
+ if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
+ mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
+ }
+ if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
+ mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
+ }
+ HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
+ mask);
+ __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
+
+ move_irq(irq);
+}
+
+static void sn_end_irq(unsigned int irq)
+{
+ int nasid;
+ int ivec;
+ uint64_t event_occurred;
+
+ ivec = irq & 0xff;
+ if (ivec == SGI_UART_VECTOR) {
+ nasid = get_nasid();
+ event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
+ (nasid, SH_EVENT_OCCURRED));
+ /* If the UART bit is set here, we may have received an
+ * interrupt from the UART that the driver missed. To
+ * make sure, we IPI ourselves to force us to look again.
+ */
+ if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
+ platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
+ IA64_IPI_DM_INT, 0);
+ }
+ }
+ __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
+ if (sn_force_interrupt_flag)
+ force_interrupt(irq);
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct sn_irq_info *sn_irq_info = sn_irq[irq];
+ struct sn_irq_info *tmp_sn_irq_info;
+ int cpuid, cpuphys;
+ nasid_t t_nasid; /* nasid to target */
+ int t_slice; /* slice to target */
+
+ /* allocate a temp sn_irq_info struct to get new target info */
+ tmp_sn_irq_info = kmalloc(sizeof(*tmp_sn_irq_info), GFP_KERNEL);
+ if (!tmp_sn_irq_info)
+ return;
+
+ cpuid = first_cpu(mask);
+ cpuphys = cpu_physical_id(cpuid);
+ t_nasid = cpuid_to_nasid(cpuid);
+ t_slice = cpuid_to_slice(cpuid);
+
+ while (sn_irq_info) {
+ int status;
+ int local_widget;
+ uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+ nasid_t local_nasid = NASID_GET(bridge);
+
+ if (!bridge)
+ break; /* irq is not a device interrupt */
+
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
+
+ /* Free the old PROM sn_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, sn_irq_info);
+
+ /* allocate a new PROM sn_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ __pa(tmp_sn_irq_info), irq, t_nasid,
+ t_slice);
+
+ if (status == 0) {
+			/* Update the kernel's sn_irq_info with new target info */
+ unregister_intr_pda(sn_irq_info);
+ sn_irq_info->irq_cpuid = cpuid;
+ sn_irq_info->irq_nasid = t_nasid;
+ sn_irq_info->irq_slice = t_slice;
+ sn_irq_info->irq_xtalkaddr =
+ tmp_sn_irq_info->irq_xtalkaddr;
+ sn_irq_info->irq_cookie = tmp_sn_irq_info->irq_cookie;
+ register_intr_pda(sn_irq_info);
+
+ if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
+ pcibr_change_devices_irq(sn_irq_info);
+ }
+
+ sn_irq_info = sn_irq_info->irq_next;
+
+#ifdef CONFIG_SMP
+ set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+#endif
+ } else {
+			break;	/* the affinity change failed in sn_intr_alloc() */
+ }
+ }
+ kfree(tmp_sn_irq_info);
+}
+
+struct hw_interrupt_type irq_type_sn = {
+ "SN hub",
+ sn_startup_irq,
+ sn_shutdown_irq,
+ sn_enable_irq,
+ sn_disable_irq,
+ sn_ack_irq,
+ sn_end_irq,
+ sn_set_affinity_irq
+};
+
+unsigned int sn_local_vector_to_irq(u8 vector)
+{
+ return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
+}
+
+void sn_irq_init(void)
+{
+ int i;
+ irq_desc_t *base_desc = irq_desc;
+
+ for (i = 0; i < NR_IRQS; i++) {
+ if (base_desc[i].handler == &no_irq_type) {
+ base_desc[i].handler = &irq_type_sn;
+ }
+ }
+}
+
+static void register_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+ int irq = sn_irq_info->irq_irq;
+ int cpu = sn_irq_info->irq_cpuid;
+
+ if (pdacpu(cpu)->sn_last_irq < irq) {
+ pdacpu(cpu)->sn_last_irq = irq;
+ }
+
+ if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
+ pdacpu(cpu)->sn_first_irq = irq;
+ }
+}
+
+static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
+{
+ int irq = sn_irq_info->irq_irq;
+ int cpu = sn_irq_info->irq_cpuid;
+ struct sn_irq_info *tmp_irq_info;
+ int i, foundmatch;
+
+ if (pdacpu(cpu)->sn_last_irq == irq) {
+ foundmatch = 0;
+ for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
+ tmp_irq_info = sn_irq[i];
+ while (tmp_irq_info) {
+ if (tmp_irq_info->irq_cpuid == cpu) {
+ foundmatch++;
+ break;
+ }
+ tmp_irq_info = tmp_irq_info->irq_next;
+ }
+ if (foundmatch) {
+ break;
+ }
+ }
+ pdacpu(cpu)->sn_last_irq = i;
+ }
+
+ if (pdacpu(cpu)->sn_first_irq == irq) {
+ foundmatch = 0;
+ for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
+ tmp_irq_info = sn_irq[i];
+ while (tmp_irq_info) {
+ if (tmp_irq_info->irq_cpuid == cpu) {
+ foundmatch++;
+ break;
+ }
+ tmp_irq_info = tmp_irq_info->irq_next;
+ }
+ if (foundmatch) {
+ break;
+ }
+ }
+ pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
+ }
+}
+
+struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
+ nasid_t nasid, int slice)
+{
+ struct sn_irq_info *sn_irq_info;
+ int status;
+
+ sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
+ if (sn_irq_info == NULL)
+ return NULL;
+
+ memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
+
+ status =
+ sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
+ nasid, slice);
+
+ if (status) {
+ kfree(sn_irq_info);
+ return NULL;
+ } else {
+ return sn_irq_info;
+ }
+}
+
+void sn_irq_free(struct sn_irq_info *sn_irq_info)
+{
+ uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
+ nasid_t local_nasid = NASID_GET(bridge);
+ int local_widget;
+
+ if (local_nasid & 1) /* tio check */
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
+
+ sn_intr_free(local_nasid, local_widget, sn_irq_info);
+
+ kfree(sn_irq_info);
+}
+
+void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
+{
+ nasid_t nasid = sn_irq_info->irq_nasid;
+ int slice = sn_irq_info->irq_slice;
+ int cpu = nasid_slice_to_cpuid(nasid, slice);
+
+ sn_irq_info->irq_cpuid = cpu;
+ sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
+
+ /* link it into the sn_irq[irq] list */
+ sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
+ sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
+
+ (void)register_intr_pda(sn_irq_info);
+}
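+
+/*
+ * Illustrative pairing, not part of the original file (all parameter
+ * values are hypothetical): a bus provider allocates a PROM interrupt
+ * with sn_irq_alloc() and, once it has the pci_dev, hands the result
+ * to sn_irq_fixup() to link it into the sn_irq[] list and the per-cpu
+ * bookkeeping.
+ */
+static inline void sn_irq_setup_sketch(struct pci_dev *dev,
+				       nasid_t local_nasid, int local_widget,
+				       int irq, nasid_t target_nasid,
+				       int target_slice)
+{
+	struct sn_irq_info *info;
+
+	info = sn_irq_alloc(local_nasid, local_widget, irq,
+			    target_nasid, target_slice);
+	if (info)
+		sn_irq_fixup(dev, info);
+}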
+
+static void force_interrupt(int irq)
+{
+ struct sn_irq_info *sn_irq_info;
+
+ if (!sn_ioif_inited)
+ return;
+ sn_irq_info = sn_irq[irq];
+ while (sn_irq_info) {
+ if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
+ (sn_irq_info->irq_bridge != NULL)) {
+ pcibr_force_interrupt(sn_irq_info);
+ }
+ sn_irq_info = sn_irq_info->irq_next;
+ }
+}
+
+/*
+ * Check for lost interrupts. If the PIC int_status reg. says that
+ * an interrupt has been sent, but not handled, and the interrupt
+ * is not pending in either the cpu irr regs or in the soft irr regs,
+ * and the interrupt is not in service, then the interrupt may have
+ * been lost. Force an interrupt on that pin. It is possible that
+ * the interrupt is in flight, so we may generate a spurious interrupt,
+ * but we should never miss a real lost interrupt.
+ */
+static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
+{
+ uint64_t regval;
+ int irr_reg_num;
+ int irr_bit;
+ uint64_t irr_reg;
+ struct pcidev_info *pcidev_info;
+ struct pcibus_info *pcibus_info;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (!pcidev_info)
+ return;
+
+ pcibus_info =
+ (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+ pdi_pcibus_info;
+ regval = pcireg_intr_status_get(pcibus_info);
+
+ irr_reg_num = irq_to_vector(irq) / 64;
+ irr_bit = irq_to_vector(irq) % 64;
+ switch (irr_reg_num) {
+ case 0:
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
+ break;
+ case 1:
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
+ break;
+ case 2:
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
+ break;
+ case 3:
+ irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
+ break;
+ }
+ if (!test_bit(irr_bit, &irr_reg)) {
+ if (!test_bit(irq, pda->sn_soft_irr)) {
+ if (!test_bit(irq, pda->sn_in_service_ivecs)) {
+ regval &= 0xff;
+ if (sn_irq_info->irq_int_bit & regval &
+ sn_irq_info->irq_last_intr) {
+ regval &=
+ ~(sn_irq_info->
+ irq_int_bit & regval);
+ pcibr_force_interrupt(sn_irq_info);
+ }
+ }
+ }
+ }
+ sn_irq_info->irq_last_intr = regval;
+}
+
+void sn_lb_int_war_check(void)
+{
+ int i;
+
+ if (!sn_ioif_inited || pda->sn_first_irq == 0)
+ return;
+ for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+ struct sn_irq_info *sn_irq_info = sn_irq[i];
+ while (sn_irq_info) {
+ /* Only call for PCI bridges that are fully initialized. */
+ if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
+ (sn_irq_info->irq_bridge != NULL)) {
+ sn_check_intr(i, sn_irq_info);
+ }
+ sn_irq_info = sn_irq_info->irq_next;
+ }
+ }
+}
diff --git a/arch/ia64/sn/kernel/klconflib.c b/arch/ia64/sn/kernel/klconflib.c
new file mode 100644
index 000000000000..0f11a3299cd2
--- /dev/null
+++ b/arch/ia64/sn/kernel/klconflib.c
@@ -0,0 +1,108 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/sn/types.h>
+#include <asm/sn/module.h>
+#include <asm/sn/l1.h>
+
+char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
+/*
+ * Format a module id for printing.
+ *
+ * There are three possible formats:
+ *
+ * MODULE_FORMAT_BRIEF is the brief 6-character format, including
+ * the actual brick-type as recorded in the
+ * moduleid_t, eg. 002c15 for a C-brick, or
+ * 101#17 for a PX-brick.
+ *
+ * MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
+ *			or rack/101/bay/17 (note that the brick
+ * type does not appear in this format).
+ *
+ * MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
+ * ensures that the module id provided appears
+ * exactly as it would on the LCD display of
+ * the corresponding brick, eg. still 002c15
+ * for a C-brick, but 101p17 for a PX-brick.
+ *
+ * maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
+ * making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
+ * decided that all callers should assume that the returned string is what
+ * is displayed on the brick L1 LCD.
+ */
+void
+format_module_id(char *buffer, moduleid_t m, int fmt)
+{
+ int rack, position;
+ unsigned char brickchar;
+
+ rack = MODULE_GET_RACK(m);
+ brickchar = MODULE_GET_BTCHAR(m);
+
+ /* Be sure we use the same brick type character as displayed
+ * on the brick's LCD
+ */
+ switch (brickchar)
+ {
+ case L1_BRICKTYPE_GA:
+ case L1_BRICKTYPE_OPUS_TIO:
+ brickchar = L1_BRICKTYPE_C;
+ break;
+
+ case L1_BRICKTYPE_PX:
+ case L1_BRICKTYPE_PE:
+ case L1_BRICKTYPE_PA:
+ case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
+ * if that makes more sense
+ */
+ brickchar = L1_BRICKTYPE_P;
+ break;
+
+ case L1_BRICKTYPE_IX:
+ case L1_BRICKTYPE_IA:
+
+ brickchar = L1_BRICKTYPE_I;
+ break;
+ }
+
+ position = MODULE_GET_BPOS(m);
+
+ if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
+ /* Brief module number format, eg. 002c15 */
+
+ /* Decompress the rack number */
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
+
+ /* Add the brick type */
+ *buffer++ = brickchar;
+ }
+ else if (fmt == MODULE_FORMAT_LONG) {
+ /* Fuller hwgraph format, eg. rack/002/bay/15 */
+
+		strcpy(buffer, "rack/"); buffer += strlen(buffer);
+
+ *buffer++ = '0' + RACK_GET_CLASS(rack);
+ *buffer++ = '0' + RACK_GET_GROUP(rack);
+ *buffer++ = '0' + RACK_GET_NUM(rack);
+
+		strcpy(buffer, "/bay/"); buffer += strlen(buffer);
+ }
+
+ /* Add the bay position, using at least two digits */
+ if (position < 10)
+ *buffer++ = '0';
+ sprintf(buffer, "%d", position);
+
+}
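+
+/*
+ * Illustrative usage, a sketch (the moduleid_t value is hypothetical):
+ * for a C-brick in rack 2, bay 15, the two formats come out as
+ * "002c15" and "rack/002/bay/15" respectively.
+ */
+static inline void format_module_id_sketch(moduleid_t m)
+{
+	char brief[8], path[16];
+
+	format_module_id(brief, m, MODULE_FORMAT_BRIEF);
+	format_module_id(path, m, MODULE_FORMAT_LONG);
+	printk(KERN_DEBUG "module %s (%s)\n", brief, path);
+}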
diff --git a/arch/ia64/sn/kernel/machvec.c b/arch/ia64/sn/kernel/machvec.c
new file mode 100644
index 000000000000..02bb9155840c
--- /dev/null
+++ b/arch/ia64/sn/kernel/machvec.c
@@ -0,0 +1,11 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#define MACHVEC_PLATFORM_NAME sn2
+#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h>
+#include <asm/machvec_init.h>
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
new file mode 100644
index 000000000000..857774bb2c9a
--- /dev/null
+++ b/arch/ia64/sn/kernel/mca.c
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <asm/mca.h>
+#include <asm/sal.h>
+#include <asm/sn/sn_sal.h>
+
+/*
+ * Interval for calling SAL to poll for errors that do NOT cause error
+ * interrupts. SAL will raise a CPEI if any errors are present that
+ * need to be logged.
+ */
+#define CPEI_INTERVAL (5*HZ)
+
+struct timer_list sn_cpei_timer;
+void sn_init_cpei_timer(void);
+
+/* Printing oemdata from mca uses data that is not passed through SAL; it is
+ * global, so only one user at a time.
+ */
+static DECLARE_MUTEX(sn_oemdata_mutex);
+static u8 **sn_oemdata;
+static u64 *sn_oemdata_size, sn_oemdata_bufsize;
+
+/*
+ * print_hook
+ *
+ * This function is the callback routine that SAL calls to log error
+ * info for platform errors. buf is appended to sn_oemdata, resizing as
+ * required.
+ */
+static int print_hook(const char *fmt, ...)
+{
+ char buf[400];
+ int len;
+ va_list args;
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+ len = strlen(buf);
+ while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
+ u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
+ if (!newbuf) {
+ printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
+ __FUNCTION__);
+ return 0;
+ }
+ memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
+ vfree(*sn_oemdata);
+ *sn_oemdata = newbuf;
+ }
+ memcpy(*sn_oemdata + *sn_oemdata_size, buf, len + 1);
+ *sn_oemdata_size += len;
+ return 0;
+}
+
+static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
+{
+ /*
+ * this function's sole purpose is to call SAL when we receive
+ * a CE interrupt from SHUB or when the timer routine decides
+ * we need to call SAL to check for CEs.
+ */
+
+ /* CALL SAL_LOG_CE */
+
+ ia64_sn_plat_cpei_handler();
+}
+
+static void sn_cpei_timer_handler(unsigned long dummy)
+{
+ sn_cpei_handler(-1, NULL, NULL);
+ mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
+}
+
+void sn_init_cpei_timer(void)
+{
+ init_timer(&sn_cpei_timer);
+ sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
+ sn_cpei_timer.function = sn_cpei_timer_handler;
+ add_timer(&sn_cpei_timer);
+}
+
+static int
+sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
+ u64 * oemdata_size)
+{
+ down(&sn_oemdata_mutex);
+ sn_oemdata = oemdata;
+ sn_oemdata_size = oemdata_size;
+ sn_oemdata_bufsize = 0;
+ ia64_sn_plat_specific_err_print(print_hook, (char *)sect_header);
+ up(&sn_oemdata_mutex);
+ return 0;
+}
+
+/* Callback when userspace salinfo wants to decode oem data via the platform
+ * kernel and/or prom.
+ */
+int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
+{
+ efi_guid_t guid = *(efi_guid_t *)sect_header;
+ int valid = 0;
+ *oemdata_size = 0;
+ vfree(*oemdata);
+ *oemdata = NULL;
+ if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0) {
+ sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
+ valid = psei->valid.oem_data;
+ } else if (efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0) {
+ sal_log_mem_dev_err_info_t *mdei = (sal_log_mem_dev_err_info_t *)sect_header;
+ valid = mdei->valid.oem_data;
+ }
+ if (valid)
+ return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
+ else
+ return 0;
+}
+
+static int __init sn_salinfo_init(void)
+{
+ salinfo_platform_oemdata = &sn_salinfo_platform_oemdata;
+ return 0;
+}
+
+module_init(sn_salinfo_init)
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
new file mode 100644
index 000000000000..f0306b516afb
--- /dev/null
+++ b/arch/ia64/sn/kernel/setup.c
@@ -0,0 +1,621 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/timex.h>
+#include <linux/sched.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/serial.h>
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/mmzone.h>
+#include <linux/interrupt.h>
+#include <linux/acpi.h>
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/root_dev.h>
+#include <linux/nodemask.h>
+
+#include <asm/io.h>
+#include <asm/sal.h>
+#include <asm/machvec.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/leds.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include <asm/sn/klconfig.h>
+
+
+DEFINE_PER_CPU(struct pda_s, pda_percpu);
+
+#define MAX_PHYS_MEMORY (1UL << 49) /* 1 TB */
+
+lboard_t *root_lboard[MAX_COMPACT_NODES];
+
+extern void bte_init_node(nodepda_t *, cnodeid_t);
+
+extern void sn_timer_init(void);
+extern unsigned long last_time_offset;
+extern void (*ia64_mark_idle) (int);
+extern void snidle(int);
+extern unsigned char acpi_kbd_controller_present;
+
+unsigned long sn_rtc_cycles_per_second;
+EXPORT_SYMBOL(sn_rtc_cycles_per_second);
+
+DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
+EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
+
+partid_t sn_partid = -1;
+EXPORT_SYMBOL(sn_partid);
+char sn_system_serial_number_string[128];
+EXPORT_SYMBOL(sn_system_serial_number_string);
+u64 sn_partition_serial_number;
+EXPORT_SYMBOL(sn_partition_serial_number);
+u8 sn_partition_id;
+EXPORT_SYMBOL(sn_partition_id);
+u8 sn_system_size;
+EXPORT_SYMBOL(sn_system_size);
+u8 sn_sharing_domain_size;
+EXPORT_SYMBOL(sn_sharing_domain_size);
+u8 sn_coherency_id;
+EXPORT_SYMBOL(sn_coherency_id);
+u8 sn_region_size;
+EXPORT_SYMBOL(sn_region_size);
+
+short physical_node_map[MAX_PHYSNODE_ID];
+
+EXPORT_SYMBOL(physical_node_map);
+
+int numionodes;
+
+static void sn_init_pdas(char **);
+static void scan_for_ionodes(void);
+
+static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
+
+/*
+ * The format of "screen_info" is strange, and due to early i386-setup
+ * code. This is just enough to make the console code think we're on a
+ * VGA color display.
+ */
+struct screen_info sn_screen_info = {
+ .orig_x = 0,
+ .orig_y = 0,
+ .orig_video_mode = 3,
+ .orig_video_cols = 80,
+ .orig_video_ega_bx = 3,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16
+};
+
+/*
+ * This is here so we can use the CMOS detection in ide-probe.c to
+ * determine what drives are present. In theory, we don't need this
+ * as the auto-detection could be done via ide-probe.c:do_probe() but
+ * in practice that would be much slower, which is painful when
+ * running in the simulator. Note that passing zeroes in DRIVE_INFO
+ * is sufficient (the IDE driver will autodetect the drive geometry).
+ */
+#ifdef CONFIG_IA64_GENERIC
+extern char drive_info[4 * 16];
+#else
+char drive_info[4 * 16];
+#endif
+
+/*
+ * Get nasid of current cpu early in boot before nodepda is initialized
+ */
+static int
+boot_get_nasid(void)
+{
+ int nasid;
+
+ if (ia64_sn_get_sapic_info(get_sapicid(), &nasid, NULL, NULL))
+ BUG();
+ return nasid;
+}
+
+/*
+ * This routine can only be used during init, since
+ * smp_boot_data is an init data structure.
+ * We have to use smp_boot_data.cpu_phys_id to find
+ * the physical id of the processor because the normal
+ * cpu_physical_id() relies on data structures that
+ * may not be initialized yet.
+ */
+
+static int __init pxm_to_nasid(int pxm)
+{
+ int i;
+ int nid;
+
+ nid = pxm_to_nid_map[pxm];
+ for (i = 0; i < num_node_memblks; i++) {
+ if (node_memblk[i].nid == nid) {
+ return NASID_GET(node_memblk[i].start_paddr);
+ }
+ }
+ return -1;
+}
+
+/**
+ * early_sn_setup - early setup routine for SN platforms
+ *
+ * Sets up an initial console to aid debugging. Intended primarily
+ * for bringup. See start_kernel() in init/main.c.
+ */
+
+void __init early_sn_setup(void)
+{
+ efi_system_table_t *efi_systab;
+ efi_config_table_t *config_tables;
+ struct ia64_sal_systab *sal_systab;
+ struct ia64_sal_desc_entry_point *ep;
+ char *p;
+ int i, j;
+
+ /*
+	 * Parse enough of the SAL tables to locate the SAL entry point. Since console
+ * IO on SN2 is done via SAL calls, early_printk won't work without this.
+ *
+ * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
+	 * Any changes to those files may have to be made here as well.
+ */
+ efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
+ config_tables = __va(efi_systab->tables);
+ for (i = 0; i < efi_systab->nr_tables; i++) {
+ if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
+ 0) {
+ sal_systab = __va(config_tables[i].table);
+ p = (char *)(sal_systab + 1);
+ for (j = 0; j < sal_systab->entry_count; j++) {
+ if (*p == SAL_DESC_ENTRY_POINT) {
+ ep = (struct ia64_sal_desc_entry_point
+ *)p;
+ ia64_sal_handler_init(__va
+ (ep->sal_proc),
+ __va(ep->gp));
+ return;
+ }
+ p += SAL_DESC_SIZE(*p);
+ }
+ }
+ }
+ /* Uh-oh, SAL not available?? */
+ printk(KERN_ERR "failed to find SAL entry point\n");
+}
+
+extern int platform_intr_list[];
+extern nasid_t master_nasid;
+static int shub_1_1_found __initdata;
+
+/*
+ * sn_check_for_wars
+ *
+ * Set flag for enabling SHUB-specific workarounds (WARs)
+ */
+
+static inline int __init is_shub_1_1(int nasid)
+{
+ unsigned long id;
+ int rev;
+
+ if (is_shub2())
+ return 0;
+ id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
+ rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT;
+ return rev <= 2;
+}
+
+static void __init sn_check_for_wars(void)
+{
+ int cnode;
+
+ if (is_shub2()) {
+ /* none yet */
+ } else {
+ for_each_online_node(cnode) {
+ if (is_shub_1_1(cnodeid_to_nasid(cnode)))
+ sn_hub_info->shub_1_1_found = 1;
+ }
+ }
+}
+
+/**
+ * sn_setup - SN platform setup routine
+ * @cmdline_p: kernel command line
+ *
+ * Handles platform setup for SN machines. This includes determining
+ * the RTC frequency (via a SAL call), initializing secondary CPUs, and
+ * setting up per-node data areas. The console is also initialized here.
+ */
+void __init sn_setup(char **cmdline_p)
+{
+ long status, ticks_per_sec, drift;
+ int pxm;
+ int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
+ extern void sn_cpu_init(void);
+
+ /*
+	 * If the generic code has enabled VGA console support, let's get
+	 * rid of it again. This is a kludge for the fact that ACPI
+	 * currently has no way of informing us whether legacy VGA is
+	 * available or not.
+ */
+#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
+ if (conswitchp == &vga_con) {
+ printk(KERN_DEBUG "SGI: Disabling VGA console\n");
+#ifdef CONFIG_DUMMY_CONSOLE
+ conswitchp = &dummy_con;
+#else
+ conswitchp = NULL;
+#endif /* CONFIG_DUMMY_CONSOLE */
+ }
+#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
+
+ MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+
+ memset(physical_node_map, -1, sizeof(physical_node_map));
+ for (pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++)
+ if (pxm_to_nid_map[pxm] != -1)
+ physical_node_map[pxm_to_nasid(pxm)] =
+ pxm_to_nid_map[pxm];
+
+ /*
+ * Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
+ * support here so we don't have to listen to failed keyboard probe
+ * messages.
+ */
+ if ((major < 2 || (major == 2 && minor <= 9)) &&
+ acpi_kbd_controller_present) {
+ printk(KERN_INFO "Disabling legacy keyboard support as prom "
+ "is too old and doesn't provide FADT\n");
+ acpi_kbd_controller_present = 0;
+ }
+
+ printk("SGI SAL version %x.%02x\n", major, minor);
+
+ /*
+ * Confirm the SAL we're running on is recent enough...
+ */
+ if ((major < SN_SAL_MIN_MAJOR) || (major == SN_SAL_MIN_MAJOR &&
+ minor < SN_SAL_MIN_MINOR)) {
+ printk(KERN_ERR "This kernel needs SGI SAL version >= "
+ "%x.%02x\n", SN_SAL_MIN_MAJOR, SN_SAL_MIN_MINOR);
+ panic("PROM version too old\n");
+ }
+
+ master_nasid = boot_get_nasid();
+
+ status =
+ ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
+ &drift);
+ if (status != 0 || ticks_per_sec < 100000) {
+ printk(KERN_WARNING
+ "unable to determine platform RTC clock frequency, guessing.\n");
+		/* The PROM gives a wrong value for the clock frequency, so guess. */
+ sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
+ } else
+ sn_rtc_cycles_per_second = ticks_per_sec;
+
+ platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
+
+ /*
+	 * We set the default root device to /dev/hda
+	 * to make simulation easy.
+ */
+ ROOT_DEV = Root_HDA1;
+
+ /*
+ * Create the PDAs and NODEPDAs for all the cpus.
+ */
+ sn_init_pdas(cmdline_p);
+
+ ia64_mark_idle = &snidle;
+
+ /*
+ * For the bootcpu, we do this here. All other cpus will make the
+ * call as part of cpu_init in slave cpu initialization.
+ */
+ sn_cpu_init();
+
+#ifdef CONFIG_SMP
+ init_smp_config();
+#endif
+ screen_info = sn_screen_info;
+
+ sn_timer_init();
+}
+
+/**
+ * sn_init_pdas - setup node data areas
+ *
+ * One time setup for Node Data Area. Called by sn_setup().
+ */
+static void __init sn_init_pdas(char **cmdline_p)
+{
+ cnodeid_t cnode;
+
+ memset(pda->cnodeid_to_nasid_table, -1,
+ sizeof(pda->cnodeid_to_nasid_table));
+ for_each_online_node(cnode)
+ pda->cnodeid_to_nasid_table[cnode] =
+ pxm_to_nasid(nid_to_pxm_map[cnode]);
+
+ numionodes = num_online_nodes();
+ scan_for_ionodes();
+
+ /*
+	 * Allocate & initialize the nodepda for each node.
+ */
+ for_each_online_node(cnode) {
+ nodepdaindr[cnode] =
+ alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
+ memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+ memset(nodepdaindr[cnode]->phys_cpuid, -1,
+ sizeof(nodepdaindr[cnode]->phys_cpuid));
+ }
+
+ /*
+ * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
+ */
+ for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
+ nodepdaindr[cnode] =
+ alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
+ memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
+ }
+
+ /*
+ * Now copy the array of nodepda pointers to each nodepda.
+ */
+ for (cnode = 0; cnode < numionodes; cnode++)
+ memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
+ sizeof(nodepdaindr));
+
+ /*
+ * Set up IO related platform-dependent nodepda fields.
+ * The following routine actually sets up the hubinfo struct
+ * in nodepda.
+ */
+ for_each_online_node(cnode) {
+ bte_init_node(nodepdaindr[cnode], cnode);
+ }
+
+ /*
+ * Initialize the per node hubdev. This includes IO Nodes and
+ * headless/memless nodes.
+ */
+ for (cnode = 0; cnode < numionodes; cnode++) {
+ hubdev_init_node(nodepdaindr[cnode], cnode);
+ }
+}
+
+/**
+ * sn_cpu_init - initialize per-cpu data areas
+ * @cpuid: cpuid of the caller
+ *
+ * Called during cpu initialization on each cpu as it starts.
+ * Currently it initializes the per-cpu data area for SNIA and sets up
+ * a few fields in the nodepda. It is known to the ia64 machvec code as
+ * platform_cpu_init().
+ */
+void __init sn_cpu_init(void)
+{
+ int cpuid;
+ int cpuphyid;
+ int nasid;
+ int subnode;
+ int slice;
+ int cnode;
+ int i;
+ static int wars_have_been_checked;
+
+ memset(pda, 0, sizeof(pda));
+ if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
+ &sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
+ &sn_coherency_id, &sn_region_size))
+ BUG();
+ sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
+
+ /*
+ * The boot cpu makes this call again after platform initialization is
+ * complete.
+ */
+ if (nodepdaindr[0] == NULL)
+ return;
+
+ cpuid = smp_processor_id();
+ cpuphyid = get_sapicid();
+
+ if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
+ BUG();
+
+ for (i=0; i < MAX_NUMNODES; i++) {
+ if (nodepdaindr[i]) {
+ nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
+ nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
+ nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
+ }
+ }
+
+ cnode = nasid_to_cnodeid(nasid);
+
+ pda->p_nodepda = nodepdaindr[cnode];
+ pda->led_address =
+ (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
+ pda->led_state = LED_ALWAYS_SET;
+ pda->hb_count = HZ / 2;
+ pda->hb_state = 0;
+ pda->idle_flag = 0;
+
+ if (cpuid != 0) {
+ memcpy(pda->cnodeid_to_nasid_table,
+ pdacpu(0)->cnodeid_to_nasid_table,
+ sizeof(pda->cnodeid_to_nasid_table));
+ }
+
+ /*
+ * Check for WARs.
+	 * This only needs to be done once, on the BSP.
+	 * It has to be done after the loop above, because it uses
+	 * pda.cnodeid_to_nasid_table[i], and before the assignment below.
+ */
+ if (!wars_have_been_checked) {
+ sn_check_for_wars();
+ wars_have_been_checked = 1;
+ }
+ sn_hub_info->shub_1_1_found = shub_1_1_found;
+
+ /*
+ * Set up addresses of PIO/MEM write status registers.
+ */
+ {
+ u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
+ u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
+ SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
+ u64 *pio;
+ pio = is_shub1() ? pio1 : pio2;
+ pda->pio_write_status_addr = (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
+ pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
+ }
+
+ /*
+ * WAR addresses for SHUB 1.x.
+ */
+ if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
+ int buddy_nasid;
+ buddy_nasid =
+ cnodeid_to_nasid(numa_node_id() ==
+ num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
+ pda->pio_shub_war_cam_addr =
+ (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
+ SH1_PI_CAM_CONTROL);
+ }
+}
+
+/*
+ * Scan klconfig for ionodes. Add the nasids to the
+ * physical_node_map and the pda and increment numionodes.
+ */
+
+static void __init scan_for_ionodes(void)
+{
+ int nasid = 0;
+ lboard_t *brd;
+
+ /* Setup ionodes with memory */
+ for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+ char *klgraph_header;
+ cnodeid_t cnodeid;
+
+ if (physical_node_map[nasid] == -1)
+ continue;
+
+ cnodeid = -1;
+ klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
+ if (!klgraph_header) {
+ if (IS_RUNNING_ON_SIMULATOR())
+ continue;
+ BUG(); /* All nodes must have klconfig tables! */
+ }
+ cnodeid = nasid_to_cnodeid(nasid);
+ root_lboard[cnodeid] = (lboard_t *)
+ NODE_OFFSET_TO_LBOARD((nasid),
+ ((kl_config_hdr_t
+ *) (klgraph_header))->
+ ch_board_info);
+ }
+
+ /* Scan headless/memless IO Nodes. */
+ for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+ /* if there's no nasid, don't try to read the klconfig on the node */
+ if (physical_node_map[nasid] == -1)
+ continue;
+ brd = find_lboard_any((lboard_t *)
+ root_lboard[nasid_to_cnodeid(nasid)],
+ KLTYPE_SNIA);
+ if (brd) {
+ brd = KLCF_NEXT_ANY(brd); /* Skip this node's lboard */
+ if (!brd)
+ continue;
+ }
+
+ brd = find_lboard_any(brd, KLTYPE_SNIA);
+
+ while (brd) {
+ pda->cnodeid_to_nasid_table[numionodes] =
+ brd->brd_nasid;
+ physical_node_map[brd->brd_nasid] = numionodes;
+ root_lboard[numionodes] = brd;
+ numionodes++;
+ brd = KLCF_NEXT_ANY(brd);
+ if (!brd)
+ break;
+
+ brd = find_lboard_any(brd, KLTYPE_SNIA);
+ }
+ }
+
+ /* Scan for TIO nodes. */
+ for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
+ /* if there's no nasid, don't try to read the klconfig on the node */
+ if (physical_node_map[nasid] == -1)
+ continue;
+ brd = find_lboard_any((lboard_t *)
+ root_lboard[nasid_to_cnodeid(nasid)],
+ KLTYPE_TIO);
+ while (brd) {
+ pda->cnodeid_to_nasid_table[numionodes] =
+ brd->brd_nasid;
+ physical_node_map[brd->brd_nasid] = numionodes;
+ root_lboard[numionodes] = brd;
+ numionodes++;
+ brd = KLCF_NEXT_ANY(brd);
+ if (!brd)
+ break;
+
+ brd = find_lboard_any(brd, KLTYPE_TIO);
+ }
+ }
+
+}
+
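+/*
+ * Map a (nasid, slice) pair back to a logical cpuid by scanning the
+ * nodepda's physical cpuid table; returns -1 if no cpu matches.
+ */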
+int
+nasid_slice_to_cpuid(int nasid, int slice)
+{
+ long cpu;
+
+ for (cpu=0; cpu < NR_CPUS; cpu++)
+ if (nodepda->phys_cpuid[cpu].nasid == nasid && nodepda->phys_cpuid[cpu].slice == slice)
+ return cpu;
+
+ return -1;
+}
diff --git a/arch/ia64/sn/kernel/sn2/Makefile b/arch/ia64/sn/kernel/sn2/Makefile
new file mode 100644
index 000000000000..170bde4549da
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/Makefile
@@ -0,0 +1,13 @@
+# arch/ia64/sn/kernel/sn2/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1999,2001-2002 Silicon Graphics, Inc. All rights reserved.
+#
+# sn2 specific kernel files
+#
+
+obj-y += cache.o io.o ptc_deadlock.o sn2_smp.o sn_proc_fs.o \
+ prominfo_proc.o timer.o timer_interrupt.o sn_hwperf.o
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
new file mode 100644
index 000000000000..bc3cfa17cd0f
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -0,0 +1,34 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ */
+#include <linux/module.h>
+#include <asm/pgalloc.h>
+
+/**
+ * sn_flush_all_caches - flush a range of addresses from all caches (incl. L4)
+ * @flush_addr: identity mapped region 7 address to start flushing
+ * @bytes: number of bytes to flush
+ *
+ * Flush a range of addresses from all caches including L4.
+ * All addresses fully or partially contained within
+ * @flush_addr to @flush_addr + @bytes are flushed
+ * from all caches.
+ */
+void
+sn_flush_all_caches(long flush_addr, long bytes)
+{
+ flush_icache_range(flush_addr, flush_addr+bytes);
+ /*
+ * The last call may have returned before the caches
+ * were actually flushed, so we call it again to make
+ * sure.
+ */
+ flush_icache_range(flush_addr, flush_addr+bytes);
+ mb();
+}
+EXPORT_SYMBOL(sn_flush_all_caches);
diff --git a/arch/ia64/sn/kernel/sn2/io.c b/arch/ia64/sn/kernel/sn2/io.c
new file mode 100644
index 000000000000..a12c0586de38
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/io.c
@@ -0,0 +1,101 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
+ *
+ * The generic kernel requires function pointers to these routines, so
+ * we wrap the inlines from <asm/sn/io.h> here.
+ */
+
+#include <asm/sn/io.h>
+
+#ifdef CONFIG_IA64_GENERIC
+
+#undef __sn_inb
+#undef __sn_inw
+#undef __sn_inl
+#undef __sn_outb
+#undef __sn_outw
+#undef __sn_outl
+#undef __sn_readb
+#undef __sn_readw
+#undef __sn_readl
+#undef __sn_readq
+#undef __sn_readb_relaxed
+#undef __sn_readw_relaxed
+#undef __sn_readl_relaxed
+#undef __sn_readq_relaxed
+
+unsigned int __sn_inb(unsigned long port)
+{
+ return ___sn_inb(port);
+}
+
+unsigned int __sn_inw(unsigned long port)
+{
+ return ___sn_inw(port);
+}
+
+unsigned int __sn_inl(unsigned long port)
+{
+ return ___sn_inl(port);
+}
+
+void __sn_outb(unsigned char val, unsigned long port)
+{
+ ___sn_outb(val, port);
+}
+
+void __sn_outw(unsigned short val, unsigned long port)
+{
+ ___sn_outw(val, port);
+}
+
+void __sn_outl(unsigned int val, unsigned long port)
+{
+ ___sn_outl(val, port);
+}
+
+unsigned char __sn_readb(void __iomem *addr)
+{
+ return ___sn_readb(addr);
+}
+
+unsigned short __sn_readw(void __iomem *addr)
+{
+ return ___sn_readw(addr);
+}
+
+unsigned int __sn_readl(void __iomem *addr)
+{
+ return ___sn_readl(addr);
+}
+
+unsigned long __sn_readq(void __iomem *addr)
+{
+ return ___sn_readq(addr);
+}
+
+unsigned char __sn_readb_relaxed(void __iomem *addr)
+{
+ return ___sn_readb_relaxed(addr);
+}
+
+unsigned short __sn_readw_relaxed(void __iomem *addr)
+{
+ return ___sn_readw_relaxed(addr);
+}
+
+unsigned int __sn_readl_relaxed(void __iomem *addr)
+{
+ return ___sn_readl_relaxed(addr);
+}
+
+unsigned long __sn_readq_relaxed(void __iomem *addr)
+{
+ return ___sn_readq_relaxed(addr);
+}
+
+#endif
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
new file mode 100644
index 000000000000..81c63b2f8ae9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -0,0 +1,279 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * Module to export the system's Firmware Interface Tables, including
+ * PROM revision numbers and banners, in /proc
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/nodemask.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/addrs.h>
+
+MODULE_DESCRIPTION("PROM version reporting for /proc");
+MODULE_AUTHOR("Chad Talbott");
+MODULE_LICENSE("GPL");
+
+/* Standard Intel FIT entry types */
+#define FIT_ENTRY_FIT_HEADER 0x00 /* FIT header entry */
+#define FIT_ENTRY_PAL_B 0x01 /* PAL_B entry */
+/* Entries 0x02 through 0x0D reserved by Intel */
+#define FIT_ENTRY_PAL_A_PROC 0x0E /* Processor-specific PAL_A entry */
+#define FIT_ENTRY_PAL_A 0x0F /* PAL_A entry, same as... */
+#define FIT_ENTRY_PAL_A_GEN 0x0F /* ...Generic PAL_A entry */
+#define FIT_ENTRY_UNUSED 0x7F /* Unused (reserved by Intel?) */
+/* OEM-defined entries range from 0x10 to 0x7E. */
+#define FIT_ENTRY_SAL_A 0x10 /* SAL_A entry */
+#define FIT_ENTRY_SAL_B 0x11 /* SAL_B entry */
+#define FIT_ENTRY_SALRUNTIME 0x12 /* SAL runtime entry */
+#define FIT_ENTRY_EFI 0x1F /* EFI entry */
+#define FIT_ENTRY_FPSWA 0x20 /* embedded fpswa entry */
+#define FIT_ENTRY_VMLINUX 0x21 /* embedded vmlinux entry */
+
+#define FIT_MAJOR_SHIFT (32 + 8)
+#define FIT_MAJOR_MASK ((1 << 8) - 1)
+#define FIT_MINOR_SHIFT 32
+#define FIT_MINOR_MASK ((1 << 8) - 1)
+
+#define FIT_MAJOR(q) \
+ ((unsigned) ((q) >> FIT_MAJOR_SHIFT) & FIT_MAJOR_MASK)
+#define FIT_MINOR(q) \
+ ((unsigned) ((q) >> FIT_MINOR_SHIFT) & FIT_MINOR_MASK)
+
+#define FIT_TYPE_SHIFT (32 + 16)
+#define FIT_TYPE_MASK ((1 << 7) - 1)
+
+#define FIT_TYPE(q) \
+ ((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
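+
+/*
+ * Worked example (hypothetical quadword, for illustration only):
+ * q = fentry[1] = 0x0010040200000008 decodes as
+ *	FIT_TYPE(q)  = (q >> 48) & 0x7f = 0x10 -> "SAL_A"
+ *	FIT_MAJOR(q) = (q >> 40) & 0xff = 0x04
+ *	FIT_MINOR(q) = (q >> 32) & 0xff = 0x02
+ *	size	     = (q & 0xffffff) * 16 = 128 bytes (see dump_fit_entry)
+ */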
+
+struct fit_type_map_t {
+ unsigned char type;
+ const char *name;
+};
+
+static const struct fit_type_map_t fit_entry_types[] = {
+ {FIT_ENTRY_FIT_HEADER, "FIT Header"},
+ {FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
+ {FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
+ {FIT_ENTRY_PAL_A, "PAL_A"},
+ {FIT_ENTRY_PAL_B, "PAL_B"},
+ {FIT_ENTRY_SAL_A, "SAL_A"},
+ {FIT_ENTRY_SAL_B, "SAL_B"},
+ {FIT_ENTRY_SALRUNTIME, "SAL runtime"},
+ {FIT_ENTRY_EFI, "EFI"},
+ {FIT_ENTRY_VMLINUX, "Embedded Linux"},
+ {FIT_ENTRY_FPSWA, "Embedded FPSWA"},
+ {FIT_ENTRY_UNUSED, "Unused"},
+ {0xff, "Error"},
+};
+
+static const char *fit_type_name(unsigned char type)
+{
+ struct fit_type_map_t const *mapp;
+
+ for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
+ if (type == mapp->type)
+ return mapp->name;
+
+ if ((type > FIT_ENTRY_PAL_A) && (type < FIT_ENTRY_UNUSED))
+ return "OEM type";
+ if ((type > FIT_ENTRY_PAL_B) && (type < FIT_ENTRY_PAL_A))
+ return "Reserved";
+
+ return "Unknown type";
+}
+
+static int
+get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
+ char *banner, int banlen)
+{
+ return ia64_sn_get_fit_compt(nasid, index, fentry, banner, banlen);
+}
+
+
+/*
+ * These two routines display the FIT table for each node.
+ */
+static int dump_fit_entry(char *page, unsigned long *fentry)
+{
+ unsigned type;
+
+ type = FIT_TYPE(fentry[1]);
+ return sprintf(page, "%02x %-25s %x.%02x %016lx %u\n",
+ type,
+ fit_type_name(type),
+ FIT_MAJOR(fentry[1]), FIT_MINOR(fentry[1]),
+ fentry[0],
+ /* mult by sixteen to get size in bytes */
+ (unsigned)(fentry[1] & 0xffffff) * 16);
+}
+
+
+/*
+ * We assume that the fit table will be small enough that we can print
+ * the whole thing into one page. (This is true for our default 16kB
+ * pages -- each entry is about 60 chars wide when printed.) I read
+ * somewhere that the maximum size of the FIT is 128 entries, so we're
+ * OK except for 4kB pages (and no one is going to do that on SN
+ * anyway).
+ */
+static int
+dump_fit(char *page, unsigned long nasid)
+{
+ unsigned long fentry[2];
+ int index;
+ char *p;
+
+ p = page;
+ for (index=0;;index++) {
+ BUG_ON(index * 60 > PAGE_SIZE);
+ if (get_fit_entry(nasid, index, fentry, NULL, 0))
+ break;
+ p += dump_fit_entry(p, fentry);
+ }
+
+ return p - page;
+}
+
+static int
+dump_version(char *page, unsigned long nasid)
+{
+ unsigned long fentry[2];
+ char banner[128];
+ int index;
+ int len;
+
+ for (index = 0; ; index++) {
+ if (get_fit_entry(nasid, index, fentry, banner,
+ sizeof(banner)))
+ return 0;
+ if (FIT_TYPE(fentry[1]) == FIT_ENTRY_SAL_A)
+ break;
+ }
+
+ len = sprintf(page, "%x.%02x\n", FIT_MAJOR(fentry[1]),
+ FIT_MINOR(fentry[1]));
+ page += len;
+
+ if (banner[0])
+ len += snprintf(page, PAGE_SIZE-len, "%s\n", banner);
+
+ return len;
+}
+
+/* same as in proc_misc.c */
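+/*
+ * Clip the freshly formatted buffer to the (off, count) window the
+ * read_proc caller requested, flagging EOF once everything up to
+ * off + count has been produced.
+ */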
+static int
+proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
+ int len)
+{
+ if (len <= off + count)
+ *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len > count)
+ len = count;
+ if (len < 0)
+ len = 0;
+ return len;
+}
+
+static int
+read_version_entry(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int len = 0;
+
+ /* data holds the NASID of the node */
+ len = dump_version(page, (unsigned long)data);
+ len = proc_calc_metrics(page, start, off, count, eof, len);
+ return len;
+}
+
+static int
+read_fit_entry(char *page, char **start, off_t off, int count, int *eof,
+ void *data)
+{
+ int len = 0;
+
+ /* data holds the NASID of the node */
+ len = dump_fit(page, (unsigned long)data);
+ len = proc_calc_metrics(page, start, off, count, eof, len);
+
+ return len;
+}
+
+/* module entry points */
+int __init prominfo_init(void);
+void __exit prominfo_exit(void);
+
+module_init(prominfo_init);
+module_exit(prominfo_exit);
+
+static struct proc_dir_entry **proc_entries;
+static struct proc_dir_entry *sgi_prominfo_entry;
+
+#define NODE_NAME_LEN 11
+
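+/*
+ * Create /proc/sgi_prominfo/node<N>/{fit,version} for every online
+ * node, with the node's nasid stashed in each entry's data pointer.
+ */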
+int __init prominfo_init(void)
+{
+ struct proc_dir_entry **entp;
+ struct proc_dir_entry *p;
+ cnodeid_t cnodeid;
+ unsigned long nasid;
+ char name[NODE_NAME_LEN];
+
+ if (!ia64_platform_is("sn2"))
+ return 0;
+
+ proc_entries = kmalloc(num_online_nodes() * sizeof(struct proc_dir_entry *),
+ GFP_KERNEL);
+
+ sgi_prominfo_entry = proc_mkdir("sgi_prominfo", NULL);
+
+ entp = proc_entries;
+ for_each_online_node(cnodeid) {
+ sprintf(name, "node%d", cnodeid);
+ *entp = proc_mkdir(name, sgi_prominfo_entry);
+ nasid = cnodeid_to_nasid(cnodeid);
+ p = create_proc_read_entry(
+ "fit", 0, *entp, read_fit_entry,
+ (void *)nasid);
+ if (p)
+ p->owner = THIS_MODULE;
+ p = create_proc_read_entry(
+ "version", 0, *entp, read_version_entry,
+ (void *)nasid);
+ if (p)
+ p->owner = THIS_MODULE;
+ entp++;
+ }
+
+ return 0;
+}
+
+void __exit prominfo_exit(void)
+{
+ struct proc_dir_entry **entp;
+ unsigned cnodeid;
+ char name[NODE_NAME_LEN];
+
+ entp = proc_entries;
+ for_each_online_node(cnodeid) {
+ remove_proc_entry("fit", *entp);
+ remove_proc_entry("version", *entp);
+ sprintf(name, "node%d", cnodeid);
+ remove_proc_entry(name, sgi_prominfo_entry);
+ entp++;
+ }
+ remove_proc_entry("sgi_prominfo", NULL);
+ kfree(proc_entries);
+}
diff --git a/arch/ia64/sn/kernel/sn2/ptc_deadlock.S b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
new file mode 100644
index 000000000000..7947312801ec
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/ptc_deadlock.S
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <asm/sn/shub_mmr.h>
+
+#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
+#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK
+#define ALIAS_OFFSET (SH1_PIO_WRITE_STATUS_0_ALIAS-SH1_PIO_WRITE_STATUS_0)
+
+
+ .global sn2_ptc_deadlock_recovery_core
+ .proc sn2_ptc_deadlock_recovery_core
+
+sn2_ptc_deadlock_recovery_core:
+ .regstk 6,0,0,0
+
+ ptc0 = in0
+ data0 = in1
+ ptc1 = in2
+ data1 = in3
+ piowc = in4
+ zeroval = in5
+ piowcphy = r30
+ psrsave = r2
+ scr1 = r16
+ scr2 = r17
+ mask = r18
+
+
+ extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address
+ dep piowcphy=-1,piowcphy,63,1
+ movl mask=WRITECOUNTMASK
+
+1:
+ add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register
+ mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR
+ st8.rel [scr2]=scr1;;
+
+5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete.
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+
+
+ ////////////// BEGIN PHYSICAL MODE ////////////////////
+ mov psrsave=psr // Disable IC (no PMIs)
+ rsm psr.i | psr.dt | psr.ic;;
+ srlz.i;;
+
+ st8.rel [ptc0]=data0 // Write PTC0 & wait for completion.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b;;
+
+ tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1
+
+(p7) st8.rel [ptc1]=data1;; // Now write PTC1.
+
+5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete.
+ and scr2=scr1,mask;; // mask of writecount bits
+ cmp.ne p6,p0=zeroval,scr2
+(p6) br.cond.sptk 5b
+
+ tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK
+
+ mov psr.l=psrsave;; // Reenable IC
+ srlz.i;;
+ ////////////// END PHYSICAL MODE ////////////////////
+
+(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred.
+
+ br.ret.sptk rp
+ .endp sn2_ptc_deadlock_recovery_core
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
new file mode 100644
index 000000000000..7af05a7ac743
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -0,0 +1,295 @@
+/*
+ * SN2 Platform specific SMP Support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/nodemask.h>
+
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/sal.h>
+#include <asm/system.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/tlb.h>
+#include <asm/numa.h>
+#include <asm/hw_irq.h>
+#include <asm/current.h>
+#include <asm/sn/sn_cpuid.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/nodepda.h>
+#include <asm/sn/rw_mmr.h>
+
+void sn2_ptc_deadlock_recovery(volatile unsigned long *, unsigned long data0,
+ volatile unsigned long *, unsigned long data1);
+
+static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock);
+
+static unsigned long sn2_ptc_deadlock_count;
+
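+/*
+ * Spin until the shub PIO write-status register shows no pending
+ * writes, then return the final status word so the caller can test
+ * the deadlock bit.
+ */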
+static inline unsigned long wait_piowc(void)
+{
+ volatile unsigned long *piows, zeroval;
+ unsigned long ws;
+
+ piows = pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+ do {
+ cpu_relax();
+ } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval);
+ return ws;
+}
+
+void sn_tlb_migrate_finish(struct mm_struct *mm)
+{
+ if (mm == current->mm)
+ flush_tlb_mm(mm);
+}
+
+/**
+ * sn2_global_tlb_purge - globally purge translation cache of virtual address range
+ * @start: start of virtual address range
+ * @end: end of virtual address range
+ * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
+ *
+ * Purges the translation caches of all processors of the given virtual address
+ * range.
+ *
+ * Note:
+ * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
+ * - cpu_vm_mask is converted into a nodemask of the nodes containing the
+ * cpus in cpu_vm_mask.
+ * - if only one bit is set in cpu_vm_mask & it is the current cpu,
+ * then only the local TLB needs to be flushed. This flushing can be done
+ * using ptc.l. This is the common case & avoids the global spinlock.
+ * - if multiple cpus have loaded the context, then flushing has to be
+ * done with ptc.g/MMRs under protection of the global ptc_lock.
+ */
+
+void
+sn2_global_tlb_purge(unsigned long start, unsigned long end,
+ unsigned long nbits)
+{
+ int i, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
+ volatile unsigned long *ptc0, *ptc1;
+ unsigned long flags = 0, data0 = 0, data1 = 0;
+ struct mm_struct *mm = current->active_mm;
+ short nasids[MAX_NUMNODES], nix;
+ nodemask_t nodes_flushed;
+
+ nodes_clear(nodes_flushed);
+ i = 0;
+
+ for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
+ cnode = cpu_to_node(cpu);
+ node_set(cnode, nodes_flushed);
+ lcpu = cpu;
+ i++;
+ }
+
+ preempt_disable();
+
+ if (likely(i == 1 && lcpu == smp_processor_id())) {
+ do {
+ ia64_ptcl(start, nbits << 2);
+ start += (1UL << nbits);
+ } while (start < end);
+ ia64_srlz_i();
+ preempt_enable();
+ return;
+ }
+
+ if (atomic_read(&mm->mm_users) == 1) {
+ flush_tlb_mm(mm);
+ preempt_enable();
+ return;
+ }
+
+ nix = 0;
+ for_each_node_mask(cnode, nodes_flushed)
+ nasids[nix++] = cnodeid_to_nasid(cnode);
+
+ shub1 = is_shub1();
+ if (shub1) {
+ data0 = (1UL << SH1_PTC_0_A_SHFT) |
+ (nbits << SH1_PTC_0_PS_SHFT) |
+ ((ia64_get_rr(start) >> 8) << SH1_PTC_0_RID_SHFT) |
+ (1UL << SH1_PTC_0_START_SHFT);
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+ ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+ } else {
+ data0 = (1UL << SH2_PTC_A_SHFT) |
+ (nbits << SH2_PTC_PS_SHFT) |
+ (1UL << SH2_PTC_START_SHFT);
+ ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC +
+ ((ia64_get_rr(start) >> 8) << SH2_PTC_RID_SHFT) );
+ ptc1 = NULL;
+ }
+
+
+ mynasid = get_nasid();
+
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+
+ do {
+ if (shub1)
+ data1 = start | (1UL << SH1_PTC_1_START_SHFT);
+ else
+ data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK);
+ for (i = 0; i < nix; i++) {
+ nasid = nasids[i];
+ if (unlikely(nasid == mynasid)) {
+ ia64_ptcga(start, nbits << 2);
+ ia64_srlz_i();
+ } else {
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+ pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
+ data1);
+ flushed = 1;
+ }
+ }
+
+ if (flushed
+ && (wait_piowc() &
+ SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK)) {
+ sn2_ptc_deadlock_recovery(ptc0, data0, ptc1, data1);
+ }
+
+ start += (1UL << nbits);
+
+ } while (start < end);
+
+ spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+
+ preempt_enable();
+}
+
+/*
+ * sn2_ptc_deadlock_recovery
+ *
+ * Recover from PTC deadlock conditions. Recovery requires stepping through each
+ * TLB flush transaction. The recovery sequence is somewhat tricky & is
+ * coded in assembly language.
+ */
+void sn2_ptc_deadlock_recovery(volatile unsigned long *ptc0, unsigned long data0,
+ volatile unsigned long *ptc1, unsigned long data1)
+{
+ extern void sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long,
+ volatile unsigned long *, unsigned long, volatile unsigned long *, unsigned long);
+ int cnode, mycnode, nasid;
+ volatile unsigned long *piows;
+ volatile unsigned long zeroval;
+
+ sn2_ptc_deadlock_count++;
+
+ piows = pda->pio_write_status_addr;
+ zeroval = pda->pio_write_status_val;
+
+ mycnode = numa_node_id();
+
+ for_each_online_node(cnode) {
+ if (is_headless_node(cnode) || cnode == mycnode)
+ continue;
+ nasid = cnodeid_to_nasid(cnode);
+ ptc0 = CHANGE_NASID(nasid, ptc0);
+ if (ptc1)
+ ptc1 = CHANGE_NASID(nasid, ptc1);
+ sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
+ }
+}
+
+/**
+ * sn_send_IPI_phys - send an IPI to a Nasid and slice
+ * @nasid: nasid to receive the interrupt (may be outside partition)
+ * @physid: physical cpuid to receive the interrupt.
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ *
+ * Sends an IPI (interprocessor interrupt) to the processor specified by
+ * @physid
+ *
+ * @delivery_mode can be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode)
+{
+ long val;
+ unsigned long flags = 0;
+ volatile long *p;
+
+ p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+ val = (1UL << SH_IPI_INT_SEND_SHFT) |
+ (physid << SH_IPI_INT_PID_SHFT) |
+ ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
+ ((long)vector << SH_IPI_INT_IDX_SHFT) |
+ (0x000feeUL << SH_IPI_INT_BASE_SHFT);
+
+ mb();
+ if (enable_shub_wars_1_1()) {
+ spin_lock_irqsave(&sn2_global_ptc_lock, flags);
+ }
+ pio_phys_write_mmr(p, val);
+ if (enable_shub_wars_1_1()) {
+ wait_piowc();
+ spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
+ }
+
+}
+
+EXPORT_SYMBOL(sn_send_IPI_phys);
+
+/**
+ * sn2_send_IPI - send an IPI to a processor
+ * @cpuid: target of the IPI
+ * @vector: command to send
+ * @delivery_mode: delivery mechanism
+ * @redirect: redirect the IPI?
+ *
+ * Sends an IPI (InterProcessor Interrupt) to the processor specified by
+ * @cpuid. @vector specifies the command to send, while @delivery_mode can
+ * be one of the following
+ *
+ * %IA64_IPI_DM_INT - pend an interrupt
+ * %IA64_IPI_DM_PMI - pend a PMI
+ * %IA64_IPI_DM_NMI - pend an NMI
+ * %IA64_IPI_DM_INIT - pend an INIT interrupt
+ */
+void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
+{
+ long physid;
+ int nasid;
+
+ physid = cpu_physical_id(cpuid);
+ nasid = cpuid_to_nasid(cpuid);
+
+ /* the following is used only when starting cpus at boot time */
+ if (unlikely(nasid == -1))
+ ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
+
+ sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
new file mode 100644
index 000000000000..197356460ee1
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -0,0 +1,690 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * SGI Altix topology and hardware performance monitoring API.
+ * Mark Goodwin <markgw@sgi.com>.
+ *
+ * Creates /proc/sgi_sn/sn_topology (read-only) to export
+ * info about Altix nodes, routers, CPUs and NumaLink
+ * interconnection/topology.
+ *
+ * Also creates a dynamic misc device named "sn_hwperf"
+ * that supports an ioctl interface to call down into SAL
+ * to discover hw objects, topology and to read/write
+ * memory mapped registers, e.g. for performance monitoring.
+ * The "sn_hwperf" device is registered only after the procfs
+ * file is first opened, i.e. only if/when it's needed.
+ *
+ * This API is used by SGI Performance Co-Pilot and other
+ * tools, see http://oss.sgi.com/projects/pcp
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/cpumask.h>
+#include <linux/smp_lock.h>
+#include <linux/nodemask.h>
+#include <asm/processor.h>
+#include <asm/topology.h>
+#include <asm/smp.h>
+#include <asm/semaphore.h>
+#include <asm/segment.h>
+#include <asm/uaccess.h>
+#include <asm/sal.h>
+#include <asm/sn/io.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/module.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/sn2/sn_hwperf.h>
+
+static void *sn_hwperf_salheap = NULL;
+static int sn_hwperf_obj_cnt = 0;
+static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
+static int sn_hwperf_init(void);
+static DECLARE_MUTEX(sn_hwperf_init_mutex);
+
+static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
+{
+ int e;
+ u64 sz;
+ struct sn_hwperf_object_info *objbuf = NULL;
+
+ if ((e = sn_hwperf_init()) < 0) {
+ printk("sn_hwperf_init failed: err %d\n", e);
+ goto out;
+ }
+
+ sz = sn_hwperf_obj_cnt * sizeof(struct sn_hwperf_object_info);
+ if ((objbuf = (struct sn_hwperf_object_info *) vmalloc(sz)) == NULL) {
+ printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz);
+ e = -ENOMEM;
+ goto out;
+ }
+
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS,
+ 0, sz, (u64) objbuf, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ vfree(objbuf);
+ }
+
+out:
+ *nobj = sn_hwperf_obj_cnt;
+ *ret = objbuf;
+ return e;
+}
+
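+/*
+ * Map a geographic location string to a compact node id, or -1 if no
+ * node matches. The format parsed below is rack/type/slot/slab, e.g.
+ * "001c14#0" (an example constructed from the sscanf format, not from
+ * real hardware).
+ */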
+static int sn_hwperf_geoid_to_cnode(char *location)
+{
+ int cnode;
+ geoid_t geoid;
+ moduleid_t module_id;
+ char type;
+ int rack, slot, slab;
+ int this_rack, this_slot, this_slab;
+
+ if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
+ return -1;
+
+ for (cnode = 0; cnode < numionodes; cnode++) {
+ geoid = cnodeid_get_geoid(cnode);
+ module_id = geo_module(geoid);
+ this_rack = MODULE_GET_RACK(module_id);
+ this_slot = MODULE_GET_BPOS(module_id);
+ this_slab = geo_slab(geoid);
+ if (rack == this_rack && slot == this_slot && slab == this_slab)
+ break;
+ }
+
+ return cnode < numionodes ? cnode : -1;
+}
+
+static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
+{
+ if (!obj->sn_hwp_this_part)
+ return -1;
+ return sn_hwperf_geoid_to_cnode(obj->location);
+}
+
+static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj,
+ struct sn_hwperf_object_info *objs)
+{
+ int ordinal;
+ struct sn_hwperf_object_info *p;
+
+ for (ordinal=0, p=objs; p != obj; p++) {
+ if (SN_HWPERF_FOREIGN(p))
+ continue;
+ if (SN_HWPERF_SAME_OBJTYPE(p, obj))
+ ordinal++;
+ }
+
+ return ordinal;
+}
+
+static const char *slabname_node = "node"; /* SHub asic */
+static const char *slabname_ionode = "ionode"; /* TIO asic */
+static const char *slabname_router = "router"; /* NL3R or NL4R */
+static const char *slabname_other = "other"; /* unknown asic */
+
+static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
+ struct sn_hwperf_object_info *objs, int *ordinal)
+{
+ int isnode;
+ const char *slabname = slabname_other;
+
+ if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) {
+ slabname = isnode ? slabname_node : slabname_ionode;
+ *ordinal = sn_hwperf_obj_to_cnode(obj);
+ }
+ else {
+ *ordinal = sn_hwperf_generic_ordinal(obj, objs);
+ if (SN_HWPERF_IS_ROUTER(obj))
+ slabname = slabname_router;
+ }
+
+ return slabname;
+}
+
+static int sn_topology_show(struct seq_file *s, void *d)
+{
+ int sz;
+ int pt;
+ int e;
+ int i;
+ int j;
+ const char *slabname;
+ int ordinal;
+ cpumask_t cpumask;
+ char slice;
+ struct cpuinfo_ia64 *c;
+ struct sn_hwperf_port_info *ptdata;
+ struct sn_hwperf_object_info *p;
+ struct sn_hwperf_object_info *obj = d; /* this object */
+ struct sn_hwperf_object_info *objs = s->private; /* all objects */
+
+ if (obj == objs) {
+ seq_printf(s, "# sn_topology version 1\n");
+ seq_printf(s, "# objtype ordinal location partition"
+ " [attribute value [, ...]]\n");
+ }
+
+ if (SN_HWPERF_FOREIGN(obj)) {
+ /* private in another partition: not interesting */
+ return 0;
+ }
+
+ for (i = 0; obj->name[i]; i++) {
+ if (obj->name[i] == ' ')
+ obj->name[i] = '_';
+ }
+
+ slabname = sn_hwperf_get_slabname(obj, objs, &ordinal);
+ seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
+ obj->sn_hwp_this_part ? "local" : "shared", obj->name);
+
+ if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+ seq_putc(s, '\n');
+ else {
+ seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
+ for (i=0; i < numionodes; i++) {
+ seq_printf(s, i ? ":%d" : ", dist %d",
+ node_distance(ordinal, i));
+ }
+ seq_putc(s, '\n');
+
+ /*
+ * CPUs on this node, if any
+ */
+ cpumask = node_to_cpumask(ordinal);
+ for_each_online_cpu(i) {
+ if (cpu_isset(i, cpumask)) {
+ slice = 'a' + cpuid_to_slice(i);
+ c = cpu_data(i);
+ seq_printf(s, "cpu %d %s%c local"
+ " freq %luMHz, arch ia64",
+ i, obj->location, slice,
+ c->proc_freq / 1000000);
+ for_each_online_cpu(j) {
+ seq_printf(s, j ? ":%d" : ", dist %d",
+ node_distance(
+ cpuid_to_cnodeid(i),
+ cpuid_to_cnodeid(j)));
+ }
+ seq_putc(s, '\n');
+ }
+ }
+ }
+
+ if (obj->ports) {
+ /*
+ * numalink ports
+ */
+ sz = obj->ports * sizeof(struct sn_hwperf_port_info);
+ if ((ptdata = vmalloc(sz)) == NULL)
+ return -ENOMEM;
+ e = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_ENUM_PORTS, obj->id, sz,
+ (u64) ptdata, 0, 0, NULL);
+ if (e != SN_HWPERF_OP_OK)
+ return -EINVAL;
+ for (ordinal=0, p=objs; p != obj; p++) {
+ if (!SN_HWPERF_FOREIGN(p))
+ ordinal += p->ports;
+ }
+ for (pt = 0; pt < obj->ports; pt++) {
+ for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) {
+ if (ptdata[pt].conn_id == p->id) {
+ break;
+ }
+ }
+ seq_printf(s, "numalink %d %s-%d",
+ ordinal+pt, obj->location, ptdata[pt].port);
+
+ if (i >= sn_hwperf_obj_cnt) {
+ /* no connection */
+ seq_puts(s, " local endpoint disconnected"
+ ", protocol unknown\n");
+ continue;
+ }
+
+ if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
+ /* both ends local to this partition */
+ seq_puts(s, " local");
+ else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+				/* both ends of the link in a foreign partition */
+ seq_puts(s, " foreign");
+ else
+ /* link straddles a partition */
+ seq_puts(s, " shared");
+
+ /*
+			 * Unlikely, but strictly we should query the LLP config
+ * registers because an NL4R can be configured to run
+ * NL3 protocol, even when not talking to an NL3 router.
+ * Ditto for node-node.
+ */
+ seq_printf(s, " endpoint %s-%d, protocol %s\n",
+ p->location, ptdata[pt].conn_port,
+ (SN_HWPERF_IS_NL3ROUTER(obj) ||
+ SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4");
+ }
+ vfree(ptdata);
+ }
+
+ return 0;
+}
+
+static void *sn_topology_start(struct seq_file *s, loff_t * pos)
+{
+ struct sn_hwperf_object_info *objs = s->private;
+
+ if (*pos < sn_hwperf_obj_cnt)
+ return (void *)(objs + *pos);
+
+ return NULL;
+}
+
+static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos)
+{
+ ++*pos;
+ return sn_topology_start(s, pos);
+}
+
+static void sn_topology_stop(struct seq_file *m, void *v)
+{
+ return;
+}
+
+/*
+ * /proc/sgi_sn/sn_topology, read-only using seq_file
+ */
+static struct seq_operations sn_topology_seq_ops = {
+ .start = sn_topology_start,
+ .next = sn_topology_next,
+ .stop = sn_topology_stop,
+ .show = sn_topology_show
+};
+
+struct sn_hwperf_op_info {
+ u64 op;
+ struct sn_hwperf_ioctl_args *a;
+ void *p;
+ int *v0;
+ int ret;
+};
+
+static void sn_hwperf_call_sal(void *info)
+{
+ struct sn_hwperf_op_info *op_info = info;
+ int r;
+
+ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op,
+ op_info->a->arg, op_info->a->sz,
+ (u64) op_info->p, 0, 0, op_info->v0);
+ op_info->ret = r;
+}
+
+static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
+{
+ u32 cpu;
+ u32 use_ipi;
+ int r = 0;
+ cpumask_t save_allowed;
+
+ cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
+ use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
+ op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK;
+
+ if (cpu != SN_HWPERF_ARG_ANY_CPU) {
+ if (cpu >= num_online_cpus() || !cpu_online(cpu)) {
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) {
+ /* don't care, or already on correct cpu */
+ sn_hwperf_call_sal(op_info);
+ }
+ else {
+ if (use_ipi) {
+ /* use an interprocessor interrupt to call SAL */
+ smp_call_function_single(cpu, sn_hwperf_call_sal,
+ op_info, 1, 1);
+ }
+ else {
+ /* migrate the task before calling SAL */
+ save_allowed = current->cpus_allowed;
+ set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ sn_hwperf_call_sal(op_info);
+ set_cpus_allowed(current, save_allowed);
+ }
+ }
+ r = op_info->ret;
+
+out:
+ return r;
+}
+
+/* map SAL hwperf error code to system error code */
+static int sn_hwperf_map_err(int hwperf_err)
+{
+ int e;
+
+ switch(hwperf_err) {
+ case SN_HWPERF_OP_OK:
+ e = 0;
+ break;
+
+ case SN_HWPERF_OP_NOMEM:
+ e = -ENOMEM;
+ break;
+
+ case SN_HWPERF_OP_NO_PERM:
+ e = -EPERM;
+ break;
+
+ case SN_HWPERF_OP_IO_ERROR:
+ e = -EIO;
+ break;
+
+ case SN_HWPERF_OP_BUSY:
+ case SN_HWPERF_OP_RECONFIGURE:
+ e = -EAGAIN;
+ break;
+
+ case SN_HWPERF_OP_INVAL:
+ default:
+ e = -EINVAL;
+ break;
+ }
+
+ return e;
+}
+
+/*
+ * ioctl for "sn_hwperf" misc device
+ */
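+/*
+ * Hypothetical userspace sketch (not part of this driver; assumes the
+ * usual <asm/sn/sn2/sn_hwperf.h> definitions are visible and that the
+ * op encodes a copy-out of the result buffer):
+ *
+ *	struct sn_hwperf_ioctl_args a = { 0 };
+ *	u64 nasid;
+ *	int fd = open("/dev/sn_hwperf", O_RDONLY);
+ *
+ *	a.arg = 0;			// compact node id 0
+ *	a.sz = sizeof(nasid);
+ *	a.ptr = (u64) &nasid;		// buffer for the result
+ *	ioctl(fd, SN_HWPERF_GET_NODE_NASID, &a);
+ */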
+static int
+sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
+{
+ struct sn_hwperf_ioctl_args a;
+ struct cpuinfo_ia64 *cdata;
+ struct sn_hwperf_object_info *objs;
+ struct sn_hwperf_object_info *cpuobj;
+ struct sn_hwperf_op_info op_info;
+ void *p = NULL;
+ int nobj;
+ char slice;
+ int node;
+ int r;
+ int v0;
+ int i;
+ int j;
+
+ unlock_kernel();
+
+ /* only user requests are allowed here */
+ if ((op & SN_HWPERF_OP_MASK) < 10) {
+ r = -EINVAL;
+ goto error;
+ }
+ r = copy_from_user(&a, (const void __user *)arg,
+ sizeof(struct sn_hwperf_ioctl_args));
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+
+ /*
+ * Allocate memory to hold a kernel copy of the user buffer. The
+ * buffer contents are either copied in or out (or both) of user
+ * space depending on the flags encoded in the requested operation.
+ */
+ if (a.ptr) {
+ p = vmalloc(a.sz);
+ if (!p) {
+ r = -ENOMEM;
+ goto error;
+ }
+ }
+
+ if (op & SN_HWPERF_OP_MEM_COPYIN) {
+ r = copy_from_user(p, (const void __user *)a.ptr, a.sz);
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+ }
+
+ switch (op) {
+ case SN_HWPERF_GET_CPU_INFO:
+ if (a.sz == sizeof(u64)) {
+ /* special case to get size needed */
+ *(u64 *) p = (u64) num_online_cpus() *
+ sizeof(struct sn_hwperf_object_info);
+ } else
+ if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) {
+ r = -ENOMEM;
+ goto error;
+ } else
+ if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ memset(p, 0, a.sz);
+ for (i = 0; i < nobj; i++) {
+ node = sn_hwperf_obj_to_cnode(objs + i);
+ for_each_online_cpu(j) {
+ if (node != cpu_to_node(j))
+ continue;
+ cpuobj = (struct sn_hwperf_object_info *) p + j;
+ slice = 'a' + cpuid_to_slice(j);
+ cdata = cpu_data(j);
+ cpuobj->id = j;
+ snprintf(cpuobj->name,
+ sizeof(cpuobj->name),
+ "CPU %luMHz %s",
+ cdata->proc_freq / 1000000,
+ cdata->vendor);
+ snprintf(cpuobj->location,
+ sizeof(cpuobj->location),
+ "%s%c", objs[i].location,
+ slice);
+ }
+ }
+
+ vfree(objs);
+ }
+ break;
+
+ case SN_HWPERF_GET_NODE_NASID:
+ if (a.sz != sizeof(u64) ||
+ (node = a.arg) < 0 || node >= numionodes) {
+ r = -EINVAL;
+ goto error;
+ }
+ *(u64 *)p = (u64)cnodeid_to_nasid(node);
+ break;
+
+ case SN_HWPERF_GET_OBJ_NODE:
+ if (a.sz != sizeof(u64) || a.arg < 0) {
+ r = -EINVAL;
+ goto error;
+ }
+ if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
+ if (a.arg >= nobj) {
+ r = -EINVAL;
+ vfree(objs);
+ goto error;
+ }
+ if (objs[(i = a.arg)].id != a.arg) {
+ for (i = 0; i < nobj; i++) {
+ if (objs[i].id == a.arg)
+ break;
+ }
+ }
+ if (i == nobj) {
+ r = -EINVAL;
+ vfree(objs);
+ goto error;
+ }
+ *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i);
+ vfree(objs);
+ }
+ break;
+
+ case SN_HWPERF_GET_MMRS:
+ case SN_HWPERF_SET_MMRS:
+ case SN_HWPERF_OBJECT_DISTANCE:
+ op_info.p = p;
+ op_info.a = &a;
+ op_info.v0 = &v0;
+ op_info.op = op;
+ r = sn_hwperf_op_cpu(&op_info);
+ if (r) {
+ r = sn_hwperf_map_err(r);
+ goto error;
+ }
+ break;
+
+ default:
+ /* all other ops are a direct SAL call */
+ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op,
+ a.arg, a.sz, (u64) p, 0, 0, &v0);
+ if (r) {
+ r = sn_hwperf_map_err(r);
+ goto error;
+ }
+ a.v0 = v0;
+ break;
+ }
+
+ if (op & SN_HWPERF_OP_MEM_COPYOUT) {
+ r = copy_to_user((void __user *)a.ptr, p, a.sz);
+ if (r != 0) {
+ r = -EFAULT;
+ goto error;
+ }
+ }
+
+error:
+ vfree(p);
+
+ lock_kernel();
+ return r;
+}
+
+static struct file_operations sn_hwperf_fops = {
+ .ioctl = sn_hwperf_ioctl,
+};
+
+static struct miscdevice sn_hwperf_dev = {
+ MISC_DYNAMIC_MINOR,
+ "sn_hwperf",
+ &sn_hwperf_fops
+};
+
+static int sn_hwperf_init(void)
+{
+ u64 v;
+ int salr;
+ int e = 0;
+
+ /* single threaded, once-only initialization */
+ down(&sn_hwperf_init_mutex);
+ if (sn_hwperf_salheap) {
+ up(&sn_hwperf_init_mutex);
+ return e;
+ }
+
+ /*
+ * The PROM code needs a fixed reference node. For convenience the
+ * same node as the console I/O is used.
+ */
+ sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid();
+
+ /*
+ * Request the needed size and install the PROM scratch area.
+ * The PROM keeps various tracking bits in this memory area.
+ */
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ (u64) SN_HWPERF_GET_HEAPSIZE, 0,
+ (u64) sizeof(u64), (u64) &v, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+
+ if ((sn_hwperf_salheap = vmalloc(v)) == NULL) {
+ e = -ENOMEM;
+ goto out;
+ }
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_INSTALL_HEAP, 0, v,
+ (u64) sn_hwperf_salheap, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+
+ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid,
+ SN_HWPERF_OBJECT_COUNT, 0,
+ sizeof(u64), (u64) &v, 0, 0, NULL);
+ if (salr != SN_HWPERF_OP_OK) {
+ e = -EINVAL;
+ goto out;
+ }
+ sn_hwperf_obj_cnt = (int)v;
+
+out:
+ if (e < 0 && sn_hwperf_salheap) {
+ vfree(sn_hwperf_salheap);
+ sn_hwperf_salheap = NULL;
+ sn_hwperf_obj_cnt = 0;
+ }
+
+ if (!e) {
+ /*
+ * Register a dynamic misc device for ioctl. Platforms
+ * supporting hotplug will create /dev/sn_hwperf, else
+		 * users can look up the minor number in /proc/misc.
+ */
+ if ((e = misc_register(&sn_hwperf_dev)) != 0) {
+ printk(KERN_ERR "sn_hwperf_init: misc register "
+ "for \"sn_hwperf\" failed, err %d\n", e);
+ }
+ }
+
+ up(&sn_hwperf_init_mutex);
+ return e;
+}
+
+int sn_topology_open(struct inode *inode, struct file *file)
+{
+ int e;
+ struct seq_file *seq;
+ struct sn_hwperf_object_info *objbuf;
+ int nobj;
+
+ if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) {
+ e = seq_open(file, &sn_topology_seq_ops);
+ seq = file->private_data;
+ seq->private = objbuf;
+ }
+
+ return e;
+}
+
+int sn_topology_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ vfree(seq->private);
+ return seq_release(inode, file);
+}
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
new file mode 100644
index 000000000000..6a80fca807b9
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -0,0 +1,149 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <linux/config.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <asm/sn/sn_sal.h>
+
+static int partition_id_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", sn_local_partid());
+ return 0;
+}
+
+static int partition_id_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, partition_id_show, NULL);
+}
+
+static int system_serial_number_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%s\n", sn_system_serial_number());
+ return 0;
+}
+
+static int system_serial_number_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, system_serial_number_show, NULL);
+}
+
+static int licenseID_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "0x%lx\n", sn_partition_serial_number_val());
+ return 0;
+}
+
+static int licenseID_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, licenseID_show, NULL);
+}
+
+/*
+ * Enable forced interrupt by default.
+ * When set, the sn interrupt handler writes the force interrupt register on
+ * the bridge chip. The hardware will then send an interrupt message if the
+ * interrupt line is active. This mimics a level-sensitive interrupt.
+ */
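+/*
+ * Runtime toggle (sketch): writing '0' as the first character to
+ * /proc/sgi_sn/sn_force_interrupt disables it, anything else enables it.
+ */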
+int sn_force_interrupt_flag = 1;
+
+static int sn_force_interrupt_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "Force interrupt is %s\n",
+ sn_force_interrupt_flag ? "enabled" : "disabled");
+ return 0;
+}
+
+static ssize_t sn_force_interrupt_write_proc(struct file *file,
+ const char __user *buffer, size_t count, loff_t *data)
+{
+ char val;
+
+ if (copy_from_user(&val, buffer, 1))
+ return -EFAULT;
+
+ sn_force_interrupt_flag = (val == '0') ? 0 : 1;
+ return count;
+}
+
+static int sn_force_interrupt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sn_force_interrupt_show, NULL);
+}
+
+static int coherence_id_show(struct seq_file *s, void *p)
+{
+ seq_printf(s, "%d\n", partition_coherence_id());
+
+ return 0;
+}
+
+static int coherence_id_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, coherence_id_show, NULL);
+}
+
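+/*
+ * Create a read-only /proc entry backed by seq_file. The
+ * file_operations struct is kmalloc'd per entry so callers can patch
+ * in extra methods (see the sn_force_interrupt write hook below).
+ */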
+static struct proc_dir_entry *sn_procfs_create_entry(
+ const char *name, struct proc_dir_entry *parent,
+ int (*openfunc)(struct inode *, struct file *),
+ int (*releasefunc)(struct inode *, struct file *))
+{
+ struct proc_dir_entry *e = create_proc_entry(name, 0444, parent);
+
+ if (e) {
+ e->proc_fops = (struct file_operations *)kmalloc(
+ sizeof(struct file_operations), GFP_KERNEL);
+ if (e->proc_fops) {
+ memset(e->proc_fops, 0, sizeof(struct file_operations));
+ e->proc_fops->open = openfunc;
+ e->proc_fops->read = seq_read;
+ e->proc_fops->llseek = seq_lseek;
+ e->proc_fops->release = releasefunc;
+ }
+ }
+
+ return e;
+}
+
+/* /proc/sgi_sn/sn_topology uses seq_file, see sn_hwperf.c */
+extern int sn_topology_open(struct inode *, struct file *);
+extern int sn_topology_release(struct inode *, struct file *);
+
+void register_sn_procfs(void)
+{
+ static struct proc_dir_entry *sgi_proc_dir = NULL;
+ struct proc_dir_entry *e;
+
+ BUG_ON(sgi_proc_dir != NULL);
+ if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
+ return;
+
+ sn_procfs_create_entry("partition_id", sgi_proc_dir,
+ partition_id_open, single_release);
+
+ sn_procfs_create_entry("system_serial_number", sgi_proc_dir,
+ system_serial_number_open, single_release);
+
+ sn_procfs_create_entry("licenseID", sgi_proc_dir,
+ licenseID_open, single_release);
+
+ e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir,
+ sn_force_interrupt_open, single_release);
+ if (e)
+ e->proc_fops->write = sn_force_interrupt_write_proc;
+
+ sn_procfs_create_entry("coherence_id", sgi_proc_dir,
+ coherence_id_open, single_release);
+
+ sn_procfs_create_entry("sn_topology", sgi_proc_dir,
+ sn_topology_open, sn_topology_release);
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
new file mode 100644
index 000000000000..deb9baf4d473
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/ia64/sn/kernel/sn2/timer.c
+ *
+ * Copyright (C) 2003 Silicon Graphics, Inc.
+ * Copyright (C) 2003 Hewlett-Packard Co
+ * David Mosberger <davidm@hpl.hp.com>: updated for new timer-interpolation infrastructure
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <asm/hw_irq.h>
+#include <asm/system.h>
+
+#include <asm/sn/leds.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/clksupport.h>
+
+extern unsigned long sn_rtc_cycles_per_second;
+
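+/*
+ * The shub RTC is a free-running 55-bit MMIO counter (hence the mask
+ * below); its frequency and address are filled in at boot by
+ * sn_timer_init().
+ */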
+static struct time_interpolator sn2_interpolator = {
+ .drift = -1,
+ .shift = 10,
+ .mask = (1LL << 55) - 1,
+ .source = TIME_SOURCE_MMIO64
+};
+
+void __init sn_timer_init(void)
+{
+ sn2_interpolator.frequency = sn_rtc_cycles_per_second;
+ sn2_interpolator.addr = RTC_COUNTER_ADDR;
+ register_time_interpolator(&sn2_interpolator);
+}
diff --git a/arch/ia64/sn/kernel/sn2/timer_interrupt.c b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
new file mode 100644
index 000000000000..cde7375390b0
--- /dev/null
+++ b/arch/ia64/sn/kernel/sn2/timer_interrupt.c
@@ -0,0 +1,63 @@
+/*
+ *
+ *
+ * Copyright (c) 2003 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like. Any license provided herein, whether implied or
+ * otherwise, applies only to this software file. Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA 94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/NoticeExplan
+ */
+
+#include <linux/interrupt.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/leds.h>
+
+extern void sn_lb_int_war_check(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+
+#define SN_LB_INT_WAR_INTERVAL 100
+
+void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	/* LED heartbeat: toggle the CPU heartbeat LED every HZ/2 ticks */
+ if (!pda->hb_count--) {
+ pda->hb_count = HZ / 2;
+ set_led_bits(pda->hb_state ^=
+ LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
+ }
+
+ if (enable_shub_wars_1_1()) {
+ /* Bugfix code for SHUB 1.1 */
+ if (pda->pio_shub_war_cam_addr)
+ *pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
+ }
+ if (pda->sn_lb_int_war_ticks == 0)
+ sn_lb_int_war_check();
+ pda->sn_lb_int_war_ticks++;
+ if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
+ pda->sn_lb_int_war_ticks = 0;
+}
diff --git a/arch/ia64/sn/pci/Makefile b/arch/ia64/sn/pci/Makefile
new file mode 100644
index 000000000000..b5dca0097a8e
--- /dev/null
+++ b/arch/ia64/sn/pci/Makefile
@@ -0,0 +1,10 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn pci general routines.
+
+obj-y := pci_dma.o pcibr/
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
new file mode 100644
index 000000000000..f680824f819d
--- /dev/null
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -0,0 +1,363 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
+ *
+ * Routines for PCI DMA mapping. See Documentation/DMA-API.txt for
+ * a description of how these routines should be used.
+ */
+
+#include <linux/module.h>
+#include <asm/dma.h>
+#include <asm/sn/sn_sal.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+
+#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+
+/**
+ * sn_dma_supported - test a DMA mask
+ * @dev: device to test
+ * @mask: DMA mask to test
+ *
+ * Return whether the given PCI device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function. Of course, SN only supports devices that have 32 or more
+ * address bits when using the PMU.
+ */
+int sn_dma_supported(struct device *dev, u64 mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ if (mask < 0x7fffffff)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(sn_dma_supported);
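+
+/*
+ * Illustrative check (hypothetical caller, not part of this file): a
+ * device that can only drive 24 address bits would be rejected, e.g.
+ *
+ *	if (!sn_dma_supported(&pdev->dev, 0x00ffffffUL))
+ *		return -EIO;
+ */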
+
+/**
+ * sn_dma_set_mask - set the DMA mask
+ * @dev: device to set
+ * @dma_mask: new mask
+ *
+ * Set @dev's DMA mask if the hw supports it.
+ */
+int sn_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ if (!sn_dma_supported(dev, dma_mask))
+ return 0;
+
+ *dev->dma_mask = dma_mask;
+ return 1;
+}
+EXPORT_SYMBOL(sn_dma_set_mask);
+
+/**
+ * sn_dma_alloc_coherent - allocate memory for coherent DMA
+ * @dev: device to allocate for
+ * @size: size of the region
+ * @dma_handle: DMA (bus) address
+ * @flags: memory allocation flags
+ *
+ * dma_alloc_coherent() returns a pointer to a memory region suitable for
+ * coherent DMA traffic to/from a PCI device. On SN platforms, this means
+ * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
+ *
+ * This interface is usually used for "command" streams (e.g. the command
+ * queue for a SCSI controller). See Documentation/DMA-API.txt for
+ * more information.
+ */
+void *sn_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t * dma_handle, int flags)
+{
+ void *cpuaddr;
+ unsigned long phys_addr;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ /*
+ * Allocate the memory.
+ * FIXME: We should be doing alloc_pages_node for the node closest
+ * to the PCI device.
+ */
+ if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
+ return NULL;
+
+ memset(cpuaddr, 0x0, size);
+
+ /* physical addr. of the memory we just got */
+ phys_addr = __pa(cpuaddr);
+
+ /*
+ * 64 bit address translations should never fail.
+ * 32 bit translations can fail if there are insufficient mapping
+ * resources.
+ */
+
+ *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
+ SN_PCIDMA_CONSISTENT);
+ if (!*dma_handle) {
+ printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ free_pages((unsigned long)cpuaddr, get_order(size));
+ return NULL;
+ }
+
+ return cpuaddr;
+}
+EXPORT_SYMBOL(sn_dma_alloc_coherent);
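+
+/*
+ * Illustrative use (hypothetical driver code, not from this file):
+ *
+ *	ring = dma_alloc_coherent(&pdev->dev, ring_size, &ring_dma,
+ *				  GFP_KERNEL);
+ *
+ * On SN, the generic DMA API reaches this routine through the ia64
+ * machine vector.
+ */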
+
+/**
+ * sn_dma_free_coherent - free memory associated with coherent DMAable region
+ * @dev: device to free for
+ * @size: size to free
+ * @cpu_addr: kernel virtual address to free
+ * @dma_handle: DMA address associated with this region
+ *
+ * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
+ * any associated IOMMU mappings.
+ */
+void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pcibr_dma_unmap(pcidev_info, dma_handle, 0);
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+EXPORT_SYMBOL(sn_dma_free_coherent);
+
+/**
+ * sn_dma_map_single - map a single page for DMA
+ * @dev: device to map for
+ * @cpu_addr: kernel virtual address of the region to map
+ * @size: size of the region
+ * @direction: DMA direction
+ *
+ * Map the region pointed to by @cpu_addr for DMA and return the
+ * DMA address.
+ *
+ * We map this to the one-step pcibr_dmamap_trans interface rather than
+ * the two-step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
+ * no way of saving the dmamap handle from the alloc for the later free
+ * (which is pretty much unacceptable).
+ *
+ * TODO: simplify our interface;
+ * figure out how to save the dmamap handle so we can use the two-step
+ * interface.
+ */
+dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ int direction)
+{
+ dma_addr_t dma_addr;
+ unsigned long phys_addr;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ phys_addr = __pa(cpu_addr);
+ dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
+ if (!dma_addr) {
+ printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+ return 0;
+ }
+ return dma_addr;
+}
+EXPORT_SYMBOL(sn_dma_map_single);
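+
+/*
+ * Illustrative use (hypothetical, not from this file): mapping a receive
+ * buffer for streaming DMA:
+ *
+ *	dma = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+ */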
+
+/**
+ * sn_dma_unmap_single - unmap a DMA mapped page
+ * @dev: device to sync
+ * @dma_addr: DMA address to sync
+ * @size: size of region
+ * @direction: DMA direction
+ *
+ * This routine is supposed to sync the DMA region specified
+ * by @dma_addr into the coherence domain. On SN, we're always cache
+ * coherent, so we just need to free any ATEs associated with this mapping.
+ */
+void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+ BUG_ON(dev->bus != &pci_bus_type);
+ pcibr_dma_unmap(pcidev_info, dma_addr, direction);
+}
+EXPORT_SYMBOL(sn_dma_unmap_single);
+
+/**
+ * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * @dev: device to unmap
+ * @sg: scatterlist to unmap
+ * @nhwentries: number of scatterlist entries
+ * @direction: DMA direction
+ *
+ * Unmap a set of streaming mode DMA translations.
+ */
+void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nhwentries, int direction)
+{
+ int i;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ for (i = 0; i < nhwentries; i++, sg++) {
+ pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
+		sg->dma_address = 0;
+ sg->dma_length = 0;
+ }
+}
+EXPORT_SYMBOL(sn_dma_unmap_sg);
+
+/**
+ * sn_dma_map_sg - map a scatterlist for DMA
+ * @dev: device to map for
+ * @sg: scatterlist to map
+ * @nhwentries: number of entries
+ * @direction: direction of the DMA transaction
+ *
+ * Maps each entry of @sg for DMA.
+ */
+int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ int direction)
+{
+ unsigned long phys_addr;
+ struct scatterlist *saved_sg = sg;
+ struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
+ int i;
+
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ /*
+	 * Set up a DMA address for each entry in the scatterlist.
+ */
+ for (i = 0; i < nhwentries; i++, sg++) {
+ phys_addr = SG_ENT_PHYS_ADDRESS(sg);
+ sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
+ sg->length, 0);
+
+ if (!sg->dma_address) {
+ printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
+
+ /*
+ * Free any successfully allocated entries.
+ */
+ if (i > 0)
+ sn_dma_unmap_sg(dev, saved_sg, i, direction);
+ return 0;
+ }
+
+ sg->dma_length = sg->length;
+ }
+
+ return nhwentries;
+}
+EXPORT_SYMBOL(sn_dma_map_sg);
+
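+/*
+ * SN platforms are fully cache coherent (see sn_dma_unmap_single above),
+ * so the dma_sync_* entry points below have nothing to do beyond checking
+ * that they were handed a PCI device.
+ */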
+void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
+
+void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_single_for_device);
+
+void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
+
+void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+}
+EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
+
+int sn_dma_mapping_error(dma_addr_t dma_addr)
+{
+ return 0;
+}
+EXPORT_SYMBOL(sn_dma_mapping_error);
+
+char *sn_pci_get_legacy_mem(struct pci_bus *bus)
+{
+ if (!SN_PCIBUS_BUSSOFT(bus))
+ return ERR_PTR(-ENODEV);
+
+ return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem | __IA64_UNCACHED_OFFSET);
+}
+
+int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
+{
+ unsigned long addr;
+ int ret;
+
+ if (!SN_PCIBUS_BUSSOFT(bus))
+ return -ENODEV;
+
+ addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+ addr += port;
+
+ ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);
+
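+	/*
+	 * ia64_sn_probe_mem() result handling (inferred from the code
+	 * below): a return of 1 means the probed address is not populated,
+	 * so the read returns all ones; 2 means the access was illegal.
+	 */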
+ if (ret == 2)
+ return -EINVAL;
+
+ if (ret == 1)
+ *val = -1;
+
+ return size;
+}
+
+int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
+{
+ int ret = size;
+ unsigned long paddr;
+ unsigned long *addr;
+
+ if (!SN_PCIBUS_BUSSOFT(bus)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* Put the phys addr in uncached space */
+ paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
+ paddr += port;
+ addr = (unsigned long *)paddr;
+
+ switch (size) {
+ case 1:
+ *(volatile u8 *)(addr) = (u8)(val);
+ break;
+ case 2:
+ *(volatile u16 *)(addr) = (u16)(val);
+ break;
+ case 4:
+ *(volatile u32 *)(addr) = (u32)(val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ out:
+ return ret;
+}
diff --git a/arch/ia64/sn/pci/pcibr/Makefile b/arch/ia64/sn/pci/pcibr/Makefile
new file mode 100644
index 000000000000..1850c4a94c41
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/Makefile
@@ -0,0 +1,11 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# Makefile for the sn2 io routines.
+
+obj-y += pcibr_dma.o pcibr_reg.o \
+ pcibr_ate.o pcibr_provider.o
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
new file mode 100644
index 000000000000..9d6854666f9b
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c
@@ -0,0 +1,188 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/sn/sn_sal.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+
+int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
+
+/*
+ * mark_ate: Mark the ATEs as either free or in use.
+ */
+static void mark_ate(struct ate_resource *ate_resource, int start, int number,
+ uint64_t value)
+{
+
+ uint64_t *ate = ate_resource->ate;
+ int index;
+ int length = 0;
+
+ for (index = start; length < number; index++, length++)
+ ate[index] = value;
+
+}
+
+/*
+ * find_free_ate: Find the first free ATE index, starting from the given
+ * index, that begins a run of the desired number of consecutive free entries.
+ */
+static int find_free_ate(struct ate_resource *ate_resource, int start,
+ int count)
+{
+
+ uint64_t *ate = ate_resource->ate;
+ int index;
+ int start_free;
+
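+	/*
+	 * First-fit scan: look for a run of "count" consecutive free
+	 * (zero) entries; on hitting an allocated entry, resume the
+	 * outer scan just past it. Returns -1 if no such run exists.
+	 */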
+ for (index = start; index < ate_resource->num_ate;) {
+ if (!ate[index]) {
+ int i;
+ int free;
+ free = 0;
+ start_free = index; /* Found start free ate */
+ for (i = start_free; i < ate_resource->num_ate; i++) {
+ if (!ate[i]) { /* This is free */
+ if (++free == count)
+ return start_free;
+ } else {
+ index = i + 1;
+ break;
+ }
+			}
+			if (i == ate_resource->num_ate)
+				return -1; /* remaining free run too short */
+ } else
+ index++; /* Try next ate */
+ }
+
+ return -1;
+}
+
+/*
+ * free_ate_resource: Free the requested number of ATEs.
+ */
+static inline void free_ate_resource(struct ate_resource *ate_resource,
+ int start)
+{
+
+ mark_ate(ate_resource, start, ate_resource->ate[start], 0);
+ if ((ate_resource->lowest_free_index > start) ||
+ (ate_resource->lowest_free_index < 0))
+ ate_resource->lowest_free_index = start;
+
+}
+
+/*
+ * alloc_ate_resource: Allocate the requested number of ATEs.
+ */
+static inline int alloc_ate_resource(struct ate_resource *ate_resource,
+ int ate_needed)
+{
+
+ int start_index;
+
+ /*
+	 * Check for ATE exhaustion.
+ */
+ if (ate_resource->lowest_free_index < 0)
+ return -1;
+
+ /*
+	 * Find the required number of consecutive free ATEs.
+ */
+ start_index =
+ find_free_ate(ate_resource, ate_resource->lowest_free_index,
+ ate_needed);
+ if (start_index >= 0)
+ mark_ate(ate_resource, start_index, ate_needed, ate_needed);
+
+ ate_resource->lowest_free_index =
+ find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
+
+ return start_index;
+}
+
+/*
+ * Allocate "count" contiguous Bridge Address Translation Entries
+ * on the specified bridge to be used for PCI to XTALK mappings.
+ * Indices in the rm map range from 1..num_entries; indices returned
+ * to the caller range from 0..num_entries-1.
+ *
+ * Return the start index on success, -1 on failure.
+ */
+int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
+{
+ int status = 0;
+ uint64_t flag;
+
+ flag = pcibr_lock(pcibus_info);
+ status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
+
+ if (status < 0) {
+ /* Failed to allocate */
+ pcibr_unlock(pcibus_info, flag);
+ return -1;
+ }
+
+ pcibr_unlock(pcibus_info, flag);
+
+ return status;
+}
+
+/*
+ * Return the address of the specified Address Translation Entry, using
+ * either the Bridge internal maps or the external map RAM, as appropriate.
+ */
+static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
+ int ate_index)
+{
+ if (ate_index < pcibus_info->pbi_int_ate_size) {
+ return pcireg_int_ate_addr(pcibus_info, ate_index);
+ }
+ panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
+}
+
+/*
+ * Write "count" consecutive ATEs starting at ate_index, advancing the
+ * mapped address by one I/O page per entry.
+ */
+inline void
+ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
+ volatile uint64_t ate)
+{
+ while (count-- > 0) {
+ if (ate_index < pcibus_info->pbi_int_ate_size) {
+ pcireg_int_ate_set(pcibus_info, ate_index, ate);
+ } else {
+ panic("ate_write: invalid ate_index 0x%x", ate_index);
+ }
+ ate_index++;
+ ate += IOPGSIZE;
+ }
+
+ pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
+}
+
+void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
+{
+
+ volatile uint64_t ate;
+ int count;
+ uint64_t flags;
+
+ if (pcibr_invalidate_ate) {
+ /* For debugging purposes, clear the valid bit in the ATE */
+ ate = *pcibr_ate_addr(pcibus_info, index);
+ count = pcibus_info->pbi_int_ate_resource.ate[index];
+ ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
+ }
+
+ flags = pcibr_lock(pcibus_info);
+ free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
+ pcibr_unlock(pcibus_info, flags);
+}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
new file mode 100644
index 000000000000..b1d66ac065c8
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -0,0 +1,379 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/geo.h>
+#include "xtalk/xwidgetdev.h"
+#include "xtalk/hubdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/tiocp.h"
+#include "pci/pic.h"
+#include "pci/pcibr_provider.h"
+#include "pci/tiocp.h"
+#include "tio.h"
+#include <asm/sn/addrs.h>
+
+extern int sn_ioif_inited;
+
+/* =====================================================================
+ * DMA MANAGEMENT
+ *
+ * The Bridge ASIC provides three methods of doing DMA: via a "direct map"
+ * register available in 32-bit PCI space (which selects a contiguous 2G
+ * address space on some other widget), via "direct" addressing via 64-bit
+ * PCI space (all destination information comes from the PCI address,
+ * including transfer attributes), and via a "mapped" region that allows
+ * a bunch of different small mappings to be established with the PMU.
+ *
+ * For efficiency, we prefer the 32-bit direct mapping facility, since it
+ * requires no resource allocations. The advantage of using the PMU over
+ * 64-bit direct is that single-cycle PCI addressing can be used; the
+ * advantage of 64-bit direct over PMU addressing is that we do not have
+ * to allocate entries in the PMU.
+ */
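+
+/*
+ * How pcibr_dma_map() below chooses among these (a summary of the code,
+ * not a separate spec): consistent ("command") mappings use direct64 when
+ * the coherent DMA mask is 64 bits, else a 32-bit ATE mapping; streaming
+ * mappings use direct64 for 64-bit masks, otherwise direct32 is tried
+ * first with ATE32 as the fallback.
+ */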
+
+static uint64_t
+pcibr_dmamap_ate32(struct pcidev_info *info,
+ uint64_t paddr, size_t req_size, uint64_t flags)
+{
+
+ struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
+ struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+ pdi_pcibus_info;
+ uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
+ pdi_linux_pcidev->devfn)) - 1;
+ int ate_count;
+ int ate_index;
+ uint64_t ate_flags = flags | PCI32_ATE_V;
+ uint64_t ate;
+ uint64_t pci_addr;
+ uint64_t xio_addr;
+ uint64_t offset;
+
+	/* PIC in PCI-X mode does not support 32-bit PageMap mode */
+ if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
+ return 0;
+ }
+
+ /* Calculate the number of ATEs needed. */
+ if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
+ ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
+ +req_size /* max mapping bytes */
+ - 1) + 1; /* round UP */
+ } else { /* assume requested target is page aligned */
+ ate_count = IOPG(req_size /* max mapping bytes */
+ - 1) + 1; /* round UP */
+ }
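+	/*
+	 * Worked example (illustrative, assuming IOPG() divides by a 16KB
+	 * I/O page): an arbitrarily aligned 20KB request can straddle three
+	 * pages, and IOPG(16K-1 + 20K-1) + 1 == 3 ATEs accordingly.
+	 */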
+
+	/* Allocate the required number of ATEs. */
+ ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
+ if (ate_index < 0)
+ return 0;
+
+	/* Prefetch is not supported in PCI-X mode */
+ if (IS_PCIX(pcibus_info))
+ ate_flags &= ~(PCI32_ATE_PREF);
+
+ xio_addr =
+ IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ offset = IOPGOFF(xio_addr);
+ ate = ate_flags | (xio_addr - offset);
+
+ /* If PIC, put the targetid in the ATE */
+ if (IS_PIC_SOFT(pcibus_info)) {
+ ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
+ }
+ ate_write(pcibus_info, ate_index, ate_count, ate);
+
+ /*
+ * Set up the DMA mapped Address.
+ */
+ pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
+
+ /*
+ * If swap was set in device in pcibr_endian_set()
+ * we need to turn swapping on.
+ */
+ if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
+ ATE_SWAP_ON(pci_addr);
+
+ return pci_addr;
+}
+
+static uint64_t
+pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
+ uint64_t dma_attributes)
+{
+ struct pcibus_info *pcibus_info = (struct pcibus_info *)
+ ((info->pdi_host_pcidev_info)->pdi_pcibus_info);
+ uint64_t pci_addr;
+
+ /* Translate to Crosstalk View of Physical Address */
+ pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr)) | dma_attributes;
+
+ /* Handle Bus mode */
+ if (IS_PCIX(pcibus_info))
+ pci_addr &= ~PCI64_ATTR_PREF;
+
+ /* Handle Bridge Chipset differences */
+ if (IS_PIC_SOFT(pcibus_info)) {
+ pci_addr |=
+ ((uint64_t) pcibus_info->
+ pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
+ } else
+ pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+
+	/* In PCI mode, function zero uses VCHAN0; every other function uses VCHAN1 */
+ if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
+ pci_addr |= PCI64_ATTR_VIRTUAL;
+
+ return pci_addr;
+
+}
+
+static uint64_t
+pcibr_dmatrans_direct32(struct pcidev_info * info,
+ uint64_t paddr, size_t req_size, uint64_t flags)
+{
+
+ struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
+ struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+ pdi_pcibus_info;
+ uint64_t xio_addr;
+
+ uint64_t xio_base;
+ uint64_t offset;
+ uint64_t endoff;
+
+ if (IS_PCIX(pcibus_info)) {
+ return 0;
+ }
+
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+
+ xio_base = pcibus_info->pbi_dir_xbase;
+ offset = xio_addr - xio_base;
+ endoff = req_size + offset;
+ if ((req_size > (1ULL << 31)) || /* Too Big */
+ (xio_addr < xio_base) || /* Out of range for mappings */
+ (endoff > (1ULL << 31))) { /* Too Big */
+ return 0;
+ }
+
+ return PCI32_DIRECT_BASE | offset;
+
+}
+
+/*
+ * Wrapper routine for freeing DMA maps.
+ * DMA mappings for Direct 64 and 32 do not have any DMA maps.
+ */
+void
+pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
+ int direction)
+{
+ struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
+ pdi_pcibus_info;
+
+ if (IS_PCI32_MAPPED(dma_handle)) {
+ int ate_index;
+
+ ate_index =
+ IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
+ pcibr_ate_free(pcibus_info, ate_index);
+ }
+}
+
+/*
+ * On SN systems there is a race condition between a PIO read response and
+ * DMAs. In rare cases, the read response may beat the DMA, causing the
+ * driver to think that data in memory is complete and meaningful. This code
+ * eliminates that race. This routine is called by the PIO read routines
+ * after doing the read. For PIC, this routine then forces a fake interrupt
+ * on another line, which is logically associated with the slot that the PIO
+ * is addressed to. It then spins while watching the memory location that
+ * the interrupt is targeted to. When the interrupt response arrives, we
+ * are sure that the DMA has landed in memory and it is safe for the driver
+ * to proceed. For TIOCP, use the Device(x) Write Request Buffer Flush
+ * Bridge register, since it ensures the data has entered the coherence
+ * domain, unlike the PIC Device(x) Write Request Buffer Flush register.
+ */
+
+void sn_dma_flush(uint64_t addr)
+{
+ nasid_t nasid;
+ int is_tio;
+ int wid_num;
+ int i, j;
+ int bwin;
+ uint64_t flags;
+ struct hubdev_info *hubinfo;
+ volatile struct sn_flush_device_list *p;
+ struct sn_flush_nasid_entry *flush_nasid_list;
+
+ if (!sn_ioif_inited)
+ return;
+
+ nasid = NASID_GET(addr);
+ if (-1 == nasid_to_cnodeid(nasid))
+ return;
+
+ hubinfo = (NODEPDA(nasid_to_cnodeid(nasid)))->pdinfo;
+
+	BUG_ON(!hubinfo);
+ is_tio = (nasid & 1);
+ if (is_tio) {
+ wid_num = TIO_SWIN_WIDGETNUM(addr);
+ bwin = TIO_BWIN_WINDOWNUM(addr);
+ } else {
+ wid_num = SWIN_WIDGETNUM(addr);
+ bwin = BWIN_WINDOWNUM(addr);
+ }
+
+ flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
+ if (flush_nasid_list->widget_p == NULL)
+ return;
+ if (bwin > 0) {
+ uint64_t itte = flush_nasid_list->iio_itte[bwin];
+
+ if (is_tio) {
+ wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
+ TIO_ITTE_WIDGET_MASK;
+ } else {
+ wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
+ IIO_ITTE_WIDGET_MASK;
+ }
+ }
+ if (flush_nasid_list->widget_p[wid_num] == NULL)
+ return;
+ p = &flush_nasid_list->widget_p[wid_num][0];
+
+ /* find a matching BAR */
+ for (i = 0; i < DEV_PER_WIDGET; i++) {
+ for (j = 0; j < PCI_ROM_RESOURCE; j++) {
+ if (p->sfdl_bar_list[j].start == 0)
+ break;
+ if (addr >= p->sfdl_bar_list[j].start
+ && addr <= p->sfdl_bar_list[j].end)
+ break;
+ }
+ if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+ break;
+ p++;
+ }
+
+ /* if no matching BAR, return without doing anything. */
+ if (i == DEV_PER_WIDGET)
+ return;
+
+ /*
+ * For TIOCP use the Device(x) Write Request Buffer Flush Bridge
+ * register since it ensures the data has entered the coherence
+ * domain, unlike PIC
+ */
+ if (is_tio) {
+ uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
+ uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
+
+ /* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
+ if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
+ return;
+ } else {
+ pcireg_wrb_flush_get(p->sfdl_pcibus_info,
+ (p->sfdl_slot - 1));
+ }
+ } else {
+ spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
+ sfdl_flush_lock, flags);
+
+ p->sfdl_flush_value = 0;
+
+ /* force an interrupt. */
+ *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+
+ /* wait for the interrupt to come back. */
+ while (*(p->sfdl_flush_addr) != 0x10f) ;
+
+		/* okay, everything is synced up. */
+ spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
+ }
+ return;
+}
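+
+/*
+ * Hypothetical caller sketch (not from this file): an SN PIO read wrapper
+ * issues the MMIO read and then flushes, along the lines of
+ *
+ *	val = *(volatile uint32_t *)addr;
+ *	sn_dma_flush((uint64_t)addr);
+ */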
+
+/*
+ * Wrapper DMA interface. Called from pci_dma.c routines.
+ */
+
+uint64_t
+pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
+ size_t size, unsigned int flags)
+{
+ dma_addr_t dma_handle;
+ struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
+
+ if (flags & SN_PCIDMA_CONSISTENT) {
+ /* sn_pci_alloc_consistent interfaces */
+ if (pcidev->dev.coherent_dma_mask == ~0UL) {
+ dma_handle =
+ pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+ PCI64_ATTR_BAR);
+ } else {
+ dma_handle =
+ (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
+ phys_addr, size,
+ PCI32_ATE_BAR);
+ }
+ } else {
+ /* map_sg/map_single interfaces */
+
+ /* SN cannot support DMA addresses smaller than 32 bits. */
+ if (pcidev->dma_mask < 0x7fffffff) {
+ return 0;
+ }
+
+ if (pcidev->dma_mask == ~0UL) {
+ /*
+ * Handle the most common case: 64 bit cards. This
+ * call should always succeed.
+ */
+
+ dma_handle =
+ pcibr_dmatrans_direct64(pcidev_info, phys_addr,
+ PCI64_ATTR_PREF);
+ } else {
+ /* Handle 32-63 bit cards via direct mapping */
+ dma_handle =
+ pcibr_dmatrans_direct32(pcidev_info, phys_addr,
+ size, 0);
+ if (!dma_handle) {
+ /*
+ * It is a 32 bit card and we cannot do direct mapping,
+ * so we use an ATE.
+ */
+
+ dma_handle =
+ pcibr_dmamap_ate32(pcidev_info, phys_addr,
+ size, PCI32_ATE_PREF);
+ }
+ }
+ }
+
+ return dma_handle;
+}
+
+EXPORT_SYMBOL(sn_dma_flush);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
new file mode 100644
index 000000000000..92bd278cf7ff
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -0,0 +1,170 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <asm/sn/sn_sal.h>
+#include "xtalk/xwidgetdev.h"
+#include <asm/sn/geo.h>
+#include "xtalk/hubdev.h"
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/pcibr_provider.h"
+#include <asm/sn/addrs.h>
+
+
+static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
+{
+ struct ia64_sal_retval ret_stuff;
+ uint64_t busnum;
+ int segment;
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+
+ segment = 0;
+ busnum = soft->pbi_buscommon.bs_persist_busnum;
+ SAL_CALL_NOLOCK(ret_stuff,
+ (u64) SN_SAL_IOIF_ERROR_INTERRUPT,
+ (u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
+
+ return (int)ret_stuff.v0;
+}
+
+/*
+ * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
+ * bridge sends an error interrupt.
+ */
+static irqreturn_t
+pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
+{
+ struct pcibus_info *soft = (struct pcibus_info *)arg;
+
+ if (sal_pcibr_error_interrupt(soft) < 0) {
+ panic("pcibr_error_intr_handler(): Fatal Bridge Error");
+ }
+ return IRQ_HANDLED;
+}
+
+void *
+pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft)
+{
+ int nasid, cnode, j;
+ struct hubdev_info *hubdev_info;
+ struct pcibus_info *soft;
+ struct sn_flush_device_list *sn_flush_device_list;
+
+	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
+ return NULL;
+ }
+
+ /*
+ * Allocate kernel bus soft and copy from prom.
+ */
+
+ soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
+ if (!soft) {
+ return NULL;
+ }
+
+ memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
+ soft->pbi_buscommon.bs_base =
+ (((u64) soft->pbi_buscommon.
+ bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
+
+ spin_lock_init(&soft->pbi_lock);
+
+ /*
+ * register the bridge's error interrupt handler
+ */
+ if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
+ SA_SHIRQ, "PCIBR error", (void *)(soft))) {
+ printk(KERN_WARNING
+ "pcibr cannot allocate interrupt for error handler\n");
+ }
+
+ /*
+ * Update the Bridge with the "kernel" pagesize
+ */
+ if (PAGE_SIZE < 16384) {
+ pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
+ } else {
+ pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
+ }
+
+ nasid = NASID_GET(soft->pbi_buscommon.bs_base);
+ cnode = nasid_to_cnodeid(nasid);
+ hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
+
+ if (hubdev_info->hdi_flush_nasid_list.widget_p) {
+ sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+ widget_p[(int)soft->pbi_buscommon.bs_xid];
+ if (sn_flush_device_list) {
+ for (j = 0; j < DEV_PER_WIDGET;
+ j++, sn_flush_device_list++) {
+ if (sn_flush_device_list->sfdl_slot == -1)
+ continue;
+ if (sn_flush_device_list->
+ sfdl_persistent_busnum ==
+ soft->pbi_buscommon.bs_persist_busnum)
+ sn_flush_device_list->sfdl_pcibus_info =
+ soft;
+ }
+ }
+ }
+
+ /* Setup the PMU ATE map */
+ soft->pbi_int_ate_resource.lowest_free_index = 0;
+	soft->pbi_int_ate_resource.ate =
+	    kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
+	if (!soft->pbi_int_ate_resource.ate) {
+		kfree(soft);
+		return NULL;
+	}
+	memset(soft->pbi_int_ate_resource.ate, 0,
+	       soft->pbi_int_ate_size * sizeof(uint64_t));
+
+ return soft;
+}
+
+void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
+{
+ struct pcidev_info *pcidev_info;
+ struct pcibus_info *pcibus_info;
+ int bit = sn_irq_info->irq_int_bit;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (pcidev_info) {
+ pcibus_info =
+ (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+ pdi_pcibus_info;
+ pcireg_force_intr_set(pcibus_info, bit);
+ }
+}
+
+void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
+{
+ struct pcidev_info *pcidev_info;
+ struct pcibus_info *pcibus_info;
+ int bit = sn_irq_info->irq_int_bit;
+ uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
+
+ pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
+ if (pcidev_info) {
+ pcibus_info =
+ (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
+ pdi_pcibus_info;
+
+ /* Disable the device's IRQ */
+ pcireg_intr_enable_bit_clr(pcibus_info, bit);
+
+ /* Change the device's IRQ */
+ pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
+
+ /* Re-enable the device's IRQ */
+ pcireg_intr_enable_bit_set(pcibus_info, bit);
+
+ pcibr_force_interrupt(sn_irq_info);
+ }
+}
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_reg.c b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
new file mode 100644
index 000000000000..74a74a7d2a13
--- /dev/null
+++ b/arch/ia64/sn/pci/pcibr/pcibr_reg.c
@@ -0,0 +1,282 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include "pci/pcibus_provider_defs.h"
+#include "pci/pcidev.h"
+#include "pci/tiocp.h"
+#include "pci/pic.h"
+#include "pci/pcibr_provider.h"
+
+union br_ptr {
+ struct tiocp tio;
+ struct pic pic;
+};
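+
+/*
+ * Each accessor below overlays the bridge's MMIO base with the union above
+ * and dispatches on pbi_bridge_type, so one entry point serves both the
+ * TIOCP and PIC register layouts. Note that ptr is computed from
+ * pcibus_info before the NULL check, so callers must pass a valid
+ * pcibus_info.
+ */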
+
+/*
+ * Control Register Access -- Read/Write 0000_0020
+ */
+void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_control &= ~bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_wid_control &= ~bits;
+ break;
+ default:
+ panic
+ ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_control |= bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_wid_control |= bits;
+ break;
+ default:
+ panic
+ ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
+ */
+uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ uint64_t ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = ptr->tio.cp_tflush;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = ptr->pic.p_wid_tflush;
+ break;
+ default:
+ panic
+ ("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+
+ /* Read of the Target Flush should always return zero */
+ if (ret != 0)
+ panic("pcireg_tflush_get:Target Flush failed\n");
+
+ return ret;
+}
+
+/*
+ * Interrupt Status Register Access -- Read Only 0000_0100
+ */
+uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ uint64_t ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = ptr->tio.cp_int_status;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = ptr->pic.p_int_status;
+ break;
+ default:
+ panic
+ ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Interrupt Enable Register Access -- Read/Write 0000_0108
+ */
+void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_enable &= ~bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_enable &= ~bits;
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_enable |= bits;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_enable |= bits;
+ break;
+ default:
+ panic
+ ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
+ */
+void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
+ uint64_t addr)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
+ ptr->tio.cp_int_addr[int_n] |=
+ (addr & TIOCP_HOST_INTR_ADDR);
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
+ ptr->pic.p_int_addr[int_n] |=
+ (addr & PIC_HOST_INTR_ADDR);
+ break;
+ default:
+ panic
+ ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
+ */
+void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_force_pin[int_n] = 1;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_force_pin[int_n] = 1;
+ break;
+ default:
+ panic
+ ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+/*
+ * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
+ */
+uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+ uint64_t ret = 0;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ret = ptr->tio.cp_wr_req_buf[device];
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ret = ptr->pic.p_wr_req_buf[device];
+ break;
+ default:
+ panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
+ }
+
+ }
+ /* Read of the Write Buffer Flush should always return zero */
+ return ret;
+}
+
+void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
+ uint64_t val)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+ ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
+ break;
+ case PCIBR_BRIDGETYPE_PIC:
+ ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
+ break;
+ default:
+ panic
+ ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+}
+
+uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
+{
+ union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
+	uint64_t *ret = NULL;
+
+ if (pcibus_info) {
+ switch (pcibus_info->pbi_bridge_type) {
+ case PCIBR_BRIDGETYPE_TIOCP:
+			ret = (uint64_t *)&ptr->tio.cp_int_ate_ram[ate_index];
+			break;
+		case PCIBR_BRIDGETYPE_PIC:
+			ret = (uint64_t *)&ptr->pic.p_int_ate_ram[ate_index];
+			break;
+ default:
+ panic
+ ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
+ (void *)ptr);
+ }
+ }
+ return ret;
+}