author    Ralf Baechle <ralf@linux-mips.org>    2000-02-05 06:47:02 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-02-05 06:47:02 +0000
commit    99a7e12f34b3661a0d1354eef83a0eef4df5e34c (patch)
tree      3560aca9ca86792f9ab7bd87861ea143a1b3c7a3 /arch/sparc64/kernel
parent    e73a04659c0b8cdee4dd40e58630e2cf63afb316 (diff)
Merge with Linux 2.3.38.
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r--  arch/sparc64/kernel/Makefile        |   31
-rw-r--r--  arch/sparc64/kernel/auxio.c         |   27
-rw-r--r--  arch/sparc64/kernel/central.c       |  286
-rw-r--r--  arch/sparc64/kernel/devices.c       |   22
-rw-r--r--  arch/sparc64/kernel/dtlb_backend.S  |    4
-rw-r--r--  arch/sparc64/kernel/ebus.c          |   82
-rw-r--r--  arch/sparc64/kernel/entry.S         |  104
-rw-r--r--  arch/sparc64/kernel/head.S          |   31
-rw-r--r--  arch/sparc64/kernel/ioctl32.c       |   14
-rw-r--r--  arch/sparc64/kernel/iommu_common.c  |  233
-rw-r--r--  arch/sparc64/kernel/iommu_common.h  |   34
-rw-r--r--  arch/sparc64/kernel/ioport.c        |  107
-rw-r--r--  arch/sparc64/kernel/irq.c           |  187
-rw-r--r--  arch/sparc64/kernel/pci.c           |   62
-rw-r--r--  arch/sparc64/kernel/pci_common.c    |  116
-rw-r--r--  arch/sparc64/kernel/pci_impl.h      |    4
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c     |  616
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c    |   72
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c     |  152
-rw-r--r--  arch/sparc64/kernel/power.c         |   14
-rw-r--r--  arch/sparc64/kernel/process.c       |   37
-rw-r--r--  arch/sparc64/kernel/sbus.c          | 1145
-rw-r--r--  arch/sparc64/kernel/semaphore.c     |  174
-rw-r--r--  arch/sparc64/kernel/setup.c         |   89
-rw-r--r--  arch/sparc64/kernel/signal.c        |    2
-rw-r--r--  arch/sparc64/kernel/signal32.c      |   33
-rw-r--r--  arch/sparc64/kernel/smp.c           |   64
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c |   40
-rw-r--r--  arch/sparc64/kernel/starfire.c      |   19
-rw-r--r--  arch/sparc64/kernel/sys32.S         |   11
-rw-r--r--  arch/sparc64/kernel/sys_sparc.c     |   28
-rw-r--r--  arch/sparc64/kernel/sys_sparc32.c   |  350
-rw-r--r--  arch/sparc64/kernel/sys_sunos32.c   |   29
-rw-r--r--  arch/sparc64/kernel/systbls.S       |   22
-rw-r--r--  arch/sparc64/kernel/time.c          |   41
-rw-r--r--  arch/sparc64/kernel/trampoline.S    |  220
-rw-r--r--  arch/sparc64/kernel/traps.c         |   36
-rw-r--r--  arch/sparc64/kernel/ttable.S        |    5
38 files changed, 3273 insertions(+), 1270 deletions(-)
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 34f52698e..c84f5872e 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.46 1999/08/31 04:39:34 davem Exp $
+# $Id: Makefile,v 1.50 1999/12/21 04:02:24 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -17,18 +17,25 @@ all: kernel.o head.o init_task.o
O_TARGET := kernel.o
O_OBJS := process.o setup.o cpu.o idprom.o \
- traps.o devices.o auxio.o ioport.o \
+ traps.o devices.o auxio.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
- unaligned.o sys_sunos32.o sunos_ioctl32.o \
- central.o pci.o pci_common.o pci_iommu.o \
+ unaligned.o central.o pci.o pci_common.o pci_iommu.o \
pci_psycho.o pci_sabre.o starfire.o semaphore.o \
- power.o
+ power.o sbus.o iommu_common.o
OX_OBJS := sparc64_ksyms.o
ifdef CONFIG_PCI
O_OBJS += ebus.o
endif
+ifdef CONFIG_SUNOS_EMUL
+ O_OBJS += sys_sunos32.o sunos_ioctl32.o
+else
+ ifdef CONFIG_SOLARIS_EMUL
+ O_OBJS += sys_sunos32.o sunos_ioctl32.o
+ endif
+endif
+
ifdef CONFIG_SMP
O_OBJS += smp.o trampoline.o
endif
@@ -74,11 +81,13 @@ check_asm: dummy
@echo -e "# error Please issue 'make check_asm' in linux top-level directory first\n# endif\n#endif\n" >> asm_offsets.h
@echo -e "#ifndef CONFIG_SMP\n" >> asm_offsets.h
@echo "#include <linux/config.h>" > tmp.c
+ @echo "#undef __SMP__" >> tmp.c
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
- $(CC) -E tmp.c -o tmp.i
+ $(CC) $(CPPFLAGS) -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo "#include <linux/config.h>" >> check_asm.c
+ @echo "#undef __SMP__" >> check_asm.c
@echo "#undef CONFIG_SMP" >> check_asm.c
@echo "#include <linux/sched.h>" >> check_asm.c
@echo 'struct task_struct _task;' >> check_asm.c
@@ -92,7 +101,7 @@ check_asm: dummy
@rm -f tmp.[ci]
#$(CC) -o check_asm check_asm.c
# <hack> Until we can do this natively, a hack has to take place
- $(CC) $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
+ $(CC) $(CPPFLAGS) $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
$(HOSTCC) -Wa,-Av9a -o check_asm check_asm.s
@rm -f check_asm.s
# </hack>
@@ -104,7 +113,7 @@ check_asm: dummy
@echo "#undef CONFIG_SMP" >> tmp.c
@echo "#define CONFIG_SMP 1" >> tmp.c
@echo "#include <linux/sched.h>" >> tmp.c
- $(CC) -D__SMP__ -E tmp.c -o tmp.i
+ $(CC) $(CPPFLAGS) -D__SMP__ -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo "#include <linux/config.h>" >> check_asm.c
@echo "#undef CONFIG_SMP" >> check_asm.c
@@ -121,7 +130,7 @@ check_asm: dummy
@rm -f tmp.[ci]
#$(CC) -D__SMP__ -o check_asm check_asm.c
# <hack> Until we can do this natively, a hack has to take place
- $(CC) -D__SMP__ $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
+ $(CC) $(CPPFLAGS) -D__SMP__ $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
$(HOSTCC) -Wa,-Av9a -o check_asm check_asm.s
@rm -f check_asm.s
# </hack>
@@ -129,7 +138,7 @@ check_asm: dummy
@rm -f check_asm check_asm.c
@echo -e "\n#else /* SPIN_LOCK_DEBUG */\n" >> asm_offsets.h
@echo "#include <linux/sched.h>" > tmp.c
- $(CC) -D__SMP__ -DSPIN_LOCK_DEBUG -E tmp.c -o tmp.i
+ $(CC) $(CPPFLAGS) -D__SMP__ -DSPIN_LOCK_DEBUG -E tmp.c -o tmp.i
@echo "/* Automatically generated. Do not edit. */" > check_asm.c
@echo "#include <linux/config.h>" >> check_asm.c
@echo "#undef CONFIG_SMP" >> check_asm.c
@@ -146,7 +155,7 @@ check_asm: dummy
@rm -f tmp.[ci]
#$(CC) -D__SMP__ -DSPIN_LOCK_DEBUG -o check_asm check_asm.c
# <hack> Until we can do this natively, a hack has to take place
- $(CC) -D__SMP__ -DSPIN_LOCK_DEBUG $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
+ $(CC) $(CPPFLAGS) -D__SMP__ -DSPIN_LOCK_DEBUG $(CMODEL_CFLAG) -ffixed-g4 -S -o check_asm.s check_asm.c
$(HOSTCC) -Wa,-Av9a -o check_asm check_asm.s
@rm -f check_asm.s
# </hack>
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
index c6bcb6bd7..9be09c3b0 100644
--- a/arch/sparc64/kernel/auxio.c
+++ b/arch/sparc64/kernel/auxio.c
@@ -20,22 +20,21 @@
#include <asm/fhc.h>
/* Probe and map in the Auxiliary I/O register */
-unsigned char *auxio_register;
+unsigned long auxio_register = 0;
void __init auxio_probe(void)
{
- struct linux_sbus *bus;
- struct linux_sbus_device *sdev = 0;
- struct linux_prom_registers auxregs[1];
+ struct sbus_bus *sbus;
+ struct sbus_dev *sdev = 0;
- for_each_sbus(bus) {
- for_each_sbusdev(sdev, bus) {
- if(!strcmp(sdev->prom_name, "auxio")) {
- break;
- }
+ for_each_sbus(sbus) {
+ for_each_sbusdev(sdev, sbus) {
+ if(!strcmp(sdev->prom_name, "auxio"))
+ goto found_sdev;
}
}
+found_sdev:
if (!sdev) {
#ifdef CONFIG_PCI
struct linux_ebus *ebus;
@@ -57,19 +56,15 @@ void __init auxio_probe(void)
}
#endif
if(central_bus) {
- auxio_register = NULL;
+ auxio_register = 0UL;
return;
}
prom_printf("Cannot find auxio node, cannot continue...\n");
prom_halt();
}
- prom_getproperty(sdev->prom_node, "reg", (char *) auxregs, sizeof(auxregs));
- prom_apply_sbus_ranges(sdev->my_bus, auxregs, 0x1, sdev);
/* Map the register both read and write */
- auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0,
- auxregs[0].reg_size,
- "auxiliaryIO",
- auxregs[0].which_io, 0x0);
+ auxio_register = sbus_ioremap(&sdev->resource[0], 0,
+ sdev->reg_addrs[0].reg_size, "auxiliaryIO");
TURN_ON_LED;
}
diff --git a/arch/sparc64/kernel/central.c b/arch/sparc64/kernel/central.c
index 198841f89..2c4fb1355 100644
--- a/arch/sparc64/kernel/central.c
+++ b/arch/sparc64/kernel/central.c
@@ -1,7 +1,7 @@
-/* $Id: central.c,v 1.11 1998/12/14 12:18:16 davem Exp $
+/* $Id: central.c,v 1.13 1999/12/01 10:44:43 davem Exp $
* central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
*
- * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/kernel.h>
@@ -10,6 +10,8 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/fhc.h>
@@ -25,10 +27,80 @@ static inline unsigned long long_align(unsigned long addr)
~(sizeof(unsigned long) - 1));
}
-extern void prom_central_ranges_init(int cnode, struct linux_central *central);
-extern void prom_fhc_ranges_init(int fnode, struct linux_fhc *fhc);
+static void central_ranges_init(int cnode, struct linux_central *central)
+{
+ int success;
+
+ central->num_central_ranges = 0;
+ success = prom_getproperty(central->prom_node, "ranges",
+ (char *) central->central_ranges,
+ sizeof (central->central_ranges));
+ if (success != -1)
+ central->num_central_ranges = (success/sizeof(struct linux_prom_ranges));
+}
-static unsigned long probe_other_fhcs(unsigned long memory_start)
+static void fhc_ranges_init(int fnode, struct linux_fhc *fhc)
+{
+ int success;
+
+ fhc->num_fhc_ranges = 0;
+ success = prom_getproperty(fhc->prom_node, "ranges",
+ (char *) fhc->fhc_ranges,
+ sizeof (fhc->fhc_ranges));
+ if (success != -1)
+ fhc->num_fhc_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Range application routines are exported to various drivers,
+ * so do not __init this.
+ */
+static void adjust_regs(struct linux_prom_registers *regp, int nregs,
+ struct linux_prom_ranges *rangep, int nranges)
+{
+ int regc, rngc;
+
+ for (regc = 0; regc < nregs; regc++) {
+ for (rngc = 0; rngc < nranges; rngc++)
+ if (regp[regc].which_io == rangep[rngc].ot_child_space)
+ break; /* Fount it */
+ if (rngc == nranges) /* oops */
+ prom_printf("adjust_regs: Could not find range with matching bus type...\n");
+ regp[regc].which_io = rangep[rngc].ot_parent_space;
+ regp[regc].phys_addr += rangep[rngc].ot_parent_base;
+ }
+}
+
+/* Apply probed fhc ranges to registers passed, if no ranges return. */
+void apply_fhc_ranges(struct linux_fhc *fhc,
+ struct linux_prom_registers *regs,
+ int nregs)
+{
+ if(fhc->num_fhc_ranges)
+ adjust_regs(regs, nregs, fhc->fhc_ranges,
+ fhc->num_fhc_ranges);
+}
+
+/* Apply probed central ranges to registers passed, if no ranges return. */
+void apply_central_ranges(struct linux_central *central,
+ struct linux_prom_registers *regs, int nregs)
+{
+ if(central->num_central_ranges)
+ adjust_regs(regs, nregs, central->central_ranges,
+ central->num_central_ranges);
+}
+
+void * __init central_alloc_bootmem(unsigned long size)
+{
+ void *ret;
+
+ ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+ if (ret != NULL)
+ memset(ret, 0, size);
+
+ return ret;
+}
+
+static void probe_other_fhcs(void)
{
struct linux_prom64_registers fpregs[6];
char namebuf[128];
@@ -45,9 +117,12 @@ static unsigned long probe_other_fhcs(unsigned long memory_start)
int board;
u32 tmp;
- fhc = (struct linux_fhc *)memory_start;
- memory_start += sizeof(struct linux_fhc);
- memory_start = long_align(memory_start);
+ fhc = (struct linux_fhc *)
+ central_alloc_bootmem(sizeof(struct linux_fhc));
+ if (fhc == NULL) {
+ prom_printf("probe_other_fhcs: Cannot alloc fhc.\n");
+ prom_halt();
+ }
/* Link it into the FHC chain. */
fhc->next = fhc_list;
@@ -59,7 +134,7 @@ static unsigned long probe_other_fhcs(unsigned long memory_start)
fhc->prom_node = node;
prom_getstring(node, "name", namebuf, sizeof(namebuf));
strcpy(fhc->prom_name, namebuf);
- prom_fhc_ranges_init(node, fhc);
+ fhc_ranges_init(node, fhc);
/* Non-central FHC's have 64-bit OBP format registers. */
if(prom_getproperty(node, "reg",
@@ -69,29 +144,23 @@ static unsigned long probe_other_fhcs(unsigned long memory_start)
}
/* Only central FHC needs special ranges applied. */
- fhc->fhc_regs.pregs = (struct fhc_internal_regs *)
- __va(fpregs[0].phys_addr);
- fhc->fhc_regs.ireg = (struct fhc_ign_reg *)
- __va(fpregs[1].phys_addr);
- fhc->fhc_regs.ffregs = (struct fhc_fanfail_regs *)
- __va(fpregs[2].phys_addr);
- fhc->fhc_regs.sregs = (struct fhc_system_regs *)
- __va(fpregs[3].phys_addr);
- fhc->fhc_regs.uregs = (struct fhc_uart_regs *)
- __va(fpregs[4].phys_addr);
- fhc->fhc_regs.tregs = (struct fhc_tod_regs *)
- __va(fpregs[5].phys_addr);
+ fhc->fhc_regs.pregs = fpregs[0].phys_addr;
+ fhc->fhc_regs.ireg = fpregs[1].phys_addr;
+ fhc->fhc_regs.ffregs = fpregs[2].phys_addr;
+ fhc->fhc_regs.sregs = fpregs[3].phys_addr;
+ fhc->fhc_regs.uregs = fpregs[4].phys_addr;
+ fhc->fhc_regs.tregs = fpregs[5].phys_addr;
board = prom_getintdefault(node, "board#", -1);
fhc->board = board;
- tmp = fhc->fhc_regs.pregs->fhc_jtag_ctrl;
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_JCTRL);
if((tmp & FHC_JTAG_CTRL_MENAB) != 0)
fhc->jtag_master = 1;
else
fhc->jtag_master = 0;
- tmp = fhc->fhc_regs.pregs->fhc_id;
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] %s\n",
board,
(tmp & FHC_ID_VERS) >> 28,
@@ -103,7 +172,9 @@ static unsigned long probe_other_fhcs(unsigned long memory_start)
* the system. When it is clear, this identifies
* the central board.
*/
- fhc->fhc_regs.pregs->fhc_control |= FHC_CONTROL_IXIST;
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+ tmp |= FHC_CONTROL_IXIST;
+ upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
/* Look for the next FHC. */
node = prom_getsibling(node);
@@ -113,8 +184,6 @@ static unsigned long probe_other_fhcs(unsigned long memory_start)
if(node == 0)
break;
}
-
- return memory_start;
}
static void probe_clock_board(struct linux_central *central,
@@ -135,22 +204,20 @@ static void probe_clock_board(struct linux_central *central,
prom_halt();
}
nregs /= sizeof(struct linux_prom_registers);
- prom_apply_fhc_ranges(fhc, &cregs[0], nregs);
- prom_apply_central_ranges(central, &cregs[0], nregs);
- central->cfreg = (volatile u8 *)
- __va((((unsigned long)cregs[0].which_io) << 32) |
- (((unsigned long)cregs[0].phys_addr)+0x02));
- central->clkregs = (struct clock_board_regs *)
- __va((((unsigned long)cregs[1].which_io) << 32) |
- (((unsigned long)cregs[1].phys_addr)));
+ apply_fhc_ranges(fhc, &cregs[0], nregs);
+ apply_central_ranges(central, &cregs[0], nregs);
+ central->cfreg = ((((unsigned long)cregs[0].which_io) << 32UL) |
+ ((unsigned long)cregs[0].phys_addr));
+ central->clkregs = ((((unsigned long)cregs[1].which_io) << 32UL) |
+ ((unsigned long)cregs[1].phys_addr));
+
if(nregs == 2)
- central->clkver = NULL;
+ central->clkver = 0UL;
else
- central->clkver = (volatile u8 *)
- __va((((unsigned long)cregs[2].which_io) << 32) |
- (((unsigned long)cregs[2].phys_addr)));
+ central->clkver = ((((unsigned long)cregs[2].which_io) << 32UL) |
+ ((unsigned long)cregs[2].phys_addr));
- tmp = central->clkregs->stat1;
+ tmp = upa_readb(central->clkregs + CLOCK_STAT1);
tmp &= 0xc0;
switch(tmp) {
case 0x40:
@@ -160,9 +227,9 @@ static void probe_clock_board(struct linux_central *central,
nslots = 8;
break;
case 0x80:
- if(central->clkver != NULL &&
- *(central->clkver) != 0) {
- if((*(central->clkver) & 0x80) != 0)
+ if(central->clkver != 0UL &&
+ upa_readb(central->clkver) != 0) {
+ if((upa_readb(central->clkver) & 0x80) != 0)
nslots = 4;
else
nslots = 5;
@@ -174,11 +241,11 @@ static void probe_clock_board(struct linux_central *central,
};
central->slots = nslots;
printk("CENTRAL: Detected %d slot Enterprise system. cfreg[%02x] cver[%02x]\n",
- central->slots, *(central->cfreg),
- (central->clkver ? *(central->clkver) : 0x00));
+ central->slots, upa_readb(central->cfreg),
+ (central->clkver ? upa_readb(central->clkver) : 0x00));
}
-unsigned long central_probe(unsigned long memory_start)
+void central_probe(void)
{
struct linux_prom_registers fpregs[6];
struct linux_fhc *fhc;
@@ -190,18 +257,23 @@ unsigned long central_probe(unsigned long memory_start)
extern void starfire_check(void);
starfire_check();
- return memory_start;
+ return;
}
/* Ok we got one, grab some memory for software state. */
- memory_start = long_align(memory_start);
- central_bus = (struct linux_central *) (memory_start);
+ central_bus = (struct linux_central *)
+ central_alloc_bootmem(sizeof(struct linux_central));
+ if (central_bus == NULL) {
+ prom_printf("central_probe: Cannot alloc central_bus.\n");
+ prom_halt();
+ }
- memory_start += sizeof(struct linux_central);
- memory_start = long_align(memory_start);
- fhc = (struct linux_fhc *)(memory_start);
- memory_start += sizeof(struct linux_fhc);
- memory_start = long_align(memory_start);
+ fhc = (struct linux_fhc *)
+ central_alloc_bootmem(sizeof(struct linux_fhc));
+ if (fhc == NULL) {
+ prom_printf("central_probe: Cannot alloc central fhc.\n");
+ prom_halt();
+ }
/* First init central. */
central_bus->child = fhc;
@@ -210,7 +282,7 @@ unsigned long central_probe(unsigned long memory_start)
prom_getstring(cnode, "name", namebuf, sizeof(namebuf));
strcpy(central_bus->prom_name, namebuf);
- prom_central_ranges_init(cnode, central_bus);
+ central_ranges_init(cnode, central_bus);
/* And then central's FHC. */
fhc->next = fhc_list;
@@ -226,38 +298,32 @@ unsigned long central_probe(unsigned long memory_start)
prom_getstring(fnode, "name", namebuf, sizeof(namebuf));
strcpy(fhc->prom_name, namebuf);
- prom_fhc_ranges_init(fnode, fhc);
+ fhc_ranges_init(fnode, fhc);
/* Now, map in FHC register set. */
if (prom_getproperty(fnode, "reg", (char *)&fpregs[0], sizeof(fpregs)) == -1) {
prom_printf("CENTRAL: Fatal error, cannot get fhc regs.\n");
prom_halt();
}
- prom_apply_central_ranges(central_bus, &fpregs[0], 6);
+ apply_central_ranges(central_bus, &fpregs[0], 6);
- fhc->fhc_regs.pregs = (struct fhc_internal_regs *)
- __va((((unsigned long)fpregs[0].which_io)<<32) |
- (((unsigned long)fpregs[0].phys_addr)));
- fhc->fhc_regs.ireg = (struct fhc_ign_reg *)
- __va((((unsigned long)fpregs[1].which_io)<<32) |
- (((unsigned long)fpregs[1].phys_addr)));
- fhc->fhc_regs.ffregs = (struct fhc_fanfail_regs *)
- __va((((unsigned long)fpregs[2].which_io)<<32) |
- (((unsigned long)fpregs[2].phys_addr)));
- fhc->fhc_regs.sregs = (struct fhc_system_regs *)
- __va((((unsigned long)fpregs[3].which_io)<<32) |
- (((unsigned long)fpregs[3].phys_addr)));
- fhc->fhc_regs.uregs = (struct fhc_uart_regs *)
- __va((((unsigned long)fpregs[4].which_io)<<32) |
- (((unsigned long)fpregs[4].phys_addr)));
- fhc->fhc_regs.tregs = (struct fhc_tod_regs *)
- __va((((unsigned long)fpregs[5].which_io)<<32) |
- (((unsigned long)fpregs[5].phys_addr)));
+ fhc->fhc_regs.pregs = ((((unsigned long)fpregs[0].which_io)<<32UL) |
+ ((unsigned long)fpregs[0].phys_addr));
+ fhc->fhc_regs.ireg = ((((unsigned long)fpregs[1].which_io)<<32UL) |
+ ((unsigned long)fpregs[1].phys_addr));
+ fhc->fhc_regs.ffregs = ((((unsigned long)fpregs[2].which_io)<<32UL) |
+ ((unsigned long)fpregs[2].phys_addr));
+ fhc->fhc_regs.sregs = ((((unsigned long)fpregs[3].which_io)<<32UL) |
+ ((unsigned long)fpregs[3].phys_addr));
+ fhc->fhc_regs.uregs = ((((unsigned long)fpregs[4].which_io)<<32UL) |
+ ((unsigned long)fpregs[4].phys_addr));
+ fhc->fhc_regs.tregs = ((((unsigned long)fpregs[5].which_io)<<32UL) |
+ ((unsigned long)fpregs[5].phys_addr));
/* Obtain board number from board status register, Central's
* FHC lacks "board#" property.
*/
- err = fhc->fhc_regs.pregs->fhc_bsr;
+ err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_BSR);
fhc->board = (((err >> 16) & 0x01) |
((err >> 12) & 0x0e));
@@ -266,23 +332,21 @@ unsigned long central_probe(unsigned long memory_start)
/* Attach the clock board registers for CENTRAL. */
probe_clock_board(central_bus, fhc, cnode, fnode);
- err = fhc->fhc_regs.pregs->fhc_id;
+ err = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_ID);
printk("FHC(board %d): Version[%x] PartID[%x] Manuf[%x] (CENTRAL)\n",
fhc->board,
((err & FHC_ID_VERS) >> 28),
((err & FHC_ID_PARTID) >> 12),
((err & FHC_ID_MANUF) >> 1));
- return probe_other_fhcs(memory_start);
+ probe_other_fhcs();
}
static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
{
- volatile u32 *ctrl = (volatile u32 *)
- &fhc->fhc_regs.pregs->fhc_control;
u32 tmp;
- tmp = *ctrl;
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
/* NOTE: reverse logic on this bit */
if (on)
@@ -291,16 +355,15 @@ static __inline__ void fhc_ledblink(struct linux_fhc *fhc, int on)
tmp |= FHC_CONTROL_RLED;
tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
- *ctrl = tmp;
- tmp = *ctrl;
+ upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+ upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
}
static __inline__ void central_ledblink(struct linux_central *central, int on)
{
- volatile u8 *ctrl = (volatile u8 *) &central->clkregs->control;
- int tmp;
+ u8 tmp;
- tmp = *ctrl;
+ tmp = upa_readb(central->clkregs + CLOCK_CTRL);
/* NOTE: reverse logic on this bit */
if(on)
@@ -308,8 +371,8 @@ static __inline__ void central_ledblink(struct linux_central *central, int on)
else
tmp |= CLOCK_CTRL_RLED;
- *ctrl = tmp;
- tmp = *ctrl;
+ upa_writeb(tmp, central->clkregs + CLOCK_CTRL);
+ upa_readb(central->clkregs + CLOCK_CTRL);
}
static struct timer_list sftimer;
@@ -335,41 +398,41 @@ void firetruck_init(void)
{
struct linux_central *central = central_bus;
struct linux_fhc *fhc;
+ u8 ctrl;
/* No central bus, nothing to do. */
if (central == NULL)
return;
for(fhc = fhc_list; fhc != NULL; fhc = fhc->next) {
- volatile u32 *ctrl = (volatile u32 *)
- &fhc->fhc_regs.pregs->fhc_control;
u32 tmp;
/* Clear all of the interrupt mapping registers
* just in case OBP left them in a foul state.
*/
-#define ZAP(REG1, REG2) \
-do { volatile u32 *__iclr = (volatile u32 *)(&(REG1)); \
- volatile u32 *__imap = (volatile u32 *)(&(REG2)); \
- *(__iclr) = 0; \
- (void) *(__iclr); \
- *(__imap) &= ~(0x80000000); \
- (void) *(__imap); \
-} while(0)
-
- ZAP(fhc->fhc_regs.ffregs->fhc_ff_iclr,
- fhc->fhc_regs.ffregs->fhc_ff_imap);
- ZAP(fhc->fhc_regs.sregs->fhc_sys_iclr,
- fhc->fhc_regs.sregs->fhc_sys_imap);
- ZAP(fhc->fhc_regs.uregs->fhc_uart_iclr,
- fhc->fhc_regs.uregs->fhc_uart_imap);
- ZAP(fhc->fhc_regs.tregs->fhc_tod_iclr,
- fhc->fhc_regs.tregs->fhc_tod_imap);
+#define ZAP(ICLR, IMAP) \
+do { u32 imap_tmp; \
+ upa_writel(0, (ICLR)); \
+ upa_readl(ICLR); \
+ imap_tmp = upa_readl(IMAP); \
+ imap_tmp &= ~(0x80000000); \
+ upa_writel(imap_tmp, (IMAP)); \
+ upa_readl(IMAP); \
+} while (0)
+
+ ZAP(fhc->fhc_regs.ffregs + FHC_FFREGS_ICLR,
+ fhc->fhc_regs.ffregs + FHC_FFREGS_IMAP);
+ ZAP(fhc->fhc_regs.sregs + FHC_SREGS_ICLR,
+ fhc->fhc_regs.sregs + FHC_SREGS_IMAP);
+ ZAP(fhc->fhc_regs.uregs + FHC_UREGS_ICLR,
+ fhc->fhc_regs.uregs + FHC_UREGS_IMAP);
+ ZAP(fhc->fhc_regs.tregs + FHC_TREGS_ICLR,
+ fhc->fhc_regs.tregs + FHC_TREGS_IMAP);
#undef ZAP
/* Setup FHC control register. */
- tmp = *ctrl;
+ tmp = upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
/* All non-central boards have this bit set. */
if(! IS_CENTRAL_FHC(fhc))
@@ -379,14 +442,17 @@ do { volatile u32 *__iclr = (volatile u32 *)(&(REG1)); \
* line and both low power mode enables.
*/
tmp &= ~(FHC_CONTROL_AOFF | FHC_CONTROL_BOFF | FHC_CONTROL_SLINE);
- *ctrl = tmp;
- tmp = *ctrl; /* Ensure completion */
+
+ upa_writel(tmp, fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
+ upa_readl(fhc->fhc_regs.pregs + FHC_PREGS_CTRL);
}
/* OBP leaves it on, turn it off so clock board timer LED
* is in sync with FHC ones.
*/
- central->clkregs->control &= ~(CLOCK_CTRL_RLED);
+ ctrl = upa_readb(central->clkregs + CLOCK_CTRL);
+ ctrl &= ~(CLOCK_CTRL_RLED);
+ upa_writeb(ctrl, central->clkregs + CLOCK_CTRL);
led_state = 0;
init_timer(&sftimer);
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d8085bf06..be8771985 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -19,10 +19,9 @@ unsigned prom_cpu_nodes[64];
int linux_num_cpus = 0;
extern void cpu_probe(void);
-extern unsigned long central_probe(unsigned long);
+extern void central_probe(void);
-unsigned long __init
-device_scan(unsigned long mem_start)
+void __init device_scan(void)
{
char node_str[128];
int nd, prom_node_cpu, thismid;
@@ -54,13 +53,8 @@ device_scan(unsigned long mem_start)
prom_getproperty(scan, "upa-portid",
(char *) &thismid, sizeof(thismid));
linux_cpus[cpu_ctr].mid = thismid;
-#ifdef __SMP__
- /* Don't pollute PROM screen with these messages. If the kernel is screwed enough
- that console does not start up, then we don't care how many CPUs have been found,
- if it starts up, the user can use console=prom to see it. */
- /* prom_printf("Found CPU %d (node=%08x,mid=%d)\n", cpu_ctr, (unsigned) scan, thismid); */
- printk("Found CPU %d (node=%08x,mid=%d)\n", cpu_ctr, (unsigned) scan, thismid);
-#endif
+ printk("Found CPU %d (node=%08x,mid=%d)\n",
+ cpu_ctr, (unsigned) scan, thismid);
cpu_ctr++;
}
};
@@ -68,19 +62,15 @@ device_scan(unsigned long mem_start)
prom_printf("No CPU nodes found, cannot continue.\n");
prom_halt();
}
-#ifdef __SMP__
printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
-#endif
- };
+ }
prom_node_cpu = cpu_nds[0];
linux_num_cpus = cpu_ctr;
prom_cpu_nodes[0] = prom_node_cpu;
- mem_start = central_probe(mem_start);
+ central_probe();
cpu_probe();
-
- return mem_start;
}
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 9fe613a51..81f4fd366 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -1,4 +1,4 @@
-/* $Id: dtlb_backend.S,v 1.7 1998/12/16 04:33:28 davem Exp $
+/* $Id: dtlb_backend.S,v 1.8 1999/12/05 10:41:35 davem Exp $
* dtlb_backend.S: Back end to DTLB miss replacement strategy.
* This is included directly into the trap table.
*
@@ -10,7 +10,7 @@
#define VPTE_SHIFT (PAGE_SHIFT - 3)
#define PMD_SHIFT (23 - PAGE_SHIFT + 3)
#define PGD_SHIFT (34 - PAGE_SHIFT + 3)
-#define VPTE_BITS (_PAGE_CP | _PAGE_P | _PAGE_W)
+#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
/* Ways we can get here:
*
diff --git a/arch/sparc64/kernel/ebus.c b/arch/sparc64/kernel/ebus.c
index 0b3d16007..903bcf445 100644
--- a/arch/sparc64/kernel/ebus.c
+++ b/arch/sparc64/kernel/ebus.c
@@ -1,4 +1,4 @@
-/* $Id: ebus.c,v 1.44 1999/09/05 09:28:09 ecd Exp $
+/* $Id: ebus.c,v 1.46 1999/11/19 05:52:48 davem Exp $
* ebus.c: PCI to EBus bridge device.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -22,9 +22,6 @@
struct linux_ebus *ebus_chain = 0;
-extern void prom_ebus_ranges_init(struct linux_ebus *);
-extern void prom_ebus_intmap_init(struct linux_ebus *);
-
#ifdef CONFIG_SUN_OPENPROMIO
extern int openprom_init(void);
#endif
@@ -49,15 +46,49 @@ static inline void *ebus_alloc(size_t size)
return mem;
}
-void __init ebus_intmap_match(struct linux_ebus *ebus,
- struct linux_prom_registers *reg,
- int *interrupt)
+static void __init ebus_ranges_init(struct linux_ebus *ebus)
+{
+ int success;
+
+ ebus->num_ebus_ranges = 0;
+ success = prom_getproperty(ebus->prom_node, "ranges",
+ (char *)ebus->ebus_ranges,
+ sizeof(ebus->ebus_ranges));
+ if (success != -1)
+ ebus->num_ebus_ranges = (success/sizeof(struct linux_prom_ebus_ranges));
+}
+
+static void __init ebus_intmap_init(struct linux_ebus *ebus)
+{
+ int success;
+
+ ebus->num_ebus_intmap = 0;
+ success = prom_getproperty(ebus->prom_node, "interrupt-map",
+ (char *)ebus->ebus_intmap,
+ sizeof(ebus->ebus_intmap));
+ if (success == -1)
+ return;
+
+ ebus->num_ebus_intmap = (success/sizeof(struct linux_prom_ebus_intmap));
+
+ success = prom_getproperty(ebus->prom_node, "interrupt-map-mask",
+ (char *)&ebus->ebus_intmask,
+ sizeof(ebus->ebus_intmask));
+ if (success == -1) {
+ prom_printf("%s: can't get interrupt-map-mask\n", __FUNCTION__);
+ prom_halt();
+ }
+}
+
+int __init ebus_intmap_match(struct linux_ebus *ebus,
+ struct linux_prom_registers *reg,
+ int *interrupt)
{
unsigned int hi, lo, irq;
int i;
if (!ebus->num_ebus_intmap)
- return;
+ return 0;
hi = reg->which_io & ebus->ebus_intmask.phys_hi;
lo = reg->phys_addr & ebus->ebus_intmask.phys_lo;
@@ -67,13 +98,10 @@ void __init ebus_intmap_match(struct linux_ebus *ebus,
(ebus->ebus_intmap[i].phys_lo == lo) &&
(ebus->ebus_intmap[i].interrupt == irq)) {
*interrupt = ebus->ebus_intmap[i].cinterrupt;
- return;
+ return 0;
}
}
-
- prom_printf("ebus: IRQ [%08x.%08x.%08x] not found in interrupt-map\n",
- reg->which_io, reg->phys_addr, *interrupt);
- prom_halt();
+ return -1;
}
void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
@@ -139,8 +167,16 @@ void __init fill_ebus_child(int node, struct linux_prom_registers *preg,
struct pci_pbm_info *pbm = dev->bus->parent;
struct pci_controller_info *p = pbm->parent;
- ebus_intmap_match(dev->bus, preg, &irqs[i]);
- dev->irqs[i] = p->irq_build(p, dev->bus->self, irqs[i]);
+ if (ebus_intmap_match(dev->bus, preg, &irqs[i]) != -1) {
+ dev->irqs[i] = p->irq_build(p,
+ dev->bus->self,
+ irqs[i]);
+ } else {
+ /* If we get a bogus interrupt property, just
+ * record the raw value instead of punting.
+ */
+ dev->irqs[i] = irqs[i];
+ }
}
}
}
@@ -194,8 +230,16 @@ void __init fill_ebus_device(int node, struct linux_ebus_device *dev)
struct pci_pbm_info *pbm = dev->bus->parent;
struct pci_controller_info *p = pbm->parent;
- ebus_intmap_match(dev->bus, &regs[0], &irqs[i]);
- dev->irqs[i] = p->irq_build(p, dev->bus->self, irqs[i]);
+ if (ebus_intmap_match(dev->bus, &regs[0], &irqs[i]) != -1) {
+ dev->irqs[i] = p->irq_build(p,
+ dev->bus->self,
+ irqs[i]);
+ } else {
+ /* If we get a bogus interrupt property, just
+ * record the raw value instead of punting.
+ */
+ dev->irqs[i] = irqs[i];
+ }
}
}
@@ -295,8 +339,8 @@ void __init ebus_init(void)
/* NOTE: Cache line size is in 32-bit word units. */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 64/sizeof(u32));
- prom_ebus_ranges_init(ebus);
- prom_ebus_intmap_init(ebus);
+ ebus_ranges_init(ebus);
+ ebus_intmap_init(ebus);
nd = prom_getchild(ebusnd);
if (!nd)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index fbd64a507..f62e3506d 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1,4 +1,4 @@
-/* $Id: entry.S,v 1.107 1999/08/31 19:25:29 davem Exp $
+/* $Id: entry.S,v 1.110 1999/11/19 05:52:50 davem Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -214,8 +214,8 @@ do_fptrap:
.align 32
.globl do_ivec
do_ivec:
- wr %g0, ASI_UDB_INTR_R, %asi
- ldxa [%g0 + 0x40] %asi, %g3
+ mov 0x40, %g3
+ ldxa [%g3 + %g0] ASI_UDB_INTR_R, %g3
sethi %hi(KERNBASE), %g4
cmp %g3, %g4
bgeu,pn %xcc, do_ivec_xcall
@@ -234,22 +234,25 @@ do_ivec:
sllx %g2, %g4, %g2
sllx %g4, 2, %g4
- lduw [%g1 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
+ lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
- stw %g3, [%g1 + %g4] /* irq_work(cpu, pil) = bucket */
+ stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */
wr %g2, 0x0, %set_softint
retry
do_ivec_xcall:
- ldxa [%g0 + 0x50] %asi, %g6
+ mov 0x50, %g1
+ ldxa [%g1 + %g0] ASI_UDB_INTR_R, %g1
srl %g3, 0, %g3
- ldxa [%g0 + 0x60] %asi, %g7
+ mov 0x60, %g7
+ ldxa [%g7 + %g0] ASI_UDB_INTR_R, %g7
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
jmpl %g3, %g0
nop
+
do_ivec_spurious:
- stw %g3, [%g1 + 0x00] /* irq_work(cpu, 0) = bucket */
+ stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */
rdpr %pstate, %g5
wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate
@@ -261,6 +264,76 @@ do_ivec_spurious:
ba,pt %xcc, rtrap
clr %l6
+ .globl save_alternate_globals
+save_alternate_globals: /* %o0 = save_area */
+ rdpr %pstate, %o5
+ andn %o5, PSTATE_IE, %o1
+ wrpr %o1, PSTATE_AG, %pstate
+ stx %g0, [%o0 + 0x00]
+ stx %g1, [%o0 + 0x08]
+ stx %g2, [%o0 + 0x10]
+ stx %g3, [%o0 + 0x18]
+ stx %g4, [%o0 + 0x20]
+ stx %g5, [%o0 + 0x28]
+ stx %g6, [%o0 + 0x30]
+ stx %g7, [%o0 + 0x38]
+ wrpr %o1, PSTATE_IG, %pstate
+ stx %g0, [%o0 + 0x40]
+ stx %g1, [%o0 + 0x48]
+ stx %g2, [%o0 + 0x50]
+ stx %g3, [%o0 + 0x58]
+ stx %g4, [%o0 + 0x60]
+ stx %g5, [%o0 + 0x68]
+ stx %g6, [%o0 + 0x70]
+ stx %g7, [%o0 + 0x78]
+ wrpr %o1, PSTATE_MG, %pstate
+ stx %g0, [%o0 + 0x80]
+ stx %g1, [%o0 + 0x88]
+ stx %g2, [%o0 + 0x90]
+ stx %g3, [%o0 + 0x98]
+ stx %g4, [%o0 + 0xa0]
+ stx %g5, [%o0 + 0xa8]
+ stx %g6, [%o0 + 0xb0]
+ stx %g7, [%o0 + 0xb8]
+ wrpr %o5, 0x0, %pstate
+ retl
+ nop
+
+ .globl restore_alternate_globals
+restore_alternate_globals: /* %o0 = save_area */
+ rdpr %pstate, %o5
+ andn %o5, PSTATE_IE, %o1
+ wrpr %o1, PSTATE_AG, %pstate
+ ldx [%o0 + 0x00], %g0
+ ldx [%o0 + 0x08], %g1
+ ldx [%o0 + 0x10], %g2
+ ldx [%o0 + 0x18], %g3
+ ldx [%o0 + 0x20], %g4
+ ldx [%o0 + 0x28], %g5
+ ldx [%o0 + 0x30], %g6
+ ldx [%o0 + 0x38], %g7
+ wrpr %o1, PSTATE_IG, %pstate
+ ldx [%o0 + 0x40], %g0
+ ldx [%o0 + 0x48], %g1
+ ldx [%o0 + 0x50], %g2
+ ldx [%o0 + 0x58], %g3
+ ldx [%o0 + 0x60], %g4
+ ldx [%o0 + 0x68], %g5
+ ldx [%o0 + 0x70], %g6
+ ldx [%o0 + 0x78], %g7
+ wrpr %o1, PSTATE_MG, %pstate
+ ldx [%o0 + 0x80], %g0
+ ldx [%o0 + 0x88], %g1
+ ldx [%o0 + 0x90], %g2
+ ldx [%o0 + 0x98], %g3
+ ldx [%o0 + 0xa0], %g4
+ ldx [%o0 + 0xa8], %g5
+ ldx [%o0 + 0xb0], %g6
+ ldx [%o0 + 0xb8], %g7
+ wrpr %o5, 0x0, %pstate
+ retl
+ nop
+
.globl getcc, setcc
getcc:
ldx [%o0 + PT_V9_TSTATE], %o1
@@ -313,7 +386,7 @@ floppy_hardint:
ldx [%g5 + %lo(pdma_size)], %g5
next_byte:
- ldub [%g3], %g7
+ lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7
andcc %g7, 0x80, %g0
be,pn %icc, floppy_fifo_emptied
andcc %g7, 0x20, %g0
@@ -322,7 +395,9 @@ next_byte:
be,pn %icc, floppy_write
sub %g5, 1, %g5
- ldub [%g3 + 1], %g7
+ inc %g3
+ lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7
+ dec %g3
orcc %g0, %g5, %g0
stb %g7, [%g4]
bne,pn %xcc, next_byte
@@ -334,7 +409,9 @@ next_byte:
floppy_write:
ldub [%g4], %g7
orcc %g0, %g5, %g0
- stb %g7, [%g3 + 1]
+ inc %g3
+ stba %g7, [%g3] ASI_PHYS_BYPASS_EC_E
+ dec %g3
bne,pn %xcc, next_byte
add %g4, 1, %g4
@@ -368,7 +445,7 @@ floppy_fifo_emptied:
ldx [%g1 + (11 << 3)], %g3 ! irqaction[floppy_irq]
ldx [%g3 + 0x10], %g4 ! action->mask == ino_bucket ptr
ldx [%g4 + 0x10], %g4 ! bucket->iclr
- stw %g0, [%g4] ! SYSIO_ICLR_IDLE
+ stwa %g0, [%g4] ASI_PHYS_BYPASS_EC_E ! ICLR_IDLE
membar #Sync ! probably not needed...
retry
@@ -652,6 +729,8 @@ breakpoint_trap:
ba,pt %xcc, rtrap
nop
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+ defined(CONFIG_SOLARIS_EMUL_MODULE)
/* SunOS uses syscall zero as the 'indirect syscall' it looks
* like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
* This is complete brain damage.
@@ -705,6 +784,7 @@ sunos_getgid:
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
b,pt %xcc, ret_sys_call
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
+#endif
/* SunOS's execv() call only specifies the argv argument, the
* environment settings are the same as the calling processes.
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index ad863a71d..47a170f54 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -1,4 +1,4 @@
-/* $Id: head.S,v 1.61 1999/05/25 16:53:10 jj Exp $
+/* $Id: head.S,v 1.63 1999/11/19 05:52:49 davem Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -52,7 +52,7 @@ bootup_user_stack:
.ascii "HdrS"
.word LINUX_VERSION_CODE
- .half 0x0202 /* HdrS version */
+ .half 0x0203 /* HdrS version */
root_flags:
.half 1
root_dev:
@@ -65,6 +65,7 @@ sparc_ramdisk_size:
.word 0
.xword reboot_command
.xword bootstr_len
+ .word _end
/* We must be careful, 32-bit OpenBOOT will get confused if it
* tries to save away a register window to a 64-bit kernel
@@ -92,28 +93,6 @@ sparc64_boot:
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
-#ifdef __SMP__
- /* Ugly but necessary... */
- sethi %hi(KERNBASE), %g7
- sethi %hi(sparc64_cpu_startup), %g5
- or %g5, %lo(sparc64_cpu_startup), %g5
- sub %g5, %g7, %g5
- sethi %hi(sparc64_cpu_startup_end), %g6
- or %g6, %lo(sparc64_cpu_startup_end), %g6
- sub %g6, %g7, %g6
- sethi %hi(smp_trampoline), %g3
- or %g3, %lo(smp_trampoline), %g3
- sub %g3, %g7, %g3
-1: ldx [%g5], %g1
- stx %g1, [%g3]
- membar #StoreStore
- flush %g3
- add %g5, 8, %g5
- cmp %g5, %g6
- blu,pt %xcc, 1b
- add %g3, 8, %g3
-#endif
-
create_mappings:
/* %g5 holds the tlb data */
sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
@@ -380,7 +359,7 @@ setup_tba: /* i0 = is_starfire */
wrpr %o1, (PSTATE_IG|PSTATE_IE), %pstate
#ifndef __SMP__
sethi %hi(__up_workvec), %g5
- or %g5, %lo(__up_workvec), %g1
+ or %g5, %lo(__up_workvec), %g6
#else
/* By definition of where we are, this is boot_cpu. */
sethi %hi(cpu_data), %g5
@@ -404,7 +383,7 @@ not_starfire:
set_worklist:
sllx %g1, 7, %g1
add %g5, %g1, %g5
- add %g5, 64, %g1
+ add %g5, 64, %g6
#endif
/* Kill PROM timer */
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index 0affcf0eb..ef0fb3e94 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -1,4 +1,4 @@
-/* $Id: ioctl32.c,v 1.68 1999/09/10 05:59:25 davem Exp $
+/* $Id: ioctl32.c,v 1.72 2000/01/04 15:43:45 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -1921,6 +1921,9 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
case TIOCSCTTY:
case TIOCGPTN:
case TIOCSPTLCK:
+ case TIOCGSERIAL:
+ case TIOCSSERIAL:
+ case TIOCSERGETLSR:
/* Big F */
case FBIOGTYPE:
@@ -2124,6 +2127,9 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
case OPROMGETCONS:
case OPROMGETFBNAME:
case OPROMGETBOOTARGS:
+ case OPROMSETCUR:
+ case OPROMPCI2NODE:
+ case OPROMPATH2NODE:
/* Socket level stuff */
case FIOSETOWN:
@@ -2175,6 +2181,9 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
case PPPIOCSNPMODE:
case PPPIOCGDEBUG:
case PPPIOCSDEBUG:
+ case PPPIOCNEWUNIT:
+ case PPPIOCATTACH:
+ case PPPIOCDETACH:
/* CDROM stuff */
case CDROMPAUSE:
@@ -2204,6 +2213,9 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
case CDROM_DRIVE_STATUS:
case CDROM_DISC_STATUS:
case CDROM_CHANGER_NSLOTS:
+ case CDROM_LOCKDOOR:
+ case CDROM_DEBUG:
+ case CDROM_GET_CAPABILITY:
/* Big L */
case LOOP_SET_FD:
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
new file mode 100644
index 000000000..c537353e0
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -0,0 +1,233 @@
+/* $Id: iommu_common.c,v 1.2 1999/12/19 09:17:53 davem Exp $
+ * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include "iommu_common.h"
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+
+#ifdef VERIFY_SG
+int verify_lengths(struct scatterlist *sg, int nents, int npages)
+{
+ int sg_len, dma_len;
+ int i, pgcount;
+
+ sg_len = 0;
+ for (i = 0; i < nents; i++)
+ sg_len += sg[i].length;
+
+ dma_len = 0;
+ for (i = 0; i < nents && sg[i].dvma_length; i++)
+ dma_len += sg[i].dvma_length;
+
+ if (sg_len != dma_len) {
+ printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
+ sg_len, dma_len);
+ return -1;
+ }
+
+ pgcount = 0;
+ for (i = 0; i < nents && sg[i].dvma_length; i++) {
+ unsigned long start, end;
+
+ start = sg[i].dvma_address;
+ start = start & PAGE_MASK;
+
+ end = sg[i].dvma_address + sg[i].dvma_length;
+ end = (end + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ pgcount += ((end - start) >> PAGE_SHIFT);
+ }
+
+ if (pgcount != npages) {
+ printk("verify_langths: Error, page count wrong, "
+ "npages[%d] pgcount[%d]\n",
+ npages, pgcount);
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
+{
+ struct scatterlist *sg = *__sg;
+ iopte_t *iopte = *__iopte;
+ int retval = 0;
+ u32 dlen = dma_sg->dvma_length;
+ u32 daddr = dma_sg->dvma_address;
+ unsigned int sglen;
+ unsigned long sgaddr;
+
+ sglen = sg->length;
+ sgaddr = (unsigned long) sg->address;
+ while (dlen > 0) {
+ unsigned long paddr;
+
+ /* SG and DMA_SG must begin at the same sub-page boundary. */
+ if ((sgaddr & ~PAGE_MASK) != (daddr & ~PAGE_MASK)) {
+ printk("verify_one_map: Wrong start offset "
+ "sg[%08lx] dma[%08x]\n",
+ sgaddr, daddr);
+ retval = -nents;
+ goto out;
+ }
+
+ /* Verify the IOPTE points to the right page. */
+ paddr = iopte_val(*iopte) & IOPTE_PAGE;
+ if ((paddr + PAGE_OFFSET) != (sgaddr & PAGE_MASK)) {
+ printk("verify_one_map: IOPTE[%08lx] maps the "
+ "wrong page, should be [%08lx]\n",
+ iopte_val(*iopte), (sgaddr & PAGE_MASK) - PAGE_OFFSET);
+ retval = -nents;
+ goto out;
+ }
+
+ /* If this SG crosses a page, adjust to that next page
+ * boundary and loop.
+ */
+ if ((sgaddr & PAGE_MASK) ^ ((sgaddr + sglen - 1) & PAGE_MASK)) {
+ unsigned long next_page, diff;
+
+ next_page = (sgaddr + PAGE_SIZE) & PAGE_MASK;
+ diff = next_page - sgaddr;
+ sgaddr += diff;
+ daddr += diff;
+ sglen -= diff;
+ dlen -= diff;
+ if (dlen > 0)
+ iopte++;
+ continue;
+ }
+
+ /* SG wholly consumed within this page. */
+ daddr += sglen;
+ dlen -= sglen;
+
+ if (dlen > 0 && ((daddr & ~PAGE_MASK) == 0))
+ iopte++;
+
+ sg++;
+ sgaddr = (unsigned long) sg->address;
+ sglen = sg->length;
+ }
+ if (dlen < 0) {
+ /* Transfer overrun, big problems. */
+ printk("verify_one_map: Transfer overrun by %d bytes.\n",
+ -dlen);
+ retval = -nents;
+ } else {
+ /* Advance to next dma_sg implies that the next iopte will
+ * begin it.
+ */
+ iopte++;
+ }
+
+out:
+ *__sg = sg;
+ *__iopte = iopte;
+ return retval;
+}
+
+int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
+{
+ struct scatterlist *dma_sg = sg;
+ struct scatterlist *orig_dma_sg = dma_sg;
+ int orig_nents = nents;
+
+ for (;;) {
+ nents = verify_one_map(dma_sg, &sg, nents, &iopte);
+ if (nents <= 0)
+ break;
+ dma_sg++;
+ if (dma_sg->dvma_length == 0)
+ break;
+ }
+
+ if (nents > 0) {
+ printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
+ nents);
+ return -1;
+ }
+
+ if (nents < 0) {
+ printk("verify_maps: Error, messed up mappings, "
+ "at sg %d dma_sg %d\n",
+ (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
+ return -1;
+ }
+
+ /* This test passes... */
+ return 0;
+}
+
+void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages)
+{
+ if (verify_lengths(sg, nents, npages) < 0 ||
+ verify_maps(sg, nents, iopte) < 0) {
+ int i;
+
+ printk("verify_sglist: Crap, messed up mappings, dumping, iodma at %08x.\n",
+ (u32) (sg->dvma_address & PAGE_MASK));
+ for (i = 0; i < nents; i++) {
+ printk("sg(%d): address(%p) length(%x) "
+ "dma_address[%08x] dma_length[%08x]\n",
+ i,
+ sg[i].address, sg[i].length,
+ sg[i].dvma_address, sg[i].dvma_length);
+ }
+ }
+
+ /* Seems to be ok */
+}
+#endif
+
+/* Two addresses are "virtually contiguous" if and only if:
+ * 1) They are equal, or...
+ * 2) They are both on a page boundry
+ */
+#define VCONTIG(__X, __Y) (((__X) == (__Y)) || \
+ (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
+
+unsigned long prepare_sg(struct scatterlist *sg, int nents)
+{
+ struct scatterlist *dma_sg = sg;
+ unsigned long prev;
+ u32 dent_addr, dent_len;
+
+ prev = (unsigned long) sg->address;
+ prev += (unsigned long) (dent_len = sg->length);
+ dent_addr = (u32) ((unsigned long)sg->address & (PAGE_SIZE - 1UL));
+ while (--nents) {
+ unsigned long addr;
+
+ sg++;
+ addr = (unsigned long) sg->address;
+ if (! VCONTIG(prev, addr)) {
+ dma_sg->dvma_address = dent_addr;
+ dma_sg->dvma_length = dent_len;
+ dma_sg++;
+
+ dent_addr = ((dent_addr +
+ dent_len +
+ (PAGE_SIZE - 1UL)) >> PAGE_SHIFT);
+ dent_addr <<= PAGE_SHIFT;
+ dent_addr += addr & (PAGE_SIZE - 1UL);
+ dent_len = 0;
+ }
+ dent_len += sg->length;
+ prev = addr + sg->length;
+ }
+ dma_sg->dvma_address = dent_addr;
+ dma_sg->dvma_length = dent_len;
+
+ return ((unsigned long) dent_addr +
+ (unsigned long) dent_len +
+ (PAGE_SIZE - 1UL)) >> PAGE_SHIFT;
+}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
new file mode 100644
index 000000000..30cbd4385
--- /dev/null
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -0,0 +1,34 @@
+/* $Id: iommu_common.h,v 1.1 1999/12/17 12:31:54 jj Exp $
+ * iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/page.h>
+#include <asm/iommu.h>
+#include <asm/scatterlist.h>
+
+/* You are _strongly_ advised to enable the following debugging code
+ * any time you make changes to the sg code below, run it for a while
+ * with filesystems mounted read-only before buying the farm... -DaveM
+ */
+#undef VERIFY_SG
+
+#ifdef VERIFY_SG
+int verify_lengths(struct scatterlist *sg, int nents, int npages);
+int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte);
+int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte);
+void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int npages);
+#endif
+
+/* Two addresses are "virtually contiguous" if and only if:
+ * 1) They are equal, or...
+ * 2) They are both on a page boundry
+ */
+#define VCONTIG(__X, __Y) (((__X) == (__Y)) || \
+ (((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
+
+unsigned long prepare_sg(struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/ioport.c b/arch/sparc64/kernel/ioport.c
deleted file mode 100644
index 84b097d3f..000000000
--- a/arch/sparc64/kernel/ioport.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* $Id: ioport.c,v 1.14 1998/05/11 06:23:36 davem Exp $
- * ioport.c: Simple io mapping allocator.
- *
- * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-#include <asm/vaddrs.h>
-#include <asm/oplib.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-/* This points to the next to use virtual memory for io mappings */
-static unsigned long dvma_next_free = DVMA_VADDR;
-
-extern void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr);
-
-/*
- * sparc_alloc_io:
- * Map and allocates an obio device.
- * Implements a simple linear allocator, you can force the function
- * to use your own mapping, but in practice this should not be used.
- *
- * Input:
- * address: Physical address to map
- * virtual: if non zero, specifies a fixed virtual address where
- * the mapping should take place, not supported on Ultra
- * and this feature is scheduled to be removed as nobody
- * uses it. -DaveM
- * len: the length of the mapping
- * bus_type: Optional high word of physical address.
- *
- * Returns:
- * The virtual address where the mapping actually took place.
- */
-
-void *sparc_alloc_io (u32 address, void *virtual, int len, char *name,
- u32 bus_type, int rdonly)
-{
- unsigned long addr = ((unsigned long)address) + (((unsigned long)bus_type)<<32);
- unsigned long vaddr = (unsigned long) __va(addr);
-
- if(virtual)
- panic("sparc_alloc_io: Fixed virtual mappings unsupported on Ultra.");
-
- if(!check_region(vaddr, len))
- request_region(vaddr, len, name);
-
- return (void *) vaddr;
-}
-
-void sparc_free_io (void *virtual, int len)
-{
- unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
- unsigned long plen = (((unsigned long)virtual & ~PAGE_MASK) +
- len + PAGE_SIZE-1) & PAGE_MASK;
- release_region(vaddr, plen);
-}
-
-/* Does DVMA allocations with PAGE_SIZE granularity. How this basically
- * works is that the ESP chip can do DVMA transfers at ANY address with
- * certain size and boundary restrictions. But other devices that are
- * attached to it and would like to do DVMA have to set things up in
- * a special way, if the DVMA sees a device attached to it transfer data
- * at addresses above DVMA_VADDR it will grab them, this way it does not
- * now have to know the peculiarities of where to read the Lance data
- * from. (for example)
- *
- * Returns CPU visible address for the buffer returned, dvma_addr is
- * set to the DVMA visible address.
- */
-void *sparc_dvma_malloc (int len, char *name, __u32 *dvma_addr)
-{
- unsigned long vaddr, base_address;
-
- vaddr = dvma_next_free;
- if(check_region (vaddr, len)) {
- prom_printf("alloc_dma: 0x%lx is already in use\n", vaddr);
- prom_halt();
- }
- if(vaddr + len > (DVMA_VADDR + DVMA_LEN)) {
- prom_printf("alloc_dvma: out of dvma memory\n");
- prom_halt();
- }
-
- /* Basically these can be mapped just like any old
- * IO pages, cacheable bit off, etc. The physical
- * pages are now mapped dynamically to save space.
- */
- base_address = vaddr;
- mmu_map_dma_area(base_address, len, dvma_addr);
-
- /* Assign the memory area. */
- dvma_next_free = PAGE_ALIGN(dvma_next_free+len);
-
- request_region(base_address, len, name);
-
- return (void *) base_address;
-}
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 598cece4e..b70d936c3 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1,4 +1,4 @@
-/* $Id: irq.c,v 1.78 1999/08/31 06:54:54 davem Exp $
+/* $Id: irq.c,v 1.80 1999/12/06 03:14:48 davem Exp $
* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -106,89 +106,16 @@ int get_irq_list(char *buf)
return len;
}
-/* SBUS SYSIO INO number to Sparc PIL level. */
-unsigned char sysio_ino_to_pil[] = {
- 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 0 */
- 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 1 */
- 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 2 */
- 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 3 */
- 3, /* Onboard SCSI */
- 5, /* Onboard Ethernet */
-/*XXX*/ 8, /* Onboard BPP */
- 0, /* Bogon */
- 13, /* Audio */
-/*XXX*/15, /* PowerFail */
- 0, /* Bogon */
- 0, /* Bogon */
- 12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
- 11, /* Floppy */
- 0, /* Spare Hardware (bogon for now) */
- 0, /* Keyboard (bogon for now) */
- 0, /* Mouse (bogon for now) */
- 0, /* Serial (bogon for now) */
- 0, 0, /* Bogon, Bogon */
- 10, /* Timer 0 */
- 11, /* Timer 1 */
- 0, 0, /* Bogon, Bogon */
- 15, /* Uncorrectable SBUS Error */
- 15, /* Correctable SBUS Error */
- 15, /* SBUS Error */
-/*XXX*/ 0, /* Power Management (bogon for now) */
-};
-
-/* INO number to IMAP register offset for SYSIO external IRQ's.
- * This should conform to both Sunfire/Wildfire server and Fusion
- * desktop designs.
- */
-#define offset(x) ((unsigned long)(&(((struct sysio_regs *)0)->x)))
-#define bogon ((unsigned long) -1)
-static unsigned long sysio_irq_offsets[] = {
-/* SBUS Slot 0 --> 3, level 1 --> 7 */
-offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
-offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
-offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
-offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
-offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
-offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
-offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
-offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
-/* Onboard devices (not relevant/used on SunFire). */
-offset(imap_scsi), offset(imap_eth), offset(imap_bpp), bogon,
-offset(imap_audio), offset(imap_pfail), bogon, bogon,
-offset(imap_kms), offset(imap_flpy), offset(imap_shw),
-offset(imap_kbd), offset(imap_ms), offset(imap_ser), bogon, bogon,
-offset(imap_tim0), offset(imap_tim1), bogon, bogon,
-offset(imap_ue), offset(imap_ce), offset(imap_sberr),
-offset(imap_pmgmt),
-};
-
-#undef bogon
-
-#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
-
-/* Convert Interrupt Mapping register pointer to assosciated
- * Interrupt Clear register pointer, SYSIO specific version.
- */
-static volatile unsigned int *sysio_imap_to_iclr(volatile unsigned int *imap)
-{
- unsigned long diff;
-
- diff = offset(iclr_unused0) - offset(imap_slot0);
- return (volatile unsigned int *) (((unsigned long)imap) + diff);
-}
-
-#undef offset
-
/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
extern int this_is_starfire;
struct ino_bucket *bucket = __bucket(irq);
- volatile unsigned int *imap;
+ unsigned long imap;
unsigned long tid;
imap = bucket->imap;
- if (!imap)
+ if (imap == 0UL)
return;
if(this_is_starfire == 0) {
@@ -198,7 +125,7 @@ void enable_irq(unsigned int irq)
: "i" (ASI_UPA_CONFIG));
tid = ((tid & UPA_CONFIG_MID) << 9);
} else {
- extern unsigned int starfire_translate(volatile unsigned int *imap,
+ extern unsigned int starfire_translate(unsigned long imap,
unsigned int upaid);
tid = (starfire_translate(imap, current->processor) << 26);
@@ -208,27 +135,31 @@ void enable_irq(unsigned int irq)
* of this SYSIO's preconfigured IGN in the SYSIO Control
* Register, the hardware just mirrors that value here.
* However for Graphics and UPA Slave devices the full
- * SYSIO_IMAP_INR field can be set by the programmer here.
+ * IMAP_INR field can be set by the programmer here.
*
* Things like FFB can now be handled via the new IRQ mechanism.
*/
- *imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
+ upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
}
/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
struct ino_bucket *bucket = __bucket(irq);
- volatile unsigned int *imap;
+ unsigned long imap;
imap = bucket->imap;
- if (imap != NULL) {
+ if (imap != 0UL) {
+ u32 tmp;
+
/* NOTE: We do not want to futz with the IRQ clear registers
* and move the state to IDLE, the SCSI code does call
* disable_irq() to assure atomicity in the queue cmd
* SCSI adapter driver code. Thus we'd lose interrupts.
*/
- *imap &= ~(SYSIO_IMAP_VALID);
+ tmp = upa_readl(imap);
+ tmp &= ~IMAP_VALID;
+ upa_writel(tmp, imap);
}
}
@@ -243,18 +174,18 @@ static struct ino_bucket pil0_dummy_bucket = {
0, /* flags */
0, /* __unused */
NULL, /* irq_info */
- NULL, /* iclr */
- NULL, /* imap */
+ 0UL, /* iclr */
+ 0UL, /* imap */
};
-unsigned int build_irq(int pil, int inofixup, volatile unsigned int *iclr, volatile unsigned int *imap)
+unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
int ino;
if(pil == 0) {
- if(iclr != NULL || imap != NULL) {
- prom_printf("Invalid dummy bucket for PIL0 (%p:%p)\n",
+ if(iclr != 0UL || imap != 0UL) {
+ prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
iclr, imap);
prom_halt();
}
@@ -262,13 +193,13 @@ unsigned int build_irq(int pil, int inofixup, volatile unsigned int *iclr, volat
}
/* RULE: Both must be specified in all other cases. */
- if (iclr == NULL || imap == NULL) {
+ if (iclr == 0UL || imap == 0UL) {
prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
pil, inofixup, iclr, imap);
prom_halt();
}
- ino = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO)) + inofixup;
+ ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
if(ino > NUM_IVECS) {
prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
ino, pil, inofixup, iclr, imap);
@@ -300,64 +231,6 @@ unsigned int build_irq(int pil, int inofixup, volatile unsigned int *iclr, volat
return __irq(bucket);
}
-unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
-{
- struct linux_sbus *sbus = (struct linux_sbus *)buscookie;
- struct sysio_regs *sregs = sbus->iommu->sysio_regs;
- unsigned long offset;
- int pil;
- volatile unsigned int *imap, *iclr;
- int sbus_level = 0;
-
- pil = sysio_ino_to_pil[ino];
- if(!pil) {
- printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
- panic("Bad SYSIO IRQ translations...");
- }
- offset = sysio_irq_offsets[ino];
- if(offset == ((unsigned long)-1)) {
- printk("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
- ino, pil);
- panic("BAD SYSIO IRQ offset...");
- }
- offset += ((unsigned long)sregs);
- imap = ((volatile unsigned int *)offset);
-
- /* SYSIO inconsistancy. For external SLOTS, we have to select
- * the right ICLR register based upon the lower SBUS irq level
- * bits.
- */
- if(ino >= 0x20) {
- iclr = sysio_imap_to_iclr(imap);
- } else {
- unsigned long iclraddr;
- int sbus_slot = (ino & 0x18)>>3;
-
- sbus_level = ino & 0x7;
-
- switch(sbus_slot) {
- case 0:
- iclr = &sregs->iclr_slot0;
- break;
- case 1:
- iclr = &sregs->iclr_slot1;
- break;
- case 2:
- iclr = &sregs->iclr_slot2;
- break;
- default:
- case 3:
- iclr = &sregs->iclr_slot3;
- break;
- };
-
- iclraddr = (unsigned long) iclr;
- iclraddr += ((sbus_level - 1) * 8);
- iclr = (volatile unsigned int *) iclraddr;
- }
- return build_irq(pil, sbus_level, iclr, imap);
-}
-
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
unsigned long pstate;
@@ -602,7 +475,7 @@ void free_irq(unsigned int irq, void *dev_id)
*(bucket->pil + irq_action) = action->next;
if(action->flags & SA_IMAP_MASKED) {
- volatile unsigned int *imap = bucket->imap;
+ unsigned long imap = bucket->imap;
void **vector, *orig;
int ent;
@@ -696,10 +569,10 @@ static void show(char * str)
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
- printk("irq: %d [%ld %ld]\n",
+ printk("irq: %d [%u %u]\n",
atomic_read(&global_irq_count),
cpu_data[0].irq_count, cpu_data[1].irq_count);
- printk("bh: %d [%ld %ld]\n",
+ printk("bh: %d [%u %u]\n",
(spin_is_locked(&global_bh_count) ? 1 : 0),
cpu_data[0].bh_count, cpu_data[1].bh_count);
}
@@ -947,10 +820,10 @@ void handler_irq(int irq, struct pt_regs *regs)
if (should_forward != 0) {
/* Push it to our buddy. */
should_forward = 0;
- *(bp->imap) = (buddy | SYSIO_IMAP_VALID);
+ upa_writel(buddy | IMAP_VALID, bp->imap);
}
#endif
- *(bp->iclr) = SYSIO_ICLR_IDLE;
+ upa_writel(ICLR_IDLE, bp->iclr);
}
} else
bp->pending = 1;
@@ -974,7 +847,7 @@ void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
bucket = (struct ino_bucket *)action->mask;
floppy_interrupt(irq, dev_cookie, regs);
- *(bucket->iclr) = SYSIO_ICLR_IDLE;
+ upa_writel(ICLR_IDLE, bucket->iclr);
irq_exit(cpu, irq);
}
@@ -1116,7 +989,7 @@ void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
#endif
/* Register IRQ handler. */
- err = request_irq(build_irq(0, 0, NULL, NULL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
+ err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, (SA_INTERRUPT | SA_STATIC_ALLOC),
"timer", NULL);
if(err) {
@@ -1157,7 +1030,7 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
{
extern int this_is_starfire;
struct ino_bucket *bucket = __bucket(p->mask);
- volatile unsigned int *imap = bucket->imap;
+ unsigned long imap = bucket->imap;
unsigned int tid;
/* Never change this, it causes problems on Ex000 systems. */
@@ -1167,12 +1040,12 @@ static int retarget_one_irq(struct irqaction *p, int goal_cpu)
if(this_is_starfire == 0) {
tid = __cpu_logical_map[goal_cpu] << 26;
} else {
- extern unsigned int starfire_translate(volatile unsigned int *imap,
+ extern unsigned int starfire_translate(unsigned long imap,
unsigned int upaid);
tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26);
}
- *imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
+ upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
goal_cpu++;
if(goal_cpu >= NR_CPUS ||
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index c9e3681dd..c102a6205 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -1,4 +1,4 @@
-/* $Id: pci.c,v 1.6 1999/09/08 03:40:41 davem Exp $
+/* $Id: pci.c,v 1.13 2000/01/06 23:51:49 davem Exp $
* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
@@ -20,8 +20,11 @@
#include <asm/irq.h>
#include <asm/ebus.h>
+#ifndef NEW_PCI_DMA_MAP
unsigned long pci_dvma_v2p_hash[PCI_DVMA_HASHSZ];
unsigned long pci_dvma_p2v_hash[PCI_DVMA_HASHSZ];
+#endif
+
unsigned long pci_memspace_mask = 0xffffffffUL;
#ifndef CONFIG_PCI
@@ -158,25 +161,19 @@ static void pci_scan_each_controller_bus(void)
*/
static void __init pci_reorder_devs(void)
{
- struct pci_dev **pci_onboard = &pci_devices;
- struct pci_dev **pci_tail = &pci_devices;
- struct pci_dev *pdev = pci_devices, *pci_other = NULL;
+ struct list_head *pci_onboard = &pci_devices;
+ struct list_head *walk = pci_onboard->next;
+
+ while (walk != pci_onboard) {
+ struct pci_dev *pdev = pci_dev_g(walk);
+ struct list_head *walk_next = walk->next;
- while (pdev) {
if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
- if (pci_other) {
- *pci_onboard = pdev;
- pci_onboard = &pdev->next;
- pdev = pdev->next;
- *pci_onboard = pci_other;
- *pci_tail = pdev;
- continue;
- } else
- pci_onboard = &pdev->next;
- } else if (!pci_other)
- pci_other = pdev;
- pci_tail = &pdev->next;
- pdev = pdev->next;
+ list_del(walk);
+ list_add(walk, pci_onboard);
+ }
+
+ walk = walk_next;
}
}
@@ -202,6 +199,35 @@ void pcibios_fixup_bus(struct pci_bus *pbus)
{
}
+void pcibios_update_resource(struct pci_dev *pdev, struct resource *res1,
+ struct resource *res2, int index)
+{
+}
+
+void pcibios_update_irq(struct pci_dev *pdev, int irq)
+{
+}
+
+unsigned long resource_fixup(struct pci_dev *pdev, struct resource *res,
+ unsigned long start, unsigned long size)
+{
+ return start;
+}
+
+void pcibios_fixup_pbus_ranges(struct pci_bus *pbus,
+ struct pbus_set_ranges_data *pranges)
+{
+}
+
+void pcibios_align_resource(void *data, struct resource *res, unsigned long size)
+{
+}
+
+int pci_assign_resource(struct pci_dev *dev, int i)
+{
+ return -ENOSYS; /* :-)... actually implement this soon */
+}
+
char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "onboardfirst")) {
diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
index a3600df9c..154ee0181 100644
--- a/arch/sparc64/kernel/pci_common.c
+++ b/arch/sparc64/kernel/pci_common.c
@@ -1,4 +1,4 @@
-/* $Id: pci_common.c,v 1.3 1999/09/04 22:26:32 ecd Exp $
+/* $Id: pci_common.c,v 1.6 2000/01/06 23:51:49 davem Exp $
* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -59,29 +59,8 @@ static int __init find_device_prom_node(struct pci_pbm_info *pbm,
*/
static void pci_device_delete(struct pci_dev *pdev)
{
- struct pci_dev **dpp;
-
- /* First, unlink from list of all devices. */
- dpp = &pci_devices;
- while (*dpp != NULL) {
- if (*dpp == pdev) {
- *dpp = pdev->next;
- pdev->next = NULL;
- break;
- }
- dpp = &(*dpp)->next;
- }
-
- /* Next, unlink from bus sibling chain. */
- dpp = &pdev->bus->devices;
- while (*dpp != NULL) {
- if (*dpp == pdev) {
- *dpp = pdev->sibling;
- pdev->sibling = NULL;
- break;
- }
- dpp = &(*dpp)->sibling;
- }
+ list_del(&pdev->global_list);
+ list_del(&pdev->bus_list);
/* Ok, all references are gone, free it up. */
kfree(pdev);
@@ -175,23 +154,31 @@ void __init pci_fill_in_pbm_cookies(struct pci_bus *pbus,
struct pci_pbm_info *pbm,
int prom_node)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
/* This loop is coded like this because the cookie
* fillin routine can delete devices from the tree.
*/
- pdev = pbus->devices;
- while (pdev != NULL) {
- struct pci_dev *next = pdev->sibling;
+ walk = walk->next;
+ while (walk != &pbus->devices) {
+ struct pci_dev *pdev = pci_dev_b(walk);
+ struct list_head *walk_next = walk->next;
pdev_cookie_fillin(pbm, pdev, prom_node);
- pdev = next;
+ walk = walk_next;
}
- for (pbus = pbus->children; pbus; pbus = pbus->next) {
- struct pcidev_cookie *pcp = pbus->self->sysdata;
- pci_fill_in_pbm_cookies(pbus, pbm, pcp->prom_node);
+ walk = &pbus->children;
+ walk = walk->next;
+ while (walk != &pbus->children) {
+ struct pci_bus *this_pbus = pci_bus_b(walk);
+ struct pcidev_cookie *pcp = this_pbus->self->sysdata;
+ struct list_head *walk_next = walk->next;
+
+ pci_fill_in_pbm_cookies(this_pbus, pbm, pcp->prom_node);
+
+ walk = walk_next;
}
}
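
/* The loops above cache walk->next before invoking the cookie fillin routine
 * because that routine may unlink and free the current device.  A minimal
 * stand-alone illustration of the same "grab the successor first" idiom,
 * using a plain singly linked list instead of the kernel's list_head:
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int id;
    struct node *next;
};

/* Visitor that may free the node it is handed (here: every even id). */
static void visit(struct node **headp, struct node *n)
{
    if (n->id % 2 == 0) {
        struct node **pp = headp;
        while (*pp != n)
            pp = &(*pp)->next;
        *pp = n->next;
        free(n);
    }
}

int main(void)
{
    struct node *head = NULL, *n;
    int i;

    for (i = 5; i >= 1; i--) {
        n = malloc(sizeof(*n));
        n->id = i;
        n->next = head;
        head = n;
    }

    /* Deletion-safe walk: remember the successor before the visitor gets a
     * chance to free the current element.
     */
    n = head;
    while (n != NULL) {
        struct node *next = n->next;
        visit(&head, n);
        n = next;
    }

    for (n = head; n != NULL; n = n->next)
        printf("%d ", n->id);      /* prints: 1 3 5 */
    printf("\n");
    return 0;
}
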
@@ -315,13 +302,14 @@ static void __init pdev_record_assignments(struct pci_pbm_info *pbm,
void __init pci_record_assignments(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling)
- pdev_record_assignments(pbm, pdev);
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next)
+ pdev_record_assignments(pbm, pci_dev_b(walk));
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_record_assignments(pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_record_assignments(pbm, pci_bus_b(walk));
}
static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
@@ -362,7 +350,7 @@ static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
size = res->end - res->start;
align = size + 1;
- if (allocate_resource(root, res, size + 1, min, max, align) < 0) {
+ if (allocate_resource(root, res, size + 1, min, max, align, NULL, NULL) < 0) {
/* uh oh */
prom_printf("PCI: Failed to allocate resource %d for %s\n",
i, pdev->name);
@@ -415,13 +403,14 @@ static void __init pdev_assign_unassigned(struct pci_pbm_info *pbm,
void __init pci_assign_unassigned(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling)
- pdev_assign_unassigned(pbm, pdev);
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next)
+ pdev_assign_unassigned(pbm, pci_dev_b(walk));
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_assign_unassigned(pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_assign_unassigned(pbm, pci_bus_b(walk));
}
static int __init pci_intmap_match(struct pci_dev *pdev, unsigned int *interrupt)
@@ -566,13 +555,14 @@ have_irq:
void __init pci_fixup_irq(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling)
- pdev_fixup_irq(pdev);
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next)
+ pdev_fixup_irq(pci_dev_b(walk));
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_fixup_irq(pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_fixup_irq(pbm, pci_bus_b(walk));
}
/* Generic helper routines for PCI error reporting. */
@@ -580,9 +570,10 @@ void pci_scan_for_target_abort(struct pci_controller_info *p,
struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling) {
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next) {
+ struct pci_dev *pdev = pci_dev_b(walk);
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
@@ -597,17 +588,19 @@ void pci_scan_for_target_abort(struct pci_controller_info *p,
}
}
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_scan_for_target_abort(p, pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_scan_for_target_abort(p, pbm, pci_bus_b(walk));
}
void pci_scan_for_master_abort(struct pci_controller_info *p,
struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling) {
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next) {
+ struct pci_dev *pdev = pci_dev_b(walk);
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
@@ -621,17 +614,19 @@ void pci_scan_for_master_abort(struct pci_controller_info *p,
}
}
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_scan_for_master_abort(p, pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_scan_for_master_abort(p, pbm, pci_bus_b(walk));
}
void pci_scan_for_parity_error(struct pci_controller_info *p,
struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
- struct pci_dev *pdev;
+ struct list_head *walk = &pbus->devices;
- for (pdev = pbus->devices; pdev; pdev = pdev->sibling) {
+ for (walk = walk->next; walk != &pbus->devices; walk = walk->next) {
+ struct pci_dev *pdev = pci_dev_b(walk);
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
@@ -646,6 +641,7 @@ void pci_scan_for_parity_error(struct pci_controller_info *p,
}
}
- for (pbus = pbus->children; pbus; pbus = pbus->next)
- pci_scan_for_parity_error(p, pbm, pbus);
+ walk = &pbus->children;
+ for (walk = walk->next; walk != &pbus->children; walk = walk->next)
+ pci_scan_for_parity_error(p, pbm, pci_bus_b(walk));
}
diff --git a/arch/sparc64/kernel/pci_impl.h b/arch/sparc64/kernel/pci_impl.h
index 24ed0319b..12b6af71e 100644
--- a/arch/sparc64/kernel/pci_impl.h
+++ b/arch/sparc64/kernel/pci_impl.h
@@ -1,4 +1,4 @@
-/* $Id: pci_impl.h,v 1.3 1999/09/10 10:40:44 davem Exp $
+/* $Id: pci_impl.h,v 1.4 1999/12/17 12:32:03 jj Exp $
* pci_impl.h: Helper definitions for PCI controller support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -34,6 +34,7 @@ extern void pci_scan_for_target_abort(struct pci_controller_info *, struct pci_p
extern void pci_scan_for_master_abort(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
extern void pci_scan_for_parity_error(struct pci_controller_info *, struct pci_pbm_info *, struct pci_bus *);
+#ifndef NEW_PCI_DMA_MAP
/* IOMMU/DVMA initialization. */
#define PCI_DVMA_HASH_NONE ~0UL
static __inline__ void set_dvma_hash(unsigned long dvma_offset,
@@ -46,6 +47,7 @@ static __inline__ void set_dvma_hash(unsigned long dvma_offset,
pci_dvma_v2p_hash[pci_dvma_ahashfn(paddr)] = dvma_addr - vaddr;
pci_dvma_p2v_hash[pci_dvma_ahashfn(dvma_addr)] = vaddr - dvma_addr;
}
+#endif
/* Configuration space access. */
extern spinlock_t pci_poke_lock;
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index a7f469ec8..12f7211d1 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -1,12 +1,17 @@
-/* $Id: pci_iommu.c,v 1.1 1999/08/30 10:00:47 davem Exp $
+/* $Id: pci_iommu.c,v 1.7 1999/12/20 14:08:15 jj Exp $
* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+
#include <asm/pbm.h>
-#include <asm/iommu.h>
-#include <asm/scatterlist.h>
+
+#include "iommu_common.h"
#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -29,27 +34,67 @@
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
-/* Find a range of iommu mappings of size NPAGES in page
- * table PGT. Return pointer to first iopte.
- */
-static iopte_t *iommu_find_range(unsigned long npages, iopte_t *pgt, int pgt_size)
+static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
- int i;
+ iopte_t *iopte;
+ unsigned long cnum, ent;
+
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+ iopte = iommu->page_table + (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
+ iopte += ((ent = iommu->lowest_free[cnum]) << cnum);
+
+ if (iopte_val(iopte[(1UL << cnum)]) == 0UL) {
+ /* Fast path. */
+ iommu->lowest_free[cnum] = ent + 1;
+ } else {
+ unsigned long pte_off = 1;
- pgt_size -= npages;
- for (i = 0; i < pgt_size; i++) {
- if (!iopte_val(pgt[i]) & IOPTE_VALID) {
- int scan;
+ ent += 1;
+ do {
+ pte_off++;
+ ent++;
+ } while (iopte_val(iopte[(pte_off << cnum)]) != 0UL);
+ iommu->lowest_free[cnum] = ent;
+ }
- for (scan = 1; scan < npages; scan++) {
- if (iopte_val(pgt[i + scan]) & IOPTE_VALID) {
- i += scan;
- goto do_next;
- }
+ /* I've got your streaming cluster right here buddy boy... */
+ return iopte;
+}
+
+static inline void free_streaming_cluster(struct pci_iommu *iommu, u32 base, unsigned long npages)
+{
+ unsigned long cnum, ent;
+
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+ ent = (base << (32 - PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
+ >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+ if (ent < iommu->lowest_free[cnum])
+ iommu->lowest_free[cnum] = ent;
+}
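
/* The streaming allocator above buckets every request into a power-of-two
 * cluster size (cnum) and keeps a lowest_free[] cursor per bucket.  This
 * stand-alone sketch shows only the size-class arithmetic and the fast-path
 * cursor bump; the real code also probes the page table to skip clusters
 * that are still in use.
 */
#include <stdio.h>

#define DEMO_NCLUSTERS 8

static unsigned long lowest_free[DEMO_NCLUSTERS];

/* Smallest cnum such that (1 << cnum) >= npages. */
static unsigned long size_class(unsigned long npages)
{
    unsigned long cnum = 0;

    while ((1UL << cnum) < npages)
        cnum++;
    return cnum;
}

static unsigned long demo_alloc(unsigned long npages)
{
    unsigned long cnum = size_class(npages);
    unsigned long ent = lowest_free[cnum]++;   /* fast path only */

    /* "ent" is the entry index within the bucket; the real allocator turns
     * it into an iopte pointer inside that bucket's slice of the table.
     */
    printf("npages=%lu -> cnum=%lu, entry %lu\n", npages, cnum, ent);
    return ent;
}

int main(void)
{
    demo_alloc(1);   /* cnum 0 */
    demo_alloc(3);   /* rounds up to 4 pages, cnum 2 */
    demo_alloc(4);   /* cnum 2 again, next entry in that bucket */
    demo_alloc(13);  /* rounds up to 16 pages, cnum 4 */
    return 0;
}
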
+
+/* We allocate consistant mappings from the end of cluster zero. */
+static iopte_t *alloc_consistant_cluster(struct pci_iommu *iommu, unsigned long npages)
+{
+ iopte_t *iopte;
+
+ iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
+ while (iopte > iommu->page_table) {
+ iopte--;
+ if (!(iopte_val(*iopte) & IOPTE_VALID)) {
+ unsigned long tmp = npages;
+
+ while (--tmp) {
+ iopte--;
+ if (iopte_val(*iopte) & IOPTE_VALID)
+ break;
}
- return &pgt[i];
+ if (tmp == 0)
+ return iopte;
}
- do_next:
}
return NULL;
}
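
/* alloc_consistant_cluster() above walks backwards from the top of cluster
 * zero looking for npages consecutive free (invalid) ioptes.  A simplified
 * stand-alone version of that backward free-run search, with a flag array
 * standing in for the iopte table:
 */
#include <stdio.h>

#define DEMO_TABLE_SZ 16

/* 1 = iopte valid (in use), 0 = free. */
static int valid[DEMO_TABLE_SZ] = { 1, 1, 0, 0, 0, 1, 0, 0,
                                    0, 0, 1, 0, 0, 0, 0, 0 };

/* Return the index of the first entry of a free run of length npages found
 * when scanning from the end of the table, or -1 if no such run exists.
 */
static int find_free_run_backwards(int npages)
{
    int i = DEMO_TABLE_SZ;

    while (i > 0) {
        i--;
        if (!valid[i]) {
            int run = 1;

            while (run < npages && i > 0 && !valid[i - 1]) {
                i--;
                run++;
            }
            if (run == npages)
                return i;
        }
    }
    return -1;
}

int main(void)
{
    printf("4 pages -> start %d\n", find_free_run_backwards(4)); /* 12 */
    printf("6 pages -> start %d\n", find_free_run_backwards(6)); /* -1 */
    return 0;
}
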
@@ -64,123 +109,168 @@ static iopte_t *iommu_find_range(unsigned long npages, iopte_t *pgt, int pgt_siz
#define IOPTE_INVALID 0UL
-/* Map kernel buffer at ADDR of size SZ using consistant mode
- * DMA for PCI device PDEV. Return 32-bit PCI DMA address.
+/* Allocate and map kernel buffer of size SIZE using consistant mode
+ * DMA for PCI device PDEV. Return non-NULL cpu-side address if
+ * successful and set *DMA_ADDRP to the PCI side dma address.
*/
-u32 pci_map_consistant(struct pci_dev *pdev, void *addr, int sz)
+void *pci_alloc_consistant(struct pci_dev *pdev, long size, u32 *dma_addrp)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
- iopte_t *base;
- unsigned long flags, npages, oaddr;
- u32 ret;
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ iopte_t *iopte;
+ unsigned long flags, order, first_page, ctx;
+ void *ret;
+ int npages;
+
+ if (size <= 0 || pdev == NULL ||
+ pdev->sysdata == NULL || dma_addrp == NULL)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ for (order = 0; order < 10; order++) {
+ if ((PAGE_SIZE << order) >= size)
+ break;
+ }
+ if (order == 10)
+ return NULL;
+
+ first_page = __get_free_pages(GFP_ATOMIC, order);
+ if (first_page == 0UL)
+ return NULL;
+ memset((char *)first_page, 0, PAGE_SIZE << order);
+
+ pcp = pdev->sysdata;
+ iommu = &pcp->pbm->parent->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- oaddr = (unsigned long)addr;
- npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- base = iommu_find_range(npages,
- iommu->page_table, iommu->page_table_sz);
- ret = 0;
- if (base != NULL) {
- unsigned long i, base_paddr, ctx;
-
- ret = (iommu->page_table_map_base +
- ((base - iommu->page_table) << PAGE_SHIFT));
- ret |= (oaddr & ~PAGE_MASK);
- base_paddr = __pa(oaddr & PAGE_MASK);
- ctx = 0;
- if (iommu->iommu_has_ctx_flush)
- ctx = iommu->iommu_cur_ctx++;
- for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
- iopte_val(*base) = IOPTE_CONSISTANT(ctx, base_paddr);
+ iopte = alloc_consistant_cluster(iommu, size >> PAGE_SHIFT);
+ if (iopte == NULL) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ free_pages(first_page, order);
+ return NULL;
}
+
+ *dma_addrp = (iommu->page_table_map_base +
+ ((iopte - iommu->page_table) << PAGE_SHIFT));
+ ret = (void *) first_page;
+ npages = size >> PAGE_SHIFT;
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu->iommu_cur_ctx++;
+ first_page = __pa(first_page);
+ while (npages--) {
+ iopte_val(*iopte) = IOPTE_CONSISTANT(ctx, first_page);
+ iopte++;
+ first_page += PAGE_SIZE;
+ }
+
+ if (iommu->iommu_ctxflush) {
+ pci_iommu_write(iommu->iommu_ctxflush, ctx);
+ } else {
+ int i;
+ u32 daddr = *dma_addrp;
+
+ npages = size >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ pci_iommu_write(iommu->iommu_flush, daddr);
+ daddr += PAGE_SIZE;
+ }
+ }
+
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
-/* Unmap a consistant DMA translation. */
-void pci_unmap_consistant(struct pci_dev *pdev, u32 bus_addr, int sz)
+/* Free and unmap a consistant DMA translation. */
+void pci_free_consistant(struct pci_dev *pdev, long size, void *cpu, u32 dvma)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
- iopte_t *base;
- unsigned long flags, npages, i, ctx;
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ iopte_t *iopte;
+ unsigned long flags, order, npages, i;
+
+ if (size <= 0 || pdev == NULL ||
+ pdev->sysdata == NULL || cpu == NULL)
+ return;
+
+ npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pcp = pdev->sysdata;
+ iommu = &pcp->pbm->parent->iommu;
+ iopte = iommu->page_table +
+ ((dvma - iommu->page_table_map_base) >> PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
- npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- base = iommu->page_table +
- ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
/* Data for consistant mappings cannot enter the streaming
- * buffers, so we only need to update the TSB and flush
- * those entries from the IOMMU's TLB.
+ * buffers, so we only need to update the TSB. Flush of the
+ * IOTLB is done later when these ioptes are used for a new
+ * allocation.
*/
- /* Step 1: Clear out the TSB entries. Save away
- * the context if necessary.
- */
- ctx = 0;
- if (iommu->iommu_has_ctx_flush)
- ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
- for (i = 0; i < npages; i++, base++)
- iopte_val(*base) = IOPTE_INVALID;
-
- /* Step 2: Flush from IOMMU TLB. */
- if (iommu->iommu_has_ctx_flush) {
- pci_iommu_write(iommu->iommu_ctxflush, ctx);
- } else {
- bus_addr &= PAGE_MASK;
- for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
- pci_iommu_write(iommu->iommu_flush, bus_addr);
- }
-
- /* Step 3: Ensure completion of previous PIO writes. */
- (void) pci_iommu_read(iommu->write_complete_reg);
+ for (i = 0; i < npages; i++, iopte++)
+ iopte_val(*iopte) = IOPTE_INVALID;
spin_unlock_irqrestore(&iommu->lock, flags);
+
+ for (order = 0; order < 10; order++) {
+ if ((PAGE_SIZE << order) >= size)
+ break;
+ }
+ if (order < 10)
+ free_pages((unsigned long)cpu, order);
}
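
/* A hedged usage sketch of the consistant-DMA pair as rewritten above (the
 * interface keeps this tree's "consistant" spelling).  This is a fragment
 * that assumes the usual kernel PCI driver context; the device, register and
 * descriptor names are hypothetical, and error handling is the minimum
 * needed to show the calling convention.
 */
struct demo_desc_ring {                 /* hypothetical device descriptor ring */
    u32 entries[256];
};

static int demo_setup_ring(struct pci_dev *pdev)
{
    struct demo_desc_ring *ring;
    u32 ring_dvma;

    /* The CPU-side pointer comes back as the return value, the 32-bit PCI
     * bus address through the out parameter.
     */
    ring = pci_alloc_consistant(pdev, sizeof(*ring), &ring_dvma);
    if (ring == NULL)
        return -ENOMEM;

    /* Program the hypothetical device with the bus address, e.g.:
     *   writel(ring_dvma, base + DEMO_RING_BASE_REG);
     */

    /* Teardown mirrors the allocation: size, CPU pointer, bus address. */
    pci_free_consistant(pdev, sizeof(*ring), ring, ring_dvma);
    return 0;
}
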
/* Map a single buffer at PTR of SZ bytes for PCI DMA
* in streaming mode.
*/
-u32 pci_map_single(struct pci_dev *pdev, void *ptr, int sz)
+u32 pci_map_single(struct pci_dev *pdev, void *ptr, long sz)
{
struct pcidev_cookie *pcp = pdev->sysdata;
struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
+ struct pci_strbuf *strbuf = &pcp->pbm->stc;
iopte_t *base;
unsigned long flags, npages, oaddr;
- u32 ret;
+ unsigned long i, base_paddr, ctx;
+ u32 bus_addr, ret;
- spin_lock_irqsave(&iommu->lock, flags);
oaddr = (unsigned long)ptr;
npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
npages >>= PAGE_SHIFT;
- base = iommu_find_range(npages,
- iommu->page_table, iommu->page_table_sz);
- ret = 0;
- if (base != NULL) {
- unsigned long i, base_paddr, ctx;
-
- ret = (iommu->page_table_map_base +
- ((base - iommu->page_table) << PAGE_SHIFT));
- ret |= (oaddr & ~PAGE_MASK);
- base_paddr = __pa(oaddr & PAGE_MASK);
- ctx = 0;
- if (iommu->iommu_has_ctx_flush)
- ctx = iommu->iommu_cur_ctx++;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ base = alloc_streaming_cluster(iommu, npages);
+ bus_addr = (iommu->page_table_map_base +
+ ((base - iommu->page_table) << PAGE_SHIFT));
+ ret = bus_addr | (oaddr & ~PAGE_MASK);
+ base_paddr = __pa(oaddr & PAGE_MASK);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu->iommu_cur_ctx++;
+ if (strbuf->strbuf_enabled) {
for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
iopte_val(*base) = IOPTE_STREAMING(ctx, base_paddr);
+ } else {
+ for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
+ iopte_val(*base) = IOPTE_CONSISTANT(ctx, base_paddr);
+ }
+
+ /* Flush the IOMMU TLB. */
+ if (iommu->iommu_ctxflush) {
+ pci_iommu_write(iommu->iommu_ctxflush, ctx);
+ } else {
+ for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
+ pci_iommu_write(iommu->iommu_flush, bus_addr);
}
+
spin_unlock_irqrestore(&iommu->lock, flags);
return ret;
}
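
/* The npages computation in pci_map_single() rounds the buffer out to page
 * boundaries on both ends before counting pages.  A stand-alone check of
 * that arithmetic, assuming the sparc64 8K page size:
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 13UL
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1UL))
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1UL) & DEMO_PAGE_MASK)

static unsigned long npages_for(unsigned long oaddr, unsigned long sz)
{
    return (DEMO_PAGE_ALIGN(oaddr + sz) - (oaddr & DEMO_PAGE_MASK))
        >> DEMO_PAGE_SHIFT;
}

int main(void)
{
    /* 100 bytes entirely inside one page: one iopte. */
    printf("%lu\n", npages_for(0x10000100UL, 100));
    /* An 8K buffer starting 0x1800 into a page straddles two pages. */
    printf("%lu\n", npages_for(0x10001800UL, 8192));
    /* Exactly one page, page aligned: still one iopte. */
    printf("%lu\n", npages_for(0x10002000UL, 8192));
    return 0;
}
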
/* Unmap a single streaming mode DMA translation. */
-void pci_unmap_single(struct pci_dev *pdev, u32 bus_addr, int sz)
+void pci_unmap_single(struct pci_dev *pdev, u32 bus_addr, long sz)
{
struct pcidev_cookie *pcp = pdev->sysdata;
struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
@@ -188,25 +278,26 @@ void pci_unmap_single(struct pci_dev *pdev, u32 bus_addr, int sz)
iopte_t *base;
unsigned long flags, npages, i, ctx;
- spin_lock_irqsave(&iommu->lock, flags);
npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
npages >>= PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
bus_addr &= PAGE_MASK;
- /* Step 1: Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_has_ctx_flush)
- ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+ spin_lock_irqsave(&iommu->lock, flags);
- /* Step 2: Kick data out of streaming buffers if necessary. */
+ /* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled) {
u32 vaddr = bus_addr;
+ /* Record the context, if any. */
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
+
PCI_STC_FLUSHFLAG_INIT(strbuf);
- if (strbuf->strbuf_has_ctx_flush &&
- iommu->iommu_has_ctx_flush) {
+ if (strbuf->strbuf_ctxflush &&
+ iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
flushreg = strbuf->strbuf_ctxflush;
@@ -225,69 +316,159 @@ void pci_unmap_single(struct pci_dev *pdev, u32 bus_addr, int sz)
membar("#LoadLoad");
}
- /* Step 3: Clear out TSB entries. */
- for (i = 0; i < npages; i++, base++)
- iopte_val(*base) = IOPTE_INVALID;
+ /* Step 2: Clear out first TSB entry. */
+ iopte_val(*base) = IOPTE_INVALID;
- /* Step 4: Flush the IOMMU TLB. */
- if (iommu->iommu_has_ctx_flush) {
- pci_iommu_write(iommu->iommu_ctxflush, ctx);
- } else {
- for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
- pci_iommu_write(iommu->iommu_flush, bus_addr);
- }
+ free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, npages);
- /* Step 5: Ensure completion of previous PIO writes. */
+ /* Step 3: Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static inline struct scatterlist *fill_sg(iopte_t *iopte, struct scatterlist *sg, int nents, unsigned long ctx, int streaming)
+{
+ struct scatterlist *dma_sg = sg;
+
+ do {
+ unsigned long pteval = ~0UL;
+ u32 dma_npages;
+
+ dma_npages = ((dma_sg->dvma_address & (PAGE_SIZE - 1UL)) +
+ dma_sg->dvma_length +
+ ((u32)(PAGE_SIZE - 1UL))) >> PAGE_SHIFT;
+ do {
+ unsigned long offset;
+ signed int len;
+
+ /* If we are here, we know we have at least one
+ * more page to map. So walk forward until we
+ * hit a page crossing, and begin creating new
+ * mappings from that spot.
+ */
+ for (;;) {
+ unsigned long tmp;
+
+ tmp = (unsigned long) __pa(sg->address);
+ len = sg->length;
+ if (((tmp ^ pteval) >> PAGE_SHIFT) != 0UL) {
+ pteval = tmp & PAGE_MASK;
+ offset = tmp & (PAGE_SIZE - 1UL);
+ break;
+ }
+ if (((tmp ^ (tmp + len - 1UL)) >> PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + PAGE_SIZE) & PAGE_MASK;
+ offset = 0UL;
+ len -= (PAGE_SIZE - (tmp & (PAGE_SIZE - 1UL)));
+ break;
+ }
+ sg++;
+ }
+
+ if (streaming)
+ pteval = IOPTE_STREAMING(ctx, pteval);
+ else
+ pteval = IOPTE_CONSISTANT(ctx, pteval);
+ while (len > 0) {
+ *iopte++ = __iopte(pteval);
+ pteval += PAGE_SIZE;
+ len -= (PAGE_SIZE - offset);
+ offset = 0;
+ dma_npages--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE) + len;
+ sg++;
+
+ /* Skip over any tail mappings we've fully mapped,
+ * adjusting pteval along the way. Stop when we
+ * detect a page crossing event.
+ */
+ while ((pteval << (64 - PAGE_SHIFT)) != 0UL &&
+ pteval == __pa(sg->address) &&
+ ((pteval ^
+ (__pa(sg->address) + sg->length - 1UL)) >> PAGE_SHIFT) == 0UL) {
+ pteval += sg->length;
+ sg++;
+ }
+ if ((pteval << (64 - PAGE_SHIFT)) == 0UL)
+ pteval = ~0UL;
+ } while (dma_npages != 0);
+ dma_sg++;
+ } while (dma_sg->dvma_length != 0);
+ return dma_sg;
+}
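
/* fill_sg() sizes each coalesced DMA segment in ioptes by adding the
 * segment's starting offset within its page to its length and rounding up.
 * A quick stand-alone check of that dma_npages expression, again assuming
 * 8K pages:
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 13UL
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

static unsigned long dma_npages(unsigned long dvma_address,
                                unsigned long dvma_length)
{
    return ((dvma_address & (DEMO_PAGE_SIZE - 1UL)) +
            dvma_length +
            (DEMO_PAGE_SIZE - 1UL)) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
    /* A 12K segment starting 0x1800 into a page needs 3 ioptes:
     * offsets 0x1800..0x47ff touch parts of three 8K pages.
     */
    printf("%lu\n", dma_npages(0x1800UL, 0x3000UL));
    /* A page-aligned 16K segment needs exactly 2. */
    printf("%lu\n", dma_npages(0x0UL, 0x4000UL));
    return 0;
}
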
+
/* Map a set of buffers described by SGLIST with NELEMS array
* elements in streaming mode for PCI DMA.
+ * When making changes here, inspect the assembly output. I had a
+ * hard time keeping this routine from using stack slots for holding variables.
*/
-void pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
+int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
{
- struct pcidev_cookie *pcp = pdev->sysdata;
- struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
- unsigned long flags, ctx, i;
+ struct pcidev_cookie *pcp;
+ struct pci_iommu *iommu;
+ struct pci_strbuf *strbuf;
+ unsigned long flags, ctx, i, npages;
+ iopte_t *base;
+ u32 dma_base;
+ struct scatterlist *sgtmp;
+ int tmp;
+
+ /* Fast path single entry scatterlists. */
+ if (nelems == 1) {
+ sglist->dvma_address = pci_map_single(pdev, sglist->address, sglist->length);
+ sglist->dvma_length = sglist->length;
+ return 1;
+ }
+
+ pcp = pdev->sysdata;
+ iommu = &pcp->pbm->parent->iommu;
+ strbuf = &pcp->pbm->stc;
+
+ /* Step 1: Prepare scatter list. */
+
+ npages = prepare_sg(sglist, nelems);
+
+ /* Step 2: Allocate a cluster. */
spin_lock_irqsave(&iommu->lock, flags);
- /* Step 1: Choose a context if necessary. */
+ base = alloc_streaming_cluster(iommu, npages);
+ dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << PAGE_SHIFT);
+
+ /* Step 3: Normalize DMA addresses. */
+ tmp = nelems;
+
+ sgtmp = sglist;
+ while (tmp-- && sgtmp->dvma_length) {
+ sgtmp->dvma_address += dma_base;
+ sgtmp++;
+ }
+
+ /* Step 4: Choose a context if necessary. */
ctx = 0;
- if (iommu->iommu_has_ctx_flush)
+ if (iommu->iommu_ctxflush)
ctx = iommu->iommu_cur_ctx++;
- /* Step 2: Create the mappings. */
- for (i = 0; i < nelems; i++) {
- unsigned long oaddr, npages;
- iopte_t *base;
-
- oaddr = (unsigned long)sglist[i].address;
- npages = PAGE_ALIGN(oaddr + sglist[i].length) - (oaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- base = iommu_find_range(npages,
- iommu->page_table, iommu->page_table_sz);
- if (base != NULL) {
- unsigned long j, base_paddr;
- u32 dvma_addr;
-
- dvma_addr = (iommu->page_table_map_base +
- ((base - iommu->page_table) << PAGE_SHIFT));
- dvma_addr |= (oaddr & ~PAGE_MASK);
- sglist[i].dvma_address = dvma_addr;
- sglist[i].dvma_length = sglist[i].length;
- base_paddr = __pa(oaddr & PAGE_MASK);
- for (j = 0; j < npages; j++, base++, base_paddr += PAGE_SIZE)
- iopte_val(*base) = IOPTE_STREAMING(ctx, base_paddr);
- } else {
- sglist[i].dvma_address = 0;
- sglist[i].dvma_length = 0;
- }
+ /* Step 5: Create the mappings. */
+ sgtmp = fill_sg (base, sglist, nelems, ctx, strbuf->strbuf_enabled);
+#ifdef VERIFY_SG
+ verify_sglist(sglist, nelems, base, npages);
+#endif
+
+ /* Step 6: Flush the IOMMU TLB. */
+ if (iommu->iommu_ctxflush) {
+ pci_iommu_write(iommu->iommu_ctxflush, ctx);
+ } else {
+ for (i = 0; i < npages; i++, dma_base += PAGE_SIZE)
+ pci_iommu_write(iommu->iommu_flush, dma_base);
}
spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return sgtmp - sglist;
}
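
/* A hedged usage sketch of the scatter-gather interface as it stands in this
 * patch: pci_map_sg() now returns the number of coalesced DMA segments, and
 * the driver walks dvma_address/dvma_length up to that count.  This assumes
 * the usual kernel driver context; names other than the mapping calls
 * themselves are hypothetical.
 */
static void demo_map_request(struct pci_dev *pdev,
                             struct scatterlist *sg, int nents)
{
    int i, dma_ents;

    dma_ents = pci_map_sg(pdev, sg, nents);

    /* Program one hypothetical descriptor per coalesced segment; there may
     * be fewer DMA segments than input entries.
     */
    for (i = 0; i < dma_ents; i++) {
        /* demo_fill_descriptor(sg[i].dvma_address, sg[i].dvma_length); */
    }

    /* ... start I/O, wait for completion ... */

    /* Unmapping takes the scatterlist again; the routine above walks it
     * until it finds an entry with no dvma_length.
     */
    pci_unmap_sg(pdev, sg, nents);
}
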
/* Unmap a set of streaming mode DMA translations. */
@@ -296,25 +477,38 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
struct pcidev_cookie *pcp = pdev->sysdata;
struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
struct pci_strbuf *strbuf = &pcp->pbm->stc;
- unsigned long flags, ctx, i;
+ iopte_t *base;
+ unsigned long flags, ctx, i, npages;
+ u32 bus_addr;
+
+ bus_addr = sglist->dvma_address & PAGE_MASK;
+
+ i = 0;
+ if (nelems > 1) {
+ for (; i < nelems; i++)
+ if (sglist[i].dvma_length == 0)
+ break;
+ i--;
+ }
+ npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;
+
+ base = iommu->page_table +
+ ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
- /* Step 1: Record the context, if any. */
- ctx = 0;
- if (iommu->iommu_has_ctx_flush) {
- iopte_t *iopte;
+ /* Step 1: Kick data out of streaming buffers if necessary. */
+ if (strbuf->strbuf_enabled) {
+ u32 vaddr = bus_addr;
- iopte = iommu->page_table +
- ((sglist[0].dvma_address - iommu->page_table_map_base) >> PAGE_SHIFT);
- ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
- }
+ /* Record the context, if any. */
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
- /* Step 2: Kick data out of streaming buffers if necessary. */
- if (strbuf->strbuf_enabled) {
PCI_STC_FLUSHFLAG_INIT(strbuf);
- if (strbuf->strbuf_has_ctx_flush &&
- iommu->iommu_has_ctx_flush) {
+ if (strbuf->strbuf_ctxflush &&
+ iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
flushreg = strbuf->strbuf_ctxflush;
@@ -323,66 +517,22 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
pci_iommu_write(flushreg, ctx);
} while(((long)pci_iommu_read(matchreg)) < 0L);
} else {
- for (i = 0; i < nelems; i++) {
- unsigned long j, npages;
- u32 vaddr;
-
- j = sglist[i].dvma_length;
- if (!j)
- break;
- vaddr = sglist[i].dvma_address;
- npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- vaddr &= PAGE_MASK;
- for (j = 0; j < npages; j++, vaddr += PAGE_SIZE)
- pci_iommu_write(strbuf->strbuf_pflush, vaddr);
- }
-
- pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
- (void) pci_iommu_read(iommu->write_complete_reg);
- while (!PCI_STC_FLUSHFLAG_SET(strbuf))
- membar("#LoadLoad");
+ for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
+ pci_iommu_write(strbuf->strbuf_pflush, vaddr);
}
- }
-
- /* Step 3: Clear out TSB entries. */
- for (i = 0; i < nelems; i++) {
- unsigned long j, npages;
- iopte_t *base;
- u32 vaddr;
- j = sglist[i].dvma_length;
- if (!j)
- break;
- vaddr = sglist[i].dvma_address;
- npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- base = iommu->page_table +
- ((vaddr - iommu->page_table_map_base) >> PAGE_SHIFT);
- for (j = 0; j < npages; j++, base++)
- iopte_val(*base) = IOPTE_INVALID;
+ pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+ (void) pci_iommu_read(iommu->write_complete_reg);
+ while (!PCI_STC_FLUSHFLAG_SET(strbuf))
+ membar("#LoadLoad");
}
- /* Step 4: Flush the IOMMU TLB. */
- if (iommu->iommu_has_ctx_flush) {
- pci_iommu_write(iommu->iommu_ctxflush, ctx);
- } else {
- for (i = 0; i < nelems; i++) {
- unsigned long j, npages;
- u32 vaddr;
+ /* Step 2: Clear out first TSB entry. */
+ iopte_val(*base) = IOPTE_INVALID;
- j = sglist[i].dvma_length;
- if (!j)
- break;
- vaddr = sglist[i].dvma_address;
- npages = PAGE_ALIGN(vaddr + j) - (vaddr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- for (j = 0; j < npages; j++, vaddr += PAGE_SIZE)
- pci_iommu_write(iommu->iommu_flush, vaddr);
- }
- }
+ free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base, npages);
- /* Step 5: Ensure completion of previous PIO writes. */
+ /* Step 3: Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -391,7 +541,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems)
/* Make physical memory consistant for a single
* streaming mode DMA translation after a transfer.
*/
-void pci_dma_sync_single(struct pci_dev *pdev, u32 bus_addr, int sz)
+void pci_dma_sync_single(struct pci_dev *pdev, u32 bus_addr, long sz)
{
struct pcidev_cookie *pcp = pdev->sysdata;
struct pci_iommu *iommu = &pcp->pbm->parent->iommu;
@@ -409,8 +559,8 @@ void pci_dma_sync_single(struct pci_dev *pdev, u32 bus_addr, int sz)
/* Step 1: Record the context, if any. */
ctx = 0;
- if (iommu->iommu_has_ctx_flush &&
- strbuf->strbuf_has_ctx_flush) {
+ if (iommu->iommu_ctxflush &&
+ strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
@@ -420,8 +570,8 @@ void pci_dma_sync_single(struct pci_dev *pdev, u32 bus_addr, int sz)
/* Step 2: Kick data out of streaming buffers. */
PCI_STC_FLUSHFLAG_INIT(strbuf);
- if (iommu->iommu_has_ctx_flush &&
- strbuf->strbuf_has_ctx_flush) {
+ if (iommu->iommu_ctxflush &&
+ strbuf->strbuf_ctxflush) {
unsigned long matchreg, flushreg;
flushreg = strbuf->strbuf_ctxflush;
@@ -462,8 +612,8 @@ void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
/* Step 1: Record the context, if any. */
ctx = 0;
- if (iommu->iommu_has_ctx_flush &&
- strbuf->strbuf_has_ctx_flush) {
+ if (iommu->iommu_ctxflush &&
+ strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
@@ -473,8 +623,8 @@ void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
/* Step 2: Kick data out of streaming buffers. */
PCI_STC_FLUSHFLAG_INIT(strbuf);
- if (iommu->iommu_has_ctx_flush &&
- strbuf->strbuf_has_ctx_flush) {
+ if (iommu->iommu_ctxflush &&
+ strbuf->strbuf_ctxflush) {
unsigned long matchreg, flushreg;
flushreg = strbuf->strbuf_ctxflush;
@@ -483,21 +633,21 @@ void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelem
pci_iommu_write(flushreg, ctx);
} while (((long)pci_iommu_read(matchreg)) < 0L);
} else {
- unsigned long i;
+ unsigned long i, npages;
+ u32 bus_addr;
- for(i = 0; i < nelems; i++) {
- unsigned long bus_addr, npages, j;
+ i = 0;
+ bus_addr = sglist[0].dvma_address & PAGE_MASK;
- j = sglist[i].dvma_length;
- if (!j)
- break;
- bus_addr = sglist[i].dvma_address;
- npages = PAGE_ALIGN(bus_addr + j) - (bus_addr & PAGE_MASK);
- npages >>= PAGE_SHIFT;
- bus_addr &= PAGE_MASK;
- for(j = 0; i < npages; i++, bus_addr += PAGE_SIZE)
- pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
+ if (nelems > 1) {
+ for(; i < nelems; i++)
+ if (!sglist[i].dvma_length)
+ break;
+ i--;
}
+ npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
+ pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
}
/* Step 3: Perform flush synchronization sequence. */
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 1afe5a67b..d66086bfa 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1,9 +1,9 @@
-/* $Id: pci_psycho.c,v 1.4 1999/09/05 09:33:36 ecd Exp $
+/* $Id: pci_psycho.c,v 1.7 1999/12/17 12:31:57 jj Exp $
* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
@@ -380,7 +380,7 @@ static unsigned int __init psycho_irq_build(struct pci_controller_info *p,
unsigned int ino)
{
struct ino_bucket *bucket;
- volatile unsigned int *imap, *iclr;
+ unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int pil, inofixup = 0;
@@ -399,12 +399,12 @@ static unsigned int __init psycho_irq_build(struct pci_controller_info *p,
/* Now build the IRQ bucket. */
pil = psycho_ino_to_pil(pdev, ino);
- imap = (volatile unsigned int *)__va(p->controller_regs + imap_off);
- imap += 1;
+ imap = p->controller_regs + imap_off;
+ imap += 4;
iclr_off = psycho_iclr_offset(ino);
- iclr = (volatile unsigned int *)__va(p->controller_regs + iclr_off);
- iclr += 1;
+ iclr = p->controller_regs + iclr_off;
+ iclr += 4;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
@@ -838,6 +838,10 @@ static void psycho_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
"DMA Read" :
((error_bits & PSYCHO_CEAFSR_PDWR) ?
"DMA Write" : "???")))));
+
+ /* XXX Use syndrome and afar to print out module string just like
+ * XXX UDB CE trap handler does... -DaveM
+ */
printk("PSYCHO%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"UPA_MID[%02lx] was_block(%d)\n",
p->index,
@@ -1213,26 +1217,28 @@ static void __init psycho_scan_bus(struct pci_controller_info *p)
psycho_register_error_handlers(p);
}
-static void __init psycho_iommu_init(struct pci_controller_info *p, int tsbsize)
+static void __init psycho_iommu_init(struct pci_controller_info *p)
{
- extern int this_is_starfire;
- extern void *starfire_hookup(int);
+#ifndef NEW_PCI_DMA_MAP
struct linux_mlist_p1275 *mlist;
- unsigned long tsbbase, i, n, order;
+ unsigned long n;
iopte_t *iopte;
+ int tsbsize = 32;
+#endif
+ extern int this_is_starfire;
+ extern void *starfire_hookup(int);
+ unsigned long tsbbase, i;
u64 control;
/* Setup initial software IOMMU state. */
spin_lock_init(&p->iommu.lock);
p->iommu.iommu_cur_ctx = 0;
- /* PSYCHO's IOMMU lacks ctx flushing. */
- p->iommu.iommu_has_ctx_flush = 0;
-
/* Register addresses. */
p->iommu.iommu_control = p->controller_regs + PSYCHO_IOMMU_CONTROL;
p->iommu.iommu_tsbbase = p->controller_regs + PSYCHO_IOMMU_TSBBASE;
p->iommu.iommu_flush = p->controller_regs + PSYCHO_IOMMU_FLUSH;
+ /* PSYCHO's IOMMU lacks ctx flushing. */
p->iommu.iommu_ctxflush = 0;
/* We use the main control register of PSYCHO as the write
@@ -1252,18 +1258,29 @@ static void __init psycho_iommu_init(struct pci_controller_info *p, int tsbsize)
control &= ~(PSYCHO_IOMMU_CTRL_DENAB);
psycho_write(p->controller_regs + PSYCHO_IOMMU_CONTROL, control);
- for(order = 0;; order++)
- if((PAGE_SIZE << order) >= ((tsbsize * 1024) * 8))
- break;
-
- tsbbase = __get_free_pages(GFP_DMA, order);
+#ifndef NEW_PCI_DMA_MAP
+ /* Using assumed page size 64K with 32K entries we need 256KB iommu page
+ * table (32K ioptes * 8 bytes per iopte). This is
+ * page order 5 on UltraSparc.
+ */
+ tsbbase = __get_free_pages(GFP_KERNEL, 5);
+#else
+ /* Using assumed page size 8K with 128K entries we need 1MB iommu page
+ * table (128K ioptes * 8 bytes per iopte). This is
+ * page order 7 on UltraSparc.
+ */
+ tsbbase = __get_free_pages(GFP_KERNEL, 7);
+#endif
if (!tsbbase) {
prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
prom_halt();
}
- p->iommu.page_table = iopte = (iopte_t *)tsbbase;
- p->iommu.page_table_sz = (tsbsize * 1024);
+ p->iommu.page_table = (iopte_t *)tsbbase;
+ p->iommu.page_table_sz_bits = 17;
+ p->iommu.page_table_map_base = 0xc0000000;
+#ifndef NEW_PCI_DMA_MAP
+ iopte = (iopte_t *)tsbbase;
/* Initialize to "none" settings. */
for(i = 0; i < PCI_DVMA_HASHSZ; i++) {
pci_dvma_v2p_hash[i] = PCI_DVMA_HASH_NONE;
@@ -1329,10 +1346,11 @@ out:
prom_printf("Try booting with mem=xxxM or similar\n");
prom_halt();
}
-
+#endif
psycho_write(p->controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
control = psycho_read(p->controller_regs + PSYCHO_IOMMU_CONTROL);
+#ifndef NEW_PCI_DMA_MAP
control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ);
control |= (PSYCHO_IOMMU_CTRL_TBWSZ | PSYCHO_IOMMU_CTRL_ENAB);
switch(tsbsize) {
@@ -1353,6 +1371,10 @@ out:
prom_halt();
break;
}
+#else
+ control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
+ control |= (PSYCHO_IOMMU_TSBSZ_128K | PSYCHO_IOMMU_CTRL_ENAB);
+#endif
psycho_write(p->controller_regs + PSYCHO_IOMMU_CONTROL, control);
/* If necessary, hook us up for starfire IRQ translations. */
@@ -1426,9 +1448,6 @@ static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
/* Currently we don't even use it. */
pbm->stc.strbuf_enabled = 0;
- /* PSYCHO's streaming buffer lacks ctx flushing. */
- pbm->stc.strbuf_has_ctx_flush = 0;
-
if (is_pbm_a) {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
@@ -1438,6 +1457,7 @@ static void psycho_pbm_strbuf_init(struct pci_controller_info *p,
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
}
+ /* PSYCHO's streaming buffer lacks ctx flushing. */
pbm->stc.strbuf_ctxflush = 0;
pbm->stc.strbuf_ctxmatch_base = 0;
@@ -1599,7 +1619,7 @@ void __init psycho_init(int node)
psycho_controller_hwinit(p);
- psycho_iommu_init(p, 32);
+ psycho_iommu_init(p);
is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
psycho_pbm_init(p, node, is_pbm_a);
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 46a9b31cf..3788f71d3 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1,9 +1,9 @@
-/* $Id: pci_sabre.c,v 1.2 1999/09/05 04:58:06 davem Exp $
+/* $Id: pci_sabre.c,v 1.8 2000/01/06 23:51:49 davem Exp $
* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@caipfs.rutgers.edu)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
@@ -65,6 +65,14 @@
#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
+#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
+#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
+#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
+#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
+#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
+#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
+#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
+#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
@@ -601,7 +609,7 @@ static unsigned int __init sabre_irq_build(struct pci_controller_info *p,
unsigned int ino)
{
struct ino_bucket *bucket;
- volatile unsigned int *imap, *iclr;
+ unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int pil, inofixup = 0;
@@ -620,12 +628,12 @@ static unsigned int __init sabre_irq_build(struct pci_controller_info *p,
/* Now build the IRQ bucket. */
pil = sabre_ino_to_pil(pdev, ino);
- imap = (volatile unsigned int *)__va(p->controller_regs + imap_off);
- imap += 1;
+ imap = p->controller_regs + imap_off;
+ imap += 4;
iclr_off = sabre_iclr_offset(ino);
- iclr = (volatile unsigned int *)__va(p->controller_regs + iclr_off);
- iclr += 1;
+ iclr = p->controller_regs + iclr_off;
+ iclr += 4;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
@@ -717,13 +725,13 @@ static void sabre_check_iommu_error(struct pci_controller_info *p,
type_string = "Unknown";
break;
};
- printk("SABRE%d: IOMMU TAG(%d)[error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
- p->index, i, type_string,
+ printk("SABRE%d: IOMMU TAG(%d)[RAW(%016lx)error(%s)wr(%d)sz(%dK)vpg(%08lx)]\n",
+ p->index, i, tag, type_string,
((tag & SABRE_IOMMUTAG_WRITE) ? 1 : 0),
((tag & SABRE_IOMMUTAG_SIZE) ? 64 : 8),
((tag & SABRE_IOMMUTAG_VPN) << PAGE_SHIFT));
- printk("SABRE%d: IOMMU DATA(%d)[valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
- p->index, i,
+ printk("SABRE%d: IOMMU DATA(%d)[RAW(%016lx)valid(%d)used(%d)cache(%d)ppg(%016lx)\n",
+ p->index, i, data,
((data & SABRE_IOMMUDATA_VALID) ? 1 : 0),
((data & SABRE_IOMMUDATA_USED) ? 1 : 0),
((data & SABRE_IOMMUDATA_CACHE) ? 1 : 0),
@@ -814,6 +822,10 @@ static void sabre_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
"DMA Read" :
((error_bits & SABRE_CEAFSR_PDWR) ?
"DMA Write" : "???")));
+
+ /* XXX Use syndrome and afar to print out module string just like
+ * XXX UDB CE trap handler does... -DaveM
+ */
printk("SABRE%d: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"was_block(%d)\n",
p->index,
@@ -1020,21 +1032,15 @@ static void __init sabre_base_address_update(struct pci_dev *pdev, int resource)
static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre_bus)
{
- struct pci_dev *pdev;
- u32 dword;
- u16 word;
-
- for(pdev = pci_devices; pdev; pdev = pdev->next) {
- if(pdev->vendor == PCI_VENDOR_ID_SUN &&
- pdev->device == PCI_DEVICE_ID_SUN_SABRE) {
- sabre_write_byte(pdev, PCI_LATENCY_TIMER, 64);
- break;
- }
- }
+ struct list_head *walk = &sabre_bus->devices;
+
+ for (walk = walk->next; walk != &sabre_bus->devices; walk = walk->next) {
+ struct pci_dev *pdev = pci_dev_b(walk);
- for (pdev = sabre_bus->devices; pdev; pdev = pdev->sibling) {
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
+ u16 word;
+
sabre_read_word(pdev, PCI_COMMAND, &word);
word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
@@ -1044,32 +1050,6 @@ static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre
/* Status register bits are "write 1 to clear". */
sabre_write_word(pdev, PCI_STATUS, 0xffff);
sabre_write_word(pdev, PCI_SEC_STATUS, 0xffff);
-
- sabre_read_word(pdev, PCI_BRIDGE_CONTROL, &word);
- word = PCI_BRIDGE_CTL_MASTER_ABORT |
- PCI_BRIDGE_CTL_SERR |
- PCI_BRIDGE_CTL_PARITY;
- sabre_write_word(pdev, PCI_BRIDGE_CONTROL, word);
-
- sabre_read_dword(pdev, APB_PCI_CONTROL_HIGH, &dword);
- dword = APB_PCI_CTL_HIGH_SERR |
- APB_PCI_CTL_HIGH_ARBITER_EN;
- sabre_write_dword(pdev, APB_PCI_CONTROL_HIGH, dword);
-
- /* Systems with SIMBA are usually workstations, so
- * we configure to park to SIMBA not to the previous
- * bus owner.
- */
- sabre_read_dword(pdev, APB_PCI_CONTROL_LOW, &dword);
- dword = APB_PCI_CTL_LOW_ERRINT_EN | 0x0f;
- sabre_write_dword(pdev, APB_PCI_CONTROL_LOW, dword);
-
- /* Don't mess with the retry limit and PIO/DMA latency
- * timer settings. But do set primary and secondary
- * latency timers.
- */
- sabre_write_byte(pdev, PCI_LATENCY_TIMER, 64);
- sabre_write_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
}
}
}
@@ -1077,7 +1057,8 @@ static void __init apb_init(struct pci_controller_info *p, struct pci_bus *sabre
static void __init sabre_scan_bus(struct pci_controller_info *p)
{
static int once = 0;
- struct pci_bus *sabre_bus, *pbus;
+ struct pci_bus *sabre_bus;
+ struct list_head *walk;
/* Unlike for PSYCHO, we can only have one SABRE
* in a system. Having multiple SABREs is thus
@@ -1100,7 +1081,9 @@ static void __init sabre_scan_bus(struct pci_controller_info *p)
&p->pbm_A);
apb_init(p, sabre_bus);
- for (pbus = sabre_bus->children; pbus; pbus = pbus->next) {
+ walk = &sabre_bus->children;
+ for (walk = walk->next; walk != &sabre_bus->children; walk = walk->next) {
+ struct pci_bus *pbus = pci_bus_b(walk);
struct pci_pbm_info *pbm;
if (pbus->number == p->pbm_A.pci_first_busno) {
@@ -1124,30 +1107,49 @@ static void __init sabre_scan_bus(struct pci_controller_info *p)
static void __init sabre_iommu_init(struct pci_controller_info *p,
int tsbsize, unsigned long dvma_offset)
{
+#ifndef NEW_PCI_DMA_MAP
struct linux_mlist_p1275 *mlist;
- unsigned long tsbbase, i, n, order;
+ unsigned long n;
iopte_t *iopte;
+#endif
+ unsigned long tsbbase, i, order;
u64 control;
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&p->iommu.lock);
+ p->iommu.iommu_cur_ctx = 0;
+
+ /* Register addresses. */
+ p->iommu.iommu_control = p->controller_regs + SABRE_IOMMU_CONTROL;
+ p->iommu.iommu_tsbbase = p->controller_regs + SABRE_IOMMU_TSBBASE;
+ p->iommu.iommu_flush = p->controller_regs + SABRE_IOMMU_FLUSH;
+ p->iommu.write_complete_reg = p->controller_regs + SABRE_WRSYNC;
+ /* Sabre's IOMMU lacks ctx flushing. */
+ p->iommu.iommu_ctxflush = 0;
+
/* Invalidate TLB Entries. */
control = sabre_read(p->controller_regs + SABRE_IOMMU_CONTROL);
- control |= IOMMU_CTRL_DENAB;
+ control |= SABRE_IOMMUCTRL_DENAB;
sabre_write(p->controller_regs + SABRE_IOMMU_CONTROL, control);
for(i = 0; i < 16; i++)
sabre_write(p->controller_regs + SABRE_IOMMU_DATA + (i * 8UL), 0);
- control &= ~(IOMMU_CTRL_DENAB);
+ control &= ~(SABRE_IOMMUCTRL_DENAB);
sabre_write(p->controller_regs + SABRE_IOMMU_CONTROL, control);
for(order = 0;; order++)
if((PAGE_SIZE << order) >= ((tsbsize * 1024) * 8))
break;
- tsbbase = __get_free_pages(GFP_DMA, order);
+ tsbbase = __get_free_pages(GFP_KERNEL, order);
if (!tsbbase) {
prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
prom_halt();
}
+ p->iommu.page_table = (iopte_t *)tsbbase;
+ p->iommu.page_table_map_base = dvma_offset;
+
+#ifndef NEW_PCI_DMA_MAP
iopte = (iopte_t *)tsbbase;
/* Initialize to "none" settings. */
@@ -1216,27 +1218,47 @@ out:
prom_printf("Try booting with mem=xxxM or similar\n");
prom_halt();
}
+#endif
sabre_write(p->controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
control = sabre_read(p->controller_regs + SABRE_IOMMU_CONTROL);
- control &= ~(IOMMU_CTRL_TSBSZ);
- control |= (IOMMU_CTRL_TBWSZ | IOMMU_CTRL_ENAB);
+#ifndef NEW_PCI_DMA_MAP
+ control &= ~(SABRE_IOMMUCTRL_TSBSZ);
+ control |= (SABRE_IOMMUCTRL_TBWSZ | SABRE_IOMMUCTRL_ENAB);
switch(tsbsize) {
case 8:
- control |= IOMMU_TSBSZ_8K;
+ control |= SABRE_IOMMU_TSBSZ_8K;
break;
case 16:
- control |= IOMMU_TSBSZ_16K;
+ control |= SABRE_IOMMU_TSBSZ_16K;
break;
case 32:
- control |= IOMMU_TSBSZ_32K;
+ control |= SABRE_IOMMU_TSBSZ_32K;
+ break;
+ default:
+ prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
+ prom_halt();
+ break;
+ }
+#else
+ control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
+ control |= SABRE_IOMMUCTRL_ENAB;
+ switch(tsbsize) {
+ case 64:
+ control |= SABRE_IOMMU_TSBSZ_64K;
+ p->iommu.page_table_sz_bits = 16;
+ break;
+ case 128:
+ control |= SABRE_IOMMU_TSBSZ_128K;
+ p->iommu.page_table_sz_bits = 17;
break;
default:
prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
prom_halt();
break;
}
+#endif
sabre_write(p->controller_regs + SABRE_IOMMU_CONTROL, control);
}
@@ -1445,6 +1467,7 @@ void __init sabre_init(int pnode)
}
switch(vdma[1]) {
+#ifndef NEW_PCI_DMA_MAP
case 0x20000000:
tsbsize = 8;
break;
@@ -1454,6 +1477,15 @@ void __init sabre_init(int pnode)
case 0x80000000:
tsbsize = 32;
break;
+#else
+ case 0x20000000:
+ tsbsize = 64;
+ break;
+ case 0x40000000:
+ case 0x80000000:
+ tsbsize = 128;
+ break;
+#endif
default:
prom_printf("SABRE: strange virtual-dma size.\n");
prom_halt();
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 831011128..e612d0200 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -1,4 +1,4 @@
-/* $Id: power.c,v 1.4 1999/08/31 18:22:05 davem Exp $
+/* $Id: power.c,v 1.5 1999/12/19 23:28:00 davem Exp $
* power.c: Power management driver.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -90,16 +90,18 @@ void __init power_init(void)
return;
found:
- power_reg = edev->resource[0].start;
+ power_reg = (unsigned long)ioremap(edev->resource[0].start, 0x4);
printk("power: Control reg at %016lx ... ", power_reg);
if (kernel_thread(powerd, 0, CLONE_FS) < 0) {
printk("Failed to start power daemon.\n");
return;
}
printk("powerd running.\n");
- if (request_irq(edev->irqs[0],
- power_handler, SA_SHIRQ, "power",
- (void *) power_reg) < 0)
- printk("power: Error, cannot register IRQ handler.\n");
+ if (edev->irqs[0] != 0) {
+ if (request_irq(edev->irqs[0],
+ power_handler, SA_SHIRQ, "power",
+ (void *) power_reg) < 0)
+ printk("power: Error, cannot register IRQ handler.\n");
+ }
}
#endif /* CONFIG_PCI */
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 20a5534cb..922e74d2e 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,4 +1,4 @@
-/* $Id: process.c,v 1.100 1999/08/31 04:39:39 davem Exp $
+/* $Id: process.c,v 1.102 1999/12/15 22:24:49 davem Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -33,6 +33,7 @@
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
@@ -268,7 +269,7 @@ void __show_regs(struct pt_regs * regs)
unsigned long flags;
spin_lock_irqsave(&regdump_lock, flags);
- printk("CPU[%d]: local_irq_count[%ld] global_irq_count[%d]\n",
+ printk("CPU[%d]: local_irq_count[%u] global_irq_count[%d]\n",
smp_processor_id(), local_irq_count,
atomic_read(&global_irq_count));
#endif
@@ -802,35 +803,3 @@ out:
unlock_kernel();
return error;
}
-
-/*
- * These bracket the sleeping functions..
- */
-extern void scheduling_functions_start_here(void);
-extern void scheduling_functions_end_here(void);
-#define first_sched ((unsigned long) scheduling_functions_start_here)
-#define last_sched ((unsigned long) scheduling_functions_end_here)
-
-unsigned long get_wchan(struct task_struct *p)
-{
- unsigned long pc, fp, bias = 0;
- unsigned long task_base = (unsigned long) p;
- struct reg_window *rw;
- int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
- bias = STACK_BIAS;
- fp = p->thread.ksp + bias;
- do {
- /* Bogus frame pointer? */
- if (fp < (task_base + sizeof(struct task_struct)) ||
- fp >= (task_base + (2 * PAGE_SIZE)))
- break;
- rw = (struct reg_window *) fp;
- pc = rw->ins[7];
- if (pc < first_sched || pc >= last_sched)
- return pc;
- fp = rw->ins[6] + bias;
- } while (++count < 16);
- return 0;
-}
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
new file mode 100644
index 000000000..9e24c7bb3
--- /dev/null
+++ b/arch/sparc64/kernel/sbus.c
@@ -0,0 +1,1145 @@
+/* $Id: sbus.c,v 1.6 1999/12/20 14:08:17 jj Exp $
+ * sbus.c: UltraSparc SBUS controller support.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/malloc.h>
+#include <linux/init.h>
+
+#include <asm/page.h>
+#include <asm/sbus.h>
+#include <asm/io.h>
+#include <asm/upa.h>
+#include <asm/cache.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include "iommu_common.h"
+
+/* These should be allocated on an SMP_CACHE_BYTES
+ * aligned boundary for optimal performance.
+ *
+ * On SYSIO, using an 8K page size we have 1GB of SBUS
+ * DMA space mapped. We divide this space into equally
+ * sized clusters. Currently we allow clusters up to a
+ * size of 1MB. If anything begins to generate DMA
+ * mapping requests larger than this we will need to
+ * increase things a bit.
+ */
+
+#define NCLUSTERS 8UL
+#define ONE_GIG (1UL * 1024UL * 1024UL * 1024UL)
+#define CLUSTER_SIZE (ONE_GIG / NCLUSTERS)
+#define CLUSTER_MASK (CLUSTER_SIZE - 1)
+#define CLUSTER_NPAGES (CLUSTER_SIZE >> PAGE_SHIFT)
+#define MAP_BASE ((u32)0xc0000000)
+
+struct sbus_iommu {
+/*0x00*/spinlock_t lock;
+
+/*0x08*/iopte_t *page_table;
+/*0x10*/unsigned long strbuf_regs;
+/*0x18*/unsigned long iommu_regs;
+/*0x20*/unsigned long sbus_control_reg;
+
+/*0x28*/volatile unsigned long strbuf_flushflag;
+
+ /* If NCLUSTERS is ever decreased to 4 or lower,
+ * you must increase the size of the type of
+ * these counters. You have been duly warned. -DaveM
+ */
+/*0x30*/u16 lowest_free[NCLUSTERS];
+};
+
+/* Flushing heuristics */
+#define IOMMU_DIAG_LIM 16
+#define STRBUF_DIAG_LIM 32
+
+/* Offsets from iommu_regs */
+#define SYSIO_IOMMUREG_BASE 0x2400UL
+#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
+#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
+#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
+#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
+#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
+#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
+#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
+#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
+
+#define IOMMU_DRAM_VALID (1UL << 30UL)
+
+static void __iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+ int hit = 0;
+
+ if (npages <= IOMMU_DIAG_LIM) {
+ while (npages--)
+ upa_writeq(base + (npages << PAGE_SHIFT),
+ iommu->iommu_regs + IOMMU_FLUSH);
+ hit = 1;
+ } else {
+ u32 limit = base + ((npages << PAGE_SHIFT) - 1UL);
+ unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
+ unsigned long tag = iommu->iommu_regs + IOMMU_TAGDIAG;
+ int entry;
+
+ for (entry = 0; entry < 16; entry++, dram += 8, tag += 8) {
+ u32 addr = ((u32)upa_readq(tag) << PAGE_SHIFT);
+ if (addr >= base && addr <= limit) {
+ u64 val = upa_readq(dram);
+
+ if (val & IOMMU_DRAM_VALID) {
+ upa_writeq(addr,
+ iommu->iommu_regs + IOMMU_FLUSH);
+ hit = 1;
+ }
+ }
+ }
+ }
+ if (hit != 0)
+ upa_readq(iommu->sbus_control_reg);
+}
+
+/* In an effort to keep latency under control, we special
+ * case single page IOMMU flushes.
+ */
+static __inline__ void iommu_flush(struct sbus_iommu *iommu,
+ u32 base, unsigned long npages)
+{
+ if (npages == 1) {
+ upa_writeq(base, iommu->iommu_regs + IOMMU_FLUSH);
+ upa_readq(iommu->sbus_control_reg);
+ } else
+ __iommu_flush(iommu, base, npages);
+}
+
+/* Offsets from strbuf_regs */
+#define SYSIO_STRBUFREG_BASE 0x2800UL
+#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
+#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
+#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
+#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
+#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
+#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
+#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
+
+#define STRBUF_TAG_VALID 0x02UL
+
+static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+ int hit = 0;
+
+ iommu->strbuf_flushflag = 0UL;
+ if (npages <= STRBUF_DIAG_LIM) {
+ while (npages--)
+ upa_writeq(base + (npages << PAGE_SHIFT),
+ iommu->strbuf_regs + STRBUF_PFLUSH);
+ hit = 1;
+ } else {
+ u32 limit = base + ((npages << PAGE_SHIFT) - 1UL);
+ unsigned long tag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
+ int entry;
+
+ for (entry = 0; entry < 16; entry++, tag += 8) {
+ u64 val = upa_readq(tag);
+
+ if (val & STRBUF_TAG_VALID) {
+ u32 addr = ((u32)(val & ~3UL)) << (PAGE_SHIFT - 2UL);
+ if (addr >= base && addr <= limit) {
+ upa_writeq(addr,
+ iommu->strbuf_regs + STRBUF_PFLUSH);
+ hit = 1;
+ }
+ }
+ }
+ }
+ if (hit != 0) {
+ /* Whoopee cushion! */
+ upa_writeq(__pa(&iommu->strbuf_flushflag),
+ iommu->strbuf_regs + STRBUF_FSYNC);
+ upa_readq(iommu->sbus_control_reg);
+ while (iommu->strbuf_flushflag == 0UL)
+ membar("#LoadLoad");
+ }
+}
+
+static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+ iopte_t *iopte;
+ unsigned long cnum, ent;
+
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+ iopte = iommu->page_table + (cnum * CLUSTER_NPAGES);
+ iopte += ((ent = iommu->lowest_free[cnum]) << cnum);
+
+ if (iopte_val(iopte[(1UL << cnum)]) == 0UL) {
+ /* Fast path. */
+ iommu->lowest_free[cnum] = ent + 1;
+ } else {
+ unsigned long pte_off = 1;
+
+ ent += 1;
+ do {
+ pte_off++;
+ ent++;
+ } while (iopte_val(iopte[(pte_off << cnum)]) != 0UL);
+ iommu->lowest_free[cnum] = ent;
+ }
+
+ /* I've got your streaming cluster right here buddy boy... */
+ return iopte;
+}
+
+static void free_streaming_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+ unsigned long cnum, ent;
+ iopte_t *iopte;
+
+ cnum = 0;
+ while ((1UL << cnum) < npages)
+ cnum++;
+ ent = (base & CLUSTER_MASK) >> (PAGE_SHIFT + cnum);
+ iopte = iommu->page_table + ((base - MAP_BASE) >> PAGE_SHIFT);
+ iopte_val(*iopte) = 0UL;
+ if (ent < iommu->lowest_free[cnum])
+ iommu->lowest_free[cnum] = ent;
+}
+
+/* We allocate consistent mappings from the end of cluster zero. */
+static iopte_t *alloc_consistant_cluster(struct sbus_iommu *iommu, unsigned long npages)
+{
+ iopte_t *iopte;
+
+ iopte = iommu->page_table + (1 * CLUSTER_NPAGES);
+ while (iopte > iommu->page_table) {
+ iopte--;
+ if (!(iopte_val(*iopte) & IOPTE_VALID)) {
+ unsigned long tmp = npages;
+
+ while (--tmp) {
+ iopte--;
+ if (iopte_val(*iopte) & IOPTE_VALID)
+ break;
+ }
+ if (tmp == 0)
+ return iopte;
+ }
+ }
+ return NULL;
+}
+
+static void free_consistant_cluster(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+{
+ iopte_t *iopte = iommu->page_table + ((base - MAP_BASE) >> PAGE_SHIFT);
+
+ while (npages--)
+ *iopte++ = __iopte(0UL);
+}
+
+void *sbus_alloc_consistant(struct sbus_dev *sdev, long size, u32 *dvma_addr)
+{
+ unsigned long order, first_page, flags;
+ struct sbus_iommu *iommu;
+ iopte_t *iopte;
+ void *ret;
+ int npages;
+
+ if (size <= 0 || sdev == NULL || dvma_addr == NULL)
+ return NULL;
+
+ size = PAGE_ALIGN(size);
+ for (order = 0; order < 10; order++) {
+ if ((PAGE_SIZE << order) >= size)
+ break;
+ }
+ if (order == 10)
+ return NULL;
+ first_page = __get_free_pages(GFP_KERNEL, order);
+ if (first_page == 0UL)
+ return NULL;
+ memset((char *)first_page, 0, PAGE_SIZE << order);
+
+ iommu = sdev->bus->iommu;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ iopte = alloc_consistant_cluster(iommu, size >> PAGE_SHIFT);
+ if (iopte == NULL) {
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ free_pages(first_page, order);
+ return NULL;
+ }
+
+ /* Ok, we're committed at this point. */
+ *dvma_addr = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
+ ret = (void *) first_page;
+ npages = size >> PAGE_SHIFT;
+ while (npages--) {
+ *iopte++ = __iopte(IOPTE_VALID | IOPTE_CACHE | IOPTE_WRITE |
+ (__pa(first_page) & IOPTE_PAGE));
+ first_page += PAGE_SIZE;
+ }
+ iommu_flush(iommu, *dvma_addr, size >> PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return ret;
+}
+
+void sbus_free_consistant(struct sbus_dev *sdev, long size, void *cpu, u32 dvma)
+{
+ unsigned long order, npages;
+ struct sbus_iommu *iommu;
+
+ if (size <= 0 || sdev == NULL || cpu == NULL)
+ return;
+
+ npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ iommu = sdev->bus->iommu;
+
+ spin_lock_irq(&iommu->lock);
+ free_consistant_cluster(iommu, dvma, npages);
+ spin_unlock_irq(&iommu->lock);
+
+ for (order = 0; order < 10; order++) {
+ if ((PAGE_SIZE << order) >= size)
+ break;
+ }
+ if (order < 10)
+ free_pages((unsigned long)cpu, order);
+}
+
+u32 sbus_map_single(struct sbus_dev *sdev, void *ptr, long size)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ unsigned long npages, phys_base, flags;
+ iopte_t *iopte;
+ u32 dma_base, offset;
+
+ phys_base = (unsigned long) ptr;
+ offset = (u32) (phys_base & ~PAGE_MASK);
+ size = (PAGE_ALIGN(phys_base + size) - (phys_base & PAGE_MASK));
+ phys_base = (unsigned long) __pa(phys_base & PAGE_MASK);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ npages = size >> PAGE_SHIFT;
+ iopte = alloc_streaming_cluster(iommu, npages);
+ dma_base = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
+ npages = size >> PAGE_SHIFT;
+ while (npages--) {
+ *iopte++ = __iopte(IOPTE_VALID | IOPTE_STBUF |
+ IOPTE_CACHE | IOPTE_WRITE |
+ (phys_base & IOPTE_PAGE));
+ phys_base += PAGE_SIZE;
+ }
+ npages = size >> PAGE_SHIFT;
+ iommu_flush(iommu, dma_base, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return (dma_base | offset);
+}
+
+void sbus_unmap_single(struct sbus_dev *sdev, u32 dma_addr, long size)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ u32 dma_base = dma_addr & PAGE_MASK;
+ unsigned long flags;
+
+ size = (PAGE_ALIGN(dma_addr + size) - dma_base);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ free_streaming_cluster(iommu, dma_base, size >> PAGE_SHIFT);
+ strbuf_flush(iommu, dma_base, size >> PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nents)
+{
+ struct scatterlist *dma_sg = sg;
+
+ do {
+ unsigned long pteval = ~0UL;
+ u32 dma_npages;
+
+ dma_npages = ((dma_sg->dvma_address & (PAGE_SIZE - 1UL)) +
+ dma_sg->dvma_length +
+ ((u32)(PAGE_SIZE - 1UL))) >> PAGE_SHIFT;
+ do {
+ unsigned long offset;
+ signed int len;
+
+ /* If we are here, we know we have at least one
+ * more page to map. So walk forward until we
+ * hit a page crossing, and begin creating new
+ * mappings from that spot.
+ */
+ for (;;) {
+ unsigned long tmp;
+
+ tmp = (unsigned long) __pa(sg->address);
+ len = sg->length;
+ if (((tmp ^ pteval) >> PAGE_SHIFT) != 0UL) {
+ pteval = tmp & PAGE_MASK;
+ offset = tmp & (PAGE_SIZE - 1UL);
+ break;
+ }
+ if (((tmp ^ (tmp + len - 1UL)) >> PAGE_SHIFT) != 0UL) {
+ pteval = (tmp + PAGE_SIZE) & PAGE_MASK;
+ offset = 0UL;
+ len -= (PAGE_SIZE - (tmp & (PAGE_SIZE - 1UL)));
+ break;
+ }
+ sg++;
+ }
+
+ pteval = ((pteval & IOPTE_PAGE) |
+ IOPTE_VALID | IOPTE_STBUF |
+ IOPTE_CACHE | IOPTE_WRITE);
+ while (len > 0) {
+ *iopte++ = __iopte(pteval);
+ pteval += PAGE_SIZE;
+ len -= (PAGE_SIZE - offset);
+ offset = 0;
+ dma_npages--;
+ }
+
+ pteval = (pteval & IOPTE_PAGE) + len;
+ sg++;
+
+ /* Skip over any tail mappings we've fully mapped,
+ * adjusting pteval along the way. Stop when we
+ * detect a page crossing event.
+ */
+ while ((pteval << (64 - PAGE_SHIFT)) != 0UL &&
+ pteval == __pa(sg->address) &&
+ ((pteval ^
+ (__pa(sg->address) + sg->length - 1UL)) >> PAGE_SHIFT) == 0UL) {
+ pteval += sg->length;
+ sg++;
+ }
+ if ((pteval << (64 - PAGE_SHIFT)) == 0UL)
+ pteval = ~0UL;
+ } while (dma_npages != 0);
+ dma_sg++;
+ } while (dma_sg->dvma_length != 0);
+}
+
+int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ unsigned long flags, npages;
+ iopte_t *iopte;
+ u32 dma_base;
+ struct scatterlist *sgtmp;
+ int unused;
+
+ /* Fast path single entry scatterlists. */
+ if (nents == 1) {
+ sg->dvma_address = sbus_map_single(sdev, sg->address, sg->length);
+ sg->dvma_length = sg->length;
+ return 1;
+ }
+
+ npages = prepare_sg(sg, nents);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ iopte = alloc_streaming_cluster(iommu, npages);
+ dma_base = MAP_BASE + ((iopte - iommu->page_table) << PAGE_SHIFT);
+
+ /* Normalize DVMA addresses. */
+ sgtmp = sg;
+ unused = nents;
+
+ while (unused && sgtmp->dvma_length) {
+ sgtmp->dvma_address += dma_base;
+ sgtmp++;
+ unused--;
+ }
+
+ fill_sg(iopte, sg, nents);
+#ifdef VERIFY_SG
+ verify_sglist(sg, nents, iopte, npages);
+#endif
+ iommu_flush(iommu, dma_base, npages);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ return nents - unused;
+}
+
+void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents)
+{
+ unsigned long size, flags;
+ struct sbus_iommu *iommu;
+ u32 dvma_base;
+ int i;
+
+ /* Fast path single entry scatterlists. */
+ if (nents == 1) {
+ sbus_unmap_single(sdev, sg->dvma_address, sg->dvma_length);
+ return;
+ }
+
+ dvma_base = sg[0].dvma_address & PAGE_MASK;
+ for (i = 0; i < nents; i++) {
+ if (sg[i].dvma_length == 0)
+ break;
+ }
+ i--;
+ size = PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - dvma_base;
+
+ iommu = sdev->bus->iommu;
+ spin_lock_irqsave(&iommu->lock, flags);
+ free_streaming_cluster(iommu, dvma_base, size >> PAGE_SHIFT);
+ strbuf_flush(iommu, dvma_base, size >> PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+void sbus_dma_sync_single(struct sbus_dev *sdev, u32 base, long size)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ unsigned long flags;
+
+ size = (PAGE_ALIGN(base + size) - (base & PAGE_MASK));
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ strbuf_flush(iommu, base & PAGE_MASK, size >> PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+void sbus_dma_sync_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ unsigned long flags, size;
+ u32 base;
+ int i;
+
+ base = sg[0].dvma_address & PAGE_MASK;
+ for (i = 0; i < nents; i++) {
+ if (sg[i].dvma_length == 0)
+ break;
+ }
+ i--;
+ size = PAGE_ALIGN(sg[i].dvma_address + sg[i].dvma_length) - base;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ strbuf_flush(iommu, base, size >> PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+/* Enable 64-bit DVMA mode for the given device. */
+void sbus_set_sbus64(struct sbus_dev *sdev, int bursts)
+{
+ struct sbus_iommu *iommu = sdev->bus->iommu;
+ int slot = sdev->slot;
+ unsigned long cfg_reg;
+ u64 val;
+
+ cfg_reg = iommu->sbus_control_reg;
+ switch (slot) {
+ case 0:
+ cfg_reg += 0x20UL;
+ break;
+ case 1:
+ cfg_reg += 0x28UL;
+ break;
+ case 2:
+ cfg_reg += 0x30UL;
+ break;
+ case 3:
+ cfg_reg += 0x38UL;
+ break;
+ case 13:
+ cfg_reg += 0x40UL;
+ break;
+ case 14:
+ cfg_reg += 0x48UL;
+ break;
+ case 15:
+ cfg_reg += 0x50UL;
+ break;
+
+ default:
+ return;
+ };
+
+ val = upa_readq(cfg_reg);
+ if (val & (1UL << 14UL)) {
+ /* Extended transfer mode already enabled. */
+ return;
+ }
+
+ val |= (1UL << 14UL);
+
+ if (bursts & DMA_BURST8)
+ val |= (1UL << 1UL);
+ if (bursts & DMA_BURST16)
+ val |= (1UL << 2UL);
+ if (bursts & DMA_BURST32)
+ val |= (1UL << 3UL);
+ if (bursts & DMA_BURST64)
+ val |= (1UL << 4UL);
+ upa_writeq(val, cfg_reg);
+}
+
+/* SBUS SYSIO INO number to Sparc PIL level. */
+static unsigned char sysio_ino_to_pil[] = {
+ 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 0 */
+ 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 1 */
+ 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 2 */
+ 0, 1, 2, 7, 5, 7, 8, 9, /* SBUS slot 3 */
+ 3, /* Onboard SCSI */
+ 5, /* Onboard Ethernet */
+/*XXX*/ 8, /* Onboard BPP */
+ 0, /* Bogon */
+ 13, /* Audio */
+/*XXX*/15, /* PowerFail */
+ 0, /* Bogon */
+ 0, /* Bogon */
+ 12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
+ 11, /* Floppy */
+ 0, /* Spare Hardware (bogon for now) */
+ 0, /* Keyboard (bogon for now) */
+ 0, /* Mouse (bogon for now) */
+ 0, /* Serial (bogon for now) */
+ 0, 0, /* Bogon, Bogon */
+ 10, /* Timer 0 */
+ 11, /* Timer 1 */
+ 0, 0, /* Bogon, Bogon */
+ 15, /* Uncorrectable SBUS Error */
+ 15, /* Correctable SBUS Error */
+ 15, /* SBUS Error */
+/*XXX*/ 0, /* Power Management (bogon for now) */
+};
+
+/* INO number to IMAP register offset for SYSIO external IRQs.
+ * This should conform to both Sunfire/Wildfire server and Fusion
+ * desktop designs.
+ */
+#define SYSIO_IMAP_SLOT0 0x2c04UL
+#define SYSIO_IMAP_SLOT1 0x2c0cUL
+#define SYSIO_IMAP_SLOT2 0x2c14UL
+#define SYSIO_IMAP_SLOT3 0x2c1cUL
+#define SYSIO_IMAP_SCSI 0x3004UL
+#define SYSIO_IMAP_ETH 0x300cUL
+#define SYSIO_IMAP_BPP 0x3014UL
+#define SYSIO_IMAP_AUDIO 0x301cUL
+#define SYSIO_IMAP_PFAIL 0x3024UL
+#define SYSIO_IMAP_KMS 0x302cUL
+#define SYSIO_IMAP_FLPY 0x3034UL
+#define SYSIO_IMAP_SHW 0x303cUL
+#define SYSIO_IMAP_KBD 0x3044UL
+#define SYSIO_IMAP_MS 0x304cUL
+#define SYSIO_IMAP_SER 0x3054UL
+#define SYSIO_IMAP_TIM0 0x3064UL
+#define SYSIO_IMAP_TIM1 0x306cUL
+#define SYSIO_IMAP_UE 0x3074UL
+#define SYSIO_IMAP_CE 0x307cUL
+#define SYSIO_IMAP_SBERR 0x3084UL
+#define SYSIO_IMAP_PMGMT 0x308cUL
+#define SYSIO_IMAP_GFX 0x3094UL
+#define SYSIO_IMAP_EUPA 0x309cUL
+
+#define bogon ((unsigned long) -1)
+static unsigned long sysio_irq_offsets[] = {
+ /* SBUS Slot 0 --> 3, level 1 --> 7 */
+ SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+ SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
+ SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+ SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
+ SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+ SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
+ SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+ SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
+
+ /* Onboard devices (not relevant/used on SunFire). */
+ SYSIO_IMAP_SCSI,
+ SYSIO_IMAP_ETH,
+ SYSIO_IMAP_BPP,
+ bogon,
+ SYSIO_IMAP_AUDIO,
+ SYSIO_IMAP_PFAIL,
+ bogon,
+ bogon,
+ SYSIO_IMAP_KMS,
+ SYSIO_IMAP_FLPY,
+ SYSIO_IMAP_SHW,
+ SYSIO_IMAP_KBD,
+ SYSIO_IMAP_MS,
+ SYSIO_IMAP_SER,
+ bogon,
+ bogon,
+ SYSIO_IMAP_TIM0,
+ SYSIO_IMAP_TIM1,
+ bogon,
+ bogon,
+ SYSIO_IMAP_UE,
+ SYSIO_IMAP_CE,
+ SYSIO_IMAP_SBERR,
+ SYSIO_IMAP_PMGMT,
+};
+
+#undef bogon
+
+#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
+
+/* Convert Interrupt Mapping register pointer to associated
+ * Interrupt Clear register pointer, SYSIO specific version.
+ */
+#define SYSIO_ICLR_UNUSED0 0x3400UL
+#define SYSIO_ICLR_SLOT0 0x340cUL
+#define SYSIO_ICLR_SLOT1 0x344cUL
+#define SYSIO_ICLR_SLOT2 0x348cUL
+#define SYSIO_ICLR_SLOT3 0x34ccUL
+static unsigned long sysio_imap_to_iclr(unsigned long imap)
+{
+ unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
+ return imap + diff;
+}
+
+unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
+{
+ struct sbus_bus *sbus = (struct sbus_bus *)buscookie;
+ struct sbus_iommu *iommu = sbus->iommu;
+ unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ unsigned long imap, iclr;
+ int pil, sbus_level = 0;
+
+ pil = sysio_ino_to_pil[ino];
+ if (!pil) {
+ printk("sbus_irq_build: Bad SYSIO INO[%x]\n", ino);
+ panic("Bad SYSIO IRQ translations...");
+ }
+ imap = sysio_irq_offsets[ino];
+ if (imap == ((unsigned long)-1)) {
+ prom_printf("get_irq_translations: Bad SYSIO INO[%x] cpu[%d]\n",
+ ino, pil);
+ prom_halt();
+ }
+ imap += reg_base;
+
+ /* SYSIO inconsistency. For external SLOTS, we have to select
+ * the right ICLR register based upon the lower SBUS irq level
+ * bits.
+ */
+ if (ino >= 0x20) {
+ iclr = sysio_imap_to_iclr(imap);
+ } else {
+ int sbus_slot = (ino & 0x18)>>3;
+
+ sbus_level = ino & 0x7;
+
+ switch(sbus_slot) {
+ case 0:
+ iclr = reg_base + SYSIO_ICLR_SLOT0;
+ break;
+ case 1:
+ iclr = reg_base + SYSIO_ICLR_SLOT1;
+ break;
+ case 2:
+ iclr = reg_base + SYSIO_ICLR_SLOT2;
+ break;
+ default:
+ case 3:
+ iclr = reg_base + SYSIO_ICLR_SLOT3;
+ break;
+ };
+
+ iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
+ }
+ return build_irq(pil, sbus_level, iclr, imap);
+}
+
+/* Error interrupt handling. */
+#define SYSIO_UE_AFSR 0x0030UL
+#define SYSIO_UE_AFAR 0x0038UL
+#define SYSIO_UEAFSR_PPIO 0x8000000000000000 /* Primary PIO is cause */
+#define SYSIO_UEAFSR_PDRD 0x4000000000000000 /* Primary DVMA read is cause */
+#define SYSIO_UEAFSR_PDWR 0x2000000000000000 /* Primary DVMA write is cause */
+#define SYSIO_UEAFSR_SPIO 0x1000000000000000 /* Secondary PIO is cause */
+#define SYSIO_UEAFSR_SDRD 0x0800000000000000 /* Secondary DVMA read is cause */
+#define SYSIO_UEAFSR_SDWR 0x0400000000000000 /* Secondary DVMA write is cause*/
+#define SYSIO_UEAFSR_RESV1 0x03ff000000000000 /* Reserved */
+#define SYSIO_UEAFSR_DOFF 0x0000e00000000000 /* Doubleword Offset */
+#define SYSIO_UEAFSR_SIZE 0x00001c0000000000 /* Bad transfer size is 2**SIZE */
+#define SYSIO_UEAFSR_MID 0x000003e000000000 /* UPA MID causing the fault */
+#define SYSIO_UEAFSR_RESV2 0x0000001fffffffff /* Reserved */
+static void sysio_ue_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sbus_bus *sbus = dev_id;
+ struct sbus_iommu *iommu = sbus->iommu;
+ unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ unsigned long afsr_reg, afar_reg;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ afsr_reg = reg_base + SYSIO_UE_AFSR;
+ afar_reg = reg_base + SYSIO_UE_AFAR;
+
+ /* Latch error status. */
+ afsr = upa_readq(afsr_reg);
+ afar = upa_readq(afar_reg);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
+ SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
+ upa_writeq(error_bits, afsr_reg);
+
+ /* Log the error. */
+ printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
+ sbus->portid,
+ (((error_bits & SYSIO_UEAFSR_PPIO) ?
+ "PIO" :
+ ((error_bits & SYSIO_UEAFSR_PDRD) ?
+ "DVMA Read" :
+ ((error_bits & SYSIO_UEAFSR_PDWR) ?
+ "DVMA Write" : "???")))));
+ printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
+ sbus->portid,
+ (afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
+ (afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
+ (afsr & SYSIO_UEAFSR_MID) >> 37UL);
+ printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+ printk("SYSIO[%x]: Secondary UE errors [", sbus->portid);
+ reported = 0;
+ if (afsr & SYSIO_UEAFSR_SPIO) {
+ reported++;
+ printk("(PIO)");
+ }
+ if (afsr & SYSIO_UEAFSR_SDRD) {
+ reported++;
+ printk("(DVMA Read)");
+ }
+ if (afsr & SYSIO_UEAFSR_SDWR) {
+ reported++;
+ printk("(DVMA Write)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+}
+
+#define SYSIO_CE_AFSR 0x0040UL
+#define SYSIO_CE_AFAR 0x0048UL
+#define SYSIO_CEAFSR_PPIO 0x8000000000000000 /* Primary PIO is cause */
+#define SYSIO_CEAFSR_PDRD 0x4000000000000000 /* Primary DVMA read is cause */
+#define SYSIO_CEAFSR_PDWR 0x2000000000000000 /* Primary DVMA write is cause */
+#define SYSIO_CEAFSR_SPIO 0x1000000000000000 /* Secondary PIO is cause */
+#define SYSIO_CEAFSR_SDRD 0x0800000000000000 /* Secondary DVMA read is cause */
+#define SYSIO_CEAFSR_SDWR 0x0400000000000000 /* Secondary DVMA write is cause*/
+#define SYSIO_CEAFSR_RESV1 0x0300000000000000 /* Reserved */
+#define SYSIO_CEAFSR_ESYND 0x00ff000000000000 /* Syndrome Bits */
+#define SYSIO_CEAFSR_DOFF 0x0000e00000000000 /* Double Offset */
+#define SYSIO_CEAFSR_SIZE 0x00001c0000000000 /* Bad transfer size is 2**SIZE */
+#define SYSIO_CEAFSR_MID 0x000003e000000000 /* UPA MID causing the fault */
+#define SYSIO_CEAFSR_RESV2 0x0000001fffffffff /* Reserved */
+static void sysio_ce_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sbus_bus *sbus = dev_id;
+ struct sbus_iommu *iommu = sbus->iommu;
+ unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ unsigned long afsr_reg, afar_reg;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ afsr_reg = reg_base + SYSIO_CE_AFSR;
+ afar_reg = reg_base + SYSIO_CE_AFAR;
+
+ /* Latch error status. */
+ afsr = upa_readq(afsr_reg);
+ afar = upa_readq(afar_reg);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
+ SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
+ upa_writeq(error_bits, afsr_reg);
+
+ printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
+ sbus->portid,
+ (((error_bits & SYSIO_CEAFSR_PPIO) ?
+ "PIO" :
+ ((error_bits & SYSIO_CEAFSR_PDRD) ?
+ "DVMA Read" :
+ ((error_bits & SYSIO_CEAFSR_PDWR) ?
+ "DVMA Write" : "???")))));
+
+ /* XXX Use syndrome and afar to print out module string just like
+ * XXX UDB CE trap handler does... -DaveM
+ */
+ printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
+ sbus->portid,
+ (afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
+ (afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
+ (afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
+ (afsr & SYSIO_CEAFSR_MID) >> 37UL);
+ printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+
+ printk("SYSIO[%x]: Secondary CE errors [", sbus->portid);
+ reported = 0;
+ if (afsr & SYSIO_CEAFSR_SPIO) {
+ reported++;
+ printk("(PIO)");
+ }
+ if (afsr & SYSIO_CEAFSR_SDRD) {
+ reported++;
+ printk("(DVMA Read)");
+ }
+ if (afsr & SYSIO_CEAFSR_SDWR) {
+ reported++;
+ printk("(DVMA Write)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+}
+
+#define SYSIO_SBUS_AFSR 0x2010UL
+#define SYSIO_SBUS_AFAR 0x2018UL
+#define SYSIO_SBAFSR_PLE 0x8000000000000000 /* Primary Late PIO Error */
+#define SYSIO_SBAFSR_PTO 0x4000000000000000 /* Primary SBUS Timeout */
+#define SYSIO_SBAFSR_PBERR 0x2000000000000000 /* Primary SBUS Error ACK */
+#define SYSIO_SBAFSR_SLE 0x1000000000000000 /* Secondary Late PIO Error */
+#define SYSIO_SBAFSR_STO 0x0800000000000000 /* Secondary SBUS Timeout */
+#define SYSIO_SBAFSR_SBERR 0x0400000000000000 /* Secondary SBUS Error ACK */
+#define SYSIO_SBAFSR_RESV1 0x03ff000000000000 /* Reserved */
+#define SYSIO_SBAFSR_RD 0x0000800000000000 /* Primary was late PIO read */
+#define SYSIO_SBAFSR_RESV2 0x0000600000000000 /* Reserved */
+#define SYSIO_SBAFSR_SIZE 0x00001c0000000000 /* Size of transfer */
+#define SYSIO_SBAFSR_MID 0x000003e000000000 /* MID causing the error */
+#define SYSIO_SBAFSR_RESV3 0x0000001fffffffff /* Reserved */
+static void sysio_sbus_error_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct sbus_bus *sbus = dev_id;
+ struct sbus_iommu *iommu = sbus->iommu;
+ unsigned long afsr_reg, afar_reg, reg_base;
+ unsigned long afsr, afar, error_bits;
+ int reported;
+
+ reg_base = iommu->sbus_control_reg - 0x2000UL;
+ afsr_reg = reg_base + SYSIO_SBUS_AFSR;
+ afar_reg = reg_base + SYSIO_SBUS_AFAR;
+
+ afsr = upa_readq(afsr_reg);
+ afar = upa_readq(afar_reg);
+
+ /* Clear primary/secondary error status bits. */
+ error_bits = afsr &
+ (SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
+ SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
+ upa_writeq(error_bits, afsr_reg);
+
+ /* Log the error. */
+ printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
+ sbus->portid,
+ (((error_bits & SYSIO_SBAFSR_PLE) ?
+ "Late PIO Error" :
+ ((error_bits & SYSIO_SBAFSR_PTO) ?
+ "Time Out" :
+ ((error_bits & SYSIO_SBAFSR_PBERR) ?
+ "Error Ack" : "???")))),
+ (afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
+ printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
+ sbus->portid,
+ (afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
+ (afsr & SYSIO_SBAFSR_MID) >> 37UL);
+ printk("SYSIO[%x]: AFAR[%016lx]\n", sbus->portid, afar);
+ printk("SYSIO[%x]: Secondary SBUS errors [", sbus->portid);
+ reported = 0;
+ if (afsr & SYSIO_SBAFSR_SLE) {
+ reported++;
+ printk("(Late PIO Error)");
+ }
+ if (afsr & SYSIO_SBAFSR_STO) {
+ reported++;
+ printk("(Time Out)");
+ }
+ if (afsr & SYSIO_SBAFSR_SBERR) {
+ reported++;
+ printk("(Error Ack)");
+ }
+ if (!reported)
+ printk("(none)");
+ printk("]\n");
+
+ /* XXX check iommu/strbuf for further error status XXX */
+}
+
+#define ECC_CONTROL 0x0020UL
+#define SYSIO_ECNTRL_ECCEN 0x8000000000000000 /* Enable ECC Checking */
+#define SYSIO_ECNTRL_UEEN 0x4000000000000000 /* Enable UE Interrupts */
+#define SYSIO_ECNTRL_CEEN 0x2000000000000000 /* Enable CE Interrupts */
+
+#define SYSIO_UE_INO 0x34
+#define SYSIO_CE_INO 0x35
+#define SYSIO_SBUSERR_INO 0x36
+
+static void __init sysio_register_error_handlers(struct sbus_bus *sbus)
+{
+ struct sbus_iommu *iommu = sbus->iommu;
+ unsigned long reg_base = iommu->sbus_control_reg - 0x2000UL;
+ unsigned int irq;
+ u64 control;
+
+ irq = sbus_build_irq(sbus, SYSIO_UE_INO);
+ if (request_irq(irq, sysio_ue_handler,
+ SA_SHIRQ, "SYSIO UE", sbus) < 0) {
+ prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
+ sbus->portid);
+ prom_halt();
+ }
+
+ irq = sbus_build_irq(sbus, SYSIO_CE_INO);
+ if (request_irq(irq, sysio_ce_handler,
+ SA_SHIRQ, "SYSIO CE", sbus) < 0) {
+ prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
+ sbus->portid);
+ prom_halt();
+ }
+
+ irq = sbus_build_irq(sbus, SYSIO_SBUSERR_INO);
+ if (request_irq(irq, sysio_sbus_error_handler,
+ SA_SHIRQ, "SYSIO SBUS Error", sbus) < 0) {
+ prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
+ sbus->portid);
+ prom_halt();
+ }
+
+ /* Now turn the error interrupts on and also enable ECC checking. */
+ upa_writeq((SYSIO_ECNTRL_ECCEN |
+ SYSIO_ECNTRL_UEEN |
+ SYSIO_ECNTRL_CEEN),
+ reg_base + ECC_CONTROL);
+
+ control = upa_readq(iommu->sbus_control_reg);
+ control |= 0x100UL; /* SBUS Error Interrupt Enable */
+ upa_writeq(control, iommu->sbus_control_reg);
+}
+
+/* Boot time initialization. */
+void __init sbus_iommu_init(int prom_node, struct sbus_bus *sbus)
+{
+ struct linux_prom64_registers rprop;
+ struct sbus_iommu *iommu;
+ unsigned long regs, tsb_base;
+ u64 control;
+ int err, i;
+
+ sbus->portid = prom_getintdefault(sbus->prom_node,
+ "upa-portid", -1);
+
+ err = prom_getproperty(prom_node, "reg",
+ (char *)&rprop, sizeof(rprop));
+ if (err < 0) {
+ prom_printf("sbus_iommu_init: Cannot map SYSIO control registers.\n");
+ prom_halt();
+ }
+ regs = rprop.phys_addr;
+
+ iommu = kmalloc(sizeof(*iommu) + SMP_CACHE_BYTES, GFP_ATOMIC);
+ if (iommu == NULL) {
+ prom_printf("sbus_iommu_init: Fatal error, kmalloc(iommu) failed\n");
+ prom_halt();
+ }
+
+ /* Align on E$ line boundary. */
+ iommu = (struct sbus_iommu *)
+ (((unsigned long)iommu + (SMP_CACHE_BYTES - 1UL)) &
+ ~(SMP_CACHE_BYTES - 1UL));
+
+ memset(iommu, 0, sizeof(*iommu));
+
+ /* Setup spinlock. */
+ spin_lock_init(&iommu->lock);
+
+ /* Init register offsets. */
+ iommu->iommu_regs = regs + SYSIO_IOMMUREG_BASE;
+ iommu->strbuf_regs = regs + SYSIO_STRBUFREG_BASE;
+
+ /* The SYSIO SBUS control register is used for dummy reads
+ * in order to ensure write completion.
+ */
+ iommu->sbus_control_reg = regs + 0x2000UL;
+
+ /* Link into SYSIO software state. */
+ sbus->iommu = iommu;
+
+ printk("SYSIO: UPA portID %x, at %016lx\n",
+ sbus->portid, regs);
+
+ /* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
+ control = upa_readq(iommu->iommu_regs + IOMMU_CONTROL);
+ control = ((7UL << 16UL) |
+ (0UL << 2UL) |
+ (1UL << 1UL) |
+ (1UL << 0UL));
+
+ /* Using the above configuration we need 1MB iommu page
+ * table (128K ioptes * 8 bytes per iopte). This is
+ * page order 7 on UltraSparc.
+ */
+ tsb_base = __get_free_pages(GFP_ATOMIC, 7);
+ if (tsb_base == 0UL) {
+ prom_printf("sbus_iommu_init: Fatal error, cannot alloc TSB table.\n");
+ prom_halt();
+ }
+
+ iommu->page_table = (iopte_t *) tsb_base;
+ memset(iommu->page_table, 0, (PAGE_SIZE << 7));
+
+ upa_writeq(control, iommu->iommu_regs + IOMMU_CONTROL);
+
+ /* Clean out any cruft in the IOMMU using
+ * diagnostic accesses.
+ */
+ for (i = 0; i < 16; i++) {
+ unsigned long dram = iommu->iommu_regs + IOMMU_DRAMDIAG;
+
+ dram += (unsigned long)i * 8UL;
+ upa_writeq(0, dram);
+ }
+ upa_readq(iommu->sbus_control_reg);
+
+ /* Give the TSB to SYSIO. */
+ upa_writeq(__pa(tsb_base), iommu->iommu_regs + IOMMU_TSBBASE);
+
+ /* Setup streaming buffer, DE=1 SB_EN=1 */
+ control = (1UL << 1UL) | (1UL << 0UL);
+ upa_writeq(control, iommu->strbuf_regs + STRBUF_CONTROL);
+
+ /* Clear out the tags using diagnostics. */
+ for (i = 0; i < 16; i++) {
+ unsigned long ptag, ltag;
+
+ ptag = iommu->strbuf_regs + STRBUF_PTAGDIAG;
+ ltag = iommu->strbuf_regs + STRBUF_LTAGDIAG;
+ ptag += (unsigned long)i * 8UL;
+ ltag += (unsigned long)i * 8UL;
+
+ upa_writeq(0UL, ptag);
+ upa_writeq(0UL, ltag);
+ }
+
+ /* Enable DVMA arbitration for all devices/slots. */
+ control = upa_readq(iommu->sbus_control_reg);
+ control |= 0x3fUL;
+ upa_writeq(control, iommu->sbus_control_reg);
+
+ /* Now some Xfire specific grot... */
+ {
+ extern void *starfire_hookup(int);
+ extern int this_is_starfire;
+
+ if (this_is_starfire)
+ sbus->starfire_cookie = starfire_hookup(sbus->portid);
+ else
+ sbus->starfire_cookie = NULL;
+ }
+
+ sysio_register_error_handlers(sbus);
+}
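Aside, not part of the patch: a sketch of the DVMA geometry the constants at the top of sbus.c set up, assuming the 8K page size (PAGE_SHIFT == 13) the comment mentions. One gigabyte of DVMA space is split into 8 clusters of 128MB (16384 ioptes each); alloc_streaming_cluster() indexes clusters by the smallest power-of-two exponent that covers the request, and a mapping's DVMA address is simply MAP_BASE plus its iopte index scaled by the page size:

/* Sketch only; mirrors NCLUSTERS, CLUSTER_SIZE and MAP_BASE above. */
#define SKETCH_PAGE_SHIFT	13UL				/* 8K pages */
#define SKETCH_CLUSTER_SIZE	((1UL << 30) / 8UL)		/* 128MB per cluster */
#define SKETCH_CLUSTER_NPAGES	(SKETCH_CLUSTER_SIZE >> SKETCH_PAGE_SHIFT) /* 16384 */
#define SKETCH_MAP_BASE		0xc0000000UL

/* Cluster index: smallest cnum with (1UL << cnum) >= npages, so each
 * cluster hands out chunks of (1UL << cnum) pages.
 */
static unsigned long sketch_cluster_for(unsigned long npages)
{
	unsigned long cnum = 0;

	while ((1UL << cnum) < npages)
		cnum++;
	return cnum;
}

/* DVMA address of an iopte is its index into the 1GB page table. */
static unsigned long sketch_dvma_of(unsigned long iopte_index)
{
	return SKETCH_MAP_BASE + (iopte_index << SKETCH_PAGE_SHIFT);
}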
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
index f72aeedd6..88ab813f3 100644
--- a/arch/sparc64/kernel/semaphore.c
+++ b/arch/sparc64/kernel/semaphore.c
@@ -1,4 +1,4 @@
-/* $Id: semaphore.c,v 1.1 1999/08/30 10:00:50 davem Exp $
+/* $Id: semaphore.c,v 1.2 1999/12/23 17:12:03 jj Exp $
* Generic semaphore code. Buyer beware. Do your own
* specific changes in <asm/semaphore-helper.h>
*/
@@ -62,8 +62,7 @@ void __up(struct semaphore *sem)
#define DOWN_VAR \
struct task_struct *tsk = current; \
- wait_queue_t wait; \
- init_waitqueue_entry(&wait, tsk);
+ DECLARE_WAITQUEUE(wait, tsk);
#define DOWN_HEAD(task_state) \
\
@@ -127,3 +126,172 @@ int __down_trylock(struct semaphore * sem)
{
return waking_non_zero_trylock(sem);
}
+
+/* rw mutexes
+ * Implemented by Jakub Jelinek (jakub@redhat.com) based on
+ * i386 implementation by Ben LaHaise (bcrl@redhat.com).
+ */
+
+asm("
+ .text
+ .align 32
+ .globl __down_read_failed
+__down_read_failed:
+ save %sp, -160, %sp
+ membar #StoreStore
+ brz,pt %g5, 3f
+ mov %g7, %l0
+1: call down_read_failed
+ mov %l0, %o0
+2: lduw [%l0], %l1
+ sub %l1, 1, %l2
+ cas [%l0], %l1, %l2
+
+ cmp %l1, %l2
+ bne,pn %icc, 2b
+ membar #StoreStore
+ subcc %l1, 1, %g0
+ bpos,pt %icc, 4f
+ nop
+ bcc,pn %icc, 1b
+ nop
+
+3: call down_read_failed_biased
+ mov %l0, %o0
+4: ret
+ restore
+ .previous
+");
+
+asm("
+ .text
+ .align 32
+ .globl __down_write_failed
+__down_write_failed:
+ save %sp, -160, %sp
+ membar #StoreStore
+ tst %g5
+ bge,pt %icc, 3f
+ mov %g7, %l0
+1: call down_write_failed
+ mov %l0, %o0
+2: lduw [%l0], %l1
+ sethi %hi (" RW_LOCK_BIAS_STR "), %l3
+ sub %l1, %l3, %l2
+ cas [%l0], %l1, %l2
+
+ cmp %l1, %l2
+ bne,pn %icc, 2b
+ membar #StoreStore
+ subcc %l1, %l3, %g0
+ be,pt %icc, 4f
+ nop
+ bcc,pn %icc, 1b
+ nop
+
+3: call down_write_failed_biased
+ mov %l0, %o0
+4: ret
+ restore
+ .previous
+");
+
+void down_read_failed_biased(struct rw_semaphore *sem)
+{
+ DOWN_VAR
+
+ add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */
+
+ for (;;) {
+ if (clear_le_bit(0, &sem->granted))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (!test_le_bit(0, &sem->granted))
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+}
+
+void down_write_failed_biased(struct rw_semaphore *sem)
+{
+ DOWN_VAR
+
+ add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */
+
+ for (;;) {
+ if (clear_le_bit(1, &sem->granted))
+ break;
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (!test_le_bit(1, &sem->granted))
+ schedule();
+ }
+
+ remove_wait_queue(&sem->write_bias_wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /* if the lock is currently unbiased, awaken the sleepers
+ * FIXME: this wakes up the readers early in a bit of a
+ * stampede -> bad!
+ */
+ if (sem->count >= 0)
+ wake_up(&sem->wait);
+}
+
+/* Wait for the lock to become unbiased. Readers
+ * are non-exclusive. =)
+ */
+void down_read_failed(struct rw_semaphore *sem)
+{
+ DOWN_VAR
+
+ __up_read(sem); /* this takes care of granting the lock */
+
+ add_wait_queue(&sem->wait, &wait);
+
+ while (sem->count < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ if (sem->count >= 0)
+ break;
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+}
+
+/* Wait for the lock to become unbiased. Since we're
+ * a writer, we'll make ourselves exclusive.
+ */
+void down_write_failed(struct rw_semaphore *sem)
+{
+ DOWN_VAR
+
+ __up_write(sem); /* this takes care of granting the lock */
+
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (sem->count < 0) {
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+ if (sem->count >= 0)
+ break; /* we must attempt to acquire or bias the lock */
+ schedule();
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+}
+
+void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers)
+{
+ if (readers) {
+ if (set_le_bit(0, &sem->granted))
+ BUG();
+ wake_up(&sem->wait);
+ } else {
+ if (set_le_bit(1, &sem->granted))
+ BUG();
+ wake_up(&sem->write_bias_wait);
+ }
+}
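Aside, not part of the patch: the assembly stubs above implement the i386-style biased count mentioned in the comment. A rough C model of the fast paths those stubs back up (the lduw/cas retry loop is elided and RW_BIAS stands in for the real RW_LOCK_BIAS):

#define RW_BIAS 0x01000000	/* stand-in for RW_LOCK_BIAS */

/* Reader fast path: drop the count by one.  A non-positive old value
 * means a writer holds or wants the lock, so fall into
 * __down_read_failed above.
 */
static int sketch_down_read(int *count)
{
	int old = *count;	/* really an atomic cas retry loop */

	*count = old - 1;
	return old > 0;		/* 1 = acquired, 0 = slow path */
}

/* Writer fast path: subtract the whole bias; it only succeeds when the
 * count was exactly RW_BIAS, i.e. no readers and no writer present.
 */
static int sketch_down_write(int *count)
{
	int old = *count;

	*count = old - RW_BIAS;
	return old == RW_BIAS;
}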
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 066850108..0f280f818 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.47 1999/08/31 06:54:55 davem Exp $
+/* $Id: setup.c,v 1.50 1999/12/01 10:44:45 davem Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
@@ -279,7 +279,9 @@ unsigned int boot_flags = 0;
#ifdef CONFIG_SUN_CONSOLE
static int console_fb __initdata = 0;
#endif
-static unsigned long memory_size = 0;
+
+/* Exported for mm/init.c:paging_init. */
+unsigned long cmdline_memory_size = 0;
#ifdef PROM_DEBUG_CONSOLE
static struct console prom_debug_console = {
@@ -398,13 +400,13 @@ static void __init boot_flags_init(char *commands)
* "mem=XXX[kKmM]" overrides the PROM-reported
* memory size.
*/
- memory_size = simple_strtoul(commands + 4,
- &commands, 0);
+ cmdline_memory_size = simple_strtoul(commands + 4,
+ &commands, 0);
if (*commands == 'K' || *commands == 'k') {
- memory_size <<= 10;
+ cmdline_memory_size <<= 10;
commands++;
} else if (*commands=='M' || *commands=='m') {
- memory_size <<= 20;
+ cmdline_memory_size <<= 20;
commands++;
}
}
@@ -438,12 +440,22 @@ static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
extern struct consw sun_serial_con;
-void __init setup_arch(char **cmdline_p,
- unsigned long * memory_start_p, unsigned long * memory_end_p)
+void register_prom_callbacks(void)
+{
+ prom_setcallback(prom_callback);
+ prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
+ "' linux-va>tte-data to va>tte-data");
+ prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
+ "' linux-.soft1 to .soft1");
+ prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
+ "' linux-.soft2 to .soft2");
+}
+
+void __init setup_arch(char **cmdline_p)
{
extern int serial_console; /* in console.c, of course */
- unsigned long lowest_paddr, end_of_phys_memory = 0;
- int total, i;
+ unsigned long highest_paddr;
+ int i;
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
@@ -464,44 +476,23 @@ void __init setup_arch(char **cmdline_p,
boot_flags_init(*cmdline_p);
idprom_init();
- total = prom_probe_memory();
-
- lowest_paddr = 0xffffffffffffffffUL;
- for(i=0; sp_banks[i].num_bytes != 0; i++) {
- if(sp_banks[i].base_addr < lowest_paddr)
- lowest_paddr = sp_banks[i].base_addr;
- end_of_phys_memory = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- if (memory_size) {
- if (end_of_phys_memory > memory_size) {
- sp_banks[i].num_bytes -=
- (end_of_phys_memory - memory_size);
- end_of_phys_memory = memory_size;
- sp_banks[++i].base_addr = 0xdeadbeef;
- sp_banks[i].num_bytes = 0;
- }
- }
- }
- prom_setcallback(prom_callback);
- prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
- "' linux-va>tte-data to va>tte-data");
- prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
- "' linux-.soft1 to .soft1");
- prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
- "' linux-.soft2 to .soft2");
+ (void) prom_probe_memory();
/* In paging_init() we tip off this value to see if we need
* to change init_mm.pgd to point to the real alias mapping.
*/
- phys_base = lowest_paddr;
-
- *memory_start_p = PAGE_ALIGN(((unsigned long) &end));
- *memory_end_p = (end_of_phys_memory + PAGE_OFFSET);
-
-#ifdef DAVEM_DEBUGGING
- prom_printf("phys_base[%016lx] memory_start[%016lx] memory_end[%016lx]\n",
- phys_base, *memory_start_p, *memory_end_p);
-#endif
+ phys_base = 0xffffffffffffffffUL;
+ highest_paddr = 0UL;
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ unsigned long top;
+
+ if (sp_banks[i].base_addr < phys_base)
+ phys_base = sp_banks[i].base_addr;
+ top = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ if (highest_paddr < top)
+ highest_paddr = top;
+ }
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -512,6 +503,7 @@ void __init setup_arch(char **cmdline_p,
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
+// FIXME needs to do the new bootmem alloc stuff
if (sparc_ramdisk_image) {
unsigned long start = 0;
@@ -537,7 +529,7 @@ void __init setup_arch(char **cmdline_p,
/* Due to stack alignment restrictions and assumptions... */
init_mm.mmap->vm_page_prot = PAGE_SHARED;
init_mm.mmap->vm_start = PAGE_OFFSET;
- init_mm.mmap->vm_end = *memory_end_p;
+ init_mm.mmap->vm_end = PAGE_OFFSET + highest_paddr;
init_task.thread.kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
@@ -643,5 +635,12 @@ int get_cpuinfo(char *buffer)
#ifdef __SMP__
len += smp_info(buffer + len);
#endif
+#undef ZS_LOG
+#ifdef ZS_LOG
+ {
+ extern int zs_dumplog(char *);
+ len += zs_dumplog(buffer + len);
+ }
+#endif
return len;
}
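Aside, not part of the patch: the boot_flags_init() hunk above now stores the "mem=XXX[kKmM]" override in cmdline_memory_size for mm/init.c to consume. A standalone sketch of that parsing, using the C library strtoul in place of the kernel's simple_strtoul:

#include <stdlib.h>

/* Sketch: parse "64M" / "32768k" / plain bytes into a byte count. */
static unsigned long parse_mem_option(const char *arg)
{
	char *end;
	unsigned long size = strtoul(arg, &end, 0);

	if (*end == 'K' || *end == 'k')
		size <<= 10;		/* kilobytes */
	else if (*end == 'M' || *end == 'm')
		size <<= 20;		/* megabytes */
	return size;
}

/* parse_mem_option("64M") == 64UL << 20 */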
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index f618ab85c..79db5bc4e 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.45 1999/09/06 08:21:59 jj Exp $
+/* $Id: signal.c,v 1.48 1999/12/15 22:24:52 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index 949017da3..66e2b0bbe 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -1,4 +1,4 @@
-/* $Id: signal32.c,v 1.50 1999/07/30 09:35:25 davem Exp $
+/* $Id: signal32.c,v 1.56 1999/12/20 01:16:16 davem Exp $
* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -663,7 +663,8 @@ static inline void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *reg
goto sigsegv;
if(pte_present(*ptep)) {
- unsigned long page = pte_page(*ptep);
+ unsigned long page = (unsigned long)
+ __va(pte_pagenr(*ptep) << PAGE_SHIFT);
__asm__ __volatile__("
membar #StoreStore
@@ -1033,6 +1034,26 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
err |= __put_user(0, &sf->fpu_save);
}
+ /* Update the siginfo structure. Is this good? */
+ if (info->si_code == 0) {
+ info->si_signo = signr;
+ info->si_errno = 0;
+
+ switch (signr) {
+ case SIGSEGV:
+ case SIGILL:
+ case SIGFPE:
+ case SIGBUS:
+ case SIGEMT:
+ info->si_code = current->thread.sig_desc;
+ info->si_addr = (void *)current->thread.sig_address;
+ info->si_trapno = 0;
+ break;
+ default:
+ break;
+ }
+ }
+
err = __put_user (info->si_signo, &sf->info.si_signo);
err |= __put_user (info->si_errno, &sf->info.si_errno);
err |= __put_user (info->si_code, &sf->info.si_code);
@@ -1084,7 +1105,7 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
case 1: seta.sig[1] = (oldset->sig[0] >> 32);
seta.sig[0] = oldset->sig[0];
}
- err |= __copy_to_user(&sf->mask, &seta, sizeof(sigset_t));
+ err |= __copy_to_user(&sf->mask, &seta, sizeof(sigset_t32));
err |= copy_in_user((u32 *)sf,
(u32 *)(regs->u_regs[UREG_FP]),
@@ -1122,7 +1143,8 @@ static inline void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs
goto sigsegv;
if(pte_present(*ptep)) {
- unsigned long page = pte_page(*ptep);
+ unsigned long page = (unsigned long)
+ __va(pte_pagenr(*ptep) << PAGE_SHIFT);
__asm__ __volatile__("
membar #StoreStore
@@ -1326,7 +1348,8 @@ asmlinkage int do_signal32(sigset_t *oldset, struct pt_regs * regs,
continue;
case SIGQUIT: case SIGILL: case SIGTRAP:
- case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
+ case SIGABRT: case SIGFPE: case SIGSEGV:
+ case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
if (do_coredump(signr, regs))
exit_code |= 0x80;
#ifdef DEBUG_SIGNALS
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index d1adeb2c7..2fa4945d8 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -94,7 +94,8 @@ void __init smp_store_cpu_info(int id)
cpu_data[id].udelay_val = loops_per_sec;
cpu_data[id].pgcache_size = 0;
- cpu_data[id].pte_cache = NULL;
+ cpu_data[id].pte_cache[0] = NULL;
+ cpu_data[id].pte_cache[1] = NULL;
cpu_data[id].pgdcache_size = 0;
cpu_data[id].pgd_cache = NULL;
cpu_data[id].idle_volume = 1;
@@ -184,7 +185,7 @@ void cpu_panic(void)
extern struct prom_cpuinfo linux_cpus[64];
-extern unsigned long smp_trampoline;
+extern unsigned long sparc64_cpu_startup;
/* The OBP cpu startup callback truncates the 3rd arg cookie to
* 32-bits (I think) so to be safe we have it read the pointer
@@ -210,15 +211,13 @@ void __init smp_boot_cpus(void)
continue;
if(cpu_present_map & (1UL << i)) {
- unsigned long entry = (unsigned long)(&smp_trampoline);
+ unsigned long entry = (unsigned long)(&sparc64_cpu_startup);
unsigned long cookie = (unsigned long)(&cpu_new_task);
struct task_struct *p;
int timeout;
int no;
- extern unsigned long phys_base;
- entry += phys_base - KERNBASE;
- cookie += phys_base - KERNBASE;
+ prom_printf("Starting CPU %d... ", i);
kernel_thread(start_secondary, NULL, CLONE_PID);
cpucount++;
@@ -247,9 +246,11 @@ void __init smp_boot_cpus(void)
cpu_number_map[i] = cpucount;
__cpu_logical_map[cpucount] = i;
prom_cpu_nodes[i] = linux_cpus[no].prom_node;
+ prom_printf("OK\n");
} else {
cpucount--;
printk("Processor %d is stuck.\n", i);
+ prom_printf("FAILED\n");
}
}
if(!callin_flag) {
@@ -537,14 +538,31 @@ void smp_release(void)
/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
* can service tlb flush xcalls...
*/
+extern void prom_world(int);
+extern void save_alternate_globals(unsigned long *);
+extern void restore_alternate_globals(unsigned long *);
void smp_penguin_jailcell(void)
{
- flushw_user();
+ unsigned long global_save[24];
+
+ __asm__ __volatile__("flushw");
+ save_alternate_globals(global_save);
+ prom_world(1);
atomic_inc(&smp_capture_registry);
membar("#StoreLoad | #StoreStore");
while(penguins_are_doing_time)
membar("#LoadLoad");
+ restore_alternate_globals(global_save);
atomic_dec(&smp_capture_registry);
+ prom_world(0);
+}
+
+extern unsigned long xcall_promstop;
+
+void smp_promstop_others(void)
+{
+ if (smp_processors_ready)
+ smp_cross_call(&xcall_promstop, 0, 0, 0);
}
static inline void sparc64_do_profile(unsigned long pc, unsigned long g3)
@@ -701,14 +719,13 @@ static inline unsigned long find_flush_base(unsigned long size)
/* Failure. */
if(p >= (mem_map + max_mapnr))
return 0UL;
- if(PageSkip(p)) {
- p = p->next_hash;
- base = page_address(p);
+ if(PageReserved(p)) {
found = size;
+ base = page_address(p);
} else {
found -= PAGE_SIZE;
- p++;
}
+ p++;
}
return base;
}
@@ -718,7 +735,7 @@ cycles_t cacheflush_time;
static void __init smp_tune_scheduling (void)
{
unsigned long flush_base, flags, *p;
- unsigned int ecache_size;
+ unsigned int ecache_size, order;
cycles_t tick1, tick2, raw;
/* Approximate heuristic for SMP scheduling. It is an
@@ -733,18 +750,22 @@ static void __init smp_tune_scheduling (void)
*/
printk("SMP: Calibrating ecache flush... ");
ecache_size = prom_getintdefault(linux_cpus[0].prom_node,
- "ecache-size", (512 *1024));
- flush_base = find_flush_base(ecache_size << 1);
-
- if(flush_base != 0UL) {
+ "ecache-size", (512 * 1024));
+ if (ecache_size > (4 * 1024 * 1024))
+ ecache_size = (4 * 1024 * 1024);
+ for (order = 0UL; (PAGE_SIZE << order) < ecache_size; order++)
+ ;
+ flush_base = __get_free_pages(GFP_KERNEL, order);
+
+ if (flush_base != 0UL) {
__save_and_cli(flags);
/* Scan twice the size once just to get the TLB entries
* loaded and make sure the second scan measures pure misses.
*/
- for(p = (unsigned long *)flush_base;
- ((unsigned long)p) < (flush_base + (ecache_size<<1));
- p += (64 / sizeof(unsigned long)))
+ for (p = (unsigned long *)flush_base;
+ ((unsigned long)p) < (flush_base + (ecache_size<<1));
+ p += (64 / sizeof(unsigned long)))
*((volatile unsigned long *)p);
/* Now the real measurement. */
@@ -775,9 +796,12 @@ static void __init smp_tune_scheduling (void)
* sharing the cache and fitting.
*/
cacheflush_time = (raw - (raw >> 2));
- } else
+
+ free_pages(flush_base, order);
+ } else {
cacheflush_time = ((ecache_size << 2) +
(ecache_size << 1));
+ }
printk("Using heuristic of %d cycles.\n",
(int) cacheflush_time);
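Aside, not part of the patch: smp_tune_scheduling() now caps the probed E-cache size at 4MB and allocates its flush buffer with __get_free_pages() at the smallest order that covers it, freeing the pages afterwards. A sketch of that order computation, using an 8K base page only to make the numbers concrete:

/* Sketch: smallest order with (page_size << order) >= bytes. */
static unsigned int sketch_order_for(unsigned long bytes)
{
	unsigned long page_size = 1UL << 13;	/* 8K, for the example only */
	unsigned int order = 0;

	while ((page_size << order) < bytes)
		order++;
	return order;
}

/* sketch_order_for(4UL * 1024 * 1024) == 9, since 8K << 9 == 4MB */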
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 434f22bf8..aa28151c8 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -1,4 +1,4 @@
-/* $Id: sparc64_ksyms.c,v 1.64 1999/09/05 09:33:38 ecd Exp $
+/* $Id: sparc64_ksyms.c,v 1.70 2000/01/07 18:15:18 jj Exp $
* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -55,8 +55,6 @@ struct poll {
extern unsigned prom_cpu_nodes[64];
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-extern unsigned long sunos_mmap(unsigned long, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
void _sigpause_common (unsigned int set, struct pt_regs *);
extern void *__bzero(void *, size_t);
extern void *__bzero_noasi(void *, size_t);
@@ -82,6 +80,7 @@ extern int svr4_setcontext(svr4_ucontext_t *uc, struct pt_regs *regs);
extern int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
extern int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
+extern long sparc32_open(const char * filename, int flags, int mode);
extern void bcopy (const char *, char *, int);
extern int __ashrdi3(int, int);
@@ -117,11 +116,18 @@ __attribute__((section("__ksymtab"))) = \
/* used by various drivers */
#ifdef __SMP__
+#ifndef SPIN_LOCK_DEBUG
/* Out of line rw-locking implementation. */
EXPORT_SYMBOL_PRIVATE(read_lock);
EXPORT_SYMBOL_PRIVATE(read_unlock);
EXPORT_SYMBOL_PRIVATE(write_lock);
EXPORT_SYMBOL_PRIVATE(write_unlock);
+#endif
+
+/* rw semaphores */
+EXPORT_SYMBOL_NOVERS(__down_read_failed);
+EXPORT_SYMBOL_NOVERS(__down_write_failed);
+EXPORT_SYMBOL_NOVERS(__rwsem_wake);
/* Kernel wide locking */
EXPORT_SYMBOL(kernel_flag);
@@ -175,23 +181,25 @@ EXPORT_SYMBOL_PRIVATE(flushw_user);
EXPORT_SYMBOL(mstk48t02_regs);
EXPORT_SYMBOL(request_fast_irq);
-EXPORT_SYMBOL(sparc_alloc_io);
-EXPORT_SYMBOL(sparc_free_io);
-EXPORT_SYMBOL(sparc_ultra_unmapioaddr);
-EXPORT_SYMBOL(mmu_get_scsi_sgl);
-EXPORT_SYMBOL(mmu_get_scsi_one);
-EXPORT_SYMBOL(sparc_dvma_malloc);
-EXPORT_SYMBOL(mmu_release_scsi_one);
-EXPORT_SYMBOL(mmu_release_scsi_sgl);
#if CONFIG_SBUS
-EXPORT_SYMBOL(mmu_set_sbus64);
-EXPORT_SYMBOL(SBus_chain);
+EXPORT_SYMBOL(sbus_root);
EXPORT_SYMBOL(dma_chain);
+EXPORT_SYMBOL(sbus_set_sbus64);
+EXPORT_SYMBOL(sbus_alloc_consistant);
+EXPORT_SYMBOL(sbus_free_consistant);
+EXPORT_SYMBOL(sbus_map_single);
+EXPORT_SYMBOL(sbus_unmap_single);
+EXPORT_SYMBOL(sbus_map_sg);
+EXPORT_SYMBOL(sbus_unmap_sg);
+EXPORT_SYMBOL(sbus_dma_sync_single);
+EXPORT_SYMBOL(sbus_dma_sync_sg);
#endif
#if CONFIG_PCI
EXPORT_SYMBOL(ebus_chain);
+#ifndef NEW_PCI_DMA_MAP
EXPORT_SYMBOL(pci_dvma_v2p_hash);
EXPORT_SYMBOL(pci_dvma_p2v_hash);
+#endif
EXPORT_SYMBOL(pci_memspace_mask);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(outsb);
@@ -204,7 +212,6 @@ EXPORT_SYMBOL(insl);
/* Solaris/SunOS binary compatibility */
EXPORT_SYMBOL(_sigpause_common);
-EXPORT_SYMBOL(sunos_mmap);
/* Should really be in linux/kernel/ksyms.c */
EXPORT_SYMBOL(dump_thread);
@@ -229,10 +236,10 @@ EXPORT_SYMBOL(prom_node_has_property);
EXPORT_SYMBOL(prom_setprop);
EXPORT_SYMBOL(saved_command_line);
EXPORT_SYMBOL(prom_getname);
+EXPORT_SYMBOL(prom_finddevice);
EXPORT_SYMBOL(prom_feval);
EXPORT_SYMBOL(prom_getbool);
EXPORT_SYMBOL(prom_getstring);
-EXPORT_SYMBOL(prom_apply_sbus_ranges);
EXPORT_SYMBOL(prom_getint);
EXPORT_SYMBOL(prom_getintdefault);
EXPORT_SYMBOL(__prom_getchild);
@@ -274,6 +281,7 @@ EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(prom_cpu_nodes);
EXPORT_SYMBOL(sys_ioctl);
EXPORT_SYMBOL(sys32_ioctl);
+EXPORT_SYMBOL(sparc32_open);
EXPORT_SYMBOL(move_addr_to_kernel);
EXPORT_SYMBOL(move_addr_to_user);
#endif
@@ -311,5 +319,3 @@ EXPORT_SYMBOL_NOVERS(memcmp);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(memmove);
-
-EXPORT_SYMBOL(get_wchan);
diff --git a/arch/sparc64/kernel/starfire.c b/arch/sparc64/kernel/starfire.c
index 805dce338..3237b37dc 100644
--- a/arch/sparc64/kernel/starfire.c
+++ b/arch/sparc64/kernel/starfire.c
@@ -1,4 +1,4 @@
-/* $Id: starfire.c,v 1.3 1999/08/30 10:01:13 davem Exp $
+/* $Id: starfire.c,v 1.4 1999/09/21 14:35:25 davem Exp $
* starfire.c: Starfire/E10000 support.
*
* Copyright (C) 1998 David S. Miller (davem@dm.cobaltmicro.com)
@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
+#include <asm/upa.h>
/* A few places around the kernel check this to see if
* they need to call us to do things in a Starfire specific
@@ -43,7 +44,7 @@ void starfire_check(void)
int starfire_hard_smp_processor_id(void)
{
- return *((volatile unsigned int *) __va(0x1fff40000d0));
+ return upa_readl(0x1fff40000d0UL);
}
/* Each Starfire board has 32 registers which perform translation
@@ -52,8 +53,8 @@ int starfire_hard_smp_processor_id(void)
* bits than in all previous Sun5 systems.
*/
struct starfire_irqinfo {
- volatile unsigned int *imap_slots[32];
- volatile unsigned int *tregs[32];
+ unsigned long imap_slots[32];
+ unsigned long tregs[32];
struct starfire_irqinfo *next;
int upaid, hwmid;
};
@@ -79,8 +80,8 @@ void *starfire_hookup(int upaid)
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for(i = 0; i < 32; i++) {
- p->imap_slots[i] = NULL;
- p->tregs[i] = (volatile unsigned int *)__va(treg_base + (i * 0x10));
+ p->imap_slots[i] = 0UL;
+ p->tregs[i] = treg_base + (i * 0x10UL);
}
p->upaid = upaid;
p->next = sflist;
@@ -89,7 +90,7 @@ void *starfire_hookup(int upaid)
return (void *) p;
}
-unsigned int starfire_translate(volatile unsigned int *imap,
+unsigned int starfire_translate(unsigned long imap,
unsigned int upaid)
{
struct starfire_irqinfo *p;
@@ -107,7 +108,7 @@ unsigned int starfire_translate(volatile unsigned int *imap,
}
for(i = 0; i < 32; i++) {
if(p->imap_slots[i] == imap ||
- p->imap_slots[i] == NULL)
+ p->imap_slots[i] == 0UL)
break;
}
if(i == 32) {
@@ -115,7 +116,7 @@ unsigned int starfire_translate(volatile unsigned int *imap,
panic("Lucy in the sky....");
}
p->imap_slots[i] = imap;
- *(p->tregs[i]) = upaid;
+ upa_writel(upaid, p->tregs[i]);
return i;
}
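Aside, not part of the patch: the starfire.c change keeps raw physical addresses in the per-board tables and accesses them through upa_readl()/upa_writel() instead of caching __va()-translated volatile pointers. The two access styles, shown side by side as a sketch:

/* Old style: map the physical address and dereference it directly. */
static unsigned int old_style_read(unsigned long pa)
{
	return *((volatile unsigned int *) __va(pa));
}

/* New style: keep the physical address, let the accessor do the I/O. */
static unsigned int new_style_read(unsigned long pa)
{
	return upa_readl(pa);
}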
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index fd1ff6a0b..4b461fbee 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -1,4 +1,4 @@
-/* $Id: sys32.S,v 1.8 1998/10/28 08:10:37 jj Exp $
+/* $Id: sys32.S,v 1.9 1999/12/21 14:09:18 jj Exp $
* sys32.S: I-cache tricks for 32-bit compatability layer simple
* conversions.
*
@@ -74,3 +74,12 @@ sys32_bdflush:
sethi %hi(sys_bdflush), %g1
jmpl %g1 + %lo(sys_bdflush), %g0
sra %o1, 0, %o1
+
+ .align 32
+ .globl sys32_mmap2
+sys32_mmap2:
+ srl %o4, 0, %o4
+ sethi %hi(sys_mmap), %g1
+ srl %o5, 0, %o5
+ jmpl %g1 + %lo(sys_mmap), %g0
+ sllx %o5, 12, %o5
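Aside, not part of the patch: sys32_mmap2 zero-extends the 32-bit arguments and converts the page offset to a byte offset by shifting left 12 before tailing into sys_mmap. The equivalent arithmetic in C:

/* mmap2 passes its offset in 4K units; sys_mmap wants bytes. */
static unsigned long mmap2_byte_offset(unsigned int pgoff)
{
	return ((unsigned long) pgoff) << 12;
}

/* mmap2_byte_offset(3) == 0x3000 */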
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 4e87819d4..486a09d99 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc.c,v 1.29 1999/08/04 07:04:10 jj Exp $
+/* $Id: sys_sparc.c,v 1.32 2000/01/05 01:00:40 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -6,6 +6,7 @@
* platform.
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
@@ -156,15 +157,16 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
struct file * file = NULL;
unsigned long retval = -EBADF;
- down(&current->mm->mmap_sem);
- lock_kernel();
if (!(flags & MAP_ANONYMOUS)) {
file = fget(fd);
if (!file)
goto out;
}
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
retval = -ENOMEM;
len = PAGE_ALIGN(len);
+ down(&current->mm->mmap_sem);
+ lock_kernel();
if(!(flags & MAP_FIXED) && !addr) {
addr = get_unmapped_area(addr, len);
if(!addr)
@@ -187,15 +189,14 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
}
}
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
retval = do_mmap(file, addr, len, prot, flags, off);
out_putf:
+ unlock_kernel();
+ up(&current->mm->mmap_sem);
if (file)
fput(file);
out:
- unlock_kernel();
- up(&current->mm->mmap_sem);
return retval;
}
@@ -275,6 +276,21 @@ asmlinkage int solaris_syscall(struct pt_regs *regs)
return -ENOSYS;
}
+#ifndef CONFIG_SUNOS_EMUL
+asmlinkage int sunos_syscall(struct pt_regs *regs)
+{
+ static int count = 0;
+ lock_kernel();
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+ if(++count <= 20)
+ printk ("SunOS binary emulation not compiled in\n");
+ force_sig(SIGSEGV, current);
+ unlock_kernel();
+ return -ENOSYS;
+}
+#endif
+
asmlinkage int sys_utrap_install(utrap_entry_t type, utrap_handler_t new_p,
utrap_handler_t new_d,
utrap_handler_t *old_p, utrap_handler_t *old_d)
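
The sunos_syscall() stub added above is what an unconfigured SunOS emulation now traps into: it steps past the trapping instruction (tpc = tnpc, tnpc += 4), complains at most 20 times, and raises SIGSEGV. A tiny standalone sketch of the "warn at most N times" pattern it relies on:

    #include <stdio.h>

    static void warn_limited(const char *msg)
    {
        static int count;               /* persists across calls, like the stub's */

        if (++count <= 20)
            fprintf(stderr, "%s\n", msg);
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++)
            warn_limited("SunOS binary emulation not compiled in");
        return 0;                       /* only 20 lines reach stderr */
    }
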
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index e394ec35b..f9be1320e 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc32.c,v 1.118 1999/08/30 10:01:15 davem Exp $
+/* $Id: sys_sparc32.c,v 1.127 2000/01/04 23:54:41 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
+#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/utime.h>
@@ -40,6 +41,8 @@
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
+#include <linux/filter.h>
+#include <linux/highmem.h>
#include <asm/types.h>
#include <asm/ipc.h>
@@ -712,6 +715,25 @@ asmlinkage int sys32_fstatfs(unsigned int fd, struct statfs32 *buf)
return ret;
}
+extern asmlinkage long sys_truncate(const char * path, unsigned long length);
+extern asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
+
+asmlinkage int sys32_truncate64(const char * path, unsigned long high, unsigned long low)
+{
+ if ((int)high < 0)
+ return -EINVAL;
+ else
+ return sys_truncate(path, (high << 32) | low);
+}
+
+asmlinkage int sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
+{
+ if ((int)high < 0)
+ return -EINVAL;
+ else
+ return sys_ftruncate(fd, (high << 32) | low);
+}
+
extern asmlinkage int sys_utime(char * filename, struct utimbuf * times);
struct utimbuf32 {
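
sys32_truncate64()/sys32_ftruncate64() above rebuild a 64-bit length from the two 32-bit halves a 32-bit caller passes in, rejecting anything with the top bit of `high' set (a length of 2^63 or more). Standalone illustration of that reconstruction:

    #include <stdio.h>
    #include <stdint.h>

    static long long combine_len(uint32_t high, uint32_t low)
    {
        if ((int32_t)high < 0)
            return -22;                          /* -EINVAL, as in the patch */
        return ((long long)high << 32) | low;
    }

    int main(void)
    {
        printf("%lld\n", combine_len(0, 4096));          /* 4096             */
        printf("%lld\n", combine_len(1, 0));             /* 4294967296 (4GB) */
        printf("%lld\n", combine_len(0x80000000u, 0));   /* rejected: -22    */
        return 0;
    }
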
@@ -1125,8 +1147,10 @@ asmlinkage int sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
}
ret = -EINVAL;
- if (n < 0 || n > KFDS_NR)
+ if (n < 0)
goto out_nofds;
+ if (n > current->files->max_fdset)
+ n = current->files->max_fdset;
/*
* We need 6 bitmaps (in/out/ex for both incoming and outgoing),
@@ -1186,84 +1210,157 @@ out_nofds:
return ret;
}
-static inline int putstat(struct stat32 *ubuf, struct stat *kbuf)
+static int cp_new_stat32(struct inode *inode, struct stat32 *statbuf)
{
+ unsigned long ino, blksize, blocks;
+ kdev_t dev, rdev;
+ umode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ off_t size;
+ time_t atime, mtime, ctime;
int err;
-
- err = put_user (kbuf->st_dev, &ubuf->st_dev);
- err |= __put_user (kbuf->st_ino, &ubuf->st_ino);
- err |= __put_user (kbuf->st_mode, &ubuf->st_mode);
- err |= __put_user (kbuf->st_nlink, &ubuf->st_nlink);
- err |= __put_user (kbuf->st_uid, &ubuf->st_uid);
- err |= __put_user (kbuf->st_gid, &ubuf->st_gid);
- err |= __put_user (kbuf->st_rdev, &ubuf->st_rdev);
- err |= __put_user (kbuf->st_size, &ubuf->st_size);
- err |= __put_user (kbuf->st_atime, &ubuf->st_atime);
- err |= __put_user (kbuf->st_mtime, &ubuf->st_mtime);
- err |= __put_user (kbuf->st_ctime, &ubuf->st_ctime);
- err |= __put_user (kbuf->st_blksize, &ubuf->st_blksize);
- err |= __put_user (kbuf->st_blocks, &ubuf->st_blocks);
+
+ /* Stream the loads of inode data into the load buffer,
+ * then we push it all into the store buffer below. This
+ * should give optimal cache performance.
+ */
+ ino = inode->i_ino;
+ dev = inode->i_dev;
+ mode = inode->i_mode;
+ nlink = inode->i_nlink;
+ uid = inode->i_uid;
+ gid = inode->i_gid;
+ rdev = inode->i_rdev;
+ size = inode->i_size;
+ atime = inode->i_atime;
+ mtime = inode->i_mtime;
+ ctime = inode->i_ctime;
+ blksize = inode->i_blksize;
+ blocks = inode->i_blocks;
+
+ err = put_user(kdev_t_to_nr(dev), &statbuf->st_dev);
+ err |= put_user(ino, &statbuf->st_ino);
+ err |= put_user(mode, &statbuf->st_mode);
+ err |= put_user(nlink, &statbuf->st_nlink);
+ err |= put_user(uid, &statbuf->st_uid);
+ err |= put_user(gid, &statbuf->st_gid);
+ err |= put_user(kdev_t_to_nr(rdev), &statbuf->st_rdev);
+ err |= put_user(size, &statbuf->st_size);
+ err |= put_user(atime, &statbuf->st_atime);
+ err |= put_user(0, &statbuf->__unused1);
+ err |= put_user(mtime, &statbuf->st_mtime);
+ err |= put_user(0, &statbuf->__unused2);
+ err |= put_user(ctime, &statbuf->st_ctime);
+ err |= put_user(0, &statbuf->__unused3);
+ if (blksize) {
+ err |= put_user(blksize, &statbuf->st_blksize);
+ err |= put_user(blocks, &statbuf->st_blocks);
+ } else {
+ unsigned int tmp_blocks;
+
+#define D_B 7
+#define I_B (BLOCK_SIZE / sizeof(unsigned short))
+ tmp_blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ if (tmp_blocks > D_B) {
+ unsigned int indirect;
+
+ indirect = (tmp_blocks - D_B + I_B - 1) / I_B;
+ tmp_blocks += indirect;
+ if (indirect > 1) {
+ indirect = (indirect - 1 + I_B - 1) / I_B;
+ tmp_blocks += indirect;
+ if (indirect > 1)
+ tmp_blocks++;
+ }
+ }
+ err |= put_user(BLOCK_SIZE, &statbuf->st_blksize);
+ err |= put_user((BLOCK_SIZE / 512) * tmp_blocks, &statbuf->st_blocks);
+#undef D_B
+#undef I_B
+ }
+ err |= put_user(0, &statbuf->__unused4[0]);
+ err |= put_user(0, &statbuf->__unused4[1]);
+
return err;
}
-extern asmlinkage int sys_newstat(char * filename, struct stat * statbuf);
-
asmlinkage int sys32_newstat(char * filename, struct stat32 *statbuf)
{
- int ret;
- struct stat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname32 (filename);
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = sys_newstat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- if (putstat (statbuf, &s))
- return -EFAULT;
+ struct dentry *dentry;
+ int error;
+
+ lock_kernel();
+ dentry = namei(filename);
+
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+ struct inode *inode = dentry->d_inode;
+
+ if (inode->i_op &&
+ inode->i_op->revalidate)
+ error = inode->i_op->revalidate(dentry);
+ else
+ error = 0;
+ if (!error)
+ error = cp_new_stat32(inode, statbuf);
+
+ dput(dentry);
}
- return ret;
+ unlock_kernel();
+ return error;
}
-extern asmlinkage int sys_newlstat(char * filename, struct stat * statbuf);
-
asmlinkage int sys32_newlstat(char * filename, struct stat32 *statbuf)
{
- int ret;
- struct stat s;
- char *filenam;
- mm_segment_t old_fs = get_fs();
-
- filenam = getname32 (filename);
- ret = PTR_ERR(filenam);
- if (!IS_ERR(filenam)) {
- set_fs (KERNEL_DS);
- ret = sys_newlstat(filenam, &s);
- set_fs (old_fs);
- putname (filenam);
- if (putstat (statbuf, &s))
- return -EFAULT;
+ struct dentry *dentry;
+ int error;
+
+ lock_kernel();
+ dentry = lnamei(filename);
+
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+ struct inode *inode = dentry->d_inode;
+
+ if (inode->i_op &&
+ inode->i_op->revalidate)
+ error = inode->i_op->revalidate(dentry);
+ else
+ error = 0;
+ if (!error)
+ error = cp_new_stat32(inode, statbuf);
+
+ dput(dentry);
}
- return ret;
+ unlock_kernel();
+ return error;
}
-extern asmlinkage int sys_newfstat(unsigned int fd, struct stat * statbuf);
-
asmlinkage int sys32_newfstat(unsigned int fd, struct stat32 *statbuf)
{
- int ret;
- struct stat s;
- mm_segment_t old_fs = get_fs();
-
- set_fs (KERNEL_DS);
- ret = sys_newfstat(fd, &s);
- set_fs (old_fs);
- if (putstat (statbuf, &s))
- return -EFAULT;
- return ret;
+ struct file *f;
+ int err = -EBADF;
+
+ lock_kernel();
+ f = fget(fd);
+ if (f) {
+ struct dentry *dentry = f->f_dentry;
+ struct inode *inode = dentry->d_inode;
+
+ if (inode->i_op &&
+ inode->i_op->revalidate)
+ err = inode->i_op->revalidate(dentry);
+ else
+ err = 0;
+ if (!err)
+ err = cp_new_stat32(inode, statbuf);
+
+ fput(f);
+ }
+ unlock_kernel();
+ return err;
}
extern asmlinkage int sys_sysfs(int option, unsigned long arg1, unsigned long arg2);
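
When the inode carries no i_blksize, cp_new_stat32() above falls back to estimating st_blocks from the file size: 7 direct blocks plus (BLOCK_SIZE / sizeof(unsigned short)) pointers per indirect block, reported in 512-byte units. The same estimate as a standalone program (BLOCK_SIZE is 1024, as in the kernel headers):

    #include <stdio.h>

    #define BLOCK_SIZE 1024
    #define D_B 7
    #define I_B (BLOCK_SIZE / sizeof(unsigned short))

    static unsigned int estimate_blocks(long long size)
    {
        unsigned int blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;

        if (blocks > D_B) {
            unsigned int indirect = (blocks - D_B + I_B - 1) / I_B;

            blocks += indirect;
            if (indirect > 1) {
                indirect = (indirect - 1 + I_B - 1) / I_B;     /* double indirect */
                blocks += indirect;
                if (indirect > 1)
                    blocks++;                                  /* triple indirect */
            }
        }
        return (BLOCK_SIZE / 512) * blocks;                    /* 512-byte units */
    }

    int main(void)
    {
        printf("%u\n", estimate_blocks(4096));              /* small file   */
        printf("%u\n", estimate_blocks(10LL * 1024 * 1024)); /* 10MB file    */
        return 0;
    }
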
@@ -1912,8 +2009,8 @@ asmlinkage int sys32_setgroups(int gidsetsize, __kernel_gid_t32 *grouplist)
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
- s32 rlim_cur;
- s32 rlim_max;
+ u32 rlim_cur;
+ u32 rlim_max;
};
extern asmlinkage int sys_getrlimit(unsigned int resource, struct rlimit *rlim);
@@ -2523,6 +2620,48 @@ out:
return len;
}
+extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
+ char *optval, int optlen);
+
+asmlinkage int sys32_setsockopt(int fd, int level, int optname,
+ char *optval, int optlen)
+{
+ if (optname == SO_ATTACH_FILTER) {
+ struct sock_fprog32 {
+ __u16 len;
+ __u32 filter;
+ } *fprog32 = (struct sock_fprog32 *)optval;
+ struct sock_fprog kfprog;
+ struct sock_filter *kfilter;
+ unsigned int fsize;
+ mm_segment_t old_fs;
+ __u32 uptr;
+ int ret;
+
+ if (get_user(kfprog.len, &fprog32->len) ||
+ __get_user(uptr, &fprog32->filter))
+ return -EFAULT;
+ kfprog.filter = (struct sock_filter *)A(uptr);
+ fsize = kfprog.len * sizeof(struct sock_filter);
+ kfilter = (struct sock_filter *)kmalloc(fsize, GFP_KERNEL);
+ if (kfilter == NULL)
+ return -ENOMEM;
+ if (copy_from_user(kfilter, kfprog.filter, fsize)) {
+ kfree(kfilter);
+ return -EFAULT;
+ }
+ kfprog.filter = kfilter;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_setsockopt(fd, level, optname,
+ (char *)&kfprog, sizeof(kfprog));
+ set_fs(old_fs);
+ kfree(kfilter);
+ return ret;
+ }
+ return sys_setsockopt(fd, level, optname, optval, optlen);
+}
+
/* Argument list sizes for sys_socketcall */
#define AL(x) ((x) * sizeof(u32))
static unsigned char nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
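
sys32_setsockopt() above has to special-case SO_ATTACH_FILTER because a 32-bit sock_fprog stores its filter pointer as a 32-bit value: the kernel widens the pointer, copies the filter program into kernel memory, and only then calls the native setsockopt under KERNEL_DS. A userspace sketch of just the layout translation, with a byte array standing in for the 32-bit user address space (struct names are stand-ins, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    /* one BPF-style instruction; same layout under both ABIs */
    struct filter_insn { uint16_t code; uint8_t jt, jf; uint32_t k; };

    /* what the 32-bit caller hands in: a length and a 32-bit pointer */
    struct fprog32 { uint16_t len; uint32_t filter; };

    /* pretend 32-bit "user" address space, so the uint32_t pointer stays honest */
    static unsigned char user_mem[1 << 16];
    static void *user_ptr(uint32_t uaddr) { return user_mem + uaddr; }

    /* kernel-side step: copy the program out of 32-bit user space into a
     * native buffer the 64-bit setsockopt path can use directly */
    static struct filter_insn *widen_filter(const struct fprog32 *f32)
    {
        size_t fsize = f32->len * sizeof(struct filter_insn);
        struct filter_insn *kfilter = malloc(fsize);

        if (kfilter)
            memcpy(kfilter, user_ptr(f32->filter), fsize);   /* ~copy_from_user() */
        return kfilter;
    }

    int main(void)
    {
        struct filter_insn prog[1] = { { 6, 0, 0, 0xffff } };
        struct fprog32 f32 = { 1, 0x100 };
        struct filter_insn *k;

        memcpy(user_mem + 0x100, prog, sizeof(prog));
        k = widen_filter(&f32);
        if (k)
            printf("len=%u k=%u\n", (unsigned)f32.len, (unsigned)k[0].k);
        free(k);
        return 0;
    }
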
@@ -2541,8 +2680,6 @@ extern asmlinkage int sys32_sendto(int fd, u32 buff, __kernel_size_t32 len,
extern asmlinkage int sys_recv(int fd, void *ubuf, size_t size, unsigned flags);
extern asmlinkage int sys32_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
unsigned flags, u32 addr, u32 addr_len);
-extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
- char *optval, int optlen);
extern asmlinkage int sys32_getsockopt(int fd, int level, int optname,
u32 optval, u32 optlen);
@@ -2593,7 +2730,7 @@ asmlinkage int sys32_socketcall(int call, u32 *args)
case SYS_SHUTDOWN:
return sys_shutdown(a0,a1);
case SYS_SETSOCKOPT:
- return sys_setsockopt(a0, a1, a[2], (char *)A(a[3]), a[4]);
+ return sys32_setsockopt(a0, a1, a[2], (char *)A(a[3]), a[4]);
case SYS_GETSOCKOPT:
return sys32_getsockopt(a0, a1, a[2], a[3], a[4]);
case SYS_SENDMSG:
@@ -2727,8 +2864,9 @@ static int copy_strings32(int argc, u32 * argv, struct linux_binprm *bprm)
if (get_user(str, argv + argc) ||
!str ||
- !(len = strlen_user((char *)A(str))))
+ !(len = strnlen_user((char *)A(str), bprm->p)))
return -EFAULT;
+
if (bprm->p < len)
return -E2BIG;
@@ -2736,20 +2874,38 @@ static int copy_strings32(int argc, u32 * argv, struct linux_binprm *bprm)
pos = bprm->p;
while (len) {
- char *pag;
- int offset, bytes_to_copy;
+ char *kaddr;
+ struct page *page;
+ int offset, bytes_to_copy, new, err;
offset = pos % PAGE_SIZE;
- if (!(pag = (char *) bprm->page[pos/PAGE_SIZE]) &&
- !(pag = (char *) bprm->page[pos/PAGE_SIZE] =
- (unsigned long *) get_free_page(GFP_USER)))
- return -ENOMEM;
+ page = bprm->page[pos / PAGE_SIZE];
+ new = 0;
+ if (!page) {
+ page = alloc_page(GFP_USER);
+ bprm->page[pos / PAGE_SIZE] = page;
+ if (!page)
+ return -ENOMEM;
+ new = 1;
+ }
+ kaddr = (char *)kmap(page);
+ if (new && offset)
+ memset(kaddr, 0, offset);
bytes_to_copy = PAGE_SIZE - offset;
- if (bytes_to_copy > len)
+ if (bytes_to_copy > len) {
bytes_to_copy = len;
+ if (new)
+ memset(kaddr+offset+len, 0,
+ PAGE_SIZE-offset-len);
+ }
- if (copy_from_user(pag + offset, (char *)A(str), bytes_to_copy))
+ err = copy_from_user(kaddr + offset, (char *)A(str),
+ bytes_to_copy);
+ flush_page_to_ram(page);
+ kunmap((unsigned long)kaddr);
+
+ if (err)
return -EFAULT;
pos += bytes_to_copy;
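
copy_strings32() above now pulls argument strings into the binprm pages chunk by chunk, allocating each page lazily with alloc_page()/kmap() and zero-filling whatever part of a freshly allocated page the copy does not overwrite. The same page-chunked loop in standalone form (page size shrunk so the wrap is easy to see):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 16
    #define MAX_PAGES 8

    static char *pages[MAX_PAGES];

    static int copy_down(size_t pos, const char *str, size_t len)
    {
        while (len) {
            size_t offset = pos % PAGE_SIZE;
            size_t chunk  = PAGE_SIZE - offset;
            char  *page   = pages[pos / PAGE_SIZE];
            int    fresh  = 0;

            if (!page) {                         /* alloc_page() equivalent */
                page = malloc(PAGE_SIZE);
                if (!page)
                    return -1;
                pages[pos / PAGE_SIZE] = page;
                fresh = 1;
            }
            if (fresh && offset)                 /* zero the untouched head */
                memset(page, 0, offset);
            if (chunk > len) {
                chunk = len;
                if (fresh)                       /* zero the untouched tail */
                    memset(page + offset + len, 0, PAGE_SIZE - offset - len);
            }
            memcpy(page + offset, str, chunk);   /* ~copy_from_user() */
            pos += chunk;
            str += chunk;
            len -= chunk;
        }
        return 0;
    }

    int main(void)
    {
        const char *arg = "argument-longer-than-one-page";

        if (copy_down(5, arg, strlen(arg) + 1) == 0)
            printf("copied across %zu pages\n", (5 + strlen(arg)) / PAGE_SIZE + 1);
        return 0;
    }
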
@@ -2772,8 +2928,7 @@ do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
int i;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
- for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
- bprm.page[i] = 0;
+ memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
dentry = open_namei(filename, 0, 0);
retval = PTR_ERR(dentry);
@@ -2822,7 +2977,8 @@ out:
dput(bprm.dentry);
for (i=0 ; i<MAX_ARG_PAGES ; i++)
- free_page(bprm.page[i]);
+ if (bprm.page[i])
+ __free_page(bprm.page[i]);
return retval;
}
@@ -3843,3 +3999,37 @@ asmlinkage int sys32_adjtimex(struct timex32 *utp)
return ret;
}
+
+/* This is just a version for 32-bit applications which does
+ * not force O_LARGEFILE on.
+ */
+
+asmlinkage long sparc32_open(const char * filename, int flags, int mode)
+{
+ char * tmp;
+ int fd, error;
+
+ tmp = getname(filename);
+ fd = PTR_ERR(tmp);
+ if (!IS_ERR(tmp)) {
+ fd = get_unused_fd();
+ if (fd >= 0) {
+ struct file * f;
+ lock_kernel();
+ f = filp_open(tmp, flags, mode);
+ unlock_kernel();
+ error = PTR_ERR(f);
+ if (IS_ERR(f))
+ goto out_error;
+ fd_install(fd, f);
+ }
+out:
+ putname(tmp);
+ }
+ return fd;
+
+out_error:
+ put_unused_fd(fd);
+ fd = error;
+ goto out;
+}
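
sparc32_open() above exists so that 32-bit tasks get an open() without O_LARGEFILE forced on, and its tail shows the usual two-step descriptor setup: reserve a slot with get_unused_fd(), open the file, then either commit with fd_install() or roll the slot back with put_unused_fd(). Generic sketch of that reserve/commit/rollback shape (names are stand-ins):

    #include <stdio.h>

    #define NSLOTS 4
    static void *slot_table[NSLOTS];
    static int   dummy_object;

    static int reserve_slot(void)                    /* ~get_unused_fd() */
    {
        for (int i = 0; i < NSLOTS; i++)
            if (!slot_table[i]) {
                slot_table[i] = (void *)&slot_table; /* mark reserved */
                return i;
            }
        return -1;
    }

    static void commit_slot(int i, void *obj) { slot_table[i] = obj;  } /* ~fd_install()    */
    static void rollback_slot(int i)          { slot_table[i] = NULL; } /* ~put_unused_fd() */

    static int open_thing(int should_fail)
    {
        int slot = reserve_slot();

        if (slot < 0)
            return -1;
        if (should_fail) {               /* filp_open() returned an error */
            rollback_slot(slot);
            return -1;
        }
        commit_slot(slot, &dummy_object);
        return slot;
    }

    int main(void)
    {
        int a = open_thing(0);           /* 0: committed             */
        int b = open_thing(1);           /* -1: rolled back          */
        int c = open_thing(0);           /* 1: reuses the freed slot */

        printf("%d %d %d\n", a, b, c);
        return 0;
    }
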
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index 0f0c2a536..ffc72b74d 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sunos32.c,v 1.31 1999/08/30 10:01:19 davem Exp $
+/* $Id: sys_sunos32.c,v 1.35 2000/01/06 23:51:50 davem Exp $
 * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
*
* Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -162,10 +162,10 @@ asmlinkage int sunos_brk(u32 baddr)
* simple, it hopefully works in most obvious cases.. Easy to
* fool it, but this should catch most mistakes.
*/
- freepages = atomic_read(&buffermem) >> PAGE_SHIFT;
+ freepages = atomic_read(&buffermem_pages) >> PAGE_SHIFT;
freepages += atomic_read(&page_cache_size);
freepages >>= 1;
- freepages += nr_free_pages;
+ freepages += nr_free_pages();
freepages += nr_swap_pages;
freepages -= num_physpages >> 4;
freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
@@ -685,7 +685,7 @@ struct sunos_nfs_mount_args {
char *netname; /* server's netname */
};
-extern int do_mount(kdev_t, const char *, const char *, char *, int, void *);
+extern int do_mount(struct block_device *, const char *, const char *, char *, int, void *);
extern dev_t get_unnamed_dev(void);
extern void put_unnamed_dev(dev_t);
extern asmlinkage int sys_mount(char *, char *, char *, unsigned long, void *);
@@ -762,12 +762,10 @@ static int get_default (int value, int def_value)
/* XXXXXXXXXXXXXXXXXXXX */
asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data)
{
- int ret = -ENODEV;
int server_fd;
char *the_name;
struct nfs_mount_data linux_nfs_mount;
struct sunos_nfs_mount_args *sunos_mount = data;
- dev_t dev;
/* Ok, here comes the fun part: Linux's nfs mount needs a
* socket connection to the server, but SunOS mount does not
@@ -809,13 +807,7 @@ asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data)
linux_nfs_mount.hostname [255] = 0;
putname (the_name);
- dev = get_unnamed_dev ();
-
- ret = do_mount (dev, "", dir_name, "nfs", linux_flags, &linux_nfs_mount);
- if (ret)
- put_unnamed_dev(dev);
-
- return ret;
+ return do_mount (NULL, "", dir_name, "nfs", linux_flags, &linux_nfs_mount);
}
/* XXXXXXXXXXXXXXXXXXXX */
@@ -1274,15 +1266,14 @@ asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
return rval;
}
-asmlinkage int sunos_open(u32 filename, int flags, int mode)
+extern asmlinkage long sparc32_open(const char * filename, int flags, int mode);
+
+asmlinkage int sunos_open(u32 fname, int flags, int mode)
{
- int ret;
+ const char *filename = (const char *)(long)fname;
- lock_kernel();
current->personality |= PER_BSD;
- ret = sys_open ((char *)A(filename), flags, mode);
- unlock_kernel();
- return ret;
+ return sparc32_open(filename, flags, mode);
}
#define SUNOS_EWOULDBLOCK 35
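
The sunos_brk() hunk above only swaps in the newer accounting helpers (buffermem_pages, nr_free_pages()); the heuristic itself is unchanged: count half of the buffer and page-cache pages as reclaimable, add free and swap pages, keep a 1/16-of-RAM reserve, and refuse the brk growth if it does not fit. The same calculation standalone (all quantities in pages, values made up):

    #include <stdio.h>

    static int brk_fits(long buffer_pages, long cache_pages, long free_pages,
                        long swap_pages, long phys_pages, long growth_pages)
    {
        long freepages = (buffer_pages + cache_pages) >> 1;  /* half reclaimable */

        freepages += free_pages;
        freepages += swap_pages;
        freepages -= phys_pages >> 4;                        /* 1/16 reserve */
        freepages -= growth_pages;
        return freepages > 0;
    }

    int main(void)
    {
        printf("%d\n", brk_fits(2000, 6000, 1000, 8000, 32768, 4000)); /* fits   */
        printf("%d\n", brk_fits(100, 200, 50, 0, 32768, 4000));        /* denied */
        return 0;
    }
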
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 06a827db2..27355da96 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -1,4 +1,4 @@
-/* $Id: systbls.S,v 1.56 1999/07/31 00:06:17 davem Exp $
+/* $Id: systbls.S,v 1.62 2000/01/04 23:54:43 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -10,6 +10,8 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
+#include <linux/config.h>
+
.text
.align 1024
@@ -18,7 +20,7 @@
.globl sys_call_table32
sys_call_table32:
/*0*/ .word sys_nis_syscall, sparc_exit, sys_fork, sys_read, sys_write
-/*5*/ .word sys_open, sys_close, sys32_wait4, sys_creat, sys_link
+/*5*/ .word sparc32_open, sys_close, sys32_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys32_chown, sys32_mknod
/*15*/ .word sys32_chmod, sys32_lchown, sparc_brk, sys_perfctr, sys32_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
@@ -28,12 +30,12 @@ sys_call_table32:
/*40*/ .word sys32_newlstat, sys_dup, sys_pipe, sys32_times, sys_nis_syscall
.word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
/*50*/ .word sys_getegid, sys_acct, sys_nis_syscall, sys_nis_syscall, sys32_ioctl
- .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys32_execve
-/*60*/ .word sys_umask, sys_chroot, sys32_newfstat, sys_nis_syscall, sys_getpagesize
+ .word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve
+/*60*/ .word sys_umask, sys_chroot, sys32_newfstat, sys_fstat64, sys_getpagesize
.word sys_msync, sys_vfork, sys32_pread, sys32_pwrite, sys_nis_syscall
/*70*/ .word sys_nis_syscall, sys32_mmap, sys_nis_syscall, sys_munmap, sys_mprotect
- .word sys_nis_syscall, sys_vhangup, sys_nis_syscall, sys_nis_syscall, sys32_getgroups
-/*80*/ .word sys32_setgroups, sys_getpgrp, sys_nis_syscall, sys32_setitimer, sys_nis_syscall
+ .word sys_nis_syscall, sys_vhangup, sys32_truncate64, sys_nis_syscall, sys32_getgroups
+/*80*/ .word sys32_setgroups, sys_getpgrp, sys_nis_syscall, sys32_setitimer, sys32_ftruncate64
.word sys_swapon, sys32_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
/*90*/ .word sys_dup2, sys_nis_syscall, sys32_fcntl, sys32_select, sys_nis_syscall
.word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
@@ -43,8 +45,8 @@ sys_call_table32:
.word sys_nis_syscall, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/ .word sys32_readv, sys32_writev, sys32_settimeofday, sys_fchown, sys_fchmod
.word sys_nis_syscall, sys32_setreuid, sys32_setregid, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
- .word sys_nis_syscall, sys_mkdir, sys_rmdir, sys32_utimes, sys_nis_syscall
+/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_mkdir, sys_rmdir, sys32_utimes, sys_stat64
/*140*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getrlimit
.word sys32_setrlimit, sys_nis_syscall, sys32_prctl, sys32_pciconfig_read, sys32_pciconfig_write
/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_nis_syscall
@@ -129,6 +131,8 @@ sys_call_table:
/*250*/ .word sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
.word sys_aplib
+#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
+ defined(CONFIG_SOLARIS_EMUL_MODULE)
/* Now the 32-bit SunOS syscall table. */
.align 1024
@@ -221,3 +225,5 @@ sunos_sys_table:
.word sunos_nosys, sunos_nosys
/*250*/ .word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys, sunos_nosys, sys_aplib
+
+#endif
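
The systbls.S changes above route a handful of 32-bit table slots to the new sparc32_open, sys32_mmap2 and the 64-bit stat/truncate wrappers, and only assemble the SunOS table when one of the emulation options is configured. A syscall table is just an array of handlers indexed by syscall number; minimal C analogue of that dispatch:

    #include <stdio.h>

    typedef long (*syscall_fn)(long, long, long);

    static long do_open (long a, long b, long c) { (void)b; (void)c; return a; }
    static long do_close(long a, long b, long c) { (void)a; (void)b; (void)c; return 0; }
    static long no_sys  (long a, long b, long c) { (void)a; (void)b; (void)c; return -38; } /* -ENOSYS */

    /* slot number == syscall number, exactly like the .word lists above */
    static const syscall_fn table[] = { no_sys, do_open, do_close };

    static long dispatch(unsigned int nr, long a, long b, long c)
    {
        if (nr >= sizeof(table) / sizeof(table[0]))
            return -38;
        return table[nr](a, b, c);
    }

    int main(void)
    {
        printf("%ld %ld %ld\n",
               dispatch(1, 42, 0, 0),   /* do_open: 42       */
               dispatch(2, 0, 0, 0),    /* do_close: 0       */
               dispatch(9, 0, 0, 0));   /* out of range: -38 */
        return 0;
    }
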
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 60d5e4a5f..57f5b1622 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.22 1999/08/30 10:01:22 davem Exp $
+/* $Id: time.c,v 1.23 1999/09/21 14:35:27 davem Exp $
* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -268,7 +268,7 @@ void __init clock_probe(void)
int node, busnd = -1, err;
unsigned long flags;
#ifdef CONFIG_PCI
- struct linux_ebus *ebus = 0;
+ struct linux_ebus *ebus = NULL;
#endif
__save_and_cli(flags);
@@ -282,8 +282,8 @@ void __init clock_probe(void)
busnd = ebus->prom_node;
}
#endif
- else {
- busnd = SBus_chain->prom_node;
+ else if (sbus_root != NULL) {
+ busnd = sbus_root->prom_node;
}
if(busnd == -1) {
@@ -304,9 +304,9 @@ void __init clock_probe(void)
if (node)
node = prom_getsibling(node);
#ifdef CONFIG_PCI
- while ((node == 0) && ebus) {
+ while ((node == 0) && ebus != NULL) {
ebus = ebus->next;
- if (ebus) {
+ if (ebus != NULL) {
busnd = ebus->prom_node;
node = prom_getchild(busnd);
}
@@ -327,17 +327,17 @@ void __init clock_probe(void)
}
if(central_bus) {
- prom_apply_fhc_ranges(central_bus->child, clk_reg, 1);
- prom_apply_central_ranges(central_bus, clk_reg, 1);
+ apply_fhc_ranges(central_bus->child, clk_reg, 1);
+ apply_central_ranges(central_bus, clk_reg, 1);
}
#ifdef CONFIG_PCI
- else if (ebus_chain) {
+ else if (ebus_chain != NULL) {
struct linux_ebus_device *edev;
for_each_ebusdev(edev, ebus)
if (edev->prom_node == node)
break;
- if (!edev) {
+ if (edev == NULL) {
prom_printf("%s: Mostek not probed by EBUS\n",
__FUNCTION__);
prom_halt();
@@ -349,9 +349,24 @@ void __init clock_probe(void)
}
#endif
else {
- prom_adjust_regs(clk_reg, 1,
- SBus_chain->sbus_ranges,
- SBus_chain->num_sbus_ranges);
+ if (sbus_root->num_sbus_ranges) {
+ int nranges = sbus_root->num_sbus_ranges;
+ int rngc;
+
+ for (rngc = 0; rngc < nranges; rngc++)
+ if (clk_reg[0].which_io ==
+ sbus_root->sbus_ranges[rngc].ot_child_space)
+ break;
+ if (rngc == nranges) {
+ prom_printf("clock_probe: Cannot find ranges for "
+ "clock regs.\n");
+ prom_halt();
+ }
+ clk_reg[0].which_io =
+ sbus_root->sbus_ranges[rngc].ot_parent_space;
+ clk_reg[0].phys_addr +=
+ sbus_root->sbus_ranges[rngc].ot_parent_base;
+ }
}
if(model[5] == '0' && model[6] == '2') {
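
Instead of calling prom_adjust_regs(), the clock_probe() hunk above open-codes the SBUS "ranges" translation for the clock registers: find the range whose child address space matches the register, then rewrite the register into the parent space and add the parent base. Standalone sketch of that translation with made-up range values (field names are stand-ins for the ot_* ones in the kernel):

    #include <stdio.h>

    struct range { unsigned int child_space, parent_space; unsigned long parent_base; };
    struct reg   { unsigned int which_io; unsigned long phys_addr; };

    static int apply_ranges(struct reg *r, const struct range *rng, int nranges)
    {
        for (int i = 0; i < nranges; i++) {
            if (r->which_io == rng[i].child_space) {
                r->which_io   = rng[i].parent_space;
                r->phys_addr += rng[i].parent_base;
                return 0;
            }
        }
        return -1;       /* no matching range: the kernel prom_halt()s here */
    }

    int main(void)
    {
        struct range ranges[] = { { 0, 1, 0x10000000UL }, { 2, 1, 0x30000000UL } };
        struct reg clk = { 2, 0x2000UL };

        if (apply_ranges(&clk, ranges, 2) == 0)
            printf("space=%u addr=0x%lx\n", clk.which_io, clk.phys_addr);
        return 0;
    }
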
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 6efbe1356..4f2606c97 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -1,4 +1,4 @@
-/* $Id: trampoline.S,v 1.10 1999/09/10 10:40:48 davem Exp $
+/* $Id: trampoline.S,v 1.12 1999/12/15 15:45:12 davem Exp $
* trampoline.S: Jump start slave processors on sparc64.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -14,21 +14,107 @@
#include <asm/asm_offsets.h>
.data
- .align 8
- .globl smp_trampoline
-smp_trampoline: .skip 0x300
+ .align 8
+call_method:
+ .asciz "call-method"
+ .align 8
+itlb_load:
+ .asciz "SUNW,itlb-load"
+ .align 8
+dtlb_load:
+ .asciz "SUNW,dtlb-load"
.text
.align 8
.globl sparc64_cpu_startup, sparc64_cpu_startup_end
sparc64_cpu_startup:
flushw
+
mov (LSU_CONTROL_IC | LSU_CONTROL_DC | LSU_CONTROL_IM | LSU_CONTROL_DM), %g1
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
- wrpr %g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate
- wr %g0, 0, %fprs
+
wrpr %g0, 15, %pil
+ wr %g0, 0, %tick_cmpr
+
+ /* Call OBP by hand to lock KERNBASE into i/d tlbs. */
+ mov %o0, %l0
+
+ sethi %hi(prom_entry_lock), %g2
+1: ldstub [%g2 + %lo(prom_entry_lock)], %g1
+ brnz,pn %g1, 1b
+ membar #StoreLoad | #StoreStore
+
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x10], %l2
+ mov %sp, %l1
+ add %l2, -(192 + 128), %sp
+ flushw
+
+ sethi %hi(call_method), %g2
+ or %g2, %lo(call_method), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 5, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ sethi %hi(itlb_load), %g2
+ or %g2, %lo(itlb_load), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(KERNBASE), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28]
+ sethi %hi(kern_locked_tte_data), %g2
+ ldx [%g2 + %lo(kern_locked_tte_data)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30]
+ mov 63, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x38]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
+
+ sethi %hi(call_method), %g2
+ or %g2, %lo(call_method), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x00]
+ mov 5, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x08]
+ mov 1, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x10]
+ sethi %hi(dtlb_load), %g2
+ or %g2, %lo(dtlb_load), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x18]
+ sethi %hi(mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x20]
+ sethi %hi(KERNBASE), %g2
+ stx %g2, [%sp + 2047 + 128 + 0x28]
+ sethi %hi(kern_locked_tte_data), %g2
+ ldx [%g2 + %lo(kern_locked_tte_data)], %g2
+ stx %g2, [%sp + 2047 + 128 + 0x30]
+ mov 63, %g2
+ stx %g2, [%sp + 2047 + 128 + 0x38]
+ sethi %hi(p1275buf), %g2
+ or %g2, %lo(p1275buf), %g2
+ ldx [%g2 + 0x08], %o1
+ call %o1
+ add %sp, (2047 + 128), %o0
+
+ sethi %hi(prom_entry_lock), %g2
+ stb %g0, [%g2 + %lo(prom_entry_lock)]
+ membar #StoreStore | #StoreLoad
+
+ mov %l1, %sp
+ flushw
+
+ mov %l0, %o0
+
+ wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
+ wr %g0, 0, %fprs
sethi %uhi(PAGE_OFFSET), %g4
sllx %g4, 32, %g4
@@ -37,99 +123,6 @@ sparc64_cpu_startup:
srl %o0, 0, %o0
ldx [%o0], %g6
- sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
- sllx %g5, 32, %g5
- or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
- sethi %uhi(_PAGE_PADDR), %g3
- or %g3, %ulo(_PAGE_PADDR), %g3
- sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR), %g7
- or %g7, %lo(_PAGE_PADDR), %g7
- or %g3, %g7, %g3
-
- clr %l0
- set 0x1fff, %l2
- rd %pc, %l3
- andn %l3, %l2, %g2
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1
- cmp %g1, %g2
- be,a,pn %xcc, 2f
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
-2: nop
- nop
- nop
- and %g1, %g3, %g1
- sub %g1, %g2, %g1
- or %g5, %g1, %g5
- clr %l0
- sethi %hi(KERNBASE), %g3
- sethi %hi(KERNBASE<<1), %g7
- mov TLB_TAG_ACCESS, %l7
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_IMMU
- stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
-2: cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop
- nop
- nop
- clr %l0
-1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
-2: cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop
- nop
- nop
- sethi %hi(KERNBASE), %g3
- mov (63 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- b,pt %xcc, 1f
- nop
-1: set bounce, %g2
- jmpl %g2 + %g0, %g0
- nop
-
-bounce:
wr %g0, ASI_P, %asi
mov PRIMARY_CONTEXT, %g7
@@ -139,24 +132,6 @@ bounce:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- mov TLB_TAG_ACCESS, %g2
- stxa %g3, [%g2] ASI_IMMU
- stxa %g3, [%g2] ASI_DMMU
-
- mov (63 << 3), %g7
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
- flush %g3
- membar #Sync
-
mov 1, %g5
sllx %g5, (PAGE_SHIFT + 1), %g5
sub %g5, (REGWIN_SZ + STACK_BIAS), %g5
@@ -169,12 +144,12 @@ bounce:
/* Setup the trap globals, then we can resurface. */
rdpr %pstate, %o1
mov %g6, %o2
- wrpr %o1, (PSTATE_AG | PSTATE_IE), %pstate
+ wrpr %o1, PSTATE_AG, %pstate
sethi %hi(sparc64_ttable_tl0), %g5
wrpr %g5, %tba
mov %o2, %g6
- wrpr %o1, (PSTATE_MG | PSTATE_IE), %pstate
+ wrpr %o1, PSTATE_MG, %pstate
#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
#ifdef THIS_IS_CHEETAH
@@ -200,7 +175,7 @@ bounce:
#undef VPTE_BASE
/* Setup interrupt globals, we are always SMP. */
- wrpr %o1, (PSTATE_IG | PSTATE_IE), %pstate
+ wrpr %o1, PSTATE_IG, %pstate
/* Get our UPA MID. */
lduw [%o2 + AOFF_task_processor], %g1
@@ -210,12 +185,15 @@ bounce:
/* In theory this is: &(cpu_data[this_upamid].irq_worklists[0]) */
sllx %g1, 7, %g1
add %g5, %g1, %g1
- add %g1, 64, %g1
+ add %g1, 64, %g6
wrpr %g0, 0, %wstate
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
+ call prom_set_trap_table
+ sethi %hi(sparc64_ttable_tl0), %o0
+
call smp_callin
nop
call cpu_idle
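
The rewritten trampoline above no longer walks and rewrites the TLB entries by hand: each slave CPU grabs prom_entry_lock, switches to the PROM's stack, and issues two firmware "call-method" requests (SUNW,itlb-load and SUNW,dtlb-load) to install the locked KERNBASE mapping, then restores its own stack and registers. A rough C view of the eight argument cells the assembly stores at %sp + 2047 + 128 (the interpretation of each cell follows the code above; this is illustrative, not a firmware API definition):

    #include <stdio.h>
    #include <stdint.h>

    struct p1275_cells {
        const char *service;     /* 0x00: "call-method"                       */
        uint64_t    nargs;       /* 0x08: 5 argument cells follow             */
        uint64_t    nrets;       /* 0x10: 1 return cell                       */
        const char *method;      /* 0x18: "SUNW,itlb-load" / "SUNW,dtlb-load" */
        uint64_t    mmu_ihandle; /* 0x20: cached MMU node handle              */
        uint64_t    vaddr;       /* 0x28: KERNBASE                            */
        uint64_t    tte;         /* 0x30: kern_locked_tte_data                */
        uint64_t    tlb_index;   /* 0x38: entry 63, the locked slot           */
    };

    int main(void)
    {
        struct p1275_cells req = {
            "call-method", 5, 1, "SUNW,itlb-load",
            0x0 /* ihandle */, 0x400000 /* KERNBASE, illustrative */,
            0x0 /* tte */, 63
        };

        printf("%s(%s) -> lock TLB entry %llu\n",
               req.service, req.method, (unsigned long long)req.tlb_index);
        return 0;
    }
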
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index f4f2287df..845809709 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1,4 +1,4 @@
-/* $Id: traps.c,v 1.62 1999/08/31 19:25:35 davem Exp $
+/* $Id: traps.c,v 1.64 1999/12/19 23:53:13 davem Exp $
* arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -26,6 +26,7 @@
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
+#include <asm/psrcompat.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
@@ -519,8 +520,22 @@ void do_fpe_common(struct pt_regs *regs)
regs->tpc = regs->tnpc;
regs->tnpc += 4;
} else {
+ unsigned long fsr = current->thread.xfsr[0];
+
current->thread.sig_address = regs->tpc;
current->thread.sig_desc = SUBSIG_FPERROR;
+ if ((fsr & 0x1c000) == (1 << 14)) {
+ if (fsr & 0x01)
+ current->thread.sig_desc = SUBSIG_FPINEXACT;
+ else if (fsr & 0x02)
+ current->thread.sig_desc = SUBSIG_FPDIVZERO;
+ else if (fsr & 0x04)
+ current->thread.sig_desc = SUBSIG_FPUNFLOW;
+ else if (fsr & 0x08)
+ current->thread.sig_desc = SUBSIG_FPOVFLOW;
+ else if (fsr & 0x10)
+ current->thread.sig_desc = SUBSIG_FPINTOVFL;
+ }
send_sig(SIGFPE, current, 1);
}
}
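
The do_fpe_common() change above refines the SIGFPE sub-code by inspecting the saved %fsr: when the trap-type field selects an IEEE exception, the low current-exception bits identify which condition fired, tested in the same order as the code. Standalone decoder using the same masks (the string names stand in for the SUBSIG_* constants):

    #include <stdio.h>

    static const char *decode_fsr(unsigned long fsr)
    {
        if ((fsr & 0x1c000) != (1 << 14))
            return "generic-fp-error";      /* not an IEEE exception trap */
        if (fsr & 0x01) return "inexact";
        if (fsr & 0x02) return "divide-by-zero";
        if (fsr & 0x04) return "underflow";
        if (fsr & 0x08) return "overflow";
        if (fsr & 0x10) return "int-overflow";
        return "generic-fp-error";
    }

    int main(void)
    {
        printf("%s\n", decode_fsr((1 << 14) | 0x02));   /* divide-by-zero */
        printf("%s\n", decode_fsr((1 << 14) | 0x08));   /* overflow       */
        printf("%s\n", decode_fsr(0x08));               /* generic        */
        return 0;
    }
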
@@ -564,7 +579,9 @@ void do_tof(struct pt_regs *regs)
void do_div0(struct pt_regs *regs)
{
- send_sig(SIGILL, current, 1);
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_IDIVZERO;
+ send_sig(SIGFPE, current, 1);
}
void instruction_dump (unsigned int *pc)
@@ -712,10 +729,12 @@ void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long n
send_sig(SIGILL, current, 1);
}
-void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
- unsigned long psr)
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
+ unsigned long npc, unsigned long psr)
{
- send_sig(SIGILL, current, 1);
+ current->thread.sig_address = regs->tpc;
+ current->thread.sig_desc = SUBSIG_IDIVZERO;
+ send_sig(SIGFPE, current, 1);
}
/* Trap level 1 stuff or other traps we should never see... */
@@ -842,6 +861,13 @@ void cache_flush_trap(struct pt_regs *regs)
}
#endif
+void do_getpsr(struct pt_regs *regs)
+{
+ regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+}
+
void trap_init(void)
{
/* Attach to the address space of init_task. */
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index b378756c0..257d56d6b 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -1,4 +1,4 @@
-/* $Id: ttable.S,v 1.29 1999/08/31 19:25:37 davem Exp $
+/* $Id: ttable.S,v 1.30 1999/12/01 23:52:03 davem Exp $
* ttable.S: Sparc V9 Trap Table(s) with SpitFire extensions.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -120,7 +120,8 @@ tl0_resv11c: TRAP_UTRAP(UT_TRAP_INSTRUCTION_28,0x11c) TRAP_UTRAP(UT_TRAP_INSTRUC
tl0_resv11e: TRAP_UTRAP(UT_TRAP_INSTRUCTION_30,0x11e) TRAP_UTRAP(UT_TRAP_INSTRUCTION_31,0x11f)
tl0_getcc: GETCC_TRAP
tl0_setcc: SETCC_TRAP
-tl0_resv122: BTRAP(0x122) BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
+tl0_getpsr: TRAP(do_getpsr)
+tl0_resv123: BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
tl0_solindir: INDIRECT_SOLARIS_SYSCALL(156)
tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)