summaryrefslogtreecommitdiffstats
path: root/arch/sparc
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1997-01-07 02:33:00 +0000
committer <ralf@linux-mips.org>1997-01-07 02:33:00 +0000
commitbeb116954b9b7f3bb56412b2494b562f02b864b1 (patch)
tree120e997879884e1b9d93b265221b939d2ef1ade1 /arch/sparc
parent908d4681a1dc3792ecafbe64265783a86c4cccb6 (diff)
Import of Linux/MIPS 2.1.14
Diffstat (limited to 'arch/sparc')
-rw-r--r--arch/sparc/Makefile31
-rw-r--r--arch/sparc/boot/Makefile12
-rw-r--r--arch/sparc/boot/README21
-rw-r--r--arch/sparc/boot/bare.S160
-rw-r--r--arch/sparc/boot/bare.h18
-rw-r--r--arch/sparc/boot/empirical.h8
-rw-r--r--arch/sparc/boot/init_me.c69
-rw-r--r--arch/sparc/config.in310
-rw-r--r--arch/sparc/defconfig187
-rw-r--r--arch/sparc/kernel/Makefile66
-rw-r--r--arch/sparc/kernel/auxio.c47
-rw-r--r--arch/sparc/kernel/cpu.c161
-rw-r--r--arch/sparc/kernel/devices.c84
-rw-r--r--arch/sparc/kernel/entry.S2418
-rw-r--r--arch/sparc/kernel/errtbls.c276
-rw-r--r--arch/sparc/kernel/etrap.S319
-rw-r--r--arch/sparc/kernel/head.S1885
-rw-r--r--arch/sparc/kernel/idprom.c227
-rw-r--r--arch/sparc/kernel/ioport.c133
-rw-r--r--arch/sparc/kernel/irq.c608
-rw-r--r--arch/sparc/kernel/ksyms.c34
-rw-r--r--arch/sparc/kernel/probe.c432
-rw-r--r--arch/sparc/kernel/process.c485
-rw-r--r--arch/sparc/kernel/promops.c107
-rw-r--r--arch/sparc/kernel/ptrace.c891
-rw-r--r--arch/sparc/kernel/rirq.S289
-rw-r--r--arch/sparc/kernel/rtrap.S338
-rw-r--r--arch/sparc/kernel/sclow.S193
-rw-r--r--arch/sparc/kernel/setup.c461
-rw-r--r--arch/sparc/kernel/signal.c550
-rw-r--r--arch/sparc/kernel/smp.c650
-rw-r--r--arch/sparc/kernel/solaris.c59
-rw-r--r--arch/sparc/kernel/sparc-stub.c688
-rw-r--r--arch/sparc/kernel/sparc_ksyms.c187
-rw-r--r--arch/sparc/kernel/sun4c_irq.c187
-rw-r--r--arch/sparc/kernel/sun4m_irq.c340
-rw-r--r--arch/sparc/kernel/sunos_asm.S80
-rw-r--r--arch/sparc/kernel/sunos_ioctl.c191
-rw-r--r--arch/sparc/kernel/switch.S96
-rw-r--r--arch/sparc/kernel/sys_solaris.c29
-rw-r--r--arch/sparc/kernel/sys_sparc.c237
-rw-r--r--arch/sparc/kernel/sys_sunos.c1178
-rw-r--r--arch/sparc/kernel/systbls.S478
-rw-r--r--arch/sparc/kernel/tadpole.c120
-rw-r--r--arch/sparc/kernel/tick14.c84
-rw-r--r--arch/sparc/kernel/time.c361
-rw-r--r--arch/sparc/kernel/trampoline.S93
-rw-r--r--arch/sparc/kernel/traps.c344
-rw-r--r--arch/sparc/kernel/unaligned.c395
-rw-r--r--arch/sparc/kernel/windows.c124
-rw-r--r--arch/sparc/kernel/wof.S421
-rw-r--r--arch/sparc/kernel/wuf.S351
-rw-r--r--arch/sparc/lib/Makefile47
-rw-r--r--arch/sparc/lib/ashrdi3.S24
-rw-r--r--arch/sparc/lib/blockops.S103
-rw-r--r--arch/sparc/lib/checksum.S439
-rw-r--r--arch/sparc/lib/memcmp.S314
-rw-r--r--arch/sparc/lib/memcpy.S364
-rw-r--r--arch/sparc/lib/memscan.S135
-rw-r--r--arch/sparc/lib/memset.S166
-rw-r--r--arch/sparc/lib/memset.c71
-rw-r--r--arch/sparc/lib/mul.S22
-rw-r--r--arch/sparc/lib/rem.S221
-rw-r--r--arch/sparc/lib/sdiv.S222
-rw-r--r--arch/sparc/lib/strlen.S88
-rw-r--r--arch/sparc/lib/strncmp.S120
-rw-r--r--arch/sparc/lib/strncpy_from_user.S49
-rw-r--r--arch/sparc/lib/udiv.S209
-rw-r--r--arch/sparc/lib/umul.S25
-rw-r--r--arch/sparc/lib/urem.S207
-rw-r--r--arch/sparc/mm/Makefile27
-rw-r--r--arch/sparc/mm/asyncd.c189
-rw-r--r--arch/sparc/mm/fault.c398
-rw-r--r--arch/sparc/mm/generic.c124
-rw-r--r--arch/sparc/mm/init.c414
-rw-r--r--arch/sparc/mm/loadmmu.c165
-rw-r--r--arch/sparc/mm/srmmu.c3477
-rw-r--r--arch/sparc/mm/sun4c.c1965
-rw-r--r--arch/sparc/mm/vac-flush.c94
-rw-r--r--arch/sparc/prom/Makefile23
-rw-r--r--arch/sparc/prom/bootstr.c67
-rw-r--r--arch/sparc/prom/console.c220
-rw-r--r--arch/sparc/prom/devmap.c56
-rw-r--r--arch/sparc/prom/devops.c97
-rw-r--r--arch/sparc/prom/init.c86
-rw-r--r--arch/sparc/prom/memory.c215
-rw-r--r--arch/sparc/prom/misc.c154
-rw-r--r--arch/sparc/prom/mp.c135
-rw-r--r--arch/sparc/prom/palloc.c44
-rw-r--r--arch/sparc/prom/printf.c43
-rw-r--r--arch/sparc/prom/ranges.c145
-rw-r--r--arch/sparc/prom/segment.c29
-rw-r--r--arch/sparc/prom/tree.c368
93 files changed, 24881 insertions, 3993 deletions
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 88bcd6578..512eb6122 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -1,4 +1,4 @@
-#
+# $Id: Makefile,v 1.20 1996/04/16 08:02:50 davem Exp $
# sparc/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
@@ -7,27 +7,36 @@
# Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
#
-
# If the solaris /bin/sh wasn't so broken, I wouldn't need the following
# line...
SHELL =/bin/bash
#
-# How to link, we send the linker the address at which the text section
-# is to start. The prom loads us at 0x0-kernel_size. There is also an
-# alias of this address space at 0xf8000000-(0xf8000000+kernel_size) but
-# I ignore it and eliminate those mappings during vm initialization and
-# just leave the low mapping.
-#
-LINKFLAGS = -N -Ttext 0x00004000
+# Uncomment the first CFLAGS if you are doing kgdb source level
+# debugging of the kernel to get the proper debugging information.
+
+#CFLAGS := $(CFLAGS) -g -pipe
CFLAGS := $(CFLAGS) -pipe
+LINKFLAGS = -N -Ttext 0xf0004000
+
HEAD := arch/sparc/kernel/head.o
-SUBDIRS := $(SUBDIRS) arch/sparc/kernel arch/sparc/lib arch/sparc/mm
+SUBDIRS := $(SUBDIRS) arch/sparc/kernel arch/sparc/lib arch/sparc/mm \
+ arch/sparc/prom
+
ARCHIVES := arch/sparc/kernel/kernel.o arch/sparc/mm/mm.o $(ARCHIVES)
-LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/lib/lib.a
+
+LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc/prom/promlib.a \
+ $(TOPDIR)/arch/sparc/lib/lib.a
+
+ifdef CONFIG_AP1000
+SUBDIRS := $(SUBDIRS) arch/sparc/ap1000
+ARCHIVES := $(TOPDIR)/arch/sparc/ap1000/ap1000lib.o $(ARCHIVES)
+DRIVERS := $(DRIVERS) drivers/ap1000/ap1000.a
+endif
archclean:
+ rm -f $(TOPDIR)/arch/sparc/boot/boot
archdep:
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
new file mode 100644
index 000000000..b9d54e652
--- /dev/null
+++ b/arch/sparc/boot/Makefile
@@ -0,0 +1,12 @@
+# $Id: Makefile,v 1.3 1996/08/04 08:40:58 ecd Exp $
+# Makefile for the Sparc low level /boot module.
+#
+# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+
+all: boot
+
+boot:
+ @echo "Nothing special to be done for 'boot' on Linux/SPARC."
+
+dep:
+
diff --git a/arch/sparc/boot/README b/arch/sparc/boot/README
new file mode 100644
index 000000000..189a72d29
--- /dev/null
+++ b/arch/sparc/boot/README
@@ -0,0 +1,21 @@
+This directory will contain the code necessary to compile and link the
+/boot program which is necessary to boot on the Sparc. This program
+is real ugly and it knows too much. It must be able to not only boot
+off of the root partition but also be able to netboot. This means
+that it knows about RPC and NFS (bleech, yuck, eeewwwww!!) so that it
+can remote mount the root directory to fetch the kernel. Also it must
+be able to ARP for its IP address and who its boot server is. I
+think I'm getting sick.
+
+Regardless for now I will concentrate on the low-level stuff necessary
+to get the thing going. This means the low-level entry code, etc.
+The prom knows how to get "us" if we have the proper boot blocks,
+actually the boot blocks live in our logical partition on a hard drive
+whereas over NFS this isn't applicable. We have the boot blocks in
+our data area either way because we can be dual purpose.
+
+More will come....
+
+Hopefully I can write this such that it will work on almost all SUN
+machines in existence. We'll see ;(
+
diff --git a/arch/sparc/boot/bare.S b/arch/sparc/boot/bare.S
new file mode 100644
index 000000000..ab3508591
--- /dev/null
+++ b/arch/sparc/boot/bare.S
@@ -0,0 +1,160 @@
+/* $Id: bare.S,v 1.4 1996/04/23 01:53:40 davem Exp $
+ * base.S: Ugly low-level boot program entry code. The job of this
+ * module is to parse the boot flags, try to mount the remote
+ * root filesystem and load the kernel into virtual memory.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include "bare.h"
+#include <asm/ptrace.h>
+
+ .data
+ .globl C_LABEL(romvec)
+ .globl C_LABEL(idp_ptr)
+
+C_LABEL(romvec):
+ .word 0
+C_LABEL(idp_ptr):
+ .word 0
+
+ .text
+ .align 8
+ .globl C_LABEL(first_adr_in_text)
+
+C_LABEL(first_adr_in_text):
+
+ /* Grrr, boot block, scratching my head... */
+ .globl C_LABEL(b_block) /* Start of actual boot block */
+ .globl C_LABEL(b_block_size) /* In bytes */
+ .globl C_LABEL(b_block_cksum) /* Checksum of boot block bytes */
+
+ b start_of_execution /* XXX Hack */
+ nop
+
+ .align 8
+C_LABEL(b_block):
+ .skip (BOOTBLOCK_NENTRIES * BOOTBLOCK_ENTSIZE)
+
+C_LABEL(b_block_size):
+ .word 0
+
+C_LABEL(b_block_cksum):
+ .word 0
+
+/* Ok, the prom has left in %o0 the PROM pointer. We leave it here
+ * for when we jump into the kernel. So save out of this window before
+ * you dick with %o0. As far as I know we could be loaded *anywhere*, so
+ * we relocate ourselves to the "linked" location. Self modifying code rules.
+ */
+
+start_of_execution:
+ sethi %hi(C_LABEL(first_adr_in_text)), %o1 ! This is our top
+ or %o1, %lo(C_LABEL(first_adr_in_text)), %o1 ! of stack too.
+ sub %o1, REGWIN_SZ, %o1
+ add %o1, 0x7, %o1
+ andn %o1, 0x7, %o1
+ save %o1, 0x0, %sp ! save is an add
+here:
+ call there
+ sethi %hi(here), %o4
+there:
+ sub %o7, here-C_LABEL(first_adr_in_text), %o5
+ or %o4, %lo(here), %o4
+ cmp %o4, %o7
+ be loaded_ok
+ nop
+
+ /* Gotta relocate, compute our size sans bss segment. */
+ set C_LABEL(edata)+4, %o3
+ set C_LABEL(first_adr_in_text), %o2
+ sub %o3, %o2, %o3
+rel_loop:
+ ld [%o5], %o4
+ add %o5, 0x4, %o5
+ st %o4, [%o2]
+ subcc %o3, 0x4, %o3
+ bg rel_loop
+ add %o2, 0x4, %o2
+
+ /* Pray that we are now in a sane place in memory */
+ sethi %hi(loaded_ok), %o2
+ or %o2, %lo(loaded_ok), %o2
+ jmp %o2
+ nop
+
+loaded_ok:
+ /* Save the PROM pointer */
+ sethi %hi(C_LABEL(romvec)), %o1
+ or %o1, %lo(C_LABEL(romvec)), %o1
+ st %i0, [%o1]
+
+ /* Build a PSR we can live with */
+ rd %psr, %o1
+
+#if 0
+ andn %o1, PSR_PIL, %o1
+ sethi %hi(SANE_PSR), %g4
+ or %g4, %lo(SANE_PSR), %g4
+ or %o1, %g4, %o1
+#endif
+
+ /* V8 book says this works to calculate num_windows */
+ sethi %hi(0xffffffff), %g2
+ rd %wim, %g3
+ or %g2, %lo(0xffffffff), %g2
+ wr %g2, 0x0, %wim
+ WRITE_PAUSE
+
+ rd %wim, %g4
+ WRITE_PAUSE
+
+ wr %g3, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Restore old %psr */
+ wr %o1, 0x0, %psr
+ WRITE_PAUSE
+
+ or %g0, 0x0, %g3
+1:
+ srl %g4, 0x1, %g4
+ subcc %g4, 0x0, %g0
+ bne 1b
+ add %g3, 0x1, %g3
+
+ /* %g3 now contains nwindows */
+ sethi %hi(C_LABEL(nwindows)), %o4
+ st %g3, [%o4 + %lo(C_LABEL(nwindows))]
+
+ /* Now zero out our bss segment, lord knows the nasty prom monster
+ * didn't do it for us.
+ */
+ sethi %hi(C_LABEL(end)), %g1
+ or %g1, %lo(C_LABEL(end)), %g1
+ add %g1, 0x4, %g1
+ sethi %hi(C_LABEL(edata)), %g2
+ or %g2, %lo(C_LABEL(edata)), %g2
+
+ /* Slow, inefficient, who cares, this is messy boot code */
+bzero_bss_loop:
+ st %g0, [%g2]
+ add %g2, 0x4, %g2
+ cmp %g2, %g1
+ bl bzero_bss_loop
+ nop
+
+ call C_LABEL(init_me) ! Fun with empirical constants and prom
+ nop
+
+ /* Dump back into the prom */
+get_me_out_of_here:
+ set C_LABEL(romvec), %g2
+ ld [%g2], %g2
+ ld [%g2 + 0x74], %g2
+ restore
+ call %g2
+ nop
+
+
+
diff --git a/arch/sparc/boot/bare.h b/arch/sparc/boot/bare.h
new file mode 100644
index 000000000..b84178c45
--- /dev/null
+++ b/arch/sparc/boot/bare.h
@@ -0,0 +1,18 @@
+/* $Id: bare.h,v 1.2 1995/11/25 00:57:41 davem Exp $
+ * bare.h: Defines for the low level entry code of the BOOT program.
+ * We include in the head.h stuff that the real kernel uses
+ * and this saves a lot of repetition here.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/psr.h>
+#include <asm/cprefix.h>
+
+#define SANE_PIL (0xd00) /* No interrupts except clock and unmaskable NMI's */
+#define SANE_PSR (SANE_PIL|PSR_S|PSR_ET)
+
+#define BOOTBLOCK_NENTRIES 0x40 /* Number of entries in the boot block */
+#define BOOTBLOCK_ENTSIZE 0x04 /* Size in bytes of each boot block entry */
+
diff --git a/arch/sparc/boot/empirical.h b/arch/sparc/boot/empirical.h
new file mode 100644
index 000000000..e13b8c80a
--- /dev/null
+++ b/arch/sparc/boot/empirical.h
@@ -0,0 +1,8 @@
+/* $Id: empirical.h,v 1.2 1996/04/23 01:53:42 davem Exp $
+ * empirical.h: Nasty hacks....
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#define DEF_BOGO 25
+
diff --git a/arch/sparc/boot/init_me.c b/arch/sparc/boot/init_me.c
new file mode 100644
index 000000000..ad26a1969
--- /dev/null
+++ b/arch/sparc/boot/init_me.c
@@ -0,0 +1,69 @@
+/* $Id: init_me.c,v 1.3 1996/04/21 10:30:09 davem Exp $
+ * init_me.c: Initialize empirical constants and gather some info from
+ * the boot prom.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/openprom.h> /* For property declarations and the prom structs */
+#include <asm/oplib.h>
+#include <asm/vac-ops.h>
+
+#include "empirical.h" /* Don't ask... */
+
+#define DEBUG_INIT_ME /* Tell me what's going on */
+
+unsigned int nwindows; /* Set in bare.S */
+unsigned int nwindowsm1;
+unsigned int pac_or_vac; /* 0 means "dunno" 1 means "VAC" 2 means "PAC" */
+unsigned int pvac_size; /* Use the same two variables for a PAC and VAC */
+unsigned int pvac_linesize;
+unsigned int pac_size;
+int num_segmaps;
+int num_contexts;
+unsigned int BOGOMIPS; /* bogosity without the VAC cache on */
+unsigned int BOGOMIPS_WCACHE; /* bogosity with the VAC cache */
+unsigned int delay_factor;
+
+extern int prom_node_root;
+void (*printk)(const char *str, ...);
+
+void init_me(void)
+{
+ unsigned int grrr;
+
+ printk = romvec->pv_printf;
+ prom_node_root = prom_nextnode(0);
+ prom_getprop(prom_node_root, "mmu-npmg", &num_segmaps,
+ sizeof(unsigned int));
+
+ pvac_size = prom_getint_default(prom_node_root, "vac-size", 65536);
+
+ pvac_linesize = prom_getint_default(prom_node_root, "vac-linesize", 16);
+
+ grrr = prom_getint_default(prom_node_root, "mips-on", 0);
+ if(!grrr) {
+ grrr = prom_getint_default(prom_node_root, "clock-frequency", 0);
+ if(grrr > 15000000 && grrr < 100000000) {
+ BOGOMIPS = 3;
+ BOGOMIPS_WCACHE = grrr / 1000000;
+ } else {
+ BOGOMIPS = DEF_BOGO;
+ BOGOMIPS_WCACHE = DEF_BOGO;
+ }
+ } else (BOGOMIPS_WCACHE = grrr,
+ BOGOMIPS = prom_getint(prom_node_root, "mips-off"));
+
+#ifdef DEBUG_INIT_ME
+ (*(romvec->pv_printf))("\nBOGOMIPS %d\n", (int) BOGOMIPS);
+ (*(romvec->pv_printf))("BOGOMIPS_WCACHE %d\n", (int) BOGOMIPS_WCACHE);
+ (*(romvec->pv_printf))("pvac_size %d\n", (int) pvac_size);
+ (*(romvec->pv_printf))("pvac_linesize %d\n", (int) pvac_linesize);
+ (*(romvec->pv_printf))("num_segmaps %d\n", (int) num_segmaps);
+#endif
+
+ delay_factor = (BOGOMIPS > 3) ? ((BOGOMIPS - 2) >> 1) : 11;
+
+ (*(romvec->pv_printf))("\nLILO: \n");
+ return;
+}
diff --git a/arch/sparc/config.in b/arch/sparc/config.in
index be9336eed..45168ffe6 100644
--- a/arch/sparc/config.in
+++ b/arch/sparc/config.in
@@ -1,246 +1,142 @@
-#
-# arch/sparc/config.in
-#
-# Bare minimum configuration file for the Sparc.
-#
-# Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
-#
+# $Id: config.in,v 1.23 1996/10/28 01:24:40 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
+mainmenu_name "Linux/SPARC Kernel Configuration"
-comment 'Sparc Kernel setup'
-
-bool 'Sparc V8 kernel' CONFIG_SPARC_V8 y
-bool 'Sparc SMP support' CONFIG_LINUX_SMP n
-bool 'Sparc SUN4M support' CONFIG_SUN4M n
-bool 'Sparc Reference MMU' CONFIG_SRMMU n
-bool 'Networking support' CONFIG_NET n
-bool 'Limit memory to low 16MB' CONFIG_MAX_16M n
-bool 'System V IPC' CONFIG_SYSVIPC y
-bool 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF y
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
-if [ "$CONFIG_NET" = "y" ]; then
-comment 'Networking options'
-bool 'TCP/IP networking' CONFIG_INET y
-if [ "$CONFIG_INET" "=" "y" ]; then
-bool 'IP forwarding/gatewaying' CONFIG_IP_FORWARD n
-bool 'IP multicasting (ALPHA)' CONFIG_IP_MULTICAST n
-bool 'IP firewalling' CONFIG_IP_FIREWALL n
-bool 'IP accounting' CONFIG_IP_ACCT n
-comment '(it is safe to leave these untouched)'
-bool 'PC/TCP compatibility mode' CONFIG_INET_PCTCP n
-bool 'Reverse ARP' CONFIG_INET_RARP n
-bool 'Assume subnets are local' CONFIG_INET_SNARL y
-bool 'Disable NAGLE algorithm (normally enabled)' CONFIG_TCP_NAGLE_OFF n
-fi
-bool 'The IPX protocol' CONFIG_IPX n
-#bool 'Amateur Radio AX.25 Level 2' CONFIG_AX25 n
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
+ bool 'Kernel daemon support (e.g. autoload of modules)' CONFIG_KERNELD
fi
+endmenu
-comment 'SCSI support'
-
-bool 'SCSI support?' CONFIG_SCSI n
-
-if [ "$CONFIG_SCSI" = "n" ]; then
+mainmenu_option next_comment
+comment 'General setup'
-comment 'Skipping SCSI configuration options...'
+bool 'Support for AP1000 multicomputer' CONFIG_AP1000
+if [ "$CONFIG_AP1000" = "y" ]; then
+ define_bool CONFIG_NO_KEYBOARD y
+ define_bool CONFIG_APFDDI y
+ define_bool CONFIG_APBLOCK y
+ define_bool CONFIG_APBIF y
+ define_bool CONFIG_DDV y
else
-
-comment 'SCSI support type (disk, tape, CDrom)'
-
-bool 'SCSI disk support' CONFIG_BLK_DEV_SD y
-bool 'SCSI tape support' CONFIG_CHR_DEV_ST n
-bool 'SCSI CDROM support' CONFIG_BLK_DEV_SR n
-bool 'SCSI generic support' CONFIG_CHR_DEV_SG n
-
-comment 'SCSI low-level drivers'
-
-bool 'Adaptec AHA152X support' CONFIG_SCSI_AHA152X n
-bool 'Adaptec AHA1542 support' CONFIG_SCSI_AHA1542 y
-bool 'Adaptec AHA1740 support' CONFIG_SCSI_AHA1740 n
-bool 'Adaptec AHA274X/284X support' CONFIG_SCSI_AHA274X n
-bool 'BusLogic SCSI support' CONFIG_SCSI_BUSLOGIC n
-bool 'UltraStor 14F/34F support' CONFIG_SCSI_U14_34F n
-bool 'Future Domain 16xx SCSI support' CONFIG_SCSI_FUTURE_DOMAIN n
-bool 'Generic NCR5380 SCSI support' CONFIG_SCSI_GENERIC_NCR5380 n
-if [ "$CONFIG_PCI" = "y" ]; then
- bool 'NCR53c7,8xx SCSI support' CONFIG_SCSI_NCR53C7xx n
-fi
-bool 'Always IN2000 SCSI support (test release)' CONFIG_SCSI_IN2000 n
-bool 'PAS16 SCSI support' CONFIG_SCSI_PAS16 n
-bool 'QLOGIC SCSI support' CONFIG_SCSI_QLOGIC n
-bool 'Seagate ST-02 and Future Domain TMC-8xx SCSI support' CONFIG_SCSI_SEAGATE n
-bool 'Trantor T128/T128F/T228 SCSI support' CONFIG_SCSI_T128 n
-bool 'UltraStor SCSI support' CONFIG_SCSI_ULTRASTOR n
-bool '7000FASST SCSI support' CONFIG_SCSI_7000FASST n
-bool 'EATA ISA/EISA (DPT PM2011/021/012/022/122/322) support' CONFIG_SCSI_EATA n
-#bool 'SCSI debugging host adapter' CONFIG_SCSI_DEBUG n
+ # Global things across all Sun machines.
+ define_bool CONFIG_SBUS y
+ define_bool CONFIG_SBUSCHAR y
+ define_bool CONFIG_SUN_MOUSE y
+ define_bool CONFIG_SERIAL y
+ define_bool CONFIG_SUN_SERIAL y
+ define_bool CONFIG_SUN_KEYBOARD y
+ define_bool CONFIG_SUN_CONSOLE y
+ define_bool CONFIG_SUN_AUXIO y
+ define_bool CONFIG_SUN_IO y
+ source drivers/sbus/char/Config.in
fi
+tristate 'Openprom tree appears in /proc/openprom (EXPERIMENTAL)' CONFIG_SUN_OPENPROMFS
+bool 'Networking support' CONFIG_NET
+bool 'System V IPC' CONFIG_SYSVIPC
+tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
+tristate 'Kernel support for ELF binaries' CONFIG_BINFMT_ELF
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA
+fi
+endmenu
-if [ "$CONFIG_NET" = "y" ]; then
-
-comment 'Network device support'
-
-bool 'Network device support?' CONFIG_NETDEVICES y
-if [ "$CONFIG_NETDEVICES" = "n" ]; then
+mainmenu_option next_comment
+comment 'Floppy, IDE, and other block devices'
-comment 'Skipping network driver configuration options...'
+bool 'Normal floppy disk support' CONFIG_BLK_DEV_FD
-else
-bool 'Dummy net driver support' CONFIG_DUMMY n
-bool 'SLIP (serial line) support' CONFIG_SLIP n
-if [ "$CONFIG_SLIP" = "y" ]; then
- bool ' CSLIP compressed headers' SL_COMPRESSED y
- bool ' 16 channels instead of 4' SL_SLIP_LOTS n
-# bool ' SLIP debugging on' SL_DUMP y
-fi
-bool 'PPP (point-to-point) support' CONFIG_PPP n
-bool 'PLIP (parallel port) support' CONFIG_PLIP n
-bool 'Load balancing support (experimental)' CONFIG_SLAVE_BALANCING n
-bool 'Do you want to be offered ALPHA test drivers' CONFIG_NET_ALPHA n
-bool 'Western Digital/SMC cards' CONFIG_NET_VENDOR_SMC n
-if [ "$CONFIG_NET_VENDOR_SMC" = "y" ]; then
- bool 'WD80*3 support' CONFIG_WD80x3 n
- bool 'SMC Ultra support' CONFIG_ULTRA n
-fi
-bool 'AMD LANCE and PCnet (AT1500 and NE2100) support' CONFIG_LANCE n
-bool '3COM cards' CONFIG_NET_VENDOR_3COM y
-if [ "$CONFIG_NET_VENDOR_3COM" = "y" ]; then
- bool '3c501 support' CONFIG_EL1 n
- bool '3c503 support' CONFIG_EL2 n
- if [ "$CONFIG_NET_ALPHA" = "y" ]; then
- bool '3c505 support' CONFIG_ELPLUS n
- bool '3c507 support' CONFIG_EL16 n
- fi
- bool '3c509/3c579 support' CONFIG_EL3 y
-fi
-bool 'Other ISA cards' CONFIG_NET_ISA n
-if [ "$CONFIG_NET_ISA" = "y" ]; then
- bool 'Cabletron E21xx support' CONFIG_E2100 n
- bool 'DEPCA support' CONFIG_DEPCA n
- bool 'EtherWorks 3 support' CONFIG_EWRK3 n
- if [ "$CONFIG_NET_ALPHA" = "y" ]; then
-# bool 'Arcnet support' CONFIG_ARCNET n
- bool 'AT1700 support' CONFIG_AT1700 n
-# bool 'EtherExpressPro support' CONFIG_EEXPRESS_PRO n
- bool 'EtherExpress support' CONFIG_EEXPRESS n
- bool 'NI5210 support' CONFIG_NI52 n
- bool 'NI6510 support' CONFIG_NI65 n
- fi
- bool 'HP PCLAN+ (27247B and 27252A) support' CONFIG_HPLAN_PLUS n
- bool 'HP PCLAN (27245 and other 27xxx series) support' CONFIG_HPLAN n
- bool 'NE2000/NE1000 support' CONFIG_NE2000 y
- bool 'SK_G16 support' CONFIG_SK_G16 n
-fi
-bool 'EISA, VLB, PCI and on board controllers' CONFIG_NET_EISA n
-if [ "$CONFIG_NET_EISA" = "y" ]; then
- if [ "$CONFIG_NET_ALPHA" = "y" ]; then
- bool 'Ansel Communications EISA 3200 support' CONFIG_AC3200 n
- fi
- bool 'Apricot Xen-II on board ethernet' CONFIG_APRICOT n
-# bool 'DEC 21040 PCI support' CONFIG_DEC_ELCP n
-# bool 'LPL T100V 100Mbs support' CONFIG_LPL_T100 n
-# bool 'PCnet32 (32 bit VLB and PCI LANCE) support' CONFIG_PCNET32 n
- bool 'Zenith Z-Note support' CONFIG_ZNET y
-fi
-bool 'Pocket and portable adaptors' CONFIG_NET_POCKET n
-if [ "$CONFIG_NET_POCKET" = "y" ]; then
- bool 'AT-LAN-TEC/RealTek pocket adaptor support' CONFIG_ATP n
- bool 'D-Link DE600 pocket adaptor support' CONFIG_DE600 n
- bool 'D-Link DE620 pocket adaptor support' CONFIG_DE620 n
-# bool 'Silicom pocket adaptor support' CONFIG_SILICOM_PEA n
-# bool 'WaveLAN PCMCIA support' CONFIG_WaveLAN n
-# bool '3 Com 3c589 PCMCIA support' CONFIG_3C589 n
-fi
-fi
+bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD
+if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
+ tristate ' Linear (append) mode' CONFIG_MD_LINEAR
+ tristate ' RAID-0 (striping) mode' CONFIG_MD_STRIPED
fi
-comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
-
-bool 'Sony CDU31A/CDU33A CDROM driver support' CONFIG_CDU31A n
-bool 'Mitsumi (not IDE/ATAPI) CDROM driver support' CONFIG_MCD n
-bool 'Matsushita/Panasonic CDROM driver support' CONFIG_SBPCD n
-if [ "$CONFIG_SBPCD" = "y" ]; then
- bool 'Matsushita/Panasonic second CDROM controller support' CONFIG_SBPCD2 n
- if [ "$CONFIG_SBPCD2" = "y" ]; then
- bool 'Matsushita/Panasonic third CDROM controller support' CONFIG_SBPCD3 n
- if [ "$CONFIG_SBPCD3" = "y" ]; then
- bool 'Matsushita/Panasonic fourth CDROM controller support' CONFIG_SBPCD4 n
- fi
- fi
+tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
+if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
+ bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
fi
-bool 'Aztech/Orchid/Okano/Wearnes (non IDE) CDROM support' CONFIG_AZTCD n
-comment 'Filesystems'
+tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
-bool 'Standard (minix) fs support' CONFIG_MINIX_FS y
-bool 'Extended fs support' CONFIG_EXT_FS y
-bool 'Second extended fs support' CONFIG_EXT2_FS y
-bool 'xiafs filesystem support' CONFIG_XIA_FS y
-bool 'msdos fs support' CONFIG_MSDOS_FS y
-if [ "$CONFIG_MSDOS_FS" = "y" ]; then
-bool 'umsdos: Unix like fs on top of std MSDOS FAT fs' CONFIG_UMSDOS_FS n
-fi
-bool '/proc filesystem support' CONFIG_PROC_FS n
-if [ "$CONFIG_INET" = "y" ]; then
-bool 'NFS filesystem support' CONFIG_NFS_FS n
-fi
-if [ "$CONFIG_BLK_DEV_SR" = "y" -o "$CONFIG_CDU31A" = "y" -o "$CONFIG_MCD" = "y" -o "$CONFIG_SBPCD" = "y" -o "$CONFIG_BLK_DEV_IDECD" = "y" ]; then
- bool 'ISO9660 cdrom filesystem support' CONFIG_ISO9660_FS y
-else
- bool 'ISO9660 cdrom filesystem support' CONFIG_ISO9660_FS n
+endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
fi
-bool 'OS/2 HPFS filesystem support (read only)' CONFIG_HPFS_FS n
-bool 'System V and Coherent filesystem support' CONFIG_SYSV_FS y
+mainmenu_option next_comment
+comment 'SCSI support'
-comment 'character devices'
+tristate 'SCSI support' CONFIG_SCSI
-bool 'Cyclades async mux support' CONFIG_CYCLADES n
-bool 'Parallel printer support' CONFIG_PRINTER n
-bool 'Logitech busmouse support' CONFIG_BUSMOUSE n
-bool 'PS/2 mouse (aka "auxiliary device") support' CONFIG_PSMOUSE n
-if [ "$CONFIG_PSMOUSE" = "y" ]; then
-bool 'C&T 82C710 mouse port support (as on TI Travelmate)' CONFIG_82C710_MOUSE y
-fi
-bool 'Microsoft busmouse support' CONFIG_MS_BUSMOUSE n
-bool 'ATIXL busmouse support' CONFIG_ATIXL_BUSMOUSE n
+if [ "$CONFIG_SCSI" != "n" ]; then
+ comment 'SCSI support type (disk, tape, CDrom)'
+ dep_tristate 'SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI
+ dep_tristate 'SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
+ dep_tristate 'SCSI CDROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI
+ dep_tristate 'SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI
-bool 'QIC-02 tape support' CONFIG_QIC02_TAPE n
-if [ "$CONFIG_QIC02_TAPE" = "y" ]; then
-bool 'Do you want runtime configuration for QIC-02' CONFIG_QIC02_DYNCONF y
-if [ "$CONFIG_QIC02_DYNCONF" != "y" ]; then
+ comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
-comment '>>> Edit configuration parameters in ./include/linux/tpqic02.h!'
+ bool 'Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
-else
+ bool 'Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
-comment '>>> Setting runtime QIC-02 configuration is done with qic02conf'
-comment '>>> Which is available from ftp://ftp.funet.fi/pub/OS/Linux/BETA/QIC-02/'
+ mainmenu_option next_comment
+ comment 'SCSI low-level drivers'
+ bool 'Sparc ESP Scsi Driver' CONFIG_SCSI_SUNESP $CONFIG_SCSI
+ endmenu
fi
-fi
+endmenu
-bool 'QIC-117 tape support' CONFIG_FTAPE n
-if [ "$CONFIG_FTAPE" = "y" ]; then
-int ' number of ftape buffers' NR_FTAPE_BUFFERS 3
+if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Network device support'
+
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ tristate 'Dummy net driver support' CONFIG_DUMMY
+ tristate 'PPP (point-to-point) support' CONFIG_PPP
+ if [ ! "$CONFIG_PPP" = "n" ]; then
+ comment 'CCP compressors for PPP are only built as modules.'
+ fi
+ tristate 'SLIP (serial line) support' CONFIG_SLIP
+ if [ "$CONFIG_SLIP" != "n" ]; then
+ bool ' CSLIP compressed headers' CONFIG_SLIP_COMPRESSED
+ bool ' Keepalive and linefill' CONFIG_SLIP_SMART
+ bool ' Six bit SLIP encapsulation' CONFIG_SLIP_MODE_SLIP6
+ fi
+ bool 'Sun LANCE support' CONFIG_SUNLANCE
+ bool 'Sun Happy Meal 10/100baseT support' CONFIG_HAPPYMEAL
+# bool 'FDDI driver support' CONFIG_FDDI
+# if [ "$CONFIG_FDDI" = "y" ]; then
+# fi
+ fi
+ endmenu
fi
-comment 'Sound'
-
-bool 'Sound card support' CONFIG_SOUND n
+source fs/Config.in
+mainmenu_option next_comment
comment 'Kernel hacking'
-#bool 'Debug kmalloc/kfree' CONFIG_DEBUG_MALLOC n
-bool 'Kernel profiling support' CONFIG_PROFILE n
+bool 'Kernel profiling support' CONFIG_PROFILE
if [ "$CONFIG_PROFILE" = "y" ]; then
- int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
-fi
-if [ "$CONFIG_SCSI" = "y" ]; then
-bool 'Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS y
+ int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
fi
+endmenu
diff --git a/arch/sparc/defconfig b/arch/sparc/defconfig
new file mode 100644
index 000000000..6bcd1a785
--- /dev/null
+++ b/arch/sparc/defconfig
@@ -0,0 +1,187 @@
+#
+# Automatically generated make config: don't edit
+#
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODVERSIONS=y
+CONFIG_KERNELD=y
+
+#
+# General setup
+#
+# CONFIG_AP1000 is not set
+CONFIG_SBUS=y
+CONFIG_SBUSCHAR=y
+CONFIG_SUN_MOUSE=y
+CONFIG_SERIAL=y
+CONFIG_SUN_SERIAL=y
+CONFIG_SUN_KEYBOARD=y
+CONFIG_SUN_CONSOLE=y
+CONFIG_SUN_AUXIO=y
+CONFIG_SUN_IO=y
+
+#
+# SBUS Frame Buffer support
+#
+SUN_FBS_IN_PROCFS=y
+CONFIG_SUN_FB_DISPLAY=y
+SUN_FB_CGSIX=y
+SUN_FB_TCX=y
+SUN_FB_CGTHREE=y
+SUN_FB_CGFOURTEEN=y
+SUN_FB_BWTWO=y
+SUN_FB_LEO=y
+TADPOLE_FB_WEITEK=y
+SUN_FB_FAST_ONE=y
+SUN_FB_FAST_TWO=y
+SUN_FB_FAST_MONO=y
+SUN_FB_GENERIC=y
+
+#
+# Misc Linux/SPARC drivers
+#
+CONFIG_SUN_OPENPROMIO=m
+CONFIG_SUN_MOSTEK_RTC=y
+CONFIG_SUN_OPENPROMFS=m
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_JAVA=m
+
+#
+# Floppy, IDE, and other block devices
+#
+CONFIG_BLK_DEV_FD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_STRIPED=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_LOOP=m
+
+#
+# Networking options
+#
+CONFIG_FIREWALL=y
+CONFIG_NET_ALIAS=y
+CONFIG_INET=y
+CONFIG_IP_FORWARD=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_FIREWALL=y
+# CONFIG_IP_FIREWALL_VERBOSE is not set
+CONFIG_IP_MASQUERADE=y
+
+#
+# Protocol-specific masquerading support will be built as modules.
+#
+# CONFIG_IP_TRANSPARENT_PROXY is not set
+# CONFIG_IP_ALWAYS_DEFRAG is not set
+# CONFIG_IP_ACCT is not set
+# CONFIG_IP_ROUTER is not set
+CONFIG_NET_IPIP=m
+# CONFIG_IP_MROUTE is not set
+CONFIG_IP_ALIAS=m
+
+#
+# (it is safe to leave these untouched)
+#
+# CONFIG_INET_PCTCP is not set
+CONFIG_INET_RARP=m
+# CONFIG_NO_PATH_MTU_DISCOVERY is not set
+CONFIG_IP_NOSR=y
+CONFIG_SKB_LARGE=y
+CONFIG_IPV6=m
+
+#
+#
+#
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+# CONFIG_AX25 is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NETLINK is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CDrom)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_CONSTANTS=y
+
+#
+# SCSI low-level drivers
+#
+CONFIG_SCSI_SUNESP=y
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_PPP=m
+
+#
+# CCP compressors for PPP are only built as modules.
+#
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_SUNLANCE=y
+CONFIG_HAPPYMEAL=y
+
+#
+# Filesystems
+#
+CONFIG_QUOTA=y
+CONFIG_MINIX_FS=m
+CONFIG_EXT_FS=m
+CONFIG_EXT2_FS=y
+CONFIG_XIA_FS=m
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_UMSDOS_FS=m
+CONFIG_PROC_FS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_RNFS_BOOTP=y
+CONFIG_RNFS_RARP=y
+CONFIG_SMB_FS=m
+CONFIG_SMB_LONG=y
+CONFIG_NCP_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_HPFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_AMIGA_PARTITION=y
+CONFIG_UFS_FS=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SMD_DISKLABEL=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PROFILE is not set
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index b5151d77e..45d15abe8 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -1,4 +1,4 @@
-#
+# $Id: Makefile,v 1.34 1996/09/21 04:07:31 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -7,41 +7,59 @@
#
# Note 2! The CFLAGS definitions are now in the main makefile...
-.c.s:
- $(CC) $(CFLAGS) -S $<
-.s.o:
- $(AS) -o $*.o $<
-.c.o:
- $(CC) $(CFLAGS) -c $<
+ifdef SMP
+
+.S.s:
+ $(CPP) -D__ASSEMBLY__ $(AFLAGS) -ansi $< -o $*.s
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $< -o $*.o
+
+
+else
+
.S.s:
$(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
+
.S.o:
$(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
-OBJS = entry.o traps.o irq.o process.o promops.o signal.o ioport.o setup.o \
- idprom.o probe.o
+
+endif
all: kernel.o head.o
-head.o: head.s
+O_TARGET := kernel.o
+IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o
+O_OBJS := entry.o wof.o wuf.o etrap.o rtrap.o traps.o ${IRQ_OBJS} \
+ process.o signal.o ioport.o setup.o idprom.o \
+ sys_sparc.o sunos_asm.o sparc-stub.o systbls.o sys_sunos.o \
+ sunos_ioctl.o time.o windows.o cpu.o devices.o \
+ sclow.o solaris.o tadpole.o tick14.o ptrace.o sys_solaris.o \
+ unaligned.o
-head.s: head.S $(TOPDIR)/include/asm-sparc/head.h
- $(CPP) -D__ASSEMBLY__ -ansi -o $*.s $<
+OX_OBJS := sparc_ksyms.o
-kernel.o: $(OBJS)
- $(LD) -r -o kernel.o $(OBJS)
- sync
+ifdef SMP
+O_OBJS += trampoline.o smp.o rirq.o
+endif
-dep:
- $(CPP) -M *.c > .depend
+ifdef CONFIG_SUN_AUXIO
+O_OBJS += auxio.o
+endif
-dummy:
+all: kernel.o head.o
-#
-# include a dependency file if one exists
-#
-ifeq (.depend,$(wildcard .depend))
-include .depend
-endif
+ifdef SMP
+
+head.o: head.S
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c $*.S -o $*.o
+
+else
+head.o: head.S
+ $(CC) -D__ASSEMBLY__ -ansi -c $*.S -o $*.o
+
+endif
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/kernel/auxio.c b/arch/sparc/kernel/auxio.c
new file mode 100644
index 000000000..cd1fcca02
--- /dev/null
+++ b/arch/sparc/kernel/auxio.c
@@ -0,0 +1,47 @@
+/* auxio.c: Probing for the Sparc AUXIO register at boot time.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/oplib.h>
+#include <asm/io.h>
+#include <asm/auxio.h>
+
+/* Probe and map in the Auxiliary I/O register */
+unsigned char *auxio_register;
+
+void
+auxio_probe(void)
+{
+ int node, auxio_nd;
+ struct linux_prom_registers auxregs[1];
+
+ if (sparc_cpu_model == sun4d) {
+ auxio_register = 0;
+ return;
+ }
+ node = prom_getchild(prom_root_node);
+ auxio_nd = prom_searchsiblings(node, "auxiliary-io");
+ if(!auxio_nd) {
+ node = prom_searchsiblings(node, "obio");
+ node = prom_getchild(node);
+ auxio_nd = prom_searchsiblings(node, "auxio");
+ if(!auxio_nd) {
+ prom_printf("Cannot find auxio node, cannot continue...\n");
+ prom_halt();
+ }
+ }
+ prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs));
+ prom_apply_obio_ranges(auxregs, 0x1);
+ /* Map the register both read and write */
+ auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0,
+ auxregs[0].reg_size,
+ "auxiliaryIO",
+ auxregs[0].which_io, 0x0);
+ /* Fix the address on sun4m and sun4c. */
+ if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 ||
+ sparc_cpu_model == sun4c)
+ auxio_register = (unsigned char *) ((int)auxio_register | 3);
+
+ TURN_ON_LED;
+}
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
new file mode 100644
index 000000000..091002808
--- /dev/null
+++ b/arch/sparc/kernel/cpu.c
@@ -0,0 +1,161 @@
+/* cpu.c: Dinky routines to look for the kind of Sparc cpu
+ * we are on.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/head.h>
+#include <asm/psr.h>
+#include <asm/mbus.h>
+
+struct cpu_iu_info {
+ int psr_impl;
+ int psr_vers;
+ char* cpu_name; /* should be enough I hope... */
+};
+
+struct cpu_fp_info {
+ int psr_impl;
+ int fp_vers;
+ char* fp_name;
+};
+
+/* In order to get the fpu type correct, you need to take the IDPROM's
+ * machine type value into consideration too. I will fix this.
+ */
+struct cpu_fp_info linux_sparc_fpu[] = {
+ { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
+ { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
+ { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
+ /* SparcStation SLC, SparcStation1 */
+ { 0, 3, "Weitek WTL3170/2"},
+ /* SPARCstation-5 */
+ { 0, 4, "Lsi Logic/Meiko L64804 or compatible"},
+ { 0, 5, "reserved"},
+ { 0, 6, "reserved"},
+ { 0, 7, "No FPU"},
+ { 1, 0, "ROSS HyperSparc combined IU/FPU"},
+ { 1, 1, "Lsi Logic L64814"},
+ { 1, 2, "Texas Instruments TMS390-C602A"},
+ { 1, 3, "Cypress CY7C602 FPU"},
+ { 1, 4, "reserved"},
+ { 1, 5, "reserved"},
+ { 1, 6, "reserved"},
+ { 1, 7, "No FPU"},
+ { 2, 0, "BIT B5010 or B5110/20 or B5210"},
+ { 2, 1, "reserved"},
+ { 2, 2, "reserved"},
+ { 2, 3, "reserved"},
+ { 2, 4, "reserved"},
+ { 2, 5, "reserved"},
+ { 2, 6, "reserved"},
+ { 2, 7, "No FPU"},
+ /* SuperSparc 50 module */
+ { 4, 0, "SuperSparc on-chip FPU"},
+ /* SparcClassic */
+ { 4, 4, "TI MicroSparc on chip FPU"},
+ { 5, 0, "Matsushita MN10501"},
+ { 5, 1, "reserved"},
+ { 5, 2, "reserved"},
+ { 5, 3, "reserved"},
+ { 5, 4, "reserved"},
+ { 5, 5, "reserved"},
+ { 5, 6, "reserved"},
+ { 5, 7, "No FPU"},
+ { 9, 3, "Weitek on-chip FPU"},
+};
+
+#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+
+struct cpu_iu_info linux_sparc_chips[] = {
+ /* Sun4/100, 4/200, SLC */
+ { 0, 0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"},
+ /* borned STP1012PGA */
+ { 0, 4, "Fujitsu MB86904"},
+ /* SparcStation2, SparcServer 490 & 690 */
+ { 1, 0, "LSI Logic Corporation - L64811"},
+ /* SparcStation2 */
+ { 1, 1, "Cypress/ROSS CY7C601"},
+ /* Embedded controller */
+ { 1, 3, "Cypress/ROSS CY7C611"},
+ /* Ross Technologies HyperSparc */
+ { 1, 0xf, "ROSS HyperSparc RT620"},
+ { 1, 0xe, "ROSS HyperSparc RT625"},
+ /* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
+ /* Someone please write the code to support this beast! ;) */
+ { 2, 0, "Bipolar Integrated Technology - B5010"},
+ { 3, 0, "LSI Logic Corporation - unknown-type"},
+ { 4, 0, "Texas Instruments, Inc. - SuperSparc 50"},
+ /* SparcClassic -- borned STP1010TAB-50*/
+ { 4, 1, "Texas Instruments, Inc. - MicroSparc"},
+ { 4, 2, "Texas Instruments, Inc. - MicroSparc II"},
+ { 4, 3, "Texas Instruments, Inc. - SuperSparc 51"},
+ { 4, 4, "Texas Instruments, Inc. - SuperSparc 61"},
+ { 4, 5, "Texas Instruments, Inc. - unknown"},
+ { 5, 0, "Matsushita - MN10501"},
+ { 6, 0, "Philips Corporation - unknown"},
+ { 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
+ /* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
+ { 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
+ { 9, 0, "Weitek Power-UP"},
+ { 9, 1, "Weitek Power-UP"},
+ { 9, 2, "Weitek Power-UP"},
+ { 9, 3, "Weitek Power-UP"},
+ { 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+ { 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+ { 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+ { 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+ { 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+ { 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
+};
+
+#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+
+char *sparc_cpu_type[NCPUS] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
+char *sparc_fpu_type[NCPUS] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
+
+unsigned int fsr_storage;
+
+void
+cpu_probe(void)
+{
+ int psr_impl, psr_vers, fpu_vers;
+ int i, cpuid;
+
+ cpuid = get_cpuid();
+
+ psr_impl = ((get_psr()>>28)&0xf);
+ psr_vers = ((get_psr()>>24)&0xf);
+
+ fpu_vers = ((get_fsr()>>17)&0x7);
+
+ for(i = 0; i<NSPARCCHIPS; i++) {
+ if(linux_sparc_chips[i].psr_impl == psr_impl)
+ if(linux_sparc_chips[i].psr_vers == psr_vers) {
+ sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
+ break;
+ }
+ }
+
+ if(i==NSPARCCHIPS)
+ printk("DEBUG: psr.impl = 0x%x psr.vers = 0x%x\n", psr_impl,
+ psr_vers);
+
+ for(i = 0; i<NSPARCFPU; i++) {
+ if(linux_sparc_fpu[i].psr_impl == psr_impl)
+ if(linux_sparc_fpu[i].fp_vers == fpu_vers) {
+ sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
+ break;
+ }
+ }
+
+ if(i == NSPARCFPU) {
+ printk("DEBUG: psr.impl = 0x%x fsr.vers = 0x%x\n", psr_impl,
+ fpu_vers);
+ sparc_fpu_type[cpuid] = linux_sparc_fpu[31].fp_name;
+ }
+}
diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c
new file mode 100644
index 000000000..26ccf1214
--- /dev/null
+++ b/arch/sparc/kernel/devices.c
@@ -0,0 +1,84 @@
+/* devices.c: Initial scan of the prom device tree for important
+ * Sparc device nodes which we need to find.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/tasks.h>
+#include <linux/config.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+
+struct prom_cpuinfo linux_cpus[NCPUS];
+int linux_num_cpus;
+
+extern void cpu_probe(void);
+extern void clock_stop_probe(void); /* tadpole.c */
+extern void sun4c_probe_memerr_reg(void);
+
+unsigned long
+device_scan(unsigned long mem_start)
+{
+ char node_str[128];
+ int nd, prom_node_cpu, thismid;
+ int cpu_nds[NCPUS]; /* One node for each cpu */
+ int cpu_ctr = 0;
+
+#if CONFIG_AP1000
+ printk("Not scanning device list for CPUs\n");
+ linux_num_cpus = 1;
+ return mem_start;
+#endif
+
+ prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
+
+ if(strcmp(node_str, "cpu") == 0) {
+ cpu_nds[0] = prom_root_node;
+ cpu_ctr++;
+ } else {
+ int scan;
+ scan = prom_getchild(prom_root_node);
+ prom_printf("root child is %08lx\n", (unsigned long) scan);
+ nd = 0;
+ while((scan = prom_getsibling(scan)) != 0) {
+ prom_getstring(scan, "device_type", node_str, sizeof(node_str));
+ if(strcmp(node_str, "cpu") == 0) {
+ cpu_nds[cpu_ctr] = scan;
+ linux_cpus[cpu_ctr].prom_node = scan;
+ prom_getproperty(scan, "mid", (char *) &thismid, sizeof(thismid));
+ linux_cpus[cpu_ctr].mid = thismid;
+ prom_printf("Found CPU %d <node=%08lx,mid=%d>\n",
+ cpu_ctr, (unsigned long) scan,
+ thismid);
+ cpu_ctr++;
+ }
+ };
+ if(cpu_ctr == 0) {
+ printk("No CPU nodes found, cannot continue.\n");
+ /* Probably a sun4d or sun4e, Sun is trying to trick us ;-) */
+ halt();
+ }
+ printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
+ };
+ prom_node_cpu = cpu_nds[0];
+
+ linux_num_cpus = cpu_ctr;
+
+ cpu_probe();
+#if CONFIG_SUN_AUXIO
+ {
+ extern void auxio_probe(void);
+ auxio_probe();
+ }
+#endif
+ clock_stop_probe();
+
+ if (sparc_cpu_model == sun4c)
+ sun4c_probe_memerr_reg();
+
+ return mem_start;
+}
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 21548015e..dd16976b2 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,927 +1,1631 @@
-/* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
+/* $Id: entry.S,v 1.116 1996/10/27 08:35:47 davem Exp $
+ * arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
- * Sparc traps are so ugly, this code is going to go through a lot
- * of changes as I find out more interesting things. See head.S for
- * the trap table and how it works, this will show you how we get
- * to these routines.
- *
- * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
+#include <linux/config.h>
+#include <linux/errno.h>
+
#include <asm/head.h>
#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/kgdb.h>
+#include <asm/contregs.h>
+#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/cprefix.h>
#include <asm/vaddrs.h>
+#include <asm/memreg.h>
+#include <asm/page.h>
+#include <asm/winmacro.h>
+#include <asm/signal.h>
-/* Here are macros for routines we do often, this allows me to inline this
- * without making the code look real ugly. Well, the macro looks ugly too but
- * makes the trap entry code easier to understand.
- */
+#include <asm/asmmacro.h>
-/* I really don't like synthetic instructions. So I avoid them like the
- * plague.
- */
+#define curptr g6
-/* Note that when I have to write a window out, and it is a user's window, I
- * have to check that the pages of memory that I am going to throw the window(s)
- * onto are valid and are writable by the user (this is %sp to %sp + 64) before
- * I start dumping stuff there. We always assume that kernels stack is ok.
- *
- * If we have to save a kernel window, only one branch is taken. This should
- * make trap handlers quicker in this scenario.
- *
- * Once 'current' is loaded into %g6, it stays there until we leave
- * this macro.
- *
- * XXX must do some checking on the assumption that kernel stack is always ok
- */
+#define NR_SYSCALLS 255 /* Each OS is different... */
-/* I will document how this works real soon. TODO */
-
-#define TRAP_WIN_CLEAN \
- or %g0, %g5, %l5; /* we need the globals to do our work */ \
- or %g0, %g6, %l6; /* and %l0 to %l4 are loaded with important */ \
- or %g0, %g7, %l7; /* information like the psr and pc's to return to */ \
- sethi %hi( C_LABEL(current) ), %g6; \
- ld [%g6 + %lo( C_LABEL(current) )], %g6; \
- ld [%g6 + THREAD_UWINDOWS], %g7; /* how many user wins are active? */ \
- subcc %g7, 0x0, %g0; \
- bne 2f; /* If there are any, branch. */ \
- save %g0, %g0, %g0; /* Save into that window either way. */ \
- std %l0, [%sp]; /* If above shows only kernel windows */ \
-1: std %l2, [%sp + 0x8]; /* then we get here. */ \
- std %l4, [%sp + 0x10]; \
- std %l6, [%sp + 0x18]; \
- std %i0, [%sp + 0x20]; \
- std %i2, [%sp + 0x28]; \
- std %i4, [%sp + 0x30]; \
- std %i6, [%sp + 0x38]; \
- or %g0, 0x1, %g5; \
- rd %psr, %g7; \
- sll %g5, %g7, %g5; \
- wr %g5, 0x0, %wim; /* update %wim to 'now' invalid */ \
- and %g7, 0x1f, %g7; \
- st %g7, [%g6 + THREAD_WIM]; /* save 'this' threads mask */ \
- restore %g0, %g0, %g0; \
- or %g0, %l5, %g5; /* restore the globals we used */ \
- or %g0, %l6, %g6; \
- b 8f; /* we are done */ \
- or %g0, %l7, %g7; \
-2: sub %g7, 0x1, %g7; \
- st %g7, [%g6 + THREAD_UWINDOWS]; /* There are user windows if we */ \
- andcc %sp, 0x7, %g0; /* get here. Check for stack alignment. */ \
- bne 5f; /* Stack is unaligned, yuck. */ \
- sra %sp, 0x1e, %g7; /* This stuff checks to see if top 3-bits */ \
- subcc %g7, 0x0, %g0; /* of stack pointer address are ok. */ \
- be,a 3f; \
- andn %sp, 0xfff, %g7; \
- subcc %g7, -1, %g0; \
- bne 5f; /* bad stack pointer, ugh */ \
- andn %sp, 0xfff, %g7; \
-3: lda [%g7] ASI_PTE, %g7; /* Ok, user stack is a valid address */ \
- srl %g7, 0x1d, %g7; \
- subcc %g7, 0x6, %g0; /* Can the user write to it? */ \
- bne 5f; \
- and %sp, 0xfff, %g7; \
- subcc %g7, 0xfc1, %g0; /* Is our save area on one page? */ \
- bl,a 1b; \
- std %l0, [%sp]; \
- add %sp, 0x38, %g5; /* Nope, have to check both pages */ \
- sra %g5, 0x1e, %g7; \
- subcc %g7, 0x0, %g0; \
- be,a 4f; \
- andn %g5, 0xfff, %g7; \
- subcc %g7, -1, %g0; \
- bne 5f; \
- andn %g5, 0xfff, %g7; \
-4: lda [%g7] ASI_PTE, %g7; /* Stack space in 2nd page is valid */ \
- srl %g7, 0x1d, %g7; \
- subcc %g7, 0x6, %g0; /* Can user write here too? */ \
- be,a 1b; \
- std %l0, [%sp]; \
-5: ld [%g6 + THREAD_UWINDOWS], %g7; /* This is due to either bad page perms */ \
- add %g6, THREAD_REG_WINDOW, %g5; /* for the users stack area, or the stack */ \
-6: std %l0, [%g5]; /* pointer is misaligned. See above. */ \
- std %l2, [%g5 + 0x8]; \
- std %l4, [%g5 + 0x10]; \
- std %l6, [%g5 + 0x18]; \
- std %i0, [%g5 + 0x20]; \
- std %i2, [%g5 + 0x28]; \
- std %i4, [%g5 + 0x30]; \
- std %i6, [%g5 + 0x38]; \
- subcc %g7, 0x1, %g7; \
- bge,a 6b; /* while(uwindows>=0) { write_win(); */ \
- save %g5, 0x40, %g5; /* uwindows--; } */ \
- st %sp, [%g6 + THREAD_USP]; \
- or %g0, 0x1, %g5; \
- rd %psr, %g7; \
- sll %g5, %g7, %g5; \
- wr %g5, 0x0, %wim; \
- and %g7, 0x1f, %g7; \
- st %g7, [%g6 + THREAD_WIM]; /* Update thread_struct fields */ \
- ld [%g6 + THREAD_UWINDOWS], %g7; \
- add %g7, 0x1, %g5; \
- st %g5, [%g6 + THREAD_W_SAVED]; \
- st %g0, [%g6 + THREAD_UWINDOWS]; \
-7: subcc %g7, 0x1, %g7; /* Restore back to where we started. */ \
- bge 7b; \
- restore %g0, %g0, %g0; \
- or %g0, %l5, %g5; /* Restore the globals. */ \
- or %g0, %l6, %g6; \
- or %g0, %l7, %g7; \
-8: nop; /* We are done when we get here. */ \
-
-/* As if the last macro wasn't enough, we have to go through a very similar routine
- * upon entry to most traps and interrupts. This is save away the current window
- * if it is the trap window, clean it, and adjust the stack for the handler c-code
- * to work.
+/* First, KGDB low level things. This is a rewrite
+ * of the routines found in the sparc-stub.c asm() statement
+ * from the gdb distribution. This is also dual-purpose
+ * as a software trap for userlevel programs.
*/
+ .data
+ .align 4
-#define ENTER_TRAP \
- rd %wim, %l4; \
- or %g0, 0x1, %l5; \
- sll %l5, %l0, %l5; \
- andcc %l0, 0x40, %g0; \
- bz 1f; \
- andcc %l4, %l5, %g0; \
- bz,a 3f; \
- sub %fp, 0xb0, %sp; \
- TRAP_WIN_CLEAN \
- b 3f; \
- sub %fp, 0xb0, %sp; \
-1: sethi %hi( C_LABEL(current) ), %l6; \
- ld [%l6 + %lo( C_LABEL(current) )], %l6; \
- ld [%l6 + THREAD_WIM], %l5; \
- and %l0, 0x1f, %l4; \
- cmp %l5, %l3; \
- ble,a 4f; \
- sethi %hi( C_LABEL(nwindowsm1) ), %l4; \
- sub %l5, %l3, %l3; \
- b 5f; \
- sub %l3, 0x1, %l5; \
-4: ld [%l4 + %lo( C_LABEL(nwindowsm1) )], %l4; \
- sub %l4, %l3, %l4; \
- add %l5, %l4, %l5; \
-5: st %l5, [%l6 + THREAD_UWINDOWS]; \
- bz,a 2f; \
- sethi %hi(TASK_SIZE-176), %l5; \
- TRAP_WIN_CLEAN; \
- sethi %hi( C_LABEL(current) ), %l6; \
- ld [%l6 + %lo( C_LABEL(current) )], %l6; \
- sethi %hi(TASK_SIZE-176), %l5; \
-2: or %l5, %lo(TASK_SIZE-176), %l5; \
- add %l6, %l5, %sp; \
-3: \
-
-#define ENTER_IRQ \
- rd %wim, %l4; \
- or %g0, 0x1, %l5; \
- sll %l5, %l0, %l5; \
- andcc %l0, 0x40, %g0; \
- bz 1f; \
- andcc %l4, %l5, %g0; \
- sethi %hi( C_LABEL(eintstack) ), %l7; \
- or %l7, %lo( C_LABEL(eintstack) ), %l7; \
- bz 0f; \
- nop; \
- TRAP_WIN_CLEAN \
- sethi %hi( C_LABEL(eintstack) ), %l7; \
- or %l7, %lo( C_LABEL(eintstack) ), %l7; \
-0: subcc %fp, %l7, %g0; \
- bg,a 3f; \
- sub %l7, 0xb0, %sp; \
- b 3f; \
- sub %fp, 0xb0, %sp; \
-1: sethi %hi( C_LABEL(current) ), %l6; \
- ld [%l6 + %lo( C_LABEL(current) )], %l6; \
- ld [%l6 + THREAD_WIM], %l5; \
- and %l0, 0x1f, %l7; \
- cmp %l5, %l7; \
- ble,a 4f; \
- sethi %hi( C_LABEL(nwindowsm1) ), %l4; \
- sub %l5, %l7, %l7; \
- b 5f; \
- sub %l7, 0x1, %l5; \
-4: ld [%l4 + %lo( C_LABEL(nwindowsm1) )], %l4; \
- sub %l4, %l7, %l4; \
- add %l5, %l4, %l5; \
-5: st %l5, [%l6 + THREAD_UWINDOWS]; \
- bz,a 2f; \
- sethi %hi( C_LABEL(eintstack) ), %l7; \
- TRAP_WIN_CLEAN \
- sethi %hi( C_LABEL(eintstack) ), %l7; \
-2: \
- sub %l7, 0xb0, %sp; \
-3:
+in_trap_handler:
+ .word 0
+
+ .text
+ .align 4
+
+! This function is called when any SPARC trap (except window overflow or
+! underflow) occurs. It makes sure that the invalid register window is still
+! available before jumping into C code. It will also restore the world if you
+! return from handle_exception.
+
+ .globl C_LABEL(trap_low)
+C_LABEL(trap_low):
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ sethi %hi(in_trap_handler), %l4
+ ld [%lo(in_trap_handler) + %l4], %l5
+ inc %l5
+ st %l5, [%lo(in_trap_handler) + %l4]
+
+ /* Make sure kgdb sees the same state we just saved. */
+ LOAD_PT_GLOBALS(sp)
+ LOAD_PT_INS(sp)
+ ld [%sp + REGWIN_SZ + PT_Y], %l4
+ ld [%sp + REGWIN_SZ + PT_WIM], %l3
+ ld [%sp + REGWIN_SZ + PT_PSR], %l0
+ ld [%sp + REGWIN_SZ + PT_PC], %l1
+ ld [%sp + REGWIN_SZ + PT_NPC], %l2
+ rd %tbr, %l5 /* Never changes... */
+
+ /* Make kgdb exception frame. */
+ sub %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
+ ! + hidden arg + arg spill
+ ! + doubleword alignment
+ ! + registers[72] local var
+ SAVE_KGDB_GLOBALS(sp)
+ SAVE_KGDB_INS(sp)
+ SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
+
+ /* We are increasing PIL, so two writes. */
+ or %l0, PSR_PIL, %l0
+ wr %l0, 0, %psr
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(handle_exception)
+ add %sp, REGWIN_SZ, %o0 ! Pass address of registers
+
+ /* Load new kgdb register set. */
+ LOAD_KGDB_GLOBALS(sp)
+ LOAD_KGDB_INS(sp)
+ LOAD_KGDB_SREGS(sp, l0, l2)
+ wr %l0, 0x0, %y
+
+ sethi %hi(in_trap_handler), %l4
+ ld [%lo(in_trap_handler) + %l4], %l5
+ dec %l5
+ st %l5, [%lo(in_trap_handler) + %l4]
+
+ add %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.
+
+ /* Now take what kgdb did and place it into the pt_regs
+ * frame which SparcLinux RESTORE_ALL understands.,
+ */
+ STORE_PT_INS(sp)
+ STORE_PT_GLOBALS(sp)
+ STORE_PT_YREG(sp, g2)
+ STORE_PT_PRIV(sp, l1, l2, l3)
+ RESTORE_ALL
+
+#ifdef CONFIG_BLK_DEV_FD
.text
- .align 4
+ .align 4
+ .globl C_LABEL(floppy_hardint)
+C_LABEL(floppy_hardint):
+ /*
+ * This code cannot touch registers %l0 %l1 and %l2
+ * because SAVE_ALL depends on their values. It depends
+ * on %l3 also, but we regenerate it before a call.
+ * Other registers are:
+ * %l3 -- base address of fdc registers
+ * %l4 -- pdma_vaddr
+ * %l5 -- scratch for ld/st address
+ * %l6 -- pdma_size
+ * %l7 -- scratch [floppy byte, ld/st address, aux. data]
+ */
-/* Default trap handler */
- .globl my_trap_handler
-my_trap_handler:
-#if 1
- jmp %l1
- rett %l2
- nop
-#else
- rd %wim, %l4
- or %g0, 0x1, %l5
- sll %l5, %l0, %l5
- cmp %l4, %l5 ! are we in the invalid window?
-
- TRAP_WIN_CLEAN
-
- nop
- or %g0, %l3, %o0
- call C_LABEL(do_hw_interrupt)
- or %g0, %g0, %o1
- wr %l0, 0x20, %psr ! re-enable traps and reset the condition codes
- nop
- nop
- nop ! click our heels three times, "no place like home"
- jmp %l1
- rett %l2
-#endif /* bogon */
-
- .align 4
- .globl sparc_timer
-sparc_timer:
- sethi %hi(TIMER_VADDR), %l4
- or %l4, %lo(TIMER_VADDR), %l4 ! read the limit register
- ld [%l4 + 0xc], %l4 ! to clear the interrupt
- rd %wim, %l4
- or %g0, 0x1, %l5
- sll %l5, %l0, %l5
- andcc %l0, 0x40, %g0
- bz st1
- sethi %hi( C_LABEL(eintstack) ), %l7
- andcc %l4, %l5, %g0
- bz st0
- or %l7, %lo( C_LABEL(eintstack) ), %l7
- TRAP_WIN_CLEAN
- sethi %hi( C_LABEL(eintstack) ), %l7
- or %l7, %lo( C_LABEL(eintstack) ), %l7
-st0: subcc %fp, %l7, %g0
- bg,a st3
- sub %l7, 0xb0, %sp
- b st3
- sub %fp, 0xb0, %sp
-st1: sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- ld [%l6 + THREAD_WIM], %l5
- and %l0, 0x1f, %l7
- cmp %l5, %l7
- ble,a st4
- sethi %hi( C_LABEL(nwindowsm1) ), %l4
- sub %l5, %l7, %l7
- b st5
- sub %l7, 0x1, %l5
-st4: ld [%l4 + %lo( C_LABEL(nwindowsm1) )], %l4
- sub %l4, %l7, %l4
- add %l5, %l4, %l5
-st5: st %l5, [%l6 + THREAD_UWINDOWS]
- sethi %hi( C_LABEL(eintstack) ), %l7
- bz,a st2
- or %l7, %lo( C_LABEL(eintstack) ), %l7
- TRAP_WIN_CLEAN
- sethi %hi( C_LABEL(eintstack) ), %l7
- or %l7, %lo( C_LABEL(eintstack) ), %l7
-st2: sub %l7, 0xb0, %sp
-
-st3: std %g2, [%sp + 96 + 24]
- or %g0, %g1, %l7
- rd %y, %l6
- std %g4, [%sp + 96 + 32]
- andn %l0, PSR_PIL, %l4
- sll %l3, 0x8, %l5
- std %g6, [%sp + 96 + 40]
- or %l5, %l4, %l4
+ /* Do we have work to do? */
+ sethi %hi(C_LABEL(doing_pdma)), %l7
+ ld [%l7 + %lo(C_LABEL(doing_pdma))], %l7
+ cmp %l7, 0
+ be floppy_dosoftint
+ nop
- wr %l4, 0x0, %psr
- wr %l4, PSR_ET, %psr
+ /* Load fdc register base */
+ sethi %hi(C_LABEL(fdc_status)), %l3
+ ld [%l3 + %lo(C_LABEL(fdc_status))], %l3
- std %l0, [%sp + 96 + 0]
- std %l2, [%sp + 96 + 8]
- st %fp, [%sp + 96 + 16]
-
- or %g0, 14, %o0
- or %g0, %g0, %o1
- call C_LABEL(do_sparc_timer)
- nop
-
- or %g0, %l7, %g1
- wr %l6, 0x0, %y
- ldd [%sp + 96 + 24], %g2
- ldd [%sp + 96 + 32], %g4
- ldd [%sp + 96 + 40], %g6
- wr %l0, 0x0, %psr
- nop
- nop
- nop
-
- and %l0, 31, %l5
- sethi %hi(lnx_winmask), %l6
- or %l6, %lo(lnx_winmask), %l6
- ldub [%l6 + %l5], %l5
- andcc %l0, PSR_PS, %g0
- bnz 1f
- rd %wim, %l4
+ /* Setup register addresses */
+ sethi %hi(C_LABEL(pdma_vaddr)), %l5 ! transfer buffer
+ ld [%l5 + %lo(C_LABEL(pdma_vaddr))], %l4
+ sethi %hi(C_LABEL(pdma_size)), %l5 ! bytes to go
+ ld [%l5 + %lo(C_LABEL(pdma_size))], %l6
+next_byte:
+ ldub [%l3], %l7
+
+ andcc %l7, 0x80, %g0 ! Does fifo still have data
+ bz floppy_fifo_emptied ! fifo has been emptied...
+ andcc %l7, 0x20, %g0 ! in non-dma mode still?
+ bz floppy_overrun ! nope, overrun
+ andcc %l7, 0x40, %g0 ! 0=write 1=read
+ bz floppy_write
+ sub %l6, 0x1, %l6
+
+ /* Ok, actually read this byte */
+ ldub [%l3 + 1], %l7
+ orcc %g0, %l6, %g0
+ stb %l7, [%l4]
+ bne next_byte
+ add %l4, 0x1, %l4
+
+ b floppy_tdone
+ nop
+
+floppy_write:
+ /* Ok, actually write this byte */
+ ldub [%l4], %l7
+ orcc %g0, %l6, %g0
+ stb %l7, [%l3 + 1]
+ bne next_byte
+ add %l4, 0x1, %l4
+
+ /* fall through... */
+floppy_tdone:
+ sethi %hi(C_LABEL(pdma_vaddr)), %l5
+ st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
+ sethi %hi(C_LABEL(pdma_size)), %l5
+ st %l6, [%l5 + %lo(C_LABEL(pdma_size))]
+ /* Flip terminal count pin */
+ set C_LABEL(auxio_register), %l7
+ ld [%l7], %l7
+
+ set C_LABEL(sparc_cpu_model), %l5
+ ld [%l5], %l5
+ subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */
+ be 1f
+ ldub [%l7], %l5
+
+ or %l5, 0xc2, %l5
+ stb %l5, [%l7]
+ andn %l5, 0x02, %l5
+ b 2f
+ nop
+
+1:
+ or %l5, 0xf4, %l5
+ stb %l5, [%l7]
+ andn %l5, 0x04, %l5
-1: andcc %l5, %l4, %g0
- bnz 2f
+2:
+ /* Kill some time so the bits set */
+ WRITE_PAUSE
+ WRITE_PAUSE
+
+ stb %l5, [%l7]
+
+ /* Prevent recursion */
+ sethi %hi(C_LABEL(doing_pdma)), %l7
+ b floppy_dosoftint
+ st %g0, [%l7 + %lo(C_LABEL(doing_pdma))]
+
+ /* We emptied the FIFO, but we haven't read everything
+ * as of yet. Store the current transfer address and
+ * bytes left to read so we can continue when the next
+ * fast IRQ comes in.
+ */
+floppy_fifo_emptied:
+ sethi %hi(C_LABEL(pdma_vaddr)), %l5
+ st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
+ sethi %hi(C_LABEL(pdma_size)), %l7
+ st %l6, [%l7 + %lo(C_LABEL(pdma_size))]
+
+ /* Restore condition codes */
wr %l0, 0x0, %psr
- nop
- nop
- nop
+ WRITE_PAUSE
jmp %l1
rett %l2
-2: wr %g0, 0x0, %wim
- nop
- nop
- nop
+floppy_overrun:
+ sethi %hi(C_LABEL(pdma_vaddr)), %l5
+ st %l4, [%l5 + %lo(C_LABEL(pdma_vaddr))]
+ sethi %hi(C_LABEL(pdma_size)), %l5
+ st %l6, [%l5 + %lo(C_LABEL(pdma_size))]
+ /* Prevent recursion */
+ sethi %hi(C_LABEL(doing_pdma)), %l7
+ st %g0, [%l7 + %lo(C_LABEL(doing_pdma))]
- restore
- restore %g0, 0x1, %l1
- rd %psr, %l0
- and %l0, 31, %l0
- sll %l1, %l0, %l1
- wr %l1, 0x0, %wim
- sethi %hi( C_LABEL(current) ), %l1
- ld [%l1 + %lo( C_LABEL(current) ) ], %l1
- st %l0, [%l1 + THREAD_WIM]
- save %g0, %g0, %g0
-
- ldd [%sp], %l0
- ldd [%sp + 0x8], %l2
- ldd [%sp + 0x10], %l4
- ldd [%sp + 0x18], %l6
- ldd [%sp + 0x20], %i0
- ldd [%sp + 0x28], %i2
- ldd [%sp + 0x30], %i4
- ldd [%sp + 0x38], %i6
-
- save %g0, %g0, %g0
+ /* fall through... */
+floppy_dosoftint:
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_IRQ
+
+ /* Set all IRQs off. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov 11, %o0 ! floppy irq level (unused anyway)
+ mov %g0, %o1 ! devid is not used in fast interrupts
+ call C_LABEL(floppy_interrupt)
+ add %sp, REGWIN_SZ, %o2 ! struct pt_regs *regs
+
+ LEAVE_IRQ
+ RESTORE_ALL
- jmp %l1
- rett %l2
+#endif /* (CONFIG_BLK_DEV_FD) */
+ /* Bad trap handler */
+ .globl bad_trap_handler
+bad_trap_handler:
+ SAVE_ALL
+ ENTER_SYSCALL
-/* For now all IRQ's not registered get sent here so I can see
- * what is poking the chip.
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov %l7, %o0 ! trap number
+ mov %l0, %o1 ! psr
+ call C_LABEL(do_hw_interrupt)
+ mov %l1, %o2 ! pc
+
+ RESTORE_ALL
+
+/* For now all IRQ's not registered get sent here. handler_irq() will
+ * see if a routine is registered to handle this interrupt and if not
+ * it will say so on the console.
*/
- .align 4
- .globl stray_irq_entry
-stray_irq_entry:
- rd %wim, %l4
- or %g0, 0x1, %l5
- sll %l5, %l0, %l5
- andcc %l0, 0x40, %g0
- bz tt1
- sethi %hi( C_LABEL(eintstack) ), %l7
- andcc %l4, %l5, %g0
- bz tt0
- or %l7, %lo( C_LABEL(eintstack) ), %l7
- TRAP_WIN_CLEAN
- sethi %hi( C_LABEL(eintstack) ), %l7
- or %l7, %lo( C_LABEL(eintstack) ), %l7
-tt0: subcc %fp, %l7, %g0
- bg,a tt3
- sub %l7, 0xb0, %sp
- b tt3
- sub %fp, 0xb0, %sp
-tt1: sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- ld [%l6 + THREAD_WIM], %l5
- and %l0, 0x1f, %l7
- cmp %l5, %l7
- ble,a tt4
- sethi %hi( C_LABEL(nwindowsm1) ), %l4
- sub %l5, %l7, %l7
- b tt5
- sub %l7, 0x1, %l5
-tt4: ld [%l4 + %lo( C_LABEL(nwindowsm1) )], %l4
- sub %l4, %l7, %l4
+ .align 4
+ .globl real_irq_entry
+real_irq_entry:
+ SAVE_ALL
+#ifdef __SMP__
+ cmp %l7, 9
+ bne 1f
+ nop
+
+ GET_PROCESSOR_MID(l4, l5)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %l5
+ sethi %hi(0x02000000), %l6
+ sll %l4, 12, %l4
add %l5, %l4, %l5
-tt5: st %l5, [%l6 + THREAD_UWINDOWS]
- sethi %hi( C_LABEL(eintstack) ), %l7
- bz,a tt2
- or %l7, %lo( C_LABEL(eintstack) ), %l7
- TRAP_WIN_CLEAN
- sethi %hi( C_LABEL(eintstack) ), %l7
- or %l7, %lo( C_LABEL(eintstack) ), %l7
-tt2: sub %l7, 0xb0, %sp
-
-tt3: std %g2, [%sp + 96 + 24]
- or %g0, %g1, %l7
- rd %y, %l6
- std %g4, [%sp + 96 + 32]
- andn %l0, PSR_PIL, %l4
- sll %l3, 0x8, %l5
- std %g6, [%sp + 96 + 40]
- or %l5, %l4, %l4
+ ld [%l5], %l4
+ andcc %l4, %l6, %g0
+ be 1f
+ nop
+
+ b,a linux_trap_ipi9_sun4m
+
+1:
+#endif
+ ENTER_IRQ
+
+#ifdef __SMP__
+ cmp %l7, 13
+ bne 1f
+ nop
+
+ /* This is where we catch the level 13 reschedule soft-IRQ. */
+ GET_PROCESSOR_MID(o3, o2)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sethi %hi(0x20000000), %o4
+ sll %o3, 12, %o3
+ add %o5, %o3, %o5
+ ld [%o5], %o1 ! read processor irq pending reg
+ andcc %o1, %o4, %g0
+ be 1f
+ nop
+
+ b,a linux_trap_ipi13_sun4m
+
+1:
+
+#endif
+
+ /* start atomic operation with respect to software interrupts */
+ sethi %hi(C_LABEL(intr_count)), %l6
+ ld [%l6 + %lo(C_LABEL(intr_count))], %l5
+ or %l0, PSR_PIL, %g2
+ add %l5, 0x1, %l4
+ wr %g2, 0x0, %psr
+ st %l4, [%l6 + %lo(C_LABEL(intr_count))]
+ wr %g2, PSR_ET, %psr
+ mov %l7, %o0 ! irq level
+ call C_LABEL(handler_irq)
+ add %sp, REGWIN_SZ, %o1 ! pt_regs ptr
+ wr %l0, PSR_ET, %psr
+ st %l5, [%l6 + %lo(C_LABEL(intr_count))]
+ LEAVE_IRQ
+ RESTORE_ALL
+
+ /* This routine handles illegal instructions and privileged
+ * instruction attempts from user code.
+ */
+ .align 4
+ .globl bad_instruction
+bad_instruction:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(do_illegal_instruction)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ .align 4
+ .globl priv_instruction
+priv_instruction:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(do_priv_instruction)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles unaligned data accesses. */
+ .align 4
+ .globl mna_handler
+mna_handler:
+ andcc %l0, PSR_PS, %g0
+ be mna_fromuser
+ ld [%l1], %l7
+
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov %l7, %o1
+ call C_LABEL(kernel_unaligned_trap)
+ add %sp, REGWIN_SZ, %o0
+
+ RESTORE_ALL
+
+mna_fromuser:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+ mov %l7, %o1
+ call C_LABEL(user_unaligned_trap)
+ add %sp, REGWIN_SZ, %o0
+
+ RESTORE_ALL
+
+ /* This routine handles floating point disabled traps. */
+ .align 4
+ .globl fpd_trap_handler
+fpd_trap_handler:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(do_fpd_trap)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Floating Point Exceptions. */
+ .align 4
+ .globl fpe_trap_handler
+fpe_trap_handler:
+ set fpsave_magic, %l5
+ cmp %l1, %l5
+ be 1f
+ sethi %hi(C_LABEL(fpsave)), %l5
+ or %l5, %lo(C_LABEL(fpsave)), %l5
+ cmp %l1, %l5
+ bne 2f
+ sethi %hi(fpsave_catch2), %l5
+ or %l5, %lo(fpsave_catch2), %l5
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
+ jmp %l5
+ rett %l5 + 4
+1:
+ sethi %hi(fpsave_catch), %l5
+ or %l5, %lo(fpsave_catch), %l5
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
+ jmp %l5
+ rett %l5 + 4
+
+2:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(do_fpe_trap)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Tag Overflow Exceptions. */
+ .align 4
+ .globl do_tag_overflow
+do_tag_overflow:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_tag_overflow)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Watchpoint Exceptions. */
+ .align 4
+ .globl do_watchpoint
+do_watchpoint:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_watchpoint)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Register Access Exceptions. */
+ .align 4
+ .globl do_reg_access
+do_reg_access:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_reg_access)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Co-Processor Disabled Exceptions. */
+ .align 4
+ .globl do_cp_disabled
+do_cp_disabled:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_cp_disabled)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Unimplemented FLUSH Exceptions. */
+ .align 4
+ .globl do_bad_flush
+do_bad_flush:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_bad_flush)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Co-Processor Exceptions. */
+ .align 4
+ .globl do_cp_exception
+do_cp_exception:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_cp_exception)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ /* This routine handles Hardware Divide By Zero Exceptions. */
+ .align 4
+ .globl do_hw_divzero
+do_hw_divzero:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr ! re-enable traps
+ WRITE_PAUSE
+
+ add %sp, REGWIN_SZ, %o0
+ mov %l1, %o1
+ mov %l2, %o2
+ call C_LABEL(handle_hw_divzero)
+ mov %l0, %o3
+
+ RESTORE_ALL
+
+ .align 4
+ .globl do_flush_windows
+do_flush_windows:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ andcc %l0, PSR_PS, %g0
+ bne dfw_kernel
+ nop
+
+ call C_LABEL(flush_user_windows)
+ nop
+
+ /* Advance over the trap instruction. */
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1
+ add %l1, 0x4, %l2
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+ RESTORE_ALL
+
+ .globl flush_patch_one
+
+ /* We get these for debugging routines using __builtin_return_address() */
+dfw_kernel:
+flush_patch_one:
+ FLUSH_ALL_KERNEL_WINDOWS
+
+ /* Advance over the trap instruction. */
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1
+ add %l1, 0x4, %l2
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+ RESTORE_ALL
+
+ /* The getcc software trap. The user wants the condition codes from
+ * the %psr in register %g1.
+ */
+
+ .align 4
+ .globl getcc_trap_handler
+getcc_trap_handler:
+ srl %l0, 20, %g1 ! give user
+ and %g1, 0xf, %g1 ! only ICC bits in %psr
+ jmp %l2 ! advance over trap instruction
+ rett %l2 + 0x4 ! like this...
+
+ /* The setcc software trap. The user has condition codes in %g1
+ * that it would like placed in the %psr. Be careful not to flip
+ * any unintentional bits!
+ */
+
+ .align 4
+ .globl setcc_trap_handler
+setcc_trap_handler:
+ sll %g1, 0x14, %l4
+ set PSR_ICC, %l5
+ andn %l0, %l5, %l0 ! clear ICC bits in %psr
+ and %l4, %l5, %l4 ! clear non-ICC bits in user value
+ or %l4, %l0, %l4 ! or them in... mix mix mix
+
+ wr %l4, 0x0, %psr ! set new %psr
+ WRITE_PAUSE ! TI scumbags...
+
+ jmp %l2 ! advance over trap instruction
+ rett %l2 + 0x4 ! like this...
+
+ .align 4
+ .globl linux_trap_nmi_sun4c
+linux_trap_nmi_sun4c:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ /* Ugh, we need to clear the IRQ line. This is now
+ * a very sun4c specific trap handler...
+ */
+ sethi %hi(C_LABEL(interrupt_enable)), %l5
+ ld [%l5 + %lo(C_LABEL(interrupt_enable))], %l5
+ ldub [%l5], %l6
+ andn %l6, INTS_ENAB, %l6
+ stb %l6, [%l5]
+
+ /* Now it is safe to re-enable traps without recursion. */
+ or %l0, PSR_PIL, %l0
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ /* Now call the c-code with the pt_regs frame ptr and the
+ * memory error registers as arguments. The ordering chosen
+ * here is due to unlatching semantics.
+ */
+ sethi %hi(AC_SYNC_ERR), %o0
+ add %o0, 0x4, %o0
+ lda [%o0] ASI_CONTROL, %o2 ! sync vaddr
+ sub %o0, 0x4, %o0
+ lda [%o0] ASI_CONTROL, %o1 ! sync error
+ add %o0, 0xc, %o0
+ lda [%o0] ASI_CONTROL, %o4 ! async vaddr
+ sub %o0, 0x4, %o0
+ lda [%o0] ASI_CONTROL, %o3 ! async error
+ call C_LABEL(sparc_lvl15_nmi)
+ add %sp, REGWIN_SZ, %o0
+
+ RESTORE_ALL
+
+#ifdef __SMP__
+
+ .align 4
+ .globl linux_trap_ipi9_sun4m
+linux_trap_ipi9_sun4m:
+ sethi %hi(0x02000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
+
+ ld [%o5], %g0
+ WRITE_PAUSE
+
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
wr %l4, 0x0, %psr
+ WRITE_PAUSE
+
wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(smp_message_irq)
+ nop
+
+ RESTORE_ALL_FASTIRQ
+
+ .align 4
+ .globl linux_trap_ipi13_sun4m
+linux_trap_ipi13_sun4m:
+ /* NOTE: real_irq_entry saved state and grabbed klock already. */
+
+ /* start atomic operation with respect to software interrupts */
+ sethi %hi(C_LABEL(intr_count)), %l4
+ ld [%l4 + %lo(C_LABEL(intr_count))], %l5
+ add %l5, 0x1, %l5
+ st %l5, [%l4 + %lo(C_LABEL(intr_count))]
+
+ sethi %hi(0x20000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
+
+ ld [%o5], %g0
+ WRITE_PAUSE
+
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(smp_reschedule_irq)
+ nop
+
+ sethi %hi(C_LABEL(intr_count)), %l4
+ ld [%l4 + %lo(C_LABEL(intr_count))], %l5
+ sub %l5, 0x1, %l5
+ st %l5, [%l4 + %lo(C_LABEL(intr_count))]
+
+ LEAVE_IRQ
+ RESTORE_ALL
+
+ .align 4
+ .globl linux_trap_ipi15_sun4m
+linux_trap_ipi15_sun4m:
+ SAVE_ALL
+
+ /* First check for hard NMI memory error. */
+ sethi %hi(0xf0000000), %o2
+ set C_LABEL(sun4m_interrupts), %l5
+ set 0x4000, %o3
+ ld [%l5], %l5
+ add %l5, %o3, %l5
+ ld [%l5], %l6
+ andcc %o2, %l6, %o2
+ be 1f
+ nop
+
+ /* Asynchronous fault, why you little ?!#&%@... */
+ sethi %hi(0x80000000), %o2
+ st %o2, [%l5 + 0xc]
+ WRITE_PAUSE
+ ld [%l5], %g0
+ WRITE_PAUSE
+
+ /* All interrupts are off... now safe to enable traps
+ * and call C-code.
+ */
+ or %l0, PSR_PIL, %l4 ! I am very paranoid...
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+ call C_LABEL(sun4m_nmi)
+ nop
+
+ sethi %hi(0x80000000), %o2
+ st %o2, [%l5 + 0x8]
+ WRITE_PAUSE
+ ld [%l5], %g0
+ WRITE_PAUSE
+
+ RESTORE_ALL_FASTIRQ
+
+1:
+ sethi %hi(0x80000000), %o2
+ GET_PROCESSOR_MID(o0, o1)
+ set C_LABEL(sun4m_interrupts), %l5
+ ld [%l5], %o5
+ sll %o0, 12, %o0
+ add %o5, %o0, %o5
+ st %o2, [%o5 + 4]
+ WRITE_PAUSE
+
+ ld [%o5], %g0
+ WRITE_PAUSE
+
+ /* IRQ's off else we deadlock. */
+ or %l0, PSR_PIL, %l4
+ wr %l4, 0x0, %psr
+ WRITE_PAUSE
+
+ wr %l4, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(smp_message_irq)
+ nop
+
+ RESTORE_ALL_FASTIRQ
+
+#endif
+
+
+ .align 4
+ .globl C_LABEL(invalid_segment_patch1_ff)
+ .globl C_LABEL(invalid_segment_patch2_ff)
+C_LABEL(invalid_segment_patch1_ff): cmp %l4, 0xff
+C_LABEL(invalid_segment_patch2_ff): mov 0xff, %l4
+
+ .align 4
+ .globl C_LABEL(num_context_patch1_16), C_LABEL(num_context_patch2_16)
+C_LABEL(num_context_patch1_16): mov 0x10, %l7
+C_LABEL(num_context_patch2_16): mov 0x10, %l7
+
+ .align 4
+ .globl C_LABEL(sun4c_kernel_buckets_patch_32)
+C_LABEL(sun4c_kernel_buckets_patch_32): andn %l7, 256, %l3
+
+ .globl C_LABEL(invalid_segment_patch1), C_LABEL(invalid_segment_patch2)
+ .globl C_LABEL(num_context_patch1), C_LABEL(num_context_patch2)
+ .globl C_LABEL(sun4c_kernel_buckets_patch)
+
+ .align 4
+ .globl sun4c_fault
+sun4c_fault:
+ sethi %hi(AC_SYNC_ERR), %l4
+ add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6
+ lda [%l6] ASI_CONTROL, %l5 ! Address
+ lda [%l4] ASI_CONTROL, %l6
+
+ andn %l5, 0xfff, %l5 ! Encode all info into l7
+ srl %l6, 14, %l6
+
+ and %l6, 2, %l6
+ or %l5, %l6, %l6
+
+ or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
- std %l0, [%sp + 96 + 0]
- std %l2, [%sp + 96 + 8]
- st %fp, [%sp + 96 + 16]
-
- or %g0, %l3, %o0
- or %g0, %g0, %o1
- call C_LABEL(unexpected_irq)
- nop
-
- or %g0, %l7, %g1
- wr %l6, 0x0, %y
- ldd [%sp + 96 + 24], %g2
- ldd [%sp + 96 + 32], %g4
- ldd [%sp + 96 + 40], %g6
- wr %l0, 0x0, %psr
- nop
- nop
- nop
-
- and %l0, 31, %l5
- sethi %hi(lnx_winmask), %l6
- or %l6, %lo(lnx_winmask), %l6
- ldub [%l6 + %l5], %l5
andcc %l0, PSR_PS, %g0
- bnz 1f
- rd %wim, %l4
+ be sun4c_fault_fromuser
+ andcc %l7, 1, %g0 ! Text fault?
-1: andcc %l5, %l4, %g0
- bnz 2f
- wr %l0, 0x0, %psr
- nop
- nop
- nop
+ be 1f
+ sethi %hi(KERNBASE), %l6
- jmp %l1
- rett %l2
+ mov %l1, %l5 ! PC
-2: wr %g0, 0x0, %wim
- nop
- nop
- nop
+1:
+ cmp %l5, %l6
+ blu sun4c_fault_fromuser
+ sethi %hi(0xfffc0000), %l4 ! SUN4C_REAL_PGDIR_MASK
- restore
- restore %g0, 0x1, %l1
- rd %psr, %l0
- and %l0, 31, %l0
- sll %l1, %l0, %l1
- wr %l1, 0x0, %wim
- sethi %hi( C_LABEL(current) ), %l1
- ld [%l1 + %lo( C_LABEL(current) ) ], %l1
- st %l0, [%l1 + THREAD_WIM]
- save %g0, %g0, %g0
-
- ldd [%sp], %l0
- ldd [%sp + 0x8], %l2
- ldd [%sp + 0x10], %l4
- ldd [%sp + 0x18], %l6
- ldd [%sp + 0x20], %i0
- ldd [%sp + 0x28], %i2
- ldd [%sp + 0x30], %i4
- ldd [%sp + 0x38], %i6
-
- save %g0, %g0, %g0
-
+ and %l5, %l4, %l5
+
+ lduba [%l5] ASI_SEGMAP, %l4
+C_LABEL(invalid_segment_patch1):
+ cmp %l4, 0x7f
+ bne 1f
+ sethi %hi(C_LABEL(sun4c_kernel_next)), %l4
+
+ ld [%l4 + %lo(C_LABEL(sun4c_kernel_next))], %l6 ! entry
+
+ ld [%l6], %l3 ! entry->vaddr
+ cmp %l3, 0 ! is this segment available?
+ be 4f ! Yes, use it.
+ st %l5, [%l6] ! entry->vaddr = address
+
+ ! use entry->vaddr to unmap the old segment
+ mov %l3, %l5
+
+C_LABEL(num_context_patch1):
+ mov 0x08, %l7
+
+C_LABEL(invalid_segment_patch2):
+ mov 0x7f, %l4
+
+ sethi %hi(AC_CONTEXT), %l3
+ lduba [%l3] ASI_CONTROL, %l6
+
+3:
+ deccc %l7
+ stba %l7, [%l3] ASI_CONTROL
+ bne 3b
+ stba %l4, [%l5] ASI_SEGMAP
+
+ stba %l6, [%l3] ASI_CONTROL
+
+ ! reload the entry
+
+ sethi %hi(C_LABEL(sun4c_kernel_next)), %l4
+ ld [%l4 + %lo(C_LABEL(sun4c_kernel_next))], %l6
+
+ ld [%l6], %l5 ! restore address from entry->vaddr
+
+4:
+ ! advance sun4c_kernel_next
+ add %l6, 8, %l7
+C_LABEL(sun4c_kernel_buckets_patch):
+ andn %l7, 128, %l3
+ st %l3, [%l4 + %lo(C_LABEL(sun4c_kernel_next))]
+
+C_LABEL(num_context_patch2):
+ mov 0x08, %l7
+
+ ldub [%l6 + 0x4], %l4 ! entry->pseg
+
+ sethi %hi(AC_CONTEXT), %l3
+ lduba [%l3] ASI_CONTROL, %l6
+
+3:
+ deccc %l7
+ stba %l7, [%l3] ASI_CONTROL
+ bne 3b
+ stba %l4, [%l5] ASI_SEGMAP
+
+ stba %l6, [%l3] ASI_CONTROL
+
+1:
+ sethi %hi(0xfe200000), %l4 ! SUN4C_VMALLOC_START
+ cmp %l5, %l4
+
+ bgeu 1f
+ mov 0x40, %l7 ! SUN4C_REAL_PGDIR_SIZE / PAGE_SIZE
+
+ sethi %hi(KERNBASE), %l6
+
+ sub %l5, %l6, %l4
+ srl %l4, PAGE_SHIFT, %l4
+ sethi %hi(0xf3000000), %l3 ! SUN4C_PAGE_KERNEL
+ or %l3, %l4, %l3
+
+ sethi %hi(PAGE_SIZE), %l4
+
+2:
+ sta %l3, [%l5] ASI_PTE
+ deccc %l7
+ inc %l3
+ bne 2b
+ add %l5, %l4, %l5
+
+ /* Restore condition codes */
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
jmp %l1
- rett %l2
+ rett %l2
+1:
+ srl %l5, 22, %l3 ! SUN4C_PGDIR_SHIFT
+ sethi %hi(C_LABEL(swapper_pg_dir)), %l4
+ or %l4, %lo(C_LABEL(swapper_pg_dir)), %l4
+ sll %l3, 2, %l3
+ ld [%l4 + %l3], %l4
+ andn %l4, 0xfff, %l4 ! PAGE_MASK
+ srl %l5, PAGE_SHIFT - 2, %l6
+ and %l6, 0xffc, %l6 ! (SUN4C_PTRS_PER_PTE - 1) << 2
+ add %l6, %l4, %l6
-/* This routine is optimized for kernel window fills. User fills take about two
- * or three extra jumps on the average. We'll see how this works out.
- */
+ sethi %hi(PAGE_SIZE), %l4
-/* Don't use local labels, or if you do be REAL CAREFUL. TRAP_WIN_CLEAN is
- * full of them! If you think this routine is hairy, window spills are worse,
- * see below.
- */
+2:
+ ld [%l6], %l3
+ deccc %l7
+ sta %l3, [%l5] ASI_PTE
+ add %l6, 0x4, %l6
+ bne 2b
+ add %l5, %l4, %l5
- .align 4
- .globl spill_window_entry
-spill_window_entry:
- andcc %l0, 0x40, %g0 ! see if this is a user window fill
- bz,a spill_from_user
- nop
-
- TRAP_WIN_CLEAN /* danger, danger... */
- wr %l0, 0x0, %psr
- nop
+ /* Restore condition codes */
+ wr %l0, 0x0, %psr
+ WRITE_PAUSE
jmp %l1
- rett %l2
+ rett %l2
-spill_from_user:
- sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- ld [%l6 + THREAD_WIM], %l5
- and %l0, 0x1f, %l3
+sun4c_fault_fromuser:
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ mov %l7, %o1 ! Decode the info from %l7
+ mov %l7, %o2
+ and %o1, 1, %o1 ! arg2 = text_faultp
+ mov %l7, %o3
+ and %o2, 2, %o2 ! arg3 = writep
+ andn %o3, 0xfff, %o3 ! arg4 = faulting address
-/* I don't know what's worse, the extra comparison here, or an extra load
- * from a lookup table, we'll see.
- */
- cmp %l5, %l3
- ble,a 1f
- sethi %hi( C_LABEL(nwindowsm1) ), %l4
- sub %l5, %l3, %l3
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(do_sun4c_fault)
+ add %sp, REGWIN_SZ, %o0 ! arg1 = pt_regs ptr
+
+ RESTORE_ALL
+
+ .align 4
+ .globl C_LABEL(srmmu_fault)
+C_LABEL(srmmu_fault):
+ mov 0x400, %l5
+ mov 0x300, %l4
+
+ lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first
+ lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last
+
+ andn %l6, 0xfff, %l6
+ srl %l5, 6, %l5 ! and encode all info into l7
+
+ and %l5, 2, %l5
+ or %l5, %l6, %l6
+
+ or %l6, %l7, %l7 ! l7 = [addr,write,txtfault]
+
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ mov %l7, %o1
+ mov %l7, %o2
+ and %o1, 1, %o1 ! arg2 = text_faultp
+ mov %l7, %o3
+ and %o2, 2, %o2 ! arg3 = writep
+ andn %o3, 0xfff, %o3 ! arg4 = faulting address
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(do_sparc_fault)
+ add %sp, REGWIN_SZ, %o0 ! arg1 = pt_regs ptr
+
+ RESTORE_ALL
+
+ /* SunOS uses syscall zero as the 'indirect syscall' it looks
+ * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
+ * This is complete brain damage.
+ */
+ .globl C_LABEL(sunos_indir)
+C_LABEL(sunos_indir):
+ mov %o7, %l4
+ cmp %o0, NR_SYSCALLS
+ blu,a 1f
+ sll %o0, 0x2, %o0
+
+ sethi %hi(C_LABEL(sunos_nosys)), %l6
b 2f
- sub %l3, 0x1, %l5
-1: ld [%l4 + %lo( C_LABEL(nwindowsm1) )], %l4
- sub %l4, %l3, %l4
- add %l5, %l4, %l5
-2: st %l5, [%l6 + THREAD_UWINDOWS]
-
- TRAP_WIN_CLEAN /* danger, danger... */
- sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- ld [%l6 + THREAD_KSP], %sp
- and %l0, 0x1f, %l3
- sethi %hi(lnx_winmask), %l6
- or %l6, %lo(lnx_winmask), %l6
- ldub [%l6 + %l3], %l5
- rd %wim, %l4
- jmp %l1
- rett %l2
+ or %l6, %lo(C_LABEL(sunos_nosys)), %l6
-/* A window spill has occurred. This presents a weird situation, a restore
- * was attempted and a trap occurred. Therefore the restore attempt had no
- * effect on window movement and the trap saved, which means it went in the
- * other direction. :-( We are in a trap window which is two restores away
- * from the window we want to un-invalidate so to speak and three away from
- * the one which will become invalid after this routine. There are probably
- * bugs already this routine. Bugs suck.
- */
+1:
+ set C_LABEL(sunos_sys_table), %l7
+ ld [%l7 + %o0], %l6
-/* This is a very complicated and hairy routine, don't expect to understand
- * it the first time. :>
- */
+2:
+ mov %o1, %o0
+ mov %o2, %o1
+ mov %o3, %o2
+ mov %o4, %o3
+ mov %o5, %o4
+ call %l6
+ mov %l4, %o7
+
+ .align 4
+ .globl C_LABEL(sys_nis_syscall)
+C_LABEL(sys_nis_syscall):
+ mov %o7, %l5
+ add %sp, REGWIN_SZ, %o0 ! pt_regs *regs arg
+ call C_LABEL(c_sys_nis_syscall)
+ mov %l5, %o7
.align 4
- .globl fill_window_entry
-fill_window_entry:
- wr %g0, 0, %wim ! Can not enter invalid register without this.
- andcc %l0, 0x40, %g0 ! From user?
- restore ! restore to where trap occurred
- bz fill_from_user
- restore ! enter invalid register, whee...
- restore %g0, 0x1, %l1 ! enter one-past invalid register
- rd %psr, %l0 ! this is the window we need to save
- and %l0, 0x1f, %l0
- sll %l1, %l0, %l1
- wr %l1, 0x0, %wim
- sethi %hi( C_LABEL(current) ), %l1
- ld [%l1 + %lo( C_LABEL(current) )], %l1
- st %l0, [%l1 + THREAD_WIM]
- save %g0, %g0, %g0 ! back to invalid register
- ldd [%sp], %l0 ! load the window from stack
- ldd [%sp + 8], %l2
- ldd [%sp + 16], %l4
- ldd [%sp + 24], %l6
- ldd [%sp + 32], %i0
- ldd [%sp + 40], %i2
- ldd [%sp + 48], %i4
- ldd [%sp + 56], %i6
- save %g0, %g0, %g0 ! to window where trap happened
- save %g0, %g0, %g0 ! back to trap window, so rett works
- wr %l0, 0x0, %psr ! load condition codes
- nop
- jmp %l1
- rett %l2 ! are you as confused as I am?
+ .globl C_LABEL(sys_ptrace)
+C_LABEL(sys_ptrace):
+ call C_LABEL(do_ptrace)
+ add %sp, REGWIN_SZ, %o0
-fill_from_user:
- andcc %sp, 0x7, %g0 ! check for alignment of user stack
- bne fill_bad_stack
- sra %sp, 0x1e, %l7
- cmp %l7, 0x0
- be,a 1f
- andn %sp, 0xfff, %l7
- cmp %l7, -1
- bne fill_bad_stack
- andn %sp, 0xfff, %l7
-1: lda [%l7] ASI_PTE, %l7
- srl %l7, 0x1d, %l7
- andn %l7, 0x2, %l7
- cmp %l7, 0x4
- bne fill_bad_stack
- and %sp, 0xfff, %l7
- cmp %l7, 0xfc1
- bl,a fill_stack_ok
- restore %g0, 1, %l1
- add %sp, 0x38, %l5
- sra %sp, 0x1e, %l7
- cmp %l7, 0x0
+ ld [%curptr + 0x14], %l5
+ andcc %l5, 0x20, %g0
+ be 1f
+ nop
+
+ call C_LABEL(syscall_trace)
+ nop
+
+1:
+ RESTORE_ALL
+
+ .align 4
+ .globl C_LABEL(sys_execve)
+C_LABEL(sys_execve):
+ mov %o7, %l5
+ add %sp, REGWIN_SZ, %o0 ! pt_regs *regs arg
+ call C_LABEL(sparc_execve)
+ mov %l5, %o7
+
+ .align 4
+ .globl C_LABEL(sys_pipe)
+C_LABEL(sys_pipe):
+ mov %o7, %l5
+ add %sp, REGWIN_SZ, %o0 ! pt_regs *regs arg
+ call C_LABEL(sparc_pipe)
+ mov %l5, %o7
+
+ .align 4
+ .globl C_LABEL(sys_sigpause)
+C_LABEL(sys_sigpause):
+ /* Note: %o0 already has correct value... */
+ call C_LABEL(do_sigpause)
+ add %sp, REGWIN_SZ, %o1
+
+ ld [%curptr + 0x14], %l5
+ andcc %l5, 0x20, %g0
+ be 1f
+ nop
+
+ call C_LABEL(syscall_trace)
+ nop
+
+1:
+ /* We are returning to a signal handler. */
+ RESTORE_ALL
+
+ .align 4
+ .globl C_LABEL(sys_sigsuspend)
+C_LABEL(sys_sigsuspend):
+ call C_LABEL(do_sigsuspend)
+ add %sp, REGWIN_SZ, %o0
+
+ ld [%curptr + 0x14], %l5
+ andcc %l5, 0x20, %g0
+ be 1f
+ nop
+
+ call C_LABEL(syscall_trace)
+ nop
+
+1:
+ /* We are returning to a signal handler. */
+ RESTORE_ALL
+
+ .align 4
+ .globl C_LABEL(sys_sigreturn)
+C_LABEL(sys_sigreturn):
+ call C_LABEL(do_sigreturn)
+ add %sp, REGWIN_SZ, %o0
+
+ ld [%curptr + 0x14], %l5
+ andcc %l5, 0x20, %g0
+ be 1f
+ nop
+
+ call C_LABEL(syscall_trace)
+ nop
+
+1:
+ /* We don't want to muck with user registers like a
+ * normal syscall, just return.
+ */
+ RESTORE_ALL
+
+ /* Now that we have a real sys_clone, sys_fork() is
+ * implemented in terms of it. Our _real_ implementation
+ * of SunOS vfork() will use sys_clone() instead.
+ */
+ .align 4
+ .globl C_LABEL(sys_fork), C_LABEL(sys_vfork), flush_patch_two
+C_LABEL(sys_vfork):
+C_LABEL(sys_fork):
+ mov %o7, %l5
+flush_patch_two:
+ FLUSH_ALL_KERNEL_WINDOWS;
+ rd %psr, %g4
+ mov SIGCHLD, %o0 ! arg0: clone flags
+ rd %wim, %g5
+ mov %fp, %o1 ! arg1: usp
+ std %g4, [%curptr + THREAD_FORK_KPSR]
+ add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
+ call C_LABEL(do_fork)
+ mov %l5, %o7
+
+ /* Whee, kernel threads! */
+ .globl C_LABEL(sys_clone), flush_patch_three
+C_LABEL(sys_clone):
+ mov %o7, %l5
+flush_patch_three:
+ FLUSH_ALL_KERNEL_WINDOWS;
+ rd %psr, %g4
+
+ /* arg0,1: flags,usp -- loaded already */
+ cmp %o1, 0x0 ! Is new_usp NULL?
+ rd %wim, %g5
be,a 1f
- andn %sp, 0xfff, %l7
- cmp %l7, -1
- bne fill_bad_stack
- andn %sp, 0xfff, %l7
-1: lda [%l7] ASI_PTE, %l7
- srl %l7, 0x1d, %l7
- andn %l7, 0x2, %l7
- cmp %l7, 0x4
- be,a fill_stack_ok
- restore %g0, 0x1, %l1
-
-fill_bad_stack:
- save %g0, %g0, %g0 ! save to where restore happened
- save %g0, 0x1, %l4 ! save is an add remember? to trap window
- sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- st %l4, [%l6 + THREAD_UWINDOWS] ! update current->tss values
- ld [%l6 + THREAD_WIM], %l5
- sll %l4, %l5, %l4
- wr %l4, 0x0, %wim
- ld [%l6 + THREAD_KSP], %sp ! set to kernel stack pointer
- wr %l0, 0x20, %psr ! turn off traps
- std %l0, [%sp + C_STACK] ! set up thread_frame on stack
- rd %y, %l3
- std %l2, [%sp + C_STACK + 0x8]
- or %g0, 0x6, %o0 ! so _sparc_trap knows what to do
- st %g1, [%sp + C_STACK + 0x14] ! no need to save %g0, always zero
- or %g0, %l0, %o1
- std %g2, [%sp + C_STACK + 0x18]
- or %g0, %l1, %o2
- std %g4, [%sp + C_STACK + 0x20]
- add %sp, C_STACK, %o3
- std %g6, [%sp + C_STACK + 0x28]
- std %i0, [%sp + C_STACK + 0x30]
- std %i2, [%sp + C_STACK + 0x38]
- std %i4, [%sp + C_STACK + 0x40]
- call sparc_trap
- std %i6, [%sp + C_STACK + 0x48]
+ mov %fp, %o1 ! yes, use callers usp
+ andn %o1, 7, %o1 ! no, align to 8 bytes
+1:
+ std %g4, [%curptr + THREAD_FORK_KPSR]
+ add %sp, REGWIN_SZ, %o2 ! arg2: pt_regs ptr
+ call C_LABEL(do_fork)
+ mov %l5, %o7
+
+ /* Linux native and SunOS system calls enter here... */
+ .align 4
+ .globl linux_sparc_syscall
+linux_sparc_syscall:
+ /* Direct access to user regs, must faster. */
+ cmp %g1, NR_SYSCALLS
+ blu,a 1f
+ sll %g1, 2, %l4
+
+ sethi %hi(C_LABEL(sys_ni_syscall)), %l7
+ b syscall_is_too_hard
+ or %l7, %lo(C_LABEL(sys_ni_syscall)), %l7
+
+1:
+ ld [%l7 + %l4], %l7
+
+ .globl syscall_is_too_hard
+syscall_is_too_hard:
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ ld [%curptr + 0x14], %l5
+ andcc %l5, 0x20, %g0
+ be,a 2f
+ mov %i0, %o0
+
+ call C_LABEL(syscall_trace)
+ nop
+
+ mov %i0, %o0
+2:
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i0, %l5
+ mov %i3, %o3
+ mov %i4, %o4
+ call %l7
+ mov %i5, %o5
+
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+
+ .globl C_LABEL(ret_sys_call)
+C_LABEL(ret_sys_call):
+ ld [%sp + REGWIN_SZ + PT_I0], %o0
+ set PSR_C, %g2
+ cmp %o0, -ENOIOCTLCMD
+ bgeu 1f
+ ld [%sp + REGWIN_SZ + PT_PSR], %g3
+
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ clr %l6
+ b 2f
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
+
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+ or %g3, %g2, %g3
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ mov 1, %l6
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
+
+2:
+ ld [%curptr + 0x14], %g2
+ andcc %g2, 0x20, %g0
+ be,a 3f
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1 /* pc = npc */
+
+ call C_LABEL(syscall_trace)
+ nop
+
+ /* Advance the pc and npc over the trap instruction. */
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1 /* pc = npc */
+3:
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+ /*
+ * Solaris system calls and indirect system calls enter here.
+ *
+ * I have named the solaris indirect syscalls like that because
+ * it seems like Solaris has some fast path syscalls that can
+ * be handled as indirect system calls. - mig
+ */
- ldd [%sp + C_STACK], %l0
- ldd [%sp + C_STACK + 0x8], %l2
- wr %l3, 0, %y
- ld [%sp + C_STACK + 0x14], %g1
- ldd [%sp + C_STACK + 0x18], %g2
- ldd [%sp + C_STACK + 0x20], %g4
- ldd [%sp + C_STACK + 0x28], %g6
- ldd [%sp + C_STACK + 0x30], %i0
- ldd [%sp + C_STACK + 0x38], %i2
- ldd [%sp + C_STACK + 0x40], %i4
- wr %l0, 0, %psr ! disable traps again
- ldd [%sp + C_STACK + 0x48], %i6
- sethi %hi( C_LABEL(current) ), %l6
- ld [%l6 + %lo( C_LABEL(current) )], %l6
- ld [%l6 + THREAD_W_SAVED], %l7
- cmp %l7, 0x0
- bl,a 1f
- wr %g0, 0x0, %wim
- b,a leave_trap
-
-1: or %g0, %g6, %l3
- or %g0, %l6, %g6
- st %g0, [%g6 + THREAD_W_SAVED]
- restore %g0, %g0, %g0
- restore %g0, %g0, %g0
- restore %g0, 0x1, %l1
- rd %psr, %l0
- sll %l1, %l0, %l1
- wr %l1, 0x0, %wim
- and %l0, 0x1f, %l0
- st %l0, [%g6 + THREAD_WIM]
- nop
- save %g0, %g0, %g0
- ldd [%sp], %l0 ! load number one
- ldd [%sp + 0x8], %l2
- ldd [%sp + 0x10], %l4
- ldd [%sp + 0x18], %l6
- ldd [%sp + 0x20], %i0
- ldd [%sp + 0x28], %i2
- ldd [%sp + 0x30], %i4
- ldd [%sp + 0x38], %i6
- save %g0, %g0, %g0
- ldd [%sp], %l0 ! load number two
- ldd [%sp + 0x8], %l2
- ldd [%sp + 0x10], %l4
- ldd [%sp + 0x18], %l6
- ldd [%sp + 0x20], %i0
- ldd [%sp + 0x28], %i2
- ldd [%sp + 0x30], %i4
- ldd [%sp + 0x38], %i6
- save %g0, %g0, %g0 ! re-enter trap window
- wr %l0, 0x0, %psr ! restore condition codes
- or %g0, %l3, %g6 ! restore scratch register
- jmp %l1
- rett %l2
+ .align 4
+ .globl solaris_indirect_syscall
+solaris_indirect_syscall:
+ /* sethi done on the macro */
+ /* or %l7, %lo(C_LABEL(sys_call_table)), %l7; -- really needed? */
+
+ .align 4
+ .globl solaris_syscall
+solaris_syscall:
+ /* Direct access to user regs, must faster. */
+ cmp %g1, NR_SYSCALLS
+ blu,a 1f
+#ifdef OLD_SOLARIS
+ sll %g1, 2, %l4
+#else
+ nop
+#endif
+ sethi %hi(C_LABEL(sys_ni_syscall)), %l7
+ b solaris_is_too_hard
+ or %l7, %lo(C_LABEL(sys_ni_syscall)), %l7
+1:
+#ifdef OLD_SOLARIS
+ ld [%l7 + %l4], %l7
+#endif
+ .globl solaris_is_too_hard
+solaris_is_too_hard:
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_SYSCALL
-fill_stack_ok:
- rd %psr, %l0
- sll %l1, %l0, %l1
- wr %l1, 0x0, %wim
- sethi %hi( C_LABEL(current) ), %l2
- ld [%l2 + %lo( C_LABEL(current) )], %l2
- and %l0, 0x1f, %l0
- st %l0, [%l2 + THREAD_WIM]
- save %g0, %g0, %g0
- ldd [%sp], %l0 ! only one load necessary
- ldd [%sp + 0x8], %l2
- ldd [%sp + 0x10], %l4
- ldd [%sp + 0x18], %l6
- ldd [%sp + 0x20], %i0
- ldd [%sp + 0x28], %i2
- ldd [%sp + 0x30], %i4
- ldd [%sp + 0x38], %i6
- save %g0, %g0, %g0
- save %g0, %g0, %g0 ! save into trap window
- wr %l0, 0x0, %psr ! local number 0 here has cond codes
- nop
- jmp %l1
- rett %l2
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
- .align 4
- .globl trap_entry
-trap_entry:
- TRAP_WIN_CLEAN
- jmp %l1
- rett %l2
+2:
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i0, %l5
+ mov %i3, %o3
+ mov %i4, %o4
+#ifdef OLD_SOLARIS
+ call %l7
+ mov %i5, %o5
+#else
+ mov %i5, %o5
+ call C_LABEL(do_solaris_syscall)
+ add %sp, REGWIN_SZ, %o0
+#endif
- .align 4
- .globl linux_trap_nmi
-linux_trap_nmi:
- TRAP_WIN_CLEAN
- jmp %l1
- rett %l2
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ set PSR_C, %g2
+ cmp %o0, -ENOIOCTLCMD
+ bgeu 1f
+ ld [%sp + REGWIN_SZ + PT_PSR], %g3
- .align 4
- .globl sparc_trap
-sparc_trap:
- TRAP_WIN_CLEAN
- jmp %l1
- rett %l2
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ clr %l6
+ b 2f
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
- .align 4
- .globl leave_trap
-leave_trap:
- jmp %l1
- rett %l2
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+ sethi %hi(C_LABEL(solaris_xlatb_rorl)), %o3
+ or %o3, %lo(C_LABEL(solaris_xlatb_rorl)), %o3
+ sll %o0, 2, %o0
+ ld [%o3 + %o0], %o0
+ mov 1, %l6
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ or %g3, %g2, %g3
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
+
+ /* Advance the pc and npc over the trap instruction. */
+2:
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+ /* {net, open}bsd system calls enter here... */
+ .align 4
+ .globl bsd_syscall
+bsd_syscall:
+ /* Direct access to user regs, must faster. */
+ cmp %g1, NR_SYSCALLS
+ blu,a 1f
+ sll %g1, 2, %l4
-/* The following two things point to window management tables. The first
- one is used to quickly look up how many user windows there are from
- trap-land. The second is used in a trap handler to determine if a rett
- instruction will land us smack inside the invalid window that possibly
- the trap was called to fix-up.
-*/
+ set C_LABEL(sys_ni_syscall), %l7
+ b bsd_is_too_hard
+ nop
-/* For now these are static tables geared for a 7 window sparc. */
+1:
+ ld [%l7 + %l4], %l7
- .data
- .align 4
-lnx_winmask: .byte 2, 4, 8, 16, 32, 64, 128, 1 ! lnx_winmask[0..7]
+ .globl bsd_is_too_hard
+bsd_is_too_hard:
+ rd %wim, %l3
+ SAVE_ALL
+ ENTER_SYSCALL
+
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+2:
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i0, %l5
+ mov %i3, %o3
+ mov %i4, %o4
+ call %l7
+ mov %i5, %o5
+
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ set PSR_C, %g2
+ cmp %o0, -ENOIOCTLCMD
+ bgeu 1f
+ ld [%sp + REGWIN_SZ + PT_PSR], %g3
+
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ clr %l6
+ b 2f
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+#if 0 /* XXX todo XXX */
+ sethi %hi(C_LABEL(bsd_xlatb_rorl), %o3
+ or %o3, %lo(C_LABEL(bsd_xlatb_rorl)), %o3
+ sll %o0, 2, %o0
+ ld [%o3 + %o0], %o0
+#endif
+ mov 1, %l6
+ st %o0, [%sp + REGWIN_SZ + PT_I0]
+ or %g3, %g2, %g3
+ st %g3, [%sp + REGWIN_SZ + PT_PSR]
+
+ /* Advance the pc and npc over the trap instruction. */
+2:
+ ld [%sp + REGWIN_SZ + PT_NPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ st %l1, [%sp + REGWIN_SZ + PT_PC]
+ b ret_trap_entry
+ st %l2, [%sp + REGWIN_SZ + PT_NPC]
+
+/* Saving and restoring the FPU state is best done from lowlevel code.
+ *
+ * void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ * void *fpqueue, unsigned long *fpqdepth)
+ */
+
+ .globl C_LABEL(fpsave)
+C_LABEL(fpsave):
+ st %fsr, [%o1] ! this can trap on us if fpu is in bogon state
+ ld [%o1], %g1
+ set 0x2000, %g4
+ andcc %g1, %g4, %g0
+ be 2f
+ mov 0, %g2
+
+ /* We have an fpqueue to save. */
+1:
+ std %fq, [%o2]
+fpsave_magic:
+ st %fsr, [%o1]
+ ld [%o1], %g3
+ andcc %g3, %g4, %g0
+ add %g2, 1, %g2
+ bne 1b
+ add %o2, 8, %o2
+
+2:
+ st %g2, [%o3]
+
+ std %f0, [%o0 + 0x00]
+ std %f2, [%o0 + 0x08]
+ std %f4, [%o0 + 0x10]
+ std %f6, [%o0 + 0x18]
+ std %f8, [%o0 + 0x20]
+ std %f10, [%o0 + 0x28]
+ std %f12, [%o0 + 0x30]
+ std %f14, [%o0 + 0x38]
+ std %f16, [%o0 + 0x40]
+ std %f18, [%o0 + 0x48]
+ std %f20, [%o0 + 0x50]
+ std %f22, [%o0 + 0x58]
+ std %f24, [%o0 + 0x60]
+ std %f26, [%o0 + 0x68]
+ std %f28, [%o0 + 0x70]
+ retl
+ std %f30, [%o0 + 0x78]
+
+ /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
+ * code for pointing out this possible deadlock, while we save state
+ * above we could trap on the fsr store so our low level fpu trap
+ * code has to know how to deal with this.
+ */
+fpsave_catch:
+ b fpsave_magic + 4
+ st %fsr, [%o1]
+
+fpsave_catch2:
+ b C_LABEL(fpsave) + 4
+ st %fsr, [%o1]
+
+ /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
+
+ .globl C_LABEL(fpload)
+C_LABEL(fpload):
+ ldd [%o0 + 0x00], %f0
+ ldd [%o0 + 0x08], %f2
+ ldd [%o0 + 0x10], %f4
+ ldd [%o0 + 0x18], %f6
+ ldd [%o0 + 0x20], %f8
+ ldd [%o0 + 0x28], %f10
+ ldd [%o0 + 0x30], %f12
+ ldd [%o0 + 0x38], %f14
+ ldd [%o0 + 0x40], %f16
+ ldd [%o0 + 0x48], %f18
+ ldd [%o0 + 0x50], %f20
+ ldd [%o0 + 0x58], %f22
+ ldd [%o0 + 0x60], %f24
+ ldd [%o0 + 0x68], %f26
+ ldd [%o0 + 0x70], %f28
+ ldd [%o0 + 0x78], %f30
+ ld [%o1], %fsr
+ retl
+ nop
+
+ .globl C_LABEL(udelay)
+C_LABEL(udelay):
+ save %sp, -REGWIN_SZ, %sp
+ mov %i0, %o0
+ sethi %hi(0x10c6), %o1
+ call .umul
+ or %o1, %lo(0x10c6), %o1
+#ifndef __SMP__
+ sethi %hi(C_LABEL(loops_per_sec)), %o3
+ call .umul
+ ld [%o3 + %lo(C_LABEL(loops_per_sec))], %o1
+#else
+ GET_PROCESSOR_OFFSET(o4)
+ set C_LABEL(cpu_data), %o3
+ call .umul
+ ld [%o3 + %o4], %o1
+#endif
+
+ cmp %o1, 0x0
+1:
+ bne 1b
+ subcc %o1, 1, %o1
- .align 4
- .globl C_LABEL(sys_call_table)
-C_LABEL(sys_call_table):
- .long C_LABEL(sys_setup) /* 0 */
- .long C_LABEL(sys_exit)
- .long C_LABEL(sys_fork)
- .long C_LABEL(sys_read)
- .long C_LABEL(sys_write)
- .long C_LABEL(sys_open) /* 5 */
- .long C_LABEL(sys_close)
- .long C_LABEL(sys_waitpid)
- .long C_LABEL(sys_creat)
- .long C_LABEL(sys_link)
- .long C_LABEL(sys_unlink) /* 10 */
- .long C_LABEL(sys_execve)
- .long C_LABEL(sys_chdir)
- .long C_LABEL(sys_time)
- .long C_LABEL(sys_mknod)
- .long C_LABEL(sys_chmod) /* 15 */
- .long C_LABEL(sys_chown)
- .long C_LABEL(sys_break)
- .long C_LABEL(sys_stat)
- .long C_LABEL(sys_lseek)
- .long C_LABEL(sys_getpid) /* 20 */
- .long C_LABEL(sys_mount)
- .long C_LABEL(sys_umount)
- .long C_LABEL(sys_setuid)
- .long C_LABEL(sys_getuid)
- .long C_LABEL(sys_stime) /* 25 */
- .long C_LABEL(sys_ni_syscall) /* this will be sys_ptrace() */
- .long C_LABEL(sys_alarm)
- .long C_LABEL(sys_fstat)
- .long C_LABEL(sys_pause)
- .long C_LABEL(sys_utime) /* 30 */
- .long C_LABEL(sys_stty)
- .long C_LABEL(sys_gtty)
- .long C_LABEL(sys_access)
- .long C_LABEL(sys_nice)
- .long C_LABEL(sys_ftime) /* 35 */
- .long C_LABEL(sys_sync)
- .long C_LABEL(sys_kill)
- .long C_LABEL(sys_rename)
- .long C_LABEL(sys_mkdir)
- .long C_LABEL(sys_rmdir) /* 40 */
- .long C_LABEL(sys_dup)
- .long C_LABEL(sys_pipe)
- .long C_LABEL(sys_times)
- .long C_LABEL(sys_prof)
- .long C_LABEL(sys_brk) /* 45 */
- .long C_LABEL(sys_setgid)
- .long C_LABEL(sys_getgid)
- .long C_LABEL(sys_signal)
- .long C_LABEL(sys_geteuid)
- .long C_LABEL(sys_getegid) /* 50 */
- .long C_LABEL(sys_acct)
- .long C_LABEL(sys_phys)
- .long C_LABEL(sys_lock)
- .long C_LABEL(sys_ioctl)
- .long C_LABEL(sys_fcntl) /* 55 */
- .long C_LABEL(sys_mpx)
- .long C_LABEL(sys_setpgid)
- .long C_LABEL(sys_ulimit)
- .long C_LABEL(sys_olduname)
- .long C_LABEL(sys_umask) /* 60 */
- .long C_LABEL(sys_chroot)
- .long C_LABEL(sys_ustat)
- .long C_LABEL(sys_dup2)
- .long C_LABEL(sys_getppid)
- .long C_LABEL(sys_getpgrp) /* 65 */
- .long C_LABEL(sys_setsid)
- .long C_LABEL(sys_sigaction)
- .long C_LABEL(sys_sgetmask)
- .long C_LABEL(sys_ssetmask)
- .long C_LABEL(sys_setreuid) /* 70 */
- .long C_LABEL(sys_setregid)
- .long C_LABEL(sys_sigsuspend)
- .long C_LABEL(sys_sigpending)
- .long C_LABEL(sys_sethostname)
- .long C_LABEL(sys_setrlimit) /* 75 */
- .long C_LABEL(sys_getrlimit)
- .long C_LABEL(sys_getrusage)
- .long C_LABEL(sys_gettimeofday)
- .long C_LABEL(sys_settimeofday)
- .long C_LABEL(sys_getgroups) /* 80 */
- .long C_LABEL(sys_setgroups)
- .long C_LABEL(sys_select)
- .long C_LABEL(sys_symlink)
- .long C_LABEL(sys_lstat)
- .long C_LABEL(sys_readlink) /* 85 */
- .long C_LABEL(sys_uselib)
- .long C_LABEL(sys_swapon)
- .long C_LABEL(sys_reboot)
- .long C_LABEL(sys_readdir)
- .long C_LABEL(sys_mmap) /* 90 */
- .long C_LABEL(sys_munmap)
- .long C_LABEL(sys_truncate)
- .long C_LABEL(sys_ftruncate)
- .long C_LABEL(sys_fchmod)
- .long C_LABEL(sys_fchown) /* 95 */
- .long C_LABEL(sys_getpriority)
- .long C_LABEL(sys_setpriority)
- .long C_LABEL(sys_profil)
- .long C_LABEL(sys_statfs)
- .long C_LABEL(sys_fstatfs) /* 100 */
- .long C_LABEL(sys_ni_syscall)
- .long C_LABEL(sys_socketcall)
- .long C_LABEL(sys_syslog)
- .long C_LABEL(sys_setitimer)
- .long C_LABEL(sys_getitimer) /* 105 */
- .long C_LABEL(sys_newstat)
- .long C_LABEL(sys_newlstat)
- .long C_LABEL(sys_newfstat)
- .long C_LABEL(sys_uname)
- .long C_LABEL(sys_ni_syscall) /* 110 */
- .long C_LABEL(sys_vhangup)
- .long C_LABEL(sys_idle)
- .long C_LABEL(sys_ni_syscall) /* was vm86, meaningless on Sparc */
- .long C_LABEL(sys_wait4)
- .long C_LABEL(sys_swapoff) /* 115 */
- .long C_LABEL(sys_sysinfo)
- .long C_LABEL(sys_ipc)
- .long C_LABEL(sys_fsync)
- .long C_LABEL(sys_sigreturn)
- .long C_LABEL(sys_ni_syscall) /* 120 */
- .long C_LABEL(sys_setdomainname)
- .long C_LABEL(sys_newuname)
- .long C_LABEL(sys_ni_syscall)
- .long C_LABEL(sys_adjtimex)
- .long C_LABEL(sys_mprotect) /* 125 */
- .long C_LABEL(sys_sigprocmask)
- .long C_LABEL(sys_create_module)
- .long C_LABEL(sys_init_module)
- .long C_LABEL(sys_delete_module)
- .long C_LABEL(sys_get_kernel_syms) /* 130 */
- .long C_LABEL(sys_ni_syscall)
- .long C_LABEL(sys_getpgid)
- .long C_LABEL(sys_fchdir)
- .long C_LABEL(sys_bdflush)
- .long C_LABEL(sys_sysfs) /* 135 */
- .long C_LABEL(sys_personality)
- .long 0 /* for afs_syscall */
- .long C_LABEL(sys_setfsuid)
- .long C_LABEL(sys_setfsgid)
- .long C_LABEL(sys_llseek) /* 140 */
+ ret
+ restore
+
+ /* Handle a software breakpoint */
+ /* We have to inform parent that child has stopped */
.align 4
+ .globl breakpoint_trap
+breakpoint_trap:
+ rd %wim,%l3
+ SAVE_ALL
+ ENTER_SYSCALL
+ wr %l0, PSR_ET, %psr
+ WRITE_PAUSE
+
+ st %i0, [%sp + REGWIN_SZ + PT_G0] ! for restarting syscalls
+ call C_LABEL(sparc_breakpoint)
+ add %sp, REGWIN_SZ, %o0
+
+ RESTORE_ALL
+
+ .align 4
+ .globl C_LABEL(__handle_exception), flush_patch_exception
+C_LABEL(__handle_exception):
+flush_patch_exception:
+ FLUSH_ALL_KERNEL_WINDOWS;
+ ldd [%o0], %o6
+ jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h
+ mov 1, %g1 ! signal EFAULT condition
+
+/* End of entry.S */
diff --git a/arch/sparc/kernel/errtbls.c b/arch/sparc/kernel/errtbls.c
new file mode 100644
index 000000000..bb36f6ead
--- /dev/null
+++ b/arch/sparc/kernel/errtbls.c
@@ -0,0 +1,276 @@
+/* $Id: errtbls.c,v 1.2 1995/11/25 00:57:55 davem Exp $
+ * errtbls.c: Error number conversion tables between various syscall
+ * OS semantics.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <asm/bsderrno.h> /* NetBSD (bsd4.4) errnos */
+#include <asm/solerrno.h> /* Solaris errnos */
+
+/* Here are tables which convert between Linux/SunOS error number
+ * values to the equivalent in other OSs. Note that since the Linux
+ * ones have been set up to match exactly those of SunOS, no
+ * translation table is needed for that OS.
+ */
+
+int solaris_errno[] = {
+ 0,
+ SOL_EPERM,
+ SOL_ENOENT,
+ SOL_ESRCH,
+ SOL_EINTR,
+ SOL_EIO,
+ SOL_ENXIO,
+ SOL_E2BIG,
+ SOL_ENOEXEC,
+ SOL_EBADF,
+ SOL_ECHILD,
+ SOL_EAGAIN,
+ SOL_ENOMEM,
+ SOL_EACCES,
+ SOL_EFAULT,
+ SOL_NOTBLK,
+ SOL_EBUSY,
+ SOL_EEXIST,
+ SOL_EXDEV,
+ SOL_ENODEV,
+ SOL_ENOTDIR,
+ SOL_EISDIR,
+ SOL_EINVAL,
+ SOL_ENFILE,
+ SOL_EMFILE,
+ SOL_ENOTTY,
+ SOL_ETXTBSY,
+ SOL_EFBIG,
+ SOL_ENOSPC,
+ SOL_ESPIPE,
+ SOL_EROFS,
+ SOL_EMLINK,
+ SOL_EPIPE,
+ SOL_EDOM,
+ SOL_ERANGE,
+ SOL_EWOULDBLOCK,
+ SOL_EINPROGRESS,
+ SOL_EALREADY,
+ SOL_ENOTSOCK,
+ SOL_EDESTADDRREQ,
+ SOL_EMSGSIZE,
+ SOL_EPROTOTYPE,
+ SOL_ENOPROTOOPT,
+ SOL_EPROTONOSUPPORT,
+ SOL_ESOCKTNOSUPPORT,
+ SOL_EOPNOTSUPP,
+ SOL_EPFNOSUPPORT,
+ SOL_EAFNOSUPPORT,
+ SOL_EADDRINUSE,
+ SOL_EADDRNOTAVAIL,
+ SOL_ENETDOWN,
+ SOL_ENETUNREACH,
+ SOL_ENETRESET,
+ SOL_ECONNABORTED,
+ SOL_ECONNRESET,
+ SOL_ENOBUFS,
+ SOL_EISCONN,
+ SOL_ENOTONN,
+ SOL_ESHUTDOWN,
+ SOL_ETOOMANYREFS,
+ SOL_ETIMEDOUT,
+ SOL_ECONNREFUSED,
+ SOL_ELOOP,
+ SOL_ENAMETOOLONG,
+ SOL_EHOSTDOWN,
+ SOL_EHOSTUNREACH,
+ SOL_ENOTEMPTY,
+ SOL_EPROCLIM,
+ SOL_EUSERS,
+ SOL_EDQUOT,
+ SOL_ESTALE,
+ SOL_EREMOTE,
+ SOL_ENOSTR,
+ SOL_ETIME,
+ SOL_ENOSR,
+ SOL_ENOMSG,
+ SOL_EBADMSG,
+ SOL_IDRM,
+ SOL_EDEADLK,
+ SOL_ENOLCK,
+ SOL_ENONET,
+ SOL_ERREMOTE,
+ SOL_ENOLINK,
+ SOL_EADV,
+ SOL_ESRMNT,
+ SOL_ECOMM,
+ SOL_EPROTO,
+ SOL_EMULTIHOP,
+ SOL_EINVAL, /* EDOTDOT XXX??? */
+ SOL_REMCHG,
+ SOL_NOSYS,
+ SOL_STRPIPE,
+ SOL_EOVERFLOW,
+ SOL_EBADFD,
+ SOL_ECHRNG,
+ SOL_EL2NSYNC,
+ SOL_EL3HLT,
+ SOL_EL3RST,
+ SOL_NRNG,
+ SOL_EUNATCH,
+ SOL_ENOCSI,
+ SOL_EL2HLT,
+ SOL_EBADE,
+ SOL_EBADR,
+ SOL_EXFULL,
+ SOL_ENOANO,
+ SOL_EBADRQC,
+ SOL_EBADSLT,
+ SOL_EDEADLOCK,
+ SOL_EBFONT,
+ SOL_ELIBEXEC,
+ SOL_ENODATA,
+ SOL_ELIBBAD,
+ SOL_ENOPKG,
+ SOL_ELIBACC,
+ SOL_ENOTUNIQ,
+ SOL_ERESTART,
+ SOL_EUCLEAN,
+ SOL_ENOTNAM,
+ SOL_ENAVAIL,
+ SOL_EISNAM,
+ SOL_EREMOTEIO,
+ SOL_EILSEQ,
+ SOL_ELIBMAX,
+ SOL_ELIBSCN,
+};
+
+int netbsd_errno[] = {
+ 0,
+ BSD_EPERM,
+ BSD_ENOENT,
+ BSD_ESRCH,
+ BSD_EINTR,
+ BSD_EIO,
+ BSD_ENXIO,
+ BSD_E2BIG,
+ BSD_ENOEXEC,
+ BSD_EBADF,
+ BSD_ECHILD,
+ BSD_EAGAIN,
+ BSD_ENOMEM,
+ BSD_EACCES,
+ BSD_EFAULT,
+ BSD_NOTBLK,
+ BSD_EBUSY,
+ BSD_EEXIST,
+ BSD_EXDEV,
+ BSD_ENODEV,
+ BSD_ENOTDIR,
+ BSD_EISDIR,
+ BSD_EINVAL,
+ BSD_ENFILE,
+ BSD_EMFILE,
+ BSD_ENOTTY,
+ BSD_ETXTBSY,
+ BSD_EFBIG,
+ BSD_ENOSPC,
+ BSD_ESPIPE,
+ BSD_EROFS,
+ BSD_EMLINK,
+ BSD_EPIPE,
+ BSD_EDOM,
+ BSD_ERANGE,
+ BSD_EWOULDBLOCK,
+ BSD_EINPROGRESS,
+ BSD_EALREADY,
+ BSD_ENOTSOCK,
+ BSD_EDESTADDRREQ,
+ BSD_EMSGSIZE,
+ BSD_EPROTOTYPE,
+ BSD_ENOPROTOOPT,
+ BSD_EPROTONOSUPPORT,
+ BSD_ESOCKTNOSUPPORT,
+ BSD_EOPNOTSUPP,
+ BSD_EPFNOSUPPORT,
+ BSD_EAFNOSUPPORT,
+ BSD_EADDRINUSE,
+ BSD_EADDRNOTAVAIL,
+ BSD_ENETDOWN,
+ BSD_ENETUNREACH,
+ BSD_ENETRESET,
+ BSD_ECONNABORTED,
+ BSD_ECONNRESET,
+ BSD_ENOBUFS,
+ BSD_EISCONN,
+ BSD_ENOTONN,
+ BSD_ESHUTDOWN,
+ BSD_ETOOMANYREFS,
+ BSD_ETIMEDOUT,
+ BSD_ECONNREFUSED,
+ BSD_ELOOP,
+ BSD_ENAMETOOLONG,
+ BSD_EHOSTDOWN,
+ BSD_EHOSTUNREACH,
+ BSD_ENOTEMPTY,
+ BSD_EPROCLIM,
+ BSD_EUSERS,
+ BSD_EDQUOT,
+ BSD_ESTALE,
+ BSD_EREMOTE,
+ BSD_ENOSTR,
+ BSD_ETIME,
+ BSD_ENOSR,
+ BSD_ENOMSG,
+ BSD_EBADMSG,
+ BSD_IDRM,
+ BSD_EDEADLK,
+ BSD_ENOLCK,
+ BSD_ENONET,
+ BSD_ERREMOTE,
+ BSD_ENOLINK,
+ BSD_EADV,
+ BSD_ESRMNT,
+ BSD_ECOMM,
+ BSD_EPROTO,
+ BSD_EMULTIHOP,
+ BSD_EINVAL, /* EDOTDOT XXX??? */
+ BSD_REMCHG,
+ BSD_NOSYS,
+ BSD_STRPIPE,
+ BSD_EOVERFLOW,
+ BSD_EBADFD,
+ BSD_ECHRNG,
+ BSD_EL2NSYNC,
+ BSD_EL3HLT,
+ BSD_EL3RST,
+ BSD_NRNG,
+ BSD_EUNATCH,
+ BSD_ENOCSI,
+ BSD_EL2HLT,
+ BSD_EBADE,
+ BSD_EBADR,
+ BSD_EXFULL,
+ BSD_ENOANO,
+ BSD_EBADRQC,
+ BSD_EBADSLT,
+ BSD_EDEADLOCK,
+ BSD_EBFONT,
+ BSD_ELIBEXEC,
+ BSD_ENODATA,
+ BSD_ELIBBAD,
+ BSD_ENOPKG,
+ BSD_ELIBACC,
+ BSD_ENOTUNIQ,
+ BSD_ERESTART,
+ BSD_EUCLEAN,
+ BSD_ENOTNAM,
+ BSD_ENAVAIL,
+ BSD_EISNAM,
+ BSD_EREMOTEIO,
+ BSD_EILSEQ,
+ BSD_ELIBMAX,
+ BSD_ELIBSCN,
+};
+
diff --git a/arch/sparc/kernel/etrap.S b/arch/sparc/kernel/etrap.S
new file mode 100644
index 000000000..9a628e7f4
--- /dev/null
+++ b/arch/sparc/kernel/etrap.S
@@ -0,0 +1,319 @@
+/* $Id: etrap.S,v 1.21 1996/10/11 00:59:40 davem Exp $
+ * etrap.S: Sparc trap window preparation for entry into the
+ * Linux kernel.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/contregs.h>
+#include <asm/page.h>
+#include <asm/psr.h>
+#include <asm/ptrace.h>
+#include <asm/winmacro.h>
+
+/* Registers to not touch at all. */
+#define t_psr l0 /* Set by caller */
+#define t_pc l1 /* Set by caller */
+#define t_npc l2 /* Set by caller */
+#define t_wim l3 /* Set by caller */
+#define t_twinmask l4 /* Set at beginning of this entry routine. */
+#define t_kstack l5 /* Set right before pt_regs frame is built */
+#define t_retpc l6 /* If you change this, change winmacro.h header file */
+#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
+#define curptr g6 /* Set after pt_regs frame is built */
+
+ .text
+ .align 4
+
+ /* SEVEN WINDOW PATCH INSTRUCTIONS */
+ .globl tsetup_7win_patch1, tsetup_7win_patch2
+ .globl tsetup_7win_patch3, tsetup_7win_patch4
+ .globl tsetup_7win_patch5, tsetup_7win_patch6
+tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch2: and %g2, 0x7f, %g2
+tsetup_7win_patch3: and %g2, 0x7f, %g2
+tsetup_7win_patch4: and %g1, 0x7f, %g1
+tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch6: and %g2, 0x7f, %g2
+ /* END OF PATCH INSTRUCTIONS */
+
+ /* At trap time, interrupts and all generic traps do the
+ * following:
+ *
+ * rd %psr, %l0
+ * b some_handler
+ * rd %wim, %l3
+ * nop
+ *
+ * Then 'some_handler' if it needs a trap frame (ie. it has
+ * to call c-code and the trap cannot be handled in-window)
+ * then it does the SAVE_ALL macro in entry.S which does
+ *
+ * sethi %hi(trap_setup), %l4
+ * jmpl %l4 + %lo(trap_setup), %l6
+ * mov 1, %l4
+ */
+
+ /* 2 3 4 window number
+ * -----
+ * O T S mnemonic
+ *
+ * O == Current window before trap
+ * T == Window entered when trap occurred
+ * S == Window we will need to save if (1<<T) == %wim
+ *
+ * Before execution gets here, it must be guaranteed that
+ * %l0 contains trap time %psr, %l1 and %l2 contain the
+ * trap pc and npc, and %l3 contains the trap time %wim.
+ */
+
+ .globl trap_setup, tsetup_patch1, tsetup_patch2
+ .globl tsetup_patch3, tsetup_patch4
+ .globl tsetup_patch5, tsetup_patch6
+trap_setup:
+ /* Calculate mask of trap window. See if from user
+ * or kernel and branch conditionally.
+ */
+ mov 1, %t_twinmask
+ andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
+ be trap_setup_from_user ! nope, from user mode
+ sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
+
+ /* From kernel, allocate more kernel stack and
+ * build a pt_regs trap frame.
+ */
+ sub %fp, (REGWIN_SZ + TRACEREG_SZ), %t_kstack
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ be 1f
+ nop
+
+ b,a trap_setup_kernel_spill ! in trap window, clean up
+
+ /* Trap from kernel with a window available.
+ * Just do it...
+ */
+1:
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! jump onto new stack
+
+trap_setup_kernel_spill:
+ ld [%curptr + THREAD_UMASK], %g1
+ orcc %g0, %g1, %g0
+ bne trap_setup_user_spill ! there are some user windows, yuck
+ nop
+
+ /* Spill from kernel, but only kernel windows, adjust
+ * %wim and go.
+ */
+ srl %t_wim, 0x1, %g2 ! begin computation of new %wim
+tsetup_patch1: sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
+ or %t_wim, %g2, %g2
+tsetup_patch2: and %g2, 0xff, %g2 ! patched on 7 window Sparcs
+
+ save %g0, %g0, %g0
+
+ /* Set new %wim value */
+ wr %g2, 0x0, %wim
+
+ /* Save the kernel window onto the corresponding stack. */
+ STORE_WINDOW(sp)
+
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! and onto new kernel stack
+
+trap_setup_from_user:
+ /* We can't use %curptr yet. */
+ LOAD_CURRENT(t_kstack, t_twinmask)
+ mov 1, %t_twinmask
+ ld [%t_kstack + TASK_SAVED_KSTACK], %t_kstack
+ sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
+
+ /* Build pt_regs frame. */
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
+
+ /* Clear current->tss.w_saved */
+ LOAD_CURRENT(curptr, g1)
+ st %g0, [%curptr + THREAD_W_SAVED]
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ bne trap_setup_user_spill ! yep we are
+ orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
+
+ /* Trap from user, but not into the invalid window.
+ * Calculate new umask. The way this works is,
+ * any window from the %wim at trap time until
+ * the window right before the one we are in now,
+ * is a user window. A diagram:
+ *
+ * 7 6 5 4 3 2 1 0 window number
+ * ---------------
+ * I L T mnemonic
+ *
+ * Window 'I' is the invalid window in our example,
+ * window 'L' is the window the user was in when
+ * the trap occurred, window T is the trap window
+ * we are in now. So therefore, windows 5, 4 and
+ * 3 are user windows. The following sequence
+ * computes the user winmask to represent this.
+ */
+ subcc %t_wim, %t_twinmask, %g2
+ bneg,a 1f
+ sub %g2, 0x1, %g2
+1:
+ andn %g2, %t_twinmask, %g2
+tsetup_patch3: and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ st %g2, [%curptr + THREAD_UMASK] ! store new umask
+
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! and onto kernel stack
+
+trap_setup_user_spill:
+ /* A spill occurred from either kernel or user mode
+ * and there exist some user windows to deal with.
+ * A mask of the currently valid user windows
+ * is in %g1 upon entry to here.
+ */
+
+tsetup_patch4: and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
+ srl %t_wim, 0x1, %g2 ! compute new %wim
+tsetup_patch5: sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
+ or %t_wim, %g2, %g2 ! %g2 is new %wim
+tsetup_patch6: and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ andn %g1, %g2, %g1 ! clear this bit in %g1
+ st %g1, [%curptr + THREAD_UMASK]
+
+ save %g0, %g0, %g0
+
+ wr %g2, 0x0, %wim
+
+ /* Call MMU-architecture dependent stack checking
+ * routine.
+ */
+ .globl C_LABEL(tsetup_mmu_patchme)
+C_LABEL(tsetup_mmu_patchme): b C_LABEL(tsetup_sun4c_stackchk)
+ andcc %sp, 0x7, %g0
+
+trap_setup_user_stack_is_bolixed:
+ /* From user/kernel into invalid window w/bad user
+ * stack. Save bad user stack, and return to caller.
+ */
+ SAVE_BOLIXED_USER_STACK(curptr, g3)
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+
+trap_setup_good_ustack:
+ STORE_WINDOW(sp)
+
+trap_setup_finish_up:
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+
+ /* Architecture specific stack checking routines. When either
+ * of these routines are called, the globals are free to use
+ * as they have been safely stashed on the new kernel stack
+ * pointer. Thus the definition below for simplicity.
+ */
+#define glob_tmp g1
+
+ .globl C_LABEL(tsetup_sun4c_stackchk)
+C_LABEL(tsetup_sun4c_stackchk):
+ /* Done by caller: andcc %sp, 0x7, %g0 */
+ be 1f
+ sra %sp, 29, %glob_tmp
+
+ b,a trap_setup_user_stack_is_bolixed
+
+1:
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ and %sp, 0xfff, %glob_tmp ! delay slot
+
+ b,a trap_setup_user_stack_is_bolixed
+
+ /* See if our dump area will be on more than one
+ * page.
+ */
+1:
+ add %glob_tmp, 0x38, %glob_tmp
+ andncc %glob_tmp, 0xff8, %g0
+ be tsetup_sun4c_onepage ! only one page to check
+ lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
+
+tsetup_sun4c_twopages:
+ /* Is first page ok permission wise? */
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6
+ be 1f
+ add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
+
+ b,a trap_setup_user_stack_is_bolixed
+
+1:
+ sra %glob_tmp, 29, %glob_tmp
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ add %sp, 0x38, %glob_tmp
+
+ b,a trap_setup_user_stack_is_bolixed
+
+1:
+ lda [%glob_tmp] ASI_PTE, %glob_tmp
+
+tsetup_sun4c_onepage:
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6 ! can user write to it?
+ be trap_setup_good_ustack ! success
+ nop
+
+ b,a trap_setup_user_stack_is_bolixed
+
+ .globl C_LABEL(tsetup_srmmu_stackchk)
+C_LABEL(tsetup_srmmu_stackchk):
+ /* Check results of callers andcc %sp, 0x7, %g0 */
+ sethi %hi(C_LABEL(page_offset)), %glob_tmp
+ be 1f
+ ld [%glob_tmp + %lo(C_LABEL(page_offset))], %glob_tmp
+
+ b,a trap_setup_user_stack_is_bolixed
+1:
+ cmp %glob_tmp, %sp
+ bgu,a 1f
+ lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+
+ b,a trap_setup_user_stack_is_bolixed
+
+1:
+ /* Clear the fault status and turn on the no_fault bit. */
+ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+
+ /* Dump the registers and cross fingers. */
+ STORE_WINDOW(sp)
+
+ /* Clear the no_fault bit and check the status. */
+ andn %glob_tmp, 0x2, %glob_tmp
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS
+ mov AC_M_SFAR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %g0
+ mov AC_M_SFSR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore
+ andcc %glob_tmp, 0x2, %g0 ! did we fault?
+ be,a trap_setup_finish_up + 0x4 ! cool beans, success
+ restore %g0, %g0, %g0
+
+ b,a trap_setup_user_stack_is_bolixed ! we faulted, ugh
diff --git a/arch/sparc/kernel/head.S b/arch/sparc/kernel/head.S
index c3a5453e7..5e620e568 100644
--- a/arch/sparc/kernel/head.S
+++ b/arch/sparc/kernel/head.S
@@ -1,79 +1,44 @@
-/* boot.S: The initial boot code for the Sparc port of Linux.
-
- Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
-
- This file has to serve three purposes.
-
- 1) determine the prom-version and cpu/architecture
- 2) print enough useful info before we start to execute
- c-code that I can possibly begin to debug things
- 3) Hold the vector of trap entry points
-
- The Sparc offers many challenges to kernel design. Here I will
- document those I have come across thus far. Upon bootup the boot
- prom loads your a.out image into memory. This memory the prom has
- already mapped for you in two places, however as far as I can tell
- the virtual address cache is not turned on although the MMU is
- translating things. You get loaded at 0x4000 exactly and you are
- aliased to 0xf8004000 with the appropriate mmu entries. So, when
- you link a boot-loadable object you want to do something like:
-
- ld -e start -Ttext 4000 -o mykernel myobj1.o myobj2.o ....
-
- to produce a proper image.
-
- At boot time you are given (as far as I can tell at this time)
- one key to figure out what machine you are one and what devices
- are available. The prom when it loads you leaves a pointer to
- the 'rom vector' in register %o0 right before it jumps to your
- starting address. This is a pointer to a struct that is full of
- pointer to functions (ie. printf, halt, reboot), pointers to
- linked lists (ie. memory mappings), and pointer to empirical
- constants (ie. stdin and stdout magic cookies + rom version).
- Starting with this piece of information you can figure out
- just about anything you want about the machine you are on.
-
- Although I don't use it now, if you are on a Multiprocessor and
- therefore a v3 or above prom, register %o2 at boot contains a
- function pointer you must call before you proceed to invoke the
- other cpu's on the machine. I have no idea what kind of magic this
- is, give me time.
-*/
+/* $Id: head.S,v 1.70 1996/10/31 06:28:29 davem Exp $
+ * head.S: The initial boot code for the Sparc port of Linux.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Peter Zaitcev (Zaitcev@ipmce.su)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/version.h>
+#include <linux/config.h>
#include <asm/cprefix.h>
#include <asm/head.h>
-#include <asm/version.h>
#include <asm/asi.h>
#include <asm/contregs.h>
+#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/page.h>
+#include <asm/kdebug.h>
+#include <asm/winmacro.h>
.data
-
-/* First thing to go in the data segment is the interrupt stack. */
-
- .globl C_LABEL(intstack)
- .globl C_LABEL(eintstack)
-C_LABEL(intstack):
- .skip 4 * PAGE_SIZE ! 16k = 128 128-byte stack frames
-C_LABEL(eintstack):
-
-
-
/*
- The following are used with the prom_vector node-ops to figure out
- the cpu-type
-*/
+ * The following are used with the prom_vector node-ops to figure out
+ * the cpu-type
+ */
+ .align 4
.globl C_LABEL(cputyp)
-
C_LABEL(cputyp):
.word 1
+ .align 4
+ .globl C_LABEL(cputypval)
C_LABEL(cputypval):
.asciz "sun4c"
.ascii " "
+C_LABEL(cputypvalend):
+C_LABEL(cputypvallen) = C_LABEL(cputypvar) - C_LABEL(cputypval)
+
.align 4
/*
* Sun people can't spell worth damn. "compatability" indeed.
@@ -86,854 +51,1083 @@ C_LABEL(cputypval):
C_LABEL(cputypvar):
.asciz "compatability"
-C_LABEL(cputypvallen) = C_LABEL(cputypvar) - C_LABEL(cputypval)
-
-/* This hold the prom-interface-version number for either v0 or v2. */
-
+/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */
.align 4
- .globl C_LABEL(prom_iface_vers)
-
-C_LABEL(prom_iface_vers): .skip 4
-
-/* WARNING: evil messages follow */
+C_LABEL(cputypvar_sun4m):
+ .asciz "compatible"
.align 4
-
sun4_notsup:
- .asciz "Sparc-Linux: sun4 support not implemented yet\n\n"
- .align 4
-
-sun4m_notsup:
- .asciz "Sparc-Linux: sun4m support does not exist\n\n"
- .align 4
-
-sun4d_notsup:
- .asciz "Sparc-Linux: sun4d support does not exist\n\n"
- .align 4
-
-you_lose:
- .asciz "You lose..... Thanks for playing...\n"
- .align 4
-
-
- .globl boot_msg
-
-/* memory descriptor property strings, v2 = yuk yuk yuk */
-/* XXX how to figure out vm mapped by prom? May have to scan magic addresses */
-
-mem_prop_physavail: .asciz "available"
-
- .align 4
-mem_prop_phystot: .asciz "reg"
-
-/* v2_memory descriptor struct kludged here for assembly, if it ain't broke */
-
- .align 4
-v2_mem_struct: .skip 0xff
-
- .align 4
-v2_printf_physavail: .asciz "Physical Memory Available: 0x%x bytes"
-
- .align 4
-v2_printf_phystot: .asciz "Physical Memory: 0x%x bytes"
-
-/* A place to store property strings returned from the prom 'node' funcs */
-
- .align 4
-prop_string_buf: .skip 32
-
- .align 4
-prop_name: .asciz "name"
-
- .align 4
-current_node: .skip 4
-
-
-/* nice little boot message */
-
- .align 4
-boot_msg:
- .ascii "Booting Sparc-Linux V0.00PRE-ALPHA "
- .ascii WHO_COMPILED_ME
- .ascii "\r\n"
- .align 4
-
- .globl boot_msg2
-
-boot_msg2:
- .asciz "Booting Sparclinux V0.00 PRE-ALPHA on a (SUN4C)\r\n\n"
-
- .align 4
-
-pstring1:
- .asciz "Prom Magic Cookie: 0x%x \n"
+ .asciz "Sparc-Linux sun4 support not implemented yet\n\n"
.align 4
-pstring2:
- .asciz "Interface Version: v%d\n"
+sun4e_notsup:
+ .asciz "Sparc-Linux sun4e support does not exist\n\n"
.align 4
-pstring3:
- .asciz "Prom Revision: V%d\n\n"
+sun4u_notsup:
+ .asciz "Sparc-Linux sun4u support does not exist\n\n"
.align 4
-pstring4:
- .ascii "Total Physical Memory: %d bytes\nVM mapped by Prom: %d bytes\n"
- .asciz "Available Physical Memory: %d bytes\n"
- .align 4
-
-
+ /* The Sparc trap table, bootloader gives us control at _start. */
.text
-
- .globl C_LABEL(msgbuf)
-msgbufsize = PAGE_SIZE ! 1 page for msg buffer
-C_LABEL(msgbuf) = PAGE_SIZE
-
-
-IE_reg_addr = C_LABEL(msgbuf) + msgbufsize ! this page not used; points to IEreg
-
-
-/* Ok, things start to get interesting. We get linked such that 'start'
- is the entry symbol. However, it is real low in kernel address space
- and as such a nifty place to place the trap table. We achieve this goal
- by just jumping to 'gokernel' for the first trap's entry as the sparc
- never receives the zero trap as it is real special (hw reset).
-
- Each trap entry point is the size of 4 sparc instructions (or 4 bytes
- * 4 insns = 16 bytes). There are 128 hardware traps (some undefined
- or unimplemented) and 128 software traps (sys-calls, etc.).
-
- One of the instructions must be a branch. More often than not this
- will be to a trap handler entry point because it is completely
- impossible to handle any trap in 4 insns. I welcome anyone to
- challenge this theory. :-)
-
- On entry into this table the hardware has loaded the program counter
- at which the trap occurred into register %l1 and the next program
- counter into %l2, this way we can return from the trap with a simple
-
- jmp %l1; rett %l2 ! poof...
-
- after properly servicing the trap. It wouldn't be a bad idea to load
- some more information into the local regs since we have technically
- 2 or 3 instructions to play with besides the jmp to the 'real' trap
- handler (one can even go in the delay slot). For now I am going to put
- the %psr (processor status register) and the trap-type value in %l0
- and %l3 respectively. Also, for IRQ's I'll put the level in %l4.
-
-*/
-
- .globl start
- .globl _start /* warning, solaris hack */
+ .globl start, _stext, _start, __stext
.globl C_LABEL(trapbase)
_start: /* danger danger */
+__stext:
+_stext:
start:
C_LABEL(trapbase):
- b gokernel; nop; nop; nop; ! we never get trap #0 it is special
-
- TRAP_ENTRY(0x1, my_trap_handler) /* Instruction Access Exception */
- TRAP_ENTRY(0x2, my_trap_handler) /* Illegal Instruction */
- TRAP_ENTRY(0x3, my_trap_handler) /* Privileged Instruction */
- TRAP_ENTRY(0x4, my_trap_handler) /* Floating Point Disabled */
- TRAP_ENTRY(0x5, spill_window_entry) /* Window Overflow */
- TRAP_ENTRY(0x6, fill_window_entry) /* Window Underflow */
- TRAP_ENTRY(0x7, my_trap_handler) /* Memory Address Not Aligned */
- TRAP_ENTRY(0x8, my_trap_handler) /* Floating Point Exception */
- TRAP_ENTRY(0x9, my_trap_handler) /* Data Miss Exception */
- TRAP_ENTRY(0xa, my_trap_handler) /* Tagged Instruction Overflow */
- TRAP_ENTRY(0xb, my_trap_handler) /* Watchpoint Detected */
- TRAP_ENTRY(0xc, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0xd, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0xe, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0xf, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x10, my_trap_handler) /* Undefined... */
-
-/* Level'd interrupt entry points, see macro defs above */
-
- TRAP_ENTRY_INTERRUPT_SOFT(1, 0x101) /* IRQ Software/SBUS Level 1 */
- TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
- TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
- TRAP_ENTRY_INTERRUPT_SOFT(4, 0x104) /* IRQ Software Level 4 */
- TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
- TRAP_ENTRY_INTERRUPT_SOFT(6, 0x106) /* IRQ Software Level 6 */
- TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
- TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
- TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
- TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 */
- TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
- TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
- TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
- TRAP_ENTRY_TIMER /* IRQ Timer #2 (one we use) */
- TRAP_ENTRY_INTERRUPT_NMI(15, linux_trap_nmi) /* Level 15 (nmi) */
-
- TRAP_ENTRY(0x20, my_trap_handler) /* General Register Access Error */
- TRAP_ENTRY(0x21, my_trap_handler) /* Instruction Access Error */
- TRAP_ENTRY(0x22, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x23, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x24, my_trap_handler) /* Co-Processor Disabled */
- TRAP_ENTRY(0x25, my_trap_handler) /* Unimplemented FLUSH inst. */
- TRAP_ENTRY(0x26, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x27, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x28, my_trap_handler) /* Co-Processor Exception */
- TRAP_ENTRY(0x29, my_trap_handler) /* Data Access Error */
- TRAP_ENTRY(0x2a, my_trap_handler) /* Division by zero, you lose... */
- TRAP_ENTRY(0x2b, my_trap_handler) /* Data Store Error */
- TRAP_ENTRY(0x2c, my_trap_handler) /* Data Access MMU-Miss */
- TRAP_ENTRY(0x2d, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x2e, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x2f, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x30, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x31, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x32, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x33, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x34, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x35, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x36, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x37, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x38, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x39, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x3a, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x3b, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x3c, my_trap_handler) /* Instruction Access MMU-Miss */
- TRAP_ENTRY(0x3d, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x3e, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x3f, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x40, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x41, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x42, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x43, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x44, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x45, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x46, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x47, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x48, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x49, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4a, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4b, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4c, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4d, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4e, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x4f, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x50, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x51, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x52, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x53, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x54, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x55, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x56, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x57, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x58, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x59, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5a, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5b, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5c, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5d, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5e, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x5f, my_trap_handler) /* Undefined... */
- TRAP_ENTRY(0x60, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x61, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x62, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x63, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x64, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x65, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x66, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x67, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x68, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x69, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6a, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6b, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6c, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6d, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6e, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x6f, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x70, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x71, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x72, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x73, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x74, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x75, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x76, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x77, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x78, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x79, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7a, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7b, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7c, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7d, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7e, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x7f, my_trap_handler) /* Impl-Dep Exception */
- TRAP_ENTRY(0x80, my_trap_handler) /* SunOS System Call */
- TRAP_ENTRY(0x81, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x82, my_trap_handler) /* Divide by zero trap XXX */
- TRAP_ENTRY(0x83, my_trap_handler) /* Flush Windows Trap XXX */
- TRAP_ENTRY(0x84, my_trap_handler) /* Clean Windows Trap XXX */
- TRAP_ENTRY(0x85, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x86, my_trap_handler) /* Fix Unaligned Access Trap XXX */
- TRAP_ENTRY(0x87, my_trap_handler) /* Integer Overflow Trap XXX */
- TRAP_ENTRY(0x88, my_trap_handler) /* Slowaris System Call */
- TRAP_ENTRY(0x89, my_trap_handler) /* NetBSD System Call */
- TRAP_ENTRY(0x8a, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x8b, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x8c, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x8d, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x8e, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x8f, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x90, my_trap_handler) /* SparcLinux System Call */
- TRAP_ENTRY(0x91, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x92, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x93, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x94, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x95, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x96, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x97, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x98, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x99, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9a, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9b, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9c, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9d, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9e, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0x9f, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xa9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xaa, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xab, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xac, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xad, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xae, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xaf, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xb9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xba, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xbb, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xbc, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xbd, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xbe, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xbf, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xc9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xca, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xcb, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xcc, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xcd, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xce, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xcf, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xd9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xda, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xdb, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xdc, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xdd, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xde, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xdf, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xe9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xea, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xeb, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xec, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xed, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xee, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xef, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf0, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf1, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf2, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf3, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf4, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf5, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf6, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf7, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf8, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xf9, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xfa, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xfb, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xfc, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xfd, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xfe, my_trap_handler) /* Software Trap */
- TRAP_ENTRY(0xff, my_trap_handler) /* Software Trap */
-
+#ifdef __SMP__
+C_LABEL(trapbase_cpu0):
+#endif
+/* We get control passed to us here at t_zero. */
+t_zero: b gokernel; nop; nop; nop;
+t_tflt: SPARC_TFAULT /* Inst. Access Exception */
+t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */
+t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */
+t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */
+t_wovf: WINDOW_SPILL /* Window Overflow */
+t_wunf: WINDOW_FILL /* Window Underflow */
+t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */
+t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */
+t_dflt: SPARC_DFAULT /* Data Miss Exception */
+t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */
+t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */
+t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */
+t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */
+t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */
+t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */
+t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */
+t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */
+t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */
+t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */
+t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */
+t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */
+t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */
+t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */
+t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */
+t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */
+#ifndef __SMP__
+t_nmi: NMI_TRAP /* Level 15 (NMI) */
+#else
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+#endif
+t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */
+t_iacce:BAD_TRAP(0x21) /* Instr Access Error */
+t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23)
+t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */
+t_uflsh:TRAP_ENTRY(0x25, do_bad_flush) /* Unimplemented FLUSH inst. */
+t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27)
+t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */
+t_dacce:SPARC_DFAULT /* Data Access Error */
+t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... */
+t_dserr:BAD_TRAP(0x2b) /* Data Store Error */
+t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */
+t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */
+t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41)
+t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46)
+t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b)
+t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50)
+t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+t_sunos:SUNOS_SYSCALL_TRAP /* SunOS System Call */
+t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */
+t_divz: BAD_TRAP(0x82) /* Divide by zero trap */
+t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */
+t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */
+t_rchk: BAD_TRAP(0x85) /* Range Check */
+t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */
+t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */
+t_slowl:SOLARIS_SYSCALL_TRAP /* Slowaris System Call */
+t_netbs:NETBSD_SYSCALL_TRAP /* Net-B.S. System Call */
+t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e)
+t_bad8f:BAD_TRAP(0x8f)
+t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */
+t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95)
+t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a)
+t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f)
+t_getcc:GETCC_TRAP /* Get Condition Codes */
+t_setcc:SETCC_TRAP /* Set Condition Codes */
+t_bada2:BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+t_bada7:INDIRECT_SOLARIS_SYSCALL(156)
+t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+t_badfc:BAD_TRAP(0xfc) BAD_TRAP(0xfd)
+dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */
+dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */
+
+ .globl C_LABEL(end_traptable)
+C_LABEL(end_traptable):
+
+#ifdef __SMP__
+ /* Trap tables for the other cpus. */
+ .globl C_LABEL(trapbase_cpu1), C_LABEL(trapbase_cpu2), C_LABEL(trapbase_cpu3)
+C_LABEL(trapbase_cpu1):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP
+ BREAKPOINT_TRAP
+ BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+C_LABEL(trapbase_cpu2):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP
+ BREAKPOINT_TRAP
+ BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+C_LABEL(trapbase_cpu3):
+ BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
+ TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler)
+ WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler)
+ TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT
+ TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint)
+ BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10)
+ TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2)
+ TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4)
+ TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6)
+ TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8)
+ TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10)
+ TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12)
+ TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14)
+ TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m)
+ TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22)
+ BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) TRAP_ENTRY(0x25, do_bad_flush)
+ BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception)
+ SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c)
+ BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31)
+ BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36)
+ BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b)
+ BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40)
+ BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45)
+ BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a)
+ BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f)
+ BAD_TRAP(0x50)
+ BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55)
+ BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a)
+ BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f)
+ BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64)
+ BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69)
+ BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e)
+ BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73)
+ BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78)
+ BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d)
+ BAD_TRAP(0x7e) BAD_TRAP(0x7f)
+ SUNOS_SYSCALL_TRAP
+ BREAKPOINT_TRAP
+ BAD_TRAP(0x82)
+ TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85)
+ BAD_TRAP(0x86) BAD_TRAP(0x87) SOLARIS_SYSCALL_TRAP
+ NETBSD_SYSCALL_TRAP BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c)
+ BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f)
+ LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94)
+ BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99)
+ BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e)
+ BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP
+ BAD_TRAP(0xa2) BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6)
+ INDIRECT_SOLARIS_SYSCALL(156) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab)
+ BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0)
+ BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5)
+ BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba)
+ BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf)
+ BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4)
+ BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9)
+ BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce)
+ BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3)
+ BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8)
+ BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd)
+ BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2)
+ BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7)
+ BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec)
+ BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
+ BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
+ BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
+ BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+
+ .globl C_LABEL(cpu0_stack), C_LABEL(cpu1_stack), C_LABEL(cpu2_stack)
+ .globl C_LABEL(cpu3_stack)
+C_LABEL(cpu0_stack): .skip 0x2000
+C_LABEL(cpu1_stack): .skip 0x2000
+C_LABEL(cpu2_stack): .skip 0x2000
+C_LABEL(cpu3_stack): .skip 0x2000
+#endif
.skip 4096
-C_LABEL(msgbufmapped):
- .word 1
+/* This was the only reasonable way I could think of to properly align
+ * these page-table data structures.
+ */
+ .globl C_LABEL(bootup_user_stack)
+ .globl C_LABEL(bootup_kernel_stack)
+ .globl C_LABEL(pg0)
+ .globl C_LABEL(empty_bad_page)
+ .globl C_LABEL(empty_bad_page_table)
+ .globl C_LABEL(empty_zero_page)
+ .globl C_LABEL(swapper_pg_dir)
+C_LABEL(bootup_user_stack): .skip 0x2000
+C_LABEL(bootup_kernel_stack): .skip 0x2000
+C_LABEL(swapper_pg_dir): .skip 0x1000
+C_LABEL(pg0): .skip 0x1000
+C_LABEL(empty_bad_page): .skip 0x1000
+C_LABEL(empty_bad_page_table): .skip 0x1000
+C_LABEL(empty_zero_page): .skip 0x1000
+ .global C_LABEL(root_flags)
+ .global C_LABEL(ram_flags)
+ .global C_LABEL(root_dev)
+ .global C_LABEL(ramdisk_image)
+ .global C_LABEL(ramdisk_size)
+/* This stuff has to be in sync with SILO and other potential boot loaders
+ * Fields should be kept upward compatible and whenever any change is made,
+ * HdrS version should be incremented.
+ */
+ .ascii "HdrS"
+ .word LINUX_VERSION_CODE
+ .half 0x0201 /* HdrS version */
+C_LABEL(root_flags):
+ .half 1
+C_LABEL(root_dev):
+ .half 0
+C_LABEL(ram_flags):
+ .half 0
+C_LABEL(ramdisk_image):
+ .word 0
+C_LABEL(ramdisk_size):
+ .word 0
+ .word C_LABEL(reboot_command)
/* Cool, here we go. Pick up the romvec pointer in %o0 and stash it in
- %g7 and at _prom_vector_p. And also quickly check whether we are on
- a v0 or v2 prom.
-*/
-
-gokernel: or %g0, %o0, %g7
- sethi %hi( C_LABEL(prom_vector_p) ), %g1
- st %o0, [%g1 + %lo( C_LABEL(prom_vector_p) )] ! we will need it later
- rd %psr, %l2
- rd %wim, %l3
- rd %tbr, %l4
- or %g0, %o2, %l5 ! could be prom magic value...
+ * %g7 and at prom_vector_p. And also quickly check whether we are on
+ * a v0, v2, or v3 prom.
+ */
+gokernel:
+ /* Ok, it's nice to know, as early as possible, if we
+ * are already mapped where we expect to be in virtual
+ * memory. The Solaris /boot elf format bootloader
+ * will peek into our elf header and load us where
+ * we want to be, otherwise we have to re-map.
+ *
+ * Some boot loaders don't place the jmp'rs address
+ * in %o7, so we do a pc-relative call to a local
+ * label, then see what %o7 has.
+ */
+
+ mov %o7, %g4 ! Save %o7
+
+ /* Jump to it, and pray... */
+current_pc:
+ call 1f
+ nop
+
+1:
+ mov %o7, %g3
+
+got_pc:
+ mov %g4, %o7 /* Previous %o7. */
-#if 0 /* You think I'm nutz? */
- subcc %l5, 0x0, %g0 ! check for magic SMP pointer
- bne nosmp
- nop
- call %o2 ! call smp prom setup
- nop
-#endif /* I will be soon... */
+ mov %o0, %l0 ! stash away romvec
+ mov %o0, %g7 ! put it here too
+ mov %o1, %l1 ! stash away debug_vec too
+
+ /* Ok, let's check out our run time program counter. */
+ set current_pc, %g5
+ cmp %g3, %g5
+ be already_mapped
+ nop
+
+ /* %l6 will hold the offset we have to subtract
+ * from absolute symbols in order to access areas
+ * in our own image. If already mapped this is
+ * just plain zero, else it is KERNBASE.
+ */
+ set KERNBASE, %l6
+ b copy_prom_lvl14
+ nop
+
+already_mapped:
+ mov 0, %l6
+
+ /* Copy over the Prom's level 14 clock handler. */
+copy_prom_lvl14:
+#if 1
+ /* DJHR
+ * preserve our linked/calculated instructions
+ */
+ set C_LABEL(lvl14_save), %g1
+ set t_irq14, %g3
+ sub %g1, %l6, %g1 ! translate to physical
+ sub %g3, %l6, %g3 ! translate to physical
+ ldd [%g3], %g4
+ std %g4, [%g1]
+ ldd [%g3+8], %g4
+ std %g4, [%g1+8]
+#endif
+ rd %tbr, %g1
+	andn %g1, 0xfff, %g1 ! prom's trap table base
+ or %g0, (0x1e<<4), %g2 ! offset to lvl14 intr
+ or %g1, %g2, %g2
+ set t_irq14, %g3
+ sub %g3, %l6, %g3
+ ldd [%g2], %g4
+ std %g4, [%g3]
+ ldd [%g2 + 0x8], %g4
+ std %g4, [%g3 + 0x8] ! Copy proms handler
+
+/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT
+ * MMU so we can remap ourselves properly. DON'T TOUCH %l0 thru %l5 in these
+ * remapping routines, we need their values afterwards!
+ */
+ /* Now check whether we are already mapped, if we
+ * are we can skip all this garbage coming up.
+ */
+copy_prom_done:
+ cmp %l6, 0
+ be go_to_highmem ! this will be a nop then
+ nop
+
+ set LOAD_ADDR, %g6
+ cmp %g7, %g6
+ bne remap_not_a_sun4 ! This is not a Sun4
+ nop
+
+ or %g0, 0x1, %g1
+ lduba [%g1] ASI_CONTROL, %g1 ! Only safe to try on Sun4.
+ subcc %g1, 0x24, %g0 ! Is this a mutant Sun4/400???
+ be sun4_mutant_remap ! Ugh, it is...
+ nop
+
+ b sun4_normal_remap ! regular sun4, 2 level mmu
+ nop
+
+remap_not_a_sun4:
+ lda [%g0] ASI_M_MMUREGS, %g1 ! same as ASI_PTE on sun4c
+ and %g1, 0x1, %g1 ! Test SRMMU Enable bit ;-)
+ cmp %g1, 0x0
+ be sun4c_remap ! A sun4c MMU or normal Sun4
+ nop
+srmmu_remap:
+ /* First, check for a viking (TI) module. */
+ set 0x40000000, %g2
+ rd %psr, %g3
+ and %g2, %g3, %g3
+ subcc %g3, 0x0, %g0
+ bz srmmu_nviking
+ nop
+
+ /* Figure out what kind of viking we are on.
+ * We need to know if we have to play with the
+ * AC bit and disable traps or not.
+ */
+
+ /* I've only seen MicroSparc's on SparcClassics with this
+ * bit set.
+ */
+ set 0x800, %g2
+ lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
+ and %g2, %g3, %g3
+ subcc %g3, 0x0, %g0
+ bnz srmmu_nviking ! is in mbus mode
+ nop
+
+ rd %psr, %g3 ! DONT TOUCH %g3
+ andn %g3, PSR_ET, %g2
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+
+ /* Get context table pointer, then convert to
+ * a physical address, which is 36 bits.
+ */
+ set AC_M_CTPR, %g4
+ lda [%g4] ASI_M_MMUREGS, %g4
+ sll %g4, 0x4, %g4 ! We use this below
+ ! DONT TOUCH %g4
+
+ /* Set the AC bit in the Viking's MMU control reg. */
+ lda [%g0] ASI_M_MMUREGS, %g5 ! DONT TOUCH %g5
+ set 0x8000, %g6 ! AC bit mask
+ or %g5, %g6, %g6 ! Or it in...
+ sta %g6, [%g0] ASI_M_MMUREGS ! Close your eyes...
+
+ /* Grrr, why does it seem like every other load/store
+ * on the sun4m is in some ASI space...
+ * Fine with me, let's get the pointer to the level 1
+ * page table directory and fetch its entry.
+ */
+ lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
+ srl %o1, 0x4, %o1 ! Clear low 4 bits
+ sll %o1, 0x8, %o1 ! Make physical
+
+ /* Ok, pull in the PTD. */
+ lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
+
+ /* Calculate to KERNBASE entry.
+ *
+ * XXX Should not use empirical constant, but Gas gets an XXX
+ * XXX upset stomach with the bitshift I would have to use XXX
+ */
+ add %o1, 0x3c0, %o3
+
+ /* Poke the entry into the calculated address. */
+ sta %o2, [%o3] ASI_M_BYPASS
+
+ /* I don't get it Sun, if you engineered all these
+ * boot loaders and the PROM (thank you for the debugging
+ * features btw) why did you not have them load kernel
+ * images up in high address space, since this is necessary
+ * for ABI compliance anyways? Does this low-mapping provide
+ * enhanced interoperability?
+ *
+ * "The PROM is the computer."
+ */
+
+ /* Ok, restore the MMU control register we saved in %g5 */
+ sta %g5, [%g0] ASI_M_MMUREGS ! POW... ouch
+
+ /* Turn traps back on. We saved it in %g3 earlier. */
+ wr %g3, 0x0, %psr ! tick tock, tick tock
+
+ /* Now we burn precious CPU cycles due to bad engineering. */
+ WRITE_PAUSE
+
+ /* Wow, all that just to move a 32-bit value from one
+ * place to another... Jump to high memory.
+ */
+ b go_to_highmem
+ nop
+
+ /* This works on viking's in Mbus mode and all
+ * other MBUS modules. It is virtually the same as
+ * the above madness sans turning traps off and flipping
+ * the AC bit.
+ */
+srmmu_nviking:
+ set AC_M_CTPR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
+ sll %g1, 0x4, %g1 ! make physical addr
+ lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
+ srl %g1, 0x4, %g1
+ sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
+
+ lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
+ add %g1, 0x3c0, %g3 ! XXX AWAY WITH EMPIRICALS
+ sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
+ b go_to_highmem
+ nop ! wheee....
+
+ /* This remaps the kernel on Sun4/4xx machines
+ * that have the Sun Mutant Three Level MMU.
+ * It's like a platypus, Sun didn't have the
+ * SRMMU in conception so they kludged the three
+ * level logic in the regular Sun4 MMU probably.
+ *
+ * Basically, you take each entry in the top level
+ * directory that maps the low 3MB starting at
+ * address zero and put the mapping in the KERNBASE
+ * slots. These top level pgd's are called regmaps.
+ */
+sun4_mutant_remap:
+ or %g0, %g0, %g3 ! source base
+ sethi %hi(KERNBASE), %g4 ! destination base
+ or %g4, %lo(KERNBASE), %g4
+ sethi %hi(0x300000), %g5
+ or %g5, %lo(0x300000), %g5 ! upper bound 3MB
+ or %g0, 0x1, %l6
+ sll %l6, 24, %l6 ! Regmap mapping size
+ add %g3, 0x2, %g3 ! Base magic
+ add %g4, 0x2, %g4 ! Base magic
+
+ /* Main remapping loop on Sun4-Mutant-MMU.
+ * "I am not an animal..." -Famous Mutant Person
+ */
+sun4_mutant_loop:
+ lduha [%g3] ASI_REGMAP, %g2 ! Get lower entry
+ stha %g2, [%g4] ASI_REGMAP ! Store in high entry
+ add %g4, %l6, %g4 ! Move up high memory ptr
+ subcc %g3, %g5, %g0 ! Reached our limit?
+ blu sun4_mutant_loop ! Nope, loop again
+ add %g3, %l6, %g3 ! delay, Move up low ptr
+ b go_to_highmem ! Jump to high memory.
+ nop
+
+ /* The following is for non-4/4xx sun4 MMU's. */
+sun4_normal_remap:
+ mov 0, %g3 ! source base
+ set KERNBASE, %g4 ! destination base
+ set 0x300000, %g5 ! upper bound 3MB
+ mov 1, %l6
+ sll %l6, 18, %l6 ! sun4 mmu segmap size
+sun4_normal_loop:
+ lduha [%g3] ASI_SEGMAP, %g6 ! load phys_seg
+	stha %g6, [%g4] ASI_SEGMAP ! store new virt mapping
+ add %g3, %l6, %g3 ! increment source pointer
+ subcc %g3, %g5, %g0 ! reached limit?
+ blu sun4_normal_loop ! nope, loop again
+ add %g4, %l6, %g4 ! delay, increment dest ptr
+ b go_to_highmem
+ nop
+
+ /* The following works for Sun4c MMU's */
+sun4c_remap:
+ mov 0, %g3 ! source base
+ set KERNBASE, %g4 ! destination base
+ set 0x300000, %g5 ! upper bound 3MB
+ mov 1, %l6
+ sll %l6, 18, %l6 ! sun4c mmu segmap size
+sun4c_remap_loop:
+ lda [%g3] ASI_SEGMAP, %g6 ! load phys_seg
+ sta %g6, [%g4] ASI_SEGMAP ! store new virt mapping
+ add %g3, %l6, %g3 ! Increment source ptr
+ subcc %g3, %g5, %g0 ! Reached limit?
+ bl sun4c_remap_loop ! Nope, loop again
+ add %g4, %l6, %g4 ! delay, Increment dest ptr
+
+/* Now do a non-relative jump so that PC is in high-memory */
+go_to_highmem:
+ set execute_in_high_mem, %g1
+ jmpl %g1, %g0
+ nop
/* Acquire boot time privileged register values, this will help debugging.
- * I figure out and store nwindows later on.
+ * I figure out and store nwindows and nwindowsm1 later on.
*/
+execute_in_high_mem:
+#if CONFIG_AP1000
+ /* we don't have a prom :-( */
+ b sun4m_init
+ nop
+#endif
+ mov %l0, %o0 ! put back romvec
+ mov %l1, %o1 ! and debug_vec
-nosmp: sethi %hi( C_LABEL(boot_psr) ), %l1
- st %l2, [%l1 + %lo( C_LABEL(boot_psr) )]
- sethi %hi( C_LABEL(boot_wim) ), %l1
- st %l3, [%l1 + %lo( C_LABEL(boot_wim) )]
- sethi %hi( C_LABEL(boot_tbr) ), %l1
- st %l4, [%l1 + %lo( C_LABEL(boot_tbr) )]
- sethi %hi( C_LABEL(boot_smp_ptr) ), %l1
- st %l5, [%l1 + %lo( C_LABEL(boot_smp_ptr) )]
+ sethi %hi( C_LABEL(prom_vector_p) ), %g1
+ st %o0, [%g1 + %lo( C_LABEL(prom_vector_p) )]
- or %g0, %o0, %g7
- sethi %hi( C_LABEL(prom_vector_p) ), %g5
- st %o0, [%g5 + %lo( C_LABEL(prom_vector_p) )] ! we will need it later
+ sethi %hi( C_LABEL(linux_dbvec) ), %g1
+ st %o1, [%g1 + %lo( C_LABEL(linux_dbvec) )]
- ld [%g7 + 0x4], %o3
- subcc %o3, 0x2, %g0 ! a v2 prom?
- be found_v2
- nop
+ ld [%o0 + 0x4], %o3
+ and %o3, 0x3, %o5 ! get the version
- /* paul@sfe.com.au */
- subcc %o3, 0x3, %g0 ! a v3 prom?
- or %g0, 0x3, %o5
- sethi %hi(C_LABEL(prom_iface_vers) ), %g1
- st %o5, [%g1 + %lo( C_LABEL(prom_iface_vers) )]
- be not_v2
- nop
+ cmp %o3, 0x2 ! a v2 prom?
+ be found_version
+ nop
+ /* paul@sfe.com.au */
+ cmp %o3, 0x3 ! a v3 prom?
+ be found_version
+ nop
/* Old sun4's pass our load address into %o0 instead of the prom
- pointer. On sun4's you have to hard code the romvec pointer into
- your code. Sun probably still does that because they don't even
- trust their own "OpenBoot" specifications.
-*/
+ * pointer. On sun4's you have to hard code the romvec pointer into
+ * your code. Sun probably still does that because they don't even
+ * trust their own "OpenBoot" specifications.
+ */
- sethi %hi(LOAD_ADDR), %g6
- subcc %o0, %g6, %g0 ! an old sun4?
+ set LOAD_ADDR, %g6
+ cmp %o0, %g6 ! an old sun4?
be no_sun4_here
- nop
-
- sethi %hi( C_LABEL(prom_iface_vers) ), %g1
- st %g0, [%g1 + %lo( C_LABEL(prom_iface_vers) )]
- b not_v2
- nop
-
-found_v2:
- or %g0, 0x2, %o5
- sethi %hi( C_LABEL(prom_iface_vers) ), %g1
- st %o5, [%g1 + %lo( C_LABEL(prom_iface_vers) )]
-
-not_v2:
-
-/* Get the machine type via the mysterious romvec node operations.
- * Here we can find out whether we are on a sun4 sun4c, sun4m, or
- * a sun4m. The "nodes" are set up as a bunch of n-ary trees which
- * you can traverse to get information about devices and such. The
- * information acquisition happens via the node-ops which are defined
- * in the linux_openprom.h header file. Of particular interest is the
- * 'nextnode(int node)' function as it does the smart thing when
- * presented with a value of '0', it gives you the first node in the
- * tree. These node integers probably offset into some internal prom
- * pointer table the openboot has. It's completely undocumented, so
- * I'm not about to go sifting through the prom address space, but may
- * do so if I get suspicious enough. :-)
- */
+ nop
+
+found_version:
+
+/* Get the machine type via the mysterious romvec node operations. */
or %g0, %g7, %l1
add %l1, 0x1c, %l1
ld [%l1], %l0
ld [%l0], %l0
call %l0
- or %g0, %g0, %o0 ! next_node(0) = first_node
+ or %g0, %g0, %o0 ! next_node(0) = first_node
+ or %o0, %g0, %g6
- sethi %hi( C_LABEL(cputypvar) ), %o1 ! first node has cpu-arch
+ sethi %hi( C_LABEL(cputypvar) ), %o1 ! First node has cpu-arch
or %o1, %lo( C_LABEL(cputypvar) ), %o1
sethi %hi( C_LABEL(cputypval) ), %o2 ! information, the string
or %o2, %lo( C_LABEL(cputypval) ), %o2
ld [%l1], %l0 ! 'compatibility' tells
ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where
call %l0 ! x is one of '', 'c', 'm',
- nop ! 'd' or 'e'. %o2 holds pointer
+ nop ! 'd' or 'e'. %o2 holds pointer
! to a buf where above string
! will get stored by the prom.
- sethi %hi( C_LABEL(cputypval) ), %o2 ! better safe than sorry
- or %o2, %lo( C_LABEL(cputypval) ), %o2
- ldub [%o2 + 0x4], %o0
- subcc %o0, 'c', %g0 ! we already know we are not
- be is_sun4c ! on a plain sun4 because of
- nop ! the check for 0x4000 in %o0
- subcc %o0, 'm', %g0 ! at start:
- be is_sun4m
- nop
- b no_sun4d_here ! god bless the person who
- nop ! tried to run this on sun4d
-
-is_sun4m:
-is_sun4c: ! OK, this is a sun4c, yippie
- or %g0, %g7, %g6 ! load up the promvec offsets
- sethi %hi(prom_magic), %g5 ! magic mushroom :>
- st %g6, [%g5 + %lo(prom_magic)]
- add %g7, 0x4, %g6
- sethi %hi(prom_rom_vers), %g5
- st %g6, [%g5 + %lo(prom_rom_vers)]
- add %g7, 0x8, %g6
- sethi %hi(prom_pluginvers), %g5
- st %g6, [%g5 + %lo(prom_pluginvers)]
- add %g7, 0xc, %g6
- sethi %hi(prom_revision), %g5
- st %g6, [%g5 + %lo(prom_revision)]
- add %g7, 0x10, %g6
- sethi %hi(prom_v0mem_desc), %g5
- st %g6, [%g5 + %lo(prom_v0mem_desc)]
- add %g7, 0x1c, %g6
- sethi %hi(prom_nodefuncs), %g5
- st %g6, [%g5 + %lo(prom_nodefuncs)]
- add %g7, 0x68, %g6
- sethi %hi(prom_printf), %g5
- st %g6, [%g5 + %lo(prom_printf)]
- add %g7, 0x6c, %g6
- sethi %hi(prom_abort), %g5
- st %g6, [%g5 + %lo(prom_abort)]
- add %g7, 0x74, %g6
- sethi %hi(prom_halt), %g5
- st %g6, [%g5 + %lo(prom_halt)]
- add %g7, 0x78, %g6
- sethi %hi(prom_sync), %g5
- st %g6, [%g5 + %lo(prom_sync)]
- add %g7, 0x7c, %g6
- sethi %hi(prom_eval), %g5
- st %g6, [%g5 + %lo(prom_eval)]
- add %g7, 0x80, %g6
- sethi %hi(prom_v0bootline), %g6
- st %g6, [%g5 + %lo(prom_v0bootline)]
-
-
-/* That was easy, now lets try to print some message on the screen.
- * We don't have to worry about bad address translations when the prom
- * addresses our pointers because our pointers are at 0x0-kern_size
- * as the prom expects.
- */
-
-/* paul@sfe.com.au */
-/* V3 doesn't have printf.. And I don't really feel like doing the formatting
- * myself.. So we miss out on some messages (for now).
- */
- ld [%g7 + 0x4], %o0
- subcc %o3, 0x3, %g0
- be v3_bootmsg
- nop
-
- sethi %hi(boot_msg), %o0
- or %o0, %lo(boot_msg), %o0
- sethi %hi(prom_printf), %o1
- ld [%o1 + %lo(prom_printf)], %o1
- ld [%o1], %o1
- call %o1 ! print boot message #1
- nop
-
- sethi %hi(pstring1), %o0
- or %o0, %lo(pstring1), %o0
- sethi %hi(prom_printf), %o2
- ld [%o2 + %lo(prom_printf)], %o2
- ld [%o2], %o2
- sethi %hi(prom_magic), %o1
- ld [%o1 + %lo(prom_magic)], %o1
- ld [%o1], %o1
- call %o2
- nop
-
- sethi %hi(pstring2), %o0
- or %o0, %lo(pstring2), %o0
- sethi %hi(prom_printf), %o2
- ld [%o2 + %lo(prom_printf)], %o2
- ld [%o2], %o2
- sethi %hi( C_LABEL(prom_iface_vers) ), %o1
- ld [%o1 + %lo( C_LABEL(prom_iface_vers) )], %o1
- ld [%o1], %o1
- call %o2
- nop
-
- b rest_of_boot
- nop
-
-v3_bootmsg:
- ld [%g7 + 0x94], %o0
- ld [%o0], %o0
- sethi %hi(boot_msg), %o1
- or %o1, %lo(boot_msg), %o1
- mov BOOT_MSG_LEN, %o2
- ld [%g7 + 0xb8], %o4
- call %o4
- nop
-
- ld [%g7 + 0x94], %o0
- ld [%o0], %o0
- sethi %hi(boot_msg2), %o1
- or %o1, %lo(boot_msg2), %o1
- mov BOOT_MSG2_LEN, %o2
- ld [%g7 + 0xb8], %o4
- call %o4
- nop
- b rest_of_boot
- nop
-
+ subcc %o0, %g0, %g0
+ bpos got_prop ! Got the property
+ nop
-no_sun4_here:
- ld [%g7 + 0x68], %o1
- set sun4_notsup, %o0
- call %o1
- nop
+ or %g6, %g0, %o0
+ sethi %hi( C_LABEL(cputypvar_sun4m) ), %o1
+ or %o1, %lo( C_LABEL(cputypvar_sun4m) ), %o1
+ sethi %hi( C_LABEL(cputypval) ), %o2
+ or %o2, %lo( C_LABEL(cputypval) ), %o2
+ ld [%l1], %l0
+ ld [%l0 + 0xc], %l0
+ call %l0
+ nop
-rest_of_boot:
- or %g0, PAGE_SHIFT, %g5
+got_prop:
+ set C_LABEL(cputypval), %o2
+ ldub [%o2 + 0x4], %l1
- sethi %hi(AC_CONTEXT), %g1 ! kernel context, safe now
- ! the only valid context
- ! until we call paging_init()
- stba %g0, [%g1] ASI_CONTROL
+ cmp %l1, 'c' ! We already know we are not
+ be 1f ! on a plain sun4 because of
+ nop ! the check for 0x4000 in %o0
+ cmp %l1, 'm' ! at start
+ be 1f
+ nop
-/* I make the kernel image sit in memory relative to 0x0 with the text
- * starting at 0x4000. Now it looks like the way memory is set in Linux
- * on an ix86.
- */
+ cmp %l1, 'e'
+ be no_sun4e_here ! Could be a sun4e.
+ nop
-/* Uh, oh, interrupt time. This crap is real confusing. What I want to do is
- * clear all interrupts, map the interrupt enable register which in effect
- * enables non-maskable interrupts (or NMI's). Actually we take no interrupts
- * until we frob with the %tbr (trap base register) which the prom has set
- * to all its routines which allows some sanity during bootup.
- */
+ b no_sun4u_here ! AIEEE, a V9 sun4u...
+ nop
- sethi %hi(IE_reg_addr), %l0
- or %l0, %lo(IE_reg_addr), %l0
- set 0xf4000000, %l3
- sethi %hi(INT_ENABLE_REG_PHYSADR), %l2
- or %l2, %lo(INT_ENABLE_REG_PHYSADR), %l2
- srl %l2, %g5, %l2
- or %l2, %l3, %l1
+1:
+ set C_LABEL(cputypval), %l1
+ ldub [%l1 + 0x4], %l1
+ cmp %l1, 'm' ! Test for sun4d, sun4e ?
+ be sun4m_init
+ cmp %l1, 'd' ! Let us see how the beast will die
+ be sun4m_init
+ nop
-#ifndef CONFIG_SRMMU
- sta %l1, [%l0] ASI_PTE
-#endif
-
- or %g0, 0x1, %l1
- stb %l1, [%l0]
-
+ /* Jump into mmu context zero. */
+ set AC_CONTEXT, %g1
+ stba %g0, [%g1] ASI_CONTROL
-/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
- * show-time!
+ b sun4c_continue_boot
+ nop
+
+sun4m_init:
+ /* All sun4m processors can do hw mul/div/rem, patch 'em. */
+#define PATCH_IT(dst, src) \
+ set (dst), %g5; \
+ set (src), %g4; \
+ ld [%g4], %g3; \
+ st %g3, [%g5]; \
+ ld [%g4+0x4], %g3; \
+ st %g3, [%g5+0x4];
+
+ /* Signed multiply. */
+ PATCH_IT(.mul, .mul_patch)
+ PATCH_IT(.mul+0x08, .mul_patch+0x08)
+
+ /* Signed remainder. */
+ PATCH_IT(.rem, .rem_patch)
+ PATCH_IT(.rem+0x08, .rem_patch+0x08)
+ PATCH_IT(.rem+0x10, .rem_patch+0x10)
+ PATCH_IT(.rem+0x18, .rem_patch+0x18)
+ PATCH_IT(.rem+0x20, .rem_patch+0x20)
+ PATCH_IT(.rem+0x28, .rem_patch+0x28)
+
+ /* Signed division. */
+ PATCH_IT(.div, .div_patch)
+ PATCH_IT(.div+0x08, .div_patch+0x08)
+ PATCH_IT(.div+0x10, .div_patch+0x10)
+ PATCH_IT(.div+0x18, .div_patch+0x18)
+ PATCH_IT(.div+0x20, .div_patch+0x20)
+
+ /* Unsigned multiply. */
+ PATCH_IT(.umul, .umul_patch)
+ PATCH_IT(.umul+0x08, .umul_patch+0x08)
+
+ /* Unsigned remainder. */
+ PATCH_IT(.urem, .urem_patch)
+ PATCH_IT(.urem+0x08, .urem_patch+0x08)
+ PATCH_IT(.urem+0x10, .urem_patch+0x10)
+ PATCH_IT(.urem+0x18, .urem_patch+0x18)
+
+ /* Unsigned division. */
+ PATCH_IT(.udiv, .udiv_patch)
+ PATCH_IT(.udiv+0x08, .udiv_patch+0x08)
+ PATCH_IT(.udiv+0x10, .udiv_patch+0x10)
+
+#undef PATCH_IT
+
+/* Ok, the PROM could have done funny things and apple cider could still
+ * be sitting in the fault status/address registers. Read them all to
+ * clear them so we don't get magic faults later on.
*/
+/* This sucks, apparently this makes Vikings call prom panic, will fix later */
- sethi %hi(1f), %g1
- or %g1, %lo(1f), %g1
- jmp %g1
- nop
+ rd %psr, %o1
+ srl %o1, 28, %o1 ! Get a type of the CPU
- .align 4
-1: sethi %hi( C_LABEL(cputyp) ), %o0
- st %g4, [%o0 + %lo( C_LABEL(cputyp) )]
+ subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC
+ be sun4c_continue_boot
+ nop
- sethi %hi( C_LABEL(pgshift) ), %o0
- st %g5, [%o0 + %lo( C_LABEL(pgshift) )]
+ set AC_M_SFSR, %o0
+ lda [%o0] ASI_M_MMUREGS, %g0
+ set AC_M_SFAR, %o0
+ lda [%o0] ASI_M_MMUREGS, %g0
- mov 1, %o0
- sll %o0, %g5, %g5
- sethi %hi( C_LABEL(nbpg) ), %o0
- st %g5, [%o0 + %lo( C_LABEL(nbpg) )]
+ /* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */
+ subcc %o1, 0, %g0
+ be sun4c_continue_boot
+ nop
- sub %g5, 1, %g5
- sethi %hi( C_LABEL(pgofset) ), %o0
- st %g5, [%o0 + %lo( C_LABEL(pgofset) )]
+ set AC_M_AFSR, %o0
+ lda [%o0] ASI_M_MMUREGS, %g0
+ set AC_M_AFAR, %o0
+ lda [%o0] ASI_M_MMUREGS, %g0
+ nop
- rd %psr, %g3
- andn %g3, PSR_ET, %g3
- wr %g3, 0x0, %psr ! make sure traps are off
- ! before we play around
- WRITE_PAUSE ! no guarantees until 3 insns
-
-
- wr %g0, 0x0, %wim ! magical invalid window reg
- WRITE_PAUSE ! see above
+sun4c_continue_boot:
-/* I keep the timer interrupt on so that BogoMIPS works and the prom
- * keeps updating its "jiffies" counter. 100HZ clock on sparcstations.
- */
-/* If gas wasn't so dumb, I could use or'd macros in this next
- * write. ;-( like this (PSR_PS | PSR_S | PSR_PIL)...
+/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
+ * show-time!
*/
- sethi %hi(PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
- or %g2, %lo(PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
+ sethi %hi( C_LABEL(cputyp) ), %o0
+ st %g4, [%o0 + %lo( C_LABEL(cputyp) )]
+
+ /* Turn on Supervisor, EnableFloating, and all the PIL bits.
+ * Also puts us in register window zero with traps off.
+ */
+ set (PSR_PS | PSR_S | PSR_PIL | PSR_EF), %g2
wr %g2, 0x0, %psr
WRITE_PAUSE
- wr %g0, 0x2, %wim ! window 1 invalid
- WRITE_PAUSE
+ /* I want a kernel stack NOW! */
+ set C_LABEL(bootup_user_stack), %g1
+ add %g1, (PAGE_SIZE - REGWIN_SZ), %sp
+ mov 0, %fp /* And for good luck */
- or %g0, 0x1, %g1
- sethi %hi( C_LABEL(current) + THREAD_WIM), %g2
- st %g1, [%g2 + %lo( C_LABEL(current) + THREAD_WIM)]
+ /* Zero out our BSS section. */
+ set C_LABEL(edata) , %o0 ! First address of BSS
+ set C_LABEL(end) , %o1 ! Last address of BSS
+ add %o0, 0x1, %o0
+1:
+ stb %g0, [%o0]
+ subcc %o0, %o1, %g0
+ bl 1b
+ add %o0, 0x1, %o0
-/* I want a kernel stack NOW! */
+ /* Initialize the umask value for init_task just in case.
+ * But first make current_set[0] point to something useful.
+ */
+ set C_LABEL(init_task), %g6
+ set C_LABEL(current_set), %g2
+ st %g6, [%g2]
- set ( C_LABEL(init_user_stack) + 4092 - 96 - 80), %fp
- set ( C_LABEL(init_user_stack) + 4092), %sp
+ set C_LABEL(bootup_kernel_stack), %g3
+ st %g3, [%g6 + TASK_KSTACK_PG]
+ st %g0, [%g6 + THREAD_UMASK]
-/* now out stack is set up similarly to the way it is on the i386 */
+/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
+ * in the V8 manual. Ok, this method seems to work, Sparc is cool...
+ * No, it doesn't work, have to play the save/readCWP/restore trick.
+ */
- rd %psr, %l0
- wr %l0, PSR_ET, %psr
+ wr %g0, 0x0, %wim ! so we dont get a trap
WRITE_PAUSE
-/*
- * Maybe the prom zeroes out our BSS section, maybe it doesn't. I certainly
- * don't know, do you?
- */
+ save
- set C_LABEL(edata) , %o0
- set C_LABEL(end) , %o1
- sub %o1, %o0, %g2
- sethi %hi( C_LABEL(kernel_bss_len) ), %g3
- st %g2, [%g3 + %lo( C_LABEL(kernel_bss_len) )]
- sethi %hi( C_LABEL(trapbase) ), %g3
- or %g3, %lo( C_LABEL(trapbase) ), %g3
- sethi %hi( C_LABEL(etext) ), %g4
- or %g4, %lo( C_LABEL(etext) ), %g4
- sub %g4, %g3, %g2
- sethi %hi( C_LABEL(kernel_text_len) ), %g3
- st %g2, [%g3 + %lo( C_LABEL(kernel_text_len) )]
- sethi %hi( C_LABEL(etext) ), %g4
- or %g4, %lo( C_LABEL(etext) ), %g4
- sethi %hi( C_LABEL(edata) ), %g3
- or %g3, %lo( C_LABEL(edata) ), %g3
- sub %g3, %g4, %g2
- sethi %hi( C_LABEL(kernel_data_len) ), %g3
- st %g2, [%g3 + %lo( C_LABEL(kernel_data_len) )]
- or %g0, %g0, %g1
+ rd %psr, %g3
-1:
- st %g0, [%o0]
- add %o0, 0x4, %o0
- subcc %o0, %o1, %g0
- bl 1b
- nop
+ restore
-/* Compute NWINDOWS and stash it away. Now uses %wim trick explained
- * in the V8 manual. Ok, this method seems to work, sparc is cool...
- */
+ and %g3, 0x1f, %g3
+ add %g3, 0x1, %g3
- sethi %hi(0xffffffff), %g1
- rd %wim, %g2 ! save current value
- or %g1, %lo(0xffffffff), %g1
- wr %g1, 0x0, %wim
- rd %wim, %g1 ! get remaining mask
- wr %g2, 0x0, %wim ! restore old value
+ mov 2, %g1
+ wr %g1, 0x0, %wim ! make window 1 invalid
WRITE_PAUSE
- or %g0, 0x0, %g3
+ cmp %g3, 0x7
+ bne 2f
+ nop
-1: srl %g1, 0x1, %g1 ! shift until highest
- subcc %g1, 0x0, %g0 ! bit set
- bne 1b
- add %g3, 0x1, %g3
+ /* Adjust our window handling routines to
+ * do things correctly on 7 window Sparcs.
+ */
+
+#define PATCH_INSN(src, dest) \
+ set src, %g5; \
+ set dest, %g2; \
+ ld [%g5], %g4; \
+ st %g4, [%g2];
+
+ /* Patch for window spills... */
+ PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
+ PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
+ PATCH_INSN(spnwin_patch3_7win, spnwin_patch3)
+
+ /* Patch for window fills... */
+ PATCH_INSN(fnwin_patch1_7win, fnwin_patch1)
+ PATCH_INSN(fnwin_patch2_7win, fnwin_patch2)
+
+ /* Patch for trap entry setup... */
+ PATCH_INSN(tsetup_7win_patch1, tsetup_patch1)
+ PATCH_INSN(tsetup_7win_patch2, tsetup_patch2)
+ PATCH_INSN(tsetup_7win_patch3, tsetup_patch3)
+ PATCH_INSN(tsetup_7win_patch4, tsetup_patch4)
+ PATCH_INSN(tsetup_7win_patch5, tsetup_patch5)
+ PATCH_INSN(tsetup_7win_patch6, tsetup_patch6)
+
+ /* Patch for returning from traps... */
+ PATCH_INSN(rtrap_7win_patch1, rtrap_patch1)
+ PATCH_INSN(rtrap_7win_patch2, rtrap_patch2)
+ PATCH_INSN(rtrap_7win_patch3, rtrap_patch3)
+ PATCH_INSN(rtrap_7win_patch4, rtrap_patch4)
+ PATCH_INSN(rtrap_7win_patch5, rtrap_patch5)
+
+#ifdef __SMP__
+
+ /* Patch for returning from an ipi... */
+ PATCH_INSN(rirq_7win_patch1, rirq_patch1)
+ PATCH_INSN(rirq_7win_patch2, rirq_patch2)
+ PATCH_INSN(rirq_7win_patch3, rirq_patch3)
+ PATCH_INSN(rirq_7win_patch4, rirq_patch4)
+ PATCH_INSN(rirq_7win_patch5, rirq_patch5)
+
+#endif
+ /* Now patch the kernel window flush sequences.
+ * This saves 2 traps on every switch and fork.
+ */
+ set 0x01000000, %g4
+ set flush_patch_one, %g5
+ st %g4, [%g5 + 0x18]
+ st %g4, [%g5 + 0x1c]
+ set flush_patch_two, %g5
+ st %g4, [%g5 + 0x18]
+ st %g4, [%g5 + 0x1c]
+ set flush_patch_three, %g5
+ st %g4, [%g5 + 0x18]
+ st %g4, [%g5 + 0x1c]
+ set flush_patch_exception, %g5
+ st %g4, [%g5 + 0x18]
+ st %g4, [%g5 + 0x1c]
+ set flush_patch_switch, %g5
+ st %g4, [%g5 + 0x18]
+ st %g4, [%g5 + 0x1c]
+
+2:
sethi %hi( C_LABEL(nwindows) ), %g4
st %g3, [%g4 + %lo( C_LABEL(nwindows) )] ! store final value
sub %g3, 0x1, %g3
sethi %hi( C_LABEL(nwindowsm1) ), %g4
st %g3, [%g4 + %lo( C_LABEL(nwindowsm1) )]
-
-/* Here we go */
-
-#ifndef CONFIG_SUN4M
- /* paul@sfe.com.au */
- /* Look into traps later :( */
+ /* Here we go, start using Linux's trap table... */
set C_LABEL(trapbase), %g3
wr %g3, 0x0, %tbr
WRITE_PAUSE
-#endif
+ /* Finally, turn on traps so that we can call c-code. */
+ rd %psr, %g3
+ wr %g3, 0x0, %psr
+ WRITE_PAUSE
-/* First we call init_prom() to set up romvec, then off to start_kernel() */
-/* XXX put this in arch_init() */
+ wr %g3, PSR_ET, %psr
+ WRITE_PAUSE
+
+ /* First we call prom_init() to set up PROMLIB, then
+ * off to start_kernel().
+ */
sethi %hi( C_LABEL(prom_vector_p) ), %g5
- call C_LABEL(init_prom)
- ld [%g5 + %lo( C_LABEL(prom_vector_p) )], %o0 /* delay slot */
+ ld [%g5 + %lo( C_LABEL(prom_vector_p) )], %o0
+ call C_LABEL(prom_init)
+ nop
call C_LABEL(start_kernel)
- nop
+ nop
+ /* We should not get here. */
call halt_me
- nop
+ nop
+
+/* There, happy now Adrian? */
-/* There, happy now adrian? */
+ /* XXX Fix this... XXX */
+no_sun4_here:
+ sethi %hi(SUN4_PROM_VECTOR+SUN4_PRINTF), %o1
+ ld [%o1 + %lo(SUN4_PROM_VECTOR+SUN4_PRINTF)], %o1
+ set sun4_notsup, %o0
+ call %o1
+ nop
+1:
+ ba 1b ! Cannot exit into KMON
+ nop
-no_sun4d_here:
+no_sun4e_here:
ld [%g7 + 0x68], %o1
- set sun4d_notsup, %o0
+ set sun4e_notsup, %o0
call %o1
- nop
+ nop
b halt_me
- nop
+ nop
+
+no_sun4u_here:
+ ld [%g7 + 0x68], %o1
+ set sun4u_notsup, %o0
+ call %o1
+ nop
+ b halt_me
+ nop
halt_me:
ld [%g7 + 0x74], %o0
- call %o0 ! get us out of here...
- nop ! apparently solaris is better
+ call %o0 ! Get us out of here...
+ nop ! Apparently Solaris is better.
.data
.align 4
@@ -944,102 +1138,37 @@ halt_me:
* gets initialized in c-code so all routines can use it.
*/
- .globl C_LABEL(prom_vector_p)
-
-C_LABEL(prom_vector_p): .skip 4
-prom_magic: .skip 4 ! magic mushroom, beware...
-prom_rom_vers: .skip 4 ! interface version (v0 or v2)
-prom_pluginvers: .skip 4 ! XXX help help help ???
-prom_revision: .skip 4 ! PROM revision (ie. 1.4)
-prom_halt: .skip 4 ! void halt(void) solaris friend
-prom_eval: .skip 4 ! void eval(int len, char* string)
-prom_v0bootline: .skip 4 ! boot command line
-prom_v0mem_desc: .skip 4 ! V0 memory descriptor list ptr.
-prom_nodefuncs: .skip 4 ! Magical Node functions
-prom_printf: .skip 4 ! minimal printf()
-
-/* The prom_abort pointer MUST be mapped in all contexts, because if you
- * don't then if a user process is running when you press the abort key
- * sequence, all sorts of bad things can happen
- */
-
-prom_abort: .skip 4 ! L1-A magic cookie
- ! must be mapped in ALL contexts
-
-/* prom_sync is a place where the kernel should place a pointer to a kernel
- * function that when called will sync all pending information to the drives
- * and then promptly return. If the kernel gets aborted with 'L1-A' one can
- * give the 'sync' command to the boot prompt and this magic cookie gets
- * executed. Nice feature eh?
- */
-
-prom_sync: .skip 4 ! hook in prom for sync func
-
- .align 4
+ .globl C_LABEL(prom_vector_p)
+C_LABEL(prom_vector_p):
+ .word 0
/* We calculate the following at boot time, window fills/spills and trap entry
* code uses these to keep track of the register windows.
*/
- .globl C_LABEL(nwindows)
- .globl C_LABEL(nwindowsm1)
-C_LABEL(nwindows): .skip 4
-C_LABEL(nwindowsm1): .skip 4
-
.align 4
-/* Boot time privileged register values, plus magic %o2 value */
-
- .globl C_LABEL(boot_wim)
- .globl C_LABEL(boot_psr)
- .globl C_LABEL(boot_tbr)
- .globl C_LABEL(boot_smp_ptr)
-C_LABEL(boot_wim): .skip 4
-C_LABEL(boot_psr): .skip 4
-C_LABEL(boot_tbr): .skip 4
-C_LABEL(boot_smp_ptr): .skip 4
+ .globl C_LABEL(nwindows)
+ .globl C_LABEL(nwindowsm1)
+C_LABEL(nwindows):
+ .word 8
+C_LABEL(nwindowsm1):
+ .word 7
+/* Boot time debugger vector value. We need this later on. */
.align 4
-/* Miscellaneous pieces of information saved at kernel startup. */
- .globl C_LABEL(kernel_text_len)
- .globl C_LABEL(kernel_data_len)
- .globl C_LABEL(kernel_bss_len)
-C_LABEL(kernel_text_len): .word 0
-C_LABEL(kernel_data_len): .word 0
-C_LABEL(kernel_bss_len): .word 0
-
-/* These are for page alignment/offset information as they change from
- machine to machine.
-*/
-
- .globl C_LABEL(pgshift)
- .globl C_LABEL(nbpg)
- .globl C_LABEL(pgofset)
-
- .align 4
-C_LABEL(pgshift):
- .word 1
-C_LABEL(nbpg):
- .word 1
-C_LABEL(pgofset):
- .word 1
+ .globl C_LABEL(linux_dbvec)
+C_LABEL(linux_dbvec):
+ .word 0
+ .word 0
-/* Just to get the kernel through the compiler for now */
- .globl C_LABEL(swapper_pg_dir), C_LABEL(pg0)
- .globl C_LABEL(empty_bad_page)
- .globl C_LABEL(empty_bad_page_table)
- .globl C_LABEL(empty_zero_page)
- .globl C_LABEL(floppy_track_buffer)
-C_LABEL(floppy_track_buffer):
- .fill 512*2*36,1,0
+ .align 8
- .align 4
-C_LABEL(swapper_pg_dir): .skip 0x1000
-C_LABEL(pg0): .skip 0x1000
-C_LABEL(empty_bad_page): .skip 0x1000
-C_LABEL(empty_bad_page_table): .skip 0x1000
-C_LABEL(empty_zero_page): .skip 0x1000
+ .globl C_LABEL(lvl14_save)
+C_LABEL(lvl14_save):
+ .word 0
+ .word 0
+ .word 0
+ .word 0
+ .word t_irq14
- .align 4
-diagstr: .asciz "DIAG\n"
- .align 4
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index c12f7467b..e48c3de05 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -1,183 +1,100 @@
-/* idprom.c: Routines to load the idprom into kernel addresses and
+/* $Id: idprom.c,v 1.21 1996/10/12 13:12:48 davem Exp $
+ * idprom.c: Routines to load the idprom into kernel addresses and
* interpret the data contained within.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
+#include <linux/types.h>
-#include <asm/types.h>
-#include <asm/openprom.h>
+#include <asm/oplib.h>
#include <asm/idprom.h>
+#include <asm/machines.h> /* Fun with Sun released architectures. */
-struct idp_struct idprom;
-extern int num_segmaps, num_contexts;
+struct idprom *idprom;
+static struct idprom idprom_buffer;
-void get_idprom(void)
-{
- char* idp_addr;
- char* knl_idp_addr;
- int i;
-
- idp_addr = (char *)IDPROM_ADDR;
- knl_idp_addr = (char *) &idprom;
-
- for(i = 0; i<IDPROM_SIZE; i++)
- *knl_idp_addr++ = *idp_addr++;
-
- return;
-}
-
-/* find_vac_size() returns the number of bytes in the VAC (virtual
- * address cache) on this machine.
+/* Here is the master table of Sun machines which use some implementation
+ * of the Sparc CPU and have a meaningful IDPROM machtype value that we
+ * know about. See asm-sparc/machines.h for empirical constants.
*/
-
-int
-find_vac_size(void)
+struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
+/* First, Sun4's */
+{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
+{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },
+{ "Sun 4/300 Series", (SM_SUN4 | SM_4_330) },
+{ "Sun 4/400 Series", (SM_SUN4 | SM_4_470) },
+/* Now, Sun4c's */
+{ "Sun4c SparcStation 1", (SM_SUN4C | SM_4C_SS1) },
+{ "Sun4c SparcStation IPC", (SM_SUN4C | SM_4C_IPC) },
+{ "Sun4c SparcStation 1+", (SM_SUN4C | SM_4C_SS1PLUS) },
+{ "Sun4c SparcStation SLC", (SM_SUN4C | SM_4C_SLC) },
+{ "Sun4c SparcStation 2", (SM_SUN4C | SM_4C_SS2) },
+{ "Sun4c SparcStation ELC", (SM_SUN4C | SM_4C_ELC) },
+{ "Sun4c SparcStation IPX", (SM_SUN4C | SM_4C_IPX) },
+/* Finally, early Sun4m's */
+{ "Sun4m SparcSystem600", (SM_SUN4M | SM_4M_SS60) },
+{ "Sun4m SparcStation10", (SM_SUN4M | SM_4M_SS50) },
+{ "Sun4m SparcStation5", (SM_SUN4M | SM_4M_SS40) },
+/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
+{ "Sun4M OBP based system", (SM_SUN4M_OBP | 0x0) } };
+
+static void display_system_type(unsigned char machtype)
{
- int vac_prop_len;
- int vacsize = 0;
- int node_root;
-
- node_root = (*(romvec->pv_nodeops->no_nextnode))(0);
-
- vac_prop_len = (*(romvec->pv_nodeops->no_proplen))(node_root, "vac-size");
-
- if(vac_prop_len != -1)
- {
- (*(romvec->pv_nodeops->no_getprop))(node_root, "vac-size", (char *) &vacsize);
- return vacsize;
- }
- else
- {
-
- /* The prom node functions can't help, do it via idprom struct */
- switch(idprom.id_machtype)
- {
- case 0x51:
- case 0x52:
- case 0x53:
- case 0x54:
- case 0x55:
- case 0x56:
- case 0x57:
- return 65536;
- default:
- return -1;
+ char sysname[128];
+ register int i;
+
+ for (i = 0; i < NUM_SUN_MACHINES; i++) {
+ if(Sun_Machines[i].id_machtype == machtype) {
+ if (machtype != (SM_SUN4M_OBP | 0x00))
+ printk("TYPE: %s\n", Sun_Machines[i].name);
+ else {
+ prom_getproperty(prom_root_node, "banner-name",
+ sysname, sizeof(sysname));
+ printk("TYPE: %s\n", sysname);
+ }
+ return;
+ }
}
- };
-}
-/* find_vac_linesize() returns the size in bytes of the VAC linesize */
-
-int
-find_vac_linesize(void)
-{
- int vac_prop_len;
- int vaclinesize = 0;
- int node_root;
-
- node_root = (*(romvec->pv_nodeops->no_nextnode))(0);
-
- vac_prop_len = (*(romvec->pv_nodeops->no_proplen))(node_root, "vac-linesize");
-
- if(vac_prop_len != -1)
- {
- (*(romvec->pv_nodeops->no_getprop))(node_root, "vac-linesize",
- (char *) &vaclinesize);
- return vaclinesize;
- }
- else
- {
-
- /* The prom node functions can't help, do it via idprom struct */
- switch(idprom.id_machtype)
- {
- case 0x51:
- case 0x52:
- case 0x53:
- case 0x54:
- return 16;
- case 0x55:
- case 0x56:
- case 0x57:
- return 32;
- default:
- return -1;
- }
- };
+ prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
+ prom_halt();
}
-int
-find_vac_hwflushes(void)
+/* Calculate the IDPROM checksum (xor of the data bytes). */
+static unsigned char calc_idprom_cksum(struct idprom *idprom)
{
- register int len, node_root;
- int tmp1, tmp2;
-
- node_root = (*(romvec->pv_nodeops->no_nextnode))(0);
-
- len = (*(romvec->pv_nodeops->no_proplen))(node_root, "vac_hwflush");
-
-#ifdef DEBUG_IDPROM
- printf("DEBUG: find_vac_hwflushes: proplen vac_hwflush=0x%x\n", len);
-#endif
-
- /* Sun 4/75 has typo in prom_node, it's a dash instead of an underscore
- * in the property name. :-(
- */
- len |= (*(romvec->pv_nodeops->no_proplen))(node_root, "vac-hwflush");
+ unsigned char cksum, i, *ptr = (unsigned char *)idprom;
-#ifdef DEBUG_IDPROM
- printf("DEBUG: find_vac_hwflushes: proplen vac-hwflush=0x%x\n", len);
-#endif
+ for (i = cksum = 0; i <= 0x0E; i++)
+ cksum ^= *ptr++;
- len = (*(romvec->pv_nodeops->no_getprop))(node_root,"vac_hwflush",
- (char *) &tmp1);
- if(len != 4) tmp1=0;
-
- len = (*(romvec->pv_nodeops->no_getprop))(node_root, "vac-hwflush",
- (char *) &tmp2);
- if(len != 4) tmp2=0;
-
-
- return (tmp1|tmp2);
-}
-
-void
-find_mmu_num_segmaps(void)
-{
- register int root_node, len;
-
- root_node = (*(romvec->pv_nodeops->no_nextnode))(0);
-
- len = (*(romvec->pv_nodeops->no_getprop))(root_node, "mmu-npmg",
- (char *) &num_segmaps);
-
-#ifdef DEBUG_MMU
- printf("find_mmu_num_segmaps: property length = %d\n", len);
-#endif
-
- if(len != 4) num_segmaps = 128;
-
- return;
+ return cksum;
}
-void
-find_mmu_num_contexts(void)
+/* Create a local IDPROM copy, verify integrity, and display information. */
+void idprom_init(void)
{
- register int root_node, len;
+ prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
- root_node = (*(romvec->pv_nodeops->no_nextnode))(0);
+ idprom = &idprom_buffer;
- len = (*(romvec->pv_nodeops->no_getprop))(root_node, "mmu-nctx",
- (char *) &num_contexts);
+ if (idprom->id_format != 0x01) {
+ prom_printf("IDPROM: Unknown format type!\n");
+ prom_halt();
+ }
-#ifdef DEBUG_MMU
- printf("find_mmu_num_contexts: property length = %d\n", len);
-#endif
+ if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
+ prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
+ idprom->id_cksum, calc_idprom_cksum(idprom));
+ prom_halt();
+ }
- if(len != 4) num_contexts = 8;
+ display_system_type(idprom->id_machtype);
- return;
+ printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
+ idprom->id_ethaddr[0], idprom->id_ethaddr[1],
+ idprom->id_ethaddr[2], idprom->id_ethaddr[3],
+ idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
}
-
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index effa6c25e..b9cc0cc38 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -1,8 +1,14 @@
-/* ioport.c: I/O access on the Sparc. Work in progress.. Most of the things
- * in this file are for the sole purpose of getting the kernel
- * through the compiler. :-)
+/* $Id: ioport.c,v 1.22 1996/10/11 00:59:46 davem Exp $
+ * ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * The routines in this file should be changed for a memory allocator
+ * that would be setup just like NetBSD does : you create regions that
+ * are administered by a general purpose allocator, and then you call
+ * that allocator with your handle and the block size instead of this
+ * weak stuff.
*/
#include <linux/sched.h>
@@ -10,3 +16,124 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/ioport.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/vaddrs.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* This points to the next to use virtual memory for io mappings */
+static unsigned long dvma_next_free = DVMA_VADDR;
+unsigned long sparc_iobase_vaddr = IOBASE_VADDR;
+
+/*
+ * sparc_alloc_io:
+ * Map and allocates an obio device.
+ * Implements a simple linear allocator, you can force the function
+ * to use your own mapping, but in practice this should not be used.
+ *
+ * Input:
+ * address: the obio address to map
+ * virtual: if non zero, specifies a fixed virtual address where
+ * the mapping should take place.
+ * len: the length of the mapping
+ * bus_type: The bus on which this io area sits.
+ *
+ * Returns:
+ * The virtual address where the mapping actually took place.
+ */
+
+void *sparc_alloc_io (void *address, void *virtual, int len, char *name,
+ int bus_type, int rdonly)
+{
+ unsigned long vaddr, base_address;
+ unsigned long addr = (unsigned long) address;
+ unsigned long offset = (addr & (~PAGE_MASK));
+
+ if (virtual) {
+ vaddr = (unsigned long) virtual;
+
+ len += offset;
+ if(((unsigned long) virtual + len) > (IOBASE_VADDR + IOBASE_LEN)) {
+ prom_printf("alloc_io: Mapping outside IOBASE area\n");
+ prom_halt();
+ }
+ if(check_region ((vaddr | offset), len)) {
+ prom_printf("alloc_io: 0x%lx is already in use\n", vaddr);
+ prom_halt();
+ }
+
+ /* Tell Linux resource manager about the mapping */
+ request_region ((vaddr | offset), len, name);
+ } else {
+ vaddr = occupy_region(sparc_iobase_vaddr, IOBASE_END,
+ (offset + len + PAGE_SIZE-1) & PAGE_MASK, PAGE_SIZE, name);
+ if (vaddr == 0) {
+ /* Usually we cannot see printks in this case. */
+ prom_printf("alloc_io: cannot occupy %d region\n", len);
+ prom_halt();
+ }
+ }
+
+ base_address = vaddr;
+ /* Do the actual mapping */
+ for (; len > 0; len -= PAGE_SIZE) {
+ mapioaddr(addr, vaddr, bus_type, rdonly);
+ vaddr += PAGE_SIZE;
+ addr += PAGE_SIZE;
+ }
+
+ return (void *) (base_address | offset);
+}
+
+void sparc_free_io (void *virtual, int len)
+{
+ unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
+ unsigned long plen = (((unsigned long)virtual & ~PAGE_MASK) + len + PAGE_SIZE-1) & PAGE_MASK;
+
+ release_region(vaddr, plen);
+
+ for (; plen != 0;) {
+ plen -= PAGE_SIZE;
+ unmapioaddr(vaddr + plen);
+ }
+}
+
+/* Does DVMA allocations with PAGE_SIZE granularity. How this basically
+ * works is that the ESP chip can do DVMA transfers at ANY address with
+ * certain size and boundary restrictions. But other devices that are
+ * attached to it and would like to do DVMA have to set things up in
+ * a special way, if the DVMA sees a device attached to it transfer data
+ * at addresses above DVMA_VADDR it will grab them, this way it does not
+ * now have to know the peculiarities of where to read the Lance data
+ * from. (for example)
+ */
+void *sparc_dvma_malloc (int len, char *name)
+{
+ unsigned long vaddr, base_address;
+
+ vaddr = dvma_next_free;
+ if(check_region (vaddr, len)) {
+ prom_printf("alloc_dma: 0x%lx is already in use\n", vaddr);
+ prom_halt();
+ }
+ if(vaddr + len > (DVMA_VADDR + DVMA_LEN)) {
+ prom_printf("alloc_dvma: out of dvma memory\n");
+ prom_halt();
+ }
+
+ /* Basically these can be mapped just like any old
+ * IO pages, cacheable bit off, etc. The physical
+ * pages are now mapped dynamically to save space.
+ */
+ base_address = vaddr;
+ mmu_map_dma_area(base_address, len);
+ /* Assign the memory area. */
+ dvma_next_free = PAGE_ALIGN(dvma_next_free+len);
+
+ request_region(base_address, len, name);
+
+ return (void *) base_address;
+}
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 5dcaf1971..cb2a72638 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -1,22 +1,13 @@
-/* arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
+/* $Id: irq.c,v 1.53 1996/10/16 12:30:18 zaitcev Exp $
+ * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
* Sparc the IRQ's are basically 'cast in stone'
* and you are supposed to probe the prom's device
* node trees to find out who's got which IRQ.
*
- * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
- *
- */
-
-/*
- * IRQ's are in fact implemented a bit like signal handlers for the kernel.
- * The same sigaction struct is used, and with similar semantics (ie there
- * is a SA_INTERRUPT flag etc). Naturally it's not a 1:1 relation, but there
- * are similarities.
- *
- * sa_handler(int irq_NR) is the default function called (0 if no).
- * sa_mask is horribly ugly (I won't even mention it)
- * sa_flags contains various info: SA_INTERRUPT etc
- * sa_restorer is the unused
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
#include <linux/config.h>
@@ -27,309 +18,398 @@
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/random.h>
+
#include <asm/ptrace.h>
+#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
+#include <asm/smp.h>
#include <asm/vaddrs.h>
-#include <asm/clock.h>
+#include <asm/timer.h>
#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/traps.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
-#define DEBUG_IRQ
+/*
+ * Dave Redman (djhr@tadpole.co.uk)
+ *
+ * IRQ numbers.. These are no longer restricted to 15..
+ *
+ * this is done to enable SBUS cards and onboard IO to be masked
+ * correctly. using the interrupt level isn't good enough.
+ *
+ * For example:
+ * A device interrupting at sbus level6 and the Floppy both come in
+ * at IRQ11, but enabling and disabling them requires writing to
+ * different bits in the SLAVIO/SEC.
+ *
+ * As a result of these changes sun4m machines could now support
+ * directed CPU interrupts using the existing enable/disable irq code
+ * with tweaks.
+ *
+ */
-void disable_irq(unsigned int irq_nr)
+static void irq_panic(void)
{
- unsigned long flags;
- unsigned char *int_reg;
-
- save_flags(flags);
- cli();
-
- /* We have mapped the irq enable register in head.S and all we
- * have to do here is frob the bits.
- */
-
- int_reg = (unsigned char *) IRQ_ENA_ADR;
-
- switch(irq_nr)
- {
- case 1:
- *int_reg = ((*int_reg) & (~(0x02)));
- break;
- case 4:
- *int_reg = ((*int_reg) & (~(0x04)));
- break;
- case 6:
- *int_reg = ((*int_reg) & (~(0x08)));
- break;
- case 8:
- *int_reg = ((*int_reg) & (~(0x10)));
- break;
- case 10:
- *int_reg = ((*int_reg) & (~(0x20)));
- break;
- case 14:
- *int_reg = ((*int_reg) & (~(0x80)));
- break;
- default:
- printk("AIEEE, Illegal interrupt disable requested irq=%d\n",
- (int) irq_nr);
- break;
- };
-
- restore_flags(flags);
- return;
+ extern char *cputypval;
+ prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
+ prom_halt();
}
-void enable_irq(unsigned int irq_nr)
-{
- unsigned long flags;
- unsigned char *int_reg;
-
- save_flags(flags);
- cli();
-
- /* We have mapped the irq enable register in head.S and all we
- * have to do here is frob the bits.
- */
-
- int_reg = (unsigned char *) IRQ_ENA_ADR;
-
-#ifdef DEBUG_IRQ
- printk(" --- Enabling IRQ level %d ---\n", irq_nr);
+void (*enable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
+void (*disable_irq)(unsigned int) = (void (*)(unsigned int)) irq_panic;
+void (*clear_clock_irq)( void ) = irq_panic;
+void (*clear_profile_irq)( void ) = irq_panic;
+void (*load_profile_irq)( unsigned int ) = (void (*)(unsigned int)) irq_panic;
+void (*init_timers)( void (*)(int, void *,struct pt_regs *)) =
+ (void (*)( void (*)(int, void *,struct pt_regs *))) irq_panic;
+
+#ifdef __SMP__
+void (*set_cpu_int)(int, int);
+void (*clear_cpu_int)(int, int);
+void (*set_irq_udt)(int);
#endif
- switch(irq_nr)
- {
- case 1:
- *int_reg = ((*int_reg) | 0x02);
- break;
- case 4:
- *int_reg = ((*int_reg) | 0x04);
- break;
- case 6:
- *int_reg = ((*int_reg) | 0x08);
- break;
- case 8:
- *int_reg = ((*int_reg) | 0x10);
- break;
- case 10:
- *int_reg = ((*int_reg) | 0x20);
- break;
- case 14:
- *int_reg = ((*int_reg) | 0x80);
- break;
- default:
- printk("AIEEE, Illegal interrupt enable requested irq=%d\n",
- (int) irq_nr);
- break;
- };
-
- restore_flags(flags);
-
- return;
-}
-
/*
- * Initial irq handlers.
+ * Dave Redman (djhr@tadpole.co.uk)
+ *
+ * There used to be extern calls and hard coded values here.. very sucky!
+ * instead, because some of the devices attach very early, I do something
+ * equally sucky but at least we'll never try to free statically allocated
+ * space or call kmalloc before kmalloc_init :(.
+ *
+ * In fact it's the timer10 that attaches first.. then timer14
+ * then kmalloc_init is called.. then the tty interrupts attach.
+ * hmmm....
+ *
*/
-struct irqaction {
- void (*handler)(int, struct pt_regs *);
- unsigned long flags;
- unsigned long mask;
- const char *name;
-};
+#define MAX_STATIC_ALLOC 4
+static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
+static int static_irq_count = 0;
-static struct irqaction irq_action[16] = {
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL },
- { NULL, 0, 0, NULL }, { NULL, 0, 0, NULL }
+static struct irqaction *irq_action[NR_IRQS+1] = {
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};
-
int get_irq_list(char *buf)
{
- int i, len = 0;
- struct irqaction * action = irq_action;
-
- for (i = 0 ; i < 16 ; i++, action++) {
- if (!action->handler)
- continue;
- len += sprintf(buf+len, "%2d: %8d %c %s\n",
- i, kstat.interrupts[i],
- (action->flags & SA_INTERRUPT) ? '+' : ' ',
- action->name);
- }
- return len;
+ int i, len = 0;
+ struct irqaction * action;
+
+ for (i = 0 ; i < (NR_IRQS+1) ; i++) {
+ action = *(i + irq_action);
+ if (!action)
+ continue;
+ len += sprintf(buf+len, "%2d: %8d %c %s",
+ i, kstat.interrupts[i],
+ (action->flags & SA_INTERRUPT) ? '+' : ' ',
+ action->name);
+ for (action=action->next; action; action = action->next) {
+ len += sprintf(buf+len, ",%s %s",
+ (action->flags & SA_INTERRUPT) ? " +" : "",
+ action->name);
+ }
+ len += sprintf(buf+len, "\n");
+ }
+ return len;
}
-void free_irq(unsigned int irq)
+void free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action = irq + irq_action;
+ struct irqaction * action;
+ struct irqaction * tmp = NULL;
unsigned long flags;
-
- if (irq > 14) { /* 14 irq levels on the sparc */
- printk("Trying to free IRQ %d\n", irq);
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+ if (cpu_irq > 14) { /* 14 irq levels on the sparc */
+ printk("Trying to free bogus IRQ %d\n", irq);
return;
}
- if (!action->handler) {
- printk("Trying to free free IRQ%d\n", irq);
- return;
- }
- save_flags(flags);
- cli();
- disable_irq(irq);
- action->handler = NULL;
- action->flags = 0;
- action->mask = 0;
- action->name = NULL;
- restore_flags(flags);
-}
+ if (!action->handler) {
+ printk("Trying to free free IRQ%d\n",irq);
+ return;
+ }
+ if (dev_id) {
+ for (; action; action = action->next) {
+ if (action->dev_id == dev_id) break;
+ tmp = action;
+ }
+ if (!action) {
+ printk("Trying to free free shared IRQ%d\n",irq);
+ return;
+ }
+ } else if (action->flags & SA_SHIRQ) {
+ printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
+ return;
+ }
+ if (action->flags & SA_STATIC_ALLOC)
+ {
+ /* This interrupt is marked as specially allocated
+ * so it is a bad idea to free it.
+ */
+ printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ irq, action->name);
+ return;
+ }
+
+ save_and_cli(flags);
+ if (action && tmp)
+ tmp->next = action->next;
+ else
+ *(cpu_irq + irq_action) = action->next;
+
+ kfree_s(action, sizeof(struct irqaction));
+
+ if (!(*(cpu_irq + irq_action)))
+ disable_irq(irq);
-#if 0
-static void handle_nmi(struct pt_regs * regs)
-{
- printk("NMI, probably due to bus-parity error.\n");
- printk("PC=%08lx, SP=%08lx\n", regs->pc, regs->sp);
+ restore_flags(flags);
}
-#endif
-void unexpected_irq(int irq, struct pt_regs * regs)
+void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
int i;
+ struct irqaction * action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %08lx NPC = %08lx SP=%08lx\n", regs->pc,
- regs->npc, regs->sp);
- printk("Expecting: ");
- for (i = 0; i < 16; i++)
- if (irq_action[i].handler)
- printk("[%s:%d] ", irq_action[i].name, i);
+ printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
+ regs->npc, regs->u_regs[14]);
+ if (action) {
+ printk("Expecting: ");
+ for (i = 0; i < 16; i++)
+ if (action->handler)
+ prom_printf("[%s:%d:0x%x] ", action->name,
+ (int) i, (unsigned int) action->handler);
+ }
printk("AIEEE\n");
+ panic("bogus interrupt received");
}
-static inline void handler_irq(int irq, struct pt_regs * regs)
+void handler_irq(int irq, struct pt_regs * regs)
{
- struct irqaction * action = irq + irq_action;
-
- if (!action->handler) {
- unexpected_irq(irq, regs);
- return;
- }
- action->handler(irq, regs);
+ struct irqaction * action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+ kstat.interrupts[cpu_irq]++;
+#if 0
+ printk("I<%d,%d,%d>", smp_processor_id(), irq, smp_proc_in_lock[smp_processor_id()]);
+#endif
+ do {
+ if (!action || !action->handler)
+ unexpected_irq(irq, 0, regs);
+ action->handler(irq, action->dev_id, regs);
+ action = action->next;
+ } while (action);
}
-/*
- * do_IRQ handles IRQ's that have been installed without the
- * SA_INTERRUPT flag: it uses the full signal-handling return
- * and runs with other interrupts enabled. All relatively slow
- * IRQ's should use this format: notably the keyboard/timer
- * routines.
+/* Fast IRQ's on the Sparc can only have one routine attached to them,
+ * thus no sharing possible.
*/
-asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
+int request_fast_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char *devname)
{
- struct irqaction *action = irq + irq_action;
-
- kstat.interrupts[irq]++;
- action->handler(irq, regs);
- return;
+ struct irqaction *action;
+ unsigned long flags;
+ unsigned int cpu_irq;
+#ifdef __SMP__
+ struct tt_entry *trap_table;
+ extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
+#endif
+
+ cpu_irq = irq & NR_IRQS;
+ if(cpu_irq > 14)
+ return -EINVAL;
+ if(!handler)
+ return -EINVAL;
+ action = *(cpu_irq + irq_action);
+ if(action) {
+ if(action->flags & SA_SHIRQ)
+ panic("Trying to register fast irq when already shared.\n");
+ if(irqflags & SA_SHIRQ)
+ panic("Trying to register fast irq as shared.\n");
+
+ /* Anyway, someone already owns it so cannot be made fast. */
+ return -EBUSY;
+ }
+
+ save_and_cli(flags);
+
+ /* If this is flagged as statically allocated then we use our
+ * private struct which is never freed.
+ */
+ if (irqflags & SA_STATIC_ALLOC)
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
+
+ if (action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+
+ if (!action) {
+ restore_flags(flags);
+ return -ENOMEM;
+ }
+
+ /* Dork with trap table if we get this far. */
+#define INSTANTIATE(table) \
+ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
+ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
+ SPARC_BRANCH((unsigned long) handler, \
+ (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
+ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
+ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
+
+ INSTANTIATE(sparc_ttable)
+#ifdef __SMP__
+ trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
+#endif
+#undef INSTANTIATE
+ /*
+ * XXX Correct thing whould be to flush only I- and D-cache lines
+ * which contain the handler in question. But as of time of the
+ * writing we have no CPU-neutral interface to fine-grained flushes.
+ */
+ flush_cache_all();
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->dev_id = NULL;
+
+ *(cpu_irq + irq_action) = action;
+
+ enable_irq(irq);
+ restore_flags(flags);
+ return 0;
}
-/*
- * Since we need to special things to clear up the clock chip around
- * the do_timer() call we have a special version of do_IRQ for the
- * level 14 interrupt which does these things.
- */
-
-asmlinkage void do_sparc_timer(int irq, struct pt_regs * regs)
+int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
{
- struct irqaction *action = irq + irq_action;
- register volatile int clear;
-
- kstat.interrupts[irq]++;
-
- /* I do the following already in the entry code, better safe than
- * sorry for now. Reading the limit register clears the interrupt.
- */
- clear = TIMER_STRUCT->timer_limit14;
-
- action->handler(irq, regs);
- return;
+ struct irqaction * action, *tmp = NULL;
+ unsigned long flags;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ if(cpu_irq > 14)
+ return -EINVAL;
+
+ if (!handler)
+ return -EINVAL;
+ action = *(cpu_irq + irq_action);
+ if (action) {
+ if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
+ for (tmp = action; tmp->next; tmp = tmp->next);
+ } else {
+ return -EBUSY;
+ }
+ if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
+ printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
+ return -EBUSY;
+ }
+ }
+
+ save_and_cli(flags);
+
+ /* If this is flagged as statically allocated then we use our
+ * private struct which is never freed.
+ */
+ if (irqflags & SA_STATIC_ALLOC)
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",irq, devname);
+
+ if (action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+
+ if (!action) {
+ restore_flags(flags);
+ return -ENOMEM;
+ }
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->mask = 0;
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+ if (tmp)
+ tmp->next = action;
+ else
+ *(cpu_irq + irq_action) = action;
+
+ enable_irq(irq);
+ restore_flags(flags);
+ return 0;
}
-/*
- * do_fast_IRQ handles IRQ's that don't need the fancy interrupt return
- * stuff - the handler is also running with interrupts disabled unless
- * it explicitly enables them later.
+/* We really don't need these at all on the Sparc. We only have
+ * stubs here because they are exported to modules.
*/
-asmlinkage void do_fast_IRQ(int irq)
-{
- kstat.interrupts[irq]++;
- printk("Got FAST_IRQ number %04lx\n", (long unsigned int) irq);
- return;
-}
-
-extern int first_descent;
-extern void probe_clock(int);
-
-int request_irq(unsigned int irq, void (*handler)(int, struct pt_regs *),
- unsigned long irqflags, const char * devname)
+unsigned long probe_irq_on(void)
{
- struct irqaction *action;
- unsigned long flags;
-
- if(irq > 14) /* Only levels 1-14 are valid on the Sparc. */
- return -EINVAL;
-
- if(irq == 0) /* sched_init() requesting the timer IRQ */
- {
- irq = 14;
- probe_clock(first_descent);
- }
-
- action = irq + irq_action;
-
- if(action->handler)
- return -EBUSY;
-
- if(!handler)
- return -EINVAL;
-
- save_flags(flags);
-
- cli();
-
- action->handler = handler;
- action->flags = irqflags;
- action->mask = 0;
- action->name = devname;
-
- enable_irq(irq);
-
- restore_flags(flags);
-
return 0;
}
-unsigned int probe_irq_on (void)
+int probe_irq_off(unsigned long mask)
{
- unsigned int irqs = 0;
-
- return irqs;
+ return 0;
}
-int probe_irq_off (unsigned int irqs)
-{
- unsigned int i = 0;
-
- return i;
-}
+/* djhr
+ * This could probably be made indirect too and assigned in the CPU
+ * bits of the code. That would be much nicer I think and would also
+ * fit in with the idea of being able to tune your kernel for your machine
+ * by removing unrequired machine and device support.
+ *
+ */
void init_IRQ(void)
{
- return;
+ extern void sun4c_init_IRQ( void );
+ extern void sun4m_init_IRQ( void );
+#if CONFIG_AP1000
+ extern void ap_init_IRQ(void);
+ ap_init_IRQ();
+ return;
+#endif
+
+ switch(sparc_cpu_model) {
+ case sun4c:
+ sun4c_init_IRQ();
+ break;
+
+ case sun4m:
+ sun4m_init_IRQ();
+ break;
+
+ default:
+ prom_printf("Cannot initialize IRQ's on this Sun machine...");
+ break;
+ }
}
diff --git a/arch/sparc/kernel/ksyms.c b/arch/sparc/kernel/ksyms.c
new file mode 100644
index 000000000..ee30edf21
--- /dev/null
+++ b/arch/sparc/kernel/ksyms.c
@@ -0,0 +1,34 @@
+/* $Id: ksyms.c,v 1.1 1996/02/25 06:30:18 davem Exp $
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+/* We really haven't played around with modules at all in our
+ * port, but this is here as a starting point for when we do.
+ * One thing to note is that the way the symbols of the mul/div
+ * support routines are named is a mess, they all start with
+ * a '.' which makes it a bitch to export, we'll see.
+ */
+
+extern void bcopy (const char *src, char *dst, int len);
+extern void * memmove(void *,const void *,size_t);
+extern void * memcpy(void *,const void *,size_t);
+
+static struct symbol_table arch_symbol_table = {
+#include <linux/symtab_begin.h>
+ /* platform dependent support */
+ X(bcopy),
+ X(memmove),
+ X(memcpy),
+#include <linux/symtab_end.h>
+};
+
+void arch_syms_export(void)
+{
+ register_symtab(&arch_symbol_table);
+}
diff --git a/arch/sparc/kernel/probe.c b/arch/sparc/kernel/probe.c
deleted file mode 100644
index 462556164..000000000
--- a/arch/sparc/kernel/probe.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/* probe.c: Preliminary device tree probing routines...
-
- Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
-*/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <asm/vac-ops.h>
-#include <asm/io.h>
-#include <asm/vaddrs.h>
-#include <asm/param.h>
-#include <asm/clock.h>
-#include <asm/system.h>
-
-/* #define DEBUG_PROBING */
-
-char promstr_buf[64]; /* overkill */
-unsigned int promint_buf[1];
-
-extern int prom_node_root;
-extern int num_segmaps, num_contexts;
-
-extern int node_get_sibling(int node);
-extern int node_get_child(int node);
-extern char* get_str_from_prom(int node, char* name, char* value);
-extern unsigned int* get_int_from_prom(int node, char* name, unsigned int *value);
-
-int first_descent;
-
-/* Cpu-type information and manufacturer strings */
-
-
-struct cpu_iu_info {
- int psr_impl;
- int psr_vers;
- char* cpu_name; /* should be enough I hope... */
-};
-
-struct cpu_fp_info {
- int psr_impl;
- int fp_vers;
- char* fp_name;
-};
-
-struct cpu_fp_info linux_sparc_fpu[] = {
- { 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
- { 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5"},
- { 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
- { 0, 3, "Weitek WTL3170/2"},
- { 0, 4, "Lsi Logic/Meiko L64804"},
- { 0, 5, "reserved"},
- { 0, 6, "reserved"},
- { 0, 7, "No FPU"},
- { 1, 0, "Lsi Logic L64812 or Texas Instruments ACT8847"},
- { 1, 1, "Lsi Logic L64814"},
- { 1, 2, "Texas Instruments TMS390-C602A"},
- { 1, 3, "Weitek WTL3171"},
- { 1, 4, "reserved"},
- { 1, 5, "reserved"},
- { 1, 6, "reserved"},
- { 1, 7, "No FPU"},
- { 2, 0, "BIT B5010 or B5110/20 or B5210"},
- { 2, 1, "reserved"},
- { 2, 2, "reserved"},
- { 2, 3, "reserved"},
- { 2, 4, "reserved"},
- { 2, 5, "reserved"},
- { 2, 6, "reserved"},
- { 2, 7, "No FPU"},
- { 5, 0, "Matsushita MN10501"},
- { 5, 1, "reserved"},
- { 5, 2, "reserved"},
- { 5, 3, "reserved"},
- { 5, 4, "reserved"},
- { 5, 5, "reserved"},
- { 5, 6, "reserved"},
- { 5, 7, "No FPU"},
-};
-
-struct cpu_iu_info linux_sparc_chips[] = {
- { 0, 0, "Fujitsu Microelectronics, Inc. - MB86900/1A"},
- { 1, 0, "Cypress CY7C601"},
- { 1, 1, "LSI Logic Corporation - L64811"},
- { 1, 3, "Cypress CY7C611"},
- { 2, 0, "Bipolar Integrated Technology - B5010"},
- { 3, 0, "LSI Logic Corporation - unknown-type"},
- { 4, 0, "Texas Instruments, Inc. - unknown"},
- { 4, 1, "Texas Instruments, Inc. - Sparc Classic"},
- { 4, 2, "Texas Instruments, Inc. - unknown"},
- { 4, 3, "Texas Instruments, Inc. - unknown"},
- { 4, 4, "Texas Instruments, Inc. - unknown"},
- { 4, 5, "Texas Instruments, Inc. - unknown"},
- { 5, 0, "Matsushita - MN10501"},
- { 6, 0, "Philips Corporation - unknown"},
- { 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
- { 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
- { 9, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
- { 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
-};
-
-char *sparc_cpu_type = "cpu-oops";
-char *sparc_fpu_type = "fpu-oops";
-
-/* various Virtual Address Cache parameters we find at boot time... */
-
-extern int vac_size, vac_linesize, vac_do_hw_vac_flushes;
-extern int vac_entries_per_context, vac_entries_per_segment;
-extern int vac_entries_per_page;
-
-extern int find_vac_size(void);
-extern int find_vac_linesize(void);
-extern int find_vac_hwflushes(void);
-extern void find_mmu_num_segmaps(void);
-extern void find_mmu_num_contexts(void);
-
-void
-probe_cpu(void)
-{
- register int psr_impl=0;
- register int psr_vers = 0;
- register int fpu_vers = 0;
- register int i = 0;
- unsigned int tmp_fsr;
-
- &tmp_fsr; /* GCC grrr... */
-
- __asm__("rd %%psr, %0\n\t"
- "mov %0, %1\n\t"
- "srl %0, 28, %0\n\t"
- "srl %1, 24, %1\n\t"
- "and %0, 0xf, %0\n\t"
- "and %1, 0xf, %1\n\t" :
- "=r" (psr_impl),
- "=r" (psr_vers) :
- "0" (psr_impl),
- "1" (psr_vers));
-
-
- __asm__("st %%fsr, %1\n\t"
- "ld %1, %0\n\t"
- "srl %0, 17, %0\n\t"
- "and %0, 0x7, %0\n\t" :
- "=r" (fpu_vers),
- "=m" (tmp_fsr) :
- "0" (fpu_vers),
- "1" (tmp_fsr));
-
- printk("fpu_vers: %d ", fpu_vers);
- printk("psr_impl: %d ", psr_impl);
- printk("psr_vers: %d \n\n", psr_vers);
-
- for(i = 0; i<23; i++)
- {
- if(linux_sparc_chips[i].psr_impl == psr_impl)
- if(linux_sparc_chips[i].psr_vers == psr_vers)
- {
- sparc_cpu_type = linux_sparc_chips[i].cpu_name;
- break;
- }
- }
-
- if(i==23)
- {
- printk("No CPU type! You lose\n");
- printk("DEBUG: psr.impl = 0x%x psr.vers = 0x%x\n", psr_impl,
- psr_vers);
- return;
- }
-
- for(i = 0; i<32; i++)
- {
- if(linux_sparc_fpu[i].psr_impl == psr_impl)
- if(linux_sparc_fpu[i].fp_vers == fpu_vers)
- {
- sparc_fpu_type = linux_sparc_fpu[i].fp_name;
- break;
- }
- }
-
- if(i == 32)
- {
- printk("No FPU type! You don't completely lose though...\n");
- printk("DEBUG: psr.impl = 0x%x fsr.vers = 0x%x\n", psr_impl, fpu_vers);
- sparc_fpu_type = linux_sparc_fpu[31].fp_name;
- }
-
- printk("CPU: %s \n", sparc_cpu_type);
- printk("FPU: %s \n", sparc_fpu_type);
-
- return;
-}
-
-void
-probe_vac(void)
-{
- register unsigned int x,y;
-
-#ifndef CONFIG_SRMMU
- vac_size = find_vac_size();
- vac_linesize = find_vac_linesize();
- vac_do_hw_vac_flushes = find_vac_hwflushes();
-
- /* Calculate various constants that make the cache-flushing code
- * mode speedy.
- */
-
- vac_entries_per_segment = vac_entries_per_context = vac_size >> 12;
-
- for(x=0,y=vac_linesize; ((1<<x)<y); x++);
- if((1<<x) != vac_linesize) printk("Warning BOGUS VAC linesize 0x%x",
- vac_size);
-
- vac_entries_per_page = x;
-
- printk("Sparc VAC cache: Size=%d bytes Line-Size=%d bytes ... ", vac_size,
- vac_linesize);
-
- /* Here we want to 'invalidate' all the software VAC "tags"
- * just in case there is garbage in there. Then we enable it.
- */
-
- for(x=0x80000000, y=(x+vac_size); x<y; x+=vac_linesize)
- __asm__("sta %0, [%1] %2" : : "r" (0), "r" (x), "n" (0x2));
-
- x=enable_vac();
- printk("ENABLED\n");
-#endif
-
- return;
-}
-
-void
-probe_mmu(void)
-{
- find_mmu_num_segmaps();
- find_mmu_num_contexts();
-
- printk("MMU segmaps: %d MMU contexts: %d\n", num_segmaps,
- num_contexts);
-
- return;
-}
-
-void
-probe_clock(int fchild)
-{
- register int node, type;
- register char *node_str;
-
- /* This will basically traverse the node-tree of the prom to see
- * which timer chip is on this machine.
- */
-
- printk("Probing timer chip... ");
-
- type = 0;
- for(node = fchild ; ; )
- {
- node_str = get_str_from_prom(node, "model", promstr_buf);
- if(strcmp(node_str, "mk48t02") == 0)
- {
- type = 2;
- break;
- }
-
- if(strcmp(node_str, "mk48t08") == 0)
- {
- type = 8;
- break;
- }
-
- node = node_get_sibling(node);
- if(node == fchild)
- {
- printk("Aieee, could not find timer chip type\n");
- return;
- }
- }
-
- printk("Mostek %s\n", node_str);
- printk("At OBIO address: 0x%x Virtual address: 0x%x\n",
- (unsigned int) TIMER_PHYSADDR, (unsigned int) TIMER_STRUCT);
-
- mapioaddr((unsigned long) TIMER_PHYSADDR,
- (unsigned long) TIMER_STRUCT);
-
- TIMER_STRUCT->timer_limit14=(((1000000/HZ) << 10) | 0x80000000);
-
- return;
-}
-
-
-void
-probe_esp(register int esp_node)
-{
- register int nd;
- register char* lbuf;
-
- nd = node_get_child(esp_node);
-
- printk("\nProbing ESP:\n");
- lbuf = get_str_from_prom(nd, "name", promstr_buf);
-
- if(*get_int_from_prom(nd, "name", promint_buf) != 0)
- printk("Node: 0x%x Name: %s\n", nd, lbuf);
-
- while((nd = node_get_sibling(nd)) != 0) {
- lbuf = get_str_from_prom(nd, "name", promstr_buf);
- printk("Node: 0x%x Name: %s\n", nd, lbuf);
- }
-
- printk("\n");
-
- return;
-}
-
-void
-probe_sbus(register int cpu_child_node)
-{
- register int nd, savend;
- register char* lbuf;
-
- nd = cpu_child_node;
-
- lbuf = (char *) 0;
-
- while((nd = node_get_sibling(nd)) != 0) {
- lbuf = get_str_from_prom(nd, "name", promstr_buf);
- if(strcmp(lbuf, "sbus") == 0)
- break;
- };
-
- nd = node_get_child(nd);
-
- printk("Node: 0x%x Name: %s\n", nd,
- get_str_from_prom(nd, "name", promstr_buf));
-
- if(strcmp(lbuf, "esp") == 0) {
- probe_esp(nd);
- };
-
- while((nd = node_get_sibling(nd)) != 0) {
- printk("Node: 0x%x Name: %s\n", nd,
- lbuf = get_str_from_prom(nd, "name", promstr_buf));
-
- if(strcmp(lbuf, "esp") == 0) {
- savend = nd;
- probe_esp(nd);
- nd = savend;
- };
- };
-
- printk("\n");
- return;
-}
-
-extern unsigned long probe_memory(void);
-extern struct sparc_phys_banks sp_banks[14];
-unsigned int phys_bytes_of_ram, end_of_phys_memory;
-
-void
-probe_devices(void)
-{
- register int nd, i;
- register char* str;
-
- nd = prom_node_root;
-
- printk("PROBING DEVICES:\n");
-
- str = get_str_from_prom(nd, "device_type", promstr_buf);
- if(strcmp(str, "cpu") == 0) {
- printk("Found CPU root prom device tree node.\n");
- } else {
- printk("Root node in device tree was not 'cpu' cannot continue.\n");
- halt();
- };
-
-#ifdef DEBUG_PROBING
- printk("String address for d_type: 0x%x\n", (unsigned int) str);
- printk("str[0] = %c str[1] = %c str[2] = %c \n", str[0], str[1], str[2]);
-#endif
-
- str = get_str_from_prom(nd, "name", promstr_buf);
-
-#ifdef DEBUG_PROBING
- printk("String address for name: 0x%x\n", (unsigned int) str);
- printk("str[0] = %c str[1] = %c str[2] = %c \n", str[0], str[1], str[2]);
-#endif
-
- printk("Name: %s \n", str);
-
- first_descent = nd = node_get_child(nd);
-
-
-/* Ok, here will go a call to each specific device probe. We can
- * call these now that we have the 'root' node and the child of
- * this node to send to the routines. ORDER IS IMPORTANT!
- */
-
- probe_cpu();
- probe_vac();
- probe_mmu();
- phys_bytes_of_ram = probe_memory();
-
- printk("Physical Memory: %d bytes\n", (int) phys_bytes_of_ram);
- for(i=0; sp_banks[i].num_bytes != 0; i++) {
- printk("Bank %d: base 0x%x bytes %d\n", i,
- (unsigned int) sp_banks[i].base_addr,
- (int) sp_banks[i].num_bytes);
- end_of_phys_memory = sp_banks[i].base_addr + sp_banks[i].num_bytes;
- }
-
- printk("PROM Root Child Node: 0x%x Name: %s \n", nd,
- get_str_from_prom(nd, "name", promstr_buf));
-
- while((nd = node_get_sibling(nd)) != 0) {
- printk("Node: 0x%x Name: %s", nd,
- get_str_from_prom(nd, "name", promstr_buf));
- printk("\n");
- };
-
- printk("\nProbing SBUS:\n");
- probe_sbus(first_descent);
-
- return;
-}
diff --git a/arch/sparc/kernel/process.c b/arch/sparc/kernel/process.c
index 679863ba3..86b4980e2 100644
--- a/arch/sparc/kernel/process.c
+++ b/arch/sparc/kernel/process.c
@@ -1,13 +1,17 @@
-/*
- * linux/arch/i386/kernel/process.c
+/* $Id: process.c,v 1.77 1996/11/03 08:25:43 davem Exp $
+ * linux/arch/sparc/kernel/process.c
*
- * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -16,51 +20,224 @@
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
-#include <linux/ldt.h>
#include <linux/user.h>
#include <linux/a.out.h>
+#include <linux/config.h>
-#include <asm/segment.h>
+#include <asm/auxio.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/processor.h>
+#include <asm/psr.h>
#include <asm/system.h>
+#include <asm/elf.h>
-void ret_from_sys_call(void) { __asm__("nop"); }
+extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
+
+#ifndef __SMP__
/*
- * The idle loop on a i386..
+ * the idle loop on a Sparc... ;)
*/
asmlinkage int sys_idle(void)
{
if (current->pid != 0)
return -EPERM;
- /* Map out the low memory: it's no longer needed */
- /* Sparc version RSN */
-
/* endless idle loop with no priority at all */
current->counter = -100;
for (;;) {
schedule();
}
+ return 0;
+}
+
+#else
+
+/*
+ * the idle loop on a SparcMultiPenguin...
+ */
+asmlinkage int sys_idle(void)
+{
+ if (current->pid != 0)
+ return -EPERM;
+
+ /* endless idle loop with no priority at all */
+ current->counter = -100;
+ schedule();
+ return 0;
+}
+
+/* This is being executed in task 0 'user space'. */
+int cpu_idle(void *unused)
+{
+ volatile int *spap = &smp_process_available;
+ volatile int cval;
+
+ while(1) {
+ if(0==*spap)
+ continue;
+ cli();
+ /* Acquire exclusive access. */
+ while((cval = smp_swap(spap, -1)) == -1)
+ while(*spap == -1)
+ ;
+ if (0==cval) {
+ /* ho hum, release it. */
+ *spap = 0;
+ sti();
+ continue;
+ }
+ /* Something interesting happened, whee... */
+ *spap = (cval - 1);
+ sti();
+ idle();
+ }
+}
+
+#endif
+
+extern char reboot_command [];
+
+#ifdef CONFIG_SUN_CONSOLE
+extern void console_restore_palette (void);
+extern int serial_console;
+#endif
+
+void halt_now(void)
+{
+ sti();
+ udelay(8000);
+ cli();
+#ifdef CONFIG_SUN_CONSOLE
+ if (!serial_console)
+ console_restore_palette ();
+#endif
+ prom_halt();
+ panic("Halt failed!");
}
void hard_reset_now(void)
{
- halt();
+ char *p;
+
+ sti();
+ udelay(8000);
+ cli();
+
+ p = strchr (reboot_command, '\n');
+ if (p) *p = 0;
+#ifdef CONFIG_SUN_CONSOLE
+ if (!serial_console)
+ console_restore_palette ();
+#endif
+ if (*reboot_command)
+ prom_reboot (reboot_command);
+ prom_feval ("reset");
+ panic("Reboot failed!");
+}
+
+void show_regwindow(struct reg_window *rw)
+{
+ printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx\n"
+ "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+ rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
+ printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx\n"
+ "i4: %08lx i5: %08lx i6: %08lx i7: %08lx\n",
+ rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
+ rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
+}
+
+void show_stackframe(struct sparc_stackf *sf)
+{
+ unsigned long size;
+ unsigned long *stk;
+ int i;
+
+ printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx\n"
+ "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
+ sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
+ sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+ printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx\n"
+ "i4: %08lx i5: %08lx fp: %08lx ret_pc: %08lx\n",
+ sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
+ sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
+ printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx\n"
+ "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
+ (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
+ sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+ sf->xxargs[0]);
+ size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+ size -= STACKFRAME_SZ;
+ stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
+ i = 0;
+ do {
+ printk("s%d: %08lx\n", i++, *stk++);
+ } while ((size -= sizeof(unsigned long)));
}
void show_regs(struct pt_regs * regs)
{
- printk("\nSP: %08lx PC: %08lx NPC: %08lx\n", regs->sp, regs->pc,
- regs->npc);
+ printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx\n", regs->psr,
+ regs->pc, regs->npc, regs->y);
+ printk("g0: %08lx g1: %08lx g2: %08lx g3: %08lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+ printk("g4: %08lx g5: %08lx g6: %08lx g7: %08lx\n",
+ regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+ regs->u_regs[7]);
+ printk("o0: %08lx o1: %08lx o2: %08lx o3: %08lx\n",
+ regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+ regs->u_regs[11]);
+ printk("o4: %08lx o5: %08lx sp: %08lx ret_pc: %08lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+ show_regwindow((struct reg_window *)regs->u_regs[14]);
}
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-void start_thread(struct pt_regs * regs, unsigned long sp, unsigned long fp)
+void show_thread(struct thread_struct *tss)
{
- regs->sp = sp;
- regs->fp = fp;
+ int i;
+
+ printk("uwinmask: 0x%08lx\n", tss->uwinmask);
+ printk("kregs: 0x%08lx\n", (unsigned long)tss->kregs);
+ show_regs(tss->kregs);
+ printk("sig_address: 0x%08lx\n", tss->sig_address);
+ printk("sig_desc: 0x%08lx\n", tss->sig_desc);
+ printk("ksp: 0x%08lx\n", tss->ksp);
+ printk("kpc: 0x%08lx\n", tss->kpc);
+ printk("kpsr: 0x%08lx\n", tss->kpsr);
+ printk("kwim: 0x%08lx\n", tss->kwim);
+ printk("fork_kpsr: 0x%08lx\n", tss->fork_kpsr);
+ printk("fork_kwim: 0x%08lx\n", tss->fork_kwim);
+
+ for (i = 0; i < NSWINS; i++) {
+ if (!tss->rwbuf_stkptrs[i])
+ continue;
+ printk("reg_window[%d]:\n", i);
+ printk("stack ptr: 0x%08lx\n", tss->rwbuf_stkptrs[i]);
+ show_regwindow(&tss->reg_window[i]);
+ }
+ printk("w_saved: 0x%08lx\n", tss->w_saved);
+
+ /* XXX missing: float_regs */
+ printk("fsr: 0x%08lx\n", tss->fsr);
+ printk("fpqdepth: 0x%08lx\n", tss->fpqdepth);
+ /* XXX missing: fpqueue */
+
+ printk("sstk_info.stack: 0x%08lx\n",
+ (unsigned long)tss->sstk_info.the_stack);
+ printk("sstk_info.status: 0x%08lx\n",
+ (unsigned long)tss->sstk_info.cur_status);
+ printk("flags: 0x%08lx\n", tss->flags);
+ printk("current_ds: 0x%08x\n", tss->current_ds);
+
+ /* XXX missing: core_exec */
}
/*
@@ -68,24 +245,218 @@ void start_thread(struct pt_regs * regs, unsigned long sp, unsigned long fp)
*/
void exit_thread(void)
{
- halt();
+ flush_user_windows();
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+ /* Keep process from leaving FPU in a bogon state. */
+ put_psr(get_psr() | PSR_EF);
+ fpsave(&current->tss.float_regs[0], &current->tss.fsr,
+ &current->tss.fpqueue[0], &current->tss.fpqdepth);
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+ mmu_exit_hook();
}
void flush_thread(void)
{
- halt();
+ /* Make sure old user windows don't get in the way. */
+ flush_user_windows();
+ current->tss.w_saved = 0;
+ current->tss.uwinmask = 0;
+ current->tss.sstk_info.cur_status = 0;
+ current->tss.sstk_info.the_stack = 0;
+
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+ /* Clean the fpu. */
+ put_psr(get_psr() | PSR_EF);
+ fpsave(&current->tss.float_regs[0], &current->tss.fsr,
+ &current->tss.fpqueue[0], &current->tss.fpqdepth);
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+
+ mmu_flush_hook();
+ /* Now, this task is no longer a kernel thread. */
+ current->tss.flags &= ~SPARC_FLAG_KTHREAD;
}
-void copy_thread(int nr, unsigned long clone_flags, unsigned long sp, struct task_struct * p, struct pt_regs * regs)
+static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+ __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
+ "ldd\t[%1 + 0x08], %%g4\n\t"
+ "ldd\t[%1 + 0x10], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x00]\n\t"
+ "std\t%%g4, [%0 + 0x08]\n\t"
+ "std\t%%o4, [%0 + 0x10]\n\t"
+ "ldd\t[%1 + 0x18], %%g2\n\t"
+ "ldd\t[%1 + 0x20], %%g4\n\t"
+ "ldd\t[%1 + 0x28], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x18]\n\t"
+ "std\t%%g4, [%0 + 0x20]\n\t"
+ "std\t%%o4, [%0 + 0x28]\n\t"
+ "ldd\t[%1 + 0x30], %%g2\n\t"
+ "ldd\t[%1 + 0x38], %%g4\n\t"
+ "ldd\t[%1 + 0x40], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x30]\n\t"
+ "std\t%%g4, [%0 + 0x38]\n\t"
+ "ldd\t[%1 + 0x48], %%g2\n\t"
+ "std\t%%o4, [%0 + 0x40]\n\t"
+ "std\t%%g2, [%0 + 0x48]\n\t" : :
+ "r" (dst), "r" (src) :
+ "g2", "g3", "g4", "g5", "o4", "o5");
+}
+
+static __inline__ void copy_regwin(struct reg_window *dst, struct reg_window *src)
+{
+ __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
+ "ldd\t[%1 + 0x08], %%g4\n\t"
+ "ldd\t[%1 + 0x10], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x00]\n\t"
+ "std\t%%g4, [%0 + 0x08]\n\t"
+ "std\t%%o4, [%0 + 0x10]\n\t"
+ "ldd\t[%1 + 0x18], %%g2\n\t"
+ "ldd\t[%1 + 0x20], %%g4\n\t"
+ "ldd\t[%1 + 0x28], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x18]\n\t"
+ "std\t%%g4, [%0 + 0x20]\n\t"
+ "std\t%%o4, [%0 + 0x28]\n\t"
+ "ldd\t[%1 + 0x30], %%g2\n\t"
+ "ldd\t[%1 + 0x38], %%g4\n\t"
+ "std\t%%g2, [%0 + 0x30]\n\t"
+ "std\t%%g4, [%0 + 0x38]\n\t" : :
+ "r" (dst), "r" (src) :
+ "g2", "g3", "g4", "g5", "o4", "o5");
+}
+
+static __inline__ struct sparc_stackf *
+clone_stackframe(struct sparc_stackf *dst, struct sparc_stackf *src)
+{
+ unsigned long size;
+ struct sparc_stackf *sp;
+
+ size = ((unsigned long)src->fp) - ((unsigned long)src);
+ sp = (struct sparc_stackf *)(((unsigned long)dst) - size);
+
+ if (copy_to_user(sp, src, size))
+ return 0;
+ if (put_user(dst, &sp->fp))
+ return 0;
+ return sp;
+}
+
+
+/* Copy a Sparc thread. The fork() return value conventions
+ * under SunOS are nothing short of bletcherous:
+ * Parent --> %o0 == childs pid, %o1 == 0
+ * Child --> %o0 == parents pid, %o1 == 1
+ *
+ * NOTE: We have a separate fork kpsr/kwim because
+ * the parent could change these values between
+ * sys_fork invocation and when we reach here
+ * if the parent should sleep while trying to
+ * allocate the task_struct and kernel stack in
+ * do_fork().
+ */
+extern void ret_sys_call(void);
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+ struct task_struct *p, struct pt_regs *regs)
{
- struct pt_regs * childregs;
+ struct pt_regs *childregs;
+ struct reg_window *new_stack;
+ unsigned long stack_offset;
- childregs = ((struct pt_regs *) (p->kernel_stack_page + PAGE_SIZE)) - 1;
- p->tss.usp = (unsigned long) childregs;
- *childregs = *regs;
- childregs->sp = sp;
- p->tss.psr = regs->psr; /* for condition codes */
- return;
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+ put_psr(get_psr() | PSR_EF);
+ fpsave(&p->tss.float_regs[0], &p->tss.fsr,
+ &p->tss.fpqueue[0], &p->tss.fpqdepth);
+#ifdef __SMP__
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+
+ /* Calculate offset to stack_frame & pt_regs */
+ stack_offset = ((PAGE_SIZE<<1) - TRACEREG_SZ);
+
+ if(regs->psr & PSR_PS)
+ stack_offset -= REGWIN_SZ;
+ childregs = ((struct pt_regs *) (p->kernel_stack_page + stack_offset));
+ copy_regs(childregs, regs);
+ new_stack = (((struct reg_window *) childregs) - 1);
+ copy_regwin(new_stack, (((struct reg_window *) regs) - 1));
+
+ p->tss.ksp = p->saved_kernel_stack = (unsigned long) new_stack;
+ p->tss.kpc = (((unsigned long) ret_sys_call) - 0x8);
+ p->tss.kpsr = current->tss.fork_kpsr;
+ p->tss.kwim = current->tss.fork_kwim;
+ p->tss.kregs = childregs;
+
+ if(regs->psr & PSR_PS) {
+ childregs->u_regs[UREG_FP] = p->tss.ksp;
+ p->tss.flags |= SPARC_FLAG_KTHREAD;
+ p->tss.current_ds = KERNEL_DS;
+ childregs->u_regs[UREG_G6] = (unsigned long) p;
+ } else {
+ childregs->u_regs[UREG_FP] = sp;
+ p->tss.flags &= ~SPARC_FLAG_KTHREAD;
+ p->tss.current_ds = USER_DS;
+
+ if (sp != current->tss.kregs->u_regs[UREG_FP]) {
+ struct sparc_stackf *childstack;
+ struct sparc_stackf *parentstack;
+
+ /*
+ * This is a clone() call with supplied user stack.
+ * Set some valid stack frames to give to the child.
+ */
+ childstack = (struct sparc_stackf *)sp;
+ parentstack = (struct sparc_stackf *)
+ current->tss.kregs->u_regs[UREG_FP];
+
+#if 0
+ printk("clone: parent stack:\n");
+ show_stackframe(parentstack);
+#endif
+
+ childstack = clone_stackframe(childstack, parentstack);
+ if (!childstack)
+ return -EFAULT;
+
+#if 0
+ printk("clone: child stack:\n");
+ show_stackframe(childstack);
+#endif
+
+ childregs->u_regs[UREG_FP] = (unsigned long)childstack;
+ }
+ }
+
+ /* Set the return value for the child. */
+ childregs->u_regs[UREG_I0] = current->pid;
+ childregs->u_regs[UREG_I1] = 1;
+
+ /* Set the return value for the parent. */
+ regs->u_regs[UREG_I1] = 0;
+
+ return 0;
}
/*
@@ -93,20 +464,60 @@ void copy_thread(int nr, unsigned long clone_flags, unsigned long sp, struct tas
*/
void dump_thread(struct pt_regs * regs, struct user * dump)
{
- return; /* solaris does this enough */
+ unsigned long first_stack_page;
+
+ dump->magic = SUNOS_CORE_MAGIC;
+ dump->len = sizeof(struct user);
+ dump->regs.psr = regs->psr;
+ dump->regs.pc = regs->pc;
+ dump->regs.npc = regs->npc;
+ dump->regs.y = regs->y;
+ /* fuck me plenty */
+ memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
+ dump->uexec = current->tss.core_exec;
+ dump->u_tsize = (((unsigned long) current->mm->end_code) -
+ ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_dsize &= ~(PAGE_SIZE - 1);
+ first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
+ dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
+ memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->tss.float_regs[0], (sizeof(unsigned long) * 32));
+ dump->fpu.fpstatus.fsr = current->tss.fsr;
+ dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
+ dump->fpu.fpstatus.fpq_count = current->tss.fpqdepth;
+ memcpy(&dump->fpu.fpstatus.fpq[0], &current->tss.fpqueue[0],
+ ((sizeof(unsigned long) * 2) * 16));
+ dump->sigcode = current->tss.sig_desc;
}
-asmlinkage int sys_fork(struct pt_regs regs)
+/*
+ * fill in the fpu structure for a core dump.
+ */
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
- return do_fork(COPYVM | SIGCHLD, regs.sp, &regs);
+ /* Currently we report that we couldn't dump the fpu structure */
+ return 0;
}
/*
- * sys_execve() executes a new program.
+ * sparc_execve() executes a new program after the asm stub has set
+ * things up for us. This should basically do what I want it to.
*/
-asmlinkage int sys_execve(struct pt_regs regs)
+asmlinkage int sparc_execve(struct pt_regs *regs)
{
- halt();
- return 0;
-}
+ int error, base = 0;
+ char *filename;
+ /* Check for indirect call. */
+ if(regs->u_regs[UREG_G1] == 0)
+ base = 1;
+
+ error = getname((char *) regs->u_regs[base + UREG_I0], &filename);
+ if(error)
+ return error;
+ error = do_execve(filename, (char **) regs->u_regs[base + UREG_I1],
+ (char **) regs->u_regs[base + UREG_I2], regs);
+ putname(filename);
+ return error;
+}
diff --git a/arch/sparc/kernel/promops.c b/arch/sparc/kernel/promops.c
deleted file mode 100644
index b5c897b0d..000000000
--- a/arch/sparc/kernel/promops.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* promops.c: Prom node tree operations and Prom Vector initialization
- * initialization routines.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
-
-#include <linux/kernel.h>
-
-#include <asm/openprom.h>
-
-/* #define DEBUG_PROMOPS */
-#define MAX_PR_LEN 64 /* exotic hardware probably overshoots this */
-
-int prom_node_root; /* initialized in init_prom */
-
-extern struct linux_romvec *romvec;
-
-/* These two functions return and siblings and direct child descendents
- * in the prom device tree respectively.
- */
-
-int
-node_get_sibling(int node)
-{
- return (*(romvec->pv_nodeops->no_nextnode))(node);
-}
-
-int
-node_get_child(int node)
-{
- return (*(romvec->pv_nodeops->no_child))(node);
-}
-
-/* The following routine is used during device probing to determine
- * an integer value property about a (perhaps virtual) device. This
- * could be anything, like the size of the mmu cache lines, etc.
- * the default return value is -1 is the prom has nothing interesting.
- */
-
-unsigned int prom_int_null;
-
-unsigned int *
-get_int_from_prom(int node, char *nd_prop, unsigned int *value)
-{
- unsigned int pr_len;
-
- *value = &prom_int_null; /* duh, I was returning -1 as an unsigned int, prom_panic() */
-
- pr_len = romvec->pv_nodeops->no_proplen(node, nd_prop);
- if(pr_len > MAX_PR_LEN)
- {
-#ifdef DEBUG_PROMOPS
- printk("Bad pr_len in promops -- node: %d nd_prop: %s pr_len: %d",
- node, nd_prop, (int) pr_len);
-#endif
- return value; /* XXX */
- }
-
- romvec->pv_nodeops->no_getprop(node, nd_prop, (char *) value);
-
- return value;
-}
-
-
-/* This routine returns what is termed a property string as opposed
- * to a property integer as above. This can be used to extract the
- * 'type' of device from the prom. An example could be the clock timer
- * chip type. By default you get returned a null string if garbage
- * is returned from the prom.
- */
-
-char *
-get_str_from_prom(int node, char *nd_prop, char *value)
-{
- unsigned int pr_len;
-
- *value='\n';
-
- pr_len = romvec->pv_nodeops->no_proplen(node, nd_prop);
- if(pr_len > MAX_PR_LEN)
- {
-#ifdef DEBUG_PROMOPS
- printk("Bad pr_len in promops -- node: %d nd_prop: %s pr_len: %d",
- node, nd_prop, pr_len);
-#endif
- return value; /* XXX */
- }
-
- romvec->pv_nodeops->no_getprop(node, nd_prop, value);
- value[pr_len] = 0;
-
- return value;
-}
-
-/* This gets called from head.S upon bootup to initialize the
- * prom vector pointer for the rest of the kernel.
- */
-
-void
-init_prom(struct linux_romvec *r_ptr)
-{
- romvec = r_ptr;
- prom_node_root = romvec->pv_nodeops->no_nextnode(0);
- prom_int_null = 0;
-
- return;
-}
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
new file mode 100644
index 000000000..e0750598b
--- /dev/null
+++ b/arch/sparc/kernel/ptrace.c
@@ -0,0 +1,891 @@
+/* ptrace.c: Sparc process tracing support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ *
+ * Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
+ * and David Mosberger.
+ *
+ * Added Linux support -miguel (wierd, eh?, the orignal code was meant
+ * to emulate SunOS).
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#define MAGIC_CONSTANT 0x80000000
+
+/* change a pid into a task struct. */
+static inline struct task_struct * get_task(int pid)
+{
+ int i;
+
+ for (i = 1; i < NR_TASKS; i++) {
+ if (task[i] != NULL && (task[i]->pid == pid))
+ return task[i];
+ }
+ return NULL;
+}
+
+/*
+ * This routine gets a long from any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ */
+static unsigned long get_long(struct task_struct * tsk,
+ struct vm_area_struct * vma, unsigned long addr)
+{
+ pgd_t * pgdir;
+ pmd_t * pgmiddle;
+ pte_t * pgtable;
+ unsigned long page, retval;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (pgd_none(*pgdir)) {
+ do_no_page(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return 0;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ do_no_page(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return 0;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ do_no_page(tsk, vma, addr, 0);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+/* this is a hack for non-kernel-mapped video buffers and similar */
+ if (MAP_NR(page) >= max_mapnr)
+ return 0;
+ page += addr & ~PAGE_MASK;
+ retval = *(unsigned long *) page;
+ flush_page_to_ram(page);
+ return retval;
+}
+
+/*
+ * This routine puts a long into any process space by following the page
+ * tables. NOTE! You should check that the long isn't on a page boundary,
+ * and that it is in the task area before calling this: this routine does
+ * no checking.
+ *
+ * Now keeps R/W state of page so that a text page stays readonly
+ * even if a debugger scribbles breakpoints into it. -M.U-
+ */
+static void put_long(struct task_struct * tsk, struct vm_area_struct * vma,
+ unsigned long addr, unsigned long data)
+{
+ pgd_t *pgdir;
+ pmd_t *pgmiddle;
+ pte_t *pgtable;
+ unsigned long page;
+
+repeat:
+ pgdir = pgd_offset(vma->vm_mm, addr);
+ if (!pgd_present(*pgdir)) {
+ do_no_page(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pgd_bad(*pgdir)) {
+ printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
+ pgd_clear(pgdir);
+ return;
+ }
+ pgmiddle = pmd_offset(pgdir, addr);
+ if (pmd_none(*pgmiddle)) {
+ do_no_page(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ if (pmd_bad(*pgmiddle)) {
+ printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
+ pmd_clear(pgmiddle);
+ return;
+ }
+ pgtable = pte_offset(pgmiddle, addr);
+ if (!pte_present(*pgtable)) {
+ do_no_page(tsk, vma, addr, 1);
+ goto repeat;
+ }
+ page = pte_page(*pgtable);
+ if (!pte_write(*pgtable)) {
+ do_wp_page(tsk, vma, addr, 1);
+ goto repeat;
+ }
+/* this is a hack for non-kernel-mapped video buffers and similar */
+ flush_cache_page(vma, addr);
+ if (MAP_NR(page) < max_mapnr) {
+ *(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
+ flush_page_to_ram(page);
+ }
+/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
+/* this should also re-instate whatever read-only mode there was before */
+ set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
+ flush_tlb_page(vma, addr);
+}
+
+static struct vm_area_struct * find_extend_vma(struct task_struct * tsk,
+ unsigned long addr)
+{
+ struct vm_area_struct * vma;
+
+ addr &= PAGE_MASK;
+ vma = find_vma(tsk->mm,addr);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start <= addr)
+ return vma;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ return NULL;
+ if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
+ return NULL;
+ vma->vm_offset -= vma->vm_start - addr;
+ vma->vm_start = addr;
+ return vma;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls get_long() to read a long.
+ */
+static int read_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long * result)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ *result = get_long(tsk, vma, addr);
+ return 0;
+}
+
+static int read_byte(struct task_struct *tsk, unsigned long addr,
+ unsigned char *result)
+{
+ struct vm_area_struct *vma = find_extend_vma(tsk, addr&~3);
+ unsigned long tmp;
+
+ if(!vma)
+ return -EIO;
+ tmp = get_long(tsk, vma, (addr & ~3));
+ switch(addr & 3) {
+ case 0:
+ *result = (tmp & 0xff000000)>>24;
+ break;
+ case 1:
+ *result = (tmp & 0x00ff0000)>>16;
+ break;
+ case 2:
+ *result = (tmp & 0x0000ff00)>>8;
+ break;
+ case 3:
+ *result = (tmp & 0x000000ff);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * This routine checks the page boundaries, and that the offset is
+ * within the task area. It then calls put_long() to write a long.
+ */
+static int write_long(struct task_struct * tsk, unsigned long addr,
+ unsigned long data)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, addr);
+
+ if (!vma)
+ return -EIO;
+ put_long(tsk, vma, addr, data);
+ return 0;
+}
+
+static int write_byte(struct task_struct * tsk, unsigned long addr,
+ unsigned char data)
+{
+ struct vm_area_struct * vma = find_extend_vma(tsk, (addr & ~3));
+ unsigned long tmp;
+
+ if (!vma)
+ return -EIO;
+ tmp = get_long(tsk, vma, (addr & ~3));
+ switch(addr & 3) {
+ case 0:
+ tmp &= 0x00ffffff;
+ tmp |= (data << 24);
+ break;
+ case 1:
+ tmp &= 0xff00ffff;
+ tmp |= ((data << 16) & 0x00ff0000);
+ break;
+ case 2:
+ tmp &= 0xffff00ff;
+ tmp |= ((data << 8) & 0x0000ff00);
+ break;
+ case 3:
+ tmp &= 0xffffff00;
+ tmp |= (data & 0x000000ff);
+ break;
+ }
+ put_long(tsk, vma, (addr & ~3), tmp);
+ return 0;
+}
+
+/* Returning from ptrace is a bit tricky because the syscall return
+ * low level code assumes any value returned which is negative and
+ * is a valid errno will mean setting the condition codes to indicate
+ * an error return. This doesn't work, so we have this hook.
+ */
+static inline void
+pt_error_return(struct pt_regs *regs, unsigned long error)
+{
+ regs->u_regs[UREG_I0] = error;
+ regs->psr |= PSR_C;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+}
+
+static inline void
+pt_succ_return(struct pt_regs *regs, unsigned long value)
+{
+ regs->u_regs[UREG_I0] = value;
+ regs->psr &= ~PSR_C;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+}
+
+static void
+pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long *addr)
+{
+ if(put_user(value, addr))
+ return pt_error_return(regs, EFAULT);
+ regs->u_regs[UREG_I0] = 0;
+ regs->psr &= ~PSR_C;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+}
+
+static void
+pt_os_succ_return (struct pt_regs *regs, unsigned long val, long *addr)
+{
+ if (current->personality & PER_BSD)
+ pt_succ_return (regs, val);
+ else
+ pt_succ_return_linux (regs, val, addr);
+}
+
+/* Fuck me gently with a chainsaw... */
+static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
+ struct task_struct *tsk, long *addr)
+{
+ struct pt_regs *cregs = tsk->tss.kregs;
+ struct thread_struct *t = &tsk->tss;
+ int v;
+
+ if(offset >= 1024)
+ offset -= 1024; /* whee... */
+ if(offset & ((sizeof(unsigned long) - 1))) {
+ pt_error_return(regs, EIO);
+ return;
+ }
+ if(offset >= 16 && offset < 784) {
+ offset -= 16; offset >>= 2;
+ pt_os_succ_return(regs, *(((unsigned long *)(&t->reg_window[0]))+offset), addr);
+ return;
+ }
+ if(offset >= 784 && offset < 832) {
+ offset -= 784; offset >>= 2;
+ pt_os_succ_return(regs, *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset), addr);
+ return;
+ }
+ switch(offset) {
+ case 0:
+ v = t->ksp;
+ break;
+ case 4:
+ v = t->kpc;
+ break;
+ case 8:
+ v = t->kpsr;
+ break;
+ case 12:
+ v = t->uwinmask;
+ break;
+ case 832:
+ v = t->w_saved;
+ break;
+ case 896:
+ v = cregs->u_regs[UREG_I0];
+ break;
+ case 900:
+ v = cregs->u_regs[UREG_I1];
+ break;
+ case 904:
+ v = cregs->u_regs[UREG_I2];
+ break;
+ case 908:
+ v = cregs->u_regs[UREG_I3];
+ break;
+ case 912:
+ v = cregs->u_regs[UREG_I4];
+ break;
+ case 916:
+ v = cregs->u_regs[UREG_I5];
+ break;
+ case 920:
+ v = cregs->u_regs[UREG_I6];
+ break;
+ case 924:
+ if(tsk->tss.flags & MAGIC_CONSTANT)
+ v = cregs->u_regs[UREG_G1];
+ else
+ v = 0;
+ break;
+ case 940:
+ v = cregs->u_regs[UREG_I0];
+ break;
+ case 944:
+ v = cregs->u_regs[UREG_I1];
+ break;
+
+ case 948:
+ /* Isn't binary compatibility _fun_??? */
+ if(cregs->psr & PSR_C)
+ v = cregs->u_regs[UREG_I0] << 24;
+ else
+ v = 0;
+ break;
+
+ /* Rest of them are completely unsupported. */
+ default:
+ printk("%s [%d]: Wants to read user offset %ld\n",
+ current->comm, current->pid, offset);
+ pt_error_return(regs, EIO);
+ return;
+ }
+ if (current->personality & PER_BSD)
+ pt_succ_return (regs, v);
+ else
+ pt_succ_return_linux (regs, v, addr);
+ return;
+}
+
+static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
+ struct task_struct *tsk)
+{
+ struct pt_regs *cregs = tsk->tss.kregs;
+ struct thread_struct *t = &tsk->tss;
+ unsigned long value = regs->u_regs[UREG_I3];
+
+ if(offset >= 1024)
+ offset -= 1024; /* whee... */
+ if(offset & ((sizeof(unsigned long) - 1)))
+ goto failure;
+ if(offset >= 16 && offset < 784) {
+ offset -= 16; offset >>= 2;
+ *(((unsigned long *)(&t->reg_window[0]))+offset) = value;
+ goto success;
+ }
+ if(offset >= 784 && offset < 832) {
+ offset -= 784; offset >>= 2;
+ *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset) = value;
+ goto success;
+ }
+ switch(offset) {
+ case 896:
+ cregs->u_regs[UREG_I0] = value;
+ break;
+ case 900:
+ cregs->u_regs[UREG_I1] = value;
+ break;
+ case 904:
+ cregs->u_regs[UREG_I2] = value;
+ break;
+ case 908:
+ cregs->u_regs[UREG_I3] = value;
+ break;
+ case 912:
+ cregs->u_regs[UREG_I4] = value;
+ break;
+ case 916:
+ cregs->u_regs[UREG_I5] = value;
+ break;
+ case 920:
+ cregs->u_regs[UREG_I6] = value;
+ break;
+ case 924:
+ cregs->u_regs[UREG_I7] = value;
+ break;
+ case 940:
+ cregs->u_regs[UREG_I0] = value;
+ break;
+ case 944:
+ cregs->u_regs[UREG_I1] = value;
+ break;
+
+ /* Rest of them are completely unsupported or "no-touch". */
+ default:
+ printk("%s [%d]: Wants to write user offset %ld\n",
+ current->comm, current->pid, offset);
+ goto failure;
+ }
+success:
+ pt_succ_return(regs, 0);
+ return;
+failure:
+ pt_error_return(regs, EIO);
+ return;
+}
+
+/* #define ALLOW_INIT_TRACING */
+/* #define DEBUG_PTRACE */
+
+#ifdef DEBUG_PTRACE
+char *pt_rq [] = {
+"TRACEME",
+"PEEKTEXT",
+"PEEKDATA",
+"PEEKUSR",
+"POKETEXT",
+"POKEDATA",
+"POKEUSR",
+"CONT",
+"KILL",
+"SINGLESTEP",
+"SUNATTACH",
+"SUNDETACH",
+"GETREGS",
+"SETREGS",
+"GETFPREGS",
+"SETFPREGS",
+"READDATA",
+"WRITEDATA",
+"READTEXT",
+"WRITETEXT",
+"GETFPAREGS",
+"SETFPAREGS",
+""
+};
+#endif
+
+asmlinkage void do_ptrace(struct pt_regs *regs)
+{
+ unsigned long request = regs->u_regs[UREG_I0];
+ unsigned long pid = regs->u_regs[UREG_I1];
+ unsigned long addr = regs->u_regs[UREG_I2];
+ unsigned long data = regs->u_regs[UREG_I3];
+ unsigned long addr2 = regs->u_regs[UREG_I4];
+ struct task_struct *child;
+
+#ifdef DEBUG_PTRACE
+ {
+ char *s;
+
+ if ((request > 0) && (request < 21))
+ s = pt_rq [request];
+ else
+ s = "unknown";
+
+ if (request == PTRACE_POKEDATA && data == 0x91d02001){
+ printk ("do_ptrace: breakpoint pid=%d, addr=%08lx addr2=%08lx\n",
+ pid, addr, addr2);
+ } else
+ printk("do_ptrace: rq=%s(%d) pid=%d addr=%08lx data=%08lx addr2=%08lx\n",
+ s, (int) request, (int) pid, addr, data, addr2);
+ }
+#endif
+ if(request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->flags & PF_PTRACED) {
+ pt_error_return(regs, EPERM);
+ return;
+ }
+ /* set the ptrace bit in the process flags. */
+ current->flags |= PF_PTRACED;
+ pt_succ_return(regs, 0);
+ return;
+ }
+#ifndef ALLOW_INIT_TRACING
+ if(pid == 1) {
+ /* Can't dork with init. */
+ pt_error_return(regs, EPERM);
+ return;
+ }
+#endif
+ if(!(child = get_task(pid))) {
+ pt_error_return(regs, ESRCH);
+ return;
+ }
+
+ if(request == PTRACE_SUNATTACH) {
+ if(child == current) {
+ /* Try this under SunOS/Solaris, bwa haha
+ * You'll never be able to kill the process. ;-)
+ */
+ pt_error_return(regs, EPERM);
+ return;
+ }
+ if((!child->dumpable ||
+ (current->uid != child->euid) ||
+ (current->uid != child->uid) ||
+ (current->gid != child->egid) ||
+ (current->gid != child->gid)) && !suser()) {
+ pt_error_return(regs, EPERM);
+ return;
+ }
+ /* the same process cannot be attached many times */
+ if (child->flags & PF_PTRACED) {
+ pt_error_return(regs, EPERM);
+ return;
+ }
+ child->flags |= PF_PTRACED;
+ if(child->p_pptr != current) {
+ REMOVE_LINKS(child);
+ child->p_pptr = current;
+ SET_LINKS(child);
+ }
+ send_sig(SIGSTOP, child, 1);
+ pt_succ_return(regs, 0);
+ return;
+ }
+ if(!(child->flags & PF_PTRACED)) {
+ pt_error_return(regs, ESRCH);
+ return;
+ }
+ if(child->state != TASK_STOPPED) {
+ if(request != PTRACE_KILL) {
+ pt_error_return(regs, ESRCH);
+ return;
+ }
+ }
+ if(child->p_pptr != current) {
+ pt_error_return(regs, ESRCH);
+ return;
+ }
+ switch(request) {
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+ int res;
+
+ /* Non-word alignment _not_ allowed on Sparc. */
+ if(addr & (sizeof(unsigned long) - 1)) {
+ pt_error_return(regs, EINVAL);
+ return;
+ }
+ res = read_long(child, addr, &tmp);
+ if (res < 0) {
+ pt_error_return(regs, -res);
+ return;
+ }
+ pt_os_succ_return(regs, tmp, (long *) data);
+ return;
+ }
+
+ case PTRACE_PEEKUSR:
+ read_sunos_user(regs, addr, child, (long *) data);
+ return;
+
+ case PTRACE_POKEUSR:
+ write_sunos_user(regs, addr, child);
+ return;
+
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA: {
+ struct vm_area_struct *vma;
+ int res;
+
+ /* Non-word alignment _not_ allowed on Sparc. */
+ if(addr & (sizeof(unsigned long) - 1)) {
+ pt_error_return(regs, EINVAL);
+ return;
+ }
+ vma = find_extend_vma(child, addr);
+ res = write_long(child, addr, data);
+ if(res < 0)
+ pt_error_return(regs, -res);
+ else
+ pt_succ_return(regs, res);
+ return;
+ }
+
+ case PTRACE_GETREGS: {
+ struct pt_regs *pregs = (struct pt_regs *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ int rval;
+
+ rval = verify_area(VERIFY_WRITE, pregs, sizeof(struct pt_regs));
+ if(rval)
+ return pt_error_return(regs, -rval);
+ __put_user(cregs->psr, (&pregs->psr));
+ __put_user(cregs->pc, (&pregs->pc));
+ __put_user(cregs->npc, (&pregs->npc));
+ __put_user(cregs->y, (&pregs->y));
+ for(rval = 1; rval < 16; rval++)
+ __put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]));
+ pt_succ_return(regs, 0);
+#ifdef DEBUG_PTRACE
+ printk ("PC=%x nPC=%x o7=%x\n", cregs->pc, cregs->npc, cregs->u_regs [15]);
+#endif
+ return;
+ }
+
+ case PTRACE_SETREGS: {
+ struct pt_regs *pregs = (struct pt_regs *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ unsigned long psr, pc, npc, y;
+ int i;
+
+ /* Must be careful, tracing process can only set certain
+ * bits in the psr.
+ */
+ i = verify_area(VERIFY_READ, pregs, sizeof(struct pt_regs));
+ if(i)
+ return pt_error_return(regs, -i);
+ __get_user(psr, (&pregs->psr));
+ __get_user(pc, (&pregs->pc));
+ __get_user(npc, (&pregs->npc));
+ __get_user(y, (&pregs->y));
+ psr &= PSR_ICC;
+ cregs->psr &= ~PSR_ICC;
+ cregs->psr |= psr;
+ if(!((pc | npc) & 3)) {
+ cregs->pc = pc;
+ cregs->npc =npc;
+ }
+ cregs->y = y;
+ for(i = 1; i < 16; i++)
+ __get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]));
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_GETFPREGS: {
+ struct fps {
+ unsigned long regs[32];
+ unsigned long fsr;
+ unsigned long flags;
+ unsigned long extra;
+ unsigned long fpqd;
+ struct fq {
+ unsigned long *insnaddr;
+ unsigned long insn;
+ } fpq[16];
+ } *fps = (struct fps *) addr;
+ int i;
+
+ i = verify_area(VERIFY_WRITE, fps, sizeof(struct fps));
+ if(i)
+ return pt_error_return(regs, -i);
+ for(i = 0; i < 32; i++)
+ __put_user(child->tss.float_regs[i], (&fps->regs[i]));
+ __put_user(child->tss.fsr, (&fps->fsr));
+ __put_user(child->tss.fpqdepth, (&fps->fpqd));
+ __put_user(0, (&fps->flags));
+ __put_user(0, (&fps->extra));
+ for(i = 0; i < 16; i++) {
+ __put_user(child->tss.fpqueue[i].insn_addr,
+ (&fps->fpq[i].insnaddr));
+ __put_user(child->tss.fpqueue[i].insn, (&fps->fpq[i].insn));
+ }
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_SETFPREGS: {
+ struct fps {
+ unsigned long regs[32];
+ unsigned long fsr;
+ unsigned long flags;
+ unsigned long extra;
+ unsigned long fpqd;
+ struct fq {
+ unsigned long *insnaddr;
+ unsigned long insn;
+ } fpq[16];
+ } *fps = (struct fps *) addr;
+ int i;
+
+ i = verify_area(VERIFY_READ, fps, sizeof(struct fps));
+ if(i)
+ return pt_error_return(regs, -i);
+ copy_from_user(&child->tss.float_regs[0], &fps->regs[0], (32 * 4));
+ __get_user(child->tss.fsr, (&fps->fsr));
+ __get_user(child->tss.fpqdepth, (&fps->fpqd));
+ for(i = 0; i < 16; i++) {
+ __get_user(child->tss.fpqueue[i].insn_addr,
+ (&fps->fpq[i].insnaddr));
+ __get_user(child->tss.fpqueue[i].insn, (&fps->fpq[i].insn));
+ }
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_READTEXT:
+ case PTRACE_READDATA: {
+ unsigned char *dest = (unsigned char *) addr2;
+ unsigned long src = addr;
+ unsigned char tmp;
+ int res, len = data;
+
+ res = verify_area(VERIFY_WRITE, dest, len);
+ if(res)
+ return pt_error_return(regs, -res);
+ while(len) {
+ res = read_byte(child, src, &tmp);
+ if(res < 0)
+ return pt_error_return(regs, -res);
+ __put_user(tmp, dest);
+ src++; dest++; len--;
+ }
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_WRITETEXT:
+ case PTRACE_WRITEDATA: {
+ unsigned char *src = (unsigned char *) addr2;
+ unsigned long dest = addr;
+ int res, len = data;
+
+ res = verify_area(VERIFY_READ, src, len);
+ if(res)
+ return pt_error_return(regs, -res);
+ while(len) {
+ unsigned long tmp;
+
+ __get_user(tmp, src);
+ res = write_byte(child, dest, tmp);
+ if(res < 0)
+ return pt_error_return(regs, -res);
+ src++; dest++; len--;
+ }
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
+ addr = 1;
+
+ case PTRACE_CONT: { /* restart after signal. */
+ if ((unsigned long) data > NSIG) {
+ pt_error_return(regs, EIO);
+ return;
+ }
+ if (addr != 1) {
+ if (addr & 3) {
+ pt_error_return(regs, EINVAL);
+ return;
+ }
+#ifdef DEBUG_PTRACE
+ printk ("Original: %08lx %08lx\n", child->tss.kregs->pc, child->tss.kregs->npc);
+ printk ("Continuing with %08lx %08lx\n", addr, addr+4);
+#endif
+ child->tss.kregs->pc = addr;
+ child->tss.kregs->npc = addr + 4;
+ }
+
+ if (request == PTRACE_SYSCALL)
+ child->flags |= PF_TRACESYS;
+ else
+ child->flags &= ~PF_TRACESYS;
+
+ child->exit_code = data;
+#ifdef DEBUG_PTRACE
+ printk("CONT: %s [%d]: set exit_code = %x %x %x\n", child->comm,
+ child->pid, child->exit_code,
+ child->tss.kregs->pc,
+ child->tss.kregs->npc);
+
+#endif
+ wake_up_process(child);
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+/*
+ * make the child exit. Best I can do is send it a sigkill.
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ if (child->state == TASK_ZOMBIE) { /* already dead */
+ pt_succ_return(regs, 0);
+ return;
+ }
+ wake_up_process(child);
+ child->exit_code = SIGKILL;
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ case PTRACE_SUNDETACH: { /* detach a process that was attached. */
+ if ((unsigned long) data > NSIG) {
+ pt_error_return(regs, EIO);
+ return;
+ }
+ child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ wake_up_process(child);
+ child->exit_code = data;
+ REMOVE_LINKS(child);
+ child->p_pptr = child->p_opptr;
+ SET_LINKS(child);
+ pt_succ_return(regs, 0);
+ return;
+ }
+
+ /* PTRACE_DUMPCORE unsupported... */
+
+ default:
+ pt_error_return(regs, EIO);
+ return;
+ }
+}
+
+asmlinkage void syscall_trace(void)
+{
+#ifdef DEBUG_PTRACE
+ printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
+#endif
+ if ((current->flags & (PF_PTRACED|PF_TRACESYS))
+ != (PF_PTRACED|PF_TRACESYS))
+ return;
+ current->exit_code = SIGTRAP;
+ current->state = TASK_STOPPED;
+ current->tss.flags ^= MAGIC_CONSTANT;
+ notify_parent(current);
+ schedule();
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+#ifdef DEBUG_PTRACE
+ printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
+ current->pid, current->exit_code);
+#endif
+ if (current->exit_code) {
+ current->signal |= (1 << (current->exit_code - 1));
+ }
+ current->exit_code = 0;
+}
diff --git a/arch/sparc/kernel/rirq.S b/arch/sparc/kernel/rirq.S
new file mode 100644
index 000000000..28fc8cd65
--- /dev/null
+++ b/arch/sparc/kernel/rirq.S
@@ -0,0 +1,289 @@
+/* rirq.S: Needed to return from an interrupt on SMP with no
+ * locks held or released.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/contregs.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+
+#define t_psr l0
+#define t_pc l1
+#define t_npc l2
+#define t_wim l3
+#define twin_tmp1 l4
+#define twin_tmp2 l5
+#define twin_tmp3 l6
+#define curptr g6
+
+ /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
+ .globl rirq_7win_patch1, rirq_7win_patch2, rirq_7win_patch3
+ .globl rirq_7win_patch4, rirq_7win_patch5
+rirq_7win_patch1: srl %t_wim, 0x6, %twin_tmp2
+rirq_7win_patch2: and %twin_tmp2, 0x7f, %twin_tmp2
+rirq_7win_patch3: srl %g1, 7, %g2
+rirq_7win_patch4: srl %g2, 6, %g2
+rirq_7win_patch5: and %g1, 0x7f, %g1
+ /* END OF PATCH INSTRUCTIONS */
+
+ .globl ret_irq_entry, rirq_patch1, rirq_patch2
+ .globl rirq_patch3, rirq_patch4, rirq_patch5
+ret_irq_entry:
+ ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
+ andcc %t_psr, PSR_PS, %g0
+ bne ret_irq_kernel
+ nop
+
+ret_irq_user:
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ ld [%curptr + THREAD_W_SAVED], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ be ret_irq_nobufwins
+ nop
+
+ /* User has toasty windows, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov 1, %o1
+ call C_LABEL(try_to_clear_window_buffer)
+ add %sp, REGWIN_SZ, %o0
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ clr %l5
+
+ret_irq_nobufwins:
+ /* Load up the user's out registers so we can pull
+ * a window from the stack, if necessary.
+ */
+ LOAD_PT_INS(sp)
+
+ /* If there are already live user windows in the
+ * set we can return from trap safely.
+ */
+ ld [%curptr + THREAD_UMASK], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ bne ret_irq_userwins_ok
+ nop
+
+ /* Calculate new %wim, we have to pull a register
+ * window from the users stack.
+ */
+ret_irq_pull_one_window:
+ rd %wim, %t_wim
+ sll %t_wim, 0x1, %twin_tmp1
+rirq_patch1: srl %t_wim, 0x7, %twin_tmp2
+ or %twin_tmp2, %twin_tmp1, %twin_tmp2
+rirq_patch2: and %twin_tmp2, 0xff, %twin_tmp2
+
+ wr %twin_tmp2, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Here comes the architecture specific
+ * branch to the user stack checking routine
+ * for return from traps.
+ */
+ .globl C_LABEL(rirq_mmu_patchme)
+C_LABEL(rirq_mmu_patchme): b C_LABEL(sun4c_reti_stackchk)
+ andcc %fp, 0x7, %g0
+
+ret_irq_userwins_ok:
+ LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
+ or %t_pc, %t_npc, %g2
+ andcc %g2, 0x3, %g0
+ bne ret_irq_unaligned_pc
+ nop
+
+ LOAD_PT_YREG(sp, g1)
+ LOAD_PT_GLOBALS(sp)
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_irq_unaligned_pc:
+ add %sp, REGWIN_SZ, %o0
+ ld [%sp + REGWIN_SZ + PT_PC], %o1
+ ld [%sp + REGWIN_SZ + PT_NPC], %o2
+ ld [%sp + REGWIN_SZ + PT_PSR], %o3
+
+ wr %t_wim, 0x0, %wim ! or else...
+ WRITE_PAUSE
+
+ /* User has unaligned crap, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(do_memaccess_unaligned)
+ nop
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ clr %l5
+
+ret_irq_kernel:
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ /* Will the rett land us in the invalid window? */
+ mov 2, %g1
+ sll %g1, %t_psr, %g1
+rirq_patch3: srl %g1, 8, %g2
+ or %g1, %g2, %g1
+ rd %wim, %g2
+ andcc %g2, %g1, %g0
+ be 1f ! Nope, just return from the trap
+ nop
+
+ /* We have to grab a window before returning. */
+ sll %g2, 0x1, %g1
+rirq_patch4: srl %g2, 7, %g2
+ or %g1, %g2, %g1
+rirq_patch5: and %g1, 0xff, %g1
+
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ restore %g0, %g0, %g0
+ LOAD_WINDOW(sp)
+ save %g0, %g0, %g0
+
+ /* Reload the entire frame in case this is from a
+ * kernel system call or whatever...
+ */
+1:
+ LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_irq_user_stack_is_bolixed:
+ wr %t_wim, 0x0, %wim
+ WRITE_PAUSE
+
+ /* User has a toasty window, must grab klock. */
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(window_ret_fault)
+ add %sp, REGWIN_SZ, %o0
+
+ /* We have klock, so we must return just like a normal trap. */
+ b ret_trap_entry
+ clr %l5
+
+ .globl C_LABEL(sun4c_reti_stackchk)
+C_LABEL(sun4c_reti_stackchk):
+ be 1f
+ and %fp, 0xfff, %g1 ! delay slot
+
+ b,a ret_irq_user_stack_is_bolixed
+
+ /* See if we have to check the sanity of one page or two */
+1:
+ add %g1, 0x38, %g1
+ sra %fp, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ andncc %g1, 0xff8, %g0
+
+ /* %sp is in vma hole, yuck */
+ b,a ret_irq_user_stack_is_bolixed
+
+1:
+ be sun4c_reti_onepage /* Only one page to check */
+ lda [%fp] ASI_PTE, %g2
+
+sun4c_reti_twopages:
+ add %fp, 0x38, %g1
+ sra %g1, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ lda [%g1] ASI_PTE, %g2
+
+ /* Second page is in vma hole */
+ b,a ret_irq_user_stack_is_bolixed
+
+1:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne sun4c_reti_onepage
+ lda [%fp] ASI_PTE, %g2
+
+ /* Second page has bad perms */
+ b,a ret_irq_user_stack_is_bolixed
+
+sun4c_reti_onepage:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne 1f
+ nop
+
+ /* A page had bad page permissions, losing... */
+ b,a ret_irq_user_stack_is_bolixed
+
+ /* Whee, things are ok, load the window and continue. */
+1:
+ restore %g0, %g0, %g0
+
+ LOAD_WINDOW(sp)
+
+ save %g0, %g0, %g0
+ b,a ret_irq_userwins_ok
+
+ .globl C_LABEL(srmmu_reti_stackchk)
+C_LABEL(srmmu_reti_stackchk):
+ sethi %hi(C_LABEL(page_offset)), %g1
+ bne ret_irq_user_stack_is_bolixed
+ ld [%g1 + %lo(C_LABEL(page_offset))], %g1
+ cmp %g1, %fp
+ bleu ret_irq_user_stack_is_bolixed
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g0
+
+ lda [%g0] ASI_M_MMUREGS, %g1
+ or %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ restore %g0, %g0, %g0
+
+ LOAD_WINDOW(sp)
+
+ save %g0, %g0, %g0
+
+ andn %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ mov AC_M_SFAR, %g2
+ lda [%g2] ASI_M_MMUREGS, %g2
+
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g1
+ andcc %g1, 0x2, %g0
+ bne ret_irq_user_stack_is_bolixed
+ nop
+
+ b,a ret_irq_userwins_ok
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
new file mode 100644
index 000000000..23fb10987
--- /dev/null
+++ b/arch/sparc/kernel/rtrap.S
@@ -0,0 +1,338 @@
+/* $Id: rtrap.S,v 1.39 1996/10/28 07:49:01 davem Exp $
+ * rtrap.S: Return from Sparc trap low-level code.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/contregs.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+
+#define t_psr l0
+#define t_pc l1
+#define t_npc l2
+#define t_wim l3
+#define twin_tmp1 l4
+#define glob_tmp g4
+#define curptr g6
+
+ /* 7 WINDOW SPARC PATCH INSTRUCTIONS */
+ .globl rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
+ .globl rtrap_7win_patch4, rtrap_7win_patch5
+rtrap_7win_patch1: srl %t_wim, 0x6, %glob_tmp
+rtrap_7win_patch2: and %glob_tmp, 0x7f, %glob_tmp
+rtrap_7win_patch3: srl %g1, 7, %g2
+rtrap_7win_patch4: srl %g2, 6, %g2
+rtrap_7win_patch5: and %g1, 0x7f, %g1
+ /* END OF PATCH INSTRUCTIONS */
+
+ /* We need to check for a few things which are:
+ * 1) The need to call schedule() because this
+ * processes quantum is up.
+ * 2) Pending signals for this process, if any
+ * exist we need to call do_signal() to do
+ * the needy.
+ *
+ * Else we just check if the rett would land us
+ * in an invalid window, if so we need to grab
+ * it off the user/kernel stack first.
+ */
+
+ .globl ret_trap_entry, rtrap_patch1, rtrap_patch2
+ .globl rtrap_patch3, rtrap_patch4, rtrap_patch5
+ret_trap_entry:
+ sethi %hi(C_LABEL(intr_count)), %g2
+ ld [%g2 + %lo(C_LABEL(intr_count))], %g3
+ orcc %g0, %g3, %g0
+ bne 0f
+ sethi %hi(C_LABEL(bh_active)), %l3
+ sethi %hi(C_LABEL(bh_mask)), %l4
+9:
+ ld [%l4 + %lo(C_LABEL(bh_mask))], %g5
+ ld [%l3 + %lo(C_LABEL(bh_active))], %g4
+ sethi %hi(C_LABEL(intr_count)), %l7
+ andcc %g4, %g5, %g0
+ be 0f
+ mov 1, %g7
+ call C_LABEL(do_bottom_half)
+ st %g7, [%l7 + %lo(C_LABEL(intr_count))]
+ b 9b
+ st %g0, [%l7 + %lo(C_LABEL(intr_count))]
+0:
+ andcc %t_psr, PSR_PS, %g0
+ be 1f
+ sethi %hi(C_LABEL(need_resched)), %twin_tmp1
+
+ b ret_trap_kernel
+ wr %t_psr, 0x0, %psr
+
+1:
+ ld [%twin_tmp1 + %lo(C_LABEL(need_resched))], %g2
+
+ cmp %g2, 0
+ be signal_p
+ nop
+
+ call C_LABEL(schedule)
+ nop
+
+signal_p:
+ ld [%curptr + TASK_SIGNAL], %g2
+ ld [%curptr + TASK_BLOCKED], %o0
+ andncc %g2, %o0, %g0
+ be,a ret_trap_continue
+ ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
+
+ mov %l5, %o2
+ mov %l6, %o3
+ call C_LABEL(do_signal)
+ add %sp, REGWIN_SZ, %o1 ! pt_regs ptr
+
+ /* Fall through. */
+ ld [%sp + REGWIN_SZ + PT_PSR], %t_psr
+ clr %l6
+ret_trap_continue:
+ wr %t_psr, 0x0, %psr
+
+ ld [%curptr + THREAD_W_SAVED], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ be ret_trap_nobufwins
+ nop
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ mov 1, %o1
+ call C_LABEL(try_to_clear_window_buffer)
+ add %sp, REGWIN_SZ, %o0
+
+ b,a signal_p
+
+ret_trap_nobufwins:
+ /* Load up the user's out registers so we can pull
+ * a window from the stack, if necessary.
+ */
+ LOAD_PT_INS(sp)
+
+ /* If there are already live user windows in the
+ * set we can return from trap safely.
+ */
+ ld [%curptr + THREAD_UMASK], %twin_tmp1
+ orcc %g0, %twin_tmp1, %g0
+ bne ret_trap_userwins_ok
+ nop
+
+ /* Calculate new %wim, we have to pull a register
+ * window from the users stack.
+ */
+ret_trap_pull_one_window:
+ rd %wim, %t_wim
+ sll %t_wim, 0x1, %twin_tmp1
+rtrap_patch1: srl %t_wim, 0x7, %glob_tmp
+ or %glob_tmp, %twin_tmp1, %glob_tmp
+rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp
+
+ wr %glob_tmp, 0x0, %wim
+
+ /* Here comes the architecture specific
+ * branch to the user stack checking routine
+ * for return from traps.
+ */
+ .globl C_LABEL(rtrap_mmu_patchme)
+C_LABEL(rtrap_mmu_patchme): b C_LABEL(sun4c_rett_stackchk)
+ andcc %fp, 0x7, %g0
+
+ret_trap_userwins_ok:
+ LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
+ or %t_pc, %t_npc, %g2
+ andcc %g2, 0x3, %g0
+ be 1f
+ nop
+
+ b ret_trap_unaligned_pc
+ add %sp, REGWIN_SZ, %o0
+
+1:
+ LOAD_PT_YREG(sp, g1)
+ LOAD_PT_GLOBALS(sp)
+
+ LEAVE_SYSCALL
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_trap_unaligned_pc:
+ ld [%sp + REGWIN_SZ + PT_PC], %o1
+ ld [%sp + REGWIN_SZ + PT_NPC], %o2
+ ld [%sp + REGWIN_SZ + PT_PSR], %o3
+
+ wr %t_wim, 0x0, %wim ! or else...
+ WRITE_PAUSE
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(do_memaccess_unaligned)
+ nop
+
+ b,a signal_p
+
+ret_trap_kernel:
+ /* Will the rett land us in the invalid window? */
+ mov 2, %g1
+ sll %g1, %t_psr, %g1
+rtrap_patch3: srl %g1, 8, %g2
+ or %g1, %g2, %g1
+ rd %wim, %g2
+ andcc %g2, %g1, %g0
+ be 1f ! Nope, just return from the trap
+ sll %g2, 0x1, %g1
+
+ /* We have to grab a window before returning. */
+rtrap_patch4: srl %g2, 7, %g2
+ or %g1, %g2, %g1
+rtrap_patch5: and %g1, 0xff, %g1
+
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ /* Grrr, make sure we load from the right %sp... */
+ LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+
+ restore %g0, %g0, %g0
+ LOAD_WINDOW(sp)
+ b 2f
+ save %g0, %g0, %g0
+
+ /* Reload the entire frame in case this is from a
+ * kernel system call or whatever...
+ */
+1:
+ LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+2:
+ LEAVE_SYSCALL
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+ret_trap_user_stack_is_bolixed:
+ wr %t_wim, 0x0, %wim
+ WRITE_PAUSE
+
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(window_ret_fault)
+ add %sp, REGWIN_SZ, %o0
+
+ b,a signal_p
+
+ .globl C_LABEL(sun4c_rett_stackchk)
+C_LABEL(sun4c_rett_stackchk):
+ be 1f
+ and %fp, 0xfff, %g1 ! delay slot
+
+ b ret_trap_user_stack_is_bolixed + 0x4
+ wr %t_wim, 0x0, %wim
+
+ /* See if we have to check the sanity of one page or two */
+1:
+ add %g1, 0x38, %g1
+ sra %fp, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ andncc %g1, 0xff8, %g0
+
+ /* %sp is in vma hole, yuck */
+ b ret_trap_user_stack_is_bolixed + 0x4
+ wr %t_wim, 0x0, %wim
+
+1:
+ be sun4c_rett_onepage /* Only one page to check */
+ lda [%fp] ASI_PTE, %g2
+
+sun4c_rett_twopages:
+ add %fp, 0x38, %g1
+ sra %g1, 29, %g2
+ add %g2, 0x1, %g2
+ andncc %g2, 0x1, %g0
+ be 1f
+ lda [%g1] ASI_PTE, %g2
+
+ /* Second page is in vma hole */
+ b ret_trap_user_stack_is_bolixed + 0x4
+ wr %t_wim, 0x0, %wim
+
+1:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne sun4c_rett_onepage
+ lda [%fp] ASI_PTE, %g2
+
+ /* Second page has bad perms */
+ b ret_trap_user_stack_is_bolixed + 0x4
+ wr %t_wim, 0x0, %wim
+
+sun4c_rett_onepage:
+ srl %g2, 29, %g2
+ andcc %g2, 0x4, %g0
+ bne,a 1f
+ restore %g0, %g0, %g0
+
+ /* A page had bad page permissions, losing... */
+ b ret_trap_user_stack_is_bolixed + 0x4
+ wr %t_wim, 0x0, %wim
+
+ /* Whee, things are ok, load the window and continue. */
+1:
+ LOAD_WINDOW(sp)
+
+ b ret_trap_userwins_ok
+ save %g0, %g0, %g0
+
+ .globl C_LABEL(srmmu_rett_stackchk)
+C_LABEL(srmmu_rett_stackchk):
+ sethi %hi(C_LABEL(page_offset)), %g1
+ bne ret_trap_user_stack_is_bolixed
+ ld [%g1 + %lo(C_LABEL(page_offset))], %g1
+ cmp %g1, %fp
+ bleu ret_trap_user_stack_is_bolixed
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g0
+
+ lda [%g0] ASI_M_MMUREGS, %g1
+ or %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ restore %g0, %g0, %g0
+
+ LOAD_WINDOW(sp)
+
+ save %g0, %g0, %g0
+
+ andn %g1, 0x2, %g1
+ sta %g1, [%g0] ASI_M_MMUREGS
+
+ mov AC_M_SFAR, %g2
+ lda [%g2] ASI_M_MMUREGS, %g2
+
+ mov AC_M_SFSR, %g1
+ lda [%g1] ASI_M_MMUREGS, %g1
+ andcc %g1, 0x2, %g0
+ be ret_trap_userwins_ok
+ nop
+
+ b,a ret_trap_user_stack_is_bolixed
diff --git a/arch/sparc/kernel/sclow.S b/arch/sparc/kernel/sclow.S
new file mode 100644
index 000000000..6e28137a2
--- /dev/null
+++ b/arch/sparc/kernel/sclow.S
@@ -0,0 +1,193 @@
+/* sclow.S: Low level special syscall handling.
+ * Basically these are cases where we can completely
+ * handle the system call without saving any state
+ * because we know that the process will not sleep.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/ptrace.h>
+#include <asm/errno.h>
+#include <asm/winmacro.h>
+#include <asm/psr.h>
+
+#define CC_AND_RETT \
+ set PSR_C, %l4; \
+ andn %l0, %l4, %l4; \
+ wr %l4, 0x0, %psr; \
+ nop; nop; nop; \
+ jmp %l2; \
+ rett %l2 + 4;
+
+#define SC_AND_RETT \
+ set PSR_C, %l4; \
+ or %l0, %l4, %l4; \
+ wr %l4, 0x0, %psr; \
+ nop; nop; nop; \
+ jmp %l2; \
+ rett %l2 + 4;
+
+#define LABEL(func) CONCAT(func, _low)
+
+ .globl LABEL(sunosnop)
+LABEL(sunosnop):
+ CC_AND_RETT
+
+ .globl LABEL(sunosgetpid)
+LABEL(sunosgetpid):
+ LOAD_CURRENT(l4, l5)
+ ld [%l4 + 108], %i0
+ ld [%l4 + 256], %l5
+ ld [%l5 + 108], %i1
+ CC_AND_RETT
+
+ .globl LABEL(sunosgetuid)
+LABEL(sunosgetuid):
+ LOAD_CURRENT(l4, l5)
+ lduh [%l4 + 280], %i0
+ lduh [%l4 + 282], %i1
+ CC_AND_RETT
+
+ .globl LABEL(sunosgetgid)
+LABEL(sunosgetgid):
+ LOAD_CURRENT(l4, l5)
+ lduh [%l4 + 288], %i0
+ lduh [%l4 + 290], %i1
+ CC_AND_RETT
+
+ .globl LABEL(sunosmctl)
+LABEL(sunosmctl):
+ mov 0, %i0
+ CC_AND_RETT
+
+ .globl LABEL(sunosgdtsize)
+LABEL(sunosgdtsize):
+ mov 256, %i0
+ CC_AND_RETT
+
+ .globl LABEL(sunossblock)
+LABEL(sunossblock):
+ LOAD_CURRENT(l4, l5)
+ set -65793, %l5
+ and %i0, %l5, %l5
+ ld [%l4 + TASK_BLOCKED], %i0
+ or %i0, %l5, %l5
+ st %l5, [%l4 + TASK_BLOCKED]
+ CC_AND_RETT
+
+ .globl LABEL(sunossmask)
+LABEL(sunossmask):
+ LOAD_CURRENT(l4, l5)
+ set -65793, %l5
+ and %i0, %l5, %l5
+ ld [%l4 + TASK_BLOCKED], %i0
+ st %l5, [%l4 + TASK_BLOCKED]
+ CC_AND_RETT
+
+ .globl LABEL(getpagesize)
+LABEL(getpagesize):
+ set 4096, %i0
+ CC_AND_RETT
+
+ .globl LABEL(umask)
+LABEL(umask):
+ LOAD_CURRENT(l4, l5)
+ ld [%l4 + 1560], %l5
+ and %i0, 511, %l4
+ lduh [%l5 + 4], %i0
+ sth %l4, [%l5 + 4]
+ CC_AND_RETT
+
+#if 0
+ .globl LABEL(write)
+LABEL(write):
+ cmp %i0, 255 /* fd >= NR_OPEN */
+ bgu,a write_error_return
+ mov EBADF, %i0
+
+ LOAD_CURRENT(l4, l5)
+ ld [%l4 + 1564], %l5
+ sll %i0, 2, %l6
+ add %l5, %l6, %l5
+ ld [%l5 + 36], %l6
+ cmp %l6, 0 /* !(file=current->files->fd[fd]) */
+ be,a write_error_return
+ mov EBADF, %i0
+
+ ld [%l6 + 36], %l5
+ cmp %l5, 0 /* !(inode=file->f_inode) */
+ be,a write_error_return
+ mov EBADF, %i0
+
+ lduh [%l6], %l5 /* !(file->f_mode & 2) */
+ andcc %l5, 2, %g0
+ be,a write_error_return
+ mov EBADF, %i0
+
+ ld [%l6 + 40], %l5
+ cmp %l5, 0 /* !file->f_op */
+ be,a write_error_return
+ mov EINVAL, %i0
+
+ ld [%l5 + 8], %l5 /* !file->f_op->write */
+ cmp %l5, 0
+ be,a write_error_return
+ mov EINVAL, %i0
+
+ cmp %i2, 0 /* count == 0 */
+ bne 1f
+ nop
+
+ mov 0, %i0
+ CC_AND_RETT
+
+1:
+ /* See if we can do the optimization... */
+ ld [%l6 + 36], %l5
+ lduh [%l5 + 16], %l5
+ srl %l5, 8, %l6
+ cmp %l6, 1 /* MEM_MAJOR */
+ bne,a write_is_too_hard
+ sethi %hi(C_LABEL(quick_sys_write)), %l7
+
+ and %l5, 0xff, %l5
+ cmp %l5, 3 /* NULL_MINOR */
+ bne,a write_is_too_hard
+ sethi %hi(C_LABEL(quick_sys_write)), %l7
+
+ /* We only optimize for the /dev/null case currently,
+ * however to stay POSIX4 compliant we must check the
+ * validity of the passed buffer. Blowlaris2.x does not
+ * do this and is therefore not POSIX4 compliant!
+ * If you are going to optimize for benchmarks, fine,
+ * but to break behavior of a system call in the process
+ * is complete brain damage...
+ */
+
+ /* XXX write verify_area thingy for full POSIX conformance! XXX */
+
+ mov %i2, %i0
+ CC_AND_RETT
+
+write_is_too_hard:
+ b syscall_is_too_hard
+ or %l7, %lo(C_LABEL(quick_sys_write)), %l7
+
+write_error_return:
+ SC_AND_RETT
+#endif
+
+ /* XXX sys_nice() XXX */
+ /* XXX sys_setpriority() XXX */
+ /* XXX sys_getpriority() XXX */
+ /* XXX sys_setregid() XXX */
+ /* XXX sys_setgid() XXX */
+ /* XXX sys_setreuid() XXX */
+ /* XXX sys_setuid() XXX */
+ /* XXX sys_setfsuid() XXX */
+ /* XXX sys_setfsgid() XXX */
+ /* XXX sys_setpgid() XXX */
+ /* XXX sys_getpgid() XXX */
+ /* XXX sys_setsid() XXX */
+ /* XXX sys_getsid() XXX */
diff --git a/arch/sparc/kernel/setup.c b/arch/sparc/kernel/setup.c
index 5ec5b56ba..96b036aa5 100644
--- a/arch/sparc/kernel/setup.c
+++ b/arch/sparc/kernel/setup.c
@@ -1,13 +1,9 @@
-/*
- * linux/arch/alpha/kernel/setup.c
+/* $Id: setup.c,v 1.75 1996/10/12 12:37:27 davem Exp $
+ * linux/arch/sparc/kernel/setup.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
-/*
- * bootup setup stuff..
- */
-
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -16,77 +12,211 @@
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
-#include <linux/ldt.h>
+#include <linux/smp.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/blk.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/openprom.h> /* for console registration + cheese */
-
-extern void get_idprom(void);
-extern void probe_devices(void);
+#include <asm/kgdb.h>
+#include <asm/processor.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/vaddrs.h>
+#include <asm/kdebug.h>
+#include <asm/mbus.h>
+#include <asm/idprom.h>
-/*
- * Gcc is hard to keep happy ;-)
- */
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
{ 0, 0, }, /* unused */
0, /* orig-video-page */
0, /* orig-video-mode */
- 80, /* orig-video-cols */
+ 128, /* orig-video-cols */
0,0,0, /* ega_ax, ega_bx, ega_cx */
- 25 /* orig-video-lines */
+ 54, /* orig-video-lines */
+ 0, /* orig-video-isVGA */
+ 16 /* orig-video-points */
};
-/* At least I hide the sneaky floppy_track_buffer in my dirty assembly
- * code. ;-)
- */
+unsigned int phys_bytes_of_ram, end_of_phys_memory;
unsigned long bios32_init(unsigned long memory_start, unsigned long memory_end)
{
return memory_start;
}
-/* Lame prom console routines, gets registered below. Thanks for the
- * tip Linus. First comes the V0 prom routine, then the V3 version
- * written by Paul Hatchman (paul@sfe.com.au).
+/* Typing sync at the prom prompt calls the function pointed to by
+ * romvec->pv_synchook which I set to the following function.
+ * This should sync all filesystems and return, for now it just
+ * prints out pretty messages and returns.
*/
-void sparc_console_print(const char * p)
+extern unsigned long trapbase;
+extern void breakpoint(void);
+#if CONFIG_SUN_CONSOLE
+extern void console_restore_palette(void);
+#endif
+asmlinkage void sys_sync(void); /* it's really int */
+
+/* Pretty sick eh? */
+void prom_sync_me(void)
{
- unsigned char c;
+ unsigned long prom_tbr, flags;
- while ((c = *(p++)) != 0)
- {
- if (c == '\n') romvec->pv_putchar('\r');
- (*(romvec->pv_putchar))(c);
- }
+ save_and_cli(flags);
+ __asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
+ __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t" : : "r" (&trapbase));
- return;
+#ifdef CONFIG_SUN_CONSOLE
+ console_restore_palette ();
+#endif
+ prom_printf("PROM SYNC COMMAND...\n");
+ show_free_areas();
+ if(current->pid != 0) {
+ sti();
+ sys_sync();
+ cli();
+ }
+ prom_printf("Returning to prom\n");
+ __asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
+ "nop\n\t"
+ "nop\n\t"
+ "nop\n\t" : : "r" (prom_tbr));
+ restore_flags(flags);
+
+ return;
}
-/* paul@sfe.com.au */
-/* V3 prom console printing routines */
-void sparc_console_print_v3 (const char *p)
+extern void rs_kgdb_hook(int tty_num); /* sparc/serial.c */
+
+unsigned int boot_flags;
+#define BOOTME_DEBUG 0x1
+#define BOOTME_SINGLE 0x2
+#define BOOTME_KGDB 0x4
+
+extern char *console_fb_path;
+static int console_fb = 0;
+
+void kernel_enter_debugger(void)
{
- unsigned char c;
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: Entered\n");
+ breakpoint();
+ }
+}
- while ((c = *(p++)) != 0)
- {
- if (c == '\n') romvec->pv_v2devops.v2_dev_write
- ((*romvec->pv_v2bootargs.fd_stdout), "\r", 1);
- romvec->pv_v2devops.v2_dev_write
- ((*romvec->pv_v2bootargs.fd_stdout), &c, 1);
- }
+int obp_system_intr(void)
+{
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: system interrupted\n");
+ breakpoint();
+ return 1;
+ }
+ if (boot_flags & BOOTME_DEBUG) {
+ printk("OBP: system interrupted\n");
+ prom_halt();
+ return 1;
+ }
+ return 0;
+}
- return;
+/*
+ * Process kernel command line switches that are specific to the
+ * SPARC or that require special low-level processing.
+ */
+static void process_switch(char c)
+{
+ switch (c) {
+ case 'd':
+ boot_flags |= BOOTME_DEBUG;
+ break;
+ case 's':
+ boot_flags |= BOOTME_SINGLE;
+ break;
+ case 'h':
+ prom_printf("boot_flags_init: Halt!\n");
+ halt();
+ break;
+ default:
+ printk("Unknown boot switch (-%c)\n", c);
+ break;
+ }
}
+static void boot_flags_init(char *commands)
+{
+ while (*commands) {
+ /* Move to the start of the next "argument". */
+ while (*commands && *commands == ' ')
+ commands++;
+
+ /* Process any command switches, otherwise skip it. */
+ if (*commands == '\0')
+ break;
+ else if (*commands == '-') {
+ commands++;
+ while (*commands && *commands != ' ')
+ process_switch(*commands++);
+ } else if (strlen(commands) >= 9
+ && !strncmp(commands, "kgdb=tty", 8)) {
+ boot_flags |= BOOTME_KGDB;
+ switch (commands[8]) {
+#ifdef CONFIG_SUN_SERIAL
+ case 'a':
+ rs_kgdb_hook(0);
+ printk("KGDB: Using serial line /dev/ttya.\n");
+ break;
+ case 'b':
+ rs_kgdb_hook(1);
+ printk("KGDB: Using serial line /dev/ttyb.\n");
+ break;
+#endif
+#ifdef CONFIG_AP1000
+ case 'c':
+ printk("KGDB: AP1000+ debugging\n");
+ break;
+#endif
+ default:
+ printk("KGDB: Unknown tty line.\n");
+ boot_flags &= ~BOOTME_KGDB;
+ break;
+ }
+ commands += 9;
+ } else {
+ if (!strncmp(commands, "console=", 8)) {
+ commands += 8;
+ if (!strncmp (commands, "ttya", 4)) {
+ console_fb = 2;
+ prom_printf ("Using /dev/ttya as console.\n");
+ } else if (!strncmp (commands, "ttyb", 4)) {
+ console_fb = 3;
+ prom_printf ("Using /dev/ttyb as console.\n");
+ } else {
+ console_fb = 1;
+ console_fb_path = commands;
+ }
+ }
+ while (*commands && *commands != ' ')
+ commands++;
+ }
+ }
+}
/* This routine will in the future do all the nasty prom stuff
* to probe for the mmu type and its parameters, etc. This will
@@ -94,27 +224,254 @@ void sparc_console_print_v3 (const char *p)
* physical memory probe as on the alpha.
*/
-extern void register_console(void (*proc)(const char *));
-extern unsigned int prom_iface_vers, end_of_phys_memory;
+extern void load_mmu(void);
+extern int prom_probe_memory(void);
+extern void sun4c_probe_vac(void);
+extern char cputypval;
+extern unsigned long start, end;
+extern void panic_setup(char *, int *);
+extern unsigned long srmmu_endmem_fixup(unsigned long);
+
+extern unsigned short root_flags;
+extern unsigned short root_dev;
+extern unsigned short ram_flags;
+extern unsigned ramdisk_image;
+extern unsigned ramdisk_size;
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+extern int root_mountflags;
+
+char saved_command_line[256];
+char reboot_command[256];
+enum sparc_cpu sparc_cpu_model;
+
+struct tt_entry *sparc_ttable;
+
+static struct pt_regs fake_swapper_regs = { 0, 0, 0, 0, { 0, } };
void setup_arch(char **cmdline_p,
unsigned long * memory_start_p, unsigned long * memory_end_p)
{
- if(romvec->pv_romvers == 0) {
- register_console(sparc_console_print);
- } else {
- register_console(sparc_console_print_v3);
+ int total, i, packed;
+
+#if CONFIG_AP1000
+ register_console(prom_printf);
+ ((char *)(&cputypval))[4] = 'm'; /* ugly :-( */
+#endif
+
+ sparc_ttable = (struct tt_entry *) &start;
+
+ /* Initialize PROM console and command line. */
+ *cmdline_p = prom_getbootargs();
+ strcpy(saved_command_line, *cmdline_p);
+
+ /* Set sparc_cpu_model */
+ sparc_cpu_model = sun_unknown;
+ if(!strcmp(&cputypval,"sun4 ")) { sparc_cpu_model=sun4; }
+ if(!strcmp(&cputypval,"sun4c")) { sparc_cpu_model=sun4c; }
+ if(!strcmp(&cputypval,"sun4m")) { sparc_cpu_model=sun4m; }
+ if(!strcmp(&cputypval,"sun4d")) { sparc_cpu_model=sun4d; }
+ if(!strcmp(&cputypval,"sun4e")) { sparc_cpu_model=sun4e; }
+ if(!strcmp(&cputypval,"sun4u")) { sparc_cpu_model=sun4u; }
+ printk("ARCH: ");
+ packed = 0;
+ switch(sparc_cpu_model) {
+ case sun4:
+ printk("SUN4\n");
+ sun4c_probe_vac();
+ packed = 0;
+ break;
+ case sun4c:
+ printk("SUN4C\n");
+ sun4c_probe_vac();
+ packed = 0;
+ break;
+ case sun4m:
+ printk("SUN4M\n");
+ packed = 1;
+ break;
+ case sun4d:
+ printk("SUN4D\n");
+ packed = 1;
+ break;
+ case sun4e:
+ printk("SUN4E\n");
+ packed = 0;
+ break;
+ case sun4u:
+ printk("SUN4U\n");
+ break;
+ default:
+ printk("UNKNOWN!\n");
+ break;
};
- printk("Sparc PROM-Console registered...\n");
- get_idprom(); /* probe_devices expects this to be done */
- probe_devices(); /* cpu/fpu, mmu probes */
+ boot_flags_init(*cmdline_p);
+ if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
+ ((*(short *)linux_dbvec) != -1)) {
+ printk("Booted under KADB. Syncing trap table.\n");
+ (*(linux_dbvec->teach_debugger))();
+ }
+ if((boot_flags & BOOTME_KGDB)) {
+ set_debug_traps();
+ breakpoint();
+ }
+ idprom_init();
+ load_mmu();
+ total = prom_probe_memory();
*memory_start_p = (((unsigned long) &end));
- *memory_end_p = (((unsigned long) end_of_phys_memory));
+
+ if(!packed) {
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ } else {
+ unsigned int sum = 0;
+
+ for(i = 0; sp_banks[i].num_bytes != 0; i++)
+ sum += sp_banks[i].num_bytes;
+
+ end_of_phys_memory = sum;
+ }
+
+ prom_setsync(prom_sync_me);
+
+ *memory_end_p = (end_of_phys_memory + KERNBASE);
+ if((sparc_cpu_model == sun4c) ||
+ (sparc_cpu_model == sun4))
+ goto not_relevant;
+ if(end_of_phys_memory >= 0x0d000000) {
+ *memory_end_p = 0xfd000000;
+ } else {
+ if((sparc_cpu_model == sun4m) ||
+ (sparc_cpu_model == sun4d))
+ *memory_end_p = srmmu_endmem_fixup(*memory_end_p);
+ }
+not_relevant:
+
+ if (!root_flags)
+ root_mountflags &= ~MS_RDONLY;
+ ROOT_DEV = to_kdev_t(root_dev);
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (ramdisk_image) {
+ initrd_start = ramdisk_image;
+ if (initrd_start < KERNBASE) initrd_start += KERNBASE;
+ initrd_end = initrd_start + ramdisk_size;
+ if (initrd_end > *memory_end_p) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ initrd_end,*memory_end_p);
+ initrd_start = 0;
+ }
+ if (initrd_start >= *memory_start_p && initrd_start < *memory_start_p + 2 * PAGE_SIZE) {
+ initrd_below_start_ok = 1;
+ *memory_start_p = PAGE_ALIGN (initrd_end);
+ }
+ }
+#endif
+
+ /* Due to stack alignment restrictions and assumptions... */
+ init_task.mm->mmap->vm_page_prot = PAGE_SHARED;
+ init_task.mm->mmap->vm_start = KERNBASE;
+ init_task.mm->mmap->vm_end = *memory_end_p;
+ init_task.tss.kregs = &fake_swapper_regs;
+
+ {
+ extern int serial_console; /* in console.c, of course */
+#if !CONFIG_SUN_SERIAL
+ serial_console = 0;
+#else
+ switch (console_fb) {
+ case 0: /* Let get our io devices from prom */
+ {
+ int idev = prom_query_input_device();
+ int odev = prom_query_output_device();
+ if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
+ serial_console = 0;
+ } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
+ serial_console = 1;
+ } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
+ serial_console = 2;
+ } else {
+ prom_printf("Inconsistent console\n");
+ prom_halt();
+ }
+ }
+ break;
+ case 1: serial_console = 0; break; /* Force one of the framebuffers as console */
+ case 2: serial_console = 1; break; /* Force ttya as console */
+ case 3: serial_console = 2; break; /* Force ttyb as console */
+ }
+#endif
+ }
}
asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{
return -EIO;
}
+
+/* BUFFER is PAGE_SIZE bytes long. */
+
+extern char *sparc_cpu_type[];
+extern char *sparc_fpu_type[];
+
+extern char *smp_info(void);
+
+extern int linux_num_cpus;
+
+int get_cpuinfo(char *buffer)
+{
+ int cpuid=get_cpuid();
+
+ return sprintf(buffer, "cpu\t\t: %s\n"
+ "fpu\t\t: %s\n"
+ "promlib\t\t: Version %d Revision %d\n"
+ "type\t\t: %s\n"
+ "ncpus probed\t: %d\n"
+ "ncpus active\t: %d\n"
+#ifndef __SMP__
+ "BogoMips\t: %lu.%02lu\n"
+#else
+ "Cpu0Bogo\t: %lu.%02lu\n"
+ "Cpu1Bogo\t: %lu.%02lu\n"
+ "Cpu2Bogo\t: %lu.%02lu\n"
+ "Cpu3Bogo\t: %lu.%02lu\n"
+#endif
+ "%s"
+#ifdef __SMP__
+ "%s"
+#endif
+ ,
+ sparc_cpu_type[cpuid],
+ sparc_fpu_type[cpuid],
+#if CONFIG_AP1000
+ 0, 0,
+#else
+ romvec->pv_romvers, prom_rev,
+#endif
+ &cputypval,
+ linux_num_cpus, smp_num_cpus,
+#ifndef __SMP__
+ loops_per_sec/500000, (loops_per_sec/5000) % 100,
+#else
+ cpu_data[0].udelay_val/500000, (cpu_data[0].udelay_val/5000)%100,
+ cpu_data[1].udelay_val/500000, (cpu_data[1].udelay_val/5000)%100,
+ cpu_data[2].udelay_val/500000, (cpu_data[2].udelay_val/5000)%100,
+ cpu_data[3].udelay_val/500000, (cpu_data[3].udelay_val/5000)%100,
+#endif
+ mmu_info()
+#ifdef __SMP__
+ , smp_info()
+#endif
+ );
+
+}
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index cd949e4ed..52952eb61 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1,7 +1,9 @@
-/*
+/* $Id: signal.c,v 1.57 1996/10/31 00:59:01 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
+ * Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/sched.h>
@@ -11,61 +13,555 @@
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
+#include <linux/mm.h>
-#include <asm/segment.h>
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
#define _S(nr) (1<<((nr)-1))
#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
-asmlinkage int sys_waitpid(pid_t pid,unsigned long * stat_addr, int options);
+asmlinkage int sys_waitpid(pid_t pid, unsigned long *stat_addr, int options);
+asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
+ unsigned long orig_o0, int ret_from_syscall);
-/*
- * atomically swap in the new signal mask, and wait for a signal.
+/* This turned off for production... */
+/* #define DEBUG_FATAL_SIGNAL 1 */
+
+#ifdef DEBUG_FATAL_SIGNAL
+extern void instruction_dump (unsigned long *pc);
+#endif
+
+/* atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
*/
-asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, unsigned long set)
+asmlinkage inline void _sigpause_common(unsigned int set, struct pt_regs *regs)
{
unsigned long mask;
- struct pt_regs * regs = (struct pt_regs *) &restart;
mask = current->blocked;
current->blocked = set & _BLOCKABLE;
+ regs->pc = regs->npc;
+ regs->npc += 4;
+ /* Condition codes and return value where set here for sigpause,
+ * and so got used by setup_frame, which again causes sigreturn()
+ * to return -EINTR.
+ */
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
- if (do_signal(mask,regs))
- return -EINTR;
+ /*
+ * Return -EINTR and set condition code here,
+ * so the interrupted system call actually returns
+ * these.
+ */
+ regs->psr |= PSR_C;
+ regs->u_regs[UREG_I0] = EINTR;
+ if (do_signal(mask, regs, 0, 0))
+ return;
}
}
-asmlinkage int sys_sigreturn(unsigned long __unused)
+asmlinkage void do_sigpause(unsigned int set, struct pt_regs *regs)
+{
+ _sigpause_common(set, regs);
+}
+
+asmlinkage void do_sigsuspend (struct pt_regs *regs)
{
- halt();
- return 0;
+ _sigpause_common(regs->u_regs[UREG_I0], regs);
+}
+
+asmlinkage void do_sigreturn(struct pt_regs *regs)
+{
+ struct sigcontext *scptr =
+ (struct sigcontext *) regs->u_regs[UREG_I0];
+ unsigned long pc, npc, psr;
+
+ synchronize_user_stack();
+
+ /* Check sanity of the user arg. */
+ if(verify_area(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
+ (((unsigned long) scptr) & 3)) {
+ printk("%s [%d]: do_sigreturn, scptr is invalid at "
+ "pc<%08lx> scptr<%p>\n",
+ current->comm, current->pid, regs->pc, scptr);
+ do_exit(SIGSEGV);
+ }
+ __get_user(pc, &scptr->sigc_pc);
+ __get_user(npc, &scptr->sigc_npc);
+ if((pc | npc) & 3)
+ do_exit(SIGSEGV); /* Nice try. */
+
+ __get_user(current->blocked, &scptr->sigc_mask);
+ current->blocked &= _BLOCKABLE;
+ __get_user(current->tss.sstk_info.cur_status, &scptr->sigc_onstack);
+ current->tss.sstk_info.cur_status &= 1;
+ regs->pc = pc;
+ regs->npc = npc;
+ __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
+ __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
+ __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
+
+ /* User can only change condition codes in %psr. */
+ __get_user(psr, &scptr->sigc_psr);
+ regs->psr &= ~(PSR_ICC);
+ regs->psr |= (psr & PSR_ICC);
}
-/*
- * Set up a signal frame... Make the stack look the way iBCS2 expects
- * it to look.
+/* Set up a signal frame... Make the stack look the way SunOS
+ * expects it to look which is basically:
+ *
+ * ---------------------------------- <-- %sp at signal time
+ * Struct sigcontext
+ * Signal address
+ * Ptr to sigcontext area above
+ * Signal code
+ * The signal number itself
+ * One register window
+ * ---------------------------------- <-- New %sp
*/
-void setup_frame(struct sigaction * sa, unsigned long ** fp, unsigned long eip,
- struct pt_regs * regs, int signr, unsigned long oldmask)
+struct signal_sframe {
+ struct reg_window sig_window;
+ int sig_num;
+ int sig_code;
+ struct sigcontext *sig_scptr;
+ int sig_address;
+ struct sigcontext sig_context;
+};
+/* To align the structure properly. */
+#define SF_ALIGNEDSZ (((sizeof(struct signal_sframe) + 7) & (~7)))
+
+/* Checks if the fp is valid */
+int invalid_frame_pointer (void *fp, int fplen)
{
- halt();
+ if ((((unsigned long) fp) & 7) ||
+ !__access_ok((unsigned long)fp, fplen) ||
+ ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) &&
+ ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000)))
+ return 1;
+
+ return 0;
}
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
+static inline void
+setup_frame(struct sigaction *sa, unsigned long pc, unsigned long npc,
+ struct pt_regs *regs, int signr, unsigned long oldmask)
+{
+ struct signal_sframe *sframep;
+ struct sigcontext *sc;
+ int window = 0;
+ int old_status = current->tss.sstk_info.cur_status;
+
+ synchronize_user_stack();
+ sframep = (struct signal_sframe *) regs->u_regs[UREG_FP];
+ sframep = (struct signal_sframe *) (((unsigned long) sframep)-SF_ALIGNEDSZ);
+ if (invalid_frame_pointer (sframep, sizeof(*sframep))){
+#if 0 /* fills up the console logs during crashme runs, yuck... */
+ printk("%s [%d]: User has trashed signal stack\n",
+ current->comm, current->pid);
+ printk("Sigstack ptr %p handler at pc<%08lx> for sig<%d>\n",
+ sframep, pc, signr);
+#endif
+ /* Don't change signal code and address, so that
+ * post mortem debuggers can have a look.
+ */
+ do_exit(SIGILL);
+ return;
+ }
+
+ sc = &sframep->sig_context;
+
+ /* We've already made sure frame pointer isn't in kernel space... */
+ __put_user(old_status, &sc->sigc_onstack);
+ __put_user(oldmask, &sc->sigc_mask);
+ __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
+ __put_user(pc, &sc->sigc_pc);
+ __put_user(npc, &sc->sigc_npc);
+ __put_user(regs->psr, &sc->sigc_psr);
+ __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
+ __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
+ __put_user(current->tss.w_saved, &sc->sigc_oswins);
+ if(current->tss.w_saved)
+ for(window = 0; window < current->tss.w_saved; window++) {
+ sc->sigc_spbuf[window] =
+ (char *)current->tss.rwbuf_stkptrs[window];
+ copy_to_user(&sc->sigc_wbuf[window],
+ &current->tss.reg_window[window],
+ sizeof(struct reg_window));
+ }
+ else
+ copy_to_user(sframep, (char *)regs->u_regs[UREG_FP],
+ sizeof(struct reg_window));
+
+ current->tss.w_saved = 0; /* So process is allowed to execute. */
+ __put_user(signr, &sframep->sig_num);
+ if(signr == SIGSEGV ||
+ signr == SIGILL ||
+ signr == SIGFPE ||
+ signr == SIGBUS ||
+ signr == SIGEMT) {
+ __put_user(current->tss.sig_desc, &sframep->sig_code);
+ __put_user(current->tss.sig_address, &sframep->sig_address);
+ } else {
+ __put_user(0, &sframep->sig_code);
+ __put_user(0, &sframep->sig_address);
+ }
+ __put_user(sc, &sframep->sig_scptr);
+ regs->u_regs[UREG_FP] = (unsigned long) sframep;
+ regs->pc = (unsigned long) sa->sa_handler;
+ regs->npc = (regs->pc + 4);
+}
+
+/* Setup a Solaris stack frame */
+static inline void
+setup_svr4_frame(struct sigaction *sa, unsigned long pc, unsigned long npc,
+ struct pt_regs *regs, int signr, unsigned long oldmask)
+{
+ svr4_signal_frame_t *sfp;
+ svr4_gregset_t *gr;
+ svr4_siginfo_t *si;
+ svr4_mcontext_t *mc;
+ svr4_gwindows_t *gw;
+ svr4_ucontext_t *uc;
+ int window = 0;
+
+ synchronize_user_stack();
+ sfp = (svr4_signal_frame_t *) regs->u_regs[UREG_FP] - REGWIN_SZ;
+ sfp = (svr4_signal_frame_t *) (((unsigned long) sfp)-SVR4_SF_ALIGNED);
+
+ if (invalid_frame_pointer (sfp, sizeof (*sfp))){
+#if 0
+ printk ("Invalid stack frame\n");
+#endif
+ do_exit(SIGILL);
+ return;
+ }
+
+ /* Start with a clean frame pointer and fill it */
+ clear_user(sfp, sizeof (*sfp));
+
+ /* Setup convenience variables */
+ si = &sfp->si;
+ uc = &sfp->uc;
+ gw = &sfp->gw;
+ mc = &uc->mcontext;
+ gr = &mc->greg;
+
+ /* FIXME: where am I supposed to put this?
+ * sc->sigc_onstack = old_status;
+ * anyways, it does not look like it is used for anything at all.
+ */
+ __put_user(oldmask, &uc->sigmask.sigbits [0]);
+
+ /* Store registers */
+ __put_user(regs->pc, &((*gr) [SVR4_PC]));
+ __put_user(regs->npc, &((*gr) [SVR4_NPC]));
+ __put_user(regs->psr, &((*gr) [SVR4_PSR]));
+ __put_user(regs->y, &((*gr) [SVR4_Y]));
+
+ /* Copy g [1..7] and o [0..7] registers */
+ copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs [UREG_G1], sizeof (uint) * 7);
+ copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs [UREG_I0], sizeof (uint) * 8);
+
+ /* Setup sigaltstack, FIXME */
+ __put_user(0xdeadbeef, &uc->stack.sp);
+ __put_user(0, &uc->stack.size);
+ __put_user(0, &uc->stack.flags); /* Possible: ONSTACK, DISABLE */
+
+ /* Save the currently window file: */
+
+ /* 1. Link sfp->uc->gwins to our windows */
+ __put_user(gw, &mc->gwin);
+
+ /* 2. Number of windows to restore at setcontext (): */
+ __put_user(current->tss.w_saved, &gw->count);
+
+ /* 3. Save each valid window
+ * Currently, it makes a copy of the windows from the kernel copy.
+ * David's code for SunOS, makes the copy but keeps the pointer to
+ * the kernel. My version makes the pointer point to a userland
+ * copy of those. Mhm, I wonder if I shouldn't just ignore those
+ * on setcontext and use those that are on the kernel, the signal
+ * handler should not be modyfing those, mhm.
+ *
+ * These windows are just used in case synchronize_user_stack failed
+ * to flush the user windows.
+ */
+ for(window = 0; window < current->tss.w_saved; window++) {
+ __put_user((int *) &(gw->win [window]), &gw->winptr [window]);
+ copy_to_user(&gw->win [window], &current->tss.reg_window [window], sizeof (svr4_rwindow_t));
+ __put_user(0, gw->winptr [window]);
+ }
+
+ /* 4. We just pay attention to the gw->count field on setcontext */
+ current->tss.w_saved = 0; /* So process is allowed to execute. */
+
+ /* Setup the signal information. Solaris expects a bunch of
+ * information to be passed to the signal handler, we don't provide
+ * that much currently, should use those that David already
+ * is providing with tss.sig_desc
+ */
+ __put_user(signr, &si->siginfo.signo);
+ __put_user(SVR4_SINOINFO, &si->siginfo.code);
+
+ regs->u_regs[UREG_FP] = (unsigned long) sfp;
+ regs->pc = (unsigned long) sa->sa_handler;
+ regs->npc = (regs->pc + 4);
+
+ /* Arguments passed to signal handler */
+ if (regs->u_regs [14]){
+ struct reg_window *rw = (struct reg_window *) regs->u_regs [14];
+
+ __put_user(signr, &rw->ins [0]);
+ __put_user(si, &rw->ins [1]);
+ __put_user(uc, &rw->ins [2]);
+ __put_user(sfp, &rw->ins [6]); /* frame pointer */
+#if 0
+ regs->u_regs[UREG_I0] = signr;
+ regs->u_regs[UREG_I1] = (uint) si;
+ regs->u_regs[UREG_I2] = (uint) uc;
+#endif
+ }
+}
+
+asmlinkage int
+svr4_getcontext (svr4_ucontext_t *uc, struct pt_regs *regs)
+{
+ svr4_gregset_t *gr;
+ svr4_mcontext_t *mc;
+
+ synchronize_user_stack();
+ if (current->tss.w_saved){
+ printk ("Uh oh, w_saved is not zero (%ld)\n", current->tss.w_saved);
+ do_exit (SIGSEGV);
+ }
+ if(clear_user(uc, sizeof (*uc)))
+ return -EFAULT;
+
+ /* Setup convenience variables */
+ mc = &uc->mcontext;
+ gr = &mc->greg;
+
+ /* We only have < 32 signals, fill the first slot only */
+ __put_user(current->sig->action->sa_mask, &uc->sigmask.sigbits [0]);
+
+ /* Store registers */
+ __put_user(regs->pc, &uc->mcontext.greg [SVR4_PC]);
+ __put_user(regs->npc, &uc->mcontext.greg [SVR4_NPC]);
+ __put_user(regs->psr, &uc->mcontext.greg [SVR4_PSR]);
+ __put_user(regs->y, &uc->mcontext.greg [SVR4_Y]);
+
+ /* Copy g [1..7] and o [0..7] registers */
+ copy_to_user(&(*gr)[SVR4_G1], &regs->u_regs [UREG_G1], sizeof (uint) * 7);
+ copy_to_user(&(*gr)[SVR4_O0], &regs->u_regs [UREG_I0], sizeof (uint) * 8);
+
+ /* Setup sigaltstack, FIXME */
+ __put_user(0xdeadbeef, &uc->stack.sp);
+ __put_user(0, &uc->stack.size);
+ __put_user(0, &uc->stack.flags); /* Possible: ONSTACK, DISABLE */
+
+ /* The register file is not saved
+ * we have already stuffed all of it with sync_user_stack
+ */
+ return 0;
+}
+
+
+/* Set the context for a svr4 application, this is Solaris way to sigreturn */
+asmlinkage int svr4_setcontext (svr4_ucontext_t *c, struct pt_regs *regs)
+{
+ struct thread_struct *tp = &current->tss;
+ svr4_gregset_t *gr;
+ unsigned long pc, npc, psr;
+
+ /* Fixme: restore windows, or is this already taken care of in
+ * svr4_setup_frame when sync_user_windows is done?
+ */
+ flush_user_windows();
+
+ if (tp->w_saved){
+ printk ("Uh oh, w_saved is: 0x%lx\n", tp->w_saved);
+ do_exit(SIGSEGV);
+ }
+ if (((uint) c) & 3){
+ printk ("Unaligned structure passed\n");
+ do_exit (SIGSEGV);
+ }
+
+ if(!__access_ok((unsigned long)c, sizeof(*c))) {
+ /* Miguel, add nice debugging msg _here_. ;-) */
+ do_exit(SIGSEGV);
+ }
+
+ /* Check for valid PC and nPC */
+ gr = &c->mcontext.greg;
+ __get_user(pc, &((*gr)[SVR4_PC]));
+ __get_user(npc, &((*gr)[SVR4_NPC]));
+ if((pc | npc) & 3) {
+ printk ("setcontext, PC or nPC were bogus\n");
+ do_exit (SIGSEGV);
+ }
+ /* Retrieve information from passed ucontext */
+ __get_user(current->blocked, &c->sigmask.sigbits [0]);
+ current->blocked &= _BLOCKABLE;
+ regs->pc = pc;
+ regs->npc = npc;
+ __get_user(regs->y, &((*gr) [SVR4_Y]));
+ __get_user(psr, &((*gr) [SVR4_PSR]));
+ regs->psr &= ~(PSR_ICC);
+ regs->psr |= (psr & PSR_ICC);
+
+ /* Restore g[1..7] and o[0..7] registers */
+ copy_from_user(&regs->u_regs [UREG_G1], &(*gr)[SVR4_G1], sizeof (uint) * 7);
+ copy_from_user(&regs->u_regs [UREG_I0], &(*gr)[SVR4_O0], sizeof (uint) * 8);
+
+ printk ("Setting PC=%lx nPC=%lx\n", regs->pc, regs->npc);
+ return -EINTR;
+}
+
+static inline void handle_signal(unsigned long signr, struct sigaction *sa,
+ unsigned long oldmask, struct pt_regs *regs,
+ int svr4_signal)
+{
+ if(svr4_signal)
+ setup_svr4_frame(sa, regs->pc, regs->npc, regs, signr, oldmask);
+ else
+ setup_frame(sa, regs->pc, regs->npc, regs, signr, oldmask);
+
+ if(sa->sa_flags & SA_ONESHOT)
+ sa->sa_handler = NULL;
+ if(!(sa->sa_flags & SA_NOMASK))
+ current->blocked |= (sa->sa_mask | _S(signr)) & _BLOCKABLE;
+}
+
+static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
+ struct sigaction *sa)
+{
+ switch(regs->u_regs[UREG_I0]) {
+ case ERESTARTNOHAND:
+ no_system_call_restart:
+ regs->u_regs[UREG_I0] = EINTR;
+ regs->psr |= PSR_C;
+ break;
+ case ERESTARTSYS:
+ if(!(sa->sa_flags & SA_RESTART))
+ goto no_system_call_restart;
+ /* fallthrough */
+ case ERESTARTNOINTR:
+ regs->u_regs[UREG_I0] = orig_i0;
+ regs->pc -= 4;
+ regs->npc -= 4;
+ }
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
- *
- * Note that we go through the signals twice: once to check the signals that
- * the kernel can handle, and then we build all the user-level signal handling
- * stack-frames in one go after that.
*/
-asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs)
+asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
+ unsigned long orig_i0, int restart_syscall)
{
- halt();
- return 1;
+ unsigned long signr, mask = ~current->blocked;
+ struct sigaction *sa;
+ int svr4_signal = current->personality == PER_SVR4;
+
+ while ((signr = current->signal & mask) != 0) {
+ signr = ffz(~signr);
+ clear_bit(signr, &current->signal);
+ sa = current->sig->action + signr;
+ signr++;
+ if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ current->exit_code = signr;
+ current->state = TASK_STOPPED;
+ notify_parent(current);
+ schedule();
+ if (!(signr = current->exit_code))
+ continue;
+ current->exit_code = 0;
+ if (signr == SIGSTOP)
+ continue;
+ if (_S(signr) & current->blocked) {
+ current->signal |= _S(signr);
+ continue;
+ }
+ sa = current->sig->action + signr - 1;
+ }
+ if(sa->sa_handler == SIG_IGN) {
+ if(signr != SIGCHLD)
+ continue;
+ while(sys_waitpid(-1,NULL,WNOHANG) > 0);
+ continue;
+ }
+ if(sa->sa_handler == SIG_DFL) {
+ if(current->pid == 1)
+ continue;
+ switch(signr) {
+ case SIGCONT: case SIGCHLD: case SIGWINCH:
+ continue;
+
+ case SIGTSTP: case SIGTTIN: case SIGTTOU:
+ if (is_orphaned_pgrp(current->pgrp))
+ continue;
+
+ case SIGSTOP:
+ if (current->flags & PF_PTRACED)
+ continue;
+ current->state = TASK_STOPPED;
+ current->exit_code = signr;
+ if(!(current->p_pptr->sig->action[SIGCHLD-1].sa_flags &
+ SA_NOCLDSTOP))
+ notify_parent(current);
+ schedule();
+ continue;
+
+ case SIGQUIT: case SIGILL: case SIGTRAP:
+ case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
+ if(current->binfmt && current->binfmt->core_dump) {
+ if(current->binfmt->core_dump(signr, regs))
+ signr |= 0x80;
+ }
+ /* fall through */
+ default:
+ current->signal |= _S(signr & 0x7f);
+ current->flags |= PF_SIGNALED;
+ do_exit(signr);
+ }
+ }
+ if(restart_syscall)
+ syscall_restart(orig_i0, regs, sa);
+ handle_signal(signr, sa, oldmask, regs, svr4_signal);
+ return 1;
+ }
+ if(restart_syscall &&
+ (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+ regs->u_regs[UREG_I0] == ERESTARTSYS ||
+ regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+ /* replay the system call when we are done */
+ regs->u_regs[UREG_I0] = orig_i0;
+ regs->pc -= 4;
+ regs->npc -= 4;
+ }
+ return 0;
+}
+
+asmlinkage int
+sys_sigstack(struct sigstack *ssptr, struct sigstack *ossptr)
+{
+ /* First see if old state is wanted. */
+ if(ossptr) {
+ if(copy_to_user(ossptr, &current->tss.sstk_info, sizeof(struct sigstack)))
+ return -EFAULT;
+ }
+
+ /* Now see if we want to update the new state. */
+ if(ssptr) {
+ if(copy_from_user(&current->tss.sstk_info, ssptr, sizeof(struct sigstack)))
+ return -EFAULT;
+ }
+ return 0;
}
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
new file mode 100644
index 000000000..5dbe26cc3
--- /dev/null
+++ b/arch/sparc/kernel/smp.c
@@ -0,0 +1,650 @@
+/* smp.c: Sparc SMP support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/ptrace.h>
+
+#include <linux/kernel.h>
+#include <linux/tasks.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+
+extern ctxd_t *srmmu_ctx_table_phys;
+extern int linux_num_cpus;
+
+/* Per-CPU trap logging: a ring of (pc, psr) pairs per processor,
+ * indexed by trap_log_ent[cpu]. */
+struct tlog {
+ unsigned long pc;
+ unsigned long psr;
+};
+
+struct tlog trap_log[4][256];
+unsigned long trap_log_ent[4] = { 0, 0, 0, 0, };
+
+extern void calibrate_delay(void);
+
+volatile unsigned long stuck_pc = 0;
+volatile int smp_processors_ready = 0;
+
+/* Global SMP bookkeeping: cpu_present_map is a bitmask of CPUs found,
+ * cpu_callin_map flags CPUs that have checked in (also reused as an
+ * ack array by smp_message_pass), mid_xlate maps logical CPU number
+ * to hardware module ID. */
+int smp_found_config = 0;
+unsigned long cpu_present_map = 0;
+int smp_num_cpus = 1;
+int smp_threads_ready=0;
+unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
+volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
+volatile unsigned long smp_invalidate_needed[NR_CPUS] = { 0, };
+volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
+struct cpuinfo_sparc cpu_data[NR_CPUS];
+unsigned char boot_cpu_id = 0;
+static int smp_activated = 0;
+/* Current cross-CPU message state; written only by the kernel-lock
+ * holder (see smp_message_pass). */
+static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
+static volatile unsigned long smp_msg_data;
+static volatile int smp_src_cpu;
+static volatile int smp_msg_id;
+volatile int cpu_number_map[NR_CPUS];
+volatile int cpu_logical_map[NR_CPUS];
+
+/* The only guaranteed locking primitive available on all Sparc
+ * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
+ * places the current byte at the effective address into dest_reg and
+ * places 0xff there afterwards. Pretty lame locking primitive
+ * compared to the Alpha and the intel no? Most Sparcs have 'swap'
+ * instruction which is much better...
+ */
+klock_t kernel_flag = KLOCK_CLEAR;
+volatile unsigned char active_kernel_processor = NO_PROC_ID;
+volatile unsigned long kernel_counter = 0;
+volatile unsigned long syscall_count = 0;
+volatile unsigned long ipi_count;
+/* Optional SMP profiling counters. */
+#ifdef __SMP_PROF__
+volatile unsigned long smp_spins[NR_CPUS]={0};
+volatile unsigned long smp_spins_syscall[NR_CPUS]={0};
+volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};
+volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0};
+volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};
+#endif
+#if defined (__SMP_PROF__)
+volatile unsigned long smp_idle_map=0;
+#endif
+
+volatile unsigned long smp_proc_in_lock[NR_CPUS] = {0,};
+volatile int smp_process_available=0;
+
+/*#define SMP_DEBUG*/
+
+#ifdef SMP_DEBUG
+#define SMP_PRINTK(x) printk x
+#else
+#define SMP_PRINTK(x)
+#endif
+
+/* Set by smp_commence() to release secondary CPUs from their
+ * callin spin loop. */
+static volatile int smp_commenced = 0;
+
+/* Static buffer returned by smp_info(). */
+static char smp_buf[512];
+
+/* smp_info: format a 4-CPU status table ("akp" for the active kernel
+ * processor, "online"/"offline" otherwise) into the static smp_buf
+ * and return it. Not reentrant: callers share one buffer. */
+char *smp_info(void)
+{
+ sprintf(smp_buf,
+" CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
+"State: %s\t\t%s\t\t%s\t\t%s\n",
+(cpu_present_map & 1) ? ((active_kernel_processor == 0) ? "akp" : "online") : "offline",
+(cpu_present_map & 2) ? ((active_kernel_processor == 1) ? "akp" : "online") : "offline",
+(cpu_present_map & 4) ? ((active_kernel_processor == 2) ? "akp" : "online") : "offline",
+(cpu_present_map & 8) ? ((active_kernel_processor == 3) ? "akp" : "online") : "offline");
+ return smp_buf;
+}
+
+/* swap: atomically exchange *ptr with val using the SPARC 'swap'
+ * instruction; returns the previous contents of *ptr.
+ * NOTE(review): ptr appears in the output constraint list ("=&r")
+ * as well as as input "1" — looks unusual; confirm the constraints
+ * against the SPARC gcc inline-asm conventions of this era. */
+static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+{
+ __asm__ __volatile__("swap [%1], %0\n\t" :
+ "=&r" (val), "=&r" (ptr) :
+ "0" (val), "1" (ptr));
+ return val;
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+/* Record this CPU's calibrated delay-loop value (set by
+ * calibrate_delay()) into its cpu_data slot. */
+void smp_store_cpu_info(int id)
+{
+ cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
+}
+
+/*
+ * Architecture specific routine called by the kernel just before init is
+ * fired off. This allows the BP to have everything in order [we hope].
+ * At the end of this all the AP's will hit the system scheduling and off
+ * we go. Each AP will load the system gdt's and jump through the kernel
+ * init into idle(). At this point the scheduler will one day take over
+ * and give them jobs to do. smp_callin is a standard routine
+ * we use to track CPU's as they power up.
+ */
+
+/* smp_commence: flip the smp_commenced flag so that secondary CPUs
+ * spinning in smp_callin() fall through and start scheduling. Cache
+ * and TLB flushes bracket the store so the other CPUs observe it. */
+void smp_commence(void)
+{
+ /*
+ * Lets the callin's below out of their loop.
+ */
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ smp_commenced = 1;
+ local_flush_cache_all();
+ local_flush_tlb_all();
+}
+
+/* smp_callin: entry point for a secondary CPU after the trampoline.
+ * Calibrates its delay loop, marks itself in cpu_callin_map so the
+ * boot CPU's startup wait in smp_boot_cpus() can proceed, spins until
+ * smp_commence() releases it, then sets up its idle-thread pointers. */
+void smp_callin(void)
+{
+ int cpuid = smp_processor_id();
+
+ sti();
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ calibrate_delay();
+ smp_store_cpu_info(cpuid);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ cli();
+
+ /* Allow master to continue. */
+ swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ while(!smp_commenced)
+ barrier();
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ /* Fix idle thread fields: load our task pointer into %g6
+ * (the per-CPU 'current' register on sparc). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t"
+ : : "r" (&current_set[smp_processor_id()])
+ : "memory" /* paranoid */);
+ current->mm->mmap->vm_page_prot = PAGE_SHARED;
+ current->mm->mmap->vm_start = PAGE_OFFSET;
+ current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ sti();
+}
+
+/* Called if a CPU's idle loop ever returns — should never happen. */
+void cpu_panic(void)
+{
+ printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
+ panic("SMP bolixed\n");
+}
+
+/*
+ * Cycle through the processors asking the PROM to start each one.
+ */
+
+extern struct prom_cpuinfo linux_cpus[NCPUS];
+static struct linux_prom_registers penguin_ctable;
+
+/* smp_boot_cpus: called by the boot CPU to probe and start all other
+ * processors via the PROM. Builds cpu_present_map / cpu_number_map /
+ * cpu_logical_map, starts each secondary at sparc_cpu_startup through
+ * prom_startcpu(), waits (bounded) for each to check in via
+ * cpu_callin_map, then reports the BogoMIPS total. */
+void smp_boot_cpus(void)
+{
+ int cpucount = 0;
+ int i = 0;
+
+ printk("Entering SMP Mode...\n");
+
+ penguin_ctable.which_io = 0;
+ penguin_ctable.phys_addr = (char *) srmmu_ctx_table_phys;
+ penguin_ctable.reg_size = 0;
+
+ sti();
+ /* NOTE(review): the |= below is immediately overwritten by the
+ * '= 0' on the next line; the loop then rebuilds the mask from
+ * linux_num_cpus. The first store looks dead — confirm intent. */
+ cpu_present_map |= (1 << smp_processor_id());
+ cpu_present_map = 0;
+ for(i=0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1<<i);
+ for(i=0; i < NR_CPUS; i++)
+ cpu_number_map[i] = -1;
+ for(i=0; i < NR_CPUS; i++)
+ cpu_logical_map[i] = -1;
+ mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
+ cpu_number_map[boot_cpu_id] = 0;
+ cpu_logical_map[0] = boot_cpu_id;
+ active_kernel_processor = boot_cpu_id;
+ smp_store_cpu_info(boot_cpu_id);
+ set_irq_udt(0);
+ local_flush_cache_all();
+ if(linux_num_cpus == 1)
+ return; /* Not an MP box. */
+ for(i = 0; i < NR_CPUS; i++) {
+ if(i == boot_cpu_id)
+ continue;
+
+ if(cpu_present_map & (1 << i)) {
+ extern unsigned long sparc_cpu_startup;
+ unsigned long *entry = &sparc_cpu_startup;
+ int timeout;
+
+ /* See trampoline.S for details... */
+ entry += ((i-1) * 6);
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk("Starting CPU %d at %p\n", i, entry);
+ mid_xlate[i] = (linux_cpus[i].mid & ~8);
+ local_flush_cache_all();
+ prom_startcpu(linux_cpus[i].prom_node,
+ &penguin_ctable, 0, (char *)entry);
+
+ /* wheee... it's going... poll up to ~500ms for callin. */
+ for(timeout = 0; timeout < 5000000; timeout++) {
+ if(cpu_callin_map[i])
+ break;
+ udelay(100);
+ }
+ if(cpu_callin_map[i]) {
+ /* Another "Red Snapper". */
+ cpucount++;
+ cpu_number_map[i] = i;
+ cpu_logical_map[i] = i;
+ } else {
+ printk("Processor %d is stuck.\n", i);
+ }
+ }
+ /* A CPU that never checked in is dropped from the maps. */
+ if(!(cpu_callin_map[i])) {
+ cpu_present_map &= ~(1 << i);
+ cpu_number_map[i] = -1;
+ }
+ }
+ local_flush_cache_all();
+ if(cpucount == 0) {
+ printk("Error: only one Processor found.\n");
+ cpu_present_map = (1 << smp_processor_id());
+ } else {
+ unsigned long bogosum = 0;
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i))
+ bogosum += cpu_data[i].udelay_val;
+ }
+ printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount + 1,
+ (bogosum + 2500)/500000,
+ ((bogosum + 2500)/5000)%100);
+ smp_activated = 1;
+ smp_num_cpus = cpucount + 1;
+ }
+ smp_processors_ready = 1;
+}
+
+/* Deliver irq to every CPU whose bit is set in target_map, using the
+ * mid_xlate table to turn logical CPU numbers into hardware MIDs.
+ * Hard-coded to 4 CPUs, matching the maps above. */
+static inline void send_ipi(unsigned long target_map, int irq)
+{
+ int i;
+
+ for(i = 0; i < 4; i++) {
+ if((1<<i) & target_map)
+ set_cpu_int(mid_xlate[i], irq);
+ }
+}
+
+/*
+ * A non wait message cannot pass data or cpu source info. This current
+ * setup is only safe because the kernel lock owner is the only person
+ * who can send a message.
+ *
+ * Wrapping this whole block in a spinlock is not the safe answer either.
+ * A processor may get stuck with irq's off waiting to send a message and
+ * thus not replying to the person spinning for a reply....
+ *
+ * On the Sparc we use NMI's for all messages except reschedule.
+ */
+
+/* CPU currently sending a message, or NO_PROC_ID when idle. */
+static volatile int message_cpu = NO_PROC_ID;
+
+/* smp_message_pass: send msg to 'target' (a CPU number, MSG_ALL, or
+ * MSG_ALL_BUT_SELF) via IPI. irq 15 (NMI) for most messages, irq 13
+ * for reschedule. 'wait' selects completion semantics: 1 = wait for
+ * all CPUs to set cpu_callin_map, 2 = wait for invalidates to clear,
+ * 3 = return immediately, leaving message_cpu/smp_cpu_in_msg held
+ * (the cross-call path releases them itself). Only the kernel-lock
+ * holder may send — see the re-entry check below. */
+void smp_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ unsigned long target_map;
+ int p = smp_processor_id();
+ int irq = 15;
+ int i;
+
+ /* Before processors have been placed into their initial
+ * patterns do not send messages.
+ */
+ if(!smp_processors_ready)
+ return;
+
+ /* Skip the reschedule if we are waiting to clear a
+ * message at this time. The reschedule cannot wait
+ * but is not critical.
+ */
+ if(msg == MSG_RESCHEDULE) {
+ irq = 13;
+ if(smp_cpu_in_msg[p])
+ return;
+ }
+
+ /* Sanity check we don't re-enter this across CPU's. Only the kernel
+ * lock holder may send messages. For a STOP_CPU we are bringing the
+ * entire box to the fastest halt we can.. A reschedule carries
+ * no data and can occur during a flush.. guess what panic
+ * I got to notice this bug...
+ */
+ if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU && msg != MSG_RESCHEDULE) {
+ printk("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
+ smp_processor_id(),msg,message_cpu, smp_msg_id);
+
+ /* I don't know how to gracefully die so that debugging
+ * this doesn't completely eat up my filesystems...
+ * let's try this...
+ */
+ smp_cpu_in_msg[p] = 0; /* In case we come back here... */
+ intr_count = 0; /* and so panic don't barf... */
+ smp_swap(&message_cpu, NO_PROC_ID); /* push the store buffer */
+ sti();
+ printk("spinning, please L1-A, type ctrace and send output to davem\n");
+ while(1)
+ barrier();
+ }
+ smp_swap(&message_cpu, smp_processor_id()); /* store buffers... */
+
+ /* We are busy. */
+ smp_cpu_in_msg[p]++;
+
+ /* Reschedule is currently special. */
+ if(msg != MSG_RESCHEDULE) {
+ smp_src_cpu = p;
+ smp_msg_id = msg;
+ smp_msg_data = data;
+ }
+
+#if 0
+ printk("SMP message pass from cpu %d to cpu %d msg %d\n", p, target, msg);
+#endif
+
+ /* Set the target requirement: clear all callin slots, then pre-set
+ * the slots of CPUs that are NOT targets so the wait loops below
+ * only block on real recipients. */
+ for(i = 0; i < smp_num_cpus; i++)
+ swap((unsigned long *) &cpu_callin_map[i], 0);
+ if(target == MSG_ALL_BUT_SELF) {
+ target_map = (cpu_present_map & ~(1<<p));
+ swap((unsigned long *) &cpu_callin_map[p], 1);
+ } else if(target == MSG_ALL) {
+ target_map = cpu_present_map;
+ } else {
+ for(i = 0; i < smp_num_cpus; i++)
+ if(i != target)
+ swap((unsigned long *) &cpu_callin_map[i], 1);
+ target_map = (1<<target);
+ }
+
+ /* Fire it off. */
+ send_ipi(target_map, irq);
+
+ switch(wait) {
+ case 1:
+ for(i = 0; i < smp_num_cpus; i++)
+ while(!cpu_callin_map[i])
+ barrier();
+ break;
+ case 2:
+ for(i = 0; i < smp_num_cpus; i++)
+ while(smp_invalidate_needed[i])
+ barrier();
+ break;
+ case 3:
+ /* For cross calls we hold message_cpu and smp_cpu_in_msg[]
+ * until all processors disperse. Else we have _big_ problems.
+ */
+ return;
+ }
+ smp_cpu_in_msg[p]--;
+ message_cpu = NO_PROC_ID;
+}
+
+/* Shared descriptor for a cross call in flight: the function, its five
+ * arguments, and per-CPU entry/exit flags used by smp_cross_call()
+ * and smp_message_irq() to rendezvous. */
+struct smp_funcall {
+ smpfunc_t func;
+ unsigned long arg1;
+ unsigned long arg2;
+ unsigned long arg3;
+ unsigned long arg4;
+ unsigned long arg5;
+ unsigned long processors_in[NR_CPUS]; /* Set when ipi entered. */
+ unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+} ccall_info;
+
+/* Returns failure code if for example any of the cpu's failed to respond
+ * within a certain timeout period.
+ */
+
+#define CCALL_TIMEOUT 5000000 /* enough for initial testing */
+
+/* #define DEBUG_CCALL */
+
+/* Some nice day when we really thread the kernel I'd like to synchronize
+ * this with either a broadcast conditional variable, a resource adaptive
+ * generic mutex, or a convoy semaphore scheme of some sort. No reason
+ * we can't let multiple processors in here if the appropriate locking
+ * is done. Note that such a scheme assumes we will have a
+ * prioritized ipi scheme using different software level irq's.
+ */
+/* smp_cross_call: run func(arg1..arg5) on every CPU. Only the active
+ * kernel processor may initiate (panics otherwise). Publishes the
+ * call in ccall_info, IPIs the other CPUs (wait mode 3), runs the
+ * local copy, and spins — bounded by CCALL_TIMEOUT, which note is
+ * shared across the per-CPU loops, not reset per CPU — for every CPU
+ * to enter and then exit the handler. */
+void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+{
+ unsigned long me = smp_processor_id();
+ unsigned long flags;
+ int i, timeout;
+
+#ifdef DEBUG_CCALL
+ printk("xc%d<", me);
+#endif
+ if(smp_processors_ready) {
+ save_and_cli(flags);
+ if(me != active_kernel_processor)
+ goto cross_call_not_master;
+
+ /* Init function glue. */
+ ccall_info.func = func;
+ ccall_info.arg1 = arg1;
+ ccall_info.arg2 = arg2;
+ ccall_info.arg3 = arg3;
+ ccall_info.arg4 = arg4;
+ ccall_info.arg5 = arg5;
+
+ /* Init receive/complete mapping. */
+ for(i = 0; i < smp_num_cpus; i++) {
+ ccall_info.processors_in[i] = 0;
+ ccall_info.processors_out[i] = 0;
+ }
+ ccall_info.processors_in[me] = 1;
+ ccall_info.processors_out[me] = 1;
+
+ /* Fire it off. */
+ smp_message_pass(MSG_ALL_BUT_SELF, MSG_CROSS_CALL, 0, 3);
+
+ /* For debugging purposes right now we can timeout
+ * on both callin and callexit.
+ */
+ timeout = CCALL_TIMEOUT;
+ for(i = 0; i < smp_num_cpus; i++) {
+ while(!ccall_info.processors_in[i] && timeout-- > 0)
+ barrier();
+ if(!ccall_info.processors_in[i])
+ goto procs_time_out;
+ }
+#ifdef DEBUG_CCALL
+ printk("I");
+#endif
+
+ /* Run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+
+ /* Spin on proc dispersion. */
+ timeout = CCALL_TIMEOUT;
+ for(i = 0; i < smp_num_cpus; i++) {
+ while(!ccall_info.processors_out[i] && timeout-- > 0)
+ barrier();
+ if(!ccall_info.processors_out[i])
+ goto procs_time_out;
+ }
+#ifdef DEBUG_CCALL
+ printk("O>");
+#endif
+ /* See wait case 3 in smp_message_pass()... */
+ smp_cpu_in_msg[me]--;
+ message_cpu = NO_PROC_ID;
+ restore_flags(flags);
+ return; /* made it... */
+
+procs_time_out:
+ printk("smp: Wheee, penguin drops off the bus\n");
+ smp_cpu_in_msg[me]--;
+ message_cpu = NO_PROC_ID;
+ restore_flags(flags);
+ return; /* why me... why me... */
+ }
+
+ /* Just need to run local copy. */
+ func(arg1, arg2, arg3, arg4, arg5);
+ return;
+
+cross_call_not_master:
+ printk("Cross call initiated by non master cpu\n");
+ printk("akp=%x me=%08lx\n", active_kernel_processor, me);
+ restore_flags(flags);
+ panic("penguin cross call");
+}
+
+/* SMP cache/TLB flush entry points: each broadcasts the corresponding
+ * local_flush_* routine to all CPUs via the xcN cross-call helpers
+ * (N = number of arguments). The mm variants skip mms with no
+ * hardware context assigned (context == NO_CONTEXT). */
+void smp_flush_cache_all(void)
+{ xc0((smpfunc_t) local_flush_cache_all); }
+
+void smp_flush_tlb_all(void)
+{ xc0((smpfunc_t) local_flush_tlb_all); }
+
+void smp_flush_cache_mm(struct mm_struct *mm)
+{
+ if(mm->context != NO_CONTEXT)
+ xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
+}
+
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+ if(mm->context != NO_CONTEXT)
+ xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
+}
+
+void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ if(mm->context != NO_CONTEXT)
+ xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
+ start, end);
+}
+
+void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ if(mm->context != NO_CONTEXT)
+ xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
+ start, end);
+}
+
+void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{ xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }
+
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{ xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }
+
+void smp_flush_page_to_ram(unsigned long page)
+{ xc1((smpfunc_t) local_flush_page_to_ram, page); }
+
+/* Reschedule call back: handler for the irq-13 reschedule IPI. Only
+ * the active kernel processor should receive it; sets need_resched so
+ * the scheduler runs on return from interrupt. */
+void smp_reschedule_irq(void)
+{
+ if(smp_processor_id() != active_kernel_processor)
+ panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
+ smp_processor_id(), active_kernel_processor);
+
+ need_resched=1;
+}
+
+/* XXX FIXME: this still doesn't work right... XXX */
+
+/* #define DEBUG_CAPTURE */
+
+/* Capture nesting: 'release' is the go-signal for parked CPUs,
+ * 'capture_level' counts nested smp_capture() calls so only the
+ * outermost capture/release pair does the real work. */
+static volatile unsigned long release = 1;
+static volatile int capture_level = 0;
+
+/* smp_capture: park all other CPUs (MSG_CAPTURE IPI, wait mode 1)
+ * so the caller can do work that must not race. Nestable; paired
+ * with smp_release(). No-op until SMP is activated and commenced. */
+void smp_capture(void)
+{
+ unsigned long flags;
+
+ if(!smp_activated || !smp_commenced)
+ return;
+#ifdef DEBUG_CAPTURE
+ printk("C<%d>", smp_processor_id());
+#endif
+ save_and_cli(flags);
+ if(!capture_level) {
+ release = 0;
+ smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
+ }
+ capture_level++;
+ restore_flags(flags);
+}
+
+/* smp_release: undo one level of smp_capture(). On the outermost
+ * release, set the go-flag and wait for every parked CPU to clear its
+ * cpu_callin_map slot before returning. */
+void smp_release(void)
+{
+ unsigned long flags;
+ int i;
+
+ if(!smp_activated || !smp_commenced)
+ return;
+#ifdef DEBUG_CAPTURE
+ printk("R<%d>", smp_processor_id());
+#endif
+ save_and_cli(flags);
+ if(!(capture_level - 1)) {
+ release = 1;
+ for(i = 0; i < smp_num_cpus; i++)
+ while(cpu_callin_map[i])
+ barrier();
+ }
+ capture_level -= 1;
+ restore_flags(flags);
+}
+
+/* Park a processor, we must watch for more IPI's to invalidate
+ * our cache's and TLB's. And also note we can only wait for
+ * "lock-less" IPI's and process those, as a result of such IPI's
+ * being non-maskable traps being on is enough to receive them.
+ */
+
+/* Message call back: IPI handler dispatching on smp_msg_id (set by
+ * smp_message_pass). MSG_CROSS_CALL runs the published ccall_info
+ * function, flagging entry/exit for the initiator's rendezvous;
+ * MSG_STOP_CPU spins this CPU forever (panic/reboot halt). */
+void smp_message_irq(void)
+{
+ int i=smp_processor_id();
+
+ switch(smp_msg_id) {
+ case MSG_CROSS_CALL:
+ /* Do it to it. */
+ ccall_info.processors_in[i] = 1;
+ ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
+ ccall_info.arg4, ccall_info.arg5);
+ ccall_info.processors_out[i] = 1;
+ break;
+
+ /*
+ * Halt other CPU's for a panic or reboot
+ */
+ case MSG_STOP_CPU:
+ sti();
+ while(1)
+ barrier();
+
+ default:
+ printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
+ smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
+ break;
+ }
+}
diff --git a/arch/sparc/kernel/solaris.c b/arch/sparc/kernel/solaris.c
new file mode 100644
index 000000000..bb741c88e
--- /dev/null
+++ b/arch/sparc/kernel/solaris.c
@@ -0,0 +1,59 @@
+/* solaris.c: Solaris binary emulation, whee...
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+
+#include <asm/errno.h>
+#include <asm/solerrno.h>
+
+/* Translation table from Linux errno values (used as the index) to
+ * their Solaris SOL_* equivalents; index 0 maps to 0 (no error). */
+unsigned long solaris_xlatb_rorl[] = {
+ 0, SOL_EPERM, SOL_ENOENT, SOL_ESRCH, SOL_EINTR, SOL_EIO,
+ SOL_ENXIO, SOL_E2BIG, SOL_ENOEXEC, SOL_EBADF, SOL_ECHILD,
+ SOL_EAGAIN, SOL_ENOMEM, SOL_EACCES, SOL_EFAULT,
+ SOL_ENOTBLK, SOL_EBUSY, SOL_EEXIST, SOL_EXDEV, SOL_ENODEV,
+ SOL_ENOTDIR, SOL_EISDIR, SOL_EINVAL, SOL_ENFILE, SOL_EMFILE,
+ SOL_ENOTTY, SOL_ETXTBSY, SOL_EFBIG, SOL_ENOSPC, SOL_ESPIPE,
+ SOL_EROFS, SOL_EMLINK, SOL_EPIPE, SOL_EDOM, SOL_ERANGE,
+ SOL_EWOULDBLOCK, SOL_EINPROGRESS, SOL_EALREADY, SOL_ENOTSOCK,
+ SOL_EDESTADDRREQ, SOL_EMSGSIZE, SOL_EPROTOTYPE, SOL_ENOPROTOOPT,
+ SOL_EPROTONOSUPPORT, SOL_ESOCKTNOSUPPORT, SOL_EOPNOTSUPP,
+ SOL_EPFNOSUPPORT, SOL_EAFNOSUPPORT, SOL_EADDRINUSE,
+ SOL_EADDRNOTAVAIL, SOL_ENETDOWN, SOL_ENETUNREACH, SOL_ENETRESET,
+ SOL_ECONNABORTED, SOL_ECONNRESET, SOL_ENOBUFS, SOL_EISCONN,
+ SOL_ENOTCONN, SOL_ESHUTDOWN, SOL_ETOOMANYREFS, SOL_ETIMEDOUT,
+ SOL_ECONNREFUSED, SOL_ELOOP, SOL_ENAMETOOLONG, SOL_EHOSTDOWN,
+ SOL_EHOSTUNREACH, SOL_ENOTEMPTY, SOL_EUSERS, SOL_EUSERS,
+ SOL_EDQUOT, SOL_ESTALE, SOL_EREMOTE, SOL_ENOSTR, SOL_ETIME,
+ SOL_ENOSR, SOL_ENOMSG, SOL_EBADMSG, SOL_EIDRM, SOL_EDEADLK,
+ SOL_ENOLCK, SOL_ENONET, SOL_EINVAL, SOL_ENOLINK, SOL_EADV,
+ SOL_ESRMNT, SOL_ECOMM, SOL_EPROTO, SOL_EMULTIHOP, SOL_EINVAL,
+ SOL_EREMCHG, SOL_ENOSYS
+};
+
+extern asmlinkage int sys_open(const char *,int,int);
+
+/* solaris_open: translate a Solaris-style open() flag word to the
+ * Linux encoding, then hand off to sys_open. The low nibble (access
+ * mode) passes through unchanged; higher bits are remapped one by
+ * one. NOTE(review): the magic masks (0x8050 -> FASYNC, 0x80 ->
+ * O_NONBLOCK, etc.) presumably mirror SunOS/Solaris <fcntl.h> bit
+ * values — confirm against Solaris fcntl.h. */
+asmlinkage int solaris_open(const char *filename, int flags, int mode)
+{
+ int newflags = flags & 0xf;
+
+ flags &= ~0xf;
+ if(flags & 0x8050)
+ newflags |= FASYNC;
+ if(flags & 0x80)
+ newflags |= O_NONBLOCK;
+ if(flags & 0x100)
+ newflags |= O_CREAT;
+ if(flags & 0x200)
+ newflags |= O_TRUNC;
+ if(flags & 0x400)
+ newflags |= O_EXCL;
+ if(flags & 0x800)
+ newflags |= O_NOCTTY;
+ return sys_open(filename, newflags, mode);
+}
+
+
diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c
new file mode 100644
index 000000000..5fa429ba5
--- /dev/null
+++ b/arch/sparc/kernel/sparc-stub.c
@@ -0,0 +1,688 @@
+/* $Id: sparc-stub.c,v 1.19 1996/09/30 02:21:48 davem Exp $
+ * sparc-stub.c: KGDB support for the Linux kernel.
+ *
+ * Modifications to run under Linux
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * This file originally came from the gdb sources, and the
+ * copyright notices have been retained below.
+ */
+
+/****************************************************************************
+
+ THIS SOFTWARE IS NOT COPYRIGHTED
+
+ HP offers the following for use in the public domain. HP makes no
+ warranty with regard to the software or its performance and the
+ user accepts the software "AS IS" with all faults.
+
+ HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
+ TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+****************************************************************************/
+
+/****************************************************************************
+ * Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
+ *
+ * Module name: remcom.c $
+ * Revision: 1.34 $
+ * Date: 91/03/09 12:29:49 $
+ * Contributor: Lake Stevens Instrument Division$
+ *
+ * Description: low level support for gdb debugger. $
+ *
+ * Considerations: only works on target hardware $
+ *
+ * Written by: Glenn Engel $
+ * ModuleState: Experimental $
+ *
+ * NOTES: See Below $
+ *
+ * Modified for SPARC by Stu Grossman, Cygnus Support.
+ *
+ * This code has been extensively tested on the Fujitsu SPARClite demo board.
+ *
+ * To enable debugger support, two things need to happen. One, a
+ * call to set_debug_traps() is necessary in order to allow any breakpoints
+ * or error conditions to be properly intercepted and reported to gdb.
+ * Two, a breakpoint needs to be generated to begin communication. This
+ * is most easily accomplished by a call to breakpoint(). Breakpoint()
+ * simulates a breakpoint by executing a trap #1.
+ *
+ *************
+ *
+ * The following gdb commands are supported:
+ *
+ * command function Return value
+ *
+ * g return the value of the CPU registers hex data or ENN
+ * G set the value of the CPU registers OK or ENN
+ *
+ * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
+ * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
+ *
+ * c Resume at current address SNN ( signal NN)
+ * cAA..AA Continue at address AA..AA SNN
+ *
+ * s Step one instruction SNN
+ * sAA..AA Step one instruction from AA..AA SNN
+ *
+ * k kill
+ *
+ * ? What was the last sigval ? SNN (signal NN)
+ *
+ * bBB..BB Set baud rate to BB..BB OK or BNN, then sets
+ * baud rate
+ *
+ * All commands and responses are sent with a packet which includes a
+ * checksum. A packet consists of
+ *
+ * $<packet info>#<checksum>.
+ *
+ * where
+ * <packet info> :: <characters representing the command or response>
+ * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
+ *
+ * When a packet is received, it is first acknowledged with either '+' or '-'.
+ * '+' indicates a successful transfer. '-' indicates a failed transfer.
+ *
+ * Example:
+ *
+ * Host: Reply:
+ * $m0,10#2a +$00010203040506070809101112131415#42
+ *
+ ****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/signal.h>
+#include <asm/oplib.h>
+#include <asm/head.h>
+#include <asm/traps.h>
+#include <asm/system.h>
+#include <asm/vac-ops.h>
+#include <asm/kgdb.h>
+#include <asm/pgtable.h>
+/*
+ *
+ * external low-level support routines
+ */
+
+extern void putDebugChar(char); /* write a single character */
+extern char getDebugChar(void); /* read and return a single char */
+
+/*
+ * BUFMAX defines the maximum number of characters in inbound/outbound buffers
+ * at least NUMREGBYTES*2 are needed for register packets
+ */
+#define BUFMAX 2048
+
+static int initialized = 0; /* !0 means we've been initialized */
+
+static const char hexchars[]="0123456789abcdef";
+
+#define NUMREGS 72
+
+/* Number of bytes of registers. */
+#define NUMREGBYTES (NUMREGS * 4)
+/* Register layout GDB expects for SPARC: globals, outs, locals, ins,
+ * 32 FP regs, then the special registers. Used as indices into the
+ * saved-register array passed to handle_exception(). */
+enum regnames {G0, G1, G2, G3, G4, G5, G6, G7,
+ O0, O1, O2, O3, O4, O5, SP, O7,
+ L0, L1, L2, L3, L4, L5, L6, L7,
+ I0, I1, I2, I3, I4, I5, FP, I7,
+
+ F0, F1, F2, F3, F4, F5, F6, F7,
+ F8, F9, F10, F11, F12, F13, F14, F15,
+ F16, F17, F18, F19, F20, F21, F22, F23,
+ F24, F25, F26, F27, F28, F29, F30, F31,
+ Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR };
+
+
+extern void trap_low(void); /* In arch/sparc/kernel/entry.S */
+
+/* Read the sun4c hardware PTE for addr via the ASI_PTE alternate
+ * address space ('lda' = load word from alternate space). */
+unsigned long get_sun4cpte(unsigned long addr)
+{
+ unsigned long entry;
+
+ __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
+ "=r" (entry) :
+ "r" (addr), "i" (ASI_PTE));
+ return entry;
+}
+
+/* Read the sun4c segment-map byte for addr via the ASI_SEGMAP
+ * alternate address space ('lduba' = load unsigned byte). */
+unsigned long get_sun4csegmap(unsigned long addr)
+{
+ unsigned long entry;
+
+ __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" :
+ "=r" (entry) :
+ "r" (addr), "i" (ASI_SEGMAP));
+ return entry;
+}
+
+/* Do-nothing replacement installed over flush_cache_all while KGDB
+ * is active (see set_debug_traps). */
+static void flush_cache_all_nop(void)
+{
+}
+
+/* Place where we save old trap entries for restoration */
+struct tt_entry kgdb_savettable[256];
+typedef void (*trapfunc_t)(void);
+
+/* Helper routine for manipulation of kgdb_savettable: copy one
+ * 4-instruction trap-table entry from src to dest. */
+static inline void copy_ttentry(struct tt_entry *src, struct tt_entry *dest)
+{
+ dest->inst_one = src->inst_one;
+ dest->inst_two = src->inst_two;
+ dest->inst_three = src->inst_three;
+ dest->inst_four = src->inst_four;
+}
+
+/* Initialize the kgdb_savettable so that debugging can commence:
+ * snapshot all 256 live trap-table entries with irqs disabled. */
+static void eh_init(void)
+{
+ int i, flags;
+
+ save_and_cli(flags);
+ for(i=0; i < 256; i++)
+ copy_ttentry(&sparc_ttable[i], &kgdb_savettable[i]);
+ restore_flags(flags);
+}
+
+/* Install an exception handler for kgdb: rewrite live trap-table
+ * slot tnum to branch to trap_entry (branch, rd %psr into %l0, two
+ * nops). Done with irqs off since the table is in active use. */
+static void exceptionHandler(int tnum, trapfunc_t trap_entry)
+{
+ unsigned long te_addr = (unsigned long) trap_entry;
+ int flags;
+
+ /* We are dorking with a live trap table, all irqs off */
+ save_and_cli(flags);
+
+ /* Make new vector */
+ sparc_ttable[tnum].inst_one =
+ SPARC_BRANCH((unsigned long) te_addr,
+ (unsigned long) &sparc_ttable[tnum].inst_one);
+ sparc_ttable[tnum].inst_two = SPARC_RD_PSR_L0;
+ sparc_ttable[tnum].inst_three = SPARC_NOP;
+ sparc_ttable[tnum].inst_four = SPARC_NOP;
+
+ restore_flags(flags);
+}
+
+/* Convert ch from a hex digit to an int; -1 if not a hex digit. */
+static int
+hex(unsigned char ch)
+{
+ if (ch >= 'a' && ch <= 'f')
+ return ch-'a'+10;
+ if (ch >= '0' && ch <= '9')
+ return ch-'0';
+ if (ch >= 'A' && ch <= 'F')
+ return ch-'A'+10;
+ return -1;
+}
+
+/* scan for the sequence $<data>#<checksum> — receive one GDB remote
+ * protocol packet into buffer (NUL-terminated, '$'/'#' stripped),
+ * ack '+' on good checksum, nak '-' and retry on a bad one. High bit
+ * of every received byte is masked off. */
+static void
+getpacket(char *buffer)
+{
+ unsigned char checksum;
+ unsigned char xmitcsum;
+ int i;
+ int count;
+ unsigned char ch;
+
+ do {
+ /* wait around for the start character, ignore all other characters */
+ while ((ch = (getDebugChar() & 0x7f)) != '$') ;
+
+ checksum = 0;
+ xmitcsum = -1;
+
+ count = 0;
+
+ /* now, read until a # or end of buffer is found */
+ while (count < BUFMAX) {
+ ch = getDebugChar() & 0x7f;
+ if (ch == '#')
+ break;
+ checksum = checksum + ch;
+ buffer[count] = ch;
+ count = count + 1;
+ }
+
+ if (count >= BUFMAX)
+ continue;
+
+ buffer[count] = 0;
+
+ if (ch == '#') {
+ xmitcsum = hex(getDebugChar() & 0x7f) << 4;
+ xmitcsum |= hex(getDebugChar() & 0x7f);
+ if (checksum != xmitcsum)
+ putDebugChar('-'); /* failed checksum */
+ else {
+ putDebugChar('+'); /* successful transfer */
+ /* if a sequence char is present, reply the ID */
+ if (buffer[2] == ':') {
+ putDebugChar(buffer[0]);
+ putDebugChar(buffer[1]);
+ /* remove sequence chars from buffer */
+ count = strlen(buffer);
+ for (i=3; i <= count; i++)
+ buffer[i-3] = buffer[i];
+ }
+ }
+ }
+ } while (checksum != xmitcsum);
+}
+
+/* send the packet in buffer as $<data>#<checksum>, retransmitting
+ * until the remote GDB acknowledges with '+'. */
+static void
+putpacket(unsigned char *buffer)
+{
+ unsigned char checksum;
+ int count;
+ unsigned char ch, recv;
+
+ /* $<packet info>#<checksum>. */
+ do {
+ putDebugChar('$');
+ checksum = 0;
+ count = 0;
+
+ while ((ch = buffer[count])) {
+ putDebugChar(ch);
+ checksum += ch;
+ count += 1;
+ }
+
+ putDebugChar('#');
+ putDebugChar(hexchars[checksum >> 4]);
+ putDebugChar(hexchars[checksum & 0xf]);
+ recv = getDebugChar();
+ } while ((recv & 0x7f) != '+');
+}
+
+/* Packet staging buffers shared by handle_exception(). */
+static char remcomInBuffer[BUFMAX];
+static char remcomOutBuffer[BUFMAX];
+
+/* Convert the memory pointed to by mem into hex, placing result in buf.
+ * Return a pointer to the last char put in buf (null), in case of mem fault,
+ * return 0.
+ * (Note: this version performs no fault handling; it always returns
+ * the terminator position.)
+ */
+static unsigned char *
+mem2hex(char *mem, char *buf, int count)
+{
+ unsigned char ch;
+
+ while (count-- > 0) {
+ ch = *mem++;
+ *buf++ = hexchars[ch >> 4];
+ *buf++ = hexchars[ch & 0xf];
+ }
+
+ *buf = 0;
+ return buf;
+}
+
+/* convert the hex array pointed to by buf into binary to be placed in mem
+ * return a pointer to the character AFTER the last byte written.
+ * (Assumes buf holds 2*count valid hex digits; digits are not
+ * validated here.)
+*/
+static char *
+hex2mem(char *buf, char *mem, int count)
+{
+ int i;
+ unsigned char ch;
+
+ for (i=0; i<count; i++) {
+
+ ch = hex(*buf++) << 4;
+ ch |= hex(*buf++);
+ *mem++ = ch;
+ }
+ return mem;
+}
+
+/* This table contains the mapping between SPARC hardware trap types, and
+ signals, which are primarily what GDB understands. It also indicates
+ which hardware traps we need to commandeer when initializing the stub.
+ Terminated by an all-zero entry. */
+static struct hard_trap_info
+{
+ unsigned char tt; /* Trap type code for SPARC */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ {SP_TRAP_SBPT, SIGTRAP}, /* ta 1 - Linux/KGDB software breakpoint */
+ {0, 0} /* Must be last */
+};
+
+/* Set up exception handlers for tracing and breakpoints: save the
+ * live trap table, point flush_cache_all (a function pointer here) at
+ * a nop, install trap_low for each entry in hard_trap_info (except
+ * the fault handlers), drain any packet GDB already sent, ack it,
+ * and mark the stub initialized. */
+void
+set_debug_traps(void)
+{
+ struct hard_trap_info *ht;
+ unsigned long flags;
+ unsigned char c;
+
+ save_and_cli(flags);
+ flush_cache_all = flush_cache_all_nop;
+
+ /* Initialize our copy of the Linux Sparc trap table */
+ eh_init();
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++) {
+ /* Only if it doesn't destroy our fault handlers */
+ if((ht->tt != SP_TRAP_TFLT) &&
+ (ht->tt != SP_TRAP_DFLT))
+ exceptionHandler(ht->tt, trap_low);
+ }
+
+ /* In case GDB is started before us, ack any packets (presumably
+ * "$?#xx") sitting there.
+ */
+
+ while((c = getDebugChar()) != '$');
+ while((c = getDebugChar()) != '#');
+ c = getDebugChar(); /* eat first csum byte */
+ c = getDebugChar(); /* eat second csum byte */
+ putDebugChar('+'); /* ack it */
+
+ initialized = 1; /* connect! */
+ restore_flags(flags);
+}
+
+/* Convert the SPARC hardware trap type code to a unix signal number
+ * via hard_trap_info; unknown traps report SIGHUP. */
+static int
+computeSignal(int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+/*
+ * While we find nice hex chars, build an int.
+ * Return number of chars processed. *ptr is advanced past the
+ * digits consumed; *intValue receives the accumulated value.
+ */
+static int
+hexToInt(char **ptr, int *intValue)
+{
+ int numChars = 0;
+ int hexValue;
+
+ *intValue = 0;
+
+ while (**ptr) {
+ hexValue = hex(**ptr);
+ if (hexValue < 0)
+ break;
+
+ *intValue = (*intValue << 4) | hexValue;
+ numChars ++;
+
+ (*ptr)++;
+ }
+
+ return (numChars);
+}
+
+/*
+ * This function does all command processing for interfacing to gdb. It
+ * returns 1 if you should skip the instruction at the trap address, 0
+ * otherwise.
+ */
+
+extern void breakinst(void);
+
/* Main command loop of the kernel gdb stub.  Entered from the low-level
 * trap handler with a pointer to the saved register frame.  Reports the
 * stop to the host gdb with a 'T' packet, then serves protocol packets
 * ('g', 'G', 'm', 'M', 'c', ...) until gdb resumes the target.
 */
void
handle_exception (unsigned long *registers)
{
	int tt;			/* Trap type */
	int sigval;
	int addr;
	int length;
	char *ptr;
	unsigned long *sp;

	/* First, we must force all of the windows to be spilled out */

	asm("save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "save %sp, -64, %sp\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t"
	    "restore\n\t");

	if (registers[PC] == (unsigned long)breakinst) {
		/* Skip over breakpoint trap insn */
		registers[PC] = registers[NPC];
		registers[NPC] += 4;
	}

	/* Stack pointer of the interrupted context; the register window
	 * save area (locals then ins) lives at the top of this frame.
	 */
	sp = (unsigned long *)registers[SP];

	/* Trap type field of the TBR selects which trap fired. */
	tt = (registers[TBR] >> 4) & 0xff;

	/* reply to host that an exception has occurred */
	sigval = computeSignal(tt);
	ptr = remcomOutBuffer;

	/* 'T' stop-reply packet: signal number, then a series of
	 * <regno>:<value>; pairs for the registers gdb wants quickly.
	 */
	*ptr++ = 'T';
	*ptr++ = hexchars[sigval >> 4];
	*ptr++ = hexchars[sigval & 0xf];

	*ptr++ = hexchars[PC >> 4];
	*ptr++ = hexchars[PC & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[PC], ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[FP >> 4];
	*ptr++ = hexchars[FP & 0xf];
	*ptr++ = ':';
	/* %fp is saved in the window area on the stack: 8 locals then
	 * ins, so sp+8+6 is %i6 (the frame pointer).
	 */
	ptr = mem2hex((char *) (sp + 8 + 6), ptr, 4); /* FP */
	*ptr++ = ';';

	*ptr++ = hexchars[SP >> 4];
	*ptr++ = hexchars[SP & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&sp, ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[NPC >> 4];
	*ptr++ = hexchars[NPC & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[NPC], ptr, 4);
	*ptr++ = ';';

	*ptr++ = hexchars[O7 >> 4];
	*ptr++ = hexchars[O7 & 0xf];
	*ptr++ = ':';
	ptr = mem2hex((char *)&registers[O7], ptr, 4);
	*ptr++ = ';';

	*ptr++ = 0;

	putpacket(remcomOutBuffer);

	/* XXX We may want to add some features dealing with poking the
	 * XXX page tables, the real ones on the srmmu, and what is currently
	 * XXX loaded in the sun4/sun4c tlb at this point in time.  But this
	 * XXX also required hacking to the gdb sources directly...
	 */

	while (1) {
		remcomOutBuffer[0] = 0;

		getpacket(remcomInBuffer);
		switch (remcomInBuffer[0]) {
		case '?':	/* report last signal */
			remcomOutBuffer[0] = 'S';
			remcomOutBuffer[1] = hexchars[sigval >> 4];
			remcomOutBuffer[2] = hexchars[sigval & 0xf];
			remcomOutBuffer[3] = 0;
			break;

		case 'd':
			/* toggle debug flag */
			break;

		case 'g':		/* return the value of the CPU registers */
		{
			ptr = remcomOutBuffer;
			/* G & O regs */
			ptr = mem2hex((char *)registers, ptr, 16 * 4);
			/* L & I regs, taken from the stack save area */
			ptr = mem2hex((char *) (sp + 0), ptr, 16 * 4);
			/* Floating point: reported as ASCII zeros
			 * (32 regs x 8 hex chars), i.e. unsupported.
			 */
			memset(ptr, '0', 32 * 8);
			/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
			mem2hex((char *)&registers[Y], (ptr + 32 * 4 * 2), (8 * 4));
		}
			break;

		case 'G':	   /* set the value of the CPU registers - return OK */
		{
			unsigned long *newsp, psr;

			psr = registers[PSR];

			ptr = &remcomInBuffer[1];
			/* G & O regs */
			hex2mem(ptr, (char *)registers, 16 * 4);
			/* L & I regs */
			hex2mem(ptr + 16 * 4 * 2, (char *) (sp + 0), 16 * 4);
			/* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
			hex2mem(ptr + 64 * 4 * 2, (char *)&registers[Y], 8 * 4);

			/* See if the stack pointer has moved.  If so,
			 * then copy the saved locals and ins to the
			 * new location.  This keeps the window
			 * overflow and underflow routines happy.
			 */

			newsp = (unsigned long *)registers[SP];
			if (sp != newsp)
				sp = memcpy(newsp, sp, 16 * 4);

			/* Don't allow CWP to be modified. */

			if (psr != registers[PSR])
				registers[PSR] = (psr & 0x1f) | (registers[PSR] & ~0x1f);

			strcpy(remcomOutBuffer,"OK");
		}
			break;

		case 'm':	  /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
			/* Try to read %x,%x.  */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr)
			    && *ptr++ == ','
			    && hexToInt(&ptr, &length))	{
				/* mem2hex returning 0 signals a fault. */
				if (mem2hex((char *)addr, remcomOutBuffer, length))
					break;

				strcpy (remcomOutBuffer, "E03");
			} else {
				strcpy(remcomOutBuffer,"E01");
			}
			break;

		case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
			/* Try to read '%x,%x:'.  */

			ptr = &remcomInBuffer[1];

			if (hexToInt(&ptr, &addr)
			    && *ptr++ == ','
			    && hexToInt(&ptr, &length)
			    && *ptr++ == ':') {
				if (hex2mem(ptr, (char *)addr, length)) {
					strcpy(remcomOutBuffer, "OK");
				} else {
					strcpy(remcomOutBuffer, "E03");
				}
			} else {
				strcpy(remcomOutBuffer, "E02");
			}
			break;

		case 'c':    /* cAA..AA    Continue at address AA..AA(optional) */
			/* try to read optional parameter, pc unchanged if no parm */

			ptr = &remcomInBuffer[1];
			if (hexToInt(&ptr, &addr)) {
				registers[PC] = addr;
				registers[NPC] = addr + 4;
			}

/* Need to flush the instruction cache here, as we may have deposited a
 * breakpoint, and the icache probably has no way of knowing that a data ref to
 * some location may have changed something that is in the instruction cache.
 */
			flush_cache_all();
			/* Resume the interrupted context. */
			return;

			/* kill the program */
		case 'k' :		/* do nothing */
			break;
		case 'r':		/* Reset */
			/* Jump to address zero to reset the machine
			 * (NOTE(review): relies on the reset vector /
			 * PROM living there — confirm).
			 */
			asm ("call 0\n\t"
			     "nop\n\t");
			break;
		}			/* switch */

		/* reply to the request */
		putpacket(remcomOutBuffer);
	} /* while(1) */
}
+
+/* This function will generate a breakpoint exception. It is used at the
+ beginning of a program to sync up with a debugger and can be used
+ otherwise as a quick means to stop program execution and "break" into
+ the debugger. */
+
/* Generate a breakpoint trap ('ta 1') so the host-side gdb gains
 * control.  The global label breakinst marks the trap instruction so
 * handle_exception() can recognize it and skip past it on resume.
 */
void
breakpoint(void)
{
	/* Do nothing until set_debug_traps() has installed the trap
	 * handlers and completed the initial handshake with gdb.
	 */
	if (!initialized)
		return;

	/* Again, watch those c-prefixes for ELF kernels */
#if defined(__svr4__) || defined(__ELF__)
	asm("	.globl breakinst

	breakinst:	ta 1
	");
#else
	asm("	.globl _breakinst

	_breakinst:	ta 1
	");
#endif
}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
new file mode 100644
index 000000000..fe10f0447
--- /dev/null
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -0,0 +1,187 @@
+/* $Id: sparc_ksyms.c,v 1.24 1996/10/27 08:36:08 davem Exp $
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/oplib.h>
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/auxio.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/idprom.h>
+#include <asm/svr4.h>
+#include <asm/head.h>
+#include <asm/smp.h>
+#include <asm/mostek.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#ifdef CONFIG_SBUS
+#include <asm/sbus.h>
+#endif
+
/* SunOS-compatible pollfd layout; this is the element type of the
 * array that sunos_poll() (declared below) receives from SunOS
 * binaries.
 */
struct poll {
	int fd;			/* file descriptor to poll */
	short events;		/* requested events */
	short revents;		/* returned events */
};
+
+extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
+extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
+extern int sunos_poll(struct poll * ufds, size_t nfds, int timeout);
+extern unsigned long sunos_mmap(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+void _sigpause_common (unsigned int set, struct pt_regs *);
+extern void __copy_1page(void *, const void *);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
+extern void *__memset(void *, int, __kernel_size_t);
+extern void *bzero_1page(void *);
+extern void *__bzero(void *, size_t);
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern int __memcmp(const void *, const void *, __kernel_size_t);
+extern int __strncmp(const char *, const char *, __kernel_size_t);
+
+extern int __copy_to_user(unsigned long to, unsigned long from, int size);
+extern int __copy_from_user(unsigned long to, unsigned long from, int size);
+extern int __clear_user(unsigned long addr, int size);
+extern int __strncpy_from_user(unsigned long dest, unsigned long src, int count);
+
+extern void bcopy (const char *, char *, int);
+extern int __ashrdi3(int, int);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+/* One thing to note is that the way the symbols of the mul/div
+ * support routines are named is a mess, they all start with
+ * a '.' which makes it a bitch to export, here is the trick:
+ */
+#define DD(sym) extern int __sparc_dot_ ## sym (int) __asm__("." ## #sym)
+#define XD(sym) { (void *) & __sparc_dot_ ## sym, "." ## #sym }
+
+DD(rem);
+DD(urem);
+DD(div);
+DD(udiv);
+DD(mul);
+DD(umul);
+
+static struct symbol_table arch_symbol_table = {
+#include <linux/symtab_begin.h>
+
+ /* used by various drivers */
+ X(sparc_cpu_model),
+#ifdef __SMP__
+ X(kernel_flag),
+ X(kernel_counter),
+ X(active_kernel_processor),
+ X(syscall_count),
+#endif
+ X(page_offset),
+
+ X(udelay),
+ X(mstk48t02_regs),
+#if CONFIG_SUN_AUXIO
+ X(auxio_register),
+#endif
+ X(request_fast_irq),
+ X(sparc_alloc_io),
+ X(sparc_free_io),
+ X(mmu_unlockarea),
+ X(mmu_lockarea),
+ X(SBus_chain),
+
+ /* Solaris/SunOS binary compatibility */
+ X(svr4_setcontext),
+ X(svr4_getcontext),
+ X(_sigpause_common),
+ X(sunos_mmap),
+ X(sunos_poll),
+
+ /* Should really be in linux/kernel/ksyms.c */
+ X(dump_thread),
+
+ /* prom symbols */
+ X(idprom),
+ X(prom_root_node),
+ X(prom_getchild),
+ X(prom_getsibling),
+ X(prom_searchsiblings),
+ X(prom_firstprop),
+ X(prom_nextprop),
+ X(prom_getproplen),
+ X(prom_getproperty),
+ X(prom_setprop),
+ X(prom_nodeops),
+ X(prom_getbootargs),
+ X(prom_apply_obio_ranges),
+ X(prom_getname),
+ X(prom_feval),
+ X(romvec),
+
+ /* sparc library symbols */
+ X(bcopy),
+ X(memmove),
+ X(memscan),
+ X(strlen),
+ X(strnlen),
+ X(strcpy),
+ X(strncpy),
+ X(strcat),
+ X(strncat),
+ X(strcmp),
+ X(strncmp),
+ X(strchr),
+ X(strrchr),
+ X(strpbrk),
+ X(strtok),
+ X(strstr),
+ X(strspn),
+
+ /* Special internal versions of library functions. */
+ X(__copy_1page),
+ X(__memcpy),
+ X(__memset),
+ X(bzero_1page),
+ X(__bzero),
+ X(__memscan_zero),
+ X(__memscan_generic),
+ X(__memcmp),
+ X(__strncmp),
+
+ /* Moving data to/from userspace. */
+ X(__copy_to_user),
+ X(__copy_from_user),
+ X(__clear_user),
+ X(__strncpy_from_user),
+
+ /* No version information on these, as gcc produces such symbols. */
+ XNOVERS(memcmp),
+ XNOVERS(memcpy),
+ XNOVERS(memset),
+ XNOVERS(__ashrdi3),
+
+ XD(rem),
+ XD(urem),
+ XD(mul),
+ XD(umul),
+ XD(div),
+ XD(udiv),
+#include <linux/symtab_end.h>
+};
+
/* Register the sparc-specific symbol table with the module loader so
 * loadable modules can resolve the symbols listed above.
 */
void arch_syms_export(void)
{
	register_symtab(&arch_symbol_table);
#if CONFIG_AP1000
	/* The AP1000 port exports extra symbols of its own. */
	ap_register_ksyms();
#endif
}
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
new file mode 100644
index 000000000..934e7f28c
--- /dev/null
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -0,0 +1,187 @@
+/* sun4c_irq.c
+ * arch/sparc/kernel/sun4c_irq.c:
+ *
+ * djhr: Hacked out of irq.c into a CPU dependent version.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/psr.h>
+#include <asm/vaddrs.h>
+#include <asm/timer.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/traps.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+/* Pointer to the interrupt enable byte
+ *
+ * Dave Redman (djhr@tadpole.co.uk)
+ * What you may not be aware of is that entry.S requires this variable.
+ *
+ * --- linux_trap_nmi_sun4c --
+ *
+ * so don't go making it static, like I tried. sigh.
+ */
+unsigned char *interrupt_enable = 0;
+
+static void sun4c_disable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned char current_mask, new_mask;
+
+ save_and_cli(flags);
+ irq_nr &= NR_IRQS;
+ current_mask = *interrupt_enable;
+ switch(irq_nr) {
+ case 1:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
+ break;
+ case 8:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E8)));
+ break;
+ case 10:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E10)));
+ break;
+ case 14:
+ new_mask = ((current_mask) & (~(SUN4C_INT_E14)));
+ break;
+ default:
+ restore_flags(flags);
+ return;
+ }
+ *interrupt_enable = new_mask;
+ restore_flags(flags);
+}
+
+static void sun4c_enable_irq(unsigned int irq_nr)
+{
+ unsigned long flags;
+ unsigned char current_mask, new_mask;
+
+ save_and_cli(flags);
+ irq_nr &= NR_IRQS;
+ current_mask = *interrupt_enable;
+ switch(irq_nr) {
+ case 1:
+ new_mask = ((current_mask) | SUN4C_INT_E1);
+ break;
+ case 8:
+ new_mask = ((current_mask) | SUN4C_INT_E8);
+ break;
+ case 10:
+ new_mask = ((current_mask) | SUN4C_INT_E10);
+ break;
+ case 14:
+ new_mask = ((current_mask) | SUN4C_INT_E14);
+ break;
+ default:
+ restore_flags(flags);
+ return;
+ }
+ *interrupt_enable = new_mask;
+ restore_flags(flags);
+}
+
+#define TIMER_IRQ 10 /* Also at level 14, but we ignore that one. */
+#define PROFILE_IRQ 14 /* Level14 ticker.. used by OBP for polling */
+
+volatile struct sun4c_timer_info *sun4c_timers;
+
static void sun4c_clear_clock_irq(void)
{
	/* Reading the level-10 timer limit register acknowledges the
	 * clock interrupt; the volatile local forces the load to
	 * actually happen even though the value is discarded.
	 */
	volatile unsigned int clear_intr;
	clear_intr = sun4c_timers->timer_limit10;
}
+
/* Profile (level-14) interrupt ack: not implemented on sun4c. */
static void sun4c_clear_profile_irq(void)
{
	/* Errm.. not sure how to do this.. */
}
+
/* Program the profile timer limit: not implemented on sun4c. */
static void sun4c_load_profile_irq(unsigned int limit)
{
	/* Errm.. not sure how to do this.. */
}
+
/* Map the sun4c timer hardware, program the level-10 timer for the HZ
 * tick rate, and attach counter_fn as the clock interrupt handler.
 */
static void sun4c_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
{
	int irq;

	/* Map the Timer chip, this is implemented in hardware inside
	 * the cache chip on the sun4c.
	 */
	sun4c_timers = sparc_alloc_io ((void *) SUN4C_TIMER_PHYSADDR, 0,
				       sizeof(struct sun4c_timer_info),
				       "timer", 0x0, 0x0);

	/* Have the level 10 timer tick at 100HZ.  We don't touch the
	 * level 14 timer limit since we are letting the prom handle
	 * them until we have a real console driver so L1-A works.
	 */
	sun4c_timers->timer_limit10 = (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4c_timers->cur_count10;
	master_l10_limit = &sun4c_timers->timer_limit10;

	/* The clock tick is essential: if the IRQ cannot be attached,
	 * halt into the PROM rather than continue without time.
	 */
	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}

	/* NULL handler: leave the level-14 ticker to the PROM. */
	claim_ticker14(NULL, PROFILE_IRQ, 0);
}
+
+#ifdef __SMP__
+static void sun4c_nop(void) {}
+#endif
+
/* Locate the sun4c interrupt-enable register via the PROM device tree,
 * map it, install the sun4c implementations of the generic interrupt
 * entry points, then enable interrupts.
 */
void sun4c_init_IRQ(void)
{
	struct linux_prom_registers int_regs[2];
	int ie_node;

	ie_node = prom_searchsiblings (prom_getchild(prom_root_node),
				       "interrupt-enable");
	if(ie_node == 0)
		panic("Cannot find /interrupt-enable node");

	/* Depending on the "address" property is bad news... */
	prom_getproperty(ie_node, "reg", (char *) int_regs, sizeof(int_regs));
	interrupt_enable = (char *) sparc_alloc_io(int_regs[0].phys_addr, 0,
						   int_regs[0].reg_size,
						   "sun4c_interrupts",
						   int_regs[0].which_io, 0x0);
	/* Hook the chip-specific routines into the generic IRQ layer. */
	enable_irq = sun4c_enable_irq;
	disable_irq = sun4c_disable_irq;
	clear_clock_irq = sun4c_clear_clock_irq;
	clear_profile_irq = sun4c_clear_profile_irq;
	load_profile_irq = sun4c_load_profile_irq;
	init_timers = sun4c_init_timers;
#ifdef __SMP__
	/* sun4c is uniprocessor: the cross-cpu hooks are no-ops. */
	set_cpu_int = (void (*) (int, int))sun4c_nop;
	clear_cpu_int = (void (*) (int, int))sun4c_nop;
	set_irq_udt = (void (*) (int))sun4c_nop;
#endif
	*interrupt_enable = (SUN4C_INT_ENABLE);
	sti();
}
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
new file mode 100644
index 000000000..7ef23737b
--- /dev/null
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -0,0 +1,340 @@
+/* sun4m_irq.c
+ * arch/sparc/kernel/sun4m_irq.c:
+ *
+ * djhr: Hacked out of irq.c into a CPU dependent version.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@ipmce.su)
+ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/psr.h>
+#include <asm/vaddrs.h>
+#include <asm/timer.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+static unsigned long dummy;
+
+extern int linux_num_cpus;
+struct sun4m_intregs *sun4m_interrupts;
+unsigned long *irq_rcvreg = &dummy;
+
+/* These tables only apply for interrupts greater than 15..
+ *
+ * any intr value below 0x10 is considered to be a soft-int
+ * this may be useful or it may not.. but that's how I've done it.
+ * and it won't clash with what OBP is telling us about devices.
+ *
+ * take an encoded intr value and lookup if it's valid
+ * then get the mask bits that match from irq_mask
+ */
+static unsigned char irq_xlate[32] = {
+ /* 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, a, b, c, d, e, f */
+ 0, 0, 0, 0, 1, 0, 2, 0, 3, 0, 4, 5, 6, 0, 0, 7,
+ 0, 0, 8, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 0
+};
+
+static unsigned long irq_mask[] = {
+ 0, /* illegal index */
+ SUN4M_INT_SCSI, /* 1 irq 4 */
+ SUN4M_INT_ETHERNET, /* 2 irq 6 */
+ SUN4M_INT_VIDEO, /* 3 irq 8 */
+ SUN4M_INT_REALTIME, /* 4 irq 10 */
+ SUN4M_INT_FLOPPY, /* 5 irq 11 */
+ (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), /* 6 irq 12 */
+ SUN4M_INT_MODULE_ERR, /* 7 irq 15 */
+ SUN4M_INT_SBUS(0), /* 8 irq 2 */
+ SUN4M_INT_SBUS(1), /* 9 irq 3 */
+ SUN4M_INT_SBUS(2), /* 10 irq 5 */
+ SUN4M_INT_SBUS(3), /* 11 irq 7 */
+ SUN4M_INT_SBUS(4), /* 12 irq 9 */
+ SUN4M_INT_SBUS(5), /* 13 irq 11 */
+ SUN4M_INT_SBUS(6) /* 14 irq 13 */
+};
+
+inline unsigned long sun4m_get_irqmask(unsigned int irq)
+{
+ unsigned long mask;
+
+ if (irq > 0x20) {
+ /* OBIO/SBUS interrupts */
+ irq &= 0x1f;
+ mask = irq_mask[irq_xlate[irq]];
+ if (!mask)
+ printk("sun4m_get_irqmask: IRQ%d has no valid mask!\n",irq);
+ } else {
+ /* Soft Interrupts will come here
+ * Currently there is no way to trigger them but I'm sure something
+ * could be cooked up.
+ */
+ irq &= 0xf;
+ mask = SUN4M_SOFT_INT(irq);
+ }
+ return mask;
+}
+
+static void sun4m_disable_irq(unsigned int irq_nr)
+{
+ unsigned long mask, flags;
+ int cpu = smp_processor_id();
+
+ mask = sun4m_get_irqmask(irq_nr);
+ save_and_cli(flags);
+ if (irq_nr > 15)
+ sun4m_interrupts->set = mask;
+ else
+ sun4m_interrupts->cpu_intregs[cpu].set = mask;
+ restore_flags(flags);
+}
+
+static void sun4m_enable_irq(unsigned int irq_nr)
+{
+ unsigned long mask, flags;
+ int cpu = smp_processor_id();
+
+ /* Dreadful floppy hack. When we use 0x2b instead of
+ * 0x0b the system blows (it starts to whistle!).
+ * So we continue to use 0x0b. Fixme ASAP. --P3
+ */
+ if (irq_nr != 0x0b) {
+ mask = sun4m_get_irqmask(irq_nr);
+ save_and_cli(flags);
+ if (irq_nr > 15)
+ sun4m_interrupts->clear = mask;
+ else
+ sun4m_interrupts->cpu_intregs[cpu].clear = mask;
+ restore_flags(flags);
+ } else {
+ save_and_cli(flags);
+ sun4m_interrupts->clear = SUN4M_INT_FLOPPY;
+ restore_flags(flags);
+ }
+}
+
+void sun4m_send_ipi(int cpu, int level)
+{
+ unsigned long mask;
+
+ mask = sun4m_get_irqmask(level);
+ sun4m_interrupts->cpu_intregs[cpu].set = mask;
+}
+
+void sun4m_clear_ipi(int cpu, int level)
+{
+ unsigned long mask;
+
+ mask = sun4m_get_irqmask(level);
+ sun4m_interrupts->cpu_intregs[cpu].clear = mask;
+}
+
/* Select which cpu receives undirected (system-wide) interrupts. */
void sun4m_set_udt(int cpu)
{
	sun4m_interrupts->undirected_target = cpu;
}
+
+#define OBIO_INTR 0x20
+#define TIMER_IRQ (OBIO_INTR | 10)
+#define PROFILE_IRQ (OBIO_INTR | 14)
+
+struct sun4m_timer_regs *sun4m_timers;
+unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
+
static void sun4m_clear_clock_irq(void)
{
	/* Reading the level-10 timer limit register acknowledges the
	 * clock interrupt; the volatile local forces the load even
	 * though the value is unused.
	 */
	volatile unsigned int clear_intr;
	clear_intr = sun4m_timers->l10_timer_limit;
}
+
static void sun4m_clear_profile_irq(void)
{
	/* Reading cpu 0's level-14 timer limit register acknowledges
	 * the profile interrupt.
	 */
	volatile unsigned int clear;

	clear = sun4m_timers->cpu_timers[0].l14_timer_limit;
}
+
/* Program cpu 0's level-14 timer limit (profile tick period). */
static void sun4m_load_profile_irq(unsigned int limit)
{
	sun4m_timers->cpu_timers[0].l14_timer_limit = limit;
}
+
#if HANDLE_LVL14_IRQ
/* Level-14 (profile) tick handler: acknowledge and re-arm the timer
 * without doing any real work, purely to keep OBP from servicing it.
 */
static void sun4m_lvl14_handler(int irq, void *dev_id, struct pt_regs * regs)
{
	volatile unsigned int clear;

	printk("CPU[%d]: TOOK A LEVEL14!\n", smp_processor_id());
	/* we do nothing with this at present
	 * this is purely to prevent OBP getting its mucky paws
	 * in linux.
	 */
	clear = sun4m_timers->cpu_timers[0].l14_timer_limit; /* clear interrupt */

	/* reload with value, this allows on the fly retuning of the level14
	 * timer
	 */
	sun4m_timers->cpu_timers[0].l14_timer_limit = lvl14_resolution;
}
#endif /* HANDLE_LVL14_IRQ */
+
/* Locate the sun4m counter/timer registers via the PROM, map the
 * per-cpu and system counters, program the level-10 tick at HZ, and
 * attach counter_fn as the clock interrupt handler.
 */
static void sun4m_init_timers(void (*counter_fn)(int, void *, struct pt_regs *))
{
	int reg_count, irq, cpu;
	struct linux_prom_registers cnt_regs[PROMREG_MAX];
	int obio_node, cnt_node;

	cnt_node = 0;
	if((obio_node =
	    prom_searchsiblings (prom_getchild(prom_root_node), "obio")) == 0 ||
	   (obio_node = prom_getchild (obio_node)) == 0 ||
	   (cnt_node = prom_searchsiblings (obio_node, "counter")) == 0) {
		prom_printf("Cannot find /obio/counter node\n");
		prom_halt();
	}
	reg_count = prom_getproperty(cnt_node, "reg",
				     (void *) cnt_regs, sizeof(cnt_regs));
	reg_count = (reg_count/sizeof(struct linux_prom_registers));

	/* Apply the obio ranges to the timer registers. */
	prom_apply_obio_ranges(cnt_regs, reg_count);

	/* Entry 4 is set to the last "reg" entry (used below as the
	 * system-wide counter), while entries 1-3 are synthesized as
	 * consecutive PAGE_SIZE pages after entry 0 for the per-cpu
	 * counters.  Note obio_node is reused as a plain loop index.
	 */
	cnt_regs[4].phys_addr = cnt_regs[reg_count-1].phys_addr;
	cnt_regs[4].reg_size = cnt_regs[reg_count-1].reg_size;
	cnt_regs[4].which_io = cnt_regs[reg_count-1].which_io;
	for(obio_node = 1; obio_node < 4; obio_node++) {
		cnt_regs[obio_node].phys_addr =
			cnt_regs[obio_node-1].phys_addr + PAGE_SIZE;
		cnt_regs[obio_node].reg_size = cnt_regs[obio_node-1].reg_size;
		cnt_regs[obio_node].which_io = cnt_regs[obio_node-1].which_io;
	}

	/* Map the per-cpu Counter registers. */
	sun4m_timers = sparc_alloc_io(cnt_regs[0].phys_addr, 0,
				      PAGE_SIZE*NCPUS, "counters_percpu",
				      cnt_regs[0].which_io, 0x0);

	/* Map the system Counter register. */
	sparc_alloc_io(cnt_regs[4].phys_addr, 0,
		       cnt_regs[4].reg_size,
		       "counters_system",
		       cnt_regs[4].which_io, 0x0);

	/* Level-10 timer ticks at HZ; export counter/limit pointers
	 * for the generic sparc time code.
	 */
	sun4m_timers->l10_timer_limit =  (((1000000/HZ) + 1) << 10);
	master_l10_counter = &sun4m_timers->l10_cur_count;
	master_l10_limit = &sun4m_timers->l10_timer_limit;

	/* The clock tick is essential: halt into the PROM on failure. */
	irq = request_irq(TIMER_IRQ,
			  counter_fn,
			  (SA_INTERRUPT | SA_STATIC_ALLOC),
			  "timer", NULL);
	if (irq) {
		prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ);
		prom_halt();
	}

	/* Can't cope with multiple CPUS yet so no level14 tick events */
#if HANDLE_LVL14_IRQ
	if (linux_num_cpus > 1)
		claim_ticker14(NULL, PROFILE_IRQ, 0);
	else
		claim_ticker14(sun4m_lvl14_handler, PROFILE_IRQ, lvl14_resolution);
#endif /* HANDLE_LVL14_IRQ */
	if(linux_num_cpus > 1) {
		/* Disable per-cpu level-14 timers and mask the E14
		 * interrupt system-wide on SMP boxes.
		 */
		for(cpu = 0; cpu < 4; cpu++)
			sun4m_timers->cpu_timers[cpu].l14_timer_limit = 0;
		sun4m_interrupts->set = SUN4M_INT_E14;
	} else {
		sun4m_timers->cpu_timers[0].l14_timer_limit = 0;
	}
}
+
/* Locate and map the sun4m interrupt control registers via the PROM,
 * mask everything, and install the sun4m implementations of the
 * generic interrupt entry points.
 */
void sun4m_init_IRQ(void)
{
	int ie_node,i;
	struct linux_prom_registers int_regs[PROMREG_MAX];
	int num_regs;

	cli();
	if((ie_node = prom_searchsiblings(prom_getchild(prom_root_node), "obio")) == 0 ||
	   (ie_node = prom_getchild (ie_node)) == 0 ||
	   (ie_node = prom_searchsiblings (ie_node, "interrupt")) == 0) {
		prom_printf("Cannot find /obio/interrupt node\n");
		prom_halt();
	}
	num_regs = prom_getproperty(ie_node, "reg", (char *) int_regs,
				    sizeof(int_regs));
	num_regs = (num_regs/sizeof(struct linux_prom_registers));

	/* Apply the obio ranges to these registers. */
	prom_apply_obio_ranges(int_regs, num_regs);

	/* Entry 4 is set to the last "reg" entry (the system-wide
	 * control registers); entries 1-3 are synthesized as
	 * consecutive PAGE_SIZE pages after entry 0 for the per-cpu
	 * registers.  Note ie_node is reused as a plain loop index.
	 */
	int_regs[4].phys_addr = int_regs[num_regs-1].phys_addr;
	int_regs[4].reg_size = int_regs[num_regs-1].reg_size;
	int_regs[4].which_io = int_regs[num_regs-1].which_io;
	for(ie_node = 1; ie_node < 4; ie_node++) {
		int_regs[ie_node].phys_addr = int_regs[ie_node-1].phys_addr + PAGE_SIZE;
		int_regs[ie_node].reg_size = int_regs[ie_node-1].reg_size;
		int_regs[ie_node].which_io = int_regs[ie_node-1].which_io;
	}

	/* Map the interrupt registers for all possible cpus. */
	sun4m_interrupts = sparc_alloc_io(int_regs[0].phys_addr, 0,
					  PAGE_SIZE*NCPUS, "interrupts_percpu",
					  int_regs[0].which_io, 0x0);

	/* Map the system interrupt control registers. */
	sparc_alloc_io(int_regs[4].phys_addr, 0,
		       int_regs[4].reg_size, "interrupts_system",
		       int_regs[4].which_io, 0x0);

	/* Mask every system interrupt, then clear the per-cpu pending
	 * bits.  NOTE(review): the 0x17fff constant presumably matches
	 * the per-cpu pending-bit layout — verify against SUN4M_INT_*.
	 */
	sun4m_interrupts->set = ~SUN4M_INT_MASKALL;
	for (i=0; i<linux_num_cpus; i++)
		sun4m_interrupts->cpu_intregs[i].clear = ~0x17fff;

	if (linux_num_cpus > 1) {
		/* system wide interrupts go to cpu 0, this should always
		 * be safe because it is guaranteed to be fitted or OBP doesn't
		 * come up
		 *
		 * Not sure, but writing here on SLAVIO systems may puke
		 * so I don't do it unless there is more than 1 cpu.
		 */
#if 0
		printk("Warning:"
		       "sun4m multiple CPU interrupt code requires work\n");
#endif
		irq_rcvreg = (unsigned long *)
			&sun4m_interrupts->undirected_target;
		sun4m_interrupts->undirected_target = 0;
	}
	/* Hook the chip-specific routines into the generic IRQ layer. */
	enable_irq = sun4m_enable_irq;
	disable_irq = sun4m_disable_irq;
	clear_clock_irq = sun4m_clear_clock_irq;
	clear_profile_irq = sun4m_clear_profile_irq;
	load_profile_irq = sun4m_load_profile_irq;
	init_timers = sun4m_init_timers;
#ifdef __SMP__
	set_cpu_int = (void (*) (int, int))sun4m_send_ipi;
	clear_cpu_int = (void (*) (int, int))sun4m_clear_ipi;
	set_irq_udt = (void (*) (int))sun4m_set_udt;
#endif
	sti();
}
diff --git a/arch/sparc/kernel/sunos_asm.S b/arch/sparc/kernel/sunos_asm.S
new file mode 100644
index 000000000..eda4cd968
--- /dev/null
+++ b/arch/sparc/kernel/sunos_asm.S
@@ -0,0 +1,80 @@
+/* $Id: sunos_asm.S,v 1.12 1996/04/03 02:14:57 davem Exp $
+ * sunos_asm.S: SunOS system calls which must have a low-level
+ * entry point to operate correctly.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/ptrace.h>
+
	.text
	.align 4

	/* SunOS getpid() returns pid in %o0 and ppid in %o1.
	 * Each result is stored into the saved %i0/%i1 slots of the
	 * syscall pt_regs frame so both land in the caller's registers
	 * on return.  The nop after each call fills the delay slot.
	 */
	.globl C_LABEL(sunos_getpid)
C_LABEL(sunos_getpid):
	call C_LABEL(sys_getpid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I0]

	call C_LABEL(sys_getppid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I1]

	b C_LABEL(ret_sys_call)
	 nop

	/* SunOS getuid() returns uid in %o0 and euid in %o1 */
	.globl C_LABEL(sunos_getuid)
C_LABEL(sunos_getuid):
	call C_LABEL(sys_getuid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I0]

	call C_LABEL(sys_geteuid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I1]

	b C_LABEL(ret_sys_call)
	 nop

	/* SunOS getgid() returns gid in %o0 and egid in %o1 */
	.globl C_LABEL(sunos_getgid)
C_LABEL(sunos_getgid):
	call C_LABEL(sys_getgid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I0]

	call C_LABEL(sys_getegid)
	 nop

	st %o0, [%sp + REGWIN_SZ + PT_I1]

	b C_LABEL(ret_sys_call)
	 nop

	/* SunOS's execv() call only specifies the argv argument, the
	 * environment settings are the same as the calling processes.
	 * Zero the third (envp) pt_regs argument slot and hand the
	 * regs frame to sparc_execve (passed in %o0 via the delay slot).
	 */
	.globl C_LABEL(sunos_execv)
C_LABEL(sunos_execv):
	st %g0, [%sp + REGWIN_SZ + PT_I2]

	call C_LABEL(sparc_execve)
	 add %sp, REGWIN_SZ, %o0

	b C_LABEL(ret_sys_call)
	 nop
+
+
+
diff --git a/arch/sparc/kernel/sunos_ioctl.c b/arch/sparc/kernel/sunos_ioctl.c
new file mode 100644
index 000000000..f8c264647
--- /dev/null
+++ b/arch/sparc/kernel/sunos_ioctl.c
@@ -0,0 +1,191 @@
+/* $Id: sunos_ioctl.c,v 1.26 1996/10/31 00:59:06 davem Exp $
+ * sunos_ioctl.c: The Linux Operating system: SunOS ioctl compatibility.
+ *
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/ioctl.h>
+#include <linux/route.h>
+#include <linux/sockios.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <asm/kbio.h>
+
+#if 0
+extern char sunkbd_type;
+extern char sunkbd_layout;
+#endif
+
+extern asmlinkage int sys_ioctl(unsigned int, unsigned int, unsigned long);
+extern asmlinkage int sys_setsid(void);
+
+asmlinkage int sunos_ioctl (int fd, unsigned long cmd, unsigned long arg)
+{
+ struct file *filp;
+ int ret;
+
+ if (fd >= NR_OPEN || !(filp = current->files->fd [fd]))
+ return -EBADF;
+
+ /* First handle an easy compat. case for tty ldisc. */
+ if(cmd == TIOCSETD) {
+ int *p, ntty = N_TTY;
+ int tmp, oldfs;
+
+ p = (int *) arg;
+ if(get_user(tmp, p))
+ return -EFAULT;
+ if(tmp == 2) {
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, cmd, (int) &ntty);
+ set_fs(oldfs);
+ return (ret == -EINVAL ? -EOPNOTSUPP : ret);
+ }
+ }
+
+ /* Binary compatibility is good American knowhow fuckin' up. */
+ if(cmd == TIOCNOTTY)
+ return sys_setsid();
+
+ /* SunOS networking ioctls. */
+ switch (cmd) {
+ case _IOW('r', 10, struct rtentry):
+ return sys_ioctl(fd, SIOCADDRT, arg);
+ case _IOW('r', 11, struct rtentry):
+ return sys_ioctl(fd, SIOCDELRT, arg);
+ case _IOW('i', 12, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFADDR, arg);
+ case _IOWR('i', 13, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFADDR, arg);
+ case _IOW('i', 14, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFDSTADDR, arg);
+ case _IOWR('i', 15, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFDSTADDR, arg);
+ case _IOW('i', 16, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFFLAGS, arg);
+ case _IOWR('i', 17, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFFLAGS, arg);
+ case _IOW('i', 18, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFMEM, arg);
+ case _IOWR('i', 19, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFMEM, arg);
+ case _IOWR('i', 20, struct ifconf):
+ return sys_ioctl(fd, SIOCGIFCONF, arg);
+ case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
+ return sys_ioctl(fd, SIOCSIFMTU, arg);
+ case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
+ return sys_ioctl(fd, SIOCGIFMTU, arg);
+
+ case _IOWR('i', 23, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+ case _IOW('i', 24, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFBRDADDR, arg);
+ case _IOWR('i', 25, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFNETMASK, arg);
+ case _IOW('i', 26, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFNETMASK, arg);
+ case _IOWR('i', 27, struct ifreq):
+ return sys_ioctl(fd, SIOCGIFMETRIC, arg);
+ case _IOW('i', 28, struct ifreq):
+ return sys_ioctl(fd, SIOCSIFMETRIC, arg);
+
+ case _IOW('i', 30, struct arpreq):
+ return sys_ioctl(fd, SIOCSARP, arg);
+ case _IOWR('i', 31, struct arpreq):
+ return sys_ioctl(fd, SIOCGARP, arg);
+ case _IOW('i', 32, struct arpreq):
+ return sys_ioctl(fd, SIOCDARP, arg);
+
+ case _IOW('i', 40, struct ifreq): /* SIOCUPPER */
+ case _IOW('i', 41, struct ifreq): /* SIOCLOWER */
+ case _IOW('i', 44, struct ifreq): /* SIOCSETSYNC */
+ case _IOW('i', 45, struct ifreq): /* SIOCGETSYNC */
+ case _IOW('i', 46, struct ifreq): /* SIOCSSDSTATS */
+ case _IOW('i', 47, struct ifreq): /* SIOCSSESTATS */
+ case _IOW('i', 48, struct ifreq): /* SIOCSPROMISC */
+ return -EOPNOTSUPP;
+
+ case _IOW('i', 49, struct ifreq):
+ return sys_ioctl(fd, SIOCADDMULTI, arg);
+ case _IOW('i', 50, struct ifreq):
+ return sys_ioctl(fd, SIOCDELMULTI, arg);
+
+ /* FDDI interface ioctls, unsupported. */
+
+ case _IOW('i', 51, struct ifreq): /* SIOCFDRESET */
+ case _IOW('i', 52, struct ifreq): /* SIOCFDSLEEP */
+ case _IOW('i', 53, struct ifreq): /* SIOCSTRTFMWAR */
+ case _IOW('i', 54, struct ifreq): /* SIOCLDNSTRTFW */
+ case _IOW('i', 55, struct ifreq): /* SIOCGETFDSTAT */
+ case _IOW('i', 56, struct ifreq): /* SIOCFDNMIINT */
+ case _IOW('i', 57, struct ifreq): /* SIOCFDEXUSER */
+ case _IOW('i', 58, struct ifreq): /* SIOCFDGNETMAP */
+ case _IOW('i', 59, struct ifreq): /* SIOCFDGIOCTL */
+ printk("FDDI ioctl, returning EOPNOTSUPP\n");
+ return -EOPNOTSUPP;
+ case _IOW('t', 125, int):
+ /* More stupid tty sunos ioctls, just
+ * say it worked.
+ */
+ return 0;
+ /* Non posix grp */
+ case _IOW('t', 118, int): {
+ int oldval, newval, *ptr;
+
+ cmd = TIOCSPGRP;
+ ptr = (int *) arg;
+ if(get_user(oldval, ptr))
+ return -EFAULT;
+ ret = sys_ioctl(fd, cmd, arg);
+ __get_user(newval, ptr);
+ if(newval == -1) {
+ __put_user(oldval, ptr);
+ ret = -EIO;
+ }
+ if(ret == -ENOTTY)
+ ret = -EIO;
+ return ret;
+ }
+
+ case _IOR('t', 119, int): {
+ int oldval, newval, *ptr;
+
+ cmd = TIOCGPGRP;
+ ptr = (int *) arg;
+ if(get_user(oldval, ptr))
+ return -EFAULT;
+ ret = sys_ioctl(fd, cmd, arg);
+ __get_user(newval, ptr);
+ if(newval == -1) {
+ __put_user(oldval, ptr);
+ ret = -EIO;
+ }
+ if(ret == -ENOTTY)
+ ret = -EIO;
+ return ret;
+ }
+ }
+
+#if 0
+ if (cmd & 0xff00 == ('k' << 8)){
+ printk ("[[KBIO: %8.8x\n", (unsigned int) cmd);
+ }
+#endif
+
+ ret = sys_ioctl(fd, cmd, arg);
+ /* so stupid... */
+ return (ret == -EINVAL ? -EOPNOTSUPP : ret);
+}
+
+
diff --git a/arch/sparc/kernel/switch.S b/arch/sparc/kernel/switch.S
new file mode 100644
index 000000000..82eb2fb10
--- /dev/null
+++ b/arch/sparc/kernel/switch.S
@@ -0,0 +1,96 @@
+/* $Id: switch.S,v 1.18 1996/04/03 02:15:00 davem Exp $
+ * switch.S: Sparc task switch code.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/contregs.h>
+#include <asm/cprefix.h>
+#include <asm/psr.h>
+#include <asm/asmmacro.h>
+#include <asm/ptrace.h>
+#include <asm/winmacro.h>
+
+#define sw_ntask g1
+#define sw_psr g4
+#define sw_wim g5
+#define sw_tmp g6
+#define sw_ctx g7
+
+/* Context switch code. The new process's task_struct
+ * ptr is passed as the first parameter.
+ *
+ * First successful task switch 05/13/95 21:52:37
+ */
+/* sparc_switch_to(%o0 = new task_struct ptr):
+ * Saves the outgoing task's kernel %psr/%wim/%sp into its thread area,
+ * installs the new task as "current", then restores the new task's
+ * saved kernel state and returns on its kernel stack.
+ * NOTE(review): the std/ldd at THREAD_KPSR move the %g4/%g5
+ * (sw_psr/sw_wim) register pair in a single access — presumably
+ * THREAD_KPSR and THREAD_KWIM are adjacent; confirm in asm offsets.
+ */
+ .align 4
+ .globl C_LABEL(sparc_switch_to)
+C_LABEL(sparc_switch_to):
+ mov %o0, %sw_ntask
+
+ /* Save kernel state. */
+ FLUSH_ALL_KERNEL_WINDOWS;
+ STORE_WINDOW(sp)
+ rd %psr, %sw_psr
+ LOAD_CURRENT(sw_tmp, sw_wim)
+ rd %wim, %sw_wim
+ std %sw_psr, [%sw_tmp + THREAD_KPSR]
+ std %sp, [%sw_tmp + THREAD_KSP]
+
+ /* Load new kernel state. */
+ /* wr XORs its operands, so this flips PSR_ET: traps go off while
+ * %wim/%sp are inconsistent, and come back on at the bottom. */
+ wr %sw_psr, PSR_ET, %psr
+ WRITE_PAUSE
+#ifdef __SMP__
+ GET_PROCESSOR_OFFSET(sw_psr)
+ set C_LABEL(current_set), %sw_tmp
+ st %sw_ntask, [%sw_tmp + %sw_psr]
+#else
+ sethi %hi(C_LABEL(current_set)), %sw_tmp
+ st %sw_ntask, [%sw_tmp + %lo(C_LABEL(current_set))]
+#endif
+ /* ldd refills the %g4/%g5 pair: the new task's saved psr and wim. */
+ ldd [%sw_ntask + THREAD_KPSR], %sw_psr
+ wr %sw_psr, PSR_ET, %psr
+ WRITE_PAUSE
+ wr %sw_wim, 0x0, %wim
+ WRITE_PAUSE
+ ldd [%sw_ntask + THREAD_KSP], %sp
+ LOAD_WINDOW(sp)
+
+ wr %sw_psr, 0x0, %psr ! traps back on
+ WRITE_PAUSE
+
+ retl
+ nop
+
+
+#ifdef __SMP__
+ /* Because of nasty register windows this is the only way
+ * to start a processor into its cpu_idle() thread.
+ */
+
+ /* sparc_cpusched: secondary-CPU entry into the scheduler.
+ * Loads this CPU's "current" task, restores its saved kernel
+ * psr/wim/sp (the ldd fills the %g2/%g3 pair from THREAD_KPSR,
+ * so %g3 is the saved wim), and "returns" onto that task's
+ * kernel stack with traps re-enabled.
+ */
+ .globl C_LABEL(sparc_cpusched)
+C_LABEL(sparc_cpusched):
+ LOAD_CURRENT(g1, g2)
+ rd %psr, %g7
+
+ /* XOR-toggle PSR_ET: traps off while we rebuild window state. */
+ wr %g7, PSR_ET, %psr
+ WRITE_PAUSE
+
+ ldd [%g1 + THREAD_KPSR], %g2
+
+ wr %g2, PSR_ET, %psr
+ WRITE_PAUSE
+
+ wr %g3, 0x0, %wim
+ WRITE_PAUSE
+
+ ldd [%g1 + THREAD_KSP], %sp
+ LOAD_WINDOW(sp)
+
+ wr %g2, 0x0, %psr
+ WRITE_PAUSE
+
+ retl
+ nop
+#endif
diff --git a/arch/sparc/kernel/sys_solaris.c b/arch/sparc/kernel/sys_solaris.c
new file mode 100644
index 000000000..0b90f5d96
--- /dev/null
+++ b/arch/sparc/kernel/sys_solaris.c
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/sparc/sys_solaris.c
+ *
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/personality.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+
+/* Entry point for Solaris system-call traps: switch the task to the
+ * SVR4 personality and hand the registers to the registered exec-domain
+ * handler.  Returns the handler's result from %i0, or kills the task
+ * with SIGSEGV if no Solaris emulation module is loaded.
+ */
+asmlinkage int
+do_solaris_syscall (struct pt_regs *regs)
+{
+ current->personality = PER_SVR4;
+ current->exec_domain = lookup_exec_domain(PER_SVR4);
+
+ if (current->exec_domain && current->exec_domain->handler){
+ current->exec_domain->handler (regs);
+ /* NOTE(review): resetting use_count to 0 looks like it defeats
+ * whatever reference counting lookup_exec_domain() did — verify
+ * against the exec_domain API before trusting this. */
+ current->exec_domain->use_count = 0;
+ return regs->u_regs [UREG_I0];
+ }
+ printk ("No solaris handler\n");
+ send_sig (SIGSEGV, current, 1);
+ return 0;
+}
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
new file mode 100644
index 000000000..14926b1d3
--- /dev/null
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -0,0 +1,237 @@
+/* $Id: sys_sparc.c,v 1.25 1996/11/03 20:58:07 davem Exp $
+ * linux/arch/sparc/kernel/sys_sparc.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/sparc
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+
+#include <asm/uaccess.h>
+
+/* XXX Make this per-binary type, this way we can detect the type of
+ * XXX a binary. Every Sparc executable calls this very early on.
+ */
+/* Return the VM page size to userland.  Every SunOS/Solaris binary
+ * calls this very early on. */
+asmlinkage unsigned long sys_getpagesize(void)
+{
+ return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
+}
+
+extern asmlinkage unsigned long sys_brk(unsigned long brk);
+
+/* SPARC wrapper for brk(): on sun4c, refuse to move the break into
+ * the 0x20000000-0xe0000000 window (presumably an unmappable hole on
+ * that MMU — confirm), returning the unchanged break instead. */
+asmlinkage unsigned long sparc_brk(unsigned long brk)
+{
+ if(sparc_cpu_model == sun4c) {
+ if(brk >= 0x20000000 && brk < 0xe0000000)
+ return current->mm->brk;
+ }
+ return sys_brk(brk);
+}
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+/* SPARC pipe(): the SunOS convention returns the read end as the
+ * syscall result and the write end in register %o1 (UREG_I1), rather
+ * than filling a user array like the i386 ABI does. */
+asmlinkage int sparc_pipe(struct pt_regs *regs)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (error) {
+ return error;
+ } else {
+ regs->u_regs[UREG_I1] = fd[1];
+ return fd[0];
+ }
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+
+struct ipc_kludge {
+ struct msgbuf *msgp;
+ long msgtyp;
+};
+
+#define SEMOP 1
+#define SEMGET 2
+#define SEMCTL 3
+#define MSGSND 11
+#define MSGRCV 12
+#define MSGGET 13
+#define MSGCTL 14
+#define SHMAT 21
+#define SHMDT 22
+#define SHMGET 23
+#define SHMCTL 24
+
+/* De-multiplexer for the SysV IPC calls (semop/semget/semctl,
+ * msgsnd/msgrcv/msgget/msgctl, shmat/shmdt/shmget/shmctl).
+ * 'call' encodes the operation in the low 16 bits and an ABI
+ * version in the high 16 bits; 'version' selects between the old
+ * ipc_kludge msgrcv form / shmat raddr-by-pointer form (0) and the
+ * iBCS2-style direct forms (1). */
+asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr, long fifth)
+{
+ int version;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ /* The user hands us a pointer to the semun value. */
+ if(get_user(fourth.__pad, (void **)ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -EINVAL;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ /* Old ABI: msgp and msgtyp arrive packed in a
+ * struct ipc_kludge in user space. */
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+ if(copy_from_user(&tmp,(struct ipc_kludge *) ptr, sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
+ }
+ case 1: default:
+ return sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ case 0: default: {
+ /* Attach, then write the resulting address back
+ * through the user pointer in 'third'. */
+ ulong raddr;
+ int err;
+
+ err = sys_shmat (first, (char *) ptr, second, &raddr);
+ if (err)
+ return err;
+ if(put_user (raddr, (ulong *) third))
+ return -EFAULT;
+ return 0;
+ }
+ case 1: /* iBCS2 emulator entry point */
+ return sys_shmat (first, (char *) ptr, second, (ulong *) third);
+ }
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ default:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+extern unsigned long get_unmapped_area(unsigned long addr, unsigned long len);
+
+/* Linux version of mmap */
+/* Linux version of mmap */
+/* Validate the fd (unless MAP_ANONYMOUS), pick an address when the
+ * caller supplied none and didn't say MAP_FIXED, then defer to
+ * do_mmap().  Returns the mapped address or a negative errno. */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long off)
+{
+ struct file * file = NULL;
+ long retval;
+
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd])){
+ return -EBADF;
+ }
+ }
+ if(!(flags & MAP_FIXED) && !addr) {
+ addr = get_unmapped_area(addr, len);
+ if(!addr){
+ return -ENOMEM;
+ }
+ }
+ retval = do_mmap(file, addr, len, prot, flags, off);
+ return retval;
+}
+
+extern int do_open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+
+#define BSD_EMUL "/usr/gnemul/sunos"
+#define SOL_EMUL "/usr/gnemul/solaris"
+
+/* Path lookup with gnemul redirection: for BSD/SVR4-personality
+ * tasks, absolute paths are first tried under /usr/gnemul/sunos or
+ * /usr/gnemul/solaris so emulated binaries see their native trees;
+ * on any failure we fall back to the plain lookup. */
+int
+open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base)
+{
+ if (!base && (current->personality & (PER_BSD|PER_SVR4)) && *pathname == '/'){
+ struct inode *emul_ino;
+ const char *p = pathname;
+ char *emul_path = current->personality & PER_BSD ? BSD_EMUL : SOL_EMUL;
+ int v;
+
+ /* Strip the leading slashes so 'p' is relative to the
+ * emulation root. */
+ while (*p == '/')
+ p++;
+
+ /* NOTE(review): the emulation ROOT directory is opened with the
+ * caller's 'flag'/'mode' (possibly O_CREAT/O_TRUNC) — verify
+ * do_open_namei tolerates that, and check whether emul_ino needs
+ * an iput() when the nested lookup fails. */
+ if (do_open_namei (emul_path, flag, mode, &emul_ino, NULL) >= 0 && emul_ino){
+ v = do_open_namei (p, flag, mode, res_inode, emul_ino);
+ if (v >= 0)
+ return v;
+ }
+ }
+ return do_open_namei (pathname, flag, mode, res_inode, base);
+}
+
+
+/* we come to here via sys_nis_syscall so it can setup the regs argument */
+/* we come to here via sys_nis_syscall so it can setup the regs argument */
+/* Catch-all for unimplemented SPARC syscalls: log the syscall number
+ * (held in %g1) plus a register dump and fail with -ENOSYS. */
+asmlinkage unsigned long
+c_sys_nis_syscall (struct pt_regs *regs)
+{
+ printk ("Unimplemented SPARC system call %d\n",(int)regs->u_regs[1]);
+ show_regs (regs);
+ return -ENOSYS;
+}
+
+/* #define DEBUG_SPARC_BREAKPOINT */
+
+/* Handler for the breakpoint trap: deliver SIGTRAP to the current
+ * task (uncatchable-blocked via force_sig), optionally tracing
+ * entry/exit PCs when DEBUG_SPARC_BREAKPOINT is defined. */
+asmlinkage void
+sparc_breakpoint (struct pt_regs *regs)
+{
+#ifdef DEBUG_SPARC_BREAKPOINT
+ printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
+#endif
+ force_sig(SIGTRAP, current);
+#ifdef DEBUG_SPARC_BREAKPOINT
+ printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
+#endif
+}
+
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
new file mode 100644
index 000000000..91e12da55
--- /dev/null
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -0,0 +1,1178 @@
+/* $Id: sys_sunos.c,v 1.61 1996/11/03 20:58:11 davem Exp $
+ * sys_sunos.c: SunOS specific syscall compatibility support.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ *
+ * The sunos_poll routine is based on iBCS2's poll routine, this
+ * is the copyright message for that file:
+ *
+ * This file contains the procedures for the handling of poll.
+ *
+ * Copyright (C) 1994 Eric Youngdale
+ *
+ * Created for Linux based loosely upon linux select code, which
+ * in turn is loosely based upon Mathius Lattner's minix
+ * patches by Peter MacDonald. Heavily edited by Linus.
+ *
+ * Poll is used by SVr4 instead of select, and it has considerably
+ * more functionality. Parts of it are related to STREAMS, and since
+ * we do not have streams, we fake it. In fact, select() still exists
+ * under SVr4, but libc turns it into a poll() call instead. We attempt
+ * to do the inverse mapping.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/resource.h>
+#include <linux/ipc.h>
+#include <linux/shm.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/uio.h>
+#include <linux/utsname.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+
+#include <asm/uaccess.h>
+#ifndef KERNEL_DS
+#include <linux/segment.h>
+#endif
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pconf.h>
+#include <asm/idprom.h> /* for gethostid() */
+#include <asm/unistd.h>
+#include <asm/system.h>
+
+/* For the nfs mount emulation */
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/nfs.h>
+#include <linux/nfs_mount.h>
+
+/* for sunos_select */
+#include <linux/time.h>
+#include <linux/personality.h>
+
+extern unsigned long get_unmapped_area(unsigned long addr, unsigned long len);
+
+/* We use the SunOS mmap() semantics. */
+/* sunos_mmap: SunOS-flavoured mmap.
+ * Differences from the native sys_mmap: marks the task PER_BSD,
+ * ignores MAP_NORESERVE (with a warning), rewrites mappings of
+ * /dev/zero (char MEM_MAJOR:5) into anonymous mappings, and uses
+ * the SunOS _MAP_NEW flag to pick the return convention (address
+ * vs. 0-on-success).
+ *
+ * BUG FIX: the /dev/zero check dereferenced file->f_inode without
+ * checking 'file' — for MAP_ANONYMOUS requests 'file' is still NULL
+ * here, giving a kernel NULL-pointer dereference.  Guard on 'file'.
+ */
+asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long off)
+{
+ struct file * file = NULL;
+ unsigned long retval, ret_type;
+
+ current->personality |= PER_BSD;
+ if(flags & MAP_NORESERVE) {
+ printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
+ current->comm);
+ flags &= ~MAP_NORESERVE;
+ }
+ if(!(flags & MAP_ANONYMOUS))
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+ return -EBADF;
+ if(!(flags & MAP_FIXED) && !addr) {
+ addr = get_unmapped_area(addr, len);
+ if(!addr)
+ return -ENOMEM;
+ }
+ /* If this is ld.so or a shared library doing an mmap
+ * of /dev/zero, transform it into an anonymous mapping.
+ * SunOS is so stupid some times... hmph!
+ */
+ if(file && MAJOR(file->f_inode->i_rdev) == MEM_MAJOR &&
+ MINOR(file->f_inode->i_rdev) == 5) {
+ flags |= MAP_ANONYMOUS;
+ file = 0;
+ }
+ if(!(flags & MAP_FIXED))
+ addr = 0;
+ /* _MAP_NEW callers want the mapped address back; old-style
+ * callers get 0 on success. */
+ ret_type = flags & _MAP_NEW;
+ flags &= ~_MAP_NEW;
+ retval = do_mmap(file, addr, len, prot, flags, off);
+ if(ret_type)
+ return retval;
+ else
+ return ((retval < PAGE_OFFSET) ? 0 : retval);
+}
+
+/* lmbench calls this, just say "yeah, ok" */
+/* lmbench calls this, just say "yeah, ok" */
+/* SunOS mctl(): intentionally a no-op that always reports success. */
+asmlinkage int sunos_mctl(unsigned long addr, unsigned long len, int function, char *arg)
+{
+ return 0;
+}
+
+/* SunOS is completely broken... it returns 0 on success, otherwise
+ * ENOMEM. For sys_sbrk() it wants the new brk value as a return
+ * on success and ENOMEM as before on failure.
+ */
+/* SunOS brk(): returns 0 on success / -ENOMEM on failure (the new
+ * break value itself is only returned by sunos_sbrk below).  Mirrors
+ * the generic sys_brk logic: sun4c hole check, shrink fast-path,
+ * RLIMIT_DATA and mapping-collision checks, then a rough free-memory
+ * heuristic before extending the data segment. */
+asmlinkage int sunos_brk(unsigned long brk)
+{
+ int freepages;
+ unsigned long rlim;
+ unsigned long newbrk, oldbrk;
+
+ /* Same sun4c address-hole refusal as sparc_brk(). */
+ if(sparc_cpu_model == sun4c) {
+ if(brk >= 0x20000000 && brk < 0xe0000000)
+ return current->mm->brk;
+ }
+
+ if (brk < current->mm->end_code)
+ return -ENOMEM;
+
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(current->mm->brk);
+ if (oldbrk == newbrk) {
+ /* Same page: just record the new byte-granular break. */
+ current->mm->brk = brk;
+ return 0;
+ }
+
+ /*
+ * Always allow shrinking brk
+ */
+ if (brk <= current->mm->brk) {
+ current->mm->brk = brk;
+ do_munmap(newbrk, oldbrk-newbrk);
+ return 0;
+ }
+ /*
+ * Check against rlimit and stack..
+ */
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
+ if (brk - current->mm->end_code > rlim)
+ return -ENOMEM;
+
+ /*
+ * Check against existing mmap mappings.
+ */
+ if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
+ return -ENOMEM;
+
+ /*
+ * stupid algorithm to decide if we have enough memory: while
+ * simple, it hopefully works in most obvious cases.. Easy to
+ * fool it, but this should catch most mistakes.
+ */
+ freepages = buffermem >> PAGE_SHIFT;
+ freepages += page_cache_size;
+ freepages >>= 1;
+ freepages += nr_free_pages;
+ freepages += nr_swap_pages;
+ freepages -= max_mapnr >> 4;
+ freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
+ if (freepages < 0)
+ return -ENOMEM;
+ /*
+ * Ok, we have probably got enough memory - let it rip.
+ */
+ current->mm->brk = brk;
+ /* NOTE(review): the do_mmap() result is not checked here; a
+ * failure would leave mm->brk pointing at unmapped space. */
+ do_mmap(NULL, oldbrk, newbrk-oldbrk,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ return 0;
+}
+
+/* SunOS sbrk(): grow/shrink the break by 'increment' and return the
+ * resulting break value (unlike sunos_brk, which returns 0/-ENOMEM).
+ * NOTE(review): the signed addition can wrap for large increments —
+ * no overflow check here. */
+asmlinkage unsigned long sunos_sbrk(int increment)
+{
+ int error;
+
+ /* This should do it hopefully... */
+ error = sunos_brk(((int) current->mm->brk) + increment);
+ if(error)
+ return error;
+ else
+ return current->mm->brk;
+}
+
+/* XXX Completely undocumented, and completely magic...
+ * XXX I believe it is to increase the size of the stack by
+ * XXX argument 'increment' and return the new end of stack
+ * XXX area. Wheee...
+ */
+/* Unsupported: log-free stub that reports failure via printk + -1. */
+asmlinkage unsigned long sunos_sstk(int increment)
+{
+ printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
+ current->comm, increment);
+ return -1;
+}
+
+/* Give hints to the kernel as to what paging strategy to use...
+ * Completely bogus, don't remind me.
+ */
+#define VA_NORMAL 0 /* Normal vm usage expected */
+#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
+#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
+#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
+/* Printable names for the VA_* strategies, indexed by value. */
+static char *vstrings[] = {
+ "VA_NORMAL",
+ "VA_ABNORMAL",
+ "VA_SEQUENTIAL",
+ "VA_INVALIDATE",
+};
+
+/* SunOS vadvise(): accepted but deliberately ignored; we only log
+ * which strategy the caller asked for. */
+asmlinkage void sunos_vadvise(unsigned long strategy)
+{
+ /* I wanna see who uses this... */
+ printk("%s: Advises us to use %s paging strategy\n",
+ current->comm,
+ strategy <= 3 ? vstrings[strategy] : "BOGUS");
+ return; /* We don't do diddly... */
+}
+
+/* Same as vadvise, and just as bogus, but for a range of virtual
+ * process address space.
+ */
+#define MADV_NORMAL 0 /* Nothing special... */
+#define MADV_RANDOM 1 /* I am emacs... */
+#define MADV_SEQUENTIAL 2 /* I am researcher code... */
+#define MADV_WILLNEED 3 /* Pages in this range will be needed */
+#define MADV_DONTNEED 4 /* Pages in this range won't be needed */
+
+/* Printable names for the MADV_* strategies, indexed by value. */
+static char *mstrings[] = {
+ "MADV_NORMAL",
+ "MADV_RANDOM",
+ "MADV_SEQUENTIAL",
+ "MADV_WILLNEED",
+ "MADV_DONTNEED",
+};
+
+/* SunOS madvise(): like vadvise, log-and-ignore. */
+asmlinkage void sunos_madvise(unsigned long address, unsigned long len,
+ unsigned long strategy)
+{
+ /* I wanna see who uses this... */
+ printk("%s: Advises us to use %s paging strategy for addr<%08lx> len<%08lx>\n",
+ current->comm,
+ strategy <= 4 ? mstrings[strategy] : "BOGUS",
+ address, len);
+ return; /* We don't do diddly... */
+}
+
+/* Places into character array, the status of all the pages in the passed
+ * range from 'addr' to 'addr + len'. -1 on failure, 0 on success...
+ * The encoding in each character is:
+ * low-bit is zero == Page is not in physical ram right now
+ * low-bit is one == Page is currently residing in core
+ * All other bits are undefined within the character so there...
+ * Also, if you try to get stats on an area outside of the user vm area
+ * *or* the passed base address is not aligned on a page boundary you
+ * get an error.
+ */
+/* SunOS mincore(): write one status byte per page of [addr, addr+len)
+ * into 'array' — low bit set means the page is resident.  Fails with
+ * -EINVAL on an unaligned base, -EFAULT on an unwritable array, and
+ * -ENOMEM for kernel addresses or unmapped pages (per the SunOS
+ * manpage).
+ * NOTE(review): num_pages truncates len/PAGE_SIZE, so a trailing
+ * partial page is silently ignored — confirm against SunOS behavior. */
+asmlinkage int sunos_mincore(unsigned long addr, unsigned long len, char *array)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long limit;
+ int num_pages, pnum;
+
+ if(addr & ~(PAGE_MASK))
+ return -EINVAL;
+
+ num_pages = (len / PAGE_SIZE);
+ if(verify_area(VERIFY_WRITE, array, num_pages))
+ return -EFAULT;
+ if((addr >= PAGE_OFFSET) || ((addr + len) > PAGE_OFFSET))
+ return -ENOMEM; /* I'm sure you're curious about kernel mappings.. */
+
+ /* Wheee, go through pte's */
+ pnum = 0;
+ for(limit = addr + len; addr < limit; addr += PAGE_SIZE, pnum++) {
+ pgdp = pgd_offset(current->mm, addr);
+ if(pgd_none(*pgdp))
+ return -ENOMEM; /* As per SunOS manpage */
+ pmdp = pmd_offset(pgdp, addr);
+ if(pmd_none(*pmdp))
+ return -ENOMEM; /* As per SunOS manpage */
+ ptep = pte_offset(pmdp, addr);
+ if(pte_none(*ptep))
+ return -ENOMEM; /* As per SunOS manpage */
+ /* Page in core or Swapped page? */
+ __put_user((pte_present(*ptep) ? 1 : 0), &array[pnum]);
+ }
+ return 0; /* Success... I think... */
+}
+
+/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
+ * resource limit and is for backwards compatibility with older sunos
+ * revs.
+ */
+/* Old SunOS getdtablesize(): just the compile-time fd table size. */
+asmlinkage long sunos_getdtablesize(void)
+{
+ return NR_OPEN;
+}
+/* Signal-number -> mask-bit conversion (signals are 1-based). */
+#define _S(nr) (1<<((nr)-1))
+
+/* Everything is blockable except SIGKILL and SIGSTOP. */
+#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
+
+/* SunOS sigblock(): OR the given bits into the blocked-signal mask
+ * (filtered through _BLOCKABLE) and return the previous mask.
+ * Done with interrupts disabled to keep the read-modify-write atomic. */
+asmlinkage unsigned long sunos_sigblock(unsigned long blk_mask)
+{
+ unsigned long flags;
+ unsigned long old;
+
+ save_and_cli(flags);
+ old = current->blocked;
+ current->blocked |= (blk_mask & _BLOCKABLE);
+ restore_flags(flags);
+ return old;
+}
+
+/* SunOS sigsetmask(): replace (not OR) the blocked-signal mask,
+ * returning the previous one. */
+asmlinkage unsigned long sunos_sigsetmask(unsigned long newmask)
+{
+ unsigned long flags;
+ unsigned long retval;
+
+ save_and_cli(flags);
+ retval = current->blocked;
+ current->blocked = newmask & _BLOCKABLE;
+ restore_flags(flags);
+ return retval;
+}
+
+/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
+/* getdents system call, the format of the structure just has a different */
+/* layout (d_off+d_ino instead of d_ino+d_off) */
+struct sunos_dirent {
+ long d_off;
+ unsigned long d_ino;
+ unsigned short d_reclen;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct sunos_dirent_callback {
+ struct sunos_dirent *curr;
+ struct sunos_dirent *previous;
+ int count;
+ int error;
+};
+
+/* Offset of the name field within a dirent record.  NOTE(review):
+ * evaluating this on an uninitialized pointer (as below) only forms
+ * an address, but it is still technically undefined behavior. */
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
+
+/* readdir() callback that serializes one entry into the user buffer
+ * in SunOS getdents layout (d_off first, then d_ino).  Each entry's
+ * d_off is back-patched when the NEXT entry arrives; returns -EINVAL
+ * (recorded in buf->error) once the buffer is full. */
+static int sunos_filldir(void * __buf, const char * name, int namlen,
+ off_t offset, ino_t ino)
+{
+ struct sunos_dirent * dirent;
+ struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ /* Advance the cursor past this record (lvalue cast is a GCC-ism). */
+ ((char *) dirent) += reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+/* SunOS getdents(): fill 'dirent' with up to 'cnt' bytes of
+ * sunos_dirent records via the filldir callback above.  Returns the
+ * number of bytes produced, 0/-EINVAL from the callback, or a
+ * negative errno.  The last record's d_off is patched to the final
+ * file position. */
+asmlinkage int sunos_getdents(unsigned int fd, void * dirent, int cnt)
+{
+ struct file * file;
+ struct sunos_dirent * lastdirent;
+ struct sunos_dirent_callback buf;
+ int error;
+
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+ return -EBADF;
+ if (!file->f_op || !file->f_op->readdir)
+ return -ENOTDIR;
+ /* Insist on room for at least one maximal-name record. */
+ if(cnt < (sizeof(struct sunos_dirent) + 255))
+ return -EINVAL;
+
+ buf.curr = (struct sunos_dirent *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+ error = file->f_op->readdir(file->f_inode, file, &buf, sunos_filldir);
+ if (error < 0)
+ return error;
+ lastdirent = buf.previous;
+ if (!lastdirent)
+ return buf.error;
+ put_user(file->f_pos, &lastdirent->d_off);
+ return cnt - buf.count;
+}
+
+/* Old sunos getdirentries, severely broken compatibility stuff here. */
+struct sunos_direntry {
+ unsigned long d_ino;
+ unsigned short d_reclen;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct sunos_direntry_callback {
+ struct sunos_direntry *curr;
+ struct sunos_direntry *previous;
+ int count;
+ int error;
+};
+
+/* readdir() callback for the old getdirentries format (no d_off
+ * field, so — unlike sunos_filldir — nothing is back-patched into
+ * the previous record).
+ *
+ * FIX: dropped the dead 'dirent = buf->previous;' assignment that
+ * was immediately overwritten — a leftover from the d_off handling
+ * in sunos_filldir that this format does not have.
+ */
+static int sunos_filldirentry(void * __buf, const char * name, int namlen,
+ off_t offset, ino_t ino)
+{
+ struct sunos_direntry * dirent;
+ struct sunos_direntry_callback * buf = (struct sunos_direntry_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ /* Advance the cursor past this record (lvalue cast is a GCC-ism). */
+ ((char *) dirent) += reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
<doc_update>
+/* Old SunOS getdirentries(): like sunos_getdents but with the legacy
+ * sunos_direntry layout, and the final file position is stored
+ * through 'basep' instead of into a d_off field.
+ * NOTE(review): 'basep' is written with put_user without a prior
+ * verify_area — presumably put_user faults safely here; confirm. */
+asmlinkage int sunos_getdirentries(unsigned int fd, void * dirent, int cnt, unsigned int *basep)
+{
+ struct file * file;
+ struct sunos_direntry * lastdirent;
+ struct sunos_direntry_callback buf;
+ int error;
+
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+ return -EBADF;
+ if (!file->f_op || !file->f_op->readdir)
+ return -ENOTDIR;
+ /* Insist on room for at least one maximal-name record. */
+ if(cnt < (sizeof(struct sunos_direntry) + 255))
+ return -EINVAL;
+
+ buf.curr = (struct sunos_direntry *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+ error = file->f_op->readdir(file->f_inode, file, &buf, sunos_filldirentry);
+ if (error < 0)
+ return error;
+ lastdirent = buf.previous;
+ if (!lastdirent)
+ return buf.error;
+ put_user(file->f_pos, basep);
+ return cnt - buf.count;
+}
+
+/* SunOS getdomainname(): copy the NIS domain into the user buffer.
+ * NOTE(review): when the stored name is shorter than 'len' the copy
+ * length is shrunk to strlen() — the terminating NUL is never copied,
+ * so userland must not rely on termination.  Also 'len' is only
+ * range-checked AFTER the shrink; a huge len with a short name
+ * passes.  Verify against the SunOS manpage before changing. */
+asmlinkage int sunos_getdomainname(char *name, int len)
+{
+ int nlen = strlen(system_utsname.domainname);
+
+ if (nlen < len)
+ len = nlen;
+
+ if(len > __NEW_UTS_LEN)
+ return -EFAULT;
+ if(copy_to_user(name, system_utsname.domainname, len))
+ return -EFAULT;
+ return 0;
+}
+
+struct sunos_utsname {
+ char sname[9];
+ char nname[9];
+ char nnext[56];
+ char rel[9];
+ char ver[9];
+ char mach[9];
+};
+
+/* SunOS uname(): copy the utsname fields into the fixed 9-byte SunOS
+ * slots (8 chars copied per field).
+ * NOTE(review): only nname gets an explicit NUL; the other fields are
+ * terminated only if the source string is shorter than 8 chars.  Also
+ * only the first copy_to_user is error-checked. */
+asmlinkage int sunos_uname(struct sunos_utsname *name)
+{
+ if(!name)
+ return -EFAULT;
+ if(copy_to_user(&name->sname[0], &system_utsname.sysname[0], sizeof(name->sname) - 1))
+ return -EFAULT;
+ copy_to_user(&name->nname[0], &system_utsname.nodename[0], sizeof(name->nname) - 1);
+ put_user('\0', &name->nname[8]);
+ copy_to_user(&name->rel[0], &system_utsname.release[0], sizeof(name->rel) - 1);
+ copy_to_user(&name->ver[0], &system_utsname.version[0], sizeof(name->ver) - 1);
+ copy_to_user(&name->mach[0], &system_utsname.machine[0], sizeof(name->mach) - 1);
+ return 0;
+}
+
+/* Stub for unimplemented SunOS syscalls: record the faulting PC and
+ * syscall number (in %g1) in the thread struct, raise SIGSYS, log a
+ * register dump, and fail with -ENOSYS. */
+asmlinkage int sunos_nosys(void)
+{
+ struct pt_regs *regs;
+
+ regs = current->tss.kregs;
+ current->tss.sig_address = regs->pc;
+ current->tss.sig_desc = regs->u_regs[UREG_G1];
+ send_sig(SIGSYS, current, 1);
+ printk("Process makes ni_syscall number %d, register dump:\n",
+ (int) regs->u_regs[UREG_G1]);
+ show_regs(regs);
+ return -ENOSYS;
+}
+
+/* This is not a real and complete implementation yet, just to keep
+ * the easy SunOS binaries happy.
+ */
+/* SunOS fpathconf(): return static limits for the _PCONF_* names.
+ * The fd is ignored — every file gets the same answers, which is
+ * enough for the easy SunOS binaries (see comment above). */
+asmlinkage int sunos_fpathconf(int fd, int name)
+{
+ switch(name) {
+ case _PCONF_LINK:
+ return LINK_MAX;
+ case _PCONF_CANON:
+ return MAX_CANON;
+ case _PCONF_INPUT:
+ return MAX_INPUT;
+ case _PCONF_NAME:
+ return NAME_MAX;
+ case _PCONF_PATH:
+ return PATH_MAX;
+ case _PCONF_PIPE:
+ return PIPE_BUF;
+ case _PCONF_CHRESTRICT:
+ return 1; /* XXX Investigate XXX */
+ case _PCONF_NOTRUNC:
+ return 0; /* XXX Investigate XXX */
+ case _PCONF_VDISABLE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* SunOS pathconf(): path is ignored; defers to the fd variant with a
+ * dummy fd (which the fd variant ignores anyway). */
+asmlinkage int sunos_pathconf(char *path, int name)
+{
+ return sunos_fpathconf(0, name); /* XXX cheese XXX */
+}
+
+/* SunOS mount system call emulation */
+extern asmlinkage int
+sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp);
+
+/* SunOS select(): same as sys_select, but SunOS semantics say the
+ * timeout struct is NOT updated, so mark the task STICKY_TIMEOUTS
+ * before delegating. */
+asmlinkage int sunos_select(int width, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
+{
+ /* SunOS binaries expect that select won't change the tvp contents */
+ current->personality |= STICKY_TIMEOUTS;
+ return sys_select (width, inp, outp, exp, tvp);
+}
+
+/* Deliberate no-op syscall. */
+asmlinkage void sunos_nop(void)
+{
+ return;
+}
+
+/* SunOS mount/umount. */
+#define SMNT_RDONLY 1
+#define SMNT_NOSUID 2
+#define SMNT_NEWTYPE 4
+#define SMNT_GRPID 8
+#define SMNT_REMOUNT 16
+#define SMNT_NOSUB 32
+#define SMNT_MULTI 64
+#define SMNT_SYS5 128
+
+struct sunos_fh_t {
+ char fh_data [NFS_FHSIZE];
+};
+
+struct sunos_nfs_mount_args {
+ struct sockaddr_in *addr; /* file server address */
+ struct nfs_fh *fh; /* File handle to be mounted */
+ int flags; /* flags */
+ int wsize; /* write size in bytes */
+ int rsize; /* read size in bytes */
+ int timeo; /* initial timeout in .1 secs */
+ int retrans; /* times to retry send */
+ char *hostname; /* server's hostname */
+ int acregmin; /* attr cache file min secs */
+ int acregmax; /* attr cache file max secs */
+ int acdirmin; /* attr cache dir min secs */
+ int acdirmax; /* attr cache dir max secs */
+ char *netname; /* server's netname */
+};
+
+
+extern int do_mount(kdev_t, const char *, const char *, char *, int, void *);
+extern dev_t get_unnamed_dev(void);
+extern void put_unnamed_dev(dev_t);
+extern asmlinkage int sys_mount(char *, char *, char *, unsigned long, void *);
+extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen);
+extern asmlinkage int sys_socket(int family, int type, int protocol);
+extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
+
+
+/* Bind the socket on a local reserved port and connect it to the
+ * remote server. This on Linux/i386 is done by the mount program,
+ * not by the kernel.
+ */
+/* Bind the socket on a local reserved port and connect it to the
+ * remote server. This on Linux/i386 is done by the mount program,
+ * not by the kernel.
+ *
+ * Returns 1 on success, 0 on any failure (not an errno).
+ *
+ * BUG FIX: the server port was assigned as 'server.sin_port = NFS_PORT;'
+ * without htons().  sin_port is network byte order; the bug was
+ * masked on big-endian SPARC where htons() is the identity, but it
+ * is wrong in portable code.
+ */
+static int
+sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
+{
+ struct sockaddr_in local;
+ struct sockaddr_in server;
+ int try_port;
+ int ret;
+ struct socket *socket;
+ struct inode *inode;
+ struct file *file;
+
+ file = current->files->fd [fd];
+ inode = file->f_inode;
+ if (!inode || !inode->i_sock)
+ return 0;
+
+ socket = &inode->u.socket_i;
+ local.sin_family = AF_INET;
+ local.sin_addr.s_addr = INADDR_ANY;
+
+ /* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
+ /* Walk down from 1023 looking for a free reserved port. */
+ try_port = 1024;
+ do {
+ local.sin_port = htons (--try_port);
+ ret = socket->ops->bind(socket, (struct sockaddr*)&local,
+ sizeof(local));
+ } while (ret && try_port > (1024 / 2));
+
+ if (ret)
+ return 0;
+
+ server.sin_family = AF_INET;
+ server.sin_addr = addr->sin_addr;
+ server.sin_port = htons (NFS_PORT);
+
+ /* Call sys_connect */
+ ret = socket->ops->connect (socket, (struct sockaddr *) &server,
+ sizeof (server), file->f_flags);
+ if (ret < 0)
+ return 0;
+ return 1;
+}
+
+/* Return 'value' if the caller supplied one, otherwise 'def_value'. */
+static int get_default (int value, int def_value)
+{
+ if (value)
+ return value;
+ else
+ return def_value;
+}
+
+/* Translate a SunOS NFS mount into a Linux one: SunOS mount does not
+ * hand the kernel a connected socket, so we create a UDP socket, bind
+ * it to a reserved local port, connect it to the server, translate
+ * the sunos_nfs_mount_args into nfs_mount_data (filling in SunOS
+ * defaults), and call do_mount with an anonymous device.
+ *
+ * FIXES:
+ *  - the getname() failure path returned -EFAULT while leaking the
+ *    freshly created server_fd; close it first.
+ *  - the hostname was strncpy'd with length 254 but only byte [255]
+ *    was forced to NUL, leaving [254] possibly uninitialized for
+ *    253+-char names; terminate at [254] to match the copy length.
+ */
+asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data)
+{
+ int ret = -ENODEV;
+ int server_fd;
+ char *the_name;
+ struct nfs_mount_data linux_nfs_mount;
+ struct sunos_nfs_mount_args *sunos_mount = data;
+ dev_t dev;
+
+ /* Ok, here comes the fun part: Linux's nfs mount needs a
+ * socket connection to the server, but SunOS mount does not
+ * require this, so we use the information on the destination
+ * address to create a socket and bind it to a reserved
+ * port on this system
+ */
+ server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (server_fd < 0)
+ return -ENXIO;
+
+ if (!sunos_nfs_get_server_fd (server_fd, sunos_mount->addr)){
+ sys_close (server_fd);
+ return -ENXIO;
+ }
+
+ /* Now, bind it to a locally reserved port */
+ linux_nfs_mount.version = NFS_MOUNT_VERSION;
+ linux_nfs_mount.flags = sunos_mount->flags;
+ linux_nfs_mount.addr = *sunos_mount->addr;
+ linux_nfs_mount.root = *sunos_mount->fh;
+ linux_nfs_mount.fd = server_fd;
+
+ /* SunOS defaults: 8K transfers, 1s initial timeout. */
+ linux_nfs_mount.rsize = get_default (sunos_mount->rsize, 8192);
+ linux_nfs_mount.wsize = get_default (sunos_mount->wsize, 8192);
+ linux_nfs_mount.timeo = get_default (sunos_mount->timeo, 10);
+ linux_nfs_mount.retrans = sunos_mount->retrans;
+
+ linux_nfs_mount.acregmin = sunos_mount->acregmin;
+ linux_nfs_mount.acregmax = sunos_mount->acregmax;
+ linux_nfs_mount.acdirmin = sunos_mount->acdirmin;
+ linux_nfs_mount.acdirmax = sunos_mount->acdirmax;
+
+ if (getname (sunos_mount->hostname, &the_name)){
+ sys_close (server_fd);
+ return -EFAULT;
+ }
+
+ strncpy (linux_nfs_mount.hostname, the_name, 254);
+ linux_nfs_mount.hostname [254] = 0;
+ putname (the_name);
+
+ dev = get_unnamed_dev ();
+
+ ret = do_mount (dev, "", dir_name, "nfs", linux_flags, &linux_nfs_mount);
+ if (ret)
+ put_unnamed_dev(dev);
+
+ return ret;
+}
+
+/* SunOS mount(): translate SMNT_* flags into MS_* flags, dispatch
+ * NFS to the dedicated path above, reject filesystems we don't do,
+ * and for block filesystems treat 'data' as the device filename.
+ * Note the final strcmp: anything that is NOT "proc" (and not one of
+ * the cases above) is rejected; "proc" itself falls through with a
+ * NULL dev_fname, which is what sys_mount expects for it. */
+asmlinkage int
+sunos_mount(char *type, char *dir, int flags, void *data)
+{
+ int linux_flags = MS_MGC_MSK; /* new semantics */
+ char *dev_fname = 0;
+
+ /* We don't handle the integer fs type */
+ if ((flags & SMNT_NEWTYPE) == 0)
+ return -EINVAL;
+
+ /* Do not allow for those flags we don't support */
+ if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
+ return -EINVAL;
+
+ if(flags & SMNT_REMOUNT)
+ linux_flags |= MS_REMOUNT;
+ if(flags & SMNT_RDONLY)
+ linux_flags |= MS_RDONLY;
+ if(flags & SMNT_NOSUID)
+ linux_flags |= MS_NOSUID;
+ if(strcmp(type, "ext2") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "iso9660") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "minix") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "ext") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "xiafs") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "nfs") == 0) {
+ return sunos_nfs_mount (dir, flags, data);
+ } else if(strcmp(type, "ufs") == 0) {
+ printk("Warning: UFS filesystem mounts unsupported.\n");
+ return -ENODEV;
+ } else if(strcmp(type, "proc")) {
+ return -ENODEV;
+ }
+ /* NOTE(review): 'type'/'dir'/'data' are user pointers passed
+ * straight through — presumably sys_mount copies them; confirm
+ * for this kernel generation. */
+ return sys_mount(dev_fname, dir, type, linux_flags, NULL);
+}
+
+extern asmlinkage int sys_setsid(void);
+extern asmlinkage int sys_setpgid(pid_t, pid_t);
+
+/* SunOS setpgrp(): setpgrp(0,0) (or on self with pgid 0) means
+ * "make me a session leader" — map it to setsid(); anything else is
+ * a plain setpgid(). */
+asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
+{
+ /* So stupid... */
+ if((!pid || pid == current->pid) &&
+ !pgid) {
+ sys_setsid();
+ return 0;
+ } else {
+ return sys_setpgid(pid, pgid);
+ }
+}
+
+/* So stupid... */
+extern asmlinkage int sys_wait4(pid_t, unsigned int *, int, struct rusage *);
+/* SunOS wait4(): pid 0 means "any child", which Linux spells -1. */
+asmlinkage int sunos_wait4(pid_t pid, unsigned int *stat_addr, int options, struct rusage *ru)
+{
+ return sys_wait4((pid ? pid : -1), stat_addr, options, ru);
+}
+
+extern int kill_pg(int, int, int);
+/* SunOS killpg(): signal a whole process group (non-privileged form). */
+asmlinkage int sunos_killpg(int pgrp, int sig)
+{
+ return kill_pg(pgrp, sig, 0);
+}
+
+/* Unimplemented audit syscall: log and fail. */
+asmlinkage int sunos_audit(void)
+{
+ printk ("sys_audit\n");
+ return -1;
+}
+
+/* SunOS gethostid(2): build the host id from the boot PROM's IDPROM,
+ * machine type shifted into bits 31..24 OR'd with the serial number
+ * (assumed to fit in the low 24 bits -- TODO confirm). On the AP1000
+ * the cell id is returned instead.
+ * NOTE(review): 'extern' on a definition is a no-op; presumably kept
+ * to mirror a declaration elsewhere.
+ */
+extern asmlinkage unsigned long sunos_gethostid(void)
+{
+#if CONFIG_AP1000
+ return mpp_cid();
+#else
+ return ((unsigned long)idprom->id_machtype << 24) |
+ (unsigned long)idprom->id_sernum;
+#endif
+}
+
+/* SunOS sysconf(3): report the kernel's compile-time limits. Names we
+ * do not recognize yield -1, exactly as before.
+ */
+extern asmlinkage long sunos_sysconf (int name)
+{
+	long ret;
+
+	switch (name){
+	case _SC_ARG_MAX:
+		ret = ARG_MAX;
+		break;
+	case _SC_CHILD_MAX:
+		ret = CHILD_MAX;
+		break;
+	case _SC_CLK_TCK:
+		ret = HZ;
+		break;
+	case _SC_NGROUPS_MAX:
+		ret = NGROUPS_MAX;
+		break;
+	case _SC_OPEN_MAX:
+		ret = OPEN_MAX;
+		break;
+	case _SC_JOB_CONTROL:
+		ret = 1;	/* yes, we do support job control */
+		break;
+	case _SC_SAVED_IDS:
+		ret = 1;	/* yes, we do support saved uids */
+		break;
+	case _SC_VERSION:
+		ret = 199009L;	/* POSIX.1-1990 */
+		break;
+	default:
+		ret = -1;
+		break;
+	}
+	return ret;
+}
+
+#define POLL_ROUND_UP(x,y) (((x)+(y)-1)/(y))
+
+#define POLLIN 1
+#define POLLPRI 2
+#define POLLOUT 4
+#define POLLERR 8
+#define POLLHUP 16
+#define POLLNVAL 32
+#define POLLRDNORM 64
+#define POLLWRNORM POLLOUT
+#define POLLRDBAND 128
+#define POLLWRBAND 256
+
+#define LINUX_POLLIN (POLLRDNORM | POLLRDBAND | POLLIN)
+#define LINUX_POLLOUT (POLLWRBAND | POLLWRNORM | POLLOUT)
+#define LINUX_POLLERR (POLLERR)
+
+/* Detach every wait-queue entry registered in the select table and
+ * reset its count to zero (local copy of the fs/select.c helper).
+ */
+static inline void free_wait(select_table * p)
+{
+ struct select_table_entry * entry = p->entry + p->nr;
+
+ while (p->nr > 0) {
+ p->nr--;
+ entry--;
+ remove_wait_queue(entry->wait_address,&entry->wait);
+ }
+}
+
+
+/* Copied directly from fs/select.c */
+/* Ask the file whether condition 'flag' (SEL_IN/SEL_OUT/SEL_EX) is
+ * ready via its f_op->select method; when called with a wait table a
+ * second, table-less probe double-checks readiness. Files without a
+ * select method: regular files count as always ready, anything else
+ * as never ready.
+ */
+static int check(int flag, select_table * wait, struct file * file)
+{
+ struct inode * inode;
+ struct file_operations *fops;
+ int (*select) (struct inode *, struct file *, int, select_table *);
+
+ inode = file->f_inode;
+ if ((fops = file->f_op) && (select = fops->select))
+ return select(inode, file, flag, wait)
+ || (wait && select(inode, file, flag, NULL));
+ if (S_ISREG(inode->i_mode))
+ return 1;
+ return 0;
+}
+
+/* User-visible SunOS pollfd layout. */
+struct poll {
+ int fd; /* descriptor to watch; negative entries are skipped */
+ short events; /* events the caller asked for */
+ short revents; /* events we report back */
+};
+
+/* SunOS poll(2) emulation built on the select() machinery above.
+ * 'timeout' is in milliseconds; negative means wait forever. Returns
+ * the number of descriptors with non-zero revents, or a negative errno.
+ *
+ * Fixes over the original code:
+ *  - the select-table page leaked when the kmalloc() of 'fds' failed;
+ *  - the signal-pending path returned -EINTR without unhooking the
+ *    registered wait-queue entries (free_wait), without freeing 'fds'
+ *    and the select-table page, and without restoring
+ *    current->state/current->timeout.
+ */
+int sunos_poll(struct poll * ufds, size_t nfds, int timeout)
+{
+	int i,j, count, fdcount, retflag;
+	struct poll * fdpnt;
+	struct poll * fds, *fds1;
+	select_table wait_table, *wait;
+	struct select_table_entry *entry;
+
+	if (nfds > NR_OPEN)
+		return -EINVAL;
+
+	entry = (struct select_table_entry *)__get_free_page(GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	fds = (struct poll *)kmalloc(nfds*sizeof(struct poll), GFP_KERNEL);
+	if (!fds) {
+		/* BUGFIX: the old code leaked the select-table page here. */
+		free_page((unsigned long)entry);
+		return -ENOMEM;
+	}
+
+	if(copy_from_user(fds, ufds, nfds*sizeof(struct poll))) {
+		free_page((unsigned long)entry);
+		kfree(fds);
+		return -EFAULT;
+	}
+
+	if (timeout < 0)
+		current->timeout = 0x7fffffff;
+	else {
+		current->timeout = jiffies + POLL_ROUND_UP(timeout, (1000/HZ));
+		if (current->timeout <= jiffies)
+			current->timeout = 0;
+	}
+
+	count = 0;
+	wait_table.nr = 0;
+	wait_table.entry = entry;
+	wait = &wait_table;
+
+	/* Pre-flag descriptors that are not open at all. */
+	for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
+		i = fdpnt->fd;
+		fdpnt->revents = 0;
+		if (!current->files->fd[i] || !current->files->fd[i]->f_inode)
+			fdpnt->revents = POLLNVAL;
+	}
+repeat:
+	current->state = TASK_INTERRUPTIBLE;
+	for(fdpnt = fds, j = 0; j < (int)nfds; j++, fdpnt++) {
+		i = fdpnt->fd;
+
+		if(i < 0) continue;
+		if (!current->files->fd[i] || !current->files->fd[i]->f_inode) continue;
+
+		if ((fdpnt->events & LINUX_POLLIN)
+		    && check(SEL_IN, wait, current->files->fd[i])) {
+			retflag = 0;
+			if (fdpnt->events & POLLIN)
+				retflag = POLLIN;
+			if (fdpnt->events & POLLRDNORM)
+				retflag = POLLRDNORM;
+			fdpnt->revents |= retflag;
+			count++;
+			wait = NULL;
+		}
+
+		if ((fdpnt->events & LINUX_POLLOUT) &&
+		    check(SEL_OUT, wait, current->files->fd[i])) {
+			fdpnt->revents |= (LINUX_POLLOUT & fdpnt->events);
+			count++;
+			wait = NULL;
+		}
+
+		if (check(SEL_EX, wait, current->files->fd[i])) {
+			fdpnt->revents |= POLLHUP;
+			count++;
+			wait = NULL;
+		}
+	}
+
+	if ((current->signal & (~current->blocked))) {
+		/* BUGFIX: interrupted by a signal -- unhook the wait
+		 * queues and release everything before bailing out
+		 * (the old code just returned, leaking both buffers
+		 * and leaving dangling wait-queue entries).
+		 */
+		free_wait(&wait_table);
+		free_page((unsigned long) entry);
+		kfree(fds);
+		current->timeout = 0;
+		current->state = TASK_RUNNING;
+		return -EINTR;
+	}
+
+	wait = NULL;
+	if (!count && current->timeout > jiffies) {
+		schedule();
+		goto repeat;
+	}
+
+	free_wait(&wait_table);
+	free_page((unsigned long) entry);
+
+	/* OK, now copy the revents fields back to user space. */
+	fds1 = fds;
+	fdcount = 0;
+	for(i=0; i < (int)nfds; i++, ufds++, fds++) {
+		if (fds->revents) {
+			fdcount++;
+		}
+		__put_user(fds->revents, &ufds->revents);
+	}
+	kfree(fds1);
+	current->timeout = 0;
+	current->state = TASK_RUNNING;
+	return fdcount;
+}
+
+extern asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+extern asmlinkage int sys_semget (key_t key, int nsems, int semflg);
+extern asmlinkage int sys_semop (int semid, struct sembuf *tsops, unsigned nsops);
+
+/* SunOS semsys(2) demultiplexer: op 0 = semctl, op 1 = semget,
+ * op 2 = semop. For semctl the SunOS command numbers differ from the
+ * Linux GETxxx/SETxxx constants and are remapped below; command values
+ * outside the map pass through unchanged.
+ */
+asmlinkage int sunos_semsys(int op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, void *ptr)
+{
+ union semun arg4;
+
+ switch (op) {
+ case 0:
+ /* Most arguments match on a 1:1 basis but cmd doesn't */
+ switch(arg3) {
+ case 4:
+ arg3=GETPID; break;
+ case 5:
+ arg3=GETVAL; break;
+ case 6:
+ arg3=GETALL; break;
+ case 3:
+ arg3=GETNCNT; break;
+ case 7:
+ arg3=GETZCNT; break;
+ case 8:
+ arg3=SETVAL; break;
+ case 9:
+ arg3=SETALL; break;
+ }
+ /* sys_semctl(): */
+ arg4.__pad=ptr; /* value to modify semaphore to */
+ return sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4 );
+ case 1:
+ /* sys_semget(): */
+ return sys_semget((key_t)arg1, (int)arg2, (int)arg3);
+ case 2:
+ /* sys_semop(): */
+ return sys_semop((int)arg1, (struct sembuf *)arg2, (unsigned)arg3);
+ default:
+ return -EINVAL;
+ }
+}
+
+extern asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
+extern asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf);
+extern asmlinkage int sys_shmdt (char *shmaddr);
+extern asmlinkage int sys_shmget (key_t key, int size, int shmflg);
+
+/* SunOS shmsys(2) demultiplexer: op selects shmat/shmctl/shmdt/shmget;
+ * the remaining arguments are cast through to the native calls.
+ */
+asmlinkage int sunos_shmsys(int op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3)
+{
+	unsigned long raddr;
+	int err;
+
+	switch(op) {
+	case 0:
+		/* shmat(): on success hand the attach address back. */
+		err = sys_shmat((int)arg1, (char *)arg2, (int)arg3, &raddr);
+		return err ? err : (int) raddr;
+	case 1:
+		/* shmctl(): modify shared memory area attributes. */
+		return sys_shmctl((int)arg1, (int)arg2, (struct shmid_ds *)arg3);
+	case 2:
+		/* shmdt(): detach a shared memory area. */
+		return sys_shmdt((char *)arg1);
+	case 3:
+		/* shmget(): get a shared memory area. */
+		return sys_shmget((key_t)arg1, (int)arg2, (int)arg3);
+	default:
+		return -EINVAL;
+	}
+}
+
+/* SunOS open(2): identical to the native call, but first marks the
+ * process as BSD-personality so later compat decisions can key off it.
+ */
+asmlinkage int sunos_open(const char *filename, int flags, int mode)
+{
+ current->personality |= PER_BSD;
+ return sys_open (filename, flags, mode);
+}
+
+
+#define SUNOS_EWOULDBLOCK 35
+
+/* see the sunos man page read(2v) for an explanation
+ of this garbage. We use O_NDELAY to mark
+ file descriptors that have been set non-blocking
+ using 4.2BSD style calls. (tridge) */
+
+/* Rewrite -EAGAIN as SunOS's EWOULDBLOCK (35), but only for descriptors
+ * that were put into 4.2BSD-style non-blocking mode (O_NDELAY set).
+ */
+static inline int check_nonblock(int ret,int fd)
+{
+ if (ret == -EAGAIN && (current->files->fd[fd]->f_flags & O_NDELAY))
+ return -SUNOS_EWOULDBLOCK;
+ return ret;
+}
+
+extern asmlinkage int sys_read(unsigned int fd,char *buf,int count);
+extern asmlinkage int sys_write(unsigned int fd,char *buf,int count);
+extern asmlinkage int sys_recv(int fd, void * ubuf, int size, unsigned flags);
+extern asmlinkage int sys_send(int fd, void * buff, int len, unsigned flags);
+extern asmlinkage int sys_accept(int fd, struct sockaddr *sa, int *addrlen);
+extern asmlinkage int sys_readv(unsigned long fd, const struct iovec * vector, long count);
+extern asmlinkage int sys_writev(unsigned long fd, const struct iovec * vector, long count);
+
+
+/* read(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_read(unsigned int fd,char *buf,int count)
+{
+ return check_nonblock(sys_read(fd,buf,count),fd);
+}
+
+/* readv(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_readv(unsigned long fd, const struct iovec * vector, long count)
+{
+ return check_nonblock(sys_readv(fd,vector,count),fd);
+}
+
+/* write(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_write(unsigned int fd,char *buf,int count)
+{
+ return check_nonblock(sys_write(fd,buf,count),fd);
+}
+
+/* writev(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_writev(unsigned long fd, const struct iovec * vector, long count)
+{
+ return check_nonblock(sys_writev(fd,vector,count),fd);
+}
+
+/* recv(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_recv(int fd, void * ubuf, int size, unsigned flags)
+{
+ return check_nonblock(sys_recv(fd,ubuf,size,flags),fd);
+}
+
+/* send(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_send(int fd, void * buff, int len, unsigned flags)
+{
+ return check_nonblock(sys_send(fd,buff,len,flags),fd);
+}
+
+/* accept(2) with the SunOS EWOULDBLOCK mapping (see check_nonblock). */
+asmlinkage int sunos_accept(int fd, struct sockaddr *sa, int *addrlen)
+{
+ return check_nonblock(sys_accept(fd,sa,addrlen),fd);
+}
+
+#define SUNOS_SV_INTERRUPT 2
+
+extern asmlinkage int sys_sigaction(int, const struct sigaction *, struct sigaction *);
+
+/* SunOS sigaction(2): translate the restart semantics. SunOS restarts
+ * syscalls by default unless SV_INTERRUPT is set; Linux interrupts by
+ * default unless SA_RESTART is set, so the flag is inverted in both
+ * directions. Assumes 'action' is non-NULL -- TODO confirm callers.
+ */
+asmlinkage int sunos_sigaction(int signum, const struct sigaction *action,
+ struct sigaction *oldaction)
+{
+ struct sigaction tmp_sa;
+ /* Copy everything except the trailing pointer field -- presumably
+ * the SunOS struct lacks Linux's last (void *) member; confirm
+ * against the SunOS headers.
+ */
+ const int sigaction_size = sizeof (struct sigaction) - sizeof (void *);
+ int err;
+ int old_fs;
+
+ if(copy_from_user(&tmp_sa, action, sigaction_size))
+ return -EFAULT;
+ if (tmp_sa.sa_flags & SUNOS_SV_INTERRUPT)
+ tmp_sa.sa_flags &= ~SUNOS_SV_INTERRUPT;
+ else
+ tmp_sa.sa_flags |= SA_RESTART;
+ old_fs = get_fs ();
+ set_fs (get_ds ());
+ err = sys_sigaction (signum, &tmp_sa, oldaction);
+ if (err == 0 && oldaction){
+ /* NOTE(review): 'oldaction' is a user-space pointer but is
+ * read and written directly here while fs is still the
+ * kernel segment -- looks unsafe; verify this path.
+ */
+ if (oldaction->sa_flags & SA_RESTART)
+ oldaction->sa_flags &= ~SA_RESTART;
+ else
+ oldaction->sa_flags |= SUNOS_SV_INTERRUPT;
+ }
+ set_fs (old_fs);
+ return err;
+}
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
new file mode 100644
index 000000000..67744d25c
--- /dev/null
+++ b/arch/sparc/kernel/systbls.S
@@ -0,0 +1,478 @@
+/* $Id: systbls.S,v 1.51 1996/11/03 20:58:04 davem Exp $
+ * systbls.S: System call entry point tables for OS compatibility.
+ * The native Linux system call table lives here also.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+
+ /* READ THIS BEFORE DICKING WITH THIS TABLE...
+ *
+ * The format of these entries is kind of peculiar
+ * to optimize non-blocking easy syscalls. If
+ * it is a difficult call or it will sleep the entry
+ * is just to word aligned address of the function
+ * routine to call. If the lowest bit of the entry
+ * is set then (entry & ~1) is the address of the low
+ * in-trap-window assembler routine which will handle
+ * the system call at the lowest possible level. For
+ * these low level optimized routines no state is saved
+ * at all and the usual restrictions reply. Act as
+ * if you got called directly from the trap table.
+ * Some of these optimized routines try really hard
+ * to get around a state save, if you run into trouble
+ * you can still survive by branching to the label
+ * syscall_is_too_hard which is in entry.S If you
+ * have to back out like this you _must_ preserve the
+ * value of %l0, %l1, %l2, and %l7 when you were called
+ * so be _careful_.
+ */
+
+#define LOWSYS(func) (CONCAT(func, _low) + 1)
+
+ .data
+ .align 4
+
+ /* First, the Linux native syscall table. */
+
+ /* Native Linux/SPARC system call table, indexed by syscall number;
+ * each .long holds one handler address (mostly three per line).
+ * Unimplemented slots point at sys_nis_syscall.
+ * NOTE(review): the numeric slot markers are sparse and the entry
+ * order is the ABI -- verify numbering against the unistd.h defines
+ * before inserting or moving any entry.
+ */
+ .globl C_LABEL(sys_call_table)
+C_LABEL(sys_call_table):
+/*0*/ .long C_LABEL(sys_setup), C_LABEL(sys_exit), C_LABEL(sys_fork)
+ .long C_LABEL(sys_read), C_LABEL(sys_write)
+/*5*/ .long C_LABEL(sys_open), C_LABEL(sys_close), C_LABEL(sys_wait4)
+ .long C_LABEL(sys_creat), C_LABEL(sys_link)
+/*10*/ .long C_LABEL(sys_unlink), C_LABEL(sunos_execv), C_LABEL(sys_chdir)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_mknod)
+/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sparc_brk)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_lseek)
+/*20*/ .long C_LABEL(sys_getpid), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_setuid), C_LABEL(sys_getuid)
+/*25*/ .long C_LABEL(sys_time), C_LABEL(sys_ptrace), C_LABEL(sys_alarm)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_pause)
+/*30*/ .long C_LABEL(sys_utime), C_LABEL(sys_stty), C_LABEL(sys_gtty)
+ .long C_LABEL(sys_access), C_LABEL(sys_nice), C_LABEL(sys_ftime)
+ .long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_newstat)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_newlstat), C_LABEL(sys_dup)
+ .long C_LABEL(sys_pipe), C_LABEL(sys_times), C_LABEL(sys_profil)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_setgid), C_LABEL(sys_getgid)
+ .long C_LABEL(sys_signal), C_LABEL(sys_geteuid)
+/*50*/ .long C_LABEL(sys_getegid), C_LABEL(sys_acct), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_ioctl), C_LABEL(sys_reboot)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_symlink), C_LABEL(sys_readlink)
+ .long C_LABEL(sys_execve), C_LABEL(sys_umask), C_LABEL(sys_chroot)
+ .long C_LABEL(sys_newfstat), C_LABEL(sys_nis_syscall), C_LABEL(sys_getpagesize)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_vfork), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_mmap), C_LABEL(sys_nis_syscall), C_LABEL(sys_munmap)
+ .long C_LABEL(sys_mprotect), C_LABEL(sys_nis_syscall), C_LABEL(sys_vhangup)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_getgroups)
+ .long C_LABEL(sys_setgroups), C_LABEL(sys_getpgrp), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_setitimer), C_LABEL(sys_nis_syscall), C_LABEL(sys_swapon)
+ .long C_LABEL(sys_getitimer), C_LABEL(sys_nis_syscall), C_LABEL(sys_sethostname)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_dup2), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_fcntl), C_LABEL(sys_select), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_fsync), C_LABEL(sys_setpriority), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*100*/ .long C_LABEL(sys_getpriority), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_gettimeofday), C_LABEL(sys_getrusage)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_readv)
+ .long C_LABEL(sys_writev), C_LABEL(sys_settimeofday), C_LABEL(sys_fchown)
+ .long C_LABEL(sys_fchmod), C_LABEL(sys_nis_syscall), C_LABEL(sys_setreuid)
+ .long C_LABEL(sys_setregid), C_LABEL(sys_rename), C_LABEL(sys_truncate)
+ .long C_LABEL(sys_ftruncate), C_LABEL(sys_flock), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_mkdir), C_LABEL(sys_rmdir), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_getrlimit)
+ .long C_LABEL(sys_setrlimit), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*150*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_statfs), C_LABEL(sys_fstatfs)
+ .long C_LABEL(sys_umount), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_setdomainname)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_quotactl), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_mount), C_LABEL(sys_ustat), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_getdents), C_LABEL(sys_setsid)
+ .long C_LABEL(sys_fchdir), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_sigpending), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_setpgid), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_newuname), C_LABEL(sys_init_module)
+ .long C_LABEL(sys_personality), C_LABEL(sys_prof), C_LABEL(sys_break)
+ .long C_LABEL(sys_lock), C_LABEL(sys_mpx), C_LABEL(sys_ulimit)
+ .long C_LABEL(sys_getppid), C_LABEL(sys_sigaction), C_LABEL(sys_sgetmask)
+/*200*/ .long C_LABEL(sys_ssetmask), C_LABEL(sys_sigsuspend), C_LABEL(sys_newlstat)
+ .long C_LABEL(sys_uselib), C_LABEL(old_readdir), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_socketcall), C_LABEL(sys_syslog), C_LABEL(sys_olduname)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_idle), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_waitpid), C_LABEL(sys_swapoff), C_LABEL(sys_sysinfo)
+ .long C_LABEL(sys_ipc), C_LABEL(sys_sigreturn), C_LABEL(sys_clone)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_adjtimex), C_LABEL(sys_sigprocmask)
+ .long C_LABEL(sys_create_module), C_LABEL(sys_delete_module)
+ .long C_LABEL(sys_get_kernel_syms), C_LABEL(sys_getpgid), C_LABEL(sys_bdflush)
+ .long C_LABEL(sys_sysfs), C_LABEL(sys_nis_syscall), C_LABEL(sys_setfsuid)
+ .long C_LABEL(sys_setfsgid), C_LABEL(sys_llseek), C_LABEL(sys_time)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_stime), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_llseek)
+ /* "We are the Knights of the Forest of Ni!!" */
+ .long C_LABEL(sys_mlock), C_LABEL(sys_munlock), C_LABEL(sys_mlockall)
+ .long C_LABEL(sys_munlockall), C_LABEL(sys_sched_setparam)
+ .long C_LABEL(sys_sched_getparam), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_sched_get_priority_max), C_LABEL(sys_sched_get_priority_min)
+ .long C_LABEL(sys_sched_rr_get_interval), C_LABEL(sys_nanosleep)
+/*250*/ .long C_LABEL(sys_mremap)
+ .long C_LABEL(sys_sysctl)
+ .long C_LABEL(sys_getsid), C_LABEL(sys_fdatasync), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+
+ /* Now the SunOS syscall table. */
+
+ /* SunOS 4.x compatibility syscall table, indexed by the SunOS
+ * syscall number. Slots with no emulation point at sunos_nosys;
+ * entries named sunos_* are the compat wrappers in sys_sunos.c,
+ * sys_* entries map straight onto the native calls.
+ * NOTE(review): entry order is the SunOS ABI -- do not reorder.
+ */
+ .align 4
+ .globl C_LABEL(sunos_sys_table)
+C_LABEL(sunos_sys_table):
+/*0*/ .long C_LABEL(sunos_indir), C_LABEL(sys_exit), C_LABEL(sys_fork)
+ .long C_LABEL(sunos_read), C_LABEL(sunos_write), C_LABEL(sunos_open)
+ .long C_LABEL(sys_close), C_LABEL(sunos_wait4), C_LABEL(sys_creat)
+ .long C_LABEL(sys_link), C_LABEL(sys_unlink), C_LABEL(sunos_execv)
+ .long C_LABEL(sys_chdir), C_LABEL(sunos_nosys), C_LABEL(sys_mknod)
+ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sunos_brk)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_lseek), C_LABEL(sunos_getpid)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_getuid), C_LABEL(sunos_nosys), C_LABEL(sys_ptrace)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sys_access), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_newstat)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_newlstat), C_LABEL(sys_dup)
+ .long C_LABEL(sys_pipe), C_LABEL(sunos_nosys), C_LABEL(sys_profil)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_getgid)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+/*50*/ .long C_LABEL(sunos_nosys), C_LABEL(sys_acct), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_mctl), C_LABEL(sunos_ioctl), C_LABEL(sys_reboot)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_symlink), C_LABEL(sys_readlink)
+ .long C_LABEL(sys_execve), C_LABEL(sys_umask), C_LABEL(sys_chroot)
+ .long C_LABEL(sys_newfstat), C_LABEL(sunos_nosys), C_LABEL(sys_getpagesize)
+ .long C_LABEL(sys_msync), C_LABEL(sys_vfork), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_sbrk), C_LABEL(sunos_sstk)
+ .long C_LABEL(sunos_mmap), C_LABEL(sunos_vadvise), C_LABEL(sys_munmap)
+ .long C_LABEL(sys_mprotect), C_LABEL(sunos_madvise), C_LABEL(sys_vhangup)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_mincore), C_LABEL(sys_getgroups)
+ .long C_LABEL(sys_setgroups), C_LABEL(sys_getpgrp), C_LABEL(sunos_setpgrp)
+ .long C_LABEL(sys_setitimer), C_LABEL(sunos_nosys), C_LABEL(sys_swapon)
+ .long C_LABEL(sys_getitimer), C_LABEL(sys_gethostname), C_LABEL(sys_sethostname)
+ .long C_LABEL(sunos_getdtablesize), C_LABEL(sys_dup2), C_LABEL(sunos_nop)
+ .long C_LABEL(sys_fcntl), C_LABEL(sunos_select), C_LABEL(sunos_nop)
+ .long C_LABEL(sys_fsync), C_LABEL(sys_setpriority), C_LABEL(sys_socket)
+ .long C_LABEL(sys_connect), C_LABEL(sunos_accept)
+/*100*/ .long C_LABEL(sys_getpriority), C_LABEL(sunos_send), C_LABEL(sunos_recv)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_bind), C_LABEL(sys_setsockopt)
+ .long C_LABEL(sys_listen), C_LABEL(sunos_nosys), C_LABEL(sunos_sigaction)
+ .long C_LABEL(sunos_sigblock), C_LABEL(sunos_sigsetmask), C_LABEL(sys_sigpause)
+ .long C_LABEL(sys_sigstack), C_LABEL(sys_recvmsg), C_LABEL(sys_sendmsg)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_gettimeofday), C_LABEL(sys_getrusage)
+ .long C_LABEL(sys_getsockopt), C_LABEL(sunos_nosys), C_LABEL(sunos_readv)
+ .long C_LABEL(sunos_writev), C_LABEL(sys_settimeofday), C_LABEL(sys_fchown)
+ .long C_LABEL(sys_fchmod), C_LABEL(sys_recvfrom), C_LABEL(sys_setreuid)
+ .long C_LABEL(sys_setregid), C_LABEL(sys_rename), C_LABEL(sys_truncate)
+ .long C_LABEL(sys_ftruncate), C_LABEL(sys_flock), C_LABEL(sunos_nosys)
+ .long C_LABEL(sys_sendto), C_LABEL(sys_shutdown), C_LABEL(sys_socketpair)
+ .long C_LABEL(sys_mkdir), C_LABEL(sys_rmdir), C_LABEL(sys_utimes)
+ .long C_LABEL(sys_sigreturn), C_LABEL(sunos_nosys), C_LABEL(sys_getpeername)
+ .long C_LABEL(sunos_gethostid), C_LABEL(sunos_nosys), C_LABEL(sys_getrlimit)
+ .long C_LABEL(sys_setrlimit), C_LABEL(sunos_killpg), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+/*150*/ .long C_LABEL(sys_getsockname), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_poll), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_getdirentries), C_LABEL(sys_statfs), C_LABEL(sys_fstatfs)
+ .long C_LABEL(sys_umount), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_getdomainname), C_LABEL(sys_setdomainname)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_quotactl), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_mount), C_LABEL(sys_ustat), C_LABEL(sunos_semsys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_shmsys), C_LABEL(sunos_audit)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_getdents), C_LABEL(sys_setsid)
+ .long C_LABEL(sys_fchdir), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sys_sigpending), C_LABEL(sunos_nosys)
+ .long C_LABEL(sys_setpgid), C_LABEL(sunos_pathconf), C_LABEL(sunos_fpathconf)
+ .long C_LABEL(sunos_sysconf), C_LABEL(sunos_uname), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+/*200*/ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+/*250*/ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+ .long C_LABEL(sunos_nosys), C_LABEL(sunos_nosys)
+
+ /* {net, open}bsd system call table. */
+
+ /* {Net,Open}BSD compatibility syscall table. Currently a pure stub:
+ * every slot points at sunos_nosys; the inline comments record which
+ * BSD syscall each slot is reserved for, in ABI order.
+ */
+ .align 4
+ .globl C_LABEL(bsd_sys_table)
+C_LABEL(bsd_sys_table):
+ .long C_LABEL(sunos_nosys)/*SYSCALL*/, C_LABEL(sunos_nosys)/*EXIT*/
+ .long C_LABEL(sunos_nosys)/*FORK*/, C_LABEL(sunos_nosys)/*READ*/
+ .long C_LABEL(sunos_nosys)/*WRITE*/, C_LABEL(sunos_nosys)/*OPEN*/
+ .long C_LABEL(sunos_nosys)/*CLOSE*/, C_LABEL(sunos_nosys)/*WAIT4*/
+ .long C_LABEL(sunos_nosys)/*CREAT*/, C_LABEL(sunos_nosys)/*LINK*/
+ .long C_LABEL(sunos_nosys)/*UNLINK*/, C_LABEL(sunos_nosys)/*EXECV*/
+ .long C_LABEL(sunos_nosys)/*CHDIR*/, C_LABEL(sunos_nosys)/*FCHDIR*/
+ .long C_LABEL(sunos_nosys)/*MKNOD*/, C_LABEL(sunos_nosys)/*CHMOD*/
+ .long C_LABEL(sunos_nosys)/*CHOWN*/, C_LABEL(sunos_nosys)/*BREAK*/
+ .long C_LABEL(sunos_nosys)/*GETFSSTAT*/, C_LABEL(sunos_nosys)/*OLSEEK*/
+ .long C_LABEL(sunos_nosys)/*GETPID*/, C_LABEL(sunos_nosys)/*MOUNT*/
+ .long C_LABEL(sunos_nosys)/*UNMOUNT*/, C_LABEL(sunos_nosys)/*SETUID*/
+ .long C_LABEL(sunos_nosys)/*GETUID*/, C_LABEL(sunos_nosys)/*GETEUID*/
+ .long C_LABEL(sunos_nosys)/*PTRACE*/, C_LABEL(sunos_nosys)/*RECVMSG*/
+ .long C_LABEL(sunos_nosys)/*SENDMSG*/, C_LABEL(sunos_nosys)/*RECVFROM*/
+ .long C_LABEL(sunos_nosys)/*ACCEPT*/, C_LABEL(sunos_nosys)/*GETPEERNAME*/
+ .long C_LABEL(sunos_nosys)/*GETSOCKNAME*/, C_LABEL(sunos_nosys)/*ACCESS*/
+ .long C_LABEL(sunos_nosys)/*CHFLAGS*/, C_LABEL(sunos_nosys)/*FCHFLAGS*/
+ .long C_LABEL(sunos_nosys)/*SYNC*/, C_LABEL(sunos_nosys)/*KILL*/
+ .long C_LABEL(sunos_nosys)/*OSTAT*/, C_LABEL(sunos_nosys)/*GETPPID*/
+ .long C_LABEL(sunos_nosys)/*OLSTAT*/, C_LABEL(sunos_nosys)/*DUP*/
+ .long C_LABEL(sunos_nosys)/*PIPE*/, C_LABEL(sunos_nosys)/*GETEGID*/
+ .long C_LABEL(sunos_nosys)/*PROFIL*/, C_LABEL(sunos_nosys)/*KTRACE*/
+ .long C_LABEL(sunos_nosys)/*SIGACTION*/, C_LABEL(sunos_nosys)/*GETGID*/
+ .long C_LABEL(sunos_nosys)/*SIGPROCMASK*/, C_LABEL(sunos_nosys)/*GETLOGIN*/
+ .long C_LABEL(sunos_nosys)/*SETLOGIN*/, C_LABEL(sunos_nosys)/*ACCT*/
+ .long C_LABEL(sunos_nosys)/*SIGPENDING*/, C_LABEL(sunos_nosys)/*SIGALTSTACK*/
+ .long C_LABEL(sunos_nosys)/*IOCTL*/, C_LABEL(sunos_nosys)/*REBOOT*/
+ .long C_LABEL(sunos_nosys)/*REVOKE*/, C_LABEL(sunos_nosys)/*SYMLINK*/
+ .long C_LABEL(sunos_nosys)/*READLINK*/, C_LABEL(sunos_nosys)/*EXECVE*/
+ .long C_LABEL(sunos_nosys)/*UMASK*/, C_LABEL(sunos_nosys)/*CHROOT*/
+ .long C_LABEL(sunos_nosys)/*OFSTAT*/, C_LABEL(sunos_nosys)/*OGETKERNINFO*/
+ .long C_LABEL(sunos_nosys)/*OGETPAGESIZE*/, C_LABEL(sunos_nosys)/*MSYNC*/
+ .long C_LABEL(sunos_nosys)/*VFORK*/, C_LABEL(sunos_nosys)/*VREAD*/
+ .long C_LABEL(sunos_nosys)/*VWRITE*/, C_LABEL(sunos_nosys)/*SBRK*/
+ .long C_LABEL(sunos_nosys)/*SSTK*/, C_LABEL(sunos_nosys)/*OMMAP*/
+ .long C_LABEL(sunos_nosys)/*VADVISE*/, C_LABEL(sunos_nosys)/*MUNMAP*/
+ .long C_LABEL(sunos_nosys)/*MPROTECT*/, C_LABEL(sunos_nosys)/*MADVISE*/
+ .long C_LABEL(sunos_nosys)/*VHANGUP*/, C_LABEL(sunos_nosys)/*VLIMIT*/
+ .long C_LABEL(sunos_nosys)/*MINCORE*/, C_LABEL(sunos_nosys)/*GETGROUPS*/
+ .long C_LABEL(sunos_nosys)/*SETGROUPS*/, C_LABEL(sunos_nosys)/*GETPGRP*/
+ .long C_LABEL(sunos_nosys)/*SETPGID*/, C_LABEL(sunos_nosys)/*SETITIMER*/
+ .long C_LABEL(sunos_nosys)/*OWAIT*/, C_LABEL(sunos_nosys)/*SWAPON*/
+ .long C_LABEL(sunos_nosys)/*GETITIMER*/, C_LABEL(sunos_nosys)/*OGETHOSTNAME*/
+ .long C_LABEL(sunos_nosys)/*OSETHOSTNAME*/, C_LABEL(sunos_nosys)/*OGETDTABLESIZE*/
+ .long C_LABEL(sunos_nosys)/*DUP2*/, C_LABEL(sunos_nosys)/*GETDOPT*/
+ .long C_LABEL(sunos_nosys)/*FCNTL*/, C_LABEL(sunos_nosys)/*SELECT*/
+ .long C_LABEL(sunos_nosys)/*SETDOPT*/, C_LABEL(sunos_nosys)/*FSYNC*/
+ .long C_LABEL(sunos_nosys)/*SETPRIORITY*/, C_LABEL(sunos_nosys)/*SOCKET*/
+ .long C_LABEL(sunos_nosys)/*CONNECT*/, C_LABEL(sunos_nosys)/*OACCEPT*/
+ .long C_LABEL(sunos_nosys)/*GETPRIORITY*/, C_LABEL(sunos_nosys)/*OSEND*/
+ .long C_LABEL(sunos_nosys)/*ORECV*/, C_LABEL(sunos_nosys)/*SIGRETURN*/
+ .long C_LABEL(sunos_nosys)/*BIND*/, C_LABEL(sunos_nosys)/*SETSOCKOPT*/
+ .long C_LABEL(sunos_nosys)/*LISTEN*/, C_LABEL(sunos_nosys)/*VTIMES*/
+ .long C_LABEL(sunos_nosys)/*OSIGVEC*/, C_LABEL(sunos_nosys)/*OSIGBLOCK*/
+ .long C_LABEL(sunos_nosys)/*OSIGSETMASK*/, C_LABEL(sunos_nosys)/*SIGSUSPEND*/
+ .long C_LABEL(sunos_nosys)/*OSIGSTACK*/, C_LABEL(sunos_nosys)/*ORECVMSG*/
+ .long C_LABEL(sunos_nosys)/*OSENDMSG*/, C_LABEL(sunos_nosys)/*VTRACE*/
+ .long C_LABEL(sunos_nosys)/*GETTIMEOFDAY*/, C_LABEL(sunos_nosys)/*GETRUSAGE*/
+ .long C_LABEL(sunos_nosys)/*GETSOCKOPT*/, C_LABEL(sunos_nosys)/*ORESUBA*/
+ .long C_LABEL(sunos_nosys)/*READV*/, C_LABEL(sunos_nosys)/*WRITEV*/
+ .long C_LABEL(sunos_nosys)/*SETTIMEOFDAY*/, C_LABEL(sunos_nosys)/*FCHOWN*/
+ .long C_LABEL(sunos_nosys)/*FCHMOD*/, C_LABEL(sunos_nosys)/*ORECVFROM*/
+ .long C_LABEL(sunos_nosys)/*OSETREUID*/, C_LABEL(sunos_nosys)/*OSETREGID*/
+ .long C_LABEL(sunos_nosys)/*RENAME*/, C_LABEL(sunos_nosys)/*OTRUNCATE*/
+ .long C_LABEL(sunos_nosys)/*OFTRUNCATE*/, C_LABEL(sunos_nosys)/*FLOCK*/
+ .long C_LABEL(sunos_nosys)/*MKFIFO*/, C_LABEL(sunos_nosys)/*SENDTO*/
+ .long C_LABEL(sunos_nosys)/*SHUTDOWN*/, C_LABEL(sunos_nosys)/*SOCKETPAIR*/
+ .long C_LABEL(sunos_nosys)/*MKDIR*/, C_LABEL(sunos_nosys)/*RMDIR*/
+ .long C_LABEL(sunos_nosys)/*UTIMES*/, C_LABEL(sunos_nosys)/*OSIGRETURN*/
+ .long C_LABEL(sunos_nosys)/*ADJTIME*/, C_LABEL(sunos_nosys)/*OGETPEERNAME*/
+ .long C_LABEL(sunos_nosys)/*OGETHOSTID*/, C_LABEL(sunos_nosys)/*OSETHOSTID*/
+ .long C_LABEL(sunos_nosys)/*OGETRLIMIT*/, C_LABEL(sunos_nosys)/*OSETRLIMIT*/
+ .long C_LABEL(sunos_nosys)/*OKILLPG*/, C_LABEL(sunos_nosys)/*SETSID*/
+ .long C_LABEL(sunos_nosys)/*QUOTACTL*/, C_LABEL(sunos_nosys)/*OQUOTA*/
+ .long C_LABEL(sunos_nosys)/*OGETSOCKNAME*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NFSSVC*/
+ .long C_LABEL(sunos_nosys)/*OGETDIRENTRIES*/, C_LABEL(sunos_nosys)/*STATFS*/
+ .long C_LABEL(sunos_nosys)/*FSTATFS*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*GETFH*/
+ .long C_LABEL(sunos_nosys)/*OGETDOMAINNAME*/
+ .long C_LABEL(sunos_nosys)/*OSETDOMAINNAME*/
+ .long C_LABEL(sunos_nosys)/*OUNAME*/, C_LABEL(sunos_nosys)/*SYSARCH*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*OSEMSYS*/
+ .long C_LABEL(sunos_nosys)/*OMSGSYS*/, C_LABEL(sunos_nosys)/*OSHMSYS*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*SETGID*/
+ .long C_LABEL(sunos_nosys)/*SETEGID*/, C_LABEL(sunos_nosys)/*SETEUID*/
+ .long C_LABEL(sunos_nosys)/*LFS_BMAPV*/, C_LABEL(sunos_nosys)/*LFS_MARKV*/
+ .long C_LABEL(sunos_nosys)/*LFS_SEGCLEAN*/, C_LABEL(sunos_nosys)/*LFS_SEGWAIT*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*STAT*/, C_LABEL(sunos_nosys)/*FSTAT*/
+ .long C_LABEL(sunos_nosys)/*LSTAT*/, C_LABEL(sunos_nosys)/*PATHCONF*/
+ .long C_LABEL(sunos_nosys)/*FPATHCONF*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*GETRLIMIT*/, C_LABEL(sunos_nosys)/*SETRLIMIT*/
+ .long C_LABEL(sunos_nosys)/*GETDIRENTRIES*/, C_LABEL(sunos_nosys)/*MMAP*/
+ .long C_LABEL(sunos_nosys)/*__SYSCALL*/, C_LABEL(sunos_nosys)/*LSEEK*/
+ .long C_LABEL(sunos_nosys)/*TRUNCATE*/, C_LABEL(sunos_nosys)/*FTRUNCATE*/
+ .long C_LABEL(sunos_nosys)/*__SYSCTL*/, C_LABEL(sunos_nosys)/*MLOCK*/
+ .long C_LABEL(sunos_nosys)/*MUNLOCK*/, C_LABEL(sunos_nosys)/*UNDELETE*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*LKMNOSYS*/, C_LABEL(sunos_nosys)/*LKMNOSYS*/
+ .long C_LABEL(sunos_nosys)/*LKMNOSYS*/, C_LABEL(sunos_nosys)/*LKMNOSYS*/
+ .long C_LABEL(sunos_nosys)/*LKMNOSYS*/, C_LABEL(sunos_nosys)/*LKMNOSYS*/
+ .long C_LABEL(sunos_nosys)/*LKMNOSYS*/, C_LABEL(sunos_nosys)/*LKMNOSYS*/
+ .long C_LABEL(sunos_nosys)/*LKMNOSYS*/, C_LABEL(sunos_nosys)/*LKMNOSYS*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*__SEMCTL*/, C_LABEL(sunos_nosys)/*SEMGET*/
+ .long C_LABEL(sunos_nosys)/*SEMOP*/, C_LABEL(sunos_nosys)/*SEMCONFIG*/
+ .long C_LABEL(sunos_nosys)/*MSGCTL*/, C_LABEL(sunos_nosys)/*MSGGET*/
+ .long C_LABEL(sunos_nosys)/*MSGSND*/, C_LABEL(sunos_nosys)/*MSGRCV*/
+ .long C_LABEL(sunos_nosys)/*SHMAT*/, C_LABEL(sunos_nosys)/*SHMCTL*/
+ .long C_LABEL(sunos_nosys)/*SHMDT*/, C_LABEL(sunos_nosys)/*SHMGET*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*NOTHING*/, C_LABEL(sunos_nosys)/*NOTHING*/
+ .long C_LABEL(sunos_nosys)/*MINHERIT*/, C_LABEL(sunos_nosys)/*RFORK*/
+
+ /* One thing left, Solaris syscall table, TODO */
+ .globl C_LABEL(solaris_sys_table)
+C_LABEL(solaris_sys_table):
+/*0*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_exit), C_LABEL(sys_fork)
+ .long C_LABEL(sys_read), C_LABEL(sys_write)
+/*5*/ .long C_LABEL(solaris_open), C_LABEL(sys_close), C_LABEL(sys_wait4)
+ .long C_LABEL(sys_creat), C_LABEL(sys_link)
+/*10*/ .long C_LABEL(sys_unlink), C_LABEL(sys_nis_syscall), C_LABEL(sys_chdir)
+ .long C_LABEL(sys_time), C_LABEL(sys_mknod)
+/*15*/ .long C_LABEL(sys_chmod), C_LABEL(sys_chown), C_LABEL(sys_brk)
+ .long C_LABEL(sys_stat), C_LABEL(sys_lseek)
+/*20*/ .long C_LABEL(sunos_getpid), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_setuid), C_LABEL(sunos_getuid)
+/*25*/ .long C_LABEL(sys_stime), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_alarm), C_LABEL(sys_nis_syscall), C_LABEL(sys_pause)
+/*30*/ .long C_LABEL(sys_utime), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_access), C_LABEL(sys_nice)
+/*35*/ .long C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_sync), C_LABEL(sys_kill), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall)
+/*40*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*45*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*50*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*55*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*60*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*65*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*70*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*75*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*80*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*85*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*90*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*95*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*100*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*105*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*110*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*115*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*120*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*125*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*130*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*135*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*140*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*145*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*150*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*155*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*160*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*165*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*170*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*175*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*180*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*185*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*190*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*195*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*200*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*205*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*210*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*215*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*220*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*225*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*230*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*235*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*240*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*245*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*250*/ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+ .long C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall), C_LABEL(sys_nis_syscall)
+/*255*/ .long C_LABEL(sys_nis_syscall)
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
new file mode 100644
index 000000000..bea6336c5
--- /dev/null
+++ b/arch/sparc/kernel/tadpole.c
@@ -0,0 +1,120 @@
+/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
+ *
+ * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
+ */
+
+#include <linux/string.h>
+
+#include <asm/asi.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+
+#define MACIO_SCSI_CSR_ADDR 0x78400000
+#define MACIO_EN_DMA 0x00000200
+#define CLOCK_INIT_DONE 1
+
+static int clk_state;
+static volatile unsigned char *clk_ctrl;
+void (*cpu_pwr_save)(void);
+
+static inline unsigned int ldphys(unsigned int addr)
+{
+ unsigned long data;
+
+ __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" :
+ "=r" (data) :
+ "r" (addr), "i" (ASI_M_BYPASS));
+ return data;
+}
+
+static void clk_init(void)
+{
+ __asm__ __volatile__("mov 0x6c, %%g1\n\t"
+ "mov 0x4c, %%g2\n\t"
+ "mov 0xdf, %%g3\n\t"
+ "stb %%g1, [%0+3]\n\t"
+ "stb %%g2, [%0+3]\n\t"
+ "stb %%g3, [%0+3]\n\t" : :
+ "r" (clk_ctrl) :
+ "g1", "g2", "g3");
+}
+
+static void clk_slow(void)
+{
+ __asm__ __volatile__("mov 0xcc, %%g2\n\t"
+ "mov 0x4c, %%g3\n\t"
+ "mov 0xcf, %%g4\n\t"
+ "mov 0xdf, %%g5\n\t"
+ "stb %%g2, [%0+3]\n\t"
+ "stb %%g3, [%0+3]\n\t"
+ "stb %%g4, [%0+3]\n\t"
+ "stb %%g5, [%0+3]\n\t" : :
+ "r" (clk_ctrl) :
+ "g2", "g3", "g4", "g5");
+}
+
+static void tsu_clockstop(void)
+{
+ unsigned int mcsr;
+ unsigned long flags;
+
+ if (!clk_ctrl)
+ return;
+ if (!(clk_state & CLOCK_INIT_DONE)) {
+ save_and_cli(flags);
+ clk_init();
+ clk_state |= CLOCK_INIT_DONE; /* all done */
+ restore_flags(flags);
+ return;
+ }
+ if (!(clk_ctrl[2] & 1))
+ return; /* no speed up yet */
+
+ save_and_cli(flags);
+
+ /* if SCSI DMA in progress, don't slow clock */
+ mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
+ if ((mcsr&MACIO_EN_DMA) != 0) {
+ restore_flags(flags);
+ return;
+ }
+ /* TODO... the minimum clock setting ought to increase the
+ * memory refresh interval..
+ */
+ clk_slow();
+ restore_flags(flags);
+}
+
+static void swift_clockstop(void)
+{
+ if (!clk_ctrl)
+ return;
+ clk_ctrl[0] = 0;
+}
+
+void clock_stop_probe(void)
+{
+ unsigned int node, clk_nd;
+ char name[20];
+
+ prom_getstring(prom_root_node, "name", name, sizeof(name));
+ if (strncmp(name, "Tadpole", 7))
+ return;
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(node, "obio");
+ node = prom_getchild(node);
+ clk_nd = prom_searchsiblings(node, "clk-ctrl");
+ if (!clk_nd)
+ return;
+ printk("Clock Stopping h/w detected... ");
+ clk_ctrl = (char *) prom_getint(clk_nd, "address");
+ clk_state = 0;
+ if (name[10] == '\0') {
+ cpu_pwr_save = tsu_clockstop;
+ printk("enabled (S3)\n");
+ } else if ((name[10] == 'X') || (name[10] == 'G')) {
+ cpu_pwr_save = swift_clockstop;
+ printk("enabled (%s)\n",name+7);
+ } else
+ printk("disabled %s\n",name+7);
+}
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
new file mode 100644
index 000000000..f00c8f183
--- /dev/null
+++ b/arch/sparc/kernel/tick14.c
@@ -0,0 +1,84 @@
+/* tick14.c
+ * linux/arch/sparc/kernel/tick14.c
+ *
+ * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
+ *
+ * This file handles the Sparc specific level14 ticker
+ * This is really useful for profiling OBP uses it for keyboard
+ * aborts and other stuff.
+ *
+ *
+ */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/timex.h>
+
+#include <asm/oplib.h>
+#include <asm/segment.h>
+#include <asm/timer.h>
+#include <asm/mostek.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+extern unsigned long lvl14_save[5];
+static unsigned long *linux_lvl14 = NULL;
+static unsigned long obp_lvl14[4];
+
+void install_linux_ticker(void)
+{
+ unsigned long flags;
+
+ if (!linux_lvl14)
+ return;
+ save_and_cli(flags);
+ linux_lvl14[0] = lvl14_save[0];
+ linux_lvl14[1] = lvl14_save[1];
+ linux_lvl14[2] = lvl14_save[2];
+ linux_lvl14[3] = lvl14_save[3];
+ restore_flags(flags);
+}
+
+void install_obp_ticker(void)
+{
+ unsigned long flags;
+
+ if (!linux_lvl14)
+ return;
+ save_and_cli(flags);
+ linux_lvl14[0] = obp_lvl14[0];
+ linux_lvl14[1] = obp_lvl14[1];
+ linux_lvl14[2] = obp_lvl14[2];
+ linux_lvl14[3] = obp_lvl14[3];
+ restore_flags(flags);
+}
+
+void claim_ticker14(void (*handler)(int, void *, struct pt_regs *),
+ int irq_nr, unsigned int timeout )
+{
+ /* first we copy the obp handler instructions
+ */
+ disable_irq(irq_nr);
+ if (!handler)
+ return;
+
+ linux_lvl14 = (unsigned long *)lvl14_save[4];
+ obp_lvl14[0] = linux_lvl14[0];
+ obp_lvl14[1] = linux_lvl14[1];
+ obp_lvl14[2] = linux_lvl14[2];
+ obp_lvl14[3] = linux_lvl14[3];
+
+ if (!request_irq(irq_nr,
+ handler,
+ (SA_INTERRUPT | SA_STATIC_ALLOC),
+ "counter14",
+ NULL)) {
+ install_linux_ticker();
+ load_profile_irq(timeout);
+ enable_irq(irq_nr);
+ }
+}
diff --git a/arch/sparc/kernel/time.c b/arch/sparc/kernel/time.c
new file mode 100644
index 000000000..bc7ccdbe7
--- /dev/null
+++ b/arch/sparc/kernel/time.c
@@ -0,0 +1,361 @@
+/* $Id: time.c,v 1.19 1996/10/31 06:28:26 davem Exp $
+ * linux/arch/sparc/kernel/time.c
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ *
+ * This file handles the Sparc specific time handling details.
+ */
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+
+#include <asm/oplib.h>
+#include <asm/segment.h>
+#include <asm/timer.h>
+#include <asm/mostek.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_AP1000
+#include <asm/ap1000/apservice.h>
+#endif
+
+
+enum sparc_clock_type sp_clock_typ;
+struct mostek48t02 *mstk48t02_regs = 0;
+struct mostek48t08 *mstk48t08_regs = 0;
+static int set_rtc_mmss(unsigned long);
+
+__volatile__ unsigned int *master_l10_counter;
+__volatile__ unsigned int *master_l10_limit;
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+void timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ /* last time the cmos clock got updated */
+ static long last_rtc_update=0;
+
+ clear_clock_irq();
+
+ do_timer(regs);
+
+ /* Determine when to update the Mostek clock. */
+ if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec > 500000 - (tick >> 1) &&
+ xtime.tv_usec < 500000 + (tick >> 1))
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines were long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
+static void kick_start_clock(void)
+{
+ register struct mostek48t02 *regs = mstk48t02_regs;
+ unsigned char sec;
+ int i, count;
+
+ prom_printf("CLOCK: Clock was stopped. Kick start ");
+
+ /* Turn on the kick start bit to start the oscillator. */
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->sec &= ~MSTK_STOP;
+ regs->hour |= MSTK_KICK_START;
+ regs->creg &= ~MSTK_CREG_WRITE;
+
+ /* Delay to allow the clock oscillator to start. */
+ sec = MSTK_REG_SEC(regs);
+ for (i = 0; i < 3; i++) {
+ while (sec == MSTK_REG_SEC(regs))
+ for (count = 0; count < 100000; count++)
+ /* nothing */ ;
+ prom_printf(".");
+ sec = regs->sec;
+ }
+ prom_printf("\n");
+
+ /* Turn off kick start and set a "valid" time and date. */
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->hour &= ~MSTK_KICK_START;
+ MSTK_SET_REG_SEC(regs,0);
+ MSTK_SET_REG_MIN(regs,0);
+ MSTK_SET_REG_HOUR(regs,0);
+ MSTK_SET_REG_DOW(regs,5);
+ MSTK_SET_REG_DOM(regs,1);
+ MSTK_SET_REG_MONTH(regs,8);
+ MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
+ regs->creg &= ~MSTK_CREG_WRITE;
+
+ /* Ensure the kick start bit is off. If it isn't, turn it off. */
+ while (regs->hour & MSTK_KICK_START) {
+ prom_printf("CLOCK: Kick start still on!\n");
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->hour &= ~MSTK_KICK_START;
+ regs->creg &= ~MSTK_CREG_WRITE;
+ }
+
+ prom_printf("CLOCK: Kick start procedure successful.\n");
+}
+
+/* Return nonzero if the clock chip battery is low. */
+static int has_low_battery(void)
+{
+ register struct mostek48t02 *regs = mstk48t02_regs;
+ unsigned char data1, data2;
+
+ data1 = regs->eeprom[0]; /* Read some data. */
+ regs->eeprom[0] = ~data1; /* Write back the complement. */
+ data2 = regs->eeprom[0]; /* Read back the complement. */
+ regs->eeprom[0] = data1; /* Restore the original value. */
+
+ return (data1 == data2); /* Was the write blocked? */
+}
+
+/* Probe for the real time clock chip. */
+static void clock_probe(void)
+{
+ struct linux_prom_registers clk_reg[2];
+ char model[128];
+ register int node, cpuunit, bootbus;
+
+ /* Determine the correct starting PROM node for the probe. */
+ node = prom_getchild(prom_root_node);
+ switch (sparc_cpu_model) {
+ case sun4c:
+ break;
+ case sun4m:
+ node = prom_getchild(prom_searchsiblings(node, "obio"));
+ break;
+ case sun4d:
+ node = prom_getchild(bootbus = prom_searchsiblings(prom_getchild(cpuunit = prom_searchsiblings(node, "cpu-unit")), "bootbus"));
+ break;
+ default:
+ prom_printf("CLOCK: Unsupported architecture!\n");
+ prom_halt();
+ }
+
+ /* Find the PROM node describing the real time clock. */
+ sp_clock_typ = MSTK_INVALID;
+ node = prom_searchsiblings(node,"eeprom");
+ if (!node) {
+ prom_printf("CLOCK: No clock found!\n");
+ prom_halt();
+ }
+
+ /* Get the model name and setup everything up. */
+ model[0] = '\0';
+ prom_getstring(node, "model", model, sizeof(model));
+ if (strcmp(model, "mk48t02") == 0) {
+ sp_clock_typ = MSTK48T02;
+ if (prom_getproperty(node, "reg", (char *) clk_reg, sizeof(clk_reg)) == -1) {
+ prom_printf("clock_probe: FAILED!\n");
+ prom_halt();
+ }
+ if (sparc_cpu_model == sun4d)
+ prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
+ else
+ prom_apply_obio_ranges(clk_reg, 1);
+ /* Map the clock register io area read-only */
+ mstk48t02_regs = (struct mostek48t02 *)
+ sparc_alloc_io((void *) clk_reg[0].phys_addr,
+ (void *) 0, sizeof(*mstk48t02_regs),
+ "clock", clk_reg[0].which_io, 0x0);
+ mstk48t08_regs = 0; /* To catch weirdness */
+ } else if (strcmp(model, "mk48t08") == 0) {
+ sp_clock_typ = MSTK48T08;
+ if(prom_getproperty(node, "reg", (char *) clk_reg,
+ sizeof(clk_reg)) == -1) {
+ prom_printf("clock_probe: FAILED!\n");
+ prom_halt();
+ }
+ if (sparc_cpu_model == sun4d)
+ prom_apply_generic_ranges (bootbus, cpuunit, clk_reg, 1);
+ else
+ prom_apply_obio_ranges(clk_reg, 1);
+ /* Map the clock register io area read-only */
+ mstk48t08_regs = (struct mostek48t08 *)
+ sparc_alloc_io((void *) clk_reg[0].phys_addr,
+ (void *) 0, sizeof(*mstk48t08_regs),
+ "clock", clk_reg[0].which_io, 0x0);
+
+ mstk48t02_regs = &mstk48t08_regs->regs;
+ } else {
+ prom_printf("CLOCK: Unknown model name '%s'\n",model);
+ prom_halt();
+ }
+
+ /* Report a low battery voltage condition. */
+ if (has_low_battery())
+ printk(KERN_CRIT "NVRAM: Low battery voltage!\n");
+
+ /* Kick start the clock if it is completely stopped. */
+ if (mstk48t02_regs->sec & MSTK_STOP)
+ kick_start_clock();
+}
+
+void time_init(void)
+{
+ unsigned int year, mon, day, hour, min, sec;
+ struct mostek48t02 *mregs;
+
+#if CONFIG_AP1000
+ init_timers(timer_interrupt);
+ {
+ extern struct cap_init cap_init;
+ xtime.tv_sec = cap_init.init_time;
+ xtime.tv_usec = 0;
+ }
+ return;
+#endif
+
+ clock_probe();
+ init_timers(timer_interrupt);
+
+ mregs = mstk48t02_regs;
+ if(!mregs) {
+ prom_printf("Something wrong, clock regs not mapped yet.\n");
+ prom_halt();
+ }
+ mregs->creg |= MSTK_CREG_READ;
+ sec = MSTK_REG_SEC(mregs);
+ min = MSTK_REG_MIN(mregs);
+ hour = MSTK_REG_HOUR(mregs);
+ day = MSTK_REG_DOM(mregs);
+ mon = MSTK_REG_MONTH(mregs);
+ year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_usec = 0;
+ mregs->creg &= ~MSTK_CREG_READ;
+ return;
+}
+
+#if !CONFIG_AP1000
+static __inline__ unsigned long do_gettimeoffset(void)
+{
+ unsigned long offset = 0;
+ unsigned int count;
+
+ count = (*master_l10_counter >> 10) & 0x1fffff;
+
+ if(test_bit(TIMER_BH, &bh_active))
+ offset = 1000000;
+
+ return offset + count;
+}
+#endif
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_and_cli(flags);
+#if CONFIG_AP1000
+ ap_gettimeofday(&xtime);
+#endif
+ *tv = xtime;
+#if !CONFIG_AP1000
+ tv->tv_usec += do_gettimeoffset();
+ if(tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+#endif
+ restore_flags(flags);
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli();
+#if !CONFIG_AP1000
+ tv->tv_usec -= do_gettimeoffset();
+ if(tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+#endif
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = 0x70000000;
+ time_esterror = 0x70000000;
+ sti();
+}
+
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int real_seconds, real_minutes, mostek_minutes;
+ struct mostek48t02 *regs = mstk48t02_regs;
+
+ /* Not having a register set can lead to trouble. */
+ if (!regs)
+ return -1;
+
+ /* Read the current RTC minutes. */
+ regs->creg |= MSTK_CREG_READ;
+ mostek_minutes = MSTK_REG_MIN(regs);
+ regs->creg &= ~MSTK_CREG_READ;
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - mostek_minutes) < 30) {
+ regs->creg |= MSTK_CREG_WRITE;
+ MSTK_SET_REG_SEC(regs,real_seconds);
+ MSTK_SET_REG_MIN(regs,real_minutes);
+ regs->creg &= ~MSTK_CREG_WRITE;
+ } else
+ return -1;
+
+ return 0;
+}
diff --git a/arch/sparc/kernel/trampoline.S b/arch/sparc/kernel/trampoline.S
new file mode 100644
index 000000000..ad02e6a9c
--- /dev/null
+++ b/arch/sparc/kernel/trampoline.S
@@ -0,0 +1,93 @@
+/* $Id: trampoline.S,v 1.5 1996/09/22 06:43:10 davem Exp $
+ * mp.S: Multiprocessor low-level routines on the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/head.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+#include <asm/vaddrs.h>
+#include <asm/contregs.h>
+
+
+ .text
+ .align 4
+
+/* When we start up a cpu for the first time it enters this routine.
+ * This initializes the chip from whatever state the prom left it
+ * in and sets PIL in %psr to 15, no irqs.
+ */
+
+ .globl C_LABEL(sparc_cpu_startup)
+C_LABEL(sparc_cpu_startup):
+cpu1_startup:
+ sethi %hi(C_LABEL(trapbase_cpu1)), %g3
+ or %g3, %lo(C_LABEL(trapbase_cpu1)), %g3
+ sethi %hi(C_LABEL(cpu1_stack)), %g2
+ or %g2, %lo(C_LABEL(cpu1_stack)), %g2
+ b 1f
+ nop
+
+cpu2_startup:
+ sethi %hi(C_LABEL(trapbase_cpu2)), %g3
+ or %g3, %lo(C_LABEL(trapbase_cpu2)), %g3
+ sethi %hi(C_LABEL(cpu2_stack)), %g2
+ or %g2, %lo(C_LABEL(cpu2_stack)), %g2
+ b 1f
+ nop
+
+cpu3_startup:
+ sethi %hi(C_LABEL(trapbase_cpu3)), %g3
+ or %g3, %lo(C_LABEL(trapbase_cpu3)), %g3
+ sethi %hi(C_LABEL(cpu3_stack)), %g2
+ or %g2, %lo(C_LABEL(cpu3_stack)), %g2
+ b 1f
+ nop
+
+1:
+ /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
+ set (PSR_PIL | PSR_S | PSR_PS), %g1
+ wr %g1, 0x0, %psr ! traps off though
+ WRITE_PAUSE
+
+ /* Our %wim is one behind CWP */
+ mov 2, %g1
+ wr %g1, 0x0, %wim
+ WRITE_PAUSE
+
+ /* This identifies "this cpu". */
+ wr %g3, 0x0, %tbr
+ WRITE_PAUSE
+
+ /* Give ourselves a stack. */
+ set 0x2000, %g5
+ add %g2, %g5, %g2 ! end of stack
+ sub %g2, REGWIN_SZ, %sp
+ mov 0, %fp
+
+ /* Set up curptr. */
+ set C_LABEL(init_task), %g6
+
+ /* Turn on traps (PSR_ET). */
+ rd %psr, %g1
+ wr %g1, PSR_ET, %psr ! traps on
+ WRITE_PAUSE
+
+ /* Init our caches, etc. */
+ set C_LABEL(poke_srmmu), %g5
+ ld [%g5], %g5
+ call %g5
+ nop
+
+ /* Start this processor. */
+ call C_LABEL(smp_callin)
+ nop
+
+ call C_LABEL(cpu_idle)
+ mov 0, %o0
+
+ call C_LABEL(cpu_panic)
+ nop
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 9302191c7..798ca3aca 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -1,7 +1,7 @@
-/*
+/* $Id: traps.c,v 1.47 1996/10/27 08:36:17 davem Exp $
* arch/sparc/kernel/traps.c
*
- * Copyright 1994 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
*/
/*
@@ -10,38 +10,342 @@
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
+#include <linux/config.h>
+#include <linux/signal.h>
+
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/kdebug.h>
+#include <asm/unistd.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+
+/* #define TRAP_DEBUG */
+
+struct trap_trace_entry {
+ unsigned long pc;
+ unsigned long type;
+};
+
+int trap_curbuf = 0;
+struct trap_trace_entry trapbuf[1024];
-void do_hw_interrupt(unsigned long type, unsigned long vector)
+void syscall_trace_entry(struct pt_regs *regs)
{
- if (vector == 14) {
- jiffies++;
- return;
- }
+ printk("%s[%d]: ", current->comm, current->pid);
+ printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1],
+ (int) regs->u_regs[UREG_I0]);
+}
- /* Just print garbage for everything else for now. */
+void syscall_trace_exit(struct pt_regs *regs)
+{
+}
- printk("Unimplemented Sparc TRAP, vector = %lx type = %lx\n", vector, type);
+void sun4m_nmi(struct pt_regs *regs)
+{
+ unsigned long afsr, afar;
- return;
+ printk("Aieee: sun4m NMI received!\n");
+ /* XXX HyperSparc hack XXX */
+ __asm__ __volatile__("mov 0x500, %%g1\n\t"
+ "lda [%%g1] 0x4, %0\n\t"
+ "mov 0x600, %%g1\n\t"
+ "lda [%%g1] 0x4, %1\n\t" :
+ "=r" (afsr), "=r" (afar));
+ printk("afsr=%08lx afar=%08lx\n", afsr, afar);
+ printk("you lose buddy boy...\n");
+ show_regs(regs);
+ prom_halt();
}
-extern unsigned long *trapbase;
+void instruction_dump (unsigned long *pc)
+{
+ int i;
+
+ if((((unsigned long) pc) & 3))
+ return;
-void trap_init(void)
+ for(i = -3; i < 6; i++)
+ printk("%c%08lx%c",i?' ':'<',pc[i],i?' ':'>');
+ printk("\n");
+}
+
+void die_if_kernel(char *str, struct pt_regs *regs)
+{
+ /* Amuse the user. */
+ printk(
+" \\|/ ____ \\|/\n"
+" \"@'/ ,. \\`@\"\n"
+" /_| \\__/ |_\\\n"
+" \\__U_/\n");
+
+ printk("%s(%d): %s\n", current->comm, current->pid, str);
+ show_regs(regs);
+#if CONFIG_AP1000
+ ap_panic();
+#endif
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned long *) regs->pc);
+ if(regs->psr & PSR_PS)
+ do_exit(SIGKILL);
+ do_exit(SIGSEGV);
+}
+
+void do_hw_interrupt(unsigned long type, unsigned long psr, unsigned long pc)
+{
+ if(type < 0x80) {
+ /* Sun OS's puke from bad traps, Linux survives! */
+ printk("Unimplemented Sparc TRAP, type = %02lx\n", type);
+ die_if_kernel("Whee... Hello Mr. Penguin", current->tss.kregs);
+ }
+
+ if(type == SP_TRAP_SBPT) {
+ send_sig(SIGTRAP, current, 1);
+ return;
+ }
+
+ if(psr & PSR_PS)
+ die_if_kernel("Kernel bad trap", current->tss.kregs);
+
+ current->tss.sig_desc = SUBSIG_BADTRAP(type - 0x80);
+ current->tss.sig_address = pc;
+ send_sig(SIGILL, current, 1);
+}
+
+void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ if(psr & PSR_PS)
+ die_if_kernel("Kernel illegal instruction", regs);
+#ifdef TRAP_DEBUG
+ printk("Ill instr. at pc=%08lx instruction is %08lx\n",
+ regs->pc, *(unsigned long *)regs->pc);
+#endif
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_ILLINST;
+ send_sig(SIGILL, current, 1);
+}
+
+void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ if(psr & PSR_PS)
+ die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGILL, current, 1);
+}
+
+/* XXX User may want to be allowed to do this. XXX */
+
+void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ if(regs->psr & PSR_PS) {
+ printk("KERNEL MNA at pc %08lx npc %08lx called by %08lx\n", pc, npc,
+ regs->u_regs[UREG_RETPC]);
+ die_if_kernel("BOGUS", regs);
+ /* die_if_kernel("Kernel MNA access", regs); */
+ }
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+#if 0
+ show_regs (regs);
+ instruction_dump ((unsigned long *) regs->pc);
+ printk ("do_MNA!\n");
+#endif
+ send_sig(SIGBUS, current, 1);
+}
+
+extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
+ void *fpqueue, unsigned long *fpqdepth);
+extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+
+static unsigned long init_fsr = 0x0UL;
+static unsigned long init_fregs[32] __attribute__ ((aligned (8))) =
+ { ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
+ ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
+ ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL,
+ ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL, ~0UL };
+
+void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
{
+ /* Sanity check... */
+ if(psr & PSR_PS)
+ die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);
- /* load up the trap table */
+ put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */
+ regs->psr |= PSR_EF;
+#ifndef __SMP__
+ if(last_task_used_math == current)
+ return;
+ if(last_task_used_math) {
+ /* Other processes fpu state, save away */
+ struct task_struct *fptask = last_task_used_math;
+ fpsave(&fptask->tss.float_regs[0], &fptask->tss.fsr,
+ &fptask->tss.fpqueue[0], &fptask->tss.fpqdepth);
+ }
+ last_task_used_math = current;
+ if(current->used_math) {
+ fpload(&current->tss.float_regs[0], &current->tss.fsr);
+ } else {
+ /* Set initial sane state. */
+ fpload(&init_fregs[0], &init_fsr);
+ current->used_math = 1;
+ }
+#else
+ if(!current->used_math) {
+ fpload(&init_fregs[0], &init_fsr);
+ current->used_math = 1;
+ } else {
+ fpload(&current->tss.float_regs[0], &current->tss.fsr);
+ }
+ current->flags |= PF_USEDFPU;
+#endif
+}
+
+static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
+static unsigned long fake_fsr;
+static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
+static unsigned long fake_depth;
+
+void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ static calls = 0;
+#ifndef __SMP__
+ struct task_struct *fpt = last_task_used_math;
+#else
+ struct task_struct *fpt = current;
+#endif
+
+ put_psr(get_psr() | PSR_EF);
+ /* If nobody owns the fpu right now, just clear the
+ * error into our fake static buffer and hope it don't
+ * happen again. Thank you crashme...
+ */
+#ifndef __SMP__
+ if(!fpt) {
+#else
+ if(!(fpt->flags & PF_USEDFPU)) {
+#endif
+ fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
+ regs->psr &= ~PSR_EF;
+ return;
+ }
+ fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr,
+ &fpt->tss.fpqueue[0], &fpt->tss.fpqdepth);
+ fpt->tss.sig_address = pc;
+ fpt->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */
+#ifdef __SMP__
+ fpt->flags &= ~PF_USEDFPU;
+#endif
+ if(psr & PSR_PS) {
+ /* The first fsr store/load we tried trapped,
+ * the second one will not (we hope).
+ */
+ printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
+ regs->pc);
+ regs->pc = regs->npc;
+ regs->npc += 4;
+ calls++;
+ if(calls > 2)
+ die_if_kernel("Too many Penguin-FPU traps from kernel mode",
+ regs);
+ return;
+ }
+ send_sig(SIGFPE, fpt, 1);
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#endif
+ regs->psr &= ~PSR_EF;
+ if(calls > 0)
+ calls=0;
+}
+
+void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ if(psr & PSR_PS)
+ die_if_kernel("Penguin overflow trap from kernel mode", regs);
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_TAG; /* as good as any */
+ send_sig(SIGEMT, current, 1);
+}
+
+void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+#ifdef TRAP_DEBUG
+ printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
+ pc, npc, psr);
+#endif
+ if(psr & PSR_PS)
+ panic("Tell me what a watchpoint trap is, and I'll then deal "
+ "with such a beast...");
+}
+
+void handle_reg_access(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+#ifdef TRAP_DEBUG
+ printk("Register Access Exception at PC %08lx NPC %08lx PSR %08lx\n",
+ pc, npc, psr);
+#endif
+ send_sig(SIGILL, current, 1);
+}
+
+void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ send_sig(SIGILL, current, 1);
+}
-#if 0 /* not yet */
- __asm__("wr %0, 0x0, %%tbr\n\t"
- "nop; nop; nop\n\t" : :
- "r" (trapbase));
+void handle_bad_flush(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+#ifdef TRAP_DEBUG
+ printk("Unimplemented FLUSH Exception at PC %08lx NPC %08lx PSR %08lx\n",
+ pc, npc, psr);
#endif
+ printk("INSTRUCTION=%08lx\n", *((unsigned long *) regs->pc));
+ send_sig(SIGILL, current, 1);
+}
- return;
+void handle_cp_exception(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+#ifdef TRAP_DEBUG
+ printk("Co-Processor Exception at PC %08lx NPC %08lx PSR %08lx\n",
+ pc, npc, psr);
+#endif
+ send_sig(SIGILL, current, 1);
}
-void die_if_kernel(char * str, struct pt_regs * regs, long err)
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ send_sig(SIGILL, current, 1);
+}
+
+/* Since we have our mappings set up, on multiprocessors we can spin them
+ * up here so that timer interrupts work during initialization.
+ */
+
+extern void sparc_cpu_startup(void);
+
+extern int linux_num_cpus;
+extern ctxd_t *srmmu_ctx_table_phys;
+
+int linux_smp_still_initting;
+unsigned int thiscpus_tbr;
+int thiscpus_mid;
+
+void trap_init(void)
{
- return;
}
diff --git a/arch/sparc/kernel/unaligned.c b/arch/sparc/kernel/unaligned.c
new file mode 100644
index 000000000..adfa0d6a3
--- /dev/null
+++ b/arch/sparc/kernel/unaligned.c
@@ -0,0 +1,395 @@
+/* $Id: unaligned.c,v 1.10 1996/11/10 21:25:47 davem Exp $
+ * unaligned.c: Unaligned load/store trap handling with special
+ * cases for the kernel to do them more quickly.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+/* #define DEBUG_MNA */
+
+extern void die_if_kernel(char *, struct pt_regs *);
+
+enum direction {
+ load, /* ld, ldd, ldh, ldsh */
+ store, /* st, std, sth, stsh */
+ both, /* Swap, ldstub, etc. */
+ fpload,
+ fpstore,
+ invalid,
+};
+
+#ifdef DEBUG_MNA
+static char *dirstrings[] = {
+ "load", "store", "both", "fpload", "fpstore", "invalid"
+};
+#endif
+
+static inline enum direction decode_direction(unsigned int insn)
+{
+ unsigned long tmp = (insn >> 21) & 1;
+
+ if(!tmp)
+ return load;
+ else {
+ if(((insn>>19)&0x3f) == 15)
+ return both;
+ else
+ return store;
+ }
+}
+
+/* 8 = double-word, 4 = word, 2 = half-word */
+static inline int decode_access_size(unsigned int insn)
+{
+ insn = (insn >> 19) & 3;
+
+ if(!insn)
+ return 4;
+ else if(insn == 3)
+ return 8;
+ else if(insn == 2)
+ return 2;
+ else {
+ printk("Impossible unaligned trap. insn=%08x\n", insn);
+ die_if_kernel("Byte sized unaligned access?!?!", current->tss.kregs);
+ return 4; /* just to keep gcc happy. */
+ }
+}
+
+/* 1 = signed, 0 = unsigned */
+static inline int decode_signedness(unsigned int insn)
+{
+ return (insn >> 22) & 1;
+}
+
+static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
+ unsigned int rd)
+{
+ int yep;
+
+ if(rs2 >= 16 || rs1 >= 16 || rd >= 16)
+ yep = 1;
+ else
+ yep = 0;
+ if(yep) {
+ /* Wheee... */
+ __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "save %sp, -0x40, %sp\n\t"
+ "restore; restore; restore; restore;\n\t"
+ "restore; restore; restore;\n\t");
+ }
+}
+
+static inline int sign_extend_halfword(int hword)
+{
+ return hword << 16 >> 16;
+}
+
+static inline int sign_extend_imm13(int imm)
+{
+ return imm << 19 >> 19;
+}
+
+static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
+{
+ struct reg_window *win;
+
+ if(reg < 16)
+ return (!reg ? 0 : regs->u_regs[reg]);
+
+ /* Ho hum, the slightly complicated case. */
+ win = (struct reg_window *) regs->u_regs[UREG_FP];
+ return win->locals[reg - 16]; /* yes, I know what this does... */
+}
+
+static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+ struct reg_window *win;
+
+ if(reg < 16)
+ return &regs->u_regs[reg];
+ win = (struct reg_window *) regs->u_regs[UREG_FP];
+ return &win->locals[reg - 16];
+}
+
+static inline unsigned long compute_effective_address(struct pt_regs *regs,
+ unsigned int insn)
+{
+ unsigned int rs1 = (insn >> 14) & 0x1f;
+ unsigned int rs2 = insn & 0x1f;
+ unsigned int rd = (insn >> 25) & 0x1f;
+ unsigned int imm13 = (insn & 0x1fff);
+
+ if(insn & 0x2000) {
+ maybe_flush_windows(rs1, 0, rd);
+ return (fetch_reg(rs1, regs) + sign_extend_imm13(imm13));
+ } else {
+ maybe_flush_windows(rs1, rs2, rd);
+ return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ }
+}
+
+static inline void do_integer_load(unsigned long *dest_reg, int size,
+ unsigned long *saddr, int is_signed)
+{
+ unsigned char bytes[4];
+
+ switch(size) {
+ case 2:
+ bytes[0] = *((unsigned char *)saddr + 1);
+ bytes[1] = *((unsigned char *)saddr + 0);
+ *dest_reg = (bytes[0] | (bytes[1] << 8));
+ if(is_signed)
+ *dest_reg = sign_extend_halfword(*dest_reg);
+ break;
+
+ case 4:
+ bytes[0] = *((unsigned char *)saddr + 3);
+ bytes[1] = *((unsigned char *)saddr + 2);
+ bytes[2] = *((unsigned char *)saddr + 1);
+ bytes[3] = *((unsigned char *)saddr + 0);
+ *dest_reg = (bytes[0] | (bytes[1] << 8) |
+ (bytes[2] << 16) | (bytes[3] << 24));
+ break;
+
+ case 8:
+ bytes[0] = *((unsigned char *)saddr + 3);
+ bytes[1] = *((unsigned char *)saddr + 2);
+ bytes[2] = *((unsigned char *)saddr + 1);
+ bytes[3] = *((unsigned char *)saddr + 0);
+ *dest_reg++ = (bytes[0] | (bytes[1] << 8) |
+ (bytes[2] << 16) | (bytes[3] << 24));
+ saddr++;
+ bytes[0] = *((unsigned char *)saddr + 3);
+ bytes[1] = *((unsigned char *)saddr + 2);
+ bytes[2] = *((unsigned char *)saddr + 1);
+ bytes[3] = *((unsigned char *)saddr + 0);
+ *dest_reg = (bytes[0] | (bytes[1] << 8) |
+ (bytes[2] << 16) | (bytes[3] << 24));
+ break;
+
+ default:
+ panic("Impossible unaligned load.");
+ };
+}
+
+static inline void store_common(unsigned long *src_val,
+ int size, unsigned long *dst_addr)
+{
+ unsigned char *daddr = (unsigned char *) dst_addr;
+ switch(size) {
+ case 2:
+ daddr[0] = ((*src_val) >> 8) & 0xff;
+ daddr[1] = (*src_val & 0xff);
+ break;
+
+ case 4:
+ daddr[0] = ((*src_val) >> 24) & 0xff;
+ daddr[1] = ((*src_val) >> 16) & 0xff;
+ daddr[2] = ((*src_val) >> 8) & 0xff;
+ daddr[3] = (*src_val & 0xff);
+ break;
+
+ case 8:
+ daddr[0] = ((*src_val) >> 24) & 0xff;
+ daddr[1] = ((*src_val) >> 16) & 0xff;
+ daddr[2] = ((*src_val) >> 8) & 0xff;
+ daddr[3] = (*src_val & 0xff);
+ daddr += 4;
+ src_val++;
+ daddr[0] = ((*src_val) >> 24) & 0xff;
+ daddr[1] = ((*src_val) >> 16) & 0xff;
+ daddr[2] = ((*src_val) >> 8) & 0xff;
+ daddr[3] = (*src_val & 0xff);
+ break;
+
+ default:
+ panic("Impossible unaligned store.");
+ }
+}
+
+static inline void do_integer_store(int reg_num, int size,
+ unsigned long *dst_addr,
+ struct pt_regs *regs)
+{
+ unsigned long *src_val;
+ static unsigned long zero[2] = { 0, 0 };
+
+ if(reg_num)
+ src_val = fetch_reg_addr(reg_num, regs);
+ else
+ src_val = &zero[0];
+ store_common(src_val, size, dst_addr);
+}
+
+static inline void do_atomic(unsigned long *srcdest_reg, unsigned long *mem)
+{
+ unsigned long flags, tmp;
+
+#ifdef __SMP__
+ /* XXX Need to capture/release other cpu's around this. */
+#endif
+ save_and_cli(flags);
+ tmp = *srcdest_reg;
+ do_integer_load(srcdest_reg, 4, mem, 0);
+ store_common(&tmp, 4, mem);
+ restore_flags(flags);
+}
+
+static inline void advance(struct pt_regs *regs)
+{
+ regs->pc = regs->npc;
+ regs->npc += 4;
+}
+
+static inline int floating_point_load_or_store_p(unsigned int insn)
+{
+ return (insn >> 24) & 1;
+}
+
+static inline int ok_for_kernel(unsigned int insn)
+{
+ return !floating_point_load_or_store_p(insn);
+}
+
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
+{
+ enum direction dir = decode_direction(insn);
+ int size = decode_access_size(insn);
+
+ if(!ok_for_kernel(insn) || dir == both) {
+ printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
+ regs->pc);
+ panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
+ /* Not reached... */
+ } else {
+ unsigned long addr = compute_effective_address(regs, insn);
+
+#ifdef DEBUG_MNA
+ printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
+ regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+#endif
+ switch(dir) {
+ case load:
+ do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn));
+ break;
+
+ case store:
+ do_integer_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs);
+ break;
+ case both:
+#if 0 /* unsupported */
+ do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ (unsigned long *) addr);
+ break;
+#endif
+ default:
+ panic("Impossible kernel unaligned trap.");
+ /* Not reached... */
+ }
+ advance(regs);
+ }
+}
+
+static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
+ enum direction dir)
+{
+ unsigned int reg;
+ int retval, check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
+ int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
+
+ if((regs->pc | regs->npc) & 3)
+ return 0;
+
+ /* Must verify_area() in all the necessary places. */
+#define WINREG_ADDR(regnum) ((void *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
+ retval = 0;
+ reg = (insn >> 25) & 0x1f;
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ reg = (insn >> 14) & 0x1f;
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ if(!(insn & 0x2000)) {
+ reg = (insn & 0x1f);
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ }
+ return retval;
+#undef WINREG_ADDR
+}
+
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
+{
+ enum direction dir;
+
+ if(!(current->tss.flags & SPARC_FLAG_UNALIGNED) ||
+ (((insn >> 30) & 3) != 3))
+ goto kill_user;
+ dir = decode_direction(insn);
+ if(!ok_for_user(regs, insn, dir)) {
+ goto kill_user;
+ } else {
+ int size = decode_access_size(insn);
+ unsigned long addr;
+
+ if(floating_point_load_or_store_p(insn)) {
+ printk("User FPU load/store unaligned unsupported.\n");
+ goto kill_user;
+ }
+
+ addr = compute_effective_address(regs, insn);
+ switch(dir) {
+ case load:
+ do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn));
+ break;
+
+ case store:
+ do_integer_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs);
+ break;
+
+ case both:
+ do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ (unsigned long *) addr);
+ break;
+
+ default:
+ panic("Impossible user unaligned trap.");
+ }
+ advance(regs);
+ return;
+ }
+
+kill_user:
+ current->tss.sig_address = regs->pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGBUS, current, 1);
+}
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
new file mode 100644
index 000000000..874d7e0f5
--- /dev/null
+++ b/arch/sparc/kernel/windows.c
@@ -0,0 +1,124 @@
+/* windows.c: Routines to deal with register window management
+ * at the C-code level.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+
+/* Do save's until all user register windows are out of the cpu. */
+void flush_user_windows(void)
+{
+ register int ctr asm("g5");
+
+ ctr = 0;
+ __asm__ __volatile__("
+1:
+ ld [%%g6 + %2], %%g4
+ orcc %%g0, %%g4, %%g0
+ add %0, 1, %0
+ bne 1b
+ save %%sp, -64, %%sp
+2:
+ subcc %0, 1, %0
+ bne 2b
+ restore %%g0, %%g0, %%g0"
+ : "=&r" (ctr)
+ : "0" (ctr),
+ "i" ((const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask)))
+ : "g4");
+}
+
+static inline void shift_window_buffer(int first_win, int last_win, struct thread_struct *tp)
+{
+ int i;
+
+ for(i = first_win; i < last_win; i++) {
+ tp->rwbuf_stkptrs[i] = tp->rwbuf_stkptrs[i+1];
+ memcpy(&tp->reg_window[i], &tp->reg_window[i+1], sizeof(struct reg_window));
+ }
+}
+
+/* Place as many of the user's current register windows
+ * on the stack that we can. Even if the %sp is unaligned
+ * we still copy the window there, the only case that we don't
+ * succeed is if the %sp points to a bum mapping altogether.
+ * setup_frame() and do_sigreturn() use this before shifting
+ * the user stack around. Future instruction and hardware
+ * bug workaround routines will need this functionality as
+ * well.
+ */
+void synchronize_user_stack(void)
+{
+ struct thread_struct *tp;
+ int window;
+
+ flush_user_windows();
+ tp = &current->tss;
+ if(!tp->w_saved)
+ return;
+
+ /* Ok, there is some dirty work to do. */
+ for(window = tp->w_saved - 1; window >= 0; window--) {
+ unsigned long sp = tp->rwbuf_stkptrs[window];
+
+ /* Ok, let it rip. */
+ if(copy_to_user((char *) sp, &tp->reg_window[window],
+ sizeof(struct reg_window)))
+ continue;
+
+ shift_window_buffer(window, tp->w_saved - 1, tp);
+ tp->w_saved--;
+ }
+}
+
+#if 0
+/* An optimization. */
+static inline void copy_aligned_window(void *dest, const void *src)
+{
+ __asm__ __volatile__("ldd [%1], %%g2\n\t"
+ "ldd [%1 + 0x8], %%g4\n\t"
+ "std %%g2, [%0]\n\t"
+ "std %%g4, [%0 + 0x8]\n\t"
+ "ldd [%1 + 0x10], %%g2\n\t"
+ "ldd [%1 + 0x18], %%g4\n\t"
+ "std %%g2, [%0 + 0x10]\n\t"
+ "std %%g4, [%0 + 0x18]\n\t"
+ "ldd [%1 + 0x20], %%g2\n\t"
+ "ldd [%1 + 0x28], %%g4\n\t"
+ "std %%g2, [%0 + 0x20]\n\t"
+ "std %%g4, [%0 + 0x28]\n\t"
+ "ldd [%1 + 0x30], %%g2\n\t"
+ "ldd [%1 + 0x38], %%g4\n\t"
+ "std %%g2, [%0 + 0x30]\n\t"
+ "std %%g4, [%0 + 0x38]\n\t" : :
+ "r" (dest), "r" (src) :
+ "g2", "g3", "g4", "g5");
+}
+#endif
+
+/* Try to push the windows in a threads window buffer to the
+ * user stack. Unaligned %sp's are not allowed here.
+ */
+
+void try_to_clear_window_buffer(struct pt_regs *regs, int who)
+{
+ struct thread_struct *tp;
+ int window;
+
+ flush_user_windows();
+ tp = &current->tss;
+ for(window = 0; window < tp->w_saved; window++) {
+ unsigned long sp = tp->rwbuf_stkptrs[window];
+
+ if((sp & 7) ||
+ copy_to_user((char *) sp, &tp->reg_window[window], REGWIN_SZ))
+ do_exit(SIGILL);
+ }
+ tp->w_saved = 0;
+}
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
new file mode 100644
index 000000000..564c27973
--- /dev/null
+++ b/arch/sparc/kernel/wof.S
@@ -0,0 +1,421 @@
+/* $Id: wof.S,v 1.29 1996/10/11 01:00:04 davem Exp $
+ * wof.S: Sparc window overflow handler.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/contregs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/smp.h>
+#include <asm/asi.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+
+/* WARNING: This routine is hairy and _very_ complicated, but it
+ * must be as fast as possible as it handles the allocation
+ * of register windows to the user and kernel. If you touch
+ * this code be _very_ careful as many other pieces of the
+ * kernel depend upon how this code behaves. You have been
+ * duly warned...
+ */
+
+/* We define macro's for registers which have a fixed
+ * meaning throughout this entire routine. The 'T' in
+ * the comments mean that the register can only be
+ * accessed when in the 'trap' window, 'G' means
+ * accessible in any window. Do not change these registers
+ * after they have been set, until you are ready to return
+ * from the trap.
+ */
+#define t_psr l0 /* %psr at trap time T */
+#define t_pc l1 /* PC for trap return T */
+#define t_npc l2 /* NPC for trap return T */
+#define t_wim l3 /* %wim at trap time T */
+#define saved_g5 l5 /* Global save register T */
+#define saved_g6 l6 /* Global save register T */
+#define curptr g6 /* Gets set to 'current' then stays G */
+
+/* Now registers whose values can change within the handler. */
+#define twin_tmp l4 /* Temp reg, only usable in trap window T */
+#define glob_tmp g5 /* Global temporary reg, usable anywhere G */
+
+ .text
+ .align 4
+ /* BEGINNING OF PATCH INSTRUCTIONS */
+ /* On a 7-window Sparc the boot code patches spnwin_*
+ * instructions with the following ones.
+ */
+ .globl spnwin_patch1_7win, spnwin_patch2_7win, spnwin_patch3_7win
+spnwin_patch1_7win: sll %t_wim, 6, %glob_tmp
+spnwin_patch2_7win: and %glob_tmp, 0x7f, %glob_tmp
+spnwin_patch3_7win: and %twin_tmp, 0x7f, %twin_tmp
+ /* END OF PATCH INSTRUCTIONS */
+
+ /* The trap entry point has done the following:
+ *
+ * rd %psr, %l0
+ * rd %wim, %l3
+ * b spill_window_entry
+ * andcc %l0, PSR_PS, %g0
+ */
+
+ /* Datum current->tss.uwinmask contains at all times a bitmask
+ * where if any user windows are active, at least one bit will
+ * be set in to mask. If no user windows are active, the bitmask
+ * will be all zeroes.
+ */
+ .globl spill_window_entry
+ .globl spnwin_patch1, spnwin_patch2, spnwin_patch3
+spill_window_entry:
+ /* LOCATION: Trap Window */
+
+ mov %g5, %saved_g5 ! save away global temp register
+ mov %g6, %saved_g6 ! save away 'current' ptr register
+
+ /* Compute what the new %wim will be if we save the
+ * window properly in this trap handler.
+ *
+ * newwim = ((%wim>>1) | (%wim<<(nwindows - 1)));
+ */
+ srl %t_wim, 0x1, %twin_tmp
+spnwin_patch1: sll %t_wim, 7, %glob_tmp
+ or %glob_tmp, %twin_tmp, %glob_tmp
+spnwin_patch2: and %glob_tmp, 0xff, %glob_tmp
+
+ /* The trap entry point has set the condition codes
+ * up for us to see if this is from user or kernel.
+ * Get the load of 'curptr' out of the way.
+ */
+ LOAD_CURRENT(curptr, twin_tmp)
+
+ andcc %t_psr, PSR_PS, %g0
+ be,a spwin_fromuser ! all user wins, branch
+ save %g0, %g0, %g0 ! Go where saving will occur
+
+ /* See if any user windows are active in the set. */
+ ld [%curptr + THREAD_UMASK], %twin_tmp ! grab win mask
+ orcc %g0, %twin_tmp, %g0 ! check for set bits
+ bne spwin_exist_uwins ! yep, there are some
+ andn %twin_tmp, %glob_tmp, %twin_tmp ! compute new umask
+
+ /* Save into the window which must be saved and do it.
+ * Basically if we are here, this means that we trapped
+ * from kernel mode with only kernel windows in the register
+ * file.
+ */
+ save %g0, %g0, %g0 ! save into the window to stash away
+ wr %glob_tmp, 0x0, %wim ! set new %wim, this is safe now
+
+spwin_no_userwins_from_kernel:
+ /* LOCATION: Window to be saved */
+
+ STORE_WINDOW(sp) ! stash the window
+ restore %g0, %g0, %g0 ! go back into trap window
+
+ /* LOCATION: Trap window */
+ mov %saved_g5, %g5 ! restore %glob_tmp
+ mov %saved_g6, %g6 ! restore %curptr
+ wr %t_psr, 0x0, %psr ! restore condition codes in %psr
+ WRITE_PAUSE ! waste some time
+ jmp %t_pc ! Return from trap
+ rett %t_npc ! we are done
+
+spwin_exist_uwins:
+ /* LOCATION: Trap window */
+
+ /* Wow, user windows have to be dealt with, this is dirty
+ * and messy as all hell. And difficult to follow if you
+ * are approaching the infamous register window trap handling
+ * problem for the first time. DON'T LOOK!
+ *
+ * Note that how the execution path works out, the new %wim
+ * will be left for us in the global temporary register,
+ * %glob_tmp. We cannot set the new %wim first because we
+ * need to save into the appropriate window without inducing
+ * a trap (traps are off, we'd get a watchdog wheee)...
+ * But first, store the new user window mask calculated
+ * above.
+ */
+ st %twin_tmp, [%curptr + THREAD_UMASK]
+ save %g0, %g0, %g0 ! Go to where the saving will occur
+
+spwin_fromuser:
+ /* LOCATION: Window to be saved */
+ wr %glob_tmp, 0x0, %wim ! Now it is safe to set new %wim
+
+ /* LOCATION: Window to be saved */
+
+ /* This instruction branches to a routine which will check
+ * to validity of the users stack pointer by whatever means
+ * are necessary. This means that this is architecture
+ * specific and thus this branch instruction will need to
+ * be patched at boot time once the machine type is known.
+ * This routine _shall not_ touch %curptr under any
+ * circumstances whatsoever! It will branch back to the
+ * label 'spwin_good_ustack' if the stack is ok but still
+ * needs to be dumped (SRMMU for instance will not need to
+ * do this) or 'spwin_finish_up' if the stack is ok and the
+ * registers have already been saved. If the stack is found
+ * to be bogus for some reason the routine shall branch to
+ * the label 'spwin_user_stack_is_bolixed' which will take
+ * care of things at that point.
+ */
+ .globl C_LABEL(spwin_mmu_patchme)
+C_LABEL(spwin_mmu_patchme): b C_LABEL(spwin_sun4c_stackchk)
+ andcc %sp, 0x7, %g0
+
+spwin_good_ustack:
+ /* LOCATION: Window to be saved */
+
+ /* The users stack is ok and we can safely save it at
+ * %sp.
+ */
+ STORE_WINDOW(sp)
+
+spwin_finish_up:
+ restore %g0, %g0, %g0 /* Back to trap window. */
+
+ /* LOCATION: Trap window */
+
+ /* We have spilled successfully, and we have properly stored
+ * the appropriate window onto the stack.
+ */
+
+ /* Restore saved globals */
+ mov %saved_g5, %g5
+ mov %saved_g6, %g6
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+ jmp %t_pc
+ rett %t_npc
+
+spwin_user_stack_is_bolixed:
+ /* LOCATION: Window to be saved */
+
+ /* Wheee, user has trashed his/her stack. We have to decide
+ * how to proceed based upon whether we came from kernel mode
+ * or not. If we came from kernel mode, toss the window into
+ * a special buffer and proceed, the kernel _needs_ a window
+ * and we could be in an interrupt handler so timing is crucial.
+ * If we came from user land we build a full stack frame and call
+ * c-code to gun down the process.
+ */
+ rd %psr, %glob_tmp
+ andcc %glob_tmp, PSR_PS, %g0
+ bne spwin_bad_ustack_from_kernel
+ nop
+
+ /* Oh well, throw this one window into the per-task window
+ * buffer, the first one.
+ */
+ st %sp, [%curptr + THREAD_STACK_PTRS]
+ STORE_WINDOW(curptr + THREAD_REG_WINDOW)
+ restore %g0, %g0, %g0
+
+ /* LOCATION: Trap Window */
+
+ /* Back in the trap window, update winbuffer save count. */
+ mov 1, %glob_tmp
+ st %glob_tmp, [%curptr + THREAD_W_SAVED]
+
+ /* Compute new user window mask. What we are basically
+ * doing is taking two windows, the invalid one at trap
+ * time and the one we attempted to throw onto the users
+ * stack, and saying that everything else is an ok user
+ * window. umask = ((~(%t_wim | %wim)) & valid_wim_bits)
+ */
+ rd %wim, %twin_tmp
+ or %twin_tmp, %t_wim, %twin_tmp
+ not %twin_tmp
+spnwin_patch3: and %twin_tmp, 0xff, %twin_tmp ! patched on 7win Sparcs
+ st %twin_tmp, [%curptr + THREAD_UMASK]
+
+ /* Jump onto kernel stack for this process... */
+ ld [%curptr + TASK_SAVED_KSTACK], %sp
+
+ /* Restore the saved globals and build a pt_regs frame. */
+ mov %saved_g5, %g5
+ mov %g6, %l4
+ mov %saved_g6, %g6
+ STORE_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
+ mov %l4, %g6
+
+ ENTER_SYSCALL
+
+ /* Turn on traps and call c-code to deal with it. */
+ wr %t_psr, PSR_ET, %psr
+ WRITE_PAUSE
+
+ call C_LABEL(window_overflow_fault)
+ nop
+
+ /* Return from trap if C-code actually fixes things, if it
+ * doesn't then we never get this far as the process will
+ * be given the look of death from Commander Peanut.
+ */
+ b ret_trap_entry
+ clr %l6
+
+spwin_bad_ustack_from_kernel:
+ /* LOCATION: Window to be saved */
+
+ /* The kernel provoked a spill window trap, but the window we
+ * need to save is a user one and the process has trashed its
+ * stack pointer. We need to be quick, so we throw it into
+ * a per-process window buffer until we can properly handle
+ * this later on.
+ */
+ SAVE_BOLIXED_USER_STACK(curptr, glob_tmp)
+ restore %g0, %g0, %g0
+
+ /* LOCATION: Trap window */
+
+ /* Restore globals, condition codes in the %psr and
+ * return from trap. Note, restoring %g6 when returning
+ * to kernel mode is not necessarily these days. ;-)
+ */
+ mov %saved_g5, %g5
+ mov %saved_g6, %g6
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+/* Undefine the register macros which would only cause trouble
+ * if used below. This helps find 'stupid' coding errors that
+ * produce 'odd' behavior. The routines below are allowed to
+ * make usage of glob_tmp and t_psr so we leave them defined.
+ */
+#undef twin_tmp
+#undef curptr
+#undef t_pc
+#undef t_npc
+#undef t_wim
+#undef saved_g5
+#undef saved_g6
+
+/* Now come the per-architecture window overflow stack checking routines.
+ * As noted above %curptr cannot be touched by this routine at all.
+ */
+
+ .globl C_LABEL(spwin_sun4c_stackchk)
+C_LABEL(spwin_sun4c_stackchk):
+ /* LOCATION: Window to be saved on the stack */
+
+ /* See if the stack is in the address space hole but first,
+ * check results of callers andcc %sp, 0x7, %g0
+ */
+ be 1f
+ sra %sp, 29, %glob_tmp
+
+ b spwin_user_stack_is_bolixed + 0x4
+ rd %psr, %glob_tmp
+
+1:
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ and %sp, 0xfff, %glob_tmp ! delay slot
+
+ b spwin_user_stack_is_bolixed + 0x4
+ rd %psr, %glob_tmp
+
+ /* See if our dump area will be on more than one
+ * page.
+ */
+1:
+ add %glob_tmp, 0x38, %glob_tmp
+ andncc %glob_tmp, 0xff8, %g0
+ be spwin_sun4c_onepage ! only one page to check
+ lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways
+
+spwin_sun4c_twopages:
+ /* Is first page ok permission wise? */
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6
+ be 1f
+ add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */
+
+ b spwin_user_stack_is_bolixed + 0x4
+ rd %psr, %glob_tmp
+
+1:
+ sra %glob_tmp, 29, %glob_tmp
+ add %glob_tmp, 0x1, %glob_tmp
+ andncc %glob_tmp, 0x1, %g0
+ be 1f
+ add %sp, 0x38, %glob_tmp
+
+ b spwin_user_stack_is_bolixed + 0x4
+ rd %psr, %glob_tmp
+
+1:
+ lda [%glob_tmp] ASI_PTE, %glob_tmp
+
+spwin_sun4c_onepage:
+ srl %glob_tmp, 29, %glob_tmp
+ cmp %glob_tmp, 0x6 ! can user write to it?
+ be spwin_good_ustack ! success
+ nop
+
+ b spwin_user_stack_is_bolixed + 0x4
+ rd %psr, %glob_tmp
+
+ /* This is a generic SRMMU routine. As far as I know this
+ * works for all current v8/srmmu implementations, we'll
+ * see...
+ */
+ .globl C_LABEL(spwin_srmmu_stackchk)
+C_LABEL(spwin_srmmu_stackchk):
+ /* LOCATION: Window to be saved on the stack */
+
+ /* Because of SMP concerns and speed we play a trick.
+ * We disable fault traps in the MMU control register,
+ * Execute the stores, then check the fault registers
+ * to see what happens. I can hear Linus now
+ * "disgusting... broken hardware...".
+ *
+ * But first, check to see if the users stack has ended
+ * up in kernel vma, then we would succeed for the 'wrong'
+ * reason... ;( Note that the 'sethi' below assumes the
+ * kernel is page aligned, which should always be the case.
+ */
+ /* Check results of callers andcc %sp, 0x7, %g0 */
+ sethi %hi(C_LABEL(page_offset)), %glob_tmp
+ bne spwin_user_stack_is_bolixed
+ ld [%glob_tmp + %lo(C_LABEL(page_offset))], %glob_tmp
+ cmp %glob_tmp, %sp
+ bleu spwin_user_stack_is_bolixed
+ mov AC_M_SFSR, %glob_tmp
+
+ /* Clear the fault status and turn on the no_fault bit. */
+ lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR
+
+ lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control
+ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it
+
+ /* Dump the registers and cross fingers. */
+ STORE_WINDOW(sp)
+
+ /* Clear the no_fault bit and check the status. */
+ andn %glob_tmp, 0x2, %glob_tmp
+ sta %glob_tmp, [%g0] ASI_M_MMUREGS
+
+ mov AC_M_SFAR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %g0
+
+ mov AC_M_SFSR, %glob_tmp
+ lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp
+ andcc %glob_tmp, 0x2, %g0 ! did we fault?
+ be,a spwin_finish_up + 0x4 ! cool beans, success
+ restore %g0, %g0, %g0
+
+ b spwin_user_stack_is_bolixed + 0x4 ! we faulted, ugh
+ rd %psr, %glob_tmp
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
new file mode 100644
index 000000000..2a999c606
--- /dev/null
+++ b/arch/sparc/kernel/wuf.S
@@ -0,0 +1,351 @@
+/* $Id: wuf.S,v 1.27 1996/10/11 01:00:06 davem Exp $
+ * wuf.S: Window underflow trap handler for the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller
+ */
+
+#include <asm/cprefix.h>
+#include <asm/contregs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/smp.h>
+#include <asm/asi.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+
+/* Just like the overflow handler we define macros for registers
+ * with fixed meanings in this routine.
+ */
+#define t_psr l0
+#define t_pc l1
+#define t_npc l2
+#define t_wim l3
+/* Don't touch the above registers or else you die horribly... */
+
+/* Now macros for the available scratch registers in this routine. */
+#define twin_tmp1 l4
+#define twin_tmp2 l5
+
+#define curptr g6
+
+ .text
+ .align 4
+
+ /* The trap entry point has executed the following:
+ *
+ * rd %psr, %l0
+ * rd %wim, %l3
+ * b fill_window_entry
+ * andcc %l0, PSR_PS, %g0
+ */
+
+ /* Datum current->tss.uwinmask contains at all times a bitmask
+ * where if any user windows are active, at least one bit will
+	 * be set in the mask. If no user windows are active, the bitmask
+ * will be all zeroes.
+ */
+
+ /* To get an idea of what has just happened to cause this
+ * trap take a look at this diagram:
+ *
+ * 1 2 3 4 <-- Window number
+ * ----------
+ * T O W I <-- Symbolic name
+ *
+ * O == the window that execution was in when
+ * the restore was attempted
+ *
+ * T == the trap itself has save'd us into this
+ * window
+ *
+ * W == this window is the one which is now invalid
+ * and must be made valid plus loaded from the
+ * stack
+ *
+ * I == this window will be the invalid one when we
+ * are done and return from trap if successful
+ */
+
+ /* BEGINNING OF PATCH INSTRUCTIONS */
+
+ /* On 7-window Sparc the boot code patches fnwin_patch1
+ * with the following instruction.
+ */
+ .globl fnwin_patch1_7win, fnwin_patch2_7win
+fnwin_patch1_7win: srl %t_wim, 6, %twin_tmp2
+fnwin_patch2_7win: and %twin_tmp1, 0x7f, %twin_tmp1
+ /* END OF PATCH INSTRUCTIONS */
+
+ .globl fill_window_entry, fnwin_patch1, fnwin_patch2
+fill_window_entry:
+ /* LOCATION: Window 'T' */
+
+ /* Compute what the new %wim is going to be if we retrieve
+ * the proper window off of the stack.
+ */
+ sll %t_wim, 1, %twin_tmp1
+fnwin_patch1: srl %t_wim, 7, %twin_tmp2
+ or %twin_tmp1, %twin_tmp2, %twin_tmp1
+fnwin_patch2: and %twin_tmp1, 0xff, %twin_tmp1
+
+ wr %twin_tmp1, 0x0, %wim /* Make window 'I' invalid */
+
+ andcc %t_psr, PSR_PS, %g0
+ be fwin_from_user
+ restore %g0, %g0, %g0 /* Restore to window 'O' */
+
+ /* Trapped from kernel, we trust that the kernel does not
+ * 'over restore' sorta speak and just grab the window
+ * from the stack and return. Easy enough.
+ */
+fwin_from_kernel:
+ /* LOCATION: Window 'O' */
+
+ restore %g0, %g0, %g0
+
+ /* LOCATION: Window 'W' */
+
+ LOAD_WINDOW(sp) /* Load it up */
+
+ /* Spin the wheel... */
+ save %g0, %g0, %g0
+ save %g0, %g0, %g0
+ /* I'd like to buy a vowel please... */
+
+ /* LOCATION: Window 'T' */
+
+ /* Now preserve the condition codes in %psr, pause, and
+ * return from trap. This is the simplest case of all.
+ */
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+fwin_from_user:
+ /* LOCATION: Window 'O' */
+
+ restore %g0, %g0, %g0 /* Restore to window 'W' */
+
+ /* LOCATION: Window 'W' */
+
+ /* Branch to the architecture specific stack validation
+ * routine. They can be found below...
+ */
+ .globl C_LABEL(fwin_mmu_patchme)
+C_LABEL(fwin_mmu_patchme): b C_LABEL(sun4c_fwin_stackchk)
+ andcc %sp, 0x7, %g0
+
+fwin_user_stack_is_bolixed:
+ /* LOCATION: Window 'W' */
+
+ /* Place a pt_regs frame on the kernel stack, save back
+ * to the trap window and call c-code to deal with this.
+ */
+ LOAD_CURRENT(l4, l5)
+ ld [%l4 + TASK_SAVED_KSTACK], %l5
+
+ /* Store globals into pt_regs frame. */
+ STORE_PT_GLOBALS(l5)
+ STORE_PT_YREG(l5, g3)
+
+ /* Save kernel %sp in global while we change windows. */
+ mov %l5, %g2
+ mov %l4, %curptr
+
+ save %g0, %g0, %g0
+
+ /* LOCATION: Window 'O' */
+
+ rd %psr, %g3 /* Read %psr in live user window */
+ mov %fp, %g4 /* Save bogus frame pointer. */
+
+ save %g0, %g0, %g0
+
+ /* LOCATION: Window 'T' */
+
+ mov %g2, %sp /* Jump onto kernel %sp being held */
+
+ /* Build rest of pt_regs. */
+ STORE_PT_INS(sp)
+ STORE_PT_PRIV(sp, t_psr, t_pc, t_npc)
+
+ /* re-set trap time %wim value */
+ wr %t_wim, 0x0, %wim
+
+ /* Fix users window mask and buffer save count. */
+ mov 0x1, %g5
+ sll %g5, %g3, %g5
+ st %g5, [%curptr + THREAD_UMASK] ! one live user window still
+ st %g0, [%curptr + THREAD_W_SAVED] ! no windows in the buffer
+
+ ENTER_SYSCALL
+
+ wr %t_psr, PSR_ET, %psr ! enable traps
+ WRITE_PAUSE
+
+ call C_LABEL(window_underflow_fault)
+ mov %g4, %o0
+
+ b ret_trap_entry
+ clr %l6
+
+fwin_user_stack_is_ok:
+ /* LOCATION: Window 'W' */
+
+	/* The user's stack area is kosher and mapped, load the
+ * window and fall through to the finish up routine.
+ */
+ LOAD_WINDOW(sp)
+
+ /* Round and round she goes... */
+ save %g0, %g0, %g0 /* Save to window 'O' */
+ save %g0, %g0, %g0 /* Save to window 'T' */
+ /* Where she'll trap nobody knows... */
+
+ /* LOCATION: Window 'T' */
+
+fwin_user_finish_up:
+ /* LOCATION: Window 'T' */
+
+ wr %t_psr, 0x0, %psr
+ WRITE_PAUSE
+
+ jmp %t_pc
+ rett %t_npc
+
+	/* Here come the architecture specific checks for stack
+ * mappings. Note that unlike the window overflow handler
+ * we only need to check whether the user can read from
+ * the appropriate addresses. Also note that we are in
+ * an invalid window which will be loaded, and this means
+ * that until we actually load the window up we are free
+ * to use any of the local registers contained within.
+ *
+	 * On success these routines branch to fwin_user_stack_is_ok
+ * if the area at %sp is user readable and the window still
+ * needs to be loaded, else fwin_user_finish_up if the
+ * routine has done the loading itself. On failure (bogus
+ * user stack) the routine shall branch to the label called
+ * fwin_user_stack_is_bolixed.
+ *
+ * Contrary to the arch-specific window overflow stack
+ * check routines in wof.S, these routines are free to use
+ * any of the local registers they want to as this window
+ * does not belong to anyone at this point, however the
+ * outs and ins are still verboten as they are part of
+	 * 'someone else's' window possibly.
+ */
+
+ .align 4
+ .globl C_LABEL(sun4c_fwin_stackchk)
+C_LABEL(sun4c_fwin_stackchk):
+ /* LOCATION: Window 'W' */
+
+ /* Caller did 'andcc %sp, 0x7, %g0' */
+ be 1f
+ and %sp, 0xfff, %l0 ! delay slot
+
+ b,a fwin_user_stack_is_bolixed
+
+ /* See if we have to check the sanity of one page or two */
+1:
+ add %l0, 0x38, %l0
+ sra %sp, 29, %l5
+ add %l5, 0x1, %l5
+ andncc %l5, 0x1, %g0
+ be 1f
+ andncc %l0, 0xff8, %g0
+
+ b,a fwin_user_stack_is_bolixed /* %sp is in vma hole, yuck */
+
+1:
+ be sun4c_fwin_onepage /* Only one page to check */
+ lda [%sp] ASI_PTE, %l1
+sun4c_fwin_twopages:
+ add %sp, 0x38, %l0
+ sra %l0, 29, %l5
+ add %l5, 0x1, %l5
+ andncc %l5, 0x1, %g0
+ be 1f
+ lda [%l0] ASI_PTE, %l1
+
+ b,a fwin_user_stack_is_bolixed /* Second page in vma hole */
+
+1:
+ srl %l1, 29, %l1
+ andcc %l1, 0x4, %g0
+ bne sun4c_fwin_onepage
+ lda [%sp] ASI_PTE, %l1
+
+ b,a fwin_user_stack_is_bolixed /* Second page has bad perms */
+
+sun4c_fwin_onepage:
+ srl %l1, 29, %l1
+ andcc %l1, 0x4, %g0
+ bne fwin_user_stack_is_ok
+ nop
+
+ /* A page had bad page permissions, losing... */
+ b,a fwin_user_stack_is_bolixed
+
+ .globl C_LABEL(srmmu_fwin_stackchk)
+C_LABEL(srmmu_fwin_stackchk):
+ /* LOCATION: Window 'W' */
+
+ /* Caller did 'andcc %sp, 0x7, %g0' */
+ sethi %hi(C_LABEL(page_offset)), %l5
+ bne fwin_user_stack_is_bolixed
+ ld [%l5 + %lo(C_LABEL(page_offset))], %l5
+
+	/* Check if the user's stack is in kernel vma, then our
+ * trial and error technique below would succeed for
+ * the 'wrong' reason.
+ */
+ mov AC_M_SFSR, %l4
+ cmp %l5, %sp
+ bleu fwin_user_stack_is_bolixed
+ lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status
+
+ /* The technique is, turn off faults on this processor,
+ * just let the load rip, then check the sfsr to see if
+ * a fault did occur. Then we turn on fault traps again
+ * and branch conditionally based upon what happened.
+ */
+ lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg
+ or %l5, 0x2, %l5 ! turn on no-fault bit
+ sta %l5, [%g0] ASI_M_MMUREGS ! store it
+
+ /* Cross fingers and go for it. */
+ LOAD_WINDOW(sp)
+
+ /* A penny 'saved'... */
+ save %g0, %g0, %g0
+ save %g0, %g0, %g0
+ /* Is a BADTRAP earned... */
+
+ /* LOCATION: Window 'T' */
+
+ lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again
+ andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
+ sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it
+
+ mov AC_M_SFAR, %twin_tmp2
+ lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address
+
+ mov AC_M_SFSR, %twin_tmp2
+ lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status
+ andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
+ be,a fwin_user_finish_up + 0x4
+ wr %t_psr, 0x0, %psr
+
+ /* Did I ever tell you about my window lobotomy?
+ * anyways... fwin_user_stack_is_bolixed expects
+ * to be in window 'W' so make it happy or else
+ * we watchdog badly.
+ */
+ restore %g0, %g0, %g0
+ b fwin_user_stack_is_bolixed ! oh well
+ restore %g0, %g0, %g0
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 1f2ce0e1c..2cb74336f 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -1,22 +1,44 @@
-#
+# $Id: Makefile,v 1.12 1996/10/27 08:36:26 davem Exp $
# Makefile for Sparc library files..
#
CFLAGS := $(CFLAGS) -ansi
-.c.s:
- $(CC) $(CFLAGS) -S $<
-.s.o:
- $(AS) -c -o $*.o $<
-.c.o:
- $(CC) $(CFLAGS) -c $<
-
-OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
+OBJS = mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o memcpy.o memset.o \
+ strlen.o checksum.o blockops.o memscan.o memcmp.o strncmp.o \
+ strncpy_from_user.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
sync
+checksum.o: checksum.S
+ $(CC) -ansi -c -o checksum.o checksum.S
+
+memcpy.o: memcpy.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S
+
+memcmp.o: memcmp.S
+ $(CC) -ansi -c -o memcmp.o memcmp.S
+
+memscan.o: memscan.S
+ $(CC) -ansi -c -o memscan.o memscan.S
+
+strncmp.o: strncmp.S
+ $(CC) -ansi -c -o strncmp.o strncmp.S
+
+strncpy_from_user.o: strncpy_from_user.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S
+
+blockops.o: blockops.S
+ $(CC) -ansi -c -o blockops.o blockops.S
+
+memset.o: memset.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
+
+strlen.o: strlen.S
+ $(CC) -ansi -c -o strlen.o strlen.S
+
mul.o: mul.S
$(CC) -c -o mul.o mul.S
@@ -40,9 +62,4 @@ ashrdi3.o: ashrdi3.S
dep:
-#
-# include a dependency file if one exists
-#
-ifeq (.depend,$(wildcard .depend))
-include .depend
-endif
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index c672d2c9f..bf589c283 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -1,4 +1,5 @@
-/* ashrdi3.S: The filesystem code creates all kinds of references to
+/* $Id: ashrdi3.S,v 1.3 1996/09/07 23:18:10 davem Exp $
+ * ashrdi3.S: The filesystem code creates all kinds of references to
* this little routine on the sparc with gcc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -10,19 +11,26 @@
C_LABEL(__ashrdi3):
tst %o2
be 3f
- or %g0, 32, %g2
+ or %g0, 32, %g2
+
sub %g2, %o2, %g2
+
tst %g2
bg 1f
- sra %o0, %o2, %o4
+ sra %o0, %o2, %o4
+
sra %o0, 31, %o4
sub %g0, %g2, %g2
ba 2f
- sra %o0, %g2, %o5
-1: sll %o0, %g2, %g3
+ sra %o0, %g2, %o5
+
+1:
+ sll %o0, %g2, %g3
srl %o1, %o2, %g2
or %g2, %g3, %o5
-2: or %g0, %o4, %o0
+2:
+ or %g0, %o4, %o0
or %g0, %o5, %o1
-3: jmpl %o7 + 8, %g0
- nop
+3:
+ jmpl %o7 + 8, %g0
+ nop
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
new file mode 100644
index 000000000..f8a9e80df
--- /dev/null
+++ b/arch/sparc/lib/blockops.S
@@ -0,0 +1,103 @@
+/* $Id: blockops.S,v 1.5 1996/09/24 05:22:56 davem Exp $
+ * blockops.S: Common block zero optimized routines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+
+ /* Zero out 64 bytes of memory at (buf + offset).
+ * Assumes %g1 contains zero.
+ */
+#define BLAST_BLOCK(buf, offset) \
+ std %g0, [buf + offset + 0x38]; \
+ std %g0, [buf + offset + 0x30]; \
+ std %g0, [buf + offset + 0x28]; \
+ std %g0, [buf + offset + 0x20]; \
+ std %g0, [buf + offset + 0x18]; \
+ std %g0, [buf + offset + 0x10]; \
+ std %g0, [buf + offset + 0x08]; \
+ std %g0, [buf + offset + 0x00];
+
+ /* Copy 32 bytes of memory at (src + offset) to
+ * (dst + offset).
+ */
+#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [src + offset + 0x18], t0; \
+ ldd [src + offset + 0x10], t2; \
+ ldd [src + offset + 0x08], t4; \
+ ldd [src + offset + 0x00], t6; \
+ std t0, [dst + offset + 0x18]; \
+ std t2, [dst + offset + 0x10]; \
+ std t4, [dst + offset + 0x08]; \
+ std t6, [dst + offset + 0x00];
+
+ /* Profiling evidence indicates that memset() is
+ * commonly called for blocks of size PAGE_SIZE,
+ * and (2 * PAGE_SIZE) (for kernel stacks)
+ * and with a second arg of zero. We assume in
+ * all of these cases that the buffer is aligned
+	 * on at least an 8 byte boundary.
+ *
+ * Therefore we special case them to make them
+ * as fast as possible.
+ */
+
+ .text
+ .align 4
+
+ .globl C_LABEL(bzero_2page), C_LABEL(bzero_1page)
+C_LABEL(bzero_2page):
+ /* %o0 = buf */
+ or %g0, %g0, %g1
+ or %o0, %g0, %o1
+ or %g0, 0x20, %g2
+1:
+ BLAST_BLOCK(%o0, 0x00)
+ BLAST_BLOCK(%o0, 0x40)
+ BLAST_BLOCK(%o0, 0x80)
+ BLAST_BLOCK(%o0, 0xc0)
+ subcc %g2, 1, %g2
+ bne 1b
+ add %o0, 0x100, %o0
+
+ retl
+ mov %o1, %o0
+
+C_LABEL(bzero_1page):
+ /* %o0 = buf */
+ or %g0, %g0, %g1
+ or %o0, %g0, %o1
+ or %g0, 0x10, %g2
+1:
+ BLAST_BLOCK(%o0, 0x00)
+ BLAST_BLOCK(%o0, 0x40)
+ BLAST_BLOCK(%o0, 0x80)
+ BLAST_BLOCK(%o0, 0xc0)
+ subcc %g2, 1, %g2
+ bne 1b
+ add %o0, 0x100, %o0
+
+ retl
+ mov %o1, %o0
+
+ .globl C_LABEL(__copy_1page)
+C_LABEL(__copy_1page):
+ /* %o0 = dst, %o1 = src */
+ or %g0, 0x10, %g1
+1:
+ MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5, %g2, %g3, %g4, %g5)
+ subcc %g1, 1, %g1
+ add %o0, 0x100, %o0
+ bne 1b
+ add %o1, 0x100, %o1
+
+ retl
+ nop
diff --git a/arch/sparc/lib/checksum.S b/arch/sparc/lib/checksum.S
new file mode 100644
index 000000000..a71371bf8
--- /dev/null
+++ b/arch/sparc/lib/checksum.S
@@ -0,0 +1,439 @@
+/* checksum.S: Sparc optimized checksum code.
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1995 Miguel de Icaza
+ * Copyright(C) 1996 David S. Miller
+ *
+ * derived from:
+ * Linux/Alpha checksum c-code
+ * Linux/ix86 inline checksum assembly
+ *	RFC1071 Computing the Internet Checksum (esp. Jacobson's m68k code)
+ * David Mosberger-Tang for optimized reference c-code
+ * BSD4.4 portable checksum routine
+ */
+
+#include <asm-sparc/cprefix.h>
+
+#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
+ ldd [buf + offset + 0x00], t0; \
+ ldd [buf + offset + 0x08], t2; \
+ addxcc t0, sum, sum; \
+ addxcc t1, sum, sum; \
+ ldd [buf + offset + 0x10], t4; \
+ addxcc t2, sum, sum; \
+ addxcc t3, sum, sum; \
+ ldd [buf + offset + 0x18], t0; \
+ addxcc t4, sum, sum; \
+ addxcc t5, sum, sum; \
+ addxcc t0, sum, sum; \
+ addxcc t1, sum, sum;
+
+#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \
+ ldd [buf - offset - 0x08], t0; \
+ ldd [buf - offset - 0x00], t2; \
+ addxcc t0, sum, sum; \
+ addxcc t1, sum, sum; \
+ addxcc t2, sum, sum; \
+ addxcc t3, sum, sum;
+
+ /* Do end cruft out of band to get better cache patterns. */
+csum_partial_end_cruft:
+ be 1f ! caller asks %o1 & 0x8
+ andcc %o1, 4, %g0 ! nope, check for word remaining
+ ldd [%o0], %g2 ! load two
+ addcc %g2, %o2, %o2 ! add first word to sum
+ addxcc %g3, %o2, %o2 ! add second word as well
+ add %o0, 8, %o0 ! advance buf ptr
+ addx %g0, %o2, %o2 ! add in final carry
+ andcc %o1, 4, %g0 ! check again for word remaining
+1: be 1f ! nope, skip this code
+ andcc %o1, 3, %o1 ! check for trailing bytes
+ ld [%o0], %g2 ! load it
+ addcc %g2, %o2, %o2 ! add to sum
+ add %o0, 4, %o0 ! advance buf ptr
+ addx %g0, %o2, %o2 ! add in final carry
+ andcc %o1, 3, %g0 ! check again for trailing bytes
+1: be 1f ! no trailing bytes, return
+ addcc %o1, -1, %g0 ! only one byte remains?
+ bne 2f ! at least two bytes more
+ subcc %o1, 2, %o1 ! only two bytes more?
+ b 4f ! only one byte remains
+ or %g0, %g0, %o4 ! clear fake hword value
+2: lduh [%o0], %o4 ! get hword
+ be 6f ! jmp if only hword remains
+ add %o0, 2, %o0 ! advance buf ptr either way
+ sll %o4, 16, %o4 ! create upper hword
+4: ldub [%o0], %o5 ! get final byte
+ sll %o5, 8, %o5 ! put into place
+	or	%o5, %o4, %o4			! coalesce with hword (if any)
+6: addcc %o4, %o2, %o2 ! add to sum
+1: retl ! get outta here
+ addx %g0, %o2, %o0 ! add final carry into retval
+
+ /* Also do alignment out of band to get better cache patterns. */
+csum_partial_fix_alignment:
+ cmp %o1, 6
+ bl cpte - 0x4
+ andcc %o0, 0x2, %g0
+ be 1f
+ andcc %o0, 0x4, %g0
+ lduh [%o0 + 0x00], %g2
+ sub %o1, 2, %o1
+ add %o0, 2, %o0
+ sll %g2, 16, %g2
+ addcc %g2, %o2, %o2
+ srl %o2, 16, %g3
+ addx %g0, %g3, %g2
+ sll %o2, 16, %o2
+ sll %g2, 16, %g3
+ srl %o2, 16, %o2
+ andcc %o0, 0x4, %g0
+ or %g3, %o2, %o2
+1: be cpa
+ andcc %o1, 0xffffff80, %o3
+ ld [%o0 + 0x00], %g2
+ sub %o1, 4, %o1
+ addcc %g2, %o2, %o2
+ add %o0, 4, %o0
+ addx %g0, %o2, %o2
+ b cpa
+ andcc %o1, 0xffffff80, %o3
+
+ /* The common case is to get called with a nicely aligned
+ * buffer of size 0x20. Follow the code path for that case.
+ */
+ .globl C_LABEL(csum_partial)
+C_LABEL(csum_partial): /* %o0=buf, %o1=len, %o2=sum */
+ andcc %o0, 0x7, %g0 ! alignment problems?
+ bne csum_partial_fix_alignment ! yep, handle it
+ sethi %hi(cpte - 8), %g7 ! prepare table jmp ptr
+ andcc %o1, 0xffffff80, %o3 ! num loop iterations
+cpa: be 3f ! none to do
+ andcc %o1, 0x70, %g1 ! clears carry flag too
+5: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ addx %g0, %o2, %o2 ! sink in final carry
+ subcc %o3, 128, %o3 ! detract from loop iters
+ bne 5b ! more to do
+ add %o0, 128, %o0 ! advance buf ptr
+ andcc %o1, 0x70, %g1 ! clears carry flag too
+3: be cpte ! nope
+ andcc %o1, 0xf, %g0 ! anything left at all?
+ srl %g1, 1, %o4 ! compute offset
+ sub %g7, %g1, %g7 ! adjust jmp ptr
+ sub %g7, %o4, %g7 ! final jmp ptr adjust
+ jmp %g7 + %lo(cpte - 8) ! enter the table
+ add %o0, %g1, %o0 ! advance buf ptr
+cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
+ addx %g0, %o2, %o2 ! fetch final carry
+ andcc %o1, 0xf, %g0 ! anything left at all?
+cpte: bne csum_partial_end_cruft ! yep, handle it
+ andcc %o1, 8, %g0 ! check how much
+cpout: retl ! get outta here
+ mov %o2, %o0 ! return computed csum
+
+ /* This aligned version executes typically in 8.5 superscalar cycles, this
+ * is the best I can do. I say 8.5 because the final add will pair with
+ * the next ldd in the main unrolled loop. Thus the pipe is always full.
+ */
+#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [src + off + 0x00], t0; \
+ ldd [src + off + 0x08], t2; \
+ addxcc t0, sum, sum; \
+ ldd [src + off + 0x10], t4; \
+ addxcc t1, sum, sum; \
+ ldd [src + off + 0x18], t6; \
+ addxcc t2, sum, sum; \
+ std t0, [dst + off + 0x00]; \
+ addxcc t3, sum, sum; \
+ std t2, [dst + off + 0x08]; \
+ addxcc t4, sum, sum; \
+ std t4, [dst + off + 0x10]; \
+ addxcc t5, sum, sum; \
+ std t6, [dst + off + 0x18]; \
+ addxcc t6, sum, sum; \
+ addxcc t7, sum, sum;
+
+ /* 12 superscalar cycles seems to be the limit for this case,
+ * because of this we thus do all the ldd's together to get
+ * Viking MXCC into streaming mode. Ho hum...
+ */
+#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [src + off + 0x00], t0; \
+ ldd [src + off + 0x08], t2; \
+ ldd [src + off + 0x10], t4; \
+ ldd [src + off + 0x18], t6; \
+ st t0, [dst + off + 0x00]; \
+ addxcc t0, sum, sum; \
+ st t1, [dst + off + 0x04]; \
+ addxcc t1, sum, sum; \
+ st t2, [dst + off + 0x08]; \
+ addxcc t2, sum, sum; \
+ st t3, [dst + off + 0x0c]; \
+ addxcc t3, sum, sum; \
+ st t4, [dst + off + 0x10]; \
+ addxcc t4, sum, sum; \
+ st t5, [dst + off + 0x14]; \
+ addxcc t5, sum, sum; \
+ st t6, [dst + off + 0x18]; \
+ addxcc t6, sum, sum; \
+ st t7, [dst + off + 0x1c]; \
+ addxcc t7, sum, sum;
+
+ /* Yuck, 6 superscalar cycles... */
+#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
+ ldd [src - off - 0x08], t0; \
+ ldd [src - off - 0x00], t2; \
+ addxcc t0, sum, sum; \
+ st t0, [dst - off - 0x08]; \
+ addxcc t1, sum, sum; \
+ st t1, [dst - off - 0x04]; \
+ addxcc t2, sum, sum; \
+ st t2, [dst - off - 0x00]; \
+ addxcc t3, sum, sum; \
+ st t3, [dst - off + 0x04];
+
+ /* Handle the end cruft code out of band for better cache patterns. */
+cc_end_cruft:
+ be 1f
+ andcc %o3, 4, %g0
+ ldd [%o0 + 0x00], %g2
+ add %o1, 8, %o1
+ addcc %g2, %g7, %g7
+ add %o0, 8, %o0
+ addxcc %g3, %g7, %g7
+ st %g2, [%o1 - 0x08]
+ addx %g0, %g7, %g7
+ andcc %o3, 4, %g0
+ st %g3, [%o1 - 0x04]
+1: be 1f
+ andcc %o3, 3, %o3
+ ld [%o0 + 0x00], %g2
+ add %o1, 4, %o1
+ addcc %g2, %g7, %g7
+ st %g2, [%o1 - 0x04]
+ addx %g0, %g7, %g7
+ add %o0, 4, %o0
+ andcc %o3, 3, %g0
+1: be 1f
+ addcc %o3, -1, %g0
+ bne 2f
+ subcc %o3, 2, %o3
+ b 4f
+ or %g0, %g0, %o4
+2: lduh [%o0 + 0x00], %o4
+ add %o0, 2, %o0
+ sth %o4, [%o1 + 0x00]
+ be 6f
+ add %o1, 2, %o1
+ sll %o4, 16, %o4
+4: ldub [%o0 + 0x00], %o5
+ stb %o5, [%o1 + 0x00]
+ sll %o5, 8, %o5
+ or %o5, %o4, %o4
+6: addcc %o4, %g7, %g7
+1: retl
+ addx %g0, %g7, %o0
+
+ /* Also, handle the alignment code out of band. */
+cc_dword_align:
+ cmp %g1, 6
+ bl,a ccte
+ andcc %g1, 0xf, %o3
+ andcc %o0, 0x1, %g0
+ bne ccslow
+ andcc %o0, 0x2, %g0
+ be 1f
+ andcc %o0, 0x4, %g0
+ lduh [%o0 + 0x00], %g2
+ sub %g1, 2, %g1
+ sth %g2, [%o1 + 0x00]
+ add %o0, 2, %o0
+ sll %g2, 16, %g2
+ addcc %g2, %g7, %g7
+ add %o1, 2, %o1
+ srl %g7, 16, %g3
+ addx %g0, %g3, %g2
+ sll %g7, 16, %g7
+ sll %g2, 16, %g3
+ srl %g7, 16, %g7
+ andcc %o0, 0x4, %g0
+ or %g3, %g7, %g7
+1: be 3f
+ andcc %g1, 0xffffff80, %g0
+ ld [%o0 + 0x00], %g2
+ sub %g1, 4, %g1
+ st %g2, [%o1 + 0x00]
+ add %o0, 4, %o0
+ addcc %g2, %g7, %g7
+ add %o1, 4, %o1
+ addx %g0, %g7, %g7
+ b 3f
+ andcc %g1, 0xffffff80, %g0
+
+ /* Sun, you just can't beat me, you just can't. Stop trying,
+ * give up. I'm serious, I am going to kick the living shit
+ * out of you, game over, lights out.
+ */
+ .align 8
+ .globl C_LABEL(csum_partial_copy)
+C_LABEL(csum_partial_copy): /* %o0=src, %o1=dest, %o2=len, %o3=sum */
+ xor %o0, %o1, %o4 ! get changing bits
+ mov %o2, %g1 ! free up %o2
+ andcc %o4, 3, %g0 ! check for mismatched alignment
+ bne ccslow ! better this than unaligned/fixups
+ andcc %o0, 7, %g0 ! need to align things?
+ mov %o3, %g7 ! free up %o3
+ bne cc_dword_align ! yes, we check for short lengths there
+ andcc %g1, 0xffffff80, %g0 ! can we use unrolled loop?
+3: be 3f ! nope, less than one loop remains
+	 andcc	%o1, 4, %g0			! dest aligned on 4 or 8 byte boundary?
+ be ccdbl + 4 ! 8 byte aligned, kick ass
+5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ sub %g1, 128, %g1 ! detract from length
+ addx %g0, %g7, %g7 ! add in last carry bit
+ andcc %g1, 0xffffff80, %g0 ! more to csum?
+ add %o0, 128, %o0 ! advance src ptr
+ bne 5b ! we did not go negative, continue looping
+ add %o1, 128, %o1 ! advance dest ptr
+3: andcc %g1, 0x70, %o2 ! can use table?
+ccmerge:be ccte ! nope, go and check for end cruft
+ andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw)
+ srl %o2, 1, %o4 ! begin negative offset computation
+ sethi %hi(ccte - 8), %o5 ! set up table ptr end
+ add %o0, %o2, %o0 ! advance src ptr
+ sub %o5, %o4, %o5 ! continue table calculation
+ sll %o2, 1, %g2 ! constant multiplies are fun...
+ sub %o5, %g2, %o5 ! some more adjustments
+ jmp %o5 + %lo(ccte - 8) ! jump into it, duff style, wheee...
+ add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
+cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
+ addx %g0, %g7, %g7
+ andcc %o3, 0xf, %g0 ! check for low bits set
+ccte: bne cc_end_cruft ! something left, handle it out of band
+ andcc %o3, 8, %g0 ! begin checks for that code
+ retl ! return
+ mov %g7, %o0 ! give em the computed checksum
+ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ sub %g1, 128, %g1 ! detract from length
+ addx %g0, %g7, %g7 ! add in last carry bit
+ andcc %g1, 0xffffff80, %g0 ! more to csum?
+ add %o0, 128, %o0 ! advance src ptr
+ bne ccdbl ! we did not go negative, continue looping
+ add %o1, 128, %o1 ! advance dest ptr
+ b ccmerge ! finish it off, above
+ andcc %g1, 0x70, %o2 ! can use table? (clears carry btw)
+
+ccslow:
+ save %sp, -104, %sp
+ mov %i0, %g2
+ mov %g2, %o4
+ orcc %i2, %g0, %o5
+ ble .LL37
+ mov 0, %o3
+ andcc %g2, 1, %g3
+ be .LL50
+ sra %o5, 1, %o1
+ ldub [%g2], %o3
+ add %i2, -1, %o5
+ add %g2, 1, %o4
+ sra %o5, 1, %o1
+.LL50:
+ cmp %o1, 0
+ be .LL39
+ andcc %o4, 2, %g0
+ be,a .LL51
+ sra %o1, 1, %o1
+ add %o1, -1, %o1
+ lduh [%o4], %o0
+ add %o5, -2, %o5
+ add %o3, %o0, %o3
+ add %o4, 2, %o4
+ sra %o1, 1, %o1
+.LL51:
+ cmp %o1, 0
+ be .LL41
+ mov 0, %o2
+.LL42:
+ ld [%o4], %o0
+ add %o3, %o2, %o3
+ add %o3, %o0, %o3
+ cmp %o3, %o0
+ addx %g0, 0, %o2
+ addcc %o1, -1, %o1
+ bne .LL42
+ add %o4, 4, %o4
+ add %o3, %o2, %o3
+ sethi %hi(65535), %o0
+ or %o0, %lo(65535), %o0
+ and %o3, %o0, %o0
+ srl %o3, 16, %o1
+ add %o0, %o1, %o3
+.LL41:
+ andcc %o5, 2, %g0
+ be .LL52
+ andcc %o5, 1, %g0
+ lduh [%o4], %o0
+ add %o3, %o0, %o3
+ add %o4, 2, %o4
+.LL39:
+ andcc %o5, 1, %g0
+.LL52:
+ be .LL53
+ sethi %hi(65535), %o0
+ ldub [%o4], %o0
+ sll %o0, 8, %o0
+ add %o3, %o0, %o3
+ sethi %hi(65535), %o0
+.LL53:
+ or %o0, %lo(65535), %o0
+ and %o3, %o0, %o2
+ srl %o3, 16, %o1
+ add %o2, %o1, %o1
+ and %o1, %o0, %o2
+ srl %o1, 16, %o1
+ add %o2, %o1, %o1
+ and %o1, %o0, %o0
+ srl %o1, 16, %o1
+ add %o0, %o1, %o1
+ sll %o1, 16, %o0
+ cmp %g3, 0
+ be .LL37
+ srl %o0, 16, %o3
+ srl %o0, 24, %o1
+ and %o3, 255, %o0
+ sll %o0, 8, %o0
+ or %o1, %o0, %o3
+.LL37:
+ add %o3, %i3, %o1
+ sethi %hi(65535), %o0
+ or %o0, %lo(65535), %o0
+ and %o1, %o0, %o0
+ srl %o1, 16, %o1
+ add %o0, %o1, %i0
+ mov %i1, %o0
+ mov %g2, %o1
+ call C_LABEL(__memcpy)
+ mov %i2, %o2
+ ret
+ restore
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
new file mode 100644
index 000000000..bf22e492c
--- /dev/null
+++ b/arch/sparc/lib/memcmp.S
@@ -0,0 +1,314 @@
+#include <asm/cprefix.h>
+
+ .text
+ .align 4
+ .global C_LABEL(__memcmp), C_LABEL(memcmp)
+C_LABEL(__memcmp):
+C_LABEL(memcmp):
+#if 1
+ cmp %o2, 0
+ ble L3
+ mov 0, %g3
+L5:
+ ldub [%o0], %g2
+ ldub [%o1], %g3
+ sub %g2, %g3, %g2
+ mov %g2, %g3
+ sll %g2, 24, %g2
+
+ cmp %g2, 0
+ bne L3
+ add %o0, 1, %o0
+
+ add %o2, -1, %o2
+
+ cmp %o2, 0
+ bg L5
+ add %o1, 1, %o1
+L3:
+ sll %g3, 24, %o0
+ sra %o0, 24, %o0
+
+ retl
+ nop
+#else
+ save %sp, -104, %sp
+ mov %i2, %o4
+ mov %i0, %o0
+
+ cmp %o4, 15
+ ble L72
+ mov %i1, %i2
+
+ andcc %i2, 3, %g0
+ be L161
+ andcc %o0, 3, %g2
+L75:
+ ldub [%o0], %g3
+ ldub [%i2], %g2
+ add %o0,1, %o0
+
+ subcc %g3, %g2, %i0
+ bne L156
+ add %i2, 1, %i2
+
+ andcc %i2, 3, %g0
+ bne L75
+ add %o4, -1, %o4
+
+ andcc %o0, 3, %g2
+L161:
+ bne,a L78
+ mov %i2, %i1
+
+ mov %o0, %i5
+ mov %i2, %i3
+ srl %o4, 2, %i4
+
+ cmp %i4, 0
+ bge L93
+ mov %i4, %g2
+
+ add %i4, 3, %g2
+L93:
+ sra %g2, 2, %g2
+ sll %g2, 2, %g2
+ sub %i4, %g2, %g2
+
+ cmp %g2, 1
+ be,a L88
+ add %o0, 4, %i5
+
+ bg L94
+ cmp %g2, 2
+
+ cmp %g2, 0
+ be,a L86
+ ld [%o0], %g3
+
+ b L162
+ ld [%i5], %g3
+L94:
+ be L81
+ cmp %g2, 3
+
+ be,a L83
+ add %o0, -4, %i5
+
+ b L162
+ ld [%i5], %g3
+L81:
+ add %o0, -8, %i5
+ ld [%o0], %g3
+ add %i2, -8, %i3
+ ld [%i2], %g2
+
+ b L82
+ add %i4, 2, %i4
+L83:
+ ld [%o0], %g4
+ add %i2, -4, %i3
+ ld [%i2], %g1
+
+ b L84
+ add %i4, 1, %i4
+L86:
+ b L87
+ ld [%i2], %g2
+L88:
+ add %i2, 4, %i3
+ ld [%o0], %g4
+ add %i4, -1, %i4
+ ld [%i2], %g1
+L95:
+ ld [%i5], %g3
+L162:
+ cmp %g4, %g1
+ be L87
+ ld [%i3], %g2
+
+ cmp %g4, %g1
+L163:
+ bleu L114
+ mov -1, %i0
+
+ b L114
+ mov 1, %i0
+L87:
+ ld [%i5 + 4], %g4
+ cmp %g3, %g2
+ bne L163
+ ld [%i3 + 4], %g1
+L84:
+ ld [%i5 + 8], %g3
+
+ cmp %g4, %g1
+ bne L163
+ ld [%i3 + 8], %g2
+L82:
+ ld [%i5 + 12], %g4
+ cmp %g3, %g2
+ bne L163
+ ld [%i3 + 12], %g1
+
+ add %i5, 16, %i5
+
+ addcc %i4, -4, %i4
+ bne L95
+ add %i3, 16, %i3
+
+ cmp %g4, %g1
+ bne L163
+ nop
+
+ b L114
+ mov 0, %i0
+L78:
+ srl %o4, 2, %i0
+ and %o0, -4, %i3
+ orcc %i0, %g0, %g3
+ sll %g2, 3, %o7
+ mov 32, %g2
+
+ bge L129
+ sub %g2, %o7, %o1
+
+ add %i0, 3, %g3
+L129:
+ sra %g3, 2, %g2
+ sll %g2, 2, %g2
+ sub %i0, %g2, %g2
+
+ cmp %g2, 1
+ be,a L124
+ ld [%i3], %o3
+
+ bg L130
+ cmp %g2, 2
+
+ cmp %g2, 0
+ be,a L122
+ ld [%i3], %o2
+
+ b L164
+ sll %o3, %o7, %g3
+L130:
+ be L117
+ cmp %g2, 3
+
+ be,a L119
+ ld [%i3], %g1
+
+ b L164
+ sll %o3, %o7, %g3
+L117:
+ ld [%i3], %g4
+ add %i2, -8, %i1
+ ld [%i3 + 4], %o3
+ add %i0, 2, %i0
+ ld [%i2], %i4
+
+ b L118
+ add %i3, -4, %i3
+L119:
+ ld [%i3 + 4], %g4
+ add %i2, -4, %i1
+ ld [%i2], %i5
+
+ b L120
+ add %i0, 1, %i0
+L122:
+ ld [%i3 + 4], %g1
+ ld [%i2], %i4
+
+ b L123
+ add %i3, 4, %i3
+L124:
+ add %i2, 4, %i1
+ ld [%i3 + 4], %o2
+ add %i0, -1, %i0
+ ld [%i2], %i5
+ add %i3, 8, %i3
+L131:
+ sll %o3, %o7, %g3
+L164:
+ srl %o2, %o1, %g2
+ ld [%i3], %g1
+ or %g3, %g2, %g3
+
+ cmp %g3, %i5
+ bne L163
+ ld [%i1], %i4
+L123:
+ sll %o2, %o7, %g3
+ srl %g1, %o1, %g2
+ ld [%i3 + 4], %g4
+ or %g3, %g2, %g3
+
+ cmp %g3, %i4
+ bne L163
+ ld [%i1 + 4], %i5
+L120:
+ sll %g1, %o7, %g3
+ srl %g4, %o1, %g2
+ ld [%i3 + 8], %o3
+ or %g3, %g2, %g3
+
+ cmp %g3, %i5
+ bne L163
+ ld [%i1 + 8], %i4
+L118:
+ sll %g4, %o7, %g3
+ srl %o3, %o1, %g2
+ ld [%i3 + 12], %o2
+ or %g3, %g2, %g3
+
+ cmp %g3, %i4
+ bne L163
+ ld [%i1 + 12], %i5
+
+ add %i3, 16, %i3
+ addcc %i0, -4, %i0
+ bne L131
+ add %i1, 16, %i1
+
+ sll %o3, %o7, %g3
+ srl %o2, %o1, %g2
+ or %g3, %g2, %g3
+
+ cmp %g3, %i5
+ be,a L114
+ mov 0, %i0
+
+ b,a L163
+L114:
+ cmp %i0, 0
+ bne L156
+ and %o4, -4, %g2
+
+ add %o0, %g2, %o0
+ add %i2, %g2, %i2
+ and %o4, 3, %o4
+L72:
+ cmp %o4, 0
+ be L156
+ mov 0, %i0
+
+ ldub [%o0], %g3
+L165:
+ ldub [%i2], %g2
+ add %o0, 1, %o0
+
+ subcc %g3, %g2, %i0
+ bne L156
+ add %i2, 1, %i2
+
+ addcc %o4, -1, %o4
+ bne,a L165
+ ldub [%o0], %g3
+
+ mov 0, %i0
+L156:
+ ret
+ restore
+#endif
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
new file mode 100644
index 000000000..c4f0394a4
--- /dev/null
+++ b/arch/sparc/lib/memcpy.S
@@ -0,0 +1,364 @@
+/* memcpy.S: Sparc optimized memcpy code.
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1996 David S. Miller
+ * Copyright(C) 1996 Eddie C. Dost
+ * Copyright(C) 1996 Jakub Jelinek
+ *
+ * derived from:
+ * e-mail between David and Eddie.
+ */
+
+#include <asm/cprefix.h>
+#include <asm/ptrace.h>
+
+/* Both these macros have to start with exactly the same insn */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
+
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lduh [%src + offset + 0x00], %t0; \
+ lduh [%src + offset + 0x02], %t1; \
+ lduh [%src + offset + 0x04], %t2; \
+ lduh [%src + offset + 0x06], %t3; \
+ sth %t0, [%dst + offset + 0x00]; \
+ sth %t1, [%dst + offset + 0x02]; \
+ sth %t2, [%dst + offset + 0x04]; \
+ sth %t3, [%dst + offset + 0x06];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
+
+ .text
+ .align 4
+
+ .globl C_LABEL(__memcpy), C_LABEL(memcpy), C_LABEL(bcopy)
+ .globl C_LABEL(amemmove), C_LABEL(memmove)
+C_LABEL(bcopy):
+ mov %o0, %o3
+ mov %o1, %o0
+ mov %o3, %o1
+C_LABEL(amemmove):
+C_LABEL(memmove):
+/* This should be kept as optimized as possible */
+ cmp %o0, %o1
+ bleu 1f
+ xor %o0, %o1, %o4
+
+ add %o1, %o2, %o3
+ cmp %o3, %o0
+ bleu 2f
+ andcc %o4, 3, %g0
+
+/* But I think from now on, we can hold on. Or tell me, is memmoving
+ * overlapping regions such a nice game? */
+
+ mov %o0, %g1
+ add %o1, %o2, %o1
+ add %o0, %o2, %o0
+ sub %o1, 1, %o1
+ sub %o0, 1, %o0
+
+reverse_bytes:
+ ldub [%o1], %o4
+ subcc %o2, 1, %o2
+ stb %o4, [%o0]
+ sub %o1, 1, %o1
+ bne reverse_bytes
+ sub %o0, 1, %o0
+
+ retl
+ mov %g1, %o0
+
+/* And here start optimizing again... */
+
+dword_align:
+ andcc %o1, 1, %g0
+ be 4f
+ andcc %o1, 2, %g0
+
+ ldub [%o1], %g2
+ add %o1, 1, %o1
+ stb %g2, [%o0]
+ sub %o2, 1, %o2
+ bne 3f
+ add %o0, 1, %o0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ sub %o2, 2, %o2
+ b 3f
+ add %o0, 2, %o0
+4:
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ sub %o2, 2, %o2
+ b 3f
+ add %o0, 2, %o0
+
+C_LABEL(__memcpy):
+C_LABEL(memcpy): /* %o0=dst %o1=src %o2=len */
+ xor %o0, %o1, %o4
+1:
+ andcc %o4, 3, %o5
+2:
+ bne cannot_optimize
+ cmp %o2, 15
+
+ bleu short_aligned_end
+ andcc %o1, 3, %g0
+
+ bne dword_align
+3:
+ andcc %o1, 4, %g0
+
+ be 2f
+ mov %o2, %g1
+
+ ld [%o1], %o4
+ sub %g1, 4, %g1
+ st %o4, [%o0]
+ add %o1, 4, %o1
+ add %o0, 4, %o0
+2:
+ andcc %g1, 0xffffff80, %g7
+ be 3f
+ andcc %o0, 4, %g0
+
+ be ldd_std + 4
+5:
+ MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne 5b
+ add %o0, 128, %o0
+3:
+ andcc %g1, 0x70, %g7
+ be memcpy_table_end
+ andcc %g1, 8, %g0
+
+ sethi %hi(memcpy_table_end), %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + %lo(memcpy_table_end), %g0
+ add %o0, %g7, %o0
+
+memcpy_table:
+ MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+
+memcpy_table_end:
+ be memcpy_last7
+ andcc %g1, 4, %g0
+
+ ldd [%o1], %g2
+ add %o0, 8, %o0
+ add %o1, 8, %o1
+ st %g2, [%o0 - 0x08]
+ st %g3, [%o0 - 0x04]
+memcpy_last7:
+ be 1f
+ andcc %g1, 2, %g0
+
+ ld [%o1], %g2
+ add %o1, 4, %o1
+ st %g2, [%o0]
+ add %o0, 4, %o0
+1:
+ be 1f
+ andcc %g1, 1, %g0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ add %o0, 2, %o0
+1:
+ be 1f
+ nop
+
+ ldub [%o1], %g2
+ stb %g2, [%o0]
+1:
+ retl
+ nop
+
+ /* Placed here for cache reasons. */
+ .globl C_LABEL(__copy_to_user), C_LABEL(__copy_from_user)
+C_LABEL(__copy_to_user):
+ b copy_user_common
+ st %o0, [%g6 + THREAD_EX_ADDR]
+
+C_LABEL(__copy_from_user):
+ st %o1, [%g6 + THREAD_EX_ADDR]
+
+copy_user_common:
+ ld [%g6 + THREAD_EX_COUNT], %g1
+ set copy_user_failure, %g2
+ add %g1, 1, %g1
+ st %o7, [%g6 + THREAD_EX_PC]
+ st %g1, [%g6 + THREAD_EX_COUNT]
+ call C_LABEL(__memcpy)
+ st %g2, [%g6 + THREAD_EX_EXPC]
+
+copy_user_success:
+ ldd [%g6 + THREAD_EX_COUNT], %g2
+ mov 0, %o0
+ sub %g2, 1, %g1
+ jmpl %g3 + 0x8, %g0
+ st %g1, [%g6 + THREAD_EX_COUNT]
+
+copy_user_failure:
+ jmpl %g3 + 0x8, %g0
+ mov %g2, %o0
+
+ldd_std:
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne ldd_std
+ add %o0, 128, %o0
+
+ andcc %g1, 0x70, %g7
+ be memcpy_table_end
+ andcc %g1, 8, %g0
+
+ sethi %hi(memcpy_table_end), %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + %lo(memcpy_table_end), %g0
+ add %o0, %g7, %o0
+
+cannot_optimize:
+ bleu short_end
+ cmp %o5, 2
+
+ bne byte_chunk
+ and %o2, 0xfffffff0, %o3
+
+ andcc %o1, 1, %g0
+ be 1f
+ nop
+
+ ldub [%o1], %g2
+ add %o1, 1, %o1
+ sub %o2, 1, %o2
+ stb %g2, [%o0]
+ andcc %o2, 0xfffffff0, %o3
+ be short_end
+ add %o0, 1, %o0
+1:
+ MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+ MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne 1b
+ add %o0, 0x10, %o0
+ b 2f
+ and %o2, 0xe, %o3
+
+byte_chunk:
+ MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne byte_chunk
+ add %o0, 0x10, %o0
+
+short_end:
+ and %o2, 0xe, %o3
+2:
+ sethi %hi(short_table_end), %o5
+ sll %o3, 3, %o4
+ add %o0, %o3, %o0
+ sub %o5, %o4, %o5
+ add %o1, %o3, %o1
+ jmpl %o5 + %lo(short_table_end), %g0
+ andcc %o2, 1, %g0
+
+ MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+short_table_end:
+ be 1f
+ nop
+ ldub [%o1], %g2
+ stb %g2, [%o0]
+1:
+ retl
+ nop
+
+short_aligned_end:
+ bne short_end
+ andcc %o2, 8, %g0
+
+ be 1f
+ andcc %o2, 4, %g0
+
+ ld [%o1 + 0x00], %g2
+ ld [%o1 + 0x04], %g3
+ add %o1, 8, %o1
+ st %g2, [%o0 + 0x00]
+ st %g3, [%o0 + 0x04]
+ add %o0, 8, %o0
+1:
+ b memcpy_last7
+ mov %o2, %g1
diff --git a/arch/sparc/lib/memscan.S b/arch/sparc/lib/memscan.S
new file mode 100644
index 000000000..f334751c2
--- /dev/null
+++ b/arch/sparc/lib/memscan.S
@@ -0,0 +1,135 @@
+/* $Id: memscan.S,v 1.4 1996/09/08 02:01:20 davem Exp $
+ * memscan.S: Optimized memscan for the Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+
+/* In essence, this is just a fancy strlen. */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .text
+ .align 4
+ .globl C_LABEL(__memscan_zero), C_LABEL(__memscan_generic)
+ .globl C_LABEL(memscan)
+C_LABEL(__memscan_zero):
+ /* %o0 = addr, %o1 = size */
+ cmp %o1, 0
+ bne,a 1f
+ andcc %o0, 3, %g0
+
+ retl
+ nop
+
+1:
+ be mzero_scan_word
+ sethi %hi(HI_MAGIC), %g2
+
+ ldsb [%o0], %g3
+mzero_still_not_word_aligned:
+ cmp %g3, 0
+ bne 1f
+ add %o0, 1, %o0
+
+ retl
+ sub %o0, 1, %o0
+
+1:
+ subcc %o1, 1, %o1
+ bne,a 1f
+ andcc %o0, 3, %g0
+
+ retl
+ nop
+
+1:
+ bne,a mzero_still_not_word_aligned
+ ldsb [%o0], %g3
+
+ sethi %hi(HI_MAGIC), %g2
+mzero_scan_word:
+ or %g2, %lo(HI_MAGIC), %o3
+ sethi %hi(LO_MAGIC), %g3
+ or %g3, %lo(LO_MAGIC), %o2
+mzero_next_word:
+ ld [%o0], %g2
+mzero_next_word_preloaded:
+ sub %g2, %o2, %g2
+mzero_next_word_preloaded_next:
+ andcc %g2, %o3, %g0
+ bne mzero_byte_zero
+ add %o0, 4, %o0
+
+mzero_check_out_of_fuel:
+ subcc %o1, 4, %o1
+ bg,a 1f
+ ld [%o0], %g2
+
+ retl
+ nop
+
+1:
+ b mzero_next_word_preloaded_next
+ sub %g2, %o2, %g2
+
+ /* Check every byte. */
+mzero_byte_zero:
+ ldsb [%o0 - 4], %g2
+ cmp %g2, 0
+ bne mzero_byte_one
+ sub %o0, 4, %g3
+
+ retl
+ mov %g3, %o0
+
+mzero_byte_one:
+ ldsb [%o0 - 3], %g2
+ cmp %g2, 0
+ bne,a mzero_byte_two_and_three
+ ldsb [%o0 - 2], %g2
+
+ retl
+ sub %o0, 3, %o0
+
+mzero_byte_two_and_three:
+ cmp %g2, 0
+ bne,a 1f
+ ldsb [%o0 - 1], %g2
+
+ retl
+ sub %o0, 2, %o0
+
+1:
+ cmp %g2, 0
+ bne,a mzero_next_word_preloaded
+ ld [%o0], %g2
+
+ retl
+ sub %o0, 1, %o0
+
+mzero_found_it:
+ retl
+ sub %o0, 2, %o0
+
+C_LABEL(memscan):
+C_LABEL(__memscan_generic):
+ /* %o0 = addr, %o1 = c, %o2 = size */
+ cmp %o2, 0
+ bne,a 0f
+ ldub [%o0], %g2
+
+ b,a 2f
+1:
+ ldub [%o0], %g2
+0:
+ cmp %g2, %o1
+ be 2f
+ addcc %o2, -1, %o2
+ bne 1b
+ add %o0, 1, %o0
+2:
+ retl
+ nop
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
new file mode 100644
index 000000000..95691debb
--- /dev/null
+++ b/arch/sparc/lib/memset.S
@@ -0,0 +1,166 @@
+/* linux/arch/sparc/lib/memset.S: Sparc optimized memset and bzero code
+ * Hand optimized from GNU libc's memset
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/cprefix.h>
+#include <asm/ptrace.h>
+
+#define HANDLE_UNALIGNED 1
+
+ /* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
+#define ZERO_BIG_BLOCK(base, offset, source) \
+ std source, [base + offset + 0x00]; \
+ std source, [base + offset + 0x08]; \
+ std source, [base + offset + 0x10]; \
+ std source, [base + offset + 0x18]; \
+ std source, [base + offset + 0x20]; \
+ std source, [base + offset + 0x28]; \
+ std source, [base + offset + 0x30]; \
+ std source, [base + offset + 0x38];
+
+#define ZERO_LAST_BLOCKS(base, offset, source) \
+ std source, [base - offset - 0x38]; \
+ std source, [base - offset - 0x30]; \
+ std source, [base - offset - 0x28]; \
+ std source, [base - offset - 0x20]; \
+ std source, [base - offset - 0x18]; \
+ std source, [base - offset - 0x10]; \
+ std source, [base - offset - 0x08]; \
+ std source, [base - offset - 0x00];
+
+ .text
+ .align 4
+
+ .globl C_LABEL(__bzero), C_LABEL(__memset), C_LABEL(memset)
+C_LABEL(__memset):
+C_LABEL(memset):
+ and %o1, 0xff, %g3
+ sll %g3, 8, %g2
+ or %g3, %g2, %g3
+ sll %g3, 16, %g2
+ or %g3, %g2, %g3
+ b 1f
+ mov %o2, %o1
+
+#if HANDLE_UNALIGNED
+/* As this is highly unprobable, we optimize the other case (4 aligned)
+ * Define HANDLE_UNALIGNED to 0, if all the alignment work is done by
+ * the trap. Then we have to hope nobody will memset something unaligned
+ * with large counts, as this would lead to a lot of traps...
+ */
+3:
+ cmp %o2, 3
+ be 2f
+ stb %g3, [%o0]
+
+ cmp %o2, 2
+ be 2f
+ stb %g3, [%o0 + 0x01]
+
+ stb %g3, [%o0 + 0x02]
+2:
+ sub %o2, 4, %o2
+ add %o1, %o2, %o1
+ b 4f
+ sub %o0, %o2, %o0
+#endif /* HANDLE_UNALIGNED */
+
+ .globl C_LABEL(__clear_user)
+C_LABEL(__clear_user):
+ st %o0, [%g6 + THREAD_EX_ADDR]
+ ld [%g6 + THREAD_EX_COUNT], %g1
+ set clear_user_failure, %g2
+ add %g1, 1, %g1
+ st %o7, [%g6 + THREAD_EX_PC]
+ st %g1, [%g6 + THREAD_EX_COUNT]
+ call C_LABEL(__bzero)
+ st %g2, [%g6 + THREAD_EX_EXPC]
+
+clear_user_success:
+ ldd [%g6 + THREAD_EX_COUNT], %g2
+ mov 0, %o0
+ sub %g2, 1, %g1
+ jmpl %g3 + 0x8, %g0
+ st %g1, [%g6 + THREAD_EX_COUNT]
+
+clear_user_failure:
+ jmpl %g3 + 0x8, %g0
+ mov %g2, %o0
+
+C_LABEL(__bzero):
+ mov %g0, %g3
+1:
+ cmp %o1, 7
+ bleu 7f
+ mov %o0, %g1
+
+#if HANDLE_UNALIGNED
+ andcc %o0, 3, %o2
+ bne 3b
+#endif /* HANDLE_UNALIGNED */
+4:
+ andcc %o0, 4, %g0
+
+ be 2f
+ mov %g3, %g2
+
+ st %g3, [%o0]
+ sub %o1, 4, %o1
+ add %o0, 4, %o0
+2:
+ andcc %o1, 0xffffff80, %o3 ! Now everything is 8 aligned and o1 is len to run
+ be 9f
+ andcc %o1, 0x78, %o2
+4:
+ ZERO_BIG_BLOCK(%o0, 0x00, %g2)
+ subcc %o3, 128, %o3
+ ZERO_BIG_BLOCK(%o0, 0x40, %g2)
+ bne 4b
+ add %o0, 128, %o0
+
+ orcc %o2, %g0, %g0
+9:
+ be 6f
+ andcc %o1, 7, %o1
+
+ srl %o2, 1, %o3
+ set bzero_table + 64, %o4
+ sub %o4, %o3, %o4
+ jmp %o4
+ add %o0, %o2, %o0
+
+bzero_table:
+ ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+ ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+
+6:
+ be 8f
+ andcc %o1, 4, %g0
+
+ be 1f
+ andcc %o1, 2, %g0
+
+ st %g3, [%o0]
+ add %o0, 4, %o0
+1:
+ be 1f
+ andcc %o1, 1, %g0
+
+ sth %g3, [%o0]
+ add %o0, 2, %o0
+1:
+ bne,a 8f
+ stb %g3, [%o0]
+8:
+ retl
+ mov %g1,%o0
+
+/* Don't care about alignment here. It is highly
+ * unprobable and at most two traps may happen
+ */
+7:
+ b 6b
+ orcc %o1, 0, %g0
diff --git a/arch/sparc/lib/memset.c b/arch/sparc/lib/memset.c
new file mode 100644
index 000000000..1e81dff49
--- /dev/null
+++ b/arch/sparc/lib/memset.c
@@ -0,0 +1,71 @@
+/* linux/arch/sparc/lib/memset.c
+ *
+ * This is from GNU libc.
+ */
+
+#include <linux/types.h>
+
+#define op_t unsigned long int
+#define OPSIZ (sizeof(op_t))
+
+typedef unsigned char byte;
+
+void *memset(void *dstpp, char c, size_t len)
+{
+ long int dstp = (long int) dstpp;
+
+ if (len >= 8) {
+ size_t xlen;
+ op_t cccc;
+
+ cccc = (unsigned char) c;
+ cccc |= cccc << 8;
+ cccc |= cccc << 16;
+
+ /* There are at least some bytes to set.
+ No need to test for LEN == 0 in this alignment loop. */
+ while (dstp % OPSIZ != 0) {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ /* Write 8 `op_t' per iteration until less
+ * than 8 `op_t' remain.
+ */
+ xlen = len / (OPSIZ * 8);
+ while (xlen > 0) {
+ ((op_t *) dstp)[0] = cccc;
+ ((op_t *) dstp)[1] = cccc;
+ ((op_t *) dstp)[2] = cccc;
+ ((op_t *) dstp)[3] = cccc;
+ ((op_t *) dstp)[4] = cccc;
+ ((op_t *) dstp)[5] = cccc;
+ ((op_t *) dstp)[6] = cccc;
+ ((op_t *) dstp)[7] = cccc;
+ dstp += 8 * OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ * 8;
+
+ /* Write 1 `op_t' per iteration until less than
+ * OPSIZ bytes remain.
+ */
+ xlen = len / OPSIZ;
+ while (xlen > 0) {
+ ((op_t *) dstp)[0] = cccc;
+ dstp += OPSIZ;
+ xlen -= 1;
+ }
+ len %= OPSIZ;
+ }
+
+ /* Write the last few bytes. */
+ while (len > 0) {
+ ((byte *) dstp)[0] = c;
+ dstp += 1;
+ len -= 1;
+ }
+
+ return dstpp;
+}
diff --git a/arch/sparc/lib/mul.S b/arch/sparc/lib/mul.S
index e6d78f85f..83dffbc2f 100644
--- a/arch/sparc/lib/mul.S
+++ b/arch/sparc/lib/mul.S
@@ -1,4 +1,5 @@
-/* mul.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: mul.S,v 1.4 1996/09/30 02:22:32 davem Exp $
+ * mul.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -19,7 +20,7 @@
mov %o0, %y ! multiplier -> Y
andncc %o0, 0xfff, %g0 ! test bits 12..31
be Lmul_shortway ! if zero, can do it the short way
- andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
+ andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
/*
* Long multiply. 32 steps, followed by a final shift step.
@@ -65,23 +66,23 @@
#if 0
tst %o0
bge 1f
- rd %y, %o0
+ rd %y, %o0
! %o0 was indeed negative; fix upper 32 bits of result by subtracting
! %o1 (i.e., return %o4 - %o1 in %o1).
retl
- sub %o4, %o1, %o1
+ sub %o4, %o1, %o1
1:
retl
- mov %o4, %o1
+ mov %o4, %o1
#else
/* Faster code adapted from tege@sics.se's code for umul.S. */
sra %o0, 31, %o2 ! make mask from sign bit
and %o1, %o2, %o2 ! %o2 = 0 or %o1, depending on sign of %o0
rd %y, %o0 ! get lower half of product
retl
- sub %o4, %o2, %o1 ! subtract compensation
+ sub %o4, %o2, %o1 ! subtract compensation
! and put upper half in place
#endif
@@ -124,4 +125,11 @@ Lmul_shortway:
srl %o5, 20, %o5 ! shift low bits right 20, zero fill at left
or %o5, %o0, %o0 ! construct low part of result
retl
- sra %o4, 20, %o1 ! ... and extract high part of result
+ sra %o4, 20, %o1 ! ... and extract high part of result
+
+ .globl .mul_patch
+.mul_patch:
+ smul %o0, %o1, %o0
+ retl
+ rd %y, %o1
+ nop
diff --git a/arch/sparc/lib/rem.S b/arch/sparc/lib/rem.S
index 3c0cc579b..44508148d 100644
--- a/arch/sparc/lib/rem.S
+++ b/arch/sparc/lib/rem.S
@@ -1,4 +1,5 @@
-/* rem.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: rem.S,v 1.7 1996/09/30 02:22:34 davem Exp $
+ * rem.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -46,13 +47,14 @@
! compute sign of result; if neither is negative, no problem
orcc %o1, %o0, %g0 ! either negative?
bge 2f ! no, go do the divide
- xor %o1, %o0, %g6 ! compute sign in any case
+ mov %o0, %g2 ! compute sign in any case
+
tst %o1
bge 1f
- tst %o0
+ tst %o0
! %o1 is definitely negative; %o0 might also be negative
bge 2f ! if %o0 not negative...
- sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
1: ! %o0 is negative, %o1 is nonnegative
sub %g0, %o0, %o0 ! make %o0 nonnegative
2:
@@ -60,22 +62,24 @@
! Ready to divide. Compute size of quotient; scale comparand.
orcc %o1, %g0, %o5
bne 1f
- mov %o0, %o3
+ mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta ST_DIV0
retl
- clr %o0
+ clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu Lgot_result ! (and algorithm fails otherwise)
- clr %o2
+ clr %o2
+
sethi %hi(1 << (32 - 4 - 1)), %g1
+
cmp %o3, %g1
blu Lnot_really_big
- clr %o4
+ clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
@@ -85,15 +89,19 @@
1:
cmp %o5, %g1
bgeu 3f
- mov 1, %g7
+ mov 1, %g7
+
sll %o5, 4, %o5
+
b 1b
- add %o4, 1, %o4
+ add %o4, 1, %o4
! Now compute %g7.
- 2: addcc %o5, %o5, %o5
+ 2:
+ addcc %o5, %o5, %o5
+
bcc Lnot_too_big
- add %g7, 1, %g7
+ add %g7, 1, %g7
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
@@ -101,15 +109,18 @@
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
+
b Ldo_single_div
- sub %g7, 1, %g7
+ sub %g7, 1, %g7
Lnot_too_big:
- 3: cmp %o5, %o3
+ 3:
+ cmp %o5, %o3
blu 2b
- nop
+ nop
+
be Ldo_single_div
- nop
+ nop
/* NB: these are commented out in the V8-Sparc manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
@@ -126,19 +137,23 @@
Ldo_single_div:
subcc %g7, 1, %g7
bl Lend_regular_divide
- nop
+ nop
+
sub %o3, %o5, %o3
mov 1, %o2
+
b Lend_single_divloop
- nop
+ nop
Lsingle_divloop:
sll %o2, 1, %o2
+
bl 1f
- srl %o5, 1, %o5
+ srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
+
b 2f
- add %o2, 1, %o2
+ add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
@@ -146,7 +161,8 @@
Lend_single_divloop:
subcc %g7, 1, %g7
bge Lsingle_divloop
- tst %o3
+ tst %o3
+
b,a Lend_regular_divide
Lnot_really_big:
@@ -154,206 +170,213 @@ Lnot_really_big:
sll %o5, 4, %o5
cmp %o5, %o3
bleu 1b
- addcc %o4, 1, %o4
+ addcc %o4, 1, %o4
be Lgot_result
- sub %o4, 1, %o4
+ sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
Ldivloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L.1.16
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L.2.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L.3.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L.4.23
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2+1), %o2
+
+ b 9f
+ add %o2, (7*2+1), %o2
L.4.23:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2-1), %o2
-
+ b 9f
+ add %o2, (7*2-1), %o2
L.3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L.4.21
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2+1), %o2
+ b 9f
+ add %o2, (5*2+1), %o2
L.4.21:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2-1), %o2
-
-
+ b 9f
+ add %o2, (5*2-1), %o2
L.2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L.3.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L.4.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2+1), %o2
-
+ b 9f
+ add %o2, (3*2+1), %o2
+
L.4.19:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2-1), %o2
-
-
+ b 9f
+ add %o2, (3*2-1), %o2
+
L.3.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L.4.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2+1), %o2
-
+ b 9f
+ add %o2, (1*2+1), %o2
+
L.4.17:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (1*2-1), %o2
+
L.1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L.2.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L.3.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L.4.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2+1), %o2
-
+ b 9f
+ add %o2, (-1*2+1), %o2
+
L.4.15:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2-1), %o2
-
-
+ b 9f
+ add %o2, (-1*2-1), %o2
+
L.3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L.4.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2+1), %o2
-
+ b 9f
+ add %o2, (-3*2+1), %o2
+
L.4.13:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (-3*2-1), %o2
+
L.2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L.3.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L.4.11
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2+1), %o2
-
+ b 9f
+ add %o2, (-5*2+1), %o2
+
L.4.11:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2-1), %o2
-
-
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+
L.3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L.4.9
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2+1), %o2
-
+ b 9f
+ add %o2, (-7*2+1), %o2
+
L.4.9:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (-7*2-1), %o2
+
9:
Lend_regular_divide:
subcc %o4, 1, %o4
bge Ldivloop
- tst %o3
+ tst %o3
+
bl,a Lgot_result
! non-restoring fixup here (one instruction only!)
add %o3, %o1, %o3
-
Lgot_result:
+ ! check to see if answer should be < 0
+ tst %g2
+ bl,a 1f
+ sub %g0, %o3, %o3
+1:
+ retl
+ mov %o3, %o0
+ .globl .rem_patch
+.rem_patch:
+ sra %o0, 0x1f, %o4
+ wr %o4, 0x0, %y
+ nop
+ nop
+ nop
+ sdivcc %o0, %o1, %o2
+ bvs,a 1f
+ xnor %o2, %g0, %o2
+1: smul %o2, %o1, %o2
retl
- mov %o3, %o0
+ sub %o0, %o2, %o0
+ nop
diff --git a/arch/sparc/lib/sdiv.S b/arch/sparc/lib/sdiv.S
index 2fa7a9794..e0ad80b6f 100644
--- a/arch/sparc/lib/sdiv.S
+++ b/arch/sparc/lib/sdiv.S
@@ -1,4 +1,5 @@
-/* sdiv.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: sdiv.S,v 1.6 1996/10/02 17:37:00 davem Exp $
+ * sdiv.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -46,13 +47,14 @@
! compute sign of result; if neither is negative, no problem
orcc %o1, %o0, %g0 ! either negative?
bge 2f ! no, go do the divide
- xor %o1, %o0, %g6 ! compute sign in any case
+ xor %o1, %o0, %g2 ! compute sign in any case
+
tst %o1
bge 1f
- tst %o0
+ tst %o0
! %o1 is definitely negative; %o0 might also be negative
bge 2f ! if %o0 not negative...
- sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
1: ! %o0 is negative, %o1 is nonnegative
sub %g0, %o0, %o0 ! make %o0 nonnegative
2:
@@ -60,22 +62,24 @@
! Ready to divide. Compute size of quotient; scale comparand.
orcc %o1, %g0, %o5
bne 1f
- mov %o0, %o3
+ mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta ST_DIV0
retl
- clr %o0
+ clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu Lgot_result ! (and algorithm fails otherwise)
- clr %o2
+ clr %o2
+
sethi %hi(1 << (32 - 4 - 1)), %g1
+
cmp %o3, %g1
blu Lnot_really_big
- clr %o4
+ clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
@@ -85,15 +89,18 @@
1:
cmp %o5, %g1
bgeu 3f
- mov 1, %g7
+ mov 1, %g7
+
sll %o5, 4, %o5
+
b 1b
- add %o4, 1, %o4
+ add %o4, 1, %o4
! Now compute %g7.
- 2: addcc %o5, %o5, %o5
+ 2:
+ addcc %o5, %o5, %o5
bcc Lnot_too_big
- add %g7, 1, %g7
+ add %g7, 1, %g7
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
@@ -101,15 +108,18 @@
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
+
b Ldo_single_div
- sub %g7, 1, %g7
+ sub %g7, 1, %g7
Lnot_too_big:
- 3: cmp %o5, %o3
+ 3:
+ cmp %o5, %o3
blu 2b
- nop
+ nop
+
be Ldo_single_div
- nop
+ nop
/* NB: these are commented out in the V8-Sparc manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
@@ -126,19 +136,23 @@
Ldo_single_div:
subcc %g7, 1, %g7
bl Lend_regular_divide
- nop
+ nop
+
sub %o3, %o5, %o3
mov 1, %o2
+
b Lend_single_divloop
- nop
+ nop
Lsingle_divloop:
sll %o2, 1, %o2
+
bl 1f
- srl %o5, 1, %o5
+ srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
+
b 2f
- add %o2, 1, %o2
+ add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
@@ -146,7 +160,8 @@
Lend_single_divloop:
subcc %g7, 1, %g7
bge Lsingle_divloop
- tst %o3
+ tst %o3
+
b,a Lend_regular_divide
Lnot_really_big:
@@ -154,83 +169,81 @@ Lnot_really_big:
sll %o5, 4, %o5
cmp %o5, %o3
bleu 1b
- addcc %o4, 1, %o4
+ addcc %o4, 1, %o4
+
be Lgot_result
- sub %o4, 1, %o4
+ sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
Ldivloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L.1.16
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L.2.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L.3.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L.4.23
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2+1), %o2
-
+ b 9f
+ add %o2, (7*2+1), %o2
+
L.4.23:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2-1), %o2
-
-
+ b 9f
+ add %o2, (7*2-1), %o2
+
L.3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L.4.21
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2+1), %o2
-
+ b 9f
+ add %o2, (5*2+1), %o2
+
L.4.21:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (5*2-1), %o2
+
L.2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L.3.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L.4.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2+1), %o2
-
+ b 9f
+ add %o2, (3*2+1), %o2
+
L.4.19:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2-1), %o2
+ b 9f
+ add %o2, (3*2-1), %o2
L.3.17:
@@ -238,126 +251,129 @@ L.3.17:
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L.4.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2+1), %o2
-
+ b 9f
+ add %o2, (1*2+1), %o2
+
L.4.17:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (1*2-1), %o2
+
L.1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L.2.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L.3.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L.4.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2+1), %o2
-
+ b 9f
+ add %o2, (-1*2+1), %o2
+
L.4.15:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2-1), %o2
-
-
+ b 9f
+ add %o2, (-1*2-1), %o2
+
L.3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L.4.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2+1), %o2
-
+ b 9f
+ add %o2, (-3*2+1), %o2
+
L.4.13:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (-3*2-1), %o2
+
L.2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L.3.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L.4.11
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2+1), %o2
-
+ b 9f
+ add %o2, (-5*2+1), %o2
+
L.4.11:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2-1), %o2
-
-
+ b 9f
+ add %o2, (-5*2-1), %o2
+
L.3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L.4.9
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2+1), %o2
-
+ b 9f
+ add %o2, (-7*2+1), %o2
+
L.4.9:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (-7*2-1), %o2
+
9:
Lend_regular_divide:
subcc %o4, 1, %o4
bge Ldivloop
- tst %o3
+ tst %o3
+
bl,a Lgot_result
! non-restoring fixup here (one instruction only!)
sub %o2, 1, %o2
-
Lgot_result:
! check to see if answer should be < 0
- tst %g6
+ tst %g2
bl,a 1f
- sub %g0, %o2, %o2
+ sub %g0, %o2, %o2
1:
retl
- mov %o2, %o0
+ mov %o2, %o0
+
+ .globl .div_patch
+.div_patch:
+ sra %o0, 0x1f, %o2
+ wr %o2, 0x0, %y
+ nop
+ nop
+ nop
+ sdivcc %o0, %o1, %o0
+ bvs,a 1f
+ xnor %o0, %g0, %o0
+1: retl
+ nop
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
new file mode 100644
index 000000000..95321d4c5
--- /dev/null
+++ b/arch/sparc/lib/strlen.S
@@ -0,0 +1,88 @@
+/* strlen.S: Sparc optimized strlen().
+ *
+ * This was hand optimized by davem@caip.rutgers.edu from
+ * the C-code in GNU-libc.
+ */
+
+#include <asm/cprefix.h>
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .align 4
+ .global C_LABEL(strlen)
+C_LABEL(strlen):
+ mov %o0, %o1
+ andcc %o0, 3, %g0 ! and with %o0 so no dependency problems
+ be scan_words
+ sethi %hi(HI_MAGIC), %g2 ! common case and most Sparcs predict taken
+
+ ldsb [%o0], %g2
+still_not_word_aligned:
+ cmp %g2, 0
+ bne,a 1f
+ add %o0, 1, %o0
+
+ /* Ok, so there are tons of quick interlocks above for the
+ * < 4 length string unaligned... not too common so I'm not
+ * very concerned.
+ */
+ retl
+ sub %o0, %o1, %o0
+
+1:
+ andcc %o0, 3, %g0
+ bne,a still_not_word_aligned
+ ldsb [%o0], %g2
+
+ /* HyperSparc executes each sethi/or pair in 1 cycle. */
+ sethi %hi(HI_MAGIC), %g2
+scan_words:
+ or %g2, %lo(HI_MAGIC), %o3
+ sethi %hi(LO_MAGIC), %g3
+ or %g3, %lo(LO_MAGIC), %o2
+next_word:
+ ld [%o0], %g2 ! no dependencies
+next_word_preloaded:
+ sub %g2, %o2, %g2 ! lots of locks here
+ andcc %g2, %o3, %g0 ! and I dont like it...
+ be next_word
+ add %o0, 4, %o0
+
+ /* Check every byte. */
+byte_zero:
+ ldsb [%o0 - 0x4], %g2
+ cmp %g2, 0
+ bne byte_one
+ add %o0, -4, %g3
+
+ retl
+ sub %g3, %o1, %o0
+
+byte_one:
+ ldsb [%o0 - 0x3], %g2
+ cmp %g2, 0
+ bne,a byte_two_and_three
+ ldsb [%o0 - 0x2], %g2
+
+ sub %g3, %o1, %o0
+ retl
+ add %o0, 1, %o0
+
+byte_two_and_three:
+ cmp %g2, 0
+ be,a found_it
+ sub %g3, %o1, %o0
+
+ ldsb [%o0 - 0x1], %g2
+ cmp %g2, 0
+ bne,a next_word_preloaded
+ ld [%o0], %g2
+
+ sub %g3, %o1, %o0
+ retl
+ add %o0, 3, %o0
+
+found_it:
+ retl
+ add %o0, 2, %o0
diff --git a/arch/sparc/lib/strncmp.S b/arch/sparc/lib/strncmp.S
new file mode 100644
index 000000000..2f26b1b4a
--- /dev/null
+++ b/arch/sparc/lib/strncmp.S
@@ -0,0 +1,120 @@
+/* $Id: strncmp.S,v 1.2 1996/09/09 02:47:20 davem Exp $
+ * strncmp.S: Hand optimized Sparc assembly of GCC output from GNU libc
+ * generic strncmp routine.
+ */
+
+#include <asm/cprefix.h>
+
+ .text
+ .align 4
+ .global C_LABEL(__strncmp), C_LABEL(strncmp)
+C_LABEL(__strncmp):
+C_LABEL(strncmp):
+ mov %o0, %g3
+ mov 0, %o3
+
+ cmp %o2, 3
+ ble 7f
+ mov 0, %g2
+
+ sra %o2, 2, %o4
+ ldub [%g3], %o3
+
+0:
+ ldub [%o1], %g2
+ add %g3, 1, %g3
+ and %o3, 0xff, %o0
+
+ cmp %o0, 0
+ be 8f
+ add %o1, 1, %o1
+
+ cmp %o0, %g2
+ be,a 1f
+ ldub [%g3], %o3
+
+ retl
+ sub %o0, %g2, %o0
+
+1:
+ ldub [%o1], %g2
+ add %g3,1, %g3
+ and %o3, 0xff, %o0
+
+ cmp %o0, 0
+ be 8f
+ add %o1, 1, %o1
+
+ cmp %o0, %g2
+ be,a 1f
+ ldub [%g3], %o3
+
+ retl
+ sub %o0, %g2, %o0
+
+1:
+ ldub [%o1], %g2
+ add %g3, 1, %g3
+ and %o3, 0xff, %o0
+
+ cmp %o0, 0
+ be 8f
+ add %o1, 1, %o1
+
+ cmp %o0, %g2
+ be,a 1f
+ ldub [%g3], %o3
+
+ retl
+ sub %o0, %g2, %o0
+
+1:
+ ldub [%o1], %g2
+ add %g3, 1, %g3
+ and %o3, 0xff, %o0
+
+ cmp %o0, 0
+ be 8f
+ add %o1, 1, %o1
+
+ cmp %o0, %g2
+ be 1f
+ add %o4, -1, %o4
+
+ retl
+ sub %o0, %g2, %o0
+
+1:
+
+ cmp %o4, 0
+ bg,a 0b
+ ldub [%g3], %o3
+
+ b 7f
+ and %o2, 3, %o2
+
+9:
+ ldub [%o1], %g2
+ add %g3, 1, %g3
+ and %o3, 0xff, %o0
+
+ cmp %o0, 0
+ be 8f
+ add %o1, 1, %o1
+
+ cmp %o0, %g2
+ be 7f
+ add %o2, -1, %o2
+
+8:
+ retl
+ sub %o0, %g2, %o0
+
+7:
+ cmp %o2, 0
+ bg,a 9b
+ ldub [%g3], %o3
+
+ and %g2, 0xff, %o0
+ retl
+ sub %o3, %o0, %o0
diff --git a/arch/sparc/lib/strncpy_from_user.S b/arch/sparc/lib/strncpy_from_user.S
new file mode 100644
index 000000000..3dd2bd71c
--- /dev/null
+++ b/arch/sparc/lib/strncpy_from_user.S
@@ -0,0 +1,49 @@
+/* strncpy_from_user.S: Sparc strncpy from userspace.
+ *
+ * Copyright(C) 1996 David S. Miller
+ */
+
+#include <asm/cprefix.h>
+#include <asm/ptrace.h>
+
+ .text
+ .align 4
+
+ /* Must return:
+ *
+ * -EFAULT for an exception
+ * count if we hit the buffer limit
+ * bytes copied if we hit a null byte
+ */
+
+ .globl C_LABEL(__strncpy_from_user)
+C_LABEL(__strncpy_from_user):
+ /* %o0=dest, %o1=src, %o2=count */
+ ld [%g6 + THREAD_EX_COUNT], %g1
+ set strncpy_user_failure, %g2
+ add %g1, 1, %g3
+ st %o7, [%g6 + THREAD_EX_PC]
+ st %g3, [%g6 + THREAD_EX_COUNT]
+ st %g2, [%g6 + THREAD_EX_EXPC]
+
+ mov %o2, %o3
+1:
+ subcc %o2, 1, %o2
+ bneg 2f
+ nop
+
+ ldub [%o1], %o4
+ add %o0, 1, %o0
+ cmp %o4, 0
+ add %o1, 1, %o1
+ bne 1b
+ stb %o4, [%o0 - 1]
+2:
+ add %o2, 1, %o0
+ st %g1, [%g6 + THREAD_EX_COUNT]
+ retl
+ sub %o3, %o0, %o0
+
+strncpy_user_failure:
+ jmpl %g3 + 0x8, %g0
+ mov %g1, %o0
diff --git a/arch/sparc/lib/udiv.S b/arch/sparc/lib/udiv.S
index 53cfeac90..2abfc6b0f 100644
--- a/arch/sparc/lib/udiv.S
+++ b/arch/sparc/lib/udiv.S
@@ -1,4 +1,5 @@
-/* udiv.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: udiv.S,v 1.4 1996/09/30 02:22:38 davem Exp $
+ * udiv.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -47,22 +48,24 @@
! Ready to divide. Compute size of quotient; scale comparand.
orcc %o1, %g0, %o5
bne 1f
- mov %o0, %o3
+ mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta ST_DIV0
retl
- clr %o0
+ clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu Lgot_result ! (and algorithm fails otherwise)
- clr %o2
+ clr %o2
+
sethi %hi(1 << (32 - 4 - 1)), %g1
+
cmp %o3, %g1
blu Lnot_really_big
- clr %o4
+ clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
@@ -72,15 +75,18 @@
1:
cmp %o5, %g1
bgeu 3f
- mov 1, %g7
+ mov 1, %g7
+
sll %o5, 4, %o5
+
b 1b
- add %o4, 1, %o4
+ add %o4, 1, %o4
! Now compute %g7.
- 2: addcc %o5, %o5, %o5
+ 2:
+ addcc %o5, %o5, %o5
bcc Lnot_too_big
- add %g7, 1, %g7
+ add %g7, 1, %g7
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
@@ -88,15 +94,18 @@
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
+
b Ldo_single_div
- sub %g7, 1, %g7
+ sub %g7, 1, %g7
Lnot_too_big:
- 3: cmp %o5, %o3
+ 3:
+ cmp %o5, %o3
blu 2b
- nop
+ nop
+
be Ldo_single_div
- nop
+ nop
/* NB: these are commented out in the V8-Sparc manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
@@ -113,19 +122,21 @@
Ldo_single_div:
subcc %g7, 1, %g7
bl Lend_regular_divide
- nop
+ nop
+
sub %o3, %o5, %o3
mov 1, %o2
+
b Lend_single_divloop
- nop
+ nop
Lsingle_divloop:
sll %o2, 1, %o2
bl 1f
- srl %o5, 1, %o5
+ srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
b 2f
- add %o2, 1, %o2
+ add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
@@ -133,214 +144,212 @@
Lend_single_divloop:
subcc %g7, 1, %g7
bge Lsingle_divloop
- tst %o3
+ tst %o3
+
b,a Lend_regular_divide
Lnot_really_big:
1:
sll %o5, 4, %o5
+
cmp %o5, %o3
bleu 1b
- addcc %o4, 1, %o4
+ addcc %o4, 1, %o4
+
be Lgot_result
- sub %o4, 1, %o4
+ sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
Ldivloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L.1.16
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L.2.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L.3.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L.4.23
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2+1), %o2
-
+ b 9f
+ add %o2, (7*2+1), %o2
+
L.4.23:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2-1), %o2
-
-
+ b 9f
+ add %o2, (7*2-1), %o2
+
L.3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L.4.21
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2+1), %o2
-
+ b 9f
+ add %o2, (5*2+1), %o2
+
L.4.21:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (5*2-1), %o2
+
L.2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L.3.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L.4.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2+1), %o2
-
+ b 9f
+ add %o2, (3*2+1), %o2
+
L.4.19:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2-1), %o2
-
-
+ b 9f
+ add %o2, (3*2-1), %o2
+
L.3.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L.4.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2+1), %o2
-
+ b 9f
+ add %o2, (1*2+1), %o2
+
L.4.17:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (1*2-1), %o2
+
L.1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L.2.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L.3.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L.4.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2+1), %o2
-
+ b 9f
+ add %o2, (-1*2+1), %o2
+
L.4.15:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2-1), %o2
-
-
+ b 9f
+ add %o2, (-1*2-1), %o2
+
L.3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L.4.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2+1), %o2
-
+ b 9f
+ add %o2, (-3*2+1), %o2
+
L.4.13:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (-3*2-1), %o2
+
L.2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L.3.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L.4.11
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2+1), %o2
-
+ b 9f
+ add %o2, (-5*2+1), %o2
+
L.4.11:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2-1), %o2
-
-
+ b 9f
+ add %o2, (-5*2-1), %o2
+
L.3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L.4.9
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2+1), %o2
-
+ b 9f
+ add %o2, (-7*2+1), %o2
+
L.4.9:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (-7*2-1), %o2
+
9:
Lend_regular_divide:
subcc %o4, 1, %o4
bge Ldivloop
- tst %o3
+ tst %o3
+
bl,a Lgot_result
! non-restoring fixup here (one instruction only!)
sub %o2, 1, %o2
-
Lgot_result:
retl
- mov %o2, %o0
+ mov %o2, %o0
+
+ .globl .udiv_patch
+.udiv_patch:
+ wr %g0, 0x0, %y
+ nop
+ nop
+ retl
+ udiv %o0, %o1, %o0
+ nop
diff --git a/arch/sparc/lib/umul.S b/arch/sparc/lib/umul.S
index 24f7c3cda..a784720a8 100644
--- a/arch/sparc/lib/umul.S
+++ b/arch/sparc/lib/umul.S
@@ -1,4 +1,5 @@
-/* umul.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: umul.S,v 1.4 1996/09/30 02:22:39 davem Exp $
+ * umul.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -23,9 +24,10 @@
.umul:
or %o0, %o1, %o4
mov %o0, %y ! multiplier -> Y
+
andncc %o4, 0xfff, %g0 ! test bits 12..31 of *both* args
be Lmul_shortway ! if zero, can do it the short way
- andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
+ andcc %g0, %g0, %o4 ! zero the partial product and clear N and V
/*
* Long multiply. 32 steps, followed by a final shift step.
@@ -102,17 +104,19 @@
#if 0
tst %o1
bl,a 1f ! if %o1 < 0 (high order bit = 1),
- add %o4, %o0, %o4 ! %o4 += %o0 (add y to upper half)
-1: rd %y, %o0 ! get lower half of product
+ add %o4, %o0, %o4 ! %o4 += %o0 (add y to upper half)
+
+1:
+ rd %y, %o0 ! get lower half of product
retl
- addcc %o4, %g0, %o1 ! put upper half in place and set Z for %o1==0
+ addcc %o4, %g0, %o1 ! put upper half in place and set Z for %o1==0
#else
/* Faster code from tege@sics.se. */
sra %o1, 31, %o2 ! make mask from sign bit
and %o0, %o2, %o2 ! %o2 = 0 or %o0, depending on sign of %o1
rd %y, %o0 ! get lower half of product
retl
- addcc %o4, %o2, %o1 ! add compensation and put upper half in place
+ addcc %o4, %o2, %o1 ! add compensation and put upper half in place
#endif
Lmul_shortway:
@@ -155,4 +159,11 @@ Lmul_shortway:
srl %o5, 20, %o5 ! shift low bits right 20
or %o5, %o0, %o0
retl
- addcc %g0, %g0, %o1 ! %o1 = zero, and set Z
+ addcc %g0, %g0, %o1 ! %o1 = zero, and set Z
+
+ .globl .umul_patch
+.umul_patch:
+ umul %o0, %o1, %o0
+ retl
+ rd %y, %o1
+ nop
diff --git a/arch/sparc/lib/urem.S b/arch/sparc/lib/urem.S
index c84aa81e5..ec7f0c502 100644
--- a/arch/sparc/lib/urem.S
+++ b/arch/sparc/lib/urem.S
@@ -1,4 +1,5 @@
-/* urem.S: This routine was taken from glibc-1.09 and is covered
+/* $Id: urem.S,v 1.4 1996/09/30 02:22:42 davem Exp $
+ * urem.S: This routine was taken from glibc-1.09 and is covered
* by the GNU Library General Public License Version 2.
*/
@@ -45,22 +46,24 @@
! Ready to divide. Compute size of quotient; scale comparand.
orcc %o1, %g0, %o5
bne 1f
- mov %o0, %o3
+ mov %o0, %o3
! Divide by zero trap. If it returns, return 0 (about as
! wrong as possible, but that is what SunOS does...).
ta ST_DIV0
retl
- clr %o0
+ clr %o0
1:
cmp %o3, %o5 ! if %o1 exceeds %o0, done
blu Lgot_result ! (and algorithm fails otherwise)
- clr %o2
+ clr %o2
+
sethi %hi(1 << (32 - 4 - 1)), %g1
+
cmp %o3, %g1
blu Lnot_really_big
- clr %o4
+ clr %o4
! Here the dividend is >= 2**(31-N) or so. We must be careful here,
! as our usual N-at-a-shot divide step will cause overflow and havoc.
@@ -70,15 +73,18 @@
1:
cmp %o5, %g1
bgeu 3f
- mov 1, %g7
+ mov 1, %g7
+
sll %o5, 4, %o5
+
b 1b
- add %o4, 1, %o4
+ add %o4, 1, %o4
! Now compute %g7.
- 2: addcc %o5, %o5, %o5
+ 2:
+ addcc %o5, %o5, %o5
bcc Lnot_too_big
- add %g7, 1, %g7
+ add %g7, 1, %g7
! We get here if the %o1 overflowed while shifting.
! This means that %o3 has the high-order bit set.
@@ -86,15 +92,18 @@
sll %g1, 4, %g1 ! high order bit
srl %o5, 1, %o5 ! rest of %o5
add %o5, %g1, %o5
+
b Ldo_single_div
- sub %g7, 1, %g7
+ sub %g7, 1, %g7
Lnot_too_big:
- 3: cmp %o5, %o3
+ 3:
+ cmp %o5, %o3
blu 2b
- nop
+ nop
+
be Ldo_single_div
- nop
+ nop
/* NB: these are commented out in the V8-Sparc manual as well */
/* (I do not understand this) */
! %o5 > %o3: went too far: back up 1 step
@@ -111,19 +120,21 @@
Ldo_single_div:
subcc %g7, 1, %g7
bl Lend_regular_divide
- nop
+ nop
+
sub %o3, %o5, %o3
mov 1, %o2
+
b Lend_single_divloop
- nop
+ nop
Lsingle_divloop:
sll %o2, 1, %o2
bl 1f
- srl %o5, 1, %o5
+ srl %o5, 1, %o5
! %o3 >= 0
sub %o3, %o5, %o3
b 2f
- add %o2, 1, %o2
+ add %o2, 1, %o2
1: ! %o3 < 0
add %o3, %o5, %o3
sub %o2, 1, %o2
@@ -131,214 +142,214 @@
Lend_single_divloop:
subcc %g7, 1, %g7
bge Lsingle_divloop
- tst %o3
+ tst %o3
+
b,a Lend_regular_divide
Lnot_really_big:
1:
sll %o5, 4, %o5
+
cmp %o5, %o3
bleu 1b
- addcc %o4, 1, %o4
+ addcc %o4, 1, %o4
+
be Lgot_result
- sub %o4, 1, %o4
+ sub %o4, 1, %o4
tst %o3 ! set up for initial iteration
Ldivloop:
sll %o2, 4, %o2
! depth 1, accumulated bits 0
bl L.1.16
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 2, accumulated bits 1
bl L.2.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits 3
bl L.3.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 7
bl L.4.23
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2+1), %o2
-
+ b 9f
+ add %o2, (7*2+1), %o2
+
L.4.23:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (7*2-1), %o2
-
-
+ b 9f
+ add %o2, (7*2-1), %o2
+
L.3.19:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 5
bl L.4.21
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2+1), %o2
-
+ b 9f
+ add %o2, (5*2+1), %o2
+
L.4.21:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (5*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (5*2-1), %o2
+
L.2.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits 1
bl L.3.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits 3
bl L.4.19
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2+1), %o2
-
+ b 9f
+ add %o2, (3*2+1), %o2
+
L.4.19:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (3*2-1), %o2
-
-
+ b 9f
+ add %o2, (3*2-1), %o2
+
L.3.17:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits 1
bl L.4.17
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2+1), %o2
+ b 9f
+ add %o2, (1*2+1), %o2
L.4.17:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (1*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (1*2-1), %o2
+
L.1.16:
! remainder is negative
addcc %o3,%o5,%o3
! depth 2, accumulated bits -1
bl L.2.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 3, accumulated bits -1
bl L.3.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -1
bl L.4.15
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2+1), %o2
-
+ b 9f
+ add %o2, (-1*2+1), %o2
+
L.4.15:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-1*2-1), %o2
-
-
+ b 9f
+ add %o2, (-1*2-1), %o2
+
L.3.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -3
bl L.4.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2+1), %o2
-
+ b 9f
+ add %o2, (-3*2+1), %o2
+
L.4.13:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-3*2-1), %o2
-
-
-
+ b 9f
+ add %o2, (-3*2-1), %o2
+
L.2.15:
! remainder is negative
addcc %o3,%o5,%o3
! depth 3, accumulated bits -3
bl L.3.13
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
! depth 4, accumulated bits -5
bl L.4.11
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2+1), %o2
+ b 9f
+ add %o2, (-5*2+1), %o2
L.4.11:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-5*2-1), %o2
-
-
+ b 9f
+ add %o2, (-5*2-1), %o2
+
L.3.13:
! remainder is negative
addcc %o3,%o5,%o3
! depth 4, accumulated bits -7
bl L.4.9
- srl %o5,1,%o5
+ srl %o5,1,%o5
! remainder is positive
subcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2+1), %o2
-
+ b 9f
+ add %o2, (-7*2+1), %o2
+
L.4.9:
! remainder is negative
addcc %o3,%o5,%o3
- b 9f
- add %o2, (-7*2-1), %o2
-
-
-
-
+ b 9f
+ add %o2, (-7*2-1), %o2
+
9:
Lend_regular_divide:
subcc %o4, 1, %o4
bge Ldivloop
- tst %o3
+ tst %o3
+
bl,a Lgot_result
! non-restoring fixup here (one instruction only!)
add %o3, %o1, %o3
-
Lgot_result:
retl
- mov %o3, %o0
+ mov %o3, %o0
+
+ .globl .urem_patch
+.urem_patch:
+ wr %g0, 0x0, %y
+ nop
+ nop
+ nop
+ udiv %o0, %o1, %o2
+ umul %o2, %o1, %o2
+ retl
+ sub %o0, %o2, %o0
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index a4148d013..13652e467 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -1,4 +1,4 @@
-#
+# $Id: Makefile,v 1.21 1996/04/26 10:45:53 tridge Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -7,26 +7,7 @@
#
# Note 2! The CFLAGS definition is now in the main makefile...
-.c.o:
- $(CC) $(CFLAGS) -c $<
-.s.o:
- $(AS) -o $*.o $<
-.c.s:
- $(CC) $(CFLAGS) -S $<
-
-OBJS = fault.o vac-flush.o init.o
-
-mm.o: $(OBJS)
- $(LD) -r -o mm.o $(OBJS)
-
-modules:
+O_TARGET := mm.o
+O_OBJS := fault.o init.o sun4c.o srmmu.o loadmmu.o generic.o asyncd.o
-dep:
- $(CPP) -M *.c > .depend
-
-#
-# include a dependency file if one exists
-#
-ifeq (.depend,$(wildcard .depend))
-include .depend
-endif
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/mm/asyncd.c b/arch/sparc/mm/asyncd.c
new file mode 100644
index 000000000..d6ed42252
--- /dev/null
+++ b/arch/sparc/mm/asyncd.c
@@ -0,0 +1,189 @@
+/* $Id: asyncd.c,v 1.8 1996/09/21 04:30:12 davem Exp $
+ * The asyncd kernel daemon. This handles paging on behalf of
+ * processes that receive page faults due to remote (async) memory
+ * accesses.
+ *
+ * Idea and skeleton code courtesy of David Miller (bless his cotton socks)
+ *
+ * Implemented by tridge
+ */
+
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+
+#include <asm/dma.h>
+#include <asm/system.h> /* for cli()/sti() */
+#include <asm/segment.h> /* for memcpy_to/fromfs */
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+
+/*
+ * The wait queue for waking up the async daemon:
+ */
+static struct wait_queue * asyncd_wait = NULL;
+
+struct async_job {
+ volatile struct async_job *next;
+ int taskid;
+ struct mm_struct *mm;
+ unsigned long address;
+ int write;
+ void (*callback)(int,unsigned long,int,int);
+};
+
+static volatile struct async_job *async_queue = NULL;
+static volatile struct async_job *async_queue_end = NULL;
+
+static void add_to_async_queue(int taskid,
+ struct mm_struct *mm,
+ unsigned long address,
+ int write,
+ void (*callback)(int,unsigned long,int,int))
+{
+ struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
+
+ if (!a)
+ panic("out of memory in asyncd\n");
+
+ a->next = NULL;
+ a->taskid = taskid;
+ a->mm = mm;
+ a->address = address;
+ a->write = write;
+ a->callback = callback;
+
+ if (!async_queue) {
+ async_queue = a;
+ } else {
+ async_queue_end->next = a;
+ }
+ async_queue_end = a;
+}
+
+
+void async_fault(unsigned long address, int write, int taskid,
+ void (*callback)(int,unsigned long,int,int))
+{
+ struct task_struct *tsk = task[taskid];
+ struct mm_struct *mm = tsk->mm;
+
+#if 0
+ printk("paging in %x for task=%d\n",address,taskid);
+#endif
+ add_to_async_queue(taskid, mm, address, write, callback);
+ wake_up(&asyncd_wait);
+}
+
+static int fault_in_page(int taskid,
+ struct vm_area_struct *vma,
+ unsigned address,int write)
+{
+ struct task_struct *tsk = task[taskid];
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!tsk || !tsk->mm)
+ return 1;
+
+ if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
+ goto bad_area;
+ if (vma->vm_start > address)
+ goto bad_area;
+
+ pgd = pgd_offset(vma->vm_mm, address);
+ pmd = pmd_alloc(pgd,address);
+ if(!pmd)
+ goto no_memory;
+ pte = pte_alloc(pmd, address);
+ if(!pte)
+ goto no_memory;
+ if(!pte_present(*pte)) {
+ do_no_page(tsk, vma, address, write);
+ goto finish_up;
+ }
+ set_pte(pte, pte_mkyoung(*pte));
+ flush_tlb_page(vma, address);
+ if(!write)
+ goto finish_up;
+ if(pte_write(*pte)) {
+ set_pte(pte, pte_mkdirty(*pte));
+ flush_tlb_page(vma, address);
+ goto finish_up;
+ }
+ do_wp_page(tsk, vma, address, write);
+
+ /* Fall through for do_wp_page */
+finish_up:
+ update_mmu_cache(vma, address, *pte);
+ return 0;
+
+no_memory:
+ oom(tsk);
+ return 1;
+
+bad_area:
+ tsk->tss.sig_address = address;
+ tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ send_sig(SIGSEGV, tsk, 1);
+ return 1;
+}
+
+/* Note the semaphore operations must be done here, and _not_
+ * in async_fault().
+ */
+static void run_async_queue(void)
+{
+ int ret;
+ while (async_queue) {
+ volatile struct async_job *a = async_queue;
+ struct mm_struct *mm = a->mm;
+ struct vm_area_struct *vma;
+ async_queue = async_queue->next;
+ down(&mm->mmap_sem);
+ vma = find_vma(mm, a->address);
+ ret = fault_in_page(a->taskid,vma,a->address,a->write);
+ a->callback(a->taskid,a->address,a->write,ret);
+ up(&mm->mmap_sem);
+ kfree_s((void *)a,sizeof(*a));
+ }
+}
+
+
+
+
+/*
+ * The background async daemon.
+ * Started as a kernel thread from the init process.
+ */
+int asyncd(void *unused)
+{
+ current->session = 1;
+ current->pgrp = 1;
+ sprintf(current->comm, "asyncd");
+ current->blocked = ~0UL; /* block all signals */
+
+ /* Give kswapd a realtime priority. */
+ current->policy = SCHED_FIFO;
+ current->priority = 32; /* Fixme --- we need to standardise our
+ namings for POSIX.4 realtime scheduling
+ priorities. */
+
+ printk("Started asyncd\n");
+
+ while (1) {
+ current->signal = 0;
+ interruptible_sleep_on(&asyncd_wait);
+ run_async_queue();
+ }
+}
+
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 4c5fd0bc3..8c8755ce5 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -1,26 +1,40 @@
+/* $Id: fault.c,v 1.77 1996/10/28 00:56:02 davem Exp $
+ * fault.c: Page fault handlers for the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <asm/head.h>
+
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
+#include <linux/tasks.h>
+#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <asm/system.h>
#include <asm/segment.h>
-#include <asm/openprom.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/memreg.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/traps.h>
+#include <asm/kdebug.h>
-extern unsigned long pg0[1024]; /* page table for 0-4MB for everybody */
-extern struct sparc_phys_banks sp_banks[14];
-
-extern void die_if_kernel(char *,struct pt_regs *,long);
+#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
-struct linux_romvec *romvec;
+extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+extern int prom_node_root;
-/* foo */
+extern void die_if_kernel(char *,struct pt_regs *);
-int tbase_needs_unmapping;
+struct linux_romvec *romvec;
/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
@@ -35,139 +49,297 @@ int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
-/*
- * Define this if things work differently on a i386 and a i486:
- * it will (on a i486) warn about kernel memory accesses that are
- * done without a 'verify_area(VERIFY_WRITE,..)'
- */
-#undef CONFIG_TEST_VERIFY_AREA
+/* Nice, simple, prom library does all the sweating for us. ;) */
+int prom_probe_memory (void)
+{
+ register struct linux_mlist_v0 *mlist;
+ register unsigned long bytes, base_paddr, tally;
+ register int i;
+
+ i = 0;
+ mlist= *prom_meminfo()->v0_available;
+ bytes = tally = mlist->num_bytes;
+ base_paddr = (unsigned long) mlist->start_adr;
+
+ sp_banks[0].base_addr = base_paddr;
+ sp_banks[0].num_bytes = bytes;
+
+ while (mlist->theres_more != (void *) 0){
+ i++;
+ mlist = mlist->theres_more;
+ bytes = mlist->num_bytes;
+ tally += bytes;
+ if (i >= SPARC_PHYS_BANKS-1) {
+ printk ("The machine has more banks that this kernel can support\n"
+ "Increase the SPARC_PHYS_BANKS setting (currently %d)\n",
+ SPARC_PHYS_BANKS);
+ i = SPARC_PHYS_BANKS-1;
+ break;
+ }
+
+ sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
+ sp_banks[i].num_bytes = mlist->num_bytes;
+ }
+
+ i++;
+ sp_banks[i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+
+ /* Now mask all bank sizes on a page boundary, it is all we can
+ * use anyways.
+ */
+ for(i=0; sp_banks[i].num_bytes != 0; i++)
+ sp_banks[i].num_bytes &= PAGE_MASK;
+
+ return tally;
+}
/* Traverse the memory lists in the prom to see how much physical we
* have.
*/
-
unsigned long
probe_memory(void)
{
- register struct linux_romvec *lprom;
- register struct linux_mlist_v0 *mlist;
- register unsigned long bytes, base_paddr, tally;
- register int i;
-
- bytes = tally = 0;
- base_paddr = 0;
- i=0;
- lprom = romvec;
- switch(lprom->pv_romvers)
- {
- case 0:
- mlist=(*(lprom->pv_v0mem.v0_totphys));
- bytes = tally = mlist->num_bytes;
- base_paddr = (unsigned long) mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- if(mlist->theres_more != (void *)0) {
- i++;
- mlist=mlist->theres_more;
- bytes=mlist->num_bytes;
- tally += bytes;
- sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
- break;
- case 2:
- printk("no v2 memory probe support yet.\n");
- (*(lprom->pv_halt))();
- break;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeef;
- sp_banks[i].num_bytes = 0;
-
- return tally;
-}
+ int total;
-/* Sparc routine to reserve the mapping of the open boot prom */
+ total = prom_probe_memory();
-/* uncomment this for FAME and FORTUNE! */
-/* #define DEBUG_MAP_PROM */
+ /* Oh man, much nicer, keep the dirt in promlib. */
+ return total;
+}
-int
-map_the_prom(int curr_num_segs)
-{
- register unsigned long prom_va_begin;
- register unsigned long prom_va_end;
- register int segmap_entry, i;
+extern void sun4c_complete_all_stores(void);
- prom_va_begin = LINUX_OPPROM_BEGVM;
- prom_va_end = LINUX_OPPROM_ENDVM;
+/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
+asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
+ unsigned long svaddr, unsigned long aerr,
+ unsigned long avaddr)
+{
+ sun4c_complete_all_stores();
+ printk("FAULT: NMI received\n");
+ printk("SREGS: Synchronous Error %08lx\n", serr);
+ printk(" Synchronous Vaddr %08lx\n", svaddr);
+ printk(" Asynchronous Error %08lx\n", aerr);
+ printk(" Asynchronous Vaddr %08lx\n", avaddr);
+ if (sun4c_memerr_reg)
+ printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
+ printk("REGISTER DUMP:\n");
+ show_regs(regs);
+ prom_halt();
+}
-#ifdef DEBUG_MAP_PROM
- printk("\ncurr_num_segs = 0x%x\n", curr_num_segs);
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ int from_user = !(regs->psr & PSR_PS);
+#if 0
+ static unsigned long last_one;
#endif
- while( prom_va_begin < prom_va_end)
- {
- segmap_entry=get_segmap(prom_va_begin);
-
- curr_num_segs = ((segmap_entry<curr_num_segs)
- ? segmap_entry : curr_num_segs);
-
- for(i = num_contexts; --i > 0;)
- (*romvec->pv_setctxt)(i, (char *) prom_va_begin,
- segmap_entry);
-
- if(segmap_entry == invalid_segment)
- {
-
-#ifdef DEBUG_MAP_PROM
- printk("invalid_segments, virt_addr 0x%x\n", prom_va_begin);
+ down(&mm->mmap_sem);
+ if(text_fault)
+ address = regs->pc;
+
+#if 0
+ if(current->tss.ex.count) {
+ printk("f<pid=%d,tf=%d,wr=%d,addr=%08lx,pc=%08lx>\n",
+ tsk->pid, text_fault, write, address, regs->pc);
+ printk("EX: count<%d> pc<%08lx> expc<%08lx> address<%08lx>\n",
+ (int) current->tss.ex.count, current->tss.ex.pc,
+ current->tss.ex.expc, current->tss.ex.address);
+#if 0
+ if(last_one == address) {
+ printk("Twice in a row, AIEEE. Spinning so you can see the dump.\n");
+ show_regs(regs);
+ sti();
+ while(1)
+ barrier();
+ }
+ last_one = address;
#endif
-
- prom_va_begin += 0x40000; /* num bytes per segment entry */
- continue;
}
-
- /* DUH, prom maps itself so that users can access it. This is
- * broken.
- */
-
-#ifdef DEBUG_MAP_PROM
- printk("making segmap for prom privileged, va = 0x%x\n",
- prom_va_begin);
#endif
+ /* Now actually handle the fault. Do kernel faults special,
+ * because on the sun4c we could have faulted trying to read
+ * the vma area of the task and without the following code
+ * we'd fault recursively until all our stack is gone. ;-(
+ */
+ if(!from_user && address >= PAGE_OFFSET) {
+ quick_kernel_fault(address);
+ return;
+ }
- for(i = 0x40; --i >= 0; prom_va_begin+=4096)
- {
- put_pte(prom_va_begin, get_pte(prom_va_begin) | 0x20000000);
+ vma = find_vma(mm, address);
+ if(!vma)
+ goto bad_area;
+ if(vma->vm_start <= address)
+ goto good_area;
+ if(!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if(expand_stack(vma, address))
+ goto bad_area;
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ if(write) {
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+ handle_mm_fault(vma, address, write);
+ up(&mm->mmap_sem);
+ return;
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up(&mm->mmap_sem);
+ /* Did we have an exception handler installed? */
+ if(current->tss.ex.count == 1) {
+ if(from_user) {
+ printk("Yieee, exception signalled from user mode.\n");
+ } else {
+ /* Set pc to %g1, set %g1 to -EFAULT and %g2 to
+ * the faulting address so we can cleanup.
+ */
+ printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
+ printk("EX: count<%d> pc<%08lx> expc<%08lx> address<%08lx>\n",
+ (int) current->tss.ex.count, current->tss.ex.pc,
+ current->tss.ex.expc, current->tss.ex.address);
+ current->tss.ex.count = 0;
+ regs->pc = current->tss.ex.expc;
+ regs->npc = regs->pc + 4;
+ regs->u_regs[UREG_G1] = -EFAULT;
+ regs->u_regs[UREG_G2] = address - current->tss.ex.address;
+ regs->u_regs[UREG_G3] = current->tss.ex.pc;
+ return;
+ }
}
+ if(from_user) {
+#if 0
+ printk("Fault whee %s [%d]: segfaults at %08lx pc=%08lx\n",
+ tsk->comm, tsk->pid, address, regs->pc);
+#endif
+ tsk->tss.sig_address = address;
+ tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ send_sig(SIGSEGV, tsk, 1);
+ return;
+ }
+ if((unsigned long) address < PAGE_SIZE) {
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ } else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(KERN_ALERT " at virtual address %08lx\n",address);
+ printk(KERN_ALERT "tsk->mm->context = %08lx\n",
+ (unsigned long) tsk->mm->context);
+ printk(KERN_ALERT "tsk->mm->pgd = %08lx\n",
+ (unsigned long) tsk->mm->pgd);
+ die_if_kernel("Oops", regs);
+}
- }
+asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+{
+ extern void sun4c_update_mmu_cache(struct vm_area_struct *,unsigned long,pte_t);
+ extern pgd_t *sun4c_pgd_offset(struct mm_struct *,unsigned long);
+ extern pte_t *sun4c_pte_offset(pmd_t *,unsigned long);
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ pgd_t *pgd;
+ pte_t *pte;
+
+ if(text_fault)
+ address = regs->pc;
+
+ pgd = sun4c_pgd_offset(mm, address);
+ pte = sun4c_pte_offset((pmd_t *) pgd, address);
+
+ /* This conditional is 'interesting'. */
+ if(pgd_val(*pgd) && !(write && !(pte_val(*pte) & _SUN4C_PAGE_WRITE))
+ && (pte_val(*pte) & _SUN4C_PAGE_VALID))
+ /* XXX Very bad, can't do this optimization when VMA arg is actually
+ * XXX used by update_mmu_cache()!
+ */
+ sun4c_update_mmu_cache((struct vm_area_struct *) 0, address, *pte);
+ else
+ do_sparc_fault(regs, text_fault, write, address);
+}
- printk("Mapped the PROM in all contexts...\n");
+/* This always deals with user addresses. */
+inline void force_user_fault(unsigned long address, int write)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
-#ifdef DEBUG_MAP_PROM
- printk("curr_num_segs = 0x%x\n", curr_num_segs);
+#if 0
+ printk("wf<pid=%d,wr=%d,addr=%08lx>\n",
+ tsk->pid, write, address);
#endif
+ down(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if(!vma)
+ goto bad_area;
+ if(vma->vm_start <= address)
+ goto good_area;
+ if(!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if(expand_stack(vma, address))
+ goto bad_area;
+good_area:
+ if(write)
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ else
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ handle_mm_fault(vma, address, write);
+ up(&mm->mmap_sem);
+ return;
+bad_area:
+ up(&mm->mmap_sem);
+#if 0
+ printk("Window whee %s [%d]: segfaults at %08lx\n",
+ tsk->comm, tsk->pid, address);
+#endif
+ tsk->tss.sig_address = address;
+ tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ send_sig(SIGSEGV, tsk, 1);
+ return;
+}
- return curr_num_segs;
+void window_overflow_fault(void)
+{
+ unsigned long sp;
+ sp = current->tss.rwbuf_stkptrs[0];
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 1);
+ force_user_fault(sp, 1);
}
-/*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
+void window_underflow_fault(unsigned long sp)
{
- die_if_kernel("Oops", regs, error_code);
- do_exit(SIGKILL);
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 0);
+ force_user_fault(sp, 0);
}
+void window_ret_fault(struct pt_regs *regs)
+{
+ unsigned long sp;
-
-
+ sp = regs->u_regs[UREG_FP];
+ if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+ force_user_fault(sp + 0x38, 0);
+ force_user_fault(sp, 0);
+}
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
new file mode 100644
index 000000000..0c202fdeb
--- /dev/null
+++ b/arch/sparc/mm/generic.c
@@ -0,0 +1,124 @@
+/* $Id: generic.c,v 1.4 1996/10/27 08:36:41 davem Exp $
+ * generic.c: Generic Sparc mm routines that are not dependent upon
+ * MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+
+/* Allocate a block of RAM which is aligned to its size.
+ * This procedure can be used until the call to mem_init().
+ */
+void *sparc_init_alloc(unsigned long *kbrk, unsigned long size)
+{
+ unsigned long mask = size - 1;
+ unsigned long ret;
+
+ if(!size)
+ return 0x0;
+ if(size & mask) {
+ prom_printf("panic: sparc_init_alloc botch\n");
+ prom_halt();
+ }
+ ret = (*kbrk + mask) & ~mask;
+ *kbrk = ret + size;
+ memset((void*) ret, 0, size);
+ return (void*) ret;
+}
+
+static inline void forget_pte(pte_t page)
+{
+ if (pte_none(page))
+ return;
+ if (pte_present(page)) {
+ unsigned long addr = pte_page(page);
+ if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
+ return;
+ free_page(addr);
+ if (current->mm->rss <= 0)
+ return;
+ current->mm->rss--;
+ return;
+ }
+ swap_free(pte_val(page));
+}
+
+/* Remap IO memory, the same way as remap_page_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
+static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ pte_t oldpage = *pte;
+ pte_clear(pte);
+ set_pte(pte, mk_pte_io(offset, prot, space));
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ offset += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long offset, pgprot_t prot, int space)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ offset -= address;
+ do {
+ pte_t * pte = pte_alloc(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+int io_remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
+{
+ int error = 0;
+ pgd_t * dir;
+ unsigned long beg = from;
+ unsigned long end = from + size;
+
+ pgprot_val(prot) = pg_iobits;
+ offset -= from;
+ dir = pgd_offset(current->mm, from);
+ flush_cache_range(current->mm, beg, end);
+ while (from < end) {
+ pmd_t *pmd = pmd_alloc(dir, from);
+ error = -ENOMEM;
+ if (!pmd)
+ break;
+ error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
+ if (error)
+ break;
+ from = (from + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_range(current->mm, beg, end);
+ return error;
+}
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index a65e9e094..41ac6c194 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,7 +1,8 @@
-/*
+/* $Id: init.c,v 1.42 1996/10/27 08:36:44 davem Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/config.h>
@@ -15,29 +16,28 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
+#include <linux/swap.h>
+#ifdef CONFIG_BLK_DEV_INITRD
+#include <linux/blk.h>
+#endif
#include <asm/system.h>
#include <asm/segment.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
-extern void scsi_mem_init(unsigned long);
-extern void sound_mem_init(void);
-extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
-extern int map_the_prom(int);
-
-struct sparc_phys_banks sp_banks[14];
-unsigned long *sun4c_mmu_table;
-extern int invalid_segment, num_segmaps, num_contexts;
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+unsigned long sparc_unmapped_base;
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
* do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving a inode
+ * for a process dying in kernel mode, possibly leaving an inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
@@ -58,29 +58,23 @@ pte_t __bad_page(void)
return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
}
-unsigned long __zero_page(void)
-{
- memset((void *) ZERO_PGE, 0, PAGE_SIZE);
- return ZERO_PGE;
-}
-
void show_mem(void)
{
int i,free = 0,total = 0,reserved = 0;
int shared = 0;
- printk("Mem-info:\n");
+ printk("\nMem-info:\n");
show_free_areas();
printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- i = high_memory >> PAGE_SHIFT;
+ i = max_mapnr;
while (i-- > 0) {
total++;
- if (mem_map[i] & MAP_PAGE_RESERVED)
+ if (PageReserved(mem_map + i))
reserved++;
- else if (!mem_map[i])
+ else if (!mem_map[i].count)
free++;
else
- shared += mem_map[i]-1;
+ shared += mem_map[i].count-1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
@@ -92,273 +86,189 @@ void show_mem(void)
#endif
}
-extern unsigned long free_area_init(unsigned long, unsigned long);
+extern pgprot_t protection_map[16];
+
+unsigned long sparc_context_init(unsigned long start_mem, int numctx)
+{
+ int ctx;
+
+ ctx_list_pool = (struct ctx_list *) start_mem;
+ start_mem += (numctx * sizeof(struct ctx_list));
+ for(ctx = 0; ctx < numctx; ctx++) {
+ struct ctx_list *clist;
+
+ clist = (ctx_list_pool + ctx);
+ clist->ctx_number = ctx;
+ clist->ctx_mm = 0;
+ }
+ ctx_free.next = ctx_free.prev = &ctx_free;
+ ctx_used.next = ctx_used.prev = &ctx_used;
+ for(ctx = 0; ctx < numctx; ctx++)
+ add_to_free_ctxlist(ctx_list_pool + ctx);
+ return start_mem;
+}
/*
- * paging_init() sets up the page tables: in the alpha version this actually
- * unmaps the bootup page table (as we're now in KSEG, so we don't need it).
+ * paging_init() sets up the page tables: We call the MMU specific
+ * init routine based upon the Sun model type on the Sparc.
*
- * The bootup sequence put the virtual page table into high memory: that
- * means that we can change the L1 page table by just using VL1p below.
*/
+extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
+extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
+extern unsigned long device_scan(unsigned long);
unsigned long paging_init(unsigned long start_mem, unsigned long end_mem)
{
- unsigned long i, a, b, mask=0;
- unsigned long curseg, curpte, num_inval;
- unsigned long address;
- pte_t *pg_table;
+ switch(sparc_cpu_model) {
+ case sun4c:
+ case sun4e:
+ start_mem = sun4c_paging_init(start_mem, end_mem);
+ sparc_unmapped_base = 0xe0000000;
+ break;
+ case sun4m:
+ case sun4d:
+ start_mem = srmmu_paging_init(start_mem, end_mem);
+ sparc_unmapped_base = 0x50000000;
+ break;
+ default:
+ prom_printf("paging_init: Cannot init paging on this Sparc\n");
+ prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
+ prom_printf("paging_init: Halting...\n");
+ prom_halt();
+ };
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+ protection_map[1] = PAGE_READONLY;
+ protection_map[2] = PAGE_COPY;
+ protection_map[3] = PAGE_COPY;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+ protection_map[9] = PAGE_READONLY;
+ protection_map[10] = PAGE_SHARED;
+ protection_map[11] = PAGE_SHARED;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+ protection_map[15] = PAGE_SHARED;
+ return device_scan(start_mem);
+}
- register int num_segs, num_ctx;
- register char * c;
+struct cache_palias *sparc_aliases;
- num_segs = num_segmaps;
- num_ctx = num_contexts;
+extern int min_free_pages;
+extern int free_pages_low;
+extern int free_pages_high;
+extern void srmmu_frob_mem_map(unsigned long);
- num_segs -= 1;
- invalid_segment = num_segs;
+int physmem_mapped_contig = 1;
- start_mem = free_area_init(start_mem, end_mem);
+static void taint_real_pages(unsigned long start_mem, unsigned long end_mem)
+{
+ unsigned long addr, tmp2 = 0;
+
+ if(physmem_mapped_contig) {
+ for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+ if(addr >= KERNBASE && addr < start_mem)
+ addr = start_mem;
+ for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
+ unsigned long phys_addr = (addr - PAGE_OFFSET);
+ unsigned long base = sp_banks[tmp2].base_addr;
+ unsigned long limit = base + sp_banks[tmp2].num_bytes;
+
+ if((phys_addr >= base) && (phys_addr < limit) &&
+ ((phys_addr + PAGE_SIZE) < limit))
+ mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+ }
+ }
+ } else {
+ if((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) {
+ srmmu_frob_mem_map(start_mem);
+ } else {
+ for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE)
+ mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+ }
+ }
+}
-/* On the sparc we first need to allocate the segmaps for the
- * PROM's virtual space, and make those segmaps unusable. We
- * map the PROM in ALL contexts therefore the break key and the
- * sync command work no matter what state you took the machine
- * out of
- */
+void mem_init(unsigned long start_mem, unsigned long end_mem)
+{
+ int codepages = 0;
+ int datapages = 0;
+ unsigned long tmp2, addr;
+ extern char etext;
+
+ /* Saves us work later. */
+ memset((void *) ZERO_PAGE, 0, PAGE_SIZE);
- printk("mapping the prom...\n");
- num_segs = map_the_prom(num_segs);
+ end_mem &= PAGE_MASK;
+ max_mapnr = MAP_NR(end_mem);
+ high_memory = (void *) end_mem;
start_mem = PAGE_ALIGN(start_mem);
- /* Set up static page tables in kernel space, this will be used
- * so that the low-level page fault handler can fill in missing
- * TLB entries since all mmu entries cannot be loaded at once
- * on the sun4c.
- */
-
-#if 0
- /* ugly debugging code */
- for(i=0; i<40960; i+=PAGE_SIZE)
- printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
- (int) get_segmap(i), (unsigned int) get_pte(i));
-#endif
+ addr = KERNBASE;
+ while(addr < start_mem) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
+ mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+ else
+#endif
+ mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+ addr += PAGE_SIZE;
+ }
- printk("Setting up kernel static mmu table... bounce bounce\n");
-
- address = 0; /* ((unsigned long) &end) + 524288; */
- sun4c_mmu_table = (unsigned long *) start_mem;
- pg_table = (pte_t *) start_mem;
- curseg = curpte = num_inval = 0;
- while(address < end_mem) {
- if(curpte == 0)
- put_segmap((address&PGDIR_MASK), curseg);
- for(i=0; sp_banks[i].num_bytes != 0; i++)
- if((address >= sp_banks[i].base_addr) &&
- (address <= (sp_banks[i].base_addr + sp_banks[i].num_bytes)))
- goto good_address;
- /* No physical memory here, so set the virtual segment to
- * the invalid one, and put an invalid pte in the static
- * kernel table.
- */
- *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_INVALID);
- pg_table++; curpte++; num_inval++;
- if(curpte > 63) {
- if(curpte == num_inval) {
- put_segmap((address&PGDIR_MASK), invalid_segment);
- } else {
- put_segmap((address&PGDIR_MASK), curseg);
- curseg++;
- }
- curpte = num_inval = 0;
- }
- address += PAGE_SIZE;
- continue;
-
- good_address:
- /* create pte entry */
- if(address < (((unsigned long) &end) + 524288)) {
- pte_val(*pg_table) = get_pte(address);
- } else {
- *pg_table = mk_pte((address >> PAGE_SHIFT), PAGE_KERNEL);
- put_pte(address, pte_val(*pg_table));
- }
-
- pg_table++; curpte++;
- if(curpte > 63) {
- put_segmap((address&PGDIR_MASK), curseg);
- curpte = num_inval = 0;
- curseg++;
- }
- address += PAGE_SIZE;
- }
-
- start_mem = (unsigned long) pg_table;
- /* ok, allocate the kernel pages, map them in all contexts
- * (with help from the prom), and lock them. Isn't the sparc
- * fun kiddies? TODO
- */
-
-#if 0
- /* ugly debugging code */
- for(i=0x1a3000; i<(0x1a3000+40960); i+=PAGE_SIZE)
- printk("address=0x%x vseg=%d pte=0x%x\n", (unsigned int) i,
- (int) get_segmap(i), (unsigned int) get_pte(i));
- halt();
+ taint_real_pages(start_mem, end_mem);
+ for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+ if(PageReserved(mem_map + MAP_NR(addr))) {
+ if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
+ codepages++;
+ else if((addr < start_mem) && (addr >= KERNBASE))
+ datapages++;
+ continue;
+ }
+ mem_map[MAP_NR(addr)].count = 1;
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (!initrd_start ||
+ (addr < initrd_start || addr >= initrd_end))
#endif
+ free_page(addr);
+ }
- b=PGDIR_ALIGN(start_mem)>>18;
- c= (char *)0x0;
-
- printk("mapping kernel in all contexts...\n");
-
- for(a=0; a<b; a++)
- {
- for(i=0; i<num_contexts; i++)
- {
- /* map the kernel virt_addrs */
- (*(romvec->pv_setctxt))(i, (char *) c, a);
- }
- c += 0x40000;
- }
-
- /* Ok, since now mapped in all contexts, we can free up
- * context zero to be used amongst user processes.
- */
-
- /* free context 0 here TODO */
-
- /* invalidate all user pages and initialize the pte struct
- * for userland. TODO
- */
-
- /* Make the kernel text unwritable and cacheable, the prom
- * loaded our text as writable, only sneaky sunos kernels need
- * self-modifying code.
- */
-
- a= (unsigned long) &etext;
- mask=~(PTE_NC|PTE_W); /* make cacheable + not writable */
-
- /* must do for every segment since kernel uses all contexts
- * and unlike some sun kernels I know of, we can't hard wire
- * context 0 just for the kernel, that is unnecessary.
- */
-
- for(i=0; i<8; i++)
- {
- b=PAGE_ALIGN((unsigned long) &trapbase);
-
- switch_to_context(i);
-
- for(;b<a; b+=4096)
- {
- put_pte(b, (get_pte(b) & mask));
- }
- }
-
- invalidate(); /* flush the virtual address cache */
-
- printk("\nCurrently in context - ");
- for(i=0; i<num_contexts; i++)
- {
- switch_to_context(i);
- printk("%d ", (int) i);
- }
- printk("\n");
-
- switch_to_context(0);
+ tmp2 = nr_free_pages << PAGE_SHIFT;
- invalidate();
- return start_mem;
-}
-
-void mem_init(unsigned long start_mem, unsigned long end_mem)
-{
- unsigned long start_low_mem = PAGE_SIZE;
- int codepages = 0;
- int reservedpages = 0;
- int datapages = 0;
- int i = 0;
- unsigned long tmp, limit, tmp2, addr;
- extern char etext;
-
- end_mem &= PAGE_MASK;
- high_memory = end_mem;
-
- start_low_mem = PAGE_ALIGN(start_low_mem);
- start_mem = PAGE_ALIGN(start_mem);
-
- for(i = 0; sp_banks[i].num_bytes != 0; i++) {
- tmp = sp_banks[i].base_addr;
- limit = (sp_banks[i].base_addr + sp_banks[i].num_bytes);
- if(tmp<start_mem) {
- if(limit>start_mem)
- tmp = start_mem;
- else continue;
- }
-
- while(tmp<limit) {
- mem_map[MAP_NR(tmp)] = 0;
- tmp += PAGE_SIZE;
- }
- if(sp_banks[i+1].num_bytes != 0)
- while(tmp < sp_banks[i+1].base_addr) {
- mem_map[MAP_NR(tmp)] = MAP_PAGE_RESERVED;
- tmp += PAGE_SIZE;
- }
- }
-
-#ifdef CONFIG_SCSI
- scsi_mem_init(high_memory);
-#endif
+ printk("Memory: %luk available (%dk kernel code, %dk data) [%08lx,%08lx]\n",
+ tmp2 >> 10,
+ codepages << (PAGE_SHIFT-10),
+ datapages << (PAGE_SHIFT-10), PAGE_OFFSET, end_mem);
- for (addr = 0; addr < high_memory; addr += PAGE_SIZE) {
- if(mem_map[MAP_NR(addr)]) {
- if (addr < (unsigned long) &etext)
- codepages++;
- else if(addr < start_mem)
- datapages++;
- else
- reservedpages++;
- continue;
- }
- mem_map[MAP_NR(addr)] = 1;
- free_page(addr);
- }
-
- tmp2 = nr_free_pages << PAGE_SHIFT;
-
- printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data)\n",
- tmp2 >> 10,
- high_memory >> 10,
- codepages << (PAGE_SHIFT-10),
- reservedpages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10));
-
- invalidate();
- return;
+ min_free_pages = nr_free_pages >> 7;
+ if(min_free_pages < 16)
+ min_free_pages = 16;
+ free_pages_low = min_free_pages + (min_free_pages >> 1);
+ free_pages_high = min_free_pages + min_free_pages;
}
void si_meminfo(struct sysinfo *val)
{
int i;
- i = high_memory >> PAGE_SHIFT;
+ i = MAP_NR(high_memory);
val->totalram = 0;
val->sharedram = 0;
val->freeram = nr_free_pages << PAGE_SHIFT;
val->bufferram = buffermem;
while (i-- > 0) {
- if (mem_map[i] & MAP_PAGE_RESERVED)
+ if (PageReserved(mem_map + i))
continue;
val->totalram++;
- if (!mem_map[i])
+ if (!mem_map[i].count)
continue;
- val->sharedram += mem_map[i]-1;
+ val->sharedram += mem_map[i].count-1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
- return;
}
diff --git a/arch/sparc/mm/loadmmu.c b/arch/sparc/mm/loadmmu.c
new file mode 100644
index 000000000..ac1ecd790
--- /dev/null
+++ b/arch/sparc/mm/loadmmu.c
@@ -0,0 +1,165 @@
+/* $Id: loadmmu.c,v 1.36 1996/10/27 08:36:46 davem Exp $
+ * loadmmu.c: This code loads up all the mm function pointers once the
+ * machine type has been determined. It also sets the static
+ * mmu values such as PAGE_NONE, etc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+unsigned long page_offset = 0xf0000000;
+
+struct ctx_list *ctx_list_pool;
+struct ctx_list ctx_free;
+struct ctx_list ctx_used;
+
+unsigned long (*alloc_kernel_stack)(struct task_struct *tsk);
+void (*free_kernel_stack)(unsigned long stack);
+struct task_struct *(*alloc_task_struct)(void);
+void (*free_task_struct)(struct task_struct *tsk);
+
+void (*quick_kernel_fault)(unsigned long);
+
+void (*mmu_exit_hook)(void);
+void (*mmu_flush_hook)(void);
+
+/* translate between physical and virtual addresses */
+unsigned long (*mmu_v2p)(unsigned long);
+unsigned long (*mmu_p2v)(unsigned long);
+
+char *(*mmu_lockarea)(char *, unsigned long);
+void (*mmu_unlockarea)(char *, unsigned long);
+
+char *(*mmu_get_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+void (*mmu_get_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
+void (*mmu_release_scsi_one)(char *, unsigned long, struct linux_sbus *sbus);
+void (*mmu_release_scsi_sgl)(struct mmu_sglist *, int, struct linux_sbus *sbus);
+
+void (*mmu_map_dma_area)(unsigned long addr, int len);
+
+void (*update_mmu_cache)(struct vm_area_struct *vma, unsigned long address, pte_t pte);
+
+#ifdef __SMP__
+void (*local_flush_cache_all)(void);
+void (*local_flush_cache_mm)(struct mm_struct *);
+void (*local_flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*local_flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+void (*local_flush_tlb_all)(void);
+void (*local_flush_tlb_mm)(struct mm_struct *);
+void (*local_flush_tlb_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*local_flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+void (*local_flush_page_to_ram)(unsigned long address);
+#endif
+
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *);
+void (*flush_cache_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*flush_cache_page)(struct vm_area_struct *, unsigned long address);
+
+void (*flush_tlb_all)(void);
+void (*flush_tlb_mm)(struct mm_struct *);
+void (*flush_tlb_range)(struct mm_struct *, unsigned long start,
+ unsigned long end);
+void (*flush_tlb_page)(struct vm_area_struct *, unsigned long address);
+
+void (*flush_page_to_ram)(unsigned long page);
+
+void (*set_pte)(pte_t *pteptr, pte_t pteval);
+
+unsigned int pmd_shift, pmd_size, pmd_mask;
+unsigned int (*pmd_align)(unsigned int);
+unsigned int pgdir_shift, pgdir_size, pgdir_mask;
+unsigned int (*pgdir_align)(unsigned int);
+unsigned int ptrs_per_pte, ptrs_per_pmd, ptrs_per_pgd;
+unsigned int pg_iobits;
+
+pgprot_t page_none, page_shared, page_copy, page_readonly, page_kernel;
+
+unsigned long (*pte_page)(pte_t);
+unsigned long (*pmd_page)(pmd_t);
+unsigned long (*pgd_page)(pgd_t);
+
+void (*sparc_update_rootmmu_dir)(struct task_struct *, pgd_t *pgdir);
+unsigned long (*(vmalloc_start))(void);
+void (*switch_to_context)(struct task_struct *tsk);
+
+int (*pte_none)(pte_t);
+int (*pte_present)(pte_t);
+void (*pte_clear)(pte_t *);
+
+int (*pmd_none)(pmd_t);
+int (*pmd_bad)(pmd_t);
+int (*pmd_present)(pmd_t);
+void (*pmd_clear)(pmd_t *);
+
+int (*pgd_none)(pgd_t);
+int (*pgd_bad)(pgd_t);
+int (*pgd_present)(pgd_t);
+void (*pgd_clear)(pgd_t *);
+
+pte_t (*mk_pte)(unsigned long, pgprot_t);
+pte_t (*mk_pte_phys)(unsigned long, pgprot_t);
+pte_t (*mk_pte_io)(unsigned long, pgprot_t, int);
+void (*pgd_set)(pgd_t *, pmd_t *);
+pte_t (*pte_modify)(pte_t, pgprot_t);
+pgd_t * (*pgd_offset)(struct mm_struct *, unsigned long);
+pmd_t * (*pmd_offset)(pgd_t *, unsigned long);
+pte_t * (*pte_offset)(pmd_t *, unsigned long);
+void (*pte_free_kernel)(pte_t *);
+pte_t * (*pte_alloc_kernel)(pmd_t *, unsigned long);
+
+void (*pmd_free_kernel)(pmd_t *);
+pmd_t * (*pmd_alloc_kernel)(pgd_t *, unsigned long);
+void (*pte_free)(pte_t *);
+pte_t * (*pte_alloc)(pmd_t *, unsigned long);
+
+void (*pmd_free)(pmd_t *);
+pmd_t * (*pmd_alloc)(pgd_t *, unsigned long);
+void (*pgd_free)(pgd_t *);
+
+pgd_t * (*pgd_alloc)(void);
+
+int (*pte_write)(pte_t);
+int (*pte_dirty)(pte_t);
+int (*pte_young)(pte_t);
+
+pte_t (*pte_wrprotect)(pte_t);
+pte_t (*pte_mkclean)(pte_t);
+pte_t (*pte_mkold)(pte_t);
+pte_t (*pte_mkwrite)(pte_t);
+pte_t (*pte_mkdirty)(pte_t);
+pte_t (*pte_mkyoung)(pte_t);
+
+char *(*mmu_info)(void);
+
+extern void ld_mmu_sun4c(void);
+extern void ld_mmu_srmmu(void);
+
+void
+load_mmu(void)
+{
+ switch(sparc_cpu_model) {
+ case sun4c:
+ ld_mmu_sun4c();
+ break;
+ case sun4m:
+ case sun4d:
+ ld_mmu_srmmu();
+ break;
+ default:
+ printk("load_mmu:MMU support not available for this architecture\n");
+ printk("load_mmu:sparc_cpu_model = %d\n", (int) sparc_cpu_model);
+ printk("load_mmu:Halting...\n");
+ panic("load_mmu()");
+ }
+}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
new file mode 100644
index 000000000..7d9b653df
--- /dev/null
+++ b/arch/sparc/mm/srmmu.c
@@ -0,0 +1,3477 @@
+/* $Id: srmmu.c,v 1.103 1996/10/31 06:28:35 davem Exp $
+ * srmmu.c: SRMMU specific routines for memory management.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/kdebug.h>
+#include <asm/vaddrs.h>
+#include <asm/traps.h>
+#include <asm/smp.h>
+#include <asm/mbus.h>
+#include <asm/cache.h>
+#include <asm/oplib.h>
+#include <asm/sbus.h>
+#include <asm/iommu.h>
+#include <asm/asi.h>
+#include <asm/msi.h>
+
+/* Now the cpu specific definitions. */
+#include <asm/viking.h>
+#include <asm/mxcc.h>
+#include <asm/ross.h>
+#include <asm/tsunami.h>
+#include <asm/swift.h>
+
+enum mbus_module srmmu_modtype;
+unsigned int hwbug_bitmask;
+int vac_cache_size;
+int vac_line_size;
+int vac_badbits;
+
+extern unsigned long sparc_iobase_vaddr;
+
+#ifdef __SMP__
+extern void smp_capture(void);
+extern void smp_release(void);
+#else
+#define smp_capture()
+#define smp_release()
+#endif /* !(__SMP__) */
+
+/* #define USE_CHUNK_ALLOC 1 */
+
+static void (*ctxd_set)(ctxd_t *ctxp, pgd_t *pgdp);
+static void (*pmd_set)(pmd_t *pmdp, pte_t *ptep);
+
+static void (*flush_page_for_dma)(unsigned long page);
+static void (*flush_cache_page_to_uncache)(unsigned long page);
+static void (*flush_tlb_page_for_cbit)(unsigned long page);
+#ifdef __SMP__
+static void (*local_flush_page_for_dma)(unsigned long page);
+static void (*local_flush_cache_page_to_uncache)(unsigned long page);
+static void (*local_flush_tlb_page_for_cbit)(unsigned long page);
+#endif
+
+static struct srmmu_stats {
+ int invall;
+ int invpg;
+ int invrnge;
+ int invmm;
+} module_stats;
+
+static char *srmmu_name;
+
+ctxd_t *srmmu_ctx_table_phys;
+ctxd_t *srmmu_context_table;
+
+static struct srmmu_trans {
+ unsigned long vbase;
+ unsigned long pbase;
+ unsigned long size;
+} srmmu_map[SPARC_PHYS_BANKS];
+
+static int viking_mxcc_present = 0;
+
+/* Rebuild mem_map reservation flags from the srmmu_map bank table:
+ * first mark every page PG_reserved, then clear the bit on pages that
+ * lie inside a registered physical bank -- except kernel pages between
+ * KERNBASE and start_mem, which stay reserved.
+ */
+void srmmu_frob_mem_map(unsigned long start_mem)
+{
+	unsigned long bank_start, bank_end;
+	unsigned long addr;
+	int i;
+
+	/* First, mark all pages as reserved; the bank scan below
+	 * un-reserves the ones that really exist.
+	 */
+	for(addr = PAGE_OFFSET; MAP_NR(addr) < max_mapnr; addr += PAGE_SIZE)
+		mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+
+	start_mem = PAGE_ALIGN(start_mem);
+	for(i = 0; srmmu_map[i].size; i++) {
+		bank_start = srmmu_map[i].vbase;
+		bank_end = bank_start + srmmu_map[i].size;
+		while(bank_start < bank_end) {
+			/* Keep the kernel image and early allocations reserved. */
+			if((bank_start >= KERNBASE) &&
+			   (bank_start < start_mem)) {
+				bank_start += PAGE_SIZE;
+				continue;
+			}
+			mem_map[MAP_NR(bank_start)].flags &= ~(1<<PG_reserved);
+			bank_start += PAGE_SIZE;
+		}
+	}
+}
+
+/* Physical memory can be _very_ non-contiguous on the sun4m, especially
+ * the SS10/20 class machines and with the latest openprom revisions.
+ * So we have to crunch the free page pool.
+ */
+static inline unsigned long srmmu_v2p(unsigned long vaddr)
+{
+	struct srmmu_trans *entry;
+
+	/* Walk the boot-time bank table up to its zero-size terminator;
+	 * return the translated physical address, or all-ones when
+	 * vaddr lies in no registered bank.
+	 */
+	for(entry = &srmmu_map[0]; entry->size != 0; entry++) {
+		if(vaddr >= entry->vbase &&
+		   vaddr < (entry->vbase + entry->size))
+			return (vaddr - entry->vbase) + entry->pbase;
+	}
+	return 0xffffffffUL;
+}
+
+static inline unsigned long srmmu_p2v(unsigned long paddr)
+{
+	struct srmmu_trans *entry;
+
+	/* Inverse of srmmu_v2p(): map a physical address back into the
+	 * kernel virtual window, or all-ones when it is in no bank.
+	 */
+	for(entry = &srmmu_map[0]; entry->size != 0; entry++) {
+		if(paddr >= entry->pbase &&
+		   paddr < (entry->pbase + entry->size))
+			return (paddr - entry->pbase) + entry->vbase;
+	}
+	return 0xffffffffUL;
+}
+
+/* In general all page table modifications should use the V8 atomic
+ * swap instruction. This insures the mmu and the cpu are in sync
+ * with respect to ref/mod bits in the page tables.
+ */
+static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
+{
+#if MEM_BUS_SPACE
+	/* the AP1000 has its memory on bus 8, not 0 like suns do */
+	if (!(value&KERNBASE))
+		value |= MEM_BUS_SPACE<<28;
+	if (value == MEM_BUS_SPACE<<28) value = 0;
+#endif
+	/* swap atomically exchanges %0 with [addr]; the old contents of
+	 * the entry come back in "value" and are returned to the caller.
+	 */
+	__asm__ __volatile__("swap [%2], %0\n\t" :
+			     "=&r" (value) :
+			     "0" (value), "r" (addr));
+	return value;
+}
+
+/* Functions really use this, not srmmu_swap directly. */
+#define srmmu_set_entry(ptr, newentry) \
+	srmmu_swap((unsigned long *) (ptr), (newentry))
+
+
+/* The very generic SRMMU page table operations. */
+static unsigned int srmmu_pmd_align(unsigned int addr) { return SRMMU_PMD_ALIGN(addr); }
+static unsigned int srmmu_pgdir_align(unsigned int addr) { return SRMMU_PGDIR_ALIGN(addr); }
+
+/* First address usable by vmalloc on this MMU. */
+static unsigned long srmmu_vmalloc_start(void)
+{
+	return SRMMU_VMALLOC_START;
+}
+
+/* Table descriptors store the next-level physical address shifted
+ * right by 4 (see srmmu_pgd_set et al below); shift it back and
+ * translate to a kernel virtual address.
+ */
+static unsigned long srmmu_pgd_page(pgd_t pgd)
+{ return srmmu_p2v((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+
+static unsigned long srmmu_pmd_page(pmd_t pmd)
+{ return srmmu_p2v((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+
+/* True when the pte's upper address bits select a bus space other
+ * than normal memory (i.e. a device mapping).
+ */
+static inline int srmmu_device_memory(pte_t pte)
+{
+	return (pte_val(pte)>>28) != MEM_BUS_SPACE;
+}
+
+/* Virtual address of the page a pte maps; ~0 for device memory. */
+static unsigned long srmmu_pte_page(pte_t pte)
+{ return srmmu_device_memory(pte)?~0:srmmu_p2v((pte_val(pte) & SRMMU_PTE_PMASK) << 4); }
+
+/* Entry-type tests: the low SRMMU_ET_MASK bits mark an entry as a PTE,
+ * a PTD (pointer to next-level table), or invalid.
+ */
+static int srmmu_pte_none(pte_t pte) { return !pte_val(pte); }
+static int srmmu_pte_present(pte_t pte)
+{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
+
+static void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
+
+static int srmmu_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
+static int srmmu_pmd_bad(pmd_t pmd)
+{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
+
+static int srmmu_pmd_present(pmd_t pmd)
+{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
+
+static void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
+
+static int srmmu_pgd_none(pgd_t pgd) { return !pgd_val(pgd); }
+static int srmmu_pgd_bad(pgd_t pgd)
+{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
+
+static int srmmu_pgd_present(pgd_t pgd)
+{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
+
+static void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
+
+/* pte attribute bits: writable, hardware-dirty, hardware-referenced. */
+static int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
+static int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
+static int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
+
+/* Pure-value transformers: take a pte, return a copy with one bit
+ * toggled; the caller stores the result back with set_pte().
+ */
+static pte_t srmmu_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~SRMMU_WRITE; return pte;}
+static pte_t srmmu_pte_mkclean(pte_t pte) { pte_val(pte) &= ~SRMMU_DIRTY; return pte; }
+static pte_t srmmu_pte_mkold(pte_t pte) { pte_val(pte) &= ~SRMMU_REF; return pte; }
+static pte_t srmmu_pte_mkwrite(pte_t pte) { pte_val(pte) |= SRMMU_WRITE; return pte; }
+static pte_t srmmu_pte_mkdirty(pte_t pte) { pte_val(pte) |= SRMMU_DIRTY; return pte; }
+static pte_t srmmu_pte_mkyoung(pte_t pte) { pte_val(pte) |= SRMMU_REF; return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+/* From a kernel virtual page address: translate to physical first. */
+static pte_t srmmu_mk_pte(unsigned long page, pgprot_t pgprot)
+{ pte_t pte; pte_val(pte) = ((srmmu_v2p(page)) >> 4) | pgprot_val(pgprot); return pte; }
+
+/* From an already-physical page address: no translation. */
+static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
+{ pte_t pte; pte_val(pte) = ((page) >> 4) | pgprot_val(pgprot); return pte; }
+
+/* For device mappings: "space" supplies the upper 4 bits of the 36-bit
+ * physical address (the I/O bus space selector).
+ */
+static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+	pte_t pte;
+	pte_val(pte) = ((page) >> 4) | (space << 28) | pgprot_val(pgprot);
+	return pte;
+}
+
+/* Install table descriptors: physical address of the lower table,
+ * shifted right by 4, tagged SRMMU_ET_PTD.  Stores go through set_pte
+ * so the atomic-swap path is used.
+ */
+static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
+{
+	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
+}
+
+static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
+{
+	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pmdp) >> 4)));
+}
+
+static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
+{
+	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) ptep) >> 4)));
+}
+
+/* Replace a pte's protection bits, keeping the SRMMU_CHG_MASK part
+ * (page address and hardware-maintained state).
+ */
+static pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
+
+/* to find an entry in a top-level page table... */
+static pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+	return mm->pgd + ((address >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1));
+}
+
+/* Find an entry in the second-level page table.. */
+static pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
+{
+	return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
+}
+
+/* Find an entry in the third-level page table.. */
+static pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
+{
+	return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
+}
+
+/* This must update the context table entry for this process. */
+static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
+{
+	if(tsk->mm->context != NO_CONTEXT) {
+		/* NOTE(review): the flushes act on current->mm while the
+		 * context entry rewritten belongs to tsk -- this appears
+		 * to assume tsk == current; confirm against callers.
+		 */
+		flush_cache_mm(current->mm);
+		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
+		flush_tlb_mm(current->mm);
+	}
+}
+
+/* Clear the cacheable bit in the kernel mapping of addr.  The table
+ * walk stops early when the pgd- or pmd-level entry is itself a PTE
+ * (a large mapping installed at that level).
+ */
+static inline void srmmu_uncache_page(unsigned long addr)
+{
+	pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+		ptep = (pte_t *) pgdp;
+	} else {
+		pmdp = srmmu_pmd_offset(pgdp, addr);
+		if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+			ptep = (pte_t *) pmdp;
+		} else {
+			ptep = srmmu_pte_offset(pmdp, addr);
+		}
+	}
+
+	/* Push dirty cache lines out first, then drop the C bit and zap
+	 * the stale TLB entry so the new attribute takes effect.
+	 */
+	flush_cache_page_to_uncache(addr);
+	set_pte(ptep, __pte((pte_val(*ptep) & ~SRMMU_CACHE)));
+	flush_tlb_page_for_cbit(addr);
+}
+
+/* Inverse of srmmu_uncache_page(): set the cacheable bit back.  No
+ * cache flush is needed here, only the TLB entry must be invalidated.
+ */
+static inline void srmmu_recache_page(unsigned long addr)
+{
+	pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, addr);
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	if((pgd_val(*pgdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+		ptep = (pte_t *) pgdp;
+	} else {
+		pmdp = srmmu_pmd_offset(pgdp, addr);
+		if((pmd_val(*pmdp) & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
+			ptep = (pte_t *) pmdp;
+		} else {
+			ptep = srmmu_pte_offset(pmdp, addr);
+		}
+	}
+	set_pte(ptep, __pte((pte_val(*ptep) | SRMMU_CACHE)));
+	flush_tlb_page_for_cbit(addr);
+}
+
+/* Grab one zeroed page from the kernel pool for page-table use. */
+static inline unsigned long srmmu_getpage(void)
+{
+	return get_free_page(GFP_KERNEL);
+}
+
+/* Give a page-table page back to the kernel pool. */
+static inline void srmmu_putpage(unsigned long page)
+{
+	free_page(page);
+}
+
+#ifdef USE_CHUNK_ALLOC
+
+/* Sub-page allocator for page-table nodes: pages are carved into
+ * 256-byte "small" or 1K "big" chunks kept on singly linked free
+ * lists.  When a list exceeds its high-water mark and has been idle
+ * for RELAX_JIFFIES, garbage_collect() returns whole pages.
+ */
+#define LC_HIGH_WATER	128
+#define BC_HIGH_WATER	32
+
+/* Free-list heads, counts of free chunks, and page statistics. */
+static unsigned long *lcnks = 0;
+static unsigned long *bcnks = 0;
+static int lcwater = 0;
+static int bcwater = 0;
+static int chunk_pages = 0;
+static int clct_pages = 0;
+
+#define RELAX_JIFFIES 16
+
+/* Timestamps of the most recent small/big allocation. */
+static int lcjiffies;
+static int bcjiffies;
+
+/* Overlay placed on free chunks during garbage collection: next/prev
+ * ring chunks on the same page, npage/ppage ring one representative
+ * chunk per page, count tallies free chunks on that page.
+ */
+struct chunk {
+	struct chunk *next;
+	struct chunk *prev;
+	struct chunk *npage;
+	struct chunk *ppage;
+	int count;
+};
+
+static int garbage_calls = 0;
+
+/* True when p and q live on different pages. */
+#define OTHER_PAGE(p,q)	(((unsigned long)(p) ^ (unsigned long)(q)) & PAGE_MASK)
+
+/* Compact a chunk free list: *cnks heads a list of n free chunks, cpp
+ * chunks fit per page.  Chunks are regrouped by the page they live in
+ * (via the npage/ppage ring); any page whose cpp chunks are all free
+ * is handed back to the page allocator.  Returns the number of chunks
+ * remaining on the relinked free list.  Called by the free_*_chunk()
+ * routines with interrupts already disabled.
+ */
+static inline int garbage_collect(unsigned long **cnks, int n, int cpp)
+{
+	struct chunk *root = (struct chunk *)*cnks;
+	struct chunk *p, *q, *curr, *next;
+	int water = n;
+
+	next = root->next;
+	curr = root->prev = root->next = root->npage = root->ppage = root;
+	root->count = 1;
+
+	garbage_calls++;
+
+	while (--n) {
+		p = next;
+		next = next->next;
+
+		if (OTHER_PAGE(p, curr)) {
+			/* p is on a different page than curr: look for the
+			 * page-ring entry that matches p's page.
+			 */
+			q = curr->npage;
+			while (q != curr) {
+				if (!OTHER_PAGE(p, q))
+					break;
+				q = q->npage;
+			}
+
+			if (q == curr) {
+				/* First chunk seen from this page: start a
+				 * new per-page ring with p as representative.
+				 */
+				(p->npage = curr->npage)->ppage = p;
+				curr->npage = p;
+				p->ppage = curr;
+
+				p->next = p->prev = p;
+				p->count = 1;
+
+				curr = p;
+
+				continue;
+			}
+			curr = q;
+		}
+
+		(p->next = curr->next)->prev = p;
+		curr->next = p;
+		p->prev = curr;
+
+		if (++curr->count == cpp) {
+			/* Every chunk of this page is free: release it. */
+			q = curr->npage;
+			if (curr == q) {
+
+				srmmu_putpage((unsigned long)curr & PAGE_MASK);
+				water -= cpp;
+
+				clct_pages++;
+				chunk_pages--;
+
+				if (--n) {
+					/* Restart the rings on the next chunk. */
+					p = next;
+					next = next->next;
+
+					curr = root->prev =
+						root->next = root->npage =
+						root->ppage = root = p;
+					root->count = 1;
+
+					continue;
+				}
+				return 0;
+			}
+
+			if (curr == root)
+				root = q;
+
+			curr->ppage->npage = q;
+			q->ppage = curr->ppage;
+
+			srmmu_putpage((unsigned long)curr & PAGE_MASK);
+			water -= cpp;
+
+			clct_pages++;
+			chunk_pages--;
+
+			curr = q;
+		}
+	}
+
+	/* Splice the surviving per-page sublists back into one list. */
+	p = root;
+	while (p->npage != root) {
+		p->prev->next = p->npage;
+		p = p->npage;
+	}
+
+	*cnks = (unsigned long *)root;
+	return water;
+}
+
+
+/* Hand out one zeroed 256-byte chunk, refilling the free list from a
+ * fresh page when it runs dry.  Returns 0 on allocation failure.
+ */
+static inline unsigned long *get_small_chunk(void)
+{
+	unsigned long *chunk;
+	unsigned long flags;
+	int off;
+
+	save_and_cli(flags);
+	if(lcwater) {
+		/* Pop the head of the small-chunk free list. */
+		lcwater--;
+		chunk = lcnks;
+		lcnks = (unsigned long *) *chunk;
+	} else {
+		/* List empty: carve a new page into sixteen 256-byte
+		 * chunks, keep the first, thread the other fifteen onto
+		 * the free list (64 longs == 256 bytes apart).
+		 */
+		chunk = (unsigned long *) __get_free_page(GFP_KERNEL);
+
+		if(!chunk) {
+			restore_flags(flags);
+			return 0;
+		}
+		chunk_pages++;
+
+		lcnks = (chunk + 64);
+
+		/* Cache stomping, I know... */
+		for (off = 64; off < 960; off += 64)
+			*(chunk + off) = (unsigned long) (chunk + off + 64);
+		*(chunk + 960) = 0;
+		lcwater = 15;
+	}
+	lcjiffies = jiffies;
+	restore_flags(flags);
+	memset(chunk, 0, 256);
+	return chunk;
+}
+
+/* Push a small chunk back on the free list; once the list is over its
+ * high-water mark and has been quiet for RELAX_JIFFIES, compact it
+ * (16 small chunks per page).
+ */
+static inline void free_small_chunk(unsigned long *it)
+{
+	unsigned long flags;
+
+	save_and_cli(flags);
+	*it = (unsigned long) lcnks;
+	lcnks = it;
+	lcwater++;
+
+	if ((lcwater > LC_HIGH_WATER) &&
+	    (jiffies > lcjiffies + RELAX_JIFFIES))
+		lcwater = garbage_collect(&lcnks, lcwater, 16);
+
+	restore_flags(flags);
+}
+
+/* Hand out one zeroed 1K chunk, refilling the free list from a fresh
+ * page (four chunks, 256 longs apart) when it runs dry.  Returns 0 on
+ * allocation failure.
+ */
+static inline unsigned long *get_big_chunk(void)
+{
+	unsigned long *rval;
+	unsigned long flags;
+
+	save_and_cli(flags);
+	if(bcwater) {
+		bcwater--;
+		rval = bcnks;
+		bcnks = (unsigned long *) *rval;
+	} else {
+		rval = (unsigned long *) __get_free_page(GFP_KERNEL);
+
+		if(!rval) {
+			restore_flags(flags);
+			return 0;
+		}
+		chunk_pages++;
+
+		bcnks = (rval + 256);
+
+		/* Cache stomping, I know... */
+		*(rval + 256) = (unsigned long) (rval + 512);
+		*(rval + 512) = (unsigned long) (rval + 768);
+		*(rval + 768) = 0;
+		bcwater = 3;
+	}
+	bcjiffies = jiffies;
+	restore_flags(flags);
+	memset(rval, 0, 1024);
+	return rval;
+}
+
+/* Push a big chunk back on the free list; compact when over the
+ * high-water mark and idle (4 big chunks per page).
+ */
+static inline void free_big_chunk(unsigned long *it)
+{
+	unsigned long flags;
+
+	save_and_cli(flags);
+	*it = (unsigned long) bcnks;
+	bcnks = it;
+	bcwater++;
+
+	if ((bcwater > BC_HIGH_WATER) &&
+	    (jiffies > bcjiffies + RELAX_JIFFIES))
+		bcwater = garbage_collect(&bcnks, bcwater, 4);
+
+	restore_flags(flags);
+}
+
+#define NEW_PGD() (pgd_t *) get_big_chunk()
+#define NEW_PMD() (pmd_t *) get_small_chunk()
+#define NEW_PTE() (pte_t *) get_small_chunk()
+#define FREE_PGD(chunk) free_big_chunk((unsigned long *)(chunk))
+#define FREE_PMD(chunk) free_small_chunk((unsigned long *)(chunk))
+#define FREE_PTE(chunk) free_small_chunk((unsigned long *)(chunk))
+
+#else
+
+/* The easy versions. */
+#define NEW_PGD() (pgd_t *) srmmu_getpage()
+#define NEW_PMD() (pmd_t *) srmmu_getpage()
+#define NEW_PTE() (pte_t *) srmmu_getpage()
+#define FREE_PGD(chunk) srmmu_putpage((unsigned long)(chunk))
+#define FREE_PMD(chunk) srmmu_putpage((unsigned long)(chunk))
+#define FREE_PTE(chunk) srmmu_putpage((unsigned long)(chunk))
+
+#endif
+
+/*
+ * Allocate and free page tables.  The xxx_kernel() versions serve
+ * kernel mappings; on SRMMU both flavors draw from the same
+ * NEW_*/FREE_* pools (chunk allocator or whole pages).
+ */
+static void srmmu_pte_free_kernel(pte_t *pte)
+{
+	FREE_PTE(pte);
+}
+
+/* Return the pte slot for address within *pmd, allocating a pte table
+ * if none is installed.  The pmd is re-checked after allocation and a
+ * losing duplicate is freed; on failure or corruption the pmd is
+ * pointed at BAD_PAGETABLE and NULL is returned.
+ */
+static pte_t *srmmu_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+{
+	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
+	if(srmmu_pmd_none(*pmd)) {
+		pte_t *page = NEW_PTE();
+		if(srmmu_pmd_none(*pmd)) {
+			if(page) {
+				pmd_set(pmd, page);
+				return page + address;
+			}
+			pmd_set(pmd, BAD_PAGETABLE);
+			return NULL;
+		}
+		FREE_PTE(page);
+	}
+	if(srmmu_pmd_bad(*pmd)) {
+		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+		pmd_set(pmd, BAD_PAGETABLE);
+		return NULL;
+	}
+	return (pte_t *) srmmu_pmd_page(*pmd) + address;
+}
+
+static void srmmu_pmd_free_kernel(pmd_t *pmd)
+{
+	FREE_PMD(pmd);
+}
+
+/* Return the pmd slot for address within *pgd, allocating a pmd table
+ * if none is installed.  Mirrors srmmu_pmd_alloc() below: the pgd is
+ * re-checked after allocation and a losing duplicate freed; on failure
+ * or corruption the pgd is pointed at BAD_PAGETABLE and NULL returned.
+ */
+static pmd_t *srmmu_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
+	if(srmmu_pgd_none(*pgd)) {
+		pmd_t *page;
+		page = NEW_PMD();
+		if(srmmu_pgd_none(*pgd)) {
+			if(page) {
+				pgd_set(pgd, page);
+				return page + address;
+			}
+			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+			return NULL;
+		}
+		FREE_PMD(page);
+	}
+	if(srmmu_pgd_bad(*pgd)) {
+		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
+		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+		return NULL;
+	}
+	/* Use the SRMMU-specific lookup directly instead of the generic
+	 * pgd_page function pointer, matching srmmu_pmd_alloc().
+	 */
+	return (pmd_t *) srmmu_pgd_page(*pgd) + address;
+}
+
+static void srmmu_pte_free(pte_t *pte)
+{
+	FREE_PTE(pte);
+}
+
+/* User-space variant of pte_alloc_kernel(): identical allocate,
+ * re-check, and BAD_PAGETABLE poisoning behavior.
+ */
+static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
+{
+	address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
+	if(srmmu_pmd_none(*pmd)) {
+		pte_t *page = NEW_PTE();
+		if(srmmu_pmd_none(*pmd)) {
+			if(page) {
+				pmd_set(pmd, page);
+				return page + address;
+			}
+			pmd_set(pmd, BAD_PAGETABLE);
+			return NULL;
+		}
+		FREE_PTE(page);
+	}
+	if(srmmu_pmd_bad(*pmd)) {
+		printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+		pmd_set(pmd, BAD_PAGETABLE);
+		return NULL;
+	}
+	return ((pte_t *) srmmu_pmd_page(*pmd)) + address;
+}
+
+/* Real three-level page tables on SRMMU. */
+static void srmmu_pmd_free(pmd_t * pmd)
+{
+	FREE_PMD(pmd);
+}
+
+/* User-space variant of pmd_alloc_kernel(): same protocol. */
+static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
+{
+	address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
+	if(srmmu_pgd_none(*pgd)) {
+		pmd_t *page = NEW_PMD();
+		if(srmmu_pgd_none(*pgd)) {
+			if(page) {
+				pgd_set(pgd, page);
+				return page + address;
+			}
+			pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+			return NULL;
+		}
+		FREE_PMD(page);
+	}
+	if(srmmu_pgd_bad(*pgd)) {
+		printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
+		pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+		return NULL;
+	}
+	return (pmd_t *) srmmu_pgd_page(*pgd) + address;
+}
+
+static void srmmu_pgd_free(pgd_t *pgd)
+{
+	FREE_PGD(pgd);
+}
+
+static pgd_t *srmmu_pgd_alloc(void)
+{
+	return NEW_PGD();
+}
+
+/* set_pte for modules whose caches can safely hold page-table data:
+ * plain atomic store.
+ */
+static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
+{
+	srmmu_set_entry(ptep, pte_val(pteval));
+}
+
+/* set_pte for HyperSparc: after the store, flush the cache page that
+ * holds the pte itself, with interrupts off around the pair.
+ */
+static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
+{
+	unsigned long flags;
+
+	save_and_cli(flags);
+	srmmu_set_entry(ptep, pte_val(pteval));
+	hyper_flush_cache_page(((unsigned long)ptep) & PAGE_MASK);
+	restore_flags(flags);
+}
+
+/* set_pte for Cypress: flush every 32-byte line of the page holding
+ * the pte via ASI_M_FLUSH_PAGE stores -- eight lines per 0x100 block,
+ * walking backwards from the end of the page to its start.
+ */
+static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
+{
+	register unsigned long a, b, c, d, e, f, g;
+	unsigned long line, page;
+
+	srmmu_set_entry(ptep, pte_val(pteval));
+	page = ((unsigned long)ptep) & PAGE_MASK;
+	line = (page + PAGE_SIZE) - 0x100;
+	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
+	goto inside;
+	do {
+		line -= 0x100;
+	inside:
+		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+				     "sta %%g0, [%0 + %2] %1\n\t"
+				     "sta %%g0, [%0 + %3] %1\n\t"
+				     "sta %%g0, [%0 + %4] %1\n\t"
+				     "sta %%g0, [%0 + %5] %1\n\t"
+				     "sta %%g0, [%0 + %6] %1\n\t"
+				     "sta %%g0, [%0 + %7] %1\n\t"
+				     "sta %%g0, [%0 + %8] %1\n\t" : :
+				     "r" (line),
+				     "i" (ASI_M_FLUSH_PAGE),
+				     "r" (a), "r" (b), "r" (c), "r" (d),
+				     "r" (e), "r" (f), "r" (g));
+	} while(line != page);
+}
+
+/* set_pte for MXCC-less Viking: after the store, displace the dcache
+ * set that could hold the pte by reading eight pages that map to the
+ * same set index (bits 11:5 of the physical address).
+ */
+static void srmmu_set_pte_nocache_nomxccvik(pte_t *ptep, pte_t pteval)
+{
+	unsigned long paddr = srmmu_v2p(((unsigned long)ptep));
+	unsigned long vaddr;
+	int set;
+	int i;
+
+	set = (paddr >> 5) & 0x7f;
+	vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
+	/* Pass the raw entry value, as every other set_pte
+	 * implementation in this file does.
+	 */
+	srmmu_set_entry(ptep, pte_val(pteval));
+	for (i = 0; i < 8; i++) {
+		__asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
+		vaddr += PAGE_SIZE;
+	}
+}
+
+/* Fatal kernel-space fault: report and stop.  On SMP just print and
+ * spin; on UP additionally probe the MMU for the offending pte and
+ * die with a register dump.
+ */
+static void srmmu_quick_kernel_fault(unsigned long address)
+{
+#ifdef __SMP__
+	printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
+	       smp_processor_id(), address);
+	while (1) ;
+#else
+	printk("Kernel faults at addr=0x%08lx\n", address);
+	printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
+	die_if_kernel("SRMMU bolixed...", current->tss.kregs);
+#endif
+}
+
+/* Give tsk's mm a hardware context number: take one from the free
+ * list if available, otherwise steal the least recently used entry on
+ * ctx_used (skipping the one belonging to current), flushing the
+ * victim's cache/TLB state first.
+ */
+static inline void alloc_context(struct task_struct *tsk)
+{
+	struct mm_struct *mm = tsk->mm;
+	struct ctx_list *ctxp;
+
+#if CONFIG_AP1000
+	/* AP1000 parallel tasks get a fixed context from their taskid. */
+	if (tsk->taskid >= MPP_TASK_BASE) {
+		mm->context = MPP_CONTEXT_BASE + (tsk->taskid - MPP_TASK_BASE);
+		return;
+	}
+#endif
+
+	ctxp = ctx_free.next;
+	if(ctxp != &ctx_free) {
+		remove_from_ctx_list(ctxp);
+		add_to_used_ctxlist(ctxp);
+		mm->context = ctxp->ctx_number;
+		ctxp->ctx_mm = mm;
+		return;
+	}
+	ctxp = ctx_used.next;
+	if(ctxp->ctx_mm == current->mm)
+		ctxp = ctxp->next;
+	if(ctxp == &ctx_used)
+		panic("out of mmu contexts");
+	flush_cache_mm(ctxp->ctx_mm);
+	flush_tlb_mm(ctxp->ctx_mm);
+	remove_from_ctx_list(ctxp);
+	add_to_used_ctxlist(ctxp);
+	ctxp->ctx_mm->context = NO_CONTEXT;
+	ctxp->ctx_mm = mm;
+	mm->context = ctxp->ctx_number;
+}
+
+/* Return a context number to the free pool.  AP1000 fixed contexts
+ * are never pooled, so there is nothing to do for them.
+ */
+static inline void free_context(int context)
+{
+	struct ctx_list *ctx_old;
+
+#if CONFIG_AP1000
+	if (context >= MPP_CONTEXT_BASE)
+		return; /* nothing to do! */
+#endif
+
+	ctx_old = ctx_list_pool + context;
+	remove_from_ctx_list(ctx_old);
+	add_to_free_ctxlist(ctx_old);
+}
+
+
+/* Make tsk's address space current: lazily assign a hardware context
+ * and publish its pgd in the context table on first use, then load
+ * the context register.
+ */
+static void srmmu_switch_to_context(struct task_struct *tsk)
+{
+	if(tsk->mm->context == NO_CONTEXT) {
+		alloc_context(tsk);
+		flush_cache_mm(current->mm);
+		ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+		flush_tlb_mm(current->mm);
+	}
+	srmmu_set_context(tsk->mm->context);
+}
+
+/* Low level IO area allocation on the SRMMU. */
+/* Hand-build a kernel pte mapping virt_addr to physaddr in the given
+ * I/O bus space, privileged, optionally read-only.  The third-level
+ * table for virt_addr must already exist.
+ */
+void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long tmp;
+
+	physaddr &= PAGE_MASK;
+	pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
+	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+	ptep = srmmu_pte_offset(pmdp, virt_addr);
+	tmp = (physaddr >> 4) | SRMMU_ET_PTE;
+
+	/* I need to test whether this is consistent over all
+	 * sun4m's.  The bus_type represents the upper 4 bits of
+	 * 36-bit physical address on the I/O space lines...
+	 */
+	tmp |= (bus_type << 28);
+	if(rdonly)
+		tmp |= SRMMU_PRIV_RDONLY;
+	else
+		tmp |= SRMMU_PRIV;
+	flush_page_to_ram(virt_addr);
+	set_pte(ptep, tmp);
+	flush_tlb_all();
+}
+
+/* Undo srmmu_mapioaddr(): point the pte back at the harmless
+ * EMPTY_PGE page and zap the TLB.
+ */
+void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+	pgdp = srmmu_pgd_offset(init_task.mm, virt_addr);
+	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
+	ptep = srmmu_pte_offset(pmdp, virt_addr);
+
+	/* No need to flush uncacheable page. */
+	set_pte(ptep, pte_val(srmmu_mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED)));
+	flush_tlb_all();
+}
+
+/* DVMA lock/unlock are no-ops on SRMMU: buffers are used in place. */
+static char *srmmu_lockarea(char *vaddr, unsigned long len)
+{
+	return vaddr;
+}
+
+static void srmmu_unlockarea(char *vaddr, unsigned long len)
+{
+}
+
+/* On the SRMMU we do not have the problems with limited tlb entries
+ * for mapping kernel pages, so we just take things from the free page
+ * pool.  As a side effect we are putting a little too much pressure
+ * on the gfp() subsystem.  This setup also makes the logic of the
+ * iommu mapping code a lot easier as we can transparently handle
+ * mappings on the kernel stack without any special code as we did
+ * need on the sun4c.
+ */
+struct task_struct *srmmu_alloc_task_struct(void)
+{
+	return (struct task_struct *) kmalloc(sizeof(struct task_struct), GFP_KERNEL);
+}
+
+/* Kernel stacks are order-1 (two-page) allocations here. */
+unsigned long srmmu_alloc_kernel_stack(struct task_struct *tsk)
+{
+	return __get_free_pages(GFP_KERNEL, 1, 0);
+}
+
+static void srmmu_free_task_struct(struct task_struct *tsk)
+{
+	kfree(tsk);
+}
+
+static void srmmu_free_kernel_stack(unsigned long stack)
+{
+	free_pages(stack, 1);
+}
+
+/* Tsunami flushes.  Its page level tlb invalidation is not very
+ * useful at all, you must be in the context that page exists in to
+ * get a match.
+ */
+static void tsunami_flush_cache_all(void)
+{
+	flush_user_windows();
+	tsunami_flush_icache();
+	tsunami_flush_dcache();
+}
+
+/* Tsunami has only whole-cache flash clears, so the mm/range/page
+ * variants all ignore their arguments and do a full I+D flush.
+ */
+static void tsunami_flush_cache_mm(struct mm_struct *mm)
+{
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		tsunami_flush_icache();
+		tsunami_flush_dcache();
+#ifndef __SMP__
+	}
+#endif
+}
+
+static void tsunami_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		tsunami_flush_icache();
+		tsunami_flush_dcache();
+#ifndef __SMP__
+	}
+#endif
+}
+
+static void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+	struct mm_struct *mm = vma->vm_mm;
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		tsunami_flush_icache();
+		tsunami_flush_dcache();
+#ifndef __SMP__
+	}
+#endif
+}
+
+static void tsunami_flush_cache_page_to_uncache(unsigned long page)
+{
+	tsunami_flush_dcache();
+}
+
+/* Tsunami does not have a Copy-back style virtual cache. */
+static void tsunami_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* However, Tsunami is not IO coherent. */
+static void tsunami_flush_page_for_dma(unsigned long page)
+{
+	tsunami_flush_icache();
+	tsunami_flush_dcache();
+}
+
+static void tsunami_flush_tlb_all(void)
+{
+	module_stats.invall++;
+	srmmu_flush_whole_tlb();
+}
+
+/* No per-context invalidate on tsunami: mm and range flushes fall
+ * back to zapping the whole TLB.
+ */
+static void tsunami_flush_tlb_mm(struct mm_struct *mm)
+{
+	module_stats.invmm++;
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		srmmu_flush_whole_tlb();
+#ifndef __SMP__
+	}
+#endif
+}
+
+static void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	module_stats.invrnge++;
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		srmmu_flush_whole_tlb();
+#ifndef __SMP__
+	}
+#endif
+}
+
+/* Page-level flush must run inside the page's own context (see the
+ * comment at the top of the tsunami section): switch to it around the
+ * flush with interrupts off.
+ */
+static void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	int octx;
+	struct mm_struct *mm = vma->vm_mm;
+
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		unsigned long flags;
+
+		save_and_cli(flags);
+		octx = srmmu_get_context();
+
+		srmmu_set_context(mm->context);
+		srmmu_flush_tlb_page(page);
+		srmmu_set_context(octx);
+		restore_flags(flags);
+#ifndef __SMP__
+	}
+#endif
+	module_stats.invpg++;
+}
+
+static void tsunami_flush_tlb_page_for_cbit(unsigned long page)
+{
+	srmmu_flush_tlb_page(page);
+}
+
+/* Swift flushes.  It has the recommended SRMMU specification flushing
+ * facilities, so we can do things in a more fine grained fashion than we
+ * could on the tsunami.  Let's watch out for HARDWARE BUGS...
+ */
+
+static void swift_flush_cache_all(void)
+{
+	flush_user_windows();
+	swift_idflash_clear();
+}
+
+static void swift_flush_cache_mm(struct mm_struct *mm)
+{
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		swift_idflash_clear();
+#ifndef __SMP__
+	}
+#endif
+}
+
+static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		swift_idflash_clear();
+#ifndef __SMP__
+	}
+#endif
+}
+
+/* Per-page: flush the icache only for executable mappings, the
+ * dcache always.
+ */
+static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+	struct mm_struct *mm = vma->vm_mm;
+	if(mm->context != NO_CONTEXT) {
+#endif
+		flush_user_windows();
+		if(vma->vm_flags & VM_EXEC)
+			swift_flush_icache();
+		swift_flush_dcache();
+#ifndef __SMP__
+	}
+#endif
+}
+
+/* Not copy-back on swift. */
+static void swift_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* But not IO coherent either. */
+static void swift_flush_page_for_dma(unsigned long page)
+{
+	swift_flush_dcache();
+}
+
+static void swift_flush_cache_page_to_uncache(unsigned long page)
+{
+	swift_flush_dcache();
+}
+
+static void swift_flush_tlb_all(void)
+{
+	module_stats.invall++;
+	srmmu_flush_whole_tlb();
+}
+
+/* All finer-grained TLB flushes degrade to a whole-TLB flush on
+ * swift.
+ */
+static void swift_flush_tlb_mm(struct mm_struct *mm)
+{
+	module_stats.invmm++;
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT)
+#endif
+		srmmu_flush_whole_tlb();
+}
+
+static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+	module_stats.invrnge++;
+#ifndef __SMP__
+	if(mm->context != NO_CONTEXT)
+#endif
+		srmmu_flush_whole_tlb();
+}
+
+static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+#ifndef __SMP__
+	struct mm_struct *mm = vma->vm_mm;
+	if(mm->context != NO_CONTEXT)
+#endif
+		srmmu_flush_whole_tlb();
+	module_stats.invpg++;
+}
+
+static void swift_flush_tlb_page_for_cbit(unsigned long page)
+{
+	srmmu_flush_whole_tlb();
+}
+
+/* The following are all MBUS based SRMMU modules, and therefore could
+ * be found in a multiprocessor configuration.  On the whole, these
+ * chips seems to be much more touchy about DVMA and page tables
+ * with respect to cache coherency.
+ */
+
+/* Viking flushes.  For Sun's mainline MBUS processor it is pretty much
+ * a crappy mmu.  The on-chip I&D caches only have full flushes, no fine
+ * grained cache invalidations.  It only has these "flash clear" things
+ * just like the MicroSparcI.  Added to this many revs of the chip are
+ * teaming with hardware buggery.  Someday maybe we'll do direct
+ * diagnostic tag accesses for page level flushes as those should
+ * be painless and will increase performance due to the frequency of
+ * page level flushes.  This is a must to _really_ flush the caches,
+ * crazy hardware ;-)
+ */
+
+/* Cache flushes are all no-ops here: the viking caches are physically
+ * indexed and tagged (see the note above viking_flush_page_to_ram).
+ */
+static void viking_flush_cache_all(void)
+{
+}
+
+static void viking_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static void viking_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+}
+
+static void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+}
+
+/* Non-mxcc vikings are copy-back but are pure-physical so no flushing. */
+static void viking_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* Flush one page through the MXCC stream registers: copying the page
+ * from itself to itself issues coherent write invalidates to all
+ * other caches.  Bails out when the page has no physical translation.
+ */
+static void viking_mxcc_flush_page(unsigned long page)
+{
+	unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
+	unsigned long paddr0, paddr1;
+
+	if (ppage == 0xffffffffUL)
+		return;
+
+	paddr0 = 0x10;	/* Set cacheable bit. */
+	paddr1 = ppage;
+
+	/* Read the page's data through the stream registers,
+	 * and write it back to memory.  This will issue
+	 * coherent write invalidates to all other caches, thus
+	 * should also be sufficient in an MP system.
+	 */
+	__asm__ __volatile__ ("or %%g0, %0, %%g2\n\t"
+			      "or %%g0, %1, %%g3\n"
+			      "1:\n\t"
+			      "stda %%g2, [%2] %5\n\t"
+			      "stda %%g2, [%3] %5\n\t"
+			      "add %%g3, %4, %%g3\n\t"
+			      "btst 0xfff, %%g3\n\t"
+			      "bne 1b\n\t"
+			      "nop\n\t" : :
+			      "r" (paddr0), "r" (paddr1),
+			      "r" (MXCC_SRCSTREAM),
+			      "r" (MXCC_DESSTREAM),
+			      "r" (MXCC_STREAM_SIZE),
+			      "i" (ASI_M_MXCC) : "g2", "g3");
+
+	/* This was handcoded after a look at the gcc output from
+	 *
+	 * do {
+	 *	mxcc_set_stream_src(paddr);
+	 *	mxcc_set_stream_dst(paddr);
+	 *	paddr[1] += MXCC_STREAM_SIZE;
+	 * } while (paddr[1] & ~PAGE_MASK);
+	 */
+}
+
/* Flush one page from the on-chip Viking data cache (no MXCC present).
 * Walks every set/block of the cache, and for each dirty+valid line
 * whose physical tag matches the page, forces a write-back by reading
 * conflicting cache lines.
 */
static void viking_no_mxcc_flush_page(unsigned long page)
{
	unsigned long ppage = srmmu_v2p(page & PAGE_MASK);
	int set, block;
	unsigned long ptag[2];
	unsigned long vaddr;
	int i;

	/* No translation for this virtual page — nothing cached for it. */
	if (ppage == 0xffffffffUL)
		return;
	/* Compare against page-frame numbers, as stored in the ptags. */
	ppage >>= 12;

	/* 128 sets x 4 blocks, read back via diagnostic ptag access. */
	for (set = 0; set < 128; set++) {
		for (block = 0; block < 4; block++) {

			viking_get_dcache_ptag(set, block, ptag);

			/* Skip lines that don't belong to this page or
			 * that have nothing to write back.
			 */
			if (ptag[1] != ppage)
				continue;
			if (!(ptag[0] & VIKING_PTAG_VALID))
				continue;
			if (!(ptag[0] & VIKING_PTAG_DIRTY))
				continue;

			/* There was a great cache from TI
			 * with comfort as much as vi,
			 * 4 pages to flush,
			 * 4 pages, no rush,
			 * since anything else makes him die.
			 */
			/* Touch 8 addresses that index the same set to
			 * evict (and thus write back) the dirty line.
			 */
			vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
			for (i = 0; i < 8; i++) {
				__asm__ __volatile__ ("ld [%0], %%g2\n\t" : :
						      "r" (vaddr) : "g2");
				vaddr += PAGE_SIZE;
			}

			/* Continue with next set. */
			break;
		}
	}
}
+
/* Viking is IO cache coherent, but really only on MXCC. */
/* Intentional no-op: DMA sees coherent data without software flushing. */
static void viking_flush_page_for_dma(unsigned long page)
{
}
+
/* Invalidate the entire TLB; register windows are flushed to memory
 * first so no stale user state is lost across the flush.
 */
static void viking_flush_tlb_all(void)
{
	module_stats.invall++;	/* statistics only */
	flush_user_windows();
	srmmu_flush_whole_tlb();
}
+
/* Flush all TLB entries belonging to MM by switching to its context,
 * doing a context flush, and switching back.  On UP kernels an mm
 * without a hardware context allocated needs no flushing at all; on
 * SMP the check is compiled out and the flush is unconditional.
 */
static void viking_flush_tlb_mm(struct mm_struct *mm)
{
	int octx;
	module_stats.invmm++;	/* statistics only */

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	flush_user_windows();
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_ctx();
	srmmu_set_context(octx);
#ifndef __SMP__
	}
#endif
}
+
+static void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
+ module_stats.invrnge++;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
/* Flush the single TLB entry for PAGE in VMA's address space,
 * temporarily switching to that mm's hardware context.
 */
static void viking_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int octx;
	struct mm_struct *mm = vma->vm_mm;

	module_stats.invpg++;	/* statistics only */
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	flush_user_windows();
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_page(page);
	srmmu_set_context(octx);
#ifndef __SMP__
	}
#endif
}
+
/* Drop the TLB entry for PAGE so a cacheable-bit change in its pte
 * is picked up on the next access (current context only).
 */
static void viking_flush_tlb_page_for_cbit(unsigned long page)
{
	srmmu_flush_tlb_page(page);
}
+
/* Cypress flushes. */
/* Write back the whole Cypress data cache: scan every tag via
 * ASI_M_DATAC_TAG and, for each modified+valid line, force a
 * write-back by reading a conflicting address.
 */
static void cypress_flush_cache_all(void)
{
	/* volatile so the dummy load that forces the write-back
	 * is not optimized away.
	 */
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	/* 0x10000 bytes of cache, one tag per 0x20-byte line. */
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}
+
/* Flush MM's data out of the Cypress cache using ASI_M_FLUSH_CTX
 * (flush-by-context) stores, 8 lines (0x100 bytes) per asm block,
 * walking 0x10000 bytes of flush addresses downward to zero.
 * Interrupts are disabled while the foreign context is installed.
 */
static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags, faddr;
	int octx;

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	/* The line offsets are kept in registers so the asm can use
	 * reg+reg addressing for all 8 stores.
	 */
	register unsigned long a, b, c, d, e, f, g;
	flush_user_windows();
	save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	faddr = (0x10000 - 0x100);
	/* The goto jumps into the middle of the do-while so the initial
	 * faddr is flushed before the first decrement; the loop then
	 * steps down 0x100 at a time and terminates after faddr hits 0.
	 */
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	restore_flags(flags);
#ifndef __SMP__
	}
#endif
}
+
/* Flush the cache for [start, end) of MM one SRMMU segment (PMD) at a
 * time, using ASI_M_FLUSH_SEG stores.  For each segment, 0x10000 bytes
 * of flush addresses are walked downward back to the segment start.
 */
static void cypress_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	unsigned long flags, faddr;
	int octx;

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	/* Line offsets in registers for reg+reg addressing in the asm. */
	register unsigned long a, b, c, d, e, f, g;
	flush_user_windows();
	save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	start &= SRMMU_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		/* Jump into the loop so the topmost 0x100-byte chunk is
		 * flushed before the first decrement.
		 */
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_PMD_SIZE;
	}
	srmmu_set_context(octx);
	restore_flags(flags);
#ifndef __SMP__
	}
#endif
}
+
/* Flush one page of VMA's address space from the Cypress cache using
 * ASI_M_FLUSH_PAGE stores, walking the page downward in 0x100-byte
 * chunks (8 lines per asm block).
 */
static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	/* Line offsets in registers for reg+reg addressing in the asm. */
	register unsigned long a, b, c, d, e, f, g;
	flush_user_windows();
	save_and_cli(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	/* Jump into the loop so the last chunk of the page is flushed
	 * before the first decrement; loop ends when line wraps back
	 * to the page base.
	 */
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	restore_flags(flags);
#ifndef __SMP__
	}
#endif
}
+
/* Cypress is copy-back, at least that is how we configure it. */
/* Write one kernel page back to RAM with ASI_M_FLUSH_PAGE stores,
 * 0x100 bytes (8 lines) per asm block, walking the page downward.
 * No context switch needed: PAGE is a kernel address.
 */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	/* Jump into the loop so the topmost chunk is flushed first. */
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}
+
/* Cypress is also IO cache coherent. */
/* Intentional no-op: no software flush needed before DMA. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}
+
/* Push a page out of the cache before it is remapped uncacheable.
 * Identical flush sequence to cypress_flush_page_to_ram():
 * ASI_M_FLUSH_PAGE stores over the whole page, walked downward.
 */
static void cypress_flush_page_to_uncache(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	/* Jump into the loop so the topmost chunk is flushed first. */
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}
+
/* Invalidate the entire TLB. */
static void cypress_flush_tlb_all(void)
{
	module_stats.invall++;	/* statistics only */
	srmmu_flush_whole_tlb();
}
+
/* Flush all TLB entries for MM via a context flush done with MM's
 * hardware context temporarily installed.  On UP, skipped when the
 * mm has no context allocated.
 */
static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	int octx;

	module_stats.invmm++;	/* statistics only */
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_ctx();
	srmmu_set_context(octx);
#ifndef __SMP__
	}
#endif
}
+
+static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
+ module_stats.invrnge++;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ flush_user_windows();
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
+ }
+ srmmu_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
/* Flush the single TLB entry for PAGE in VMA's address space,
 * temporarily switching to that mm's hardware context.
 */
static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int octx;
	struct mm_struct *mm = vma->vm_mm;

	module_stats.invpg++;	/* statistics only */
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	flush_user_windows();
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_page(page);
	srmmu_set_context(octx);
#ifndef __SMP__
	}
#endif
}
+
/* Drop the TLB entry for PAGE so a cacheable-bit change in its pte
 * is picked up on the next access (current context only).
 */
static void cypress_flush_tlb_page_for_cbit(unsigned long page)
{
	srmmu_flush_tlb_page(page);
}
+
/* Hypersparc flushes. Very nice chip... */
/* Unconditionally flush the combined cache and the whole icache. */
static void hypersparc_flush_cache_all(void)
{
	flush_user_windows();
	hyper_flush_unconditional_combined();
	hyper_flush_whole_icache();
}
+
/* Flush user cache state for MM; on UP, only if the mm actually has
 * a hardware context allocated.
 */
static void hypersparc_flush_cache_mm(struct mm_struct *mm)
{
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	flush_user_windows();
	hyper_flush_cache_user();
	hyper_flush_whole_icache();
#ifndef __SMP__
	}
#endif
}
+
/* Boy was my older implementation inefficient... */
/* Flush the cache page-by-page over [start, end) in MM's context,
 * only touching pages that actually have a translation (hwprobe).
 */
static void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* volatile: keep the fault-status read below from being
	 * optimized away.
	 */
	volatile unsigned long clear;
	int octx;

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	flush_user_windows();
	octx = srmmu_get_context();
	start &= PAGE_MASK;
	srmmu_set_context(mm->context);
	while(start < end) {
		/* Only flush pages that are mapped; flushing requires a
		 * valid translation on HyperSparc.
		 */
		if(srmmu_hwprobe(start))
			hyper_flush_cache_page(start);
		start += PAGE_SIZE;
	}
	/* Read fault status so faults raised by the probes above do
	 * not linger.
	 */
	clear = srmmu_get_fstatus();
	srmmu_set_context(octx);
	hyper_flush_whole_icache();
#ifndef __SMP__
	}
#endif
}
+
/* HyperSparc requires a valid mapping where we are about to flush
 * in order to check for a physical tag match during the flush.
 */
static void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	/* volatile: keep the fault-status read below from being
	 * optimized away.
	 */
	volatile unsigned long clear;
	int octx;

#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif
	octx = srmmu_get_context();
	flush_user_windows();
	srmmu_set_context(mm->context);
	hyper_flush_whole_icache();
	/* Unmapped page: nothing cached to flush (see comment above). */
	if(!srmmu_hwprobe(page))
		goto no_mapping;
	hyper_flush_cache_page(page);
no_mapping:
	/* Read fault status so the probe above leaves nothing pending. */
	clear = srmmu_get_fstatus();
	srmmu_set_context(octx);
#ifndef __SMP__
	}
#endif
}
+
/* HyperSparc is copy-back. */
/* Write PAGE back to RAM if (and only if) it has a translation. */
static void hypersparc_flush_page_to_ram(unsigned long page)
{
	/* volatile keeps the fault-status read below alive. */
	volatile unsigned long clear;

	if(srmmu_hwprobe(page))
		hyper_flush_cache_page(page);
	clear = srmmu_get_fstatus();
}
+
/* HyperSparc is IO cache coherent. */
/* Intentional no-op: no software flush needed before DMA. */
static void hypersparc_flush_page_for_dma(unsigned long page)
{
}
+
/* Push PAGE out of the cache before it is remapped uncacheable;
 * same sequence as hypersparc_flush_page_to_ram().
 */
static void hypersparc_flush_cache_page_to_uncache(unsigned long page)
{
	/* volatile keeps the fault-status read below alive. */
	volatile unsigned long clear;

	if(srmmu_hwprobe(page))
		hyper_flush_cache_page(page);
	clear = srmmu_get_fstatus();
}
+
/* Invalidate the entire TLB. */
static void hypersparc_flush_tlb_all(void)
{
	module_stats.invall++;	/* statistics only */
	srmmu_flush_whole_tlb();
}
+
/* Flush all TLB entries for MM via a context flush done with MM's
 * hardware context temporarily installed.
 */
static void hypersparc_flush_tlb_mm(struct mm_struct *mm)
{
	int octx;

	module_stats.invmm++;	/* statistics only */
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif

	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_ctx();
	srmmu_set_context(octx);

#ifndef __SMP__
	}
#endif
}
+
+static void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int octx;
+
+ module_stats.invrnge++;
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+
+ octx = srmmu_get_context();
+ srmmu_set_context(mm->context);
+ if((start - end) < SRMMU_PMD_SIZE) {
+ start &= PAGE_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_page(start);
+ start += PAGE_SIZE;
+ }
+ } else if((start - end) < SRMMU_PGDIR_SIZE) {
+ start &= SRMMU_PMD_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_segment(start);
+ start += SRMMU_PMD_SIZE;
+ }
+ } else {
+ start &= SRMMU_PGDIR_MASK;
+ while(start < end) {
+ srmmu_flush_tlb_region(start);
+ start += SRMMU_PGDIR_SIZE;
+ }
+ }
+ srmmu_set_context(octx);
+
+#ifndef __SMP__
+ }
+#endif
+}
+
/* Flush the single TLB entry for PAGE in VMA's address space,
 * temporarily switching to that mm's hardware context.
 */
static void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;
	int octx;

	module_stats.invpg++;	/* statistics only */
#ifndef __SMP__
	if(mm->context != NO_CONTEXT) {
#endif

	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	srmmu_flush_tlb_page(page);
	srmmu_set_context(octx);

#ifndef __SMP__
	}
#endif
}
+
/* Drop the TLB entry for PAGE so a cacheable-bit change in its pte
 * is picked up on the next access (current context only).
 */
static void hypersparc_flush_tlb_page_for_cbit(unsigned long page)
{
	srmmu_flush_tlb_page(page);
}
+
/* Install PGDP as the page-table pointer in context-table entry CTXP
 * (stored as an SRMMU page-table descriptor: physical address >> 4,
 * ET_PTD type).  The icache is flushed first.
 */
static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{
	hyper_flush_whole_icache();
	set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (srmmu_v2p((unsigned long) pgdp) >> 4)));
}
+
/* Point TSK's context-table entry at a new page directory PGDP,
 * flushing cache before and TLB after the switch.
 * NOTE(review): the flushes operate on current->mm while the context
 * entry updated is tsk->mm's — this looks intentional only when
 * tsk == current; verify against the callers.
 */
static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
{
	if(tsk->mm->context != NO_CONTEXT) {
		flush_cache_mm(current->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
		flush_tlb_mm(current->mm);
	}
}
+
/* Make TSK's mm the active MMU context.  If the mm has no hardware
 * context yet, allocate one and install its pgd in the context table
 * before switching.
 * NOTE(review): as above, the flushes use current->mm rather than
 * tsk->mm — confirm tsk == current at all call sites.
 */
static void hypersparc_switch_to_context(struct task_struct *tsk)
{
	hyper_flush_whole_icache();
	if(tsk->mm->context == NO_CONTEXT) {
		alloc_context(tsk);
		flush_cache_mm(current->mm);
		ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
		flush_tlb_mm(current->mm);
	}
	srmmu_set_context(tsk->mm->context);
}
+
/* IOMMU things go here. */

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/* IOMMU pte: cacheable, writable, valid; MKIOPTE packs a physical
 * address (>> 4) into the iopte page field and clears the WAZ bits.
 */
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(phys) (((((phys)>>4) & IOPTE_PAGE) | IOPERM) & ~IOPTE_WAZ)

/* Fill the IOMMU page table with identity-style entries covering
 * kernel memory from page_offset up to KERN_END, so devices can DMA
 * to any kernel page.
 */
static inline void srmmu_map_dvma_pages_for_iommu(struct iommu_struct *iommu,
						  unsigned long kern_end)
{
	unsigned long first = page_offset;
	unsigned long last = kern_end;
	iopte_t *iopte = iommu->page_table;

	/* Index the table by offset from the IOMMU's DVMA window base. */
	iopte += ((first - iommu->start) >> PAGE_SHIFT);
	while(first <= last) {
		iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(first));
		first += PAGE_SIZE;
	}
}
+
/* Probe and initialize the SBUS IOMMU described by prom node IOMMUND.
 * Carves the iommu_struct and its page table out of the boot memory
 * pool [memory_start, memory_end), maps the IOMMU registers, sizes
 * the DVMA window from page_offset, and enables translation.
 * Returns the advanced memory_start.
 */
unsigned long iommu_init(int iommund, unsigned long memory_start,
			 unsigned long memory_end, struct linux_sbus *sbus)
{
	unsigned int impl, vers, ptsize;
	unsigned long tmp;
	struct iommu_struct *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];

	/* Allocate the iommu descriptor from the boot pool. */
	memory_start = LONG_ALIGN(memory_start);
	iommu = (struct iommu_struct *) memory_start;
	memory_start += sizeof(struct iommu_struct);
	prom_getproperty(iommund, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs));
	iommu->regs = (struct iommu_regs *)
		sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
			       "IOMMU registers", iommu_promregs[0].which_io, 0x0);
	if(!iommu->regs)
		panic("Cannot map IOMMU registers.");
	/* Implementation/version fields live in the top control bits. */
	impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
	vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
	tmp = iommu->regs->control;
	tmp &= ~(IOMMU_CTRL_RNGE);
	/* Choose the DVMA window size/base from where kernel memory is
	 * mapped (page_offset): higher bases get smaller windows.
	 */
	switch(page_offset & 0xf0000000) {
	case 0xf0000000:
		tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
		iommu->plow = iommu->start = 0xf0000000;
		break;
	case 0xe0000000:
		tmp |= (IOMMU_RNGE_512MB | IOMMU_CTRL_ENAB);
		iommu->plow = iommu->start = 0xe0000000;
		break;
	case 0xd0000000:
	case 0xc0000000:
		tmp |= (IOMMU_RNGE_1GB | IOMMU_CTRL_ENAB);
		iommu->plow = iommu->start = 0xc0000000;
		break;
	case 0xb0000000:
	case 0xa0000000:
	case 0x90000000:
	case 0x80000000:
		tmp |= (IOMMU_RNGE_2GB | IOMMU_CTRL_ENAB);
		iommu->plow = iommu->start = 0x80000000;
		break;
	}
	iommu->regs->control = tmp;
	iommu_invalidate(iommu->regs);
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	ptsize = iommu->end - iommu->start + 1;
	ptsize = (ptsize >> PAGE_SHIFT) * sizeof(iopte_t);

	/* Stupid alignment constraints give me a headache. */
	/* The table must be naturally aligned to its own size. */
	memory_start = PAGE_ALIGN(memory_start);
	memory_start = (((memory_start) + (ptsize - 1)) & ~(ptsize - 1));
	iommu->lowest = iommu->page_table = (iopte_t *) memory_start;
	memory_start += ptsize;

	/* Initialize new table. */
	/* On Viking, push the table's cache lines to memory so the
	 * IOMMU (which reads physical memory) sees the zeroed entries.
	 */
	flush_cache_all();
	if(viking_mxcc_present) {
		unsigned long start = (unsigned long) iommu->page_table;
		unsigned long end = (start + ptsize);
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
		unsigned long start = (unsigned long) iommu->page_table;
		unsigned long end = (start + ptsize);
		while(start < end) {
			viking_no_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	}
	memset(iommu->page_table, 0, ptsize);
	srmmu_map_dvma_pages_for_iommu(iommu, memory_end);
	flush_tlb_all();
	/* Hand the table's physical base (>> 4, SRMMU style) to the
	 * hardware and drop any stale translations.
	 */
	iommu->regs->base = srmmu_v2p((unsigned long) iommu->page_table) >> 4;
	iommu_invalidate(iommu->regs);

	sbus->iommu = iommu;
	printk("IOMMU: impl %d vers %d page table at %p of size %d bytes\n",
	       impl, vers, iommu->page_table, ptsize);
	return memory_start;
}
+
/* Initialize the sun4d SBI external page table (XPT): map its 16
 * pages of registers from the prom "reg" property, flush and zero
 * the table, and record it as the bus's iommu.
 */
void iommu_sun4d_init(int sbi_node, struct linux_sbus *sbus)
{
	u32 *iommu;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];

	prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			 sizeof(iommu_promregs));
	iommu = (u32 *)
		sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16),
			       "XPT", iommu_promregs[2].which_io, 0x0);
	if(!iommu)
		panic("Cannot map External Page Table.");

	/* Initialize new table. */
	/* On Viking, push the table's cache lines to memory so the
	 * hardware sees the zeroed entries (same dance as iommu_init).
	 */
	flush_cache_all();
	if(viking_mxcc_present) {
		unsigned long start = (unsigned long) iommu;
		unsigned long end = (start + 16 * PAGE_SIZE);
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
		unsigned long start = (unsigned long) iommu;
		unsigned long end = (start + 16 * PAGE_SIZE);
		while(start < end) {
			viking_no_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	}
	memset(iommu, 0, 16 * PAGE_SIZE);
	flush_tlb_all();

	sbus->iommu = (struct iommu_struct *)iommu;
}
+
+static char *srmmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+{
+ unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+
+ while(page < ((unsigned long)(vaddr + len))) {
+ flush_page_for_dma(page);
+ page += PAGE_SIZE;
+ }
+ return vaddr;
+}
+
+static void srmmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ unsigned long page;
+
+ while(sz >= 0) {
+ page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
+ while(page < (unsigned long)(sg[sz].addr + sg[sz].len)) {
+ flush_page_for_dma(page);
+ page += PAGE_SIZE;
+ }
+ sg[sz].dvma_addr = (char *) (sg[sz].addr);
+ sz--;
+ }
+}
+
/* Intentional no-op: nothing to undo after a single-buffer DMA. */
static void srmmu_release_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
{
}
+
/* Intentional no-op: nothing to undo after a scatter-gather DMA. */
static void srmmu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
{
}
+
/* Boot-time bump-allocator cursor, advanced by sparc_init_alloc()
 * in the early mapping code below.
 */
static unsigned long mempool;

/* NOTE: All of this startup code assumes the low 16mb (approx.) of
 * kernel mappings are done with one single contiguous chunk of
 * ram. On small ram machines (classics mainly) we only get
 * around 8mb mapped for us.
 */

/* Physical address backing KERNBASE; srmmu_early_paddr() below uses
 * it to translate early kernel virtual addresses.
 */
static unsigned long kbpage;
+
+/* Some dirty hacks to abstract away the painful boot up init. */
+static inline unsigned long srmmu_early_paddr(unsigned long vaddr)
+{
+ return ((vaddr - KERNBASE) + kbpage);
+}
+
/* Early-boot version of pgd_set: store PMDP's physical address
 * (translated via srmmu_early_paddr, >> 4) as a PTD in *PGDP.
 */
static inline void srmmu_early_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) pmdp) >> 4)));
}
+
/* Early-boot version of pmd_set: store PTEP's physical address
 * (translated via srmmu_early_paddr, >> 4) as a PTD in *PMDP.
 */
static inline void srmmu_early_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (srmmu_early_paddr((unsigned long) ptep) >> 4)));
}
+
/* Early-boot inverse of srmmu_early_pgd_set: recover the kernel
 * virtual address of the pmd table a pgd entry points at.
 */
static inline unsigned long srmmu_early_pgd_page(pgd_t pgd)
{
	return (((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}
+
/* Early-boot inverse of srmmu_early_pmd_set: recover the kernel
 * virtual address of the pte table a pmd entry points at.
 */
static inline unsigned long srmmu_early_pmd_page(pmd_t pmd)
{
	return (((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4) - kbpage) + KERNBASE;
}
+
/* Early-boot pmd_offset: locate ADDRESS's pmd entry within the pmd
 * table pointed at by *DIR.
 */
static inline pmd_t *srmmu_early_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) srmmu_early_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
}
+
/* Early-boot pte_offset: locate ADDRESS's pte entry within the pte
 * table pointed at by *DIR.
 */
static inline pte_t *srmmu_early_pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) srmmu_early_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
+
/* Make sure pmd and pte tables exist for every address in
 * [start, end), allocating empty ones from the boot pool where
 * missing.  No ptes are filled in — only the table skeleton.
 */
static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = srmmu_pgd_offset(init_task.mm, start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		/* One pte table covers a whole PMD-sized chunk. */
		start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
	}
}
+
/* This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do. Yuck... this is much better.
 */
/* Copy the PROM's translations for [start, end] into the kernel page
 * tables by probing each page with srmmu_hwprobe() and re-inserting
 * the returned pte at the widest level (region/segment/page) at which
 * the PROM mapped it.
 */
void srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		/* Skip over the hole reserved for the kernel debugger. */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		/* If the same pte shows up one PMD (resp. PGDIR) later,
		 * the PROM used a segment-level (resp. region-level)
		 * mapping; the boundary checks make sure START is
		 * aligned to that level first.
		 */
		if(!(start & ~(SRMMU_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = srmmu_pgd_offset(init_task.mm, start);
		if(what == 2) {
			/* Region-level pte: store it directly in the pgd. */
			pgd_val(*pgdp) = prompte;
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = sparc_init_alloc(&mempool, SRMMU_PMD_TABLE_SIZE);
			srmmu_early_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		if(what == 1) {
			/* Segment-level pte: store it directly in the pmd. */
			pmd_val(*pmdp) = prompte;
			start += SRMMU_PMD_SIZE;
			continue;
		}
		if(srmmu_pmd_none(*pmdp)) {
			ptep = sparc_init_alloc(&mempool, SRMMU_PTE_TABLE_SIZE);
			srmmu_early_pmd_set(pmdp, ptep);
		}
		ptep = srmmu_early_pte_offset(pmdp, start);
		pte_val(*ptep) = prompte;
		start += PAGE_SIZE;
	}
}
+
/* Back the DVMA virtual range [addr, addr+len) with freshly allocated
 * pages: map each page in the kernel page tables (uncached unless an
 * MXCC makes DVMA coherent) and enter it in the IOMMU page table.
 */
static void srmmu_map_dma_area(unsigned long addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	struct iommu_struct *iommu = SBus_chain->iommu;
	iopte_t *iopte = iommu->page_table;
	iopte_t *iopte_first = iopte;

	/* With an MXCC the IO path is coherent, so the CPU mapping may
	 * stay cacheable; otherwise map it uncached.
	 */
	if(viking_mxcc_present)
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	else
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);

	iopte += ((addr - iommu->start) >> PAGE_SHIFT);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = get_free_page(GFP_KERNEL);
		if(!page) {
			prom_printf("alloc_dvma: Cannot get a dvma page\n");
			prom_halt();
		} else {
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			pgdp = srmmu_pgd_offset(init_task.mm, addr);
			pmdp = srmmu_pmd_offset(pgdp, addr);
			ptep = srmmu_pte_offset(pmdp, addr);

			set_pte(ptep, pte_val(srmmu_mk_pte(page, dvma_prot)));

			iopte_val(*iopte++) = MKIOPTE(srmmu_v2p(page));
		}
		addr += PAGE_SIZE;
	}
	/* Push the new iopte entries to memory so the IOMMU (a physical
	 * memory reader) sees them — Viking needs explicit flushing.
	 */
	flush_cache_all();
	if(viking_mxcc_present) {
		unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
		unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
		while(start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if(flush_page_for_dma == viking_no_mxcc_flush_page) {
		unsigned long start = ((unsigned long) iopte_first) & PAGE_MASK;
		unsigned long end = PAGE_ALIGN(((unsigned long) iopte));
		while(start < end) {
			viking_no_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
}
+
/* #define DEBUG_MAP_KERNEL */

/* MKTRACE(("fmt", args)) prints via the PROM only when
 * DEBUG_MAP_KERNEL is defined; otherwise it compiles away.
 */
#ifdef DEBUG_MAP_KERNEL
#define MKTRACE(foo) prom_printf foo
#else
#define MKTRACE(foo)
#endif

/* Set by map_kernel(): nonzero when RAM exceeds what fits above
 * KERNBASE, forcing extra mappings below it.
 */
static int lots_of_ram = 0;
/* When set, prefer 16MB level-one ptes even for partially-used regions. */
static int large_pte_optimize = 1;

/* Kernel pte: cacheable, supervisor-only, valid. */
#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
+
/* Create a third-level SRMMU 16MB page mapping. */
/* Writes a single region-level (pgd) pte mapping VADDR to PHYS_BASE. */
static inline void do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
	unsigned long big_pte;

	MKTRACE(("dlm[v<%08lx>-->p<%08lx>]", vaddr, phys_base));
	big_pte = KERNEL_PTE(phys_base >> 4);
	pgd_val(*pgdp) = big_pte;
}
+
/* Create second-level SRMMU 256K medium sized page mappings. */
/* Writes segment-level (pmd) ptes mapping [vaddr, vend) to physical
 * memory starting at PHYS_BASE, one SRMMU_PMD_SIZE chunk at a time.
 */
static inline void do_medium_mapping(unsigned long vaddr, unsigned long vend,
				     unsigned long phys_base)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	unsigned long medium_pte;

	MKTRACE(("dmm[v<%08lx,%08lx>-->p<%08lx>]", vaddr, vend, phys_base));
	while(vaddr < vend) {
		pgdp = srmmu_pgd_offset(init_task.mm, vaddr);
		pmdp = srmmu_early_pmd_offset(pgdp, vaddr);
		medium_pte = KERNEL_PTE(phys_base >> 4);
		pmd_val(*pmdp) = medium_pte;
		phys_base += SRMMU_PMD_SIZE;
		vaddr += SRMMU_PMD_SIZE;
	}
}
+
/* Create a normal set of SRMMU page mappings for the virtual range
 * START to END, using physical pages beginning at PHYS_BASE.
 */
static inline void do_small_mapping(unsigned long start, unsigned long end,
				    unsigned long phys_base)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	MKTRACE(("dsm[v<%08lx,%08lx>-->p<%08lx>]", start, end, phys_base));
	while(start < end) {
		pgdp = srmmu_pgd_offset(init_task.mm, start);
		pmdp = srmmu_early_pmd_offset(pgdp, start);
		ptep = srmmu_early_pte_offset(pmdp, start);

		pte_val(*ptep) = KERNEL_PTE(phys_base >> 4);
		phys_base += PAGE_SIZE;
		start += PAGE_SIZE;
	}
}
+
+/* Look in the sp_bank for the given physical page, return the
+ * index number the entry was found in, or -1 for not found.
+ */
+static inline int find_in_spbanks(unsigned long phys_page)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ unsigned long start = sp_banks[entry].base_addr;
+ unsigned long end = start + sp_banks[entry].num_bytes;
+
+ if((start <= phys_page) && (phys_page < end))
+ return entry;
+ }
+ return -1;
+}
+
+/* Find an spbank entry not mapped as of yet, TAKEN_VECTOR is an
+ * array of char's, each member indicating if that spbank is mapped
+ * yet or not.
+ */
+static inline int find_free_spbank(char *taken_vector)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ if(!taken_vector[entry])
+ break;
+ return entry;
+}
+
+/* Same as above, but with a given bank size limit BLIMIT. */
+static inline int find_free_spbank_limited(char *taken_vector, unsigned long limit)
+{
+ int entry;
+
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ if(!taken_vector[entry] &&
+ (sp_banks[entry].num_bytes < limit))
+ break;
+ return entry;
+}
+
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
 * This routine is expected to update the srmmu_map and try as
 * hard as possible to use 16MB level-one SRMMU pte's when at all
 * possible to get short termination and faster translations.
 * Returns the first virtual address past what was mapped.
 */
static inline unsigned long map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = sp_banks[sp_entry].base_addr;
	unsigned long vstart = vbase;
	unsigned long vend = vbase + sp_banks[sp_entry].num_bytes;
	/* Next free slot in srmmu_map[], persists across calls. */
	static int srmmu_bank = 0;

	/* If physically not aligned on 16MB boundry, just shortcut
	 * right here by mapping them with 4k normal pages, and bumping
	 * the next virtual address to the next 16MB boundry. You can
	 * get this with various RAM configurations due to the way in
	 * which the PROM carves out it's own chunks of memory.
	 */
	if(pstart & ~SRMMU_PGDIR_MASK) {
		do_small_mapping(vstart, vend, pstart);
		vstart = SRMMU_PGDIR_ALIGN(vend);
		goto finish_up;
	}
	/* Otherwise, walk the range and at each step pick the largest
	 * mapping size the current virtual alignment permits: 4K pages
	 * until segment-aligned, 256K segments until region-aligned,
	 * then 16MB regions.
	 */
	while(vstart < vend) {
		unsigned long coverage, next_aligned;
		if(vstart & ~SRMMU_PMD_MASK) {
			next_aligned = SRMMU_PMD_ALIGN(vstart);
			if(next_aligned <= vend) {
				coverage = (next_aligned - vstart);
				do_small_mapping(vstart, next_aligned, pstart);
			} else {
				coverage = (vend - vstart);
				do_small_mapping(vstart, vend, pstart);
			}
		} else if(vstart & ~SRMMU_PGDIR_MASK) {
			next_aligned = SRMMU_PGDIR_ALIGN(vstart);
			if(next_aligned <= vend) {
				coverage = (next_aligned - vstart);
				do_medium_mapping(vstart, next_aligned, pstart);
			} else {
				coverage = (vend - vstart);
				do_small_mapping(vstart, vend, pstart);
			}
		} else {
			/* Region-aligned: take a whole 16MB pte if the
			 * optimization is on or the bank fills it.
			 */
			coverage = SRMMU_PGDIR_SIZE;
			if(large_pte_optimize || ((vstart+coverage)<=vend)) {
				do_large_mapping(vstart, pstart);
			} else {
				coverage = (vend - vstart);
				do_small_mapping(vstart, vend, pstart);
			}
		}
		vstart += coverage; pstart += coverage;
	}
finish_up:
	/* Record the virtual/physical extent of this bank. */
	srmmu_map[srmmu_bank].vbase = vbase;
	srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
	srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
	MKTRACE(("SRMMUBANK[v<%08lx>p<%08lx>s<%08lx>]", vbase, sp_banks[sp_entry].base_addr, sp_banks[sp_entry].num_bytes));
	srmmu_bank++;
	return vstart;
}
+
/* Print a fatal memory-probe diagnostic and halt in the PROM.
 * Never returns.
 */
static inline void memprobe_error(char *msg)
{
	/* Print via "%s" so a stray '%' in the message cannot be
	 * interpreted as a format directive by prom_printf().
	 */
	prom_printf("%s", msg);
	prom_printf("Halting now...\n");
	prom_halt();
}
+
+/* Assumptions: The bank given to the kernel from the prom/bootloader
+ * is part of a full bank which is at least 4MB in size and begins at
+ * 0xf0000000 (ie. KERNBASE).
+ */
+static void map_kernel(void)
+{
+ unsigned long raw_pte, physpage;
+ unsigned long vaddr, tally, low_base;
+ char etaken[SPARC_PHYS_BANKS];
+ int entry;
+
+ /* Step 1: Clear out sp_banks taken map. */
+ MKTRACE(("map_kernel: clearing etaken vector... "));
+ for(entry = 0; entry < SPARC_PHYS_BANKS; entry++)
+ etaken[entry] = 0;
+
+ low_base = KERNBASE;
+
+ /* Step 2: Calculate 'lots_of_ram'. */
+ tally = 0;
+ for(entry = 0; sp_banks[entry].num_bytes; entry++)
+ tally += sp_banks[entry].num_bytes;
+ if(tally >= (0xfd000000 - KERNBASE))
+ lots_of_ram = 1;
+ else
+ lots_of_ram = 0;
+ MKTRACE(("tally=%08lx lots_of_ram<%d>\n", tally, lots_of_ram));
+
+ /* Step 3: Fill in KERNBASE base pgd. Lots of sanity checking here. */
+ raw_pte = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+ if((raw_pte & SRMMU_ET_MASK) != SRMMU_ET_PTE)
+ memprobe_error("Wheee, kernel not mapped at all by boot loader.\n");
+ physpage = (raw_pte & SRMMU_PTE_PMASK) << 4;
+ physpage -= PAGE_SIZE;
+ if(physpage & ~(SRMMU_PGDIR_MASK))
+ memprobe_error("Wheee, kernel not mapped on 16MB physical boundry.\n");
+ entry = find_in_spbanks(physpage);
+ if(entry == -1 || (sp_banks[entry].base_addr != physpage))
+ memprobe_error("Kernel mapped in non-existant memory.\n");
+ MKTRACE(("map_kernel: map_spbank(vbase=%08x, entry<%d>)[%08lx,%08lx]\n", KERNBASE, entry, sp_banks[entry].base_addr, sp_banks[entry].num_bytes));
+ if(((KERNBASE + (sp_banks[entry].num_bytes)) > 0xfd000000) ||
+ ((KERNBASE + (sp_banks[entry].num_bytes)) < KERNBASE)) {
+ unsigned long orig_base = sp_banks[entry].base_addr;
+ unsigned long orig_len = sp_banks[entry].num_bytes;
+ unsigned long can_map = (0xfd000000 - KERNBASE);
+
+ /* Map a partial bank in this case, adjust the base
+ * and the length, but don't mark it used.
+ */
+ sp_banks[entry].num_bytes = can_map;
+ MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
+ vaddr = map_spbank(KERNBASE, entry);
+ MKTRACE(("vaddr now %08lx ", vaddr));
+ sp_banks[entry].base_addr = orig_base + can_map;
+ sp_banks[entry].num_bytes = orig_len - can_map;
+ MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
+ MKTRACE(("map_kernel: skipping first loop\n"));
+ goto loop_skip;
+ }
+ vaddr = map_spbank(KERNBASE, entry);
+ etaken[entry] = 1;
+
+ /* Step 4: Map what we can above KERNBASE. */
+ MKTRACE(("map_kernel: vaddr=%08lx, entering first loop\n", vaddr));
+ for(;;) {
+ unsigned long bank_size;
+
+ MKTRACE(("map_kernel: ffsp()"));
+ entry = find_free_spbank(&etaken[0]);
+ bank_size = sp_banks[entry].num_bytes;
+ MKTRACE(("<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
+ if(!bank_size)
+ break;
+ if(((vaddr + bank_size) >= 0xfd000000) ||
+ ((vaddr + bank_size) < KERNBASE)) {
+ unsigned long orig_base = sp_banks[entry].base_addr;
+ unsigned long orig_len = sp_banks[entry].num_bytes;
+ unsigned long can_map = (0xfd000000 - vaddr);
+
+ /* Map a partial bank in this case, adjust the base
+ * and the length, but don't mark it used.
+ */
+ sp_banks[entry].num_bytes = can_map;
+ MKTRACE(("wheee really big mapping [%08lx,%08lx]", orig_base, can_map));
+ vaddr = map_spbank(vaddr, entry);
+ MKTRACE(("vaddr now %08lx ", vaddr));
+ sp_banks[entry].base_addr = orig_base + can_map;
+ sp_banks[entry].num_bytes = orig_len - can_map;
+ MKTRACE(("adjust[%08lx,%08lx]\n", (orig_base + can_map), (orig_len - can_map)));
+ break;
+ }
+ if(!bank_size)
+ break;
+
+ /* Ok, we can map this one, do it. */
+ MKTRACE(("map_spbank(%08lx,entry<%d>) ", vaddr, entry));
+ vaddr = map_spbank(vaddr, entry);
+ etaken[entry] = 1;
+ MKTRACE(("vaddr now %08lx\n", vaddr));
+ }
+ MKTRACE(("\n"));
+ /* If not lots_of_ram, assume we did indeed map it all above. */
+loop_skip:
+ if(!lots_of_ram)
+ goto check_and_return;
+
+ /* Step 5: Map the rest (if any) right below KERNBASE. */
+ MKTRACE(("map_kernel: doing low mappings... "));
+ tally = 0;
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ if(!etaken[entry])
+ tally += SRMMU_PGDIR_ALIGN(sp_banks[entry].num_bytes);
+ }
+ if(!tally)
+ memprobe_error("Whee, lots_of_ram yet no low pages to map.\n");
+ low_base = (KERNBASE - tally);
+ MKTRACE(("tally=%08lx low_base=%08lx\n", tally, low_base));
+
+ /* Ok, now map 'em. */
+ MKTRACE(("map_kernel: Allocate pt skeleton (%08lx, %08x)\n",low_base,KERNBASE));
+ srmmu_allocate_ptable_skeleton(low_base, KERNBASE);
+ vaddr = low_base;
+ MKTRACE(("map_kernel: vaddr=%08lx Entering second loop for low maps.\n", vaddr));
+ for(;;) {
+ unsigned long bank_size;
+
+ entry = find_free_spbank(&etaken[0]);
+ bank_size = sp_banks[entry].num_bytes;
+ MKTRACE(("map_kernel: e<%d> base=%08lx bs=%08lx ", entry, sp_banks[entry].base_addr, bank_size));
+ if(!bank_size)
+ break;
+ if((vaddr + bank_size) > KERNBASE)
+ memprobe_error("Wheee, kernel low mapping overflow.\n");
+ MKTRACE(("map_spbank(%08lx, %d) ", vaddr, entry));
+ vaddr = map_spbank(vaddr, entry);
+ etaken[entry] = 1;
+ tally -= SRMMU_PGDIR_ALIGN(bank_size);
+ MKTRACE(("Now, vaddr=%08lx tally=%08lx\n", vaddr, tally));
+ }
+ MKTRACE(("\n"));
+ if(tally)
+ memprobe_error("Wheee, did not map all of low mappings.\n");
+check_and_return:
+ /* Step 6: Sanity check, make sure we did it all. */
+ MKTRACE(("check_and_return: "));
+ for(entry = 0; sp_banks[entry].num_bytes; entry++) {
+ MKTRACE(("e[%d]=%d ", entry, etaken[entry]));
+ if(!etaken[entry]) {
+ MKTRACE(("oops\n"));
+ memprobe_error("Some bank did not get mapped.\n");
+ }
+ }
+ MKTRACE(("success\n"));
+ init_task.mm->mmap->vm_start = page_offset = low_base;
+ return; /* SUCCESS! */
+}
+
+/* Clamp the kernel's notion of end-of-memory: sum the PGDIR-aligned sizes
+ * of all physical banks and return KERNBASE plus that total, capped at the
+ * 0xfd000000 IOSPACE boundary (0x0d000000 bytes above KERNBASE).
+ * NOTE(review): the mem_end_now argument is never used here.
+ */
+unsigned long srmmu_endmem_fixup(unsigned long mem_end_now)
+{
+	unsigned long tally = 0;
+	int i;
+
+	/* sp_banks[] is terminated by a zero num_bytes entry. */
+	for(i = 0; sp_banks[i].num_bytes; i++)
+		tally += SRMMU_PGDIR_ALIGN(sp_banks[i].num_bytes);
+	if(tally < (0x0d000000UL)) {
+		return KERNBASE + tally;
+	} else {
+		return 0xfd000000UL;
+	}
+}
+
+/* Paging initialization on the Sparc Reference MMU. */
+extern unsigned long free_area_init(unsigned long, unsigned long);
+extern unsigned long sparc_context_init(unsigned long, int);
+
+extern int physmem_mapped_contig;
+extern int linux_num_cpus;
+
+void (*poke_srmmu)(void);
+
+/* Set up paging on an SRMMU machine: discover the number of MMU contexts
+ * from the PROM device tree, build the kernel page-table skeleton, inherit
+ * PROM mappings, map physical RAM, allocate and install the context table,
+ * then hand the remaining memory to the context and free-area allocators.
+ * Returns the page-aligned new start_mem.
+ */
+unsigned long srmmu_paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+	unsigned long ptables_start;
+	int i, cpunode;
+	char node_str[128];
+
+	sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
+	physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
+
+#if CONFIG_AP1000
+	num_contexts = AP_NUM_CONTEXTS;
+#else
+	/* Find the number of contexts on the srmmu. */
+	cpunode = prom_getchild(prom_root_node);
+	num_contexts = 0;
+	while((cpunode = prom_getsibling(cpunode)) != 0) {
+		prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
+		if(!strcmp(node_str, "cpu")) {
+			/* Default to 8 contexts if the property is absent. */
+			num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
+			break;
+		}
+	}
+#endif
+	if(!num_contexts) {
+		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
+		prom_halt();
+	}
+
+	/* Remember where page tables begin so they can be flushed below. */
+	ptables_start = mempool = PAGE_ALIGN(start_mem);
+	memset(swapper_pg_dir, 0, PAGE_SIZE);
+	/* Probe the pte mapping KERNBASE+PAGE_SIZE to find the kernel's
+	 * physical base page (pte pmask field shifted to a phys address).
+	 */
+	kbpage = srmmu_hwprobe(KERNBASE + PAGE_SIZE);
+	kbpage = (kbpage & SRMMU_PTE_PMASK) << 4;
+	kbpage -= PAGE_SIZE;
+
+	srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
+#if CONFIG_SUN_IO
+	srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
+	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
+#endif
+
+	mempool = PAGE_ALIGN(mempool);
+#if CONFIG_AP1000
+	ap_inherit_mappings();
+#else
+	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
+#endif
+	map_kernel();
+#if CONFIG_AP1000
+	/* the MSC wants this aligned on a 16k boundary */
+	srmmu_context_table =
+		sparc_init_alloc(&mempool,
+				 num_contexts*sizeof(ctxd_t)<0x4000?
+				 0x4000:
+				 num_contexts*sizeof(ctxd_t));
+#else
+	srmmu_context_table = sparc_init_alloc(&mempool, num_contexts*sizeof(ctxd_t));
+#endif
+	srmmu_ctx_table_phys = (ctxd_t *) srmmu_v2p((unsigned long) srmmu_context_table);
+	/* Every context initially points at the kernel's swapper_pg_dir. */
+	for(i = 0; i < num_contexts; i++)
+		ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
+
+	start_mem = PAGE_ALIGN(mempool);
+
+	flush_cache_all();
+	/* Old (no-MXCC) Vikings must have the freshly built page tables
+	 * flushed from the cache page by page before the MMU uses them.
+	 */
+	if(flush_page_for_dma == viking_no_mxcc_flush_page) {
+		unsigned long start = ptables_start;
+		unsigned long end = start_mem;
+
+		while(start < end) {
+			viking_no_mxcc_flush_page(start);
+			start += PAGE_SIZE;
+		}
+	}
+	/* Point the MMU at the new context table and apply per-chip setup. */
+	srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
+	flush_tlb_all();
+	poke_srmmu();
+
+#if CONFIG_AP1000
+	/* on the AP we don't put the top few contexts into the free
+	   context list as these are reserved for parallel tasks */
+	start_mem = sparc_context_init(start_mem, MPP_CONTEXT_BASE);
+#else
+	start_mem = sparc_context_init(start_mem, num_contexts);
+#endif
+	start_mem = free_area_init(start_mem, end_mem);
+
+	return PAGE_ALIGN(start_mem);
+}
+
+/* Static buffer backing /proc cpu/mmu info text. */
+static char srmmuinfo[512];
+
+/* Format MMU name, flush statistics and context count into srmmuinfo
+ * and return it.  NOTE(review): under USE_CHUNK_ALLOC the "garbage"
+ * line prints garbage_calls and the "garbage hits" line prints
+ * clct_pages — confirm the label/argument pairing is intended.
+ */
+static char *srmmu_mmu_info(void)
+{
+	sprintf(srmmuinfo, "MMU type\t: %s\n"
+		"invall\t\t: %d\n"
+		"invmm\t\t: %d\n"
+		"invrnge\t\t: %d\n"
+		"invpg\t\t: %d\n"
+		"contexts\t: %d\n"
+#ifdef USE_CHUNK_ALLOC
+		"big chunks\t: %d\n"
+		"little chunks\t: %d\n"
+		"chunk pages\t: %d\n"
+		"garbage\t\t: %d\n"
+		"garbage hits\t: %d\n"
+#endif
+		, srmmu_name,
+		module_stats.invall,
+		module_stats.invmm,
+		module_stats.invrnge,
+		module_stats.invpg,
+		num_contexts
+#ifdef USE_CHUNK_ALLOC
+		, bcwater, lcwater,
+		chunk_pages,
+		garbage_calls,
+		clct_pages
+#endif
+		);
+	return srmmuinfo;
+}
+
+/* Generic SRMMU update_mmu_cache hook: intentionally a no-op; VAC-equipped
+ * chips install srmmu_vac_update_mmu_cache instead.
+ */
+static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+{
+}
+
+/* Installed as mmu_exit_hook.  If the current mm holds a hardware context
+ * and this is its last user, flush it, repoint its context table entry at
+ * swapper_pg_dir, and return the context number to the free pool.
+ */
+static void srmmu_exit_hook(void)
+{
+	struct mm_struct *mm = current->mm;
+
+	if(mm->context != NO_CONTEXT && mm->count == 1) {
+		flush_cache_mm(mm);
+		ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+		flush_tlb_mm(mm);
+		free_context(mm->context);
+		mm->context = NO_CONTEXT;
+	}
+}
+
+/* Installed as mmu_flush_hook.  For kernel threads, allocate a fresh
+ * hardware context, install the task's pgd in the context table, and
+ * switch the MMU to that context.
+ */
+static void srmmu_flush_hook(void)
+{
+	if(current->tss.flags & SPARC_FLAG_KTHREAD) {
+		alloc_context(current);
+		flush_cache_mm(current->mm);
+		ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+		flush_tlb_mm(current->mm);
+		srmmu_set_context(current->mm->context);
+	}
+}
+
+/* update_mmu_cache for virtually-addressed-cache (VAC) chips.  The whole
+ * body is currently disabled with #if 0, so this is effectively a no-op.
+ * The disabled code walks all shared-writable mappings of the same inode
+ * and, on a cache-alias conflict (vac_badbits mismatch), marks the pages
+ * uncacheable by clearing SRMMU_CACHE in their ptes.
+ */
+static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
+				       unsigned long address, pte_t pte)
+{
+#if 0
+	struct inode *inode;
+	struct vm_area_struct *vmaring;
+	unsigned long offset, vaddr;
+	unsigned long start;
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	/* Only shared writable mappings can alias dangerously. */
+	if (!(vma->vm_flags & VM_WRITE) ||
+	    !(vma->vm_flags & VM_SHARED))
+		return;
+
+	inode = vma->vm_inode;
+	if (!inode)
+		return;
+
+	offset = (address & PAGE_MASK) - vma->vm_start;
+	vmaring = inode->i_mmap;
+	do {
+		vaddr = vmaring->vm_start + offset;
+
+		if ((vaddr ^ address) & vac_badbits) {
+			start = vma->vm_start;
+			while (start < vma->vm_end) {
+				pgdp = srmmu_pgd_offset(vma->vm_mm, start);
+				pmdp = srmmu_pmd_offset(pgdp, start);
+				ptep = srmmu_pte_offset(pmdp, start);
+
+				flush_cache_page_to_uncache(start);
+				set_pte(ptep, __pte((pte_val(*ptep) &
+					~SRMMU_CACHE)));
+				flush_tlb_page_for_cbit(start);
+
+				start += PAGE_SIZE;
+			}
+			return;
+		}
+	} while ((vmaring = vmaring->vm_next_share) != inode->i_mmap);
+#endif
+}
+
+/* HyperSparc variant of the exit hook: identical logic to srmmu_exit_hook,
+ * with the flush being essential because HyperSparc is a copy-back cache.
+ */
+static void hypersparc_exit_hook(void)
+{
+	struct mm_struct *mm = current->mm;
+
+	if(mm->context != NO_CONTEXT && mm->count == 1) {
+		/* HyperSparc is copy-back, any data for this
+		 * process in a modified cache line is stale
+		 * and must be written back to main memory now
+		 * else we eat shit later big time.
+		 */
+		flush_cache_mm(mm);
+		ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+		flush_tlb_mm(mm);
+		free_context(mm->context);
+		mm->context = NO_CONTEXT;
+	}
+}
+
+/* HyperSparc variant of the flush hook; same steps as srmmu_flush_hook:
+ * give a kernel thread its own context and switch the MMU onto it.
+ */
+static void hypersparc_flush_hook(void)
+{
+	if(current->tss.flags & SPARC_FLAG_KTHREAD) {
+		alloc_context(current);
+		flush_cache_mm(current->mm);
+		ctxd_set(&srmmu_context_table[current->mm->context], current->mm->pgd);
+		flush_tlb_mm(current->mm);
+		srmmu_set_context(current->mm->context);
+	}
+}
+
+/* Init various srmmu chip types. */
+/* Terminal path of chip probing: report an unidentifiable SRMMU and halt. */
+static void srmmu_is_bad(void)
+{
+	prom_printf("Could not determine SRMMU chip type.\n");
+	prom_halt();
+}
+
+/* Read the virtual address cache geometry (line size, number of lines)
+ * from the first "cpu" node in the PROM tree, then derive vac_cache_size
+ * and vac_badbits (the page-aligned alias-detection mask).  Halts via the
+ * PROM if the properties are missing.
+ */
+static void init_vac_layout(void)
+{
+	int nd, cache_lines;
+	char node_str[128];
+
+	nd = prom_getchild(prom_root_node);
+	while((nd = prom_getsibling(nd)) != 0) {
+		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+		if(!strcmp(node_str, "cpu"))
+			break;
+	}
+	if(nd == 0) {
+		prom_printf("No CPU nodes found, halting.\n");
+		prom_halt();
+	}
+
+	vac_line_size = prom_getint(nd, "cache-line-size");
+	if (vac_line_size == -1) {
+		prom_printf("can't determine cache-line-size, halting.\n");
+		prom_halt();
+	}
+	cache_lines = prom_getint(nd, "cache-nlines");
+	if (cache_lines == -1) {
+		prom_printf("can't determine cache-nlines, halting.\n");
+		prom_halt();
+	}
+	vac_cache_size = cache_lines * vac_line_size;
+	vac_badbits = (vac_cache_size - 1) & PAGE_MASK;
+}
+
+/* Per-chip MMU enable for HyperSparc: flush caches, turn on the cache in
+ * copy-back mode with write buffering, enable the icache, and clear any
+ * pending fault status by reading the fault registers.
+ */
+static void poke_hypersparc(void)
+{
+	volatile unsigned long clear;
+	unsigned long mreg = srmmu_get_mmureg();
+
+	hyper_flush_unconditional_combined();
+
+	mreg &= ~(HYPERSPARC_CWENABLE);
+	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
+	mreg |= (HYPERSPARC_CMODE);
+
+	srmmu_set_mmureg(mreg);
+	hyper_clear_all_tags();
+
+	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
+	hyper_flush_whole_icache();
+	/* Reads drain any latched fault address/status. */
+	clear = srmmu_get_faddr();
+	clear = srmmu_get_fstatus();
+}
+
+/* Install all HyperSparc-specific function pointers (cache/TLB flushes,
+ * context handling, hooks) and query the VAC layout.
+ */
+static void init_hypersparc(void)
+{
+	srmmu_name = "ROSS HyperSparc";
+
+	init_vac_layout();
+
+	set_pte = srmmu_set_pte_nocache_hyper;
+	flush_cache_all = hypersparc_flush_cache_all;
+	flush_cache_mm = hypersparc_flush_cache_mm;
+	flush_cache_range = hypersparc_flush_cache_range;
+	flush_cache_page = hypersparc_flush_cache_page;
+
+	flush_tlb_all = hypersparc_flush_tlb_all;
+	flush_tlb_mm = hypersparc_flush_tlb_mm;
+	flush_tlb_range = hypersparc_flush_tlb_range;
+	flush_tlb_page = hypersparc_flush_tlb_page;
+
+	flush_page_to_ram = hypersparc_flush_page_to_ram;
+	flush_page_for_dma = hypersparc_flush_page_for_dma;
+	flush_cache_page_to_uncache = hypersparc_flush_cache_page_to_uncache;
+	flush_tlb_page_for_cbit = hypersparc_flush_tlb_page_for_cbit;
+
+	ctxd_set = hypersparc_ctxd_set;
+	switch_to_context = hypersparc_switch_to_context;
+	mmu_exit_hook = hypersparc_exit_hook;
+	mmu_flush_hook = hypersparc_flush_hook;
+	update_mmu_cache = srmmu_vac_update_mmu_cache;
+	sparc_update_rootmmu_dir = hypersparc_update_rootmmu_dir;
+	poke_srmmu = poke_hypersparc;
+}
+
+/* Per-chip MMU enable for Cypress: clear fault state, invalidate the data
+ * cache tags with ASI stores, then enable the cache in copy-back mode.
+ */
+static void poke_cypress(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+	unsigned long faddr;
+	volatile unsigned long clear;
+
+	/* Drain any latched fault address/status. */
+	clear = srmmu_get_faddr();
+	clear = srmmu_get_fstatus();
+
+	/* Zero the data cache tags (both halves) via ASI_M_DATAC_TAG. */
+	for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
+		__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
+				     "sta %%g0, [%0] %2\n\t" : :
+				     "r" (faddr), "r" (0x40000),
+				     "i" (ASI_M_DATAC_TAG));
+	}
+
+	/* And one more, for our good neighbor, Mr. Broken Cypress. */
+	clear = srmmu_get_faddr();
+	clear = srmmu_get_fstatus();
+
+	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
+	srmmu_set_mmureg(mreg);
+}
+
+/* Shared setup for all Cypress variants: query VAC layout and install the
+ * Cypress cache/TLB function pointers.  Callers set srmmu_name/modtype.
+ */
+static void init_cypress_common(void)
+{
+	init_vac_layout();
+
+	set_pte = srmmu_set_pte_nocache_cypress;
+	flush_cache_all = cypress_flush_cache_all;
+	flush_cache_mm = cypress_flush_cache_mm;
+	flush_cache_range = cypress_flush_cache_range;
+	flush_cache_page = cypress_flush_cache_page;
+
+	flush_tlb_all = cypress_flush_tlb_all;
+	flush_tlb_mm = cypress_flush_tlb_mm;
+	flush_tlb_page = cypress_flush_tlb_page;
+	flush_tlb_range = cypress_flush_tlb_range;
+
+	flush_page_to_ram = cypress_flush_page_to_ram;
+	flush_page_for_dma = cypress_flush_page_for_dma;
+	flush_cache_page_to_uncache = cypress_flush_page_to_uncache;
+	flush_tlb_page_for_cbit = cypress_flush_tlb_page_for_cbit;
+
+	update_mmu_cache = srmmu_vac_update_mmu_cache;
+	poke_srmmu = poke_cypress;
+}
+
+/* Uniprocessor Cypress-604: name it and run the common Cypress setup. */
+static void init_cypress_604(void)
+{
+	srmmu_name = "ROSS Cypress-604(UP)";
+	srmmu_modtype = Cypress;
+	init_cypress_common();
+}
+
+/* MP Cypress-605: record hardware-bug flags for module revisions 0xe
+ * (broken copy-back) and 0xd (broken ASI flush), then run common setup.
+ */
+static void init_cypress_605(unsigned long mrev)
+{
+	srmmu_name = "ROSS Cypress-605(MP)";
+	if(mrev == 0xe) {
+		srmmu_modtype = Cypress_vE;
+		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
+	} else {
+		if(mrev == 0xd) {
+			srmmu_modtype = Cypress_vD;
+			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
+		} else {
+			srmmu_modtype = Cypress;
+		}
+	}
+	init_cypress_common();
+}
+
+/* Per-chip MMU enable for Fujitsu Swift: clear the caches, enable I/D
+ * caches, and disable the (broken) branch-folding unit.
+ */
+static void poke_swift(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+
+	/* Clear any crap from the cache or else... */
+	swift_idflash_clear();
+	mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
+
+	/* The Swift branch folding logic is completely broken. At
+	 * trap time, if things are just right, if can mistakenly
+	 * think that a trap is coming from kernel mode when in fact
+	 * it is coming from user mode (it mis-executes the branch in
+	 * the trap code). So you see things like crashme completely
+	 * hosing your machine which is completely unacceptable. Turn
+	 * this shit off... nice job Fujitsu.
+	 */
+	mreg &= ~(SWIFT_BF);
+	srmmu_set_mmureg(mreg);
+}
+
+/* Physical address of the Swift mask (revision) id register. */
+#define SWIFT_MASKID_ADDR 0x10003018
+/* Probe the Swift revision via an ASI_M_BYPASS load, record which known
+ * hardware bugs the revision carries, and install the Swift cache/TLB
+ * function pointers.
+ */
+static void init_swift(void)
+{
+	unsigned long swift_rev;
+
+	/* Read byte 3 of the mask id register (shifted down by 0x18). */
+	__asm__ __volatile__("lda [%1] %2, %0\n\t"
+			     "srl %0, 0x18, %0\n\t" :
+			     "=r" (swift_rev) :
+			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
+	srmmu_name = "Fujitsu Swift";
+	switch(swift_rev) {
+	case 0x11:
+	case 0x20:
+	case 0x23:
+	case 0x30:
+		srmmu_modtype = Swift_lots_o_bugs;
+		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
+		/* Gee george, I wonder why Sun is so hush hush about
+		 * this hardware bug... really braindamage stuff going
+		 * on here. However I think we can find a way to avoid
+		 * all of the workaround overhead under Linux. Basically,
+		 * any page fault can cause kernel pages to become user
+		 * accessible (the mmu gets confused and clears some of
+		 * the ACC bits in kernel ptes). Aha, sounds pretty
+		 * horrible eh? But wait, after extensive testing it appears
+		 * that if you use pgd_t level large kernel pte's (like the
+		 * 4MB pages on the Pentium) the bug does not get tripped
+		 * at all. This avoids almost all of the major overhead.
+		 * Welcome to a world where your vendor tells you to,
+		 * "apply this kernel patch" instead of "sorry for the
+		 * broken hardware, send it back and we'll give you
+		 * properly functioning parts"
+		 */
+		break;
+	case 0x25:
+	case 0x31:
+		srmmu_modtype = Swift_bad_c;
+		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
+		/* You see Sun allude to this hardware bug but never
+		 * admit things directly, they'll say things like,
+		 * "the Swift chip cache problems" or similar.
+		 */
+		break;
+	default:
+		srmmu_modtype = Swift_ok;
+		break;
+	};
+
+	flush_cache_all = swift_flush_cache_all;
+	flush_cache_mm = swift_flush_cache_mm;
+	flush_cache_page = swift_flush_cache_page;
+	flush_cache_range = swift_flush_cache_range;
+
+	flush_tlb_all = swift_flush_tlb_all;
+	flush_tlb_mm = swift_flush_tlb_mm;
+	flush_tlb_page = swift_flush_tlb_page;
+	flush_tlb_range = swift_flush_tlb_range;
+
+	flush_page_to_ram = swift_flush_page_to_ram;
+	flush_page_for_dma = swift_flush_page_for_dma;
+	flush_cache_page_to_uncache = swift_flush_cache_page_to_uncache;
+	flush_tlb_page_for_cbit = swift_flush_tlb_page_for_cbit;
+
+	/* Are you now convinced that the Swift is one of the
+	 * biggest VLSI abortions of all time? Bravo Fujitsu!
+	 * Fujitsu, the !#?!%$'d up processor people. I bet if
+	 * you examined the microcode of the Swift you'd find
+	 * XXX's all over the place.
+	 */
+	poke_srmmu = poke_swift;
+}
+
+/* Per-chip MMU enable for TI Tsunami: flush both caches, clear the
+ * intra-cycle TLB disable bit, and enable I/D caches.
+ */
+static void poke_tsunami(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+
+	tsunami_flush_icache();
+	tsunami_flush_dcache();
+	mreg &= ~TSUNAMI_ITD;
+	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
+	srmmu_set_mmureg(mreg);
+}
+
+/* Install the TI Tsunami cache/TLB function pointers. */
+static void init_tsunami(void)
+{
+	/* Tsunami's pretty sane, Sun and TI actually got it
+	 * somewhat right this time. Fujitsu should have
+	 * taken some lessons from them.
+	 */
+
+	srmmu_name = "TI Tsunami";
+	srmmu_modtype = Tsunami;
+
+	flush_cache_all = tsunami_flush_cache_all;
+	flush_cache_mm = tsunami_flush_cache_mm;
+	flush_cache_page = tsunami_flush_cache_page;
+	flush_cache_range = tsunami_flush_cache_range;
+
+	flush_tlb_all = tsunami_flush_tlb_all;
+	flush_tlb_mm = tsunami_flush_tlb_mm;
+	flush_tlb_page = tsunami_flush_tlb_page;
+	flush_tlb_range = tsunami_flush_tlb_range;
+
+	flush_page_to_ram = tsunami_flush_page_to_ram;
+	flush_page_for_dma = tsunami_flush_page_for_dma;
+	flush_cache_page_to_uncache = tsunami_flush_cache_page_to_uncache;
+	flush_tlb_page_for_cbit = tsunami_flush_tlb_page_for_cbit;
+
+	poke_srmmu = poke_tsunami;
+}
+
+/* Per-chip MMU enable for TI Viking.  With an MXCC present, enable the
+ * external cache controller features and page-table caching; without one,
+ * disable table caching and (on secondary CPUs, tracked by smp_catch)
+ * turn off mixed-command mode.  Finally set the common enable bits and,
+ * on SMP, replace some flush pointers with their local (no cross-call)
+ * variants since Viking does not need the remote flushes for these.
+ */
+static void poke_viking(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+	static int smp_catch = 0;
+
+	if(viking_mxcc_present) {
+		unsigned long mxcc_control = mxcc_get_creg();
+
+		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
+		mxcc_control &= ~(MXCC_CTL_RRC);
+		mxcc_set_creg(mxcc_control);
+
+		/* We don't need memory parity checks.
+		 * XXX This is a mess, have to dig out later. ecd.
+		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
+		 */
+
+		/* We do cache ptables on MXCC. */
+		mreg |= VIKING_TCENABLE;
+	} else {
+		unsigned long bpreg;
+
+		mreg &= ~(VIKING_TCENABLE);
+		if(smp_catch++) {
+			/* Must disable mixed-cmd mode here for
+			 * other cpu's.
+			 */
+			bpreg = viking_get_bpreg();
+			bpreg &= ~(VIKING_ACTION_MIX);
+			viking_set_bpreg(bpreg);
+
+			/* Just in case PROM does something funny. */
+			msi_set_sync();
+		}
+	}
+
+	mreg |= VIKING_SPENABLE;
+	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
+	mreg |= VIKING_SBENABLE;
+	mreg &= ~(VIKING_ACENABLE);
+#if CONFIG_AP1000
+	mreg &= ~(VIKING_SBENABLE);
+#endif
+	srmmu_set_mmureg(mreg);
+
+
+#ifdef __SMP__
+	/* Avoid unnecessary cross calls. */
+	flush_cache_all = local_flush_cache_all;
+	flush_page_to_ram = local_flush_page_to_ram;
+	flush_page_for_dma = local_flush_page_for_dma;
+	if (viking_mxcc_present) {
+		flush_cache_page_to_uncache = local_flush_cache_page_to_uncache;
+	}
+#endif
+}
+
+/* Detect MXCC vs plain Viking from the MMODE bit and install the Viking
+ * cache/TLB function pointers.  Plain Vikings get the no-MXCC page flush
+ * for both uncache and DMA paths to work around the DMA snooping bug.
+ */
+static void init_viking(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+
+	/* Ahhh, the viking. SRMMU VLSI abortion number two... */
+
+	if(mreg & VIKING_MMODE) {
+		unsigned long bpreg;
+
+		srmmu_name = "TI Viking";
+		viking_mxcc_present = 0;
+		set_pte = srmmu_set_pte_nocache_nomxccvik;
+
+		bpreg = viking_get_bpreg();
+		bpreg &= ~(VIKING_ACTION_MIX);
+		viking_set_bpreg(bpreg);
+
+		msi_set_sync();
+
+		flush_cache_page_to_uncache = viking_no_mxcc_flush_page;
+
+		/* We need this to make sure old viking takes no hits
+		 * on it's cache for dma snoops to workaround the
+		 * "load from non-cacheable memory" interrupt bug.
+		 * This is only necessary because of the new way in
+		 * which we use the IOMMU.
+		 */
+		flush_page_for_dma = viking_no_mxcc_flush_page;
+	} else {
+		srmmu_name = "TI Viking/MXCC";
+		viking_mxcc_present = 1;
+		flush_cache_page_to_uncache = viking_mxcc_flush_page;
+
+		/* MXCC vikings lack the DMA snooping bug. */
+		flush_page_for_dma = viking_flush_page_for_dma;
+	}
+
+	flush_cache_all = viking_flush_cache_all;
+	flush_cache_mm = viking_flush_cache_mm;
+	flush_cache_page = viking_flush_cache_page;
+	flush_cache_range = viking_flush_cache_range;
+
+	flush_tlb_all = viking_flush_tlb_all;
+	flush_tlb_mm = viking_flush_tlb_mm;
+	flush_tlb_page = viking_flush_tlb_page;
+	flush_tlb_range = viking_flush_tlb_range;
+
+	flush_page_to_ram = viking_flush_page_to_ram;
+	flush_tlb_page_for_cbit = viking_flush_tlb_page_for_cbit;
+
+	poke_srmmu = poke_viking;
+}
+
+/* Probe for the srmmu chip version.  Decodes the module type/revision
+ * fields of the MMU control register and the impl/version fields of the
+ * PSR, then dispatches to the matching init_* routine; halts via
+ * srmmu_is_bad() if nothing matches.
+ */
+static void get_srmmu_type(void)
+{
+	unsigned long mreg, psr;
+	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
+
+	srmmu_modtype = SRMMU_INVAL_MOD;
+	hwbug_bitmask = 0;
+
+	mreg = srmmu_get_mmureg(); psr = get_psr();
+	mod_typ = (mreg & 0xf0000000) >> 28;
+	mod_rev = (mreg & 0x0f000000) >> 24;
+	psr_typ = (psr >> 28) & 0xf;
+	psr_vers = (psr >> 24) & 0xf;
+
+	/* First, check for HyperSparc or Cypress. */
+	if(mod_typ == 1) {
+		switch(mod_rev) {
+		case 7:
+			/* UP or MP Hypersparc */
+			init_hypersparc();
+			break;
+		case 0:
+			/* Uniprocessor Cypress */
+			init_cypress_604();
+			break;
+		case 12:
+			/* _REALLY OLD_ Cypress MP chips... */
+		case 13:
+		case 14:
+		case 15:
+			/* MP Cypress mmu/cache-controller */
+			init_cypress_605(mod_rev);
+			break;
+		default:
+			srmmu_is_bad();
+			break;
+		};
+		return;
+	}
+
+	/* Next check for Fujitsu Swift. */
+	if(psr_typ == 0 && psr_vers == 4) {
+		init_swift();
+		return;
+	}
+
+	/* Now the Viking family of srmmu. */
+	if(psr_typ == 4 &&
+	   ((psr_vers == 0) ||
+	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
+		init_viking();
+		return;
+	}
+
+	/* Finally the Tsunami. */
+	if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
+		init_tsunami();
+		return;
+	}
+
+	/* Oh well */
+	srmmu_is_bad();
+}
+
+extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
+ tsetup_mmu_patchme, rtrap_mmu_patchme;
+
+extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
+ tsetup_srmmu_stackchk, srmmu_rett_stackchk;
+
+#ifdef __SMP__
+extern unsigned long rirq_mmu_patchme, srmmu_reti_stackchk;
+#endif
+
+extern unsigned long srmmu_fault;
+
+/* Overwrite the instruction at &insn with a branch to &dest.
+ * NOTE(review): the trailing ';' after while(0) is unusual for a
+ * do/while(0) macro and would break use in an if/else without braces;
+ * current call sites are all unconditional.
+ */
+#define PATCH_BRANCH(insn, dest) do { \
+		iaddr = &(insn); \
+		daddr = &(dest); \
+		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
+	} while(0);
+
+/* Patch the window-handling trap code and the fault-trap table slots so
+ * they branch to the SRMMU-specific stack checkers and fault handler.
+ */
+static void patch_window_trap_handlers(void)
+{
+	unsigned long *iaddr, *daddr;
+
+	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
+	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
+	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
+	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
+#ifdef __SMP__
+	PATCH_BRANCH(rirq_mmu_patchme, srmmu_reti_stackchk);
+#endif
+	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
+	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
+	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
+}
+
+#ifdef __SMP__
+/* Local cross-calls: broadcast the per-page flush to all CPUs via xc1(). */
+static void smp_flush_page_for_dma(unsigned long page)
+{
+	xc1((smpfunc_t) local_flush_page_for_dma, page);
+}
+
+/* Cross-call wrapper for flushing one page out of the cache everywhere. */
+static void smp_flush_cache_page_to_uncache(unsigned long page)
+{
+	xc1((smpfunc_t) local_flush_cache_page_to_uncache, page);
+}
+
+/* Cross-call wrapper for the per-page TLB flush used when clearing the
+ * cacheable bit.
+ */
+static void smp_flush_tlb_page_for_cbit(unsigned long page)
+{
+	xc1((smpfunc_t) local_flush_tlb_page_for_cbit, page);
+}
+#endif
+
+/* Load up routines and constants for sun4m mmu.  Fills in every generic
+ * Sparc MMU function pointer and constant with the SRMMU implementation,
+ * probes the specific chip (get_srmmu_type), patches the trap handlers,
+ * and on SMP swaps the flush entry points for cross-call wrappers while
+ * keeping the chip-specific versions reachable as local_* pointers.
+ */
+void ld_mmu_srmmu(void)
+{
+	/* First the constants */
+	pmd_shift = SRMMU_PMD_SHIFT;
+	pmd_size = SRMMU_PMD_SIZE;
+	pmd_mask = SRMMU_PMD_MASK;
+	pgdir_shift = SRMMU_PGDIR_SHIFT;
+	pgdir_size = SRMMU_PGDIR_SIZE;
+	pgdir_mask = SRMMU_PGDIR_MASK;
+
+	ptrs_per_pte = SRMMU_PTRS_PER_PTE;
+	ptrs_per_pmd = SRMMU_PTRS_PER_PMD;
+	ptrs_per_pgd = SRMMU_PTRS_PER_PGD;
+
+	page_none = SRMMU_PAGE_NONE;
+	page_shared = SRMMU_PAGE_SHARED;
+	page_copy = SRMMU_PAGE_COPY;
+	page_readonly = SRMMU_PAGE_RDONLY;
+	page_kernel = SRMMU_PAGE_KERNEL;
+	pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
+
+	/* Functions */
+	set_pte = srmmu_set_pte_cacheable;
+	switch_to_context = srmmu_switch_to_context;
+	pmd_align = srmmu_pmd_align;
+	pgdir_align = srmmu_pgdir_align;
+	vmalloc_start = srmmu_vmalloc_start;
+
+	pte_page = srmmu_pte_page;
+	pmd_page = srmmu_pmd_page;
+	pgd_page = srmmu_pgd_page;
+
+	sparc_update_rootmmu_dir = srmmu_update_rootmmu_dir;
+
+	pte_none = srmmu_pte_none;
+	pte_present = srmmu_pte_present;
+	pte_clear = srmmu_pte_clear;
+
+	pmd_none = srmmu_pmd_none;
+	pmd_bad = srmmu_pmd_bad;
+	pmd_present = srmmu_pmd_present;
+	pmd_clear = srmmu_pmd_clear;
+
+	pgd_none = srmmu_pgd_none;
+	pgd_bad = srmmu_pgd_bad;
+	pgd_present = srmmu_pgd_present;
+	pgd_clear = srmmu_pgd_clear;
+
+	mk_pte = srmmu_mk_pte;
+	mk_pte_phys = srmmu_mk_pte_phys;
+	pgd_set = srmmu_pgd_set;
+	mk_pte_io = srmmu_mk_pte_io;
+	pte_modify = srmmu_pte_modify;
+	pgd_offset = srmmu_pgd_offset;
+	pmd_offset = srmmu_pmd_offset;
+	pte_offset = srmmu_pte_offset;
+	pte_free_kernel = srmmu_pte_free_kernel;
+	pmd_free_kernel = srmmu_pmd_free_kernel;
+	pte_alloc_kernel = srmmu_pte_alloc_kernel;
+	pmd_alloc_kernel = srmmu_pmd_alloc_kernel;
+	pte_free = srmmu_pte_free;
+	pte_alloc = srmmu_pte_alloc;
+	pmd_free = srmmu_pmd_free;
+	pmd_alloc = srmmu_pmd_alloc;
+	pgd_free = srmmu_pgd_free;
+	pgd_alloc = srmmu_pgd_alloc;
+
+	pte_write = srmmu_pte_write;
+	pte_dirty = srmmu_pte_dirty;
+	pte_young = srmmu_pte_young;
+	pte_wrprotect = srmmu_pte_wrprotect;
+	pte_mkclean = srmmu_pte_mkclean;
+	pte_mkold = srmmu_pte_mkold;
+	pte_mkwrite = srmmu_pte_mkwrite;
+	pte_mkdirty = srmmu_pte_mkdirty;
+	pte_mkyoung = srmmu_pte_mkyoung;
+	update_mmu_cache = srmmu_update_mmu_cache;
+	mmu_exit_hook = srmmu_exit_hook;
+	mmu_flush_hook = srmmu_flush_hook;
+	mmu_lockarea = srmmu_lockarea;
+	mmu_unlockarea = srmmu_unlockarea;
+
+	mmu_get_scsi_one = srmmu_get_scsi_one;
+	mmu_get_scsi_sgl = srmmu_get_scsi_sgl;
+	mmu_release_scsi_one = srmmu_release_scsi_one;
+	mmu_release_scsi_sgl = srmmu_release_scsi_sgl;
+
+	mmu_map_dma_area = srmmu_map_dma_area;
+
+	mmu_info = srmmu_mmu_info;
+	mmu_v2p = srmmu_v2p;
+	mmu_p2v = srmmu_p2v;
+
+	/* Task struct and kernel stack allocating/freeing. */
+	alloc_kernel_stack = srmmu_alloc_kernel_stack;
+	alloc_task_struct = srmmu_alloc_task_struct;
+	free_kernel_stack = srmmu_free_kernel_stack;
+	free_task_struct = srmmu_free_task_struct;
+
+	quick_kernel_fault = srmmu_quick_kernel_fault;
+
+	/* SRMMU specific. */
+	ctxd_set = srmmu_ctxd_set;
+	pmd_set = srmmu_pmd_set;
+
+	/* Chip-specific init may override several pointers set above. */
+	get_srmmu_type();
+	patch_window_trap_handlers();
+
+#ifdef __SMP__
+	/* El switcheroo... */
+
+	/* Save the chip-local versions... */
+	local_flush_cache_all = flush_cache_all;
+	local_flush_cache_mm = flush_cache_mm;
+	local_flush_cache_range = flush_cache_range;
+	local_flush_cache_page = flush_cache_page;
+	local_flush_tlb_all = flush_tlb_all;
+	local_flush_tlb_mm = flush_tlb_mm;
+	local_flush_tlb_range = flush_tlb_range;
+	local_flush_tlb_page = flush_tlb_page;
+	local_flush_page_to_ram = flush_page_to_ram;
+	local_flush_page_for_dma = flush_page_for_dma;
+	local_flush_cache_page_to_uncache = flush_cache_page_to_uncache;
+	local_flush_tlb_page_for_cbit = flush_tlb_page_for_cbit;
+
+	/* ...and route the public entry points through cross-calls. */
+	flush_cache_all = smp_flush_cache_all;
+	flush_cache_mm = smp_flush_cache_mm;
+	flush_cache_range = smp_flush_cache_range;
+	flush_cache_page = smp_flush_cache_page;
+	flush_tlb_all = smp_flush_tlb_all;
+	flush_tlb_mm = smp_flush_tlb_mm;
+	flush_tlb_range = smp_flush_tlb_range;
+	flush_tlb_page = smp_flush_tlb_page;
+	flush_page_to_ram = smp_flush_page_to_ram;
+	flush_page_for_dma = smp_flush_page_for_dma;
+	flush_cache_page_to_uncache = smp_flush_cache_page_to_uncache;
+	flush_tlb_page_for_cbit = smp_flush_tlb_page_for_cbit;
+#endif
+}
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
new file mode 100644
index 000000000..fdc74d3d7
--- /dev/null
+++ b/arch/sparc/mm/sun4c.c
@@ -0,0 +1,1965 @@
+/* $Id: sun4c.c,v 1.121 1996/11/01 20:36:27 ecd Exp $
+ * sun4c.c: Doing in software what should be done in hardware.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+#include <asm/memreg.h>
+#include <asm/processor.h>
+#include <asm/auxio.h>
+#include <asm/io.h>
+#include <asm/oplib.h>
+#include <asm/openprom.h>
+
+extern int num_segmaps, num_contexts;
+
/* Small structure for ease of handling in the low level kernel fault
 * handler. This holds all information necessary, like the sun4c_ufree_ring
 * for user segments.
 */
struct sun4c_segment_info {
	unsigned long vaddr;	/* base virtual address this pseg is mapped at */
	unsigned char pseg;	/* hardware segment map entry number */
};
/* Next bucket slot to hand out for kernel segments; advanced (with
 * wrap-around) by sun4c_next_kernel_bucket().
 */
struct sun4c_segment_info *sun4c_kernel_next;
+
#define SUN4C_KERNEL_BUCKETS 32
/* Byte size of the kernel bucket ring.  NOTE(review): the wrap trick in
 * sun4c_next_kernel_bucket() needs this to be a power of two, i.e.
 * sizeof(struct sun4c_segment_info) must be a power of two -- confirm.
 */
#define SUN4C_KERNEL_BSIZE (sizeof(struct sun4c_segment_info) \
			    * SUN4C_KERNEL_BUCKETS)

/* Classic MIN/MAX; both evaluate their arguments twice, so only pass
 * side-effect-free expressions.
 */
#ifndef MAX
#define MAX(a,b) ((a)<(b)?(b):(a))
#endif
#ifndef MIN
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
+
+
+
/* Poor-man's in-kernel call-chain profiler; compiled out by default.
 * Set KGPROF_PROFILING to 1 to enable it.
 */
#define KGPROF_PROFILING 0
#if KGPROF_PROFILING
#define KGPROF_DEPTH 3 /* this needs to match the code below */
#define KGPROF_SIZE 100
/* One slot per distinct call chain: KGPROF_DEPTH return addresses
 * identifying the chain, plus a hit counter.
 */
static struct {
	unsigned addr[KGPROF_DEPTH];
	unsigned count;
} kgprof_counters[KGPROF_SIZE];

/* just call this function from whatever function you think needs it then
   look at /proc/cpuinfo to see where the function is being called from
   and how often. This gives a type of "kernel gprof" */
#define NEXT_PROF(prev,lvl) (prev>PAGE_OFFSET?__builtin_return_address(lvl):0)
static inline void kgprof_profile(void)
{
	unsigned ret[KGPROF_DEPTH];
	int i,j;
	/* you can't use a variable argument to __builtin_return_address() */
	ret[0] = (unsigned)__builtin_return_address(0);
	ret[1] = (unsigned)NEXT_PROF(ret[0],1);
	ret[2] = (unsigned)NEXT_PROF(ret[1],2);

	/* Look for an existing slot with this exact chain; the first slot
	 * whose addr[0] is zero terminates the used region.
	 */
	for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
		for (j=0;j<KGPROF_DEPTH;j++)
			if (ret[j] != kgprof_counters[i].addr[j]) break;
		if (j==KGPROF_DEPTH) break;
	}
	/* Record/bump the chain; silently drops samples once full. */
	if (i<KGPROF_SIZE) {
		for (j=0;j<KGPROF_DEPTH;j++)
			kgprof_counters[i].addr[j] = ret[j];
		kgprof_counters[i].count++;
	}
}
#endif
+
+
/* Flushing the cache. */
struct sun4c_vac_props sun4c_vacinfo;
/* Statistics: counts of context/segment/page flushes performed. */
static int ctxflushes, segflushes, pageflushes;
+
+/* convert a virtual address to a physical address and vice
+ versa. Easy on the 4c */
+static unsigned long sun4c_v2p(unsigned long vaddr)
+{
+ return(vaddr - PAGE_OFFSET);
+}
+
+static unsigned long sun4c_p2v(unsigned long vaddr)
+{
+ return(vaddr + PAGE_OFFSET);
+}
+
+
/* Invalidate every sun4c cache line tag.
 * Only legal while the VAC is disabled -- writing tags of a live
 * cache would corrupt it, hence the panic below.
 */
void sun4c_flush_all(void)
{
	unsigned long begin, end;

	if(sun4c_vacinfo.on)
		panic("SUN4C: AIEEE, trying to invalidate vac while"
		      " it is on.");

	/* Clear 'valid' bit in all cache line tags */
	begin = AC_CACHETAGS;
	end = (AC_CACHETAGS + sun4c_vacinfo.num_bytes);
	while(begin < end) {
		/* Store zero to the tag via the control-space ASI. */
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				     "r" (begin), "i" (ASI_CONTROL));
		begin += sun4c_vacinfo.linesize;
	}
}
+
/* Blow the entire current context out of the virtual cache.
 * Hardware-flush capable machines get one flush store per page of the
 * cache window; otherwise we run a software loop unrolled eight cache
 * lines per iteration (g1..g5/o4 hold 2..7 linesizes, o5 holds 8).
 */
static inline void sun4c_flush_context(void)
{
	unsigned long vaddr;

	ctxflushes++;
	if(sun4c_vacinfo.do_hwflushes) {
		for(vaddr=0; vaddr < sun4c_vacinfo.num_bytes; vaddr+=PAGE_SIZE)
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					     "r" (vaddr), "i" (ASI_HWFLUSHCONTEXT));
	} else {
		/* AJT: possibly read the tags and avoid flushing the ones that
		   are above 0xf0000000 so the kernel isn't flushed all the time */
		__asm__ __volatile__("add %1, %1, %%g1\n\t"
				     "add %1, %%g1, %%g2\n\t"
				     "add %1, %%g2, %%g3\n\t"
				     "add %1, %%g3, %%g4\n\t"
				     "add %1, %%g4, %%g5\n\t"
				     "add %1, %%g5, %%o4\n\t"
				     "add %1, %%o4, %%o5\n"
				     "1:\n\t"
				     "subcc %0, %%o5, %0\n\t"
				     "sta %%g0, [%0] %2\n\t"
				     "sta %%g0, [%0 + %1] %2\n\t"
				     "sta %%g0, [%0 + %%g1] %2\n\t"
				     "sta %%g0, [%0 + %%g2] %2\n\t"
				     "sta %%g0, [%0 + %%g3] %2\n\t"
				     "sta %%g0, [%0 + %%g4] %2\n\t"
				     "sta %%g0, [%0 + %%g5] %2\n\t"
				     "bg 1b\n\t"
				     " sta %%g0, [%0 + %%o4] %2\n\t" : :
				     "r" (sun4c_vacinfo.num_bytes),
				     "r" (sun4c_vacinfo.linesize),
				     "i" (ASI_FLUSHCTX) :
				     "g1", "g2", "g3", "g4", "g5", "o4", "o5");
	}
}
+
/* Scrape the segment starting at ADDR from the virtual cache.
 * ADDR is rounded down to a real-pgdir (hardware segment) boundary.
 * Same hw/sw split and 8-way unroll as sun4c_flush_context() above.
 */
static inline void sun4c_flush_segment(unsigned long addr)
{
	segflushes++;
	addr &= SUN4C_REAL_PGDIR_MASK;
	if(sun4c_vacinfo.do_hwflushes) {
		unsigned long end = (addr + sun4c_vacinfo.num_bytes);

		for( ; addr < end; addr += PAGE_SIZE)
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					     "r" (addr), "i" (ASI_HWFLUSHSEG));
	} else {
		__asm__ __volatile__("add %2, %2, %%g1\n\t"
				     "add %2, %%g1, %%g2\n\t"
				     "add %2, %%g2, %%g3\n\t"
				     "add %2, %%g3, %%g4\n\t"
				     "add %2, %%g4, %%g5\n\t"
				     "add %2, %%g5, %%o4\n\t"
				     "add %2, %%o4, %%o5\n"
				     "1:\n\t"
				     "subcc %1, %%o5, %1\n\t"
				     "sta %%g0, [%0] %3\n\t"
				     "sta %%g0, [%0 + %2] %3\n\t"
				     "sta %%g0, [%0 + %%g1] %3\n\t"
				     "sta %%g0, [%0 + %%g2] %3\n\t"
				     "sta %%g0, [%0 + %%g3] %3\n\t"
				     "sta %%g0, [%0 + %%g4] %3\n\t"
				     "sta %%g0, [%0 + %%g5] %3\n\t"
				     "sta %%g0, [%0 + %%o4] %3\n\t"
				     "bg 1b\n\t"
				     " add %0, %%o5, %0\n\t" : :
				     "r" (addr), "r" (sun4c_vacinfo.num_bytes),
				     "r" (sun4c_vacinfo.linesize),
				     "i" (ASI_FLUSHSEG) :
				     "g1", "g2", "g3", "g4", "g5", "o4", "o5");
	}
}
+
/* Bolix one page from the virtual cache.
 * Hardware flush is a single flush-page store; software flush walks
 * PAGE_SIZE bytes, eight cache lines per loop iteration.
 */
static inline void sun4c_flush_page(unsigned long addr)
{
	addr &= PAGE_MASK;

	pageflushes++;
	if(sun4c_vacinfo.do_hwflushes) {
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				     "r" (addr), "i" (ASI_HWFLUSHPAGE));
	} else {
		__asm__ __volatile__("add %2, %2, %%g1\n\t"
				     "add %2, %%g1, %%g2\n\t"
				     "add %2, %%g2, %%g3\n\t"
				     "add %2, %%g3, %%g4\n\t"
				     "add %2, %%g4, %%g5\n\t"
				     "add %2, %%g5, %%o4\n\t"
				     "add %2, %%o4, %%o5\n"
				     "1:\n\t"
				     "subcc %1, %%o5, %1\n\t"
				     "sta %%g0, [%0] %3\n\t"
				     "sta %%g0, [%0 + %2] %3\n\t"
				     "sta %%g0, [%0 + %%g1] %3\n\t"
				     "sta %%g0, [%0 + %%g2] %3\n\t"
				     "sta %%g0, [%0 + %%g3] %3\n\t"
				     "sta %%g0, [%0 + %%g4] %3\n\t"
				     "sta %%g0, [%0 + %%g5] %3\n\t"
				     "sta %%g0, [%0 + %%o4] %3\n\t"
				     "bg 1b\n\t"
				     " add %0, %%o5, %0\n\t" : :
				     "r" (addr), "r" (PAGE_SIZE),
				     "r" (sun4c_vacinfo.linesize),
				     "i" (ASI_FLUSHPG) :
				     "g1", "g2", "g3", "g4", "g5", "o4", "o5");
	}
}
+
/* The sun4c's do have an on chip store buffer. And the way you
 * clear them out isn't so obvious. The only way I can think of
 * to accomplish this is to read the current context register,
 * store the same value there, then read an external hardware
 * register.
 */
void sun4c_complete_all_stores(void)
{
	/* volatile so the reads/writes are not optimized away. */
	volatile int _unused;

	_unused = sun4c_get_context();
	sun4c_set_context(_unused);
	/* External register read (auxiliary I/O) drains the buffer. */
	_unused = *AUXREG;
}
+
+/* Bootup utility functions. */
+static inline void sun4c_init_clean_segmap(unsigned char pseg)
+{
+ unsigned long vaddr;
+
+ sun4c_put_segmap(0, pseg);
+ for(vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr+=PAGE_SIZE)
+ sun4c_put_pte(vaddr, 0);
+ sun4c_put_segmap(0, invalid_segment);
+}
+
/* Point every segmap entry outside the ranges we must keep (kernel
 * image, kadb debugger, PROM) at the invalid segment, in every
 * context.  The original context register is restored afterwards.
 */
static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char savectx, ctx;

	savectx = sun4c_get_context();
	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
	for(ctx = 0; ctx < num_contexts; ctx++) {
		sun4c_set_context(ctx);
		/* Low user region. */
		for(vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		/* Gap below the kernel base. */
		for(vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		/* Between end of kernel image and the debugger area. */
		for(vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
		/* Above the PROM until vaddr wraps to zero. */
		for(vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
			sun4c_put_segmap(vaddr, invalid_segment);
	}
	sun4c_set_context(savectx);
}
+
/* Probe the PROM for the virtual cache geometry, sanity-check it,
 * then invalidate all tags and turn the cache on.
 */
void sun4c_probe_vac(void)
{
	sun4c_disable_vac();
	/* Defaults match the common 64K / 16-byte-line configuration. */
	sun4c_vacinfo.num_bytes = prom_getintdefault(prom_root_node,
						     "vac-size", 65536);
	sun4c_vacinfo.linesize = prom_getintdefault(prom_root_node,
						    "vac-linesize", 16);
	sun4c_vacinfo.num_lines =
		(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
	switch(sun4c_vacinfo.linesize) {
	case 16:
		sun4c_vacinfo.log2lsize = 4;
		break;
	case 32:
		sun4c_vacinfo.log2lsize = 5;
		break;
	default:
		prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
			    sun4c_vacinfo.linesize);
		prom_halt();
	};

	/* Only vac-hwflush (with a dash) is reliable, weitek
	 * power-up processor claims vac_hwflush (underscore)
	 * yet crashes if you try to use hardware based flushes.
	 */
	sun4c_vacinfo.do_hwflushes = prom_getintdefault(prom_root_node,
							"vac-hwflush", 0);

	if(sun4c_vacinfo.num_bytes != 65536) {
		prom_printf("WEIRD Sun4C VAC cache size, tell davem");
		prom_halt();
	}

	sun4c_flush_all();
	sun4c_enable_vac();
}
+
+/* Patch instructions for the low level kernel fault handler. */
+extern unsigned long invalid_segment_patch1, invalid_segment_patch1_ff;
+extern unsigned long invalid_segment_patch2, invalid_segment_patch2_ff;
+extern unsigned long num_context_patch1, num_context_patch1_16;
+extern unsigned long num_context_patch2, num_context_patch2_16;
+extern unsigned long sun4c_kernel_buckets_patch;
+extern unsigned long sun4c_kernel_buckets_patch_32;
+
/* Copy one instruction word from SRC to DST, patching the low level
 * trap handler in place.  Relies on `iaddr' and `daddr' locals being
 * declared by the caller.  Wrapped in do { } while (0) WITHOUT a
 * trailing semicolon so that `PATCH_INSN(a, b);' acts as exactly one
 * statement (the old trailing `;' expanded to two statements, which
 * breaks an unbraced if/else around an invocation).
 */
#define PATCH_INSN(src, dst) do {	\
		daddr = &(dst);		\
		iaddr = &(src);		\
		*daddr = *iaddr;	\
	} while (0)
+
/* Rewrite hard-coded immediates in the low level fault handler to
 * match the probed MMU geometry (number of segmaps, contexts) and the
 * compile-time kernel bucket count.  Unknown values are fatal.
 */
static void patch_kernel_fault_handler(void)
{
	unsigned long *iaddr, *daddr;

	switch (num_segmaps) {
		case 128:
			/* Default, nothing to do. */
			break;
		case 256:
			PATCH_INSN(invalid_segment_patch1_ff,
				   invalid_segment_patch1);
			PATCH_INSN(invalid_segment_patch2_ff,
				   invalid_segment_patch2);
			break;
		default:
			prom_printf("Unhandled number of segmaps: %d\n",
				    num_segmaps);
			prom_halt();
	}
	switch (num_contexts) {
		case 8:
			/* Default, nothing to do. */
			break;
		case 16:
			PATCH_INSN(num_context_patch1_16,
				   num_context_patch1);
			PATCH_INSN(num_context_patch2_16,
				   num_context_patch2);
			break;
		default:
			prom_printf("Unhandled number of contexts: %d\n",
				    num_contexts);
			prom_halt();
	}
	/* Compile-time constant; the switch documents supported values. */
	switch (SUN4C_KERNEL_BUCKETS) {
		case 16:
			/* Default, nothing to do. */
			break;
		case 32:
			PATCH_INSN(sun4c_kernel_buckets_patch_32,
				   sun4c_kernel_buckets_patch);
			break;
		default:
			prom_printf("Unhandled number of kernel buckets: %d\n",
				    SUN4C_KERNEL_BUCKETS);
			prom_halt();
	}
}
+
/* Ask the PROM for MMU geometry, then patch the fault handler to
 * match.  Must run before any fault can occur.
 */
static void sun4c_probe_mmu(void)
{
	num_segmaps = prom_getintdefault(prom_root_node, "mmu-npmg", 128);
	num_contexts = prom_getintdefault(prom_root_node, "mmu-nctx", 0x8);
	patch_kernel_fault_handler();
}
+
+volatile unsigned long *sun4c_memerr_reg = 0;
+
+void sun4c_probe_memerr_reg(void)
+{
+ int node;
+ struct linux_prom_registers regs[1];
+
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(prom_root_node, "memory-error");
+ if (!node)
+ return;
+ prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
+ sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
+ regs[0].reg_size,
+ "memory parity error",
+ regs[0].which_io, 0);
+}
+
/* SS2/IPX workaround: mark the page holding the trap table
 * non-cacheable (see printk below for the symptom being avoided).
 */
static inline void sun4c_init_ss2_cache_bug(void)
{
	extern unsigned long start;	/* kernel image start / trap table */

	if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
	   (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX))) {
		/* Whee.. */
		printk("SS2 cache bug detected, uncaching trap table page\n");
		/* NOTE(review): (unsigned int) cast here vs (unsigned long)
		 * below -- equivalent on 32-bit sparc, but inconsistent.
		 */
		sun4c_flush_page((unsigned int) &start);
		sun4c_put_pte(((unsigned long) &start),
			      (sun4c_get_pte((unsigned long) &start) | _SUN4C_PAGE_NOCACHE));
	}
}
+
/* Addr is always aligned on a page boundry for us already.
 * Back [addr, addr+len) with freshly allocated pages mapped
 * valid+writable+non-cacheable for DMA.  Allocation failure is fatal
 * (boot-time path).
 */
static void sun4c_map_dma_area(unsigned long addr, int len)
{
	unsigned long page, end;

	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = get_free_page(GFP_KERNEL);
		if(!page) {
			prom_printf("alloc_dvma: Cannot get a dvma page\n");
			prom_halt();
		}
		/* Purge any cached lines, then build the nocache pte. */
		sun4c_flush_page(page);
		page -= PAGE_OFFSET;
		page >>= PAGE_SHIFT;
		page |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_WRITE | _SUN4C_PAGE_NOCACHE);
		sun4c_put_pte(addr, page);
		addr += PAGE_SIZE;
	}
}
+
+
/* TLB management. */
/* One software bookkeeping record per hardware segment map entry;
 * entries are linked into per-context / free rings below.
 */
struct sun4c_mmu_entry {
	struct sun4c_mmu_entry *next;
	struct sun4c_mmu_entry *prev;
	unsigned long vaddr;	/* virtual base this pseg currently maps */
	unsigned char pseg;	/* hardware segment number */
	unsigned char locked;	/* 1 = pinned (kernel/PROM), never recycled */
};
/* 256 covers the largest num_segmaps probed in sun4c_probe_mmu(). */
static struct sun4c_mmu_entry mmu_entry_pool[256];
+
+static void sun4c_init_mmu_entry_pool(void)
+{
+ int i;
+
+ for(i=0; i < 256; i++) {
+ mmu_entry_pool[i].pseg = i;
+ mmu_entry_pool[i].next = 0;
+ mmu_entry_pool[i].prev = 0;
+ mmu_entry_pool[i].vaddr = 0;
+ mmu_entry_pool[i].locked = 0;
+ }
+ mmu_entry_pool[invalid_segment].locked = 1;
+}
+
+static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
+ unsigned long bits_off)
+{
+ unsigned long start, end;
+
+ end = vaddr + SUN4C_REAL_PGDIR_SIZE;
+ for(start = vaddr; start < end; start += PAGE_SIZE)
+ if(sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
+ sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
+ ~bits_off);
+}
+
/* Lock down the segments backing the debugger/PROM window and the
 * kernel image, replicate them into every context, and fix their pte
 * protections (PROM: privileged; kernel: privileged and cacheable).
 */
static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
{
	unsigned long vaddr;
	unsigned char pseg, ctx;

	for(vaddr = KADB_DEBUGGER_BEGVM;
	    vaddr < LINUX_OPPROM_ENDVM;
	    vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		if(pseg != invalid_segment) {
			mmu_entry_pool[pseg].locked = 1;
			for(ctx = 0; ctx < num_contexts; ctx++)
				prom_putsegment(ctx, vaddr, pseg);
			fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
		}
	}
	for(vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
		pseg = sun4c_get_segmap(vaddr);
		mmu_entry_pool[pseg].locked = 1;
		for(ctx = 0; ctx < num_contexts; ctx++)
			prom_putsegment(ctx, vaddr, pseg);
		/* Kernel text/data must be cacheable. */
		fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
	}
}
+
/* Permanently claim unlocked psegs to back [start, end), cleaning each
 * one and replicating the mapping into every context.
 * NOTE(review): if no unlocked entry exists, i ends at invalid_segment
 * and the invalid entry would be used -- boot-time only, but verify
 * the pool can never be exhausted here.
 */
static void sun4c_init_lock_area(unsigned long start, unsigned long end)
{
	int i, ctx;

	while(start < end) {
		for(i=0; i < invalid_segment; i++)
			if(!mmu_entry_pool[i].locked)
				break;
		mmu_entry_pool[i].locked = 1;
		sun4c_init_clean_segmap(i);
		for(ctx = 0; ctx < num_contexts; ctx++)
			prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
		start += SUN4C_REAL_PGDIR_SIZE;
	}
}
+
/* Doubly-linked circular list of mmu entries with a sentinel head. */
struct sun4c_mmu_ring {
	struct sun4c_mmu_entry ringhd;	/* sentinel, never a real entry */
	int num_entries;
};
static struct sun4c_mmu_ring sun4c_context_ring[16]; /* used user entries */
static struct sun4c_mmu_ring sun4c_ufree_ring;       /* free user entries */
+
/* Advance the kernel bucket cursor with wrap-around.  The bucket array
 * is allocated aligned to 2*SUN4C_KERNEL_BSIZE (see sun4c_init_rings),
 * so after incrementing, clearing the SUN4C_KERNEL_BSIZE bit wraps the
 * pointer back to the array base when it steps off the end.
 * NOTE(review): this only works if SUN4C_KERNEL_BSIZE is a power of
 * two -- confirm sizeof(struct sun4c_segment_info) is a power of two.
 */
static inline void sun4c_next_kernel_bucket(struct sun4c_segment_info **next)
{
	(*next)++;
	*next = (struct sun4c_segment_info *)
			((unsigned long)*next & ~SUN4C_KERNEL_BSIZE);
}
+
/* Make every ring empty (sentinel pointing at itself) and carve the
 * kernel bucket array out of the boot-time memory pool.
 */
static inline void sun4c_init_rings(unsigned long *mempool)
{
	int i;
	for(i=0; i<16; i++) {
		sun4c_context_ring[i].ringhd.next =
			sun4c_context_ring[i].ringhd.prev =
			&sun4c_context_ring[i].ringhd;
		sun4c_context_ring[i].num_entries = 0;
	}
	sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
		&sun4c_ufree_ring.ringhd;
	sun4c_ufree_ring.num_entries = 0;
	/* This needs to be aligned to twice its size for speed
	 * (enables the mask trick in sun4c_next_kernel_bucket()).
	 */
	sun4c_kernel_next = sparc_init_alloc(mempool, 2 * SUN4C_KERNEL_BSIZE);
}
+
+static inline void add_ring(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+
+ entry->prev = head;
+ (entry->next = head->next)->prev = entry;
+ head->next = entry;
+ ring->num_entries++;
+}
+
+static inline void remove_ring(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *next = entry->next;
+
+ (next->prev = entry->prev)->next = next;
+ ring->num_entries--;
+}
+
+static inline void recycle_ring(struct sun4c_mmu_ring *ring, struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+ struct sun4c_mmu_entry *next = entry->next;
+
+ (next->prev = entry->prev)->next = next;
+ entry->prev = head; (entry->next = head->next)->prev = entry;
+ head->next = entry;
+ /* num_entries stays the same */
+}
+
+static inline void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+{
+ remove_ring(sun4c_context_ring+ctx, entry);
+ add_ring(&sun4c_ufree_ring, entry);
+}
+
+static inline void assign_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+{
+ remove_ring(&sun4c_ufree_ring, entry);
+ add_ring(sun4c_context_ring+ctx, entry);
+}
+
/* Reserve HOWMANY unlocked psegs for kernel use, cleaning each and
 * recording it in the kernel bucket ring.
 * NOTE(review): like sun4c_init_lock_area(), assumes an unlocked
 * entry always exists (i could reach invalid_segment otherwise).
 */
static void sun4c_init_fill_kernel_ring(int howmany)
{
	int i;

	while(howmany) {
		for(i=0; i < invalid_segment; i++)
			if(!mmu_entry_pool[i].locked)
				break;
		mmu_entry_pool[i].locked = 1;
		sun4c_init_clean_segmap(i);
		sun4c_kernel_next->vaddr = 0;
		sun4c_kernel_next->pseg = mmu_entry_pool[i].pseg;
		sun4c_next_kernel_bucket(&sun4c_kernel_next);
		howmany--;
	}
}
+
+static void sun4c_init_fill_user_ring(void)
+{
+ int i;
+
+ for(i=0; i < invalid_segment; i++) {
+ if(mmu_entry_pool[i].locked)
+ continue;
+ sun4c_init_clean_segmap(i);
+ add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
+ }
+}
+
+static inline void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
+{
+ int savectx, ctx;
+
+ savectx = sun4c_get_context();
+ for(ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(kentry->vaddr, invalid_segment);
+ }
+ sun4c_set_context(savectx);
+}
+
+static inline void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
+{
+ int savectx, ctx;
+
+ savectx = sun4c_get_context();
+ for(ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(kentry->vaddr, kentry->pseg);
+ }
+ sun4c_set_context(savectx);
+}
+
/* Drop a user segment mapping in the current context only. */
static inline void sun4c_user_unmap(struct sun4c_mmu_entry *uentry)
{
	/* PM: need flush_user_windows() ?? */
	sun4c_put_segmap(uentry->vaddr, invalid_segment);
}
+
+static inline void sun4c_user_map(struct sun4c_mmu_entry *uentry)
+{
+ unsigned long start = uentry->vaddr;
+ unsigned long end = start + SUN4C_REAL_PGDIR_SIZE;
+
+ sun4c_put_segmap(uentry->vaddr, uentry->pseg);
+ while(start < end) {
+ sun4c_put_pte(start, 0);
+ start += PAGE_SIZE;
+ }
+}
+
/* Tear down every user segment of context CTX: flush the whole
 * context from the VAC once, then unmap and free each ring entry.
 */
static inline void sun4c_demap_context(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
	struct sun4c_mmu_entry *this_entry, *next_entry;
	int savectx = sun4c_get_context();

	this_entry = crp->ringhd.next;
	flush_user_windows();
	sun4c_set_context(ctx);
	sun4c_flush_context();
	while(crp->num_entries) {
		/* free_user_entry() unlinks this_entry, so grab next first. */
		next_entry = this_entry->next;
		sun4c_user_unmap(this_entry);
		free_user_entry(ctx, this_entry);
		this_entry = next_entry;
	}
	sun4c_set_context(savectx);
}
+
/* Evict a single user segment from context CTX, flushing just that
 * segment from the VAC first.
 */
static inline void sun4c_demap_one(struct sun4c_mmu_ring *crp,unsigned char ctx)
{
	/* by using .prev we get a kind of "lru" algorithm */
	struct sun4c_mmu_entry *entry = crp->ringhd.prev;
	int savectx = sun4c_get_context();

	flush_user_windows();
	sun4c_set_context(ctx);
	sun4c_flush_segment(entry->vaddr);
	sun4c_user_unmap(entry);
	free_user_entry(ctx, entry);
	sun4c_set_context(savectx);
}
+
/* Using this method to free up mmu entries eliminates a lot of
 * potential races since we have a kernel that incurs tlb
 * replacement faults. There may be performance penalties.
 *
 * Returns a free user mmu entry, stealing one from the least
 * recently used context if the free ring is empty.  The returned
 * entry is still on the free ring; the caller unlinks it.
 */
static inline struct sun4c_mmu_entry *sun4c_user_strategy(void)
{
	struct ctx_list *next_one;
	struct sun4c_mmu_ring *rp = 0;
	unsigned char ctx;

	/* If some are free, return first one. */
	if(sun4c_ufree_ring.num_entries)
		return sun4c_ufree_ring.ringhd.next;

	/* Grab one from the LRU context. */
	next_one = ctx_used.next;
	while (sun4c_context_ring[next_one->ctx_number].num_entries == 0)
		next_one = next_one->next;

	ctx = next_one->ctx_number;
	rp = &sun4c_context_ring[ctx];

	/* sun4c_demap_one() puts the evicted entry on the free ring. */
	sun4c_demap_one(rp,ctx);
	return sun4c_ufree_ring.ringhd.next;
}
+
+static inline void alloc_user_segment(unsigned long address, unsigned char ctx)
+{
+ struct sun4c_mmu_entry *entry;
+
+ address &= SUN4C_REAL_PGDIR_MASK;
+ entry = sun4c_user_strategy();
+ assign_user_entry(ctx, entry);
+ entry->vaddr = address;
+ sun4c_user_map(entry);
+}
+
/* XXX Just like kernel tlb replacement we'd like to have a low level
 * XXX equivalent for user faults which need not go through the mm
 * XXX subsystem just to load a mmu entry. But this might not be as
 * XXX feasible since we need to go through the kernel page tables
 * XXX for this process, which we currently don't lock into the mmu
 * XXX so we would fault with traps off... must think about this...
 *
 * Install PTE for ADDRESS in the current context, allocating a
 * hardware segment first if the address has none.  Runs with
 * interrupts disabled.  The #if 0 block is disabled VAC bad-alias
 * handling for shared writable mappings.
 */
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
#if 0
	struct inode *inode;
	struct vm_area_struct *vmaring;
	unsigned long offset, vaddr;
	unsigned long start;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
#endif

	save_and_cli(flags);
	address &= PAGE_MASK;
	if(sun4c_get_segmap(address) == invalid_segment)
		alloc_user_segment(address, sun4c_get_context());
	sun4c_put_pte(address, pte_val(pte));

#if 0
	if (!(vma->vm_flags & VM_WRITE) ||
	    !(vma->vm_flags & VM_SHARED))
		goto done;

	inode = vma->vm_inode;
	if (!inode)
		goto done;

	offset = (address & PAGE_MASK) - vma->vm_start;
	vmaring = inode->i_mmap;
	do {
		vaddr = vmaring->vm_start + offset;

		if (S4CVAC_BADALIAS(vaddr, address)) {
			start = vma->vm_start;
			while (start < vma->vm_end) {
				pgdp = pgd_offset(vma->vm_mm, start);
				pmdp = pmd_offset(pgdp, start);
				ptep = pte_offset(pmdp, start);

				if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
					sun4c_put_pte(start, sun4c_get_pte(start) |
						      _SUN4C_PAGE_NOCACHE);

				start += PAGE_SIZE;
			}
			goto done;
		}
	} while ((vmaring = vmaring->vm_next_share) != inode->i_mmap);

done:
#endif
	restore_flags(flags);
}
+
/* This is now a fast in-window trap handler to avoid any and all races.
 * Reaching this C fallback means the assembly handler failed; it is
 * always fatal.
 */
static void sun4c_quick_kernel_fault(unsigned long address)
{
	printk("Kernel faults at addr=0x%08lx\n", address);
	panic("sun4c fault handler bolixed...");
}
+
/*
 * 4 page buckets for task struct and kernel stack allocation.
 *
 * 	TASK_STACK_BEGIN
 * bucket[0]
 * bucket[1]
 *   [ ... ]
 * bucket[NR_TASKS-1]
 * 	TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASKS)
 *
 * Each slot looks like:
 *
 *  page 1 --  task struct
 *  page 2 --  unmapped, for stack redzone (maybe use for pgd)
 *  page 3/4 --  kernel stack
 */

struct task_bucket {
	struct task_struct task;
	char _unused1[PAGE_SIZE - sizeof(struct task_struct)];	/* pad to page */
	char kstack[(PAGE_SIZE*3)];	/* redzone page + 2 stack pages */
};

struct task_bucket *sun4c_bucket[NR_TASKS];

#define BUCKET_EMPTY 	((struct task_bucket *) 0)
#define BUCKET_SIZE	(PAGE_SIZE << 2)
#define BUCKET_SHIFT	14	/* log2(sizeof(struct task_bucket)) */
#define BUCKET_NUM(addr)	((((addr) - SUN4C_LOCK_VADDR) >> BUCKET_SHIFT))
#define BUCKET_ADDR(num)	(((num) << BUCKET_SHIFT) + SUN4C_LOCK_VADDR)
/* Build a kernel pte for PAGE (a kernel virtual page address). */
#define BUCKET_PTE(page) \
        ((((page) - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(SUN4C_PAGE_KERNEL))
/* Recover the page address from a bucket pte.  NOTE(review): masks the
 * pfn to 16 bits -- assumes sun4c physical memory fits in 16-bit pfns;
 * confirm against the pte layout in asm/pgtable.h.
 */
#define BUCKET_PTE_PAGE(pte) \
        (PAGE_OFFSET + (((pte) & 0xffff) << PAGE_SHIFT))
+
/* Steal a segment from the user pool and pin it at ADDR as a kernel
 * mapping (replicated into all contexts).  Runs with interrupts off.
 */
static inline void get_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *stolen;
	unsigned long flags;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	stolen = sun4c_user_strategy();
	/* The entry is still on the free ring; unlink it ourselves. */
	remove_ring(&sun4c_ufree_ring, stolen);
	stolen->vaddr = addr;
	flush_user_windows();
	sun4c_kernel_map(stolen);
	restore_flags(flags);
}
+
/* Undo get_locked_segment(): flush the segment, remove it from all
 * contexts and return the mmu entry to the user free pool.
 */
static inline void free_locked_segment(unsigned long addr)
{
	struct sun4c_mmu_entry *entry;
	unsigned long flags;
	unsigned char pseg;

	save_and_cli(flags);
	addr &= SUN4C_REAL_PGDIR_MASK;
	/* The pool is indexed by pseg, so the segmap gives us the entry. */
	pseg = sun4c_get_segmap(addr);
	entry = &mmu_entry_pool[pseg];
	flush_user_windows();
	sun4c_flush_segment(addr);
	sun4c_kernel_unmap(entry);
	add_ring(&sun4c_ufree_ring, entry);
	restore_flags(flags);
}
+
+static inline void garbage_collect(int entry)
+{
+ int start, end;
+
+ /* 16 buckets per segment... */
+ entry &= ~15;
+ start = entry;
+ for(end = (start + 16); start < end; start++)
+ if(sun4c_bucket[start] != BUCKET_EMPTY)
+ return;
+ /* Entire segment empty, release it. */
+ free_locked_segment(BUCKET_ADDR(entry));
+}
+
/* Allocate a task struct: grab a physical page, find a free bucket
 * slot, lock a segment for it if needed, and map the page at the
 * bucket's fixed virtual address.  Returns NULL on failure.
 */
static struct task_struct *sun4c_alloc_task_struct(void)
{
	unsigned long addr, page;
	int entry;

	page = get_free_page(GFP_KERNEL);
	if(!page)
		return (struct task_struct *) 0;
	/* XXX Bahh, linear search too slow, use hash
	 * XXX table in final implementation.  Or
	 * XXX keep track of first free when we free
	 * XXX a bucket... anything but this.
	 */
	for(entry = 0; entry < NR_TASKS; entry++)
		if(sun4c_bucket[entry] == BUCKET_EMPTY)
			break;
	if(entry == NR_TASKS) {
		free_page(page);
		return (struct task_struct *) 0;
	}
	addr = BUCKET_ADDR(entry);
	sun4c_bucket[entry] = (struct task_bucket *) addr;
	/* First bucket in a segment pulls in a locked segment. */
	if(sun4c_get_segmap(addr) == invalid_segment)
		get_locked_segment(addr);
	sun4c_put_pte(addr, BUCKET_PTE(page));
	return (struct task_struct *) addr;
}
+
/* Allocate the two kernel stack pages for TSK's bucket and map them
 * at pages 3/4 of the bucket (page 2 stays unmapped as a redzone --
 * see the bucket layout comment above).  Returns the stack virtual
 * address, or 0 on failure (no partial allocation is leaked).
 */
static unsigned long sun4c_alloc_kernel_stack(struct task_struct *tsk)
{
	unsigned long saddr = (unsigned long) tsk;
	unsigned long page[2];

	if(!saddr)
		return 0;
	page[0] = __get_free_page(GFP_KERNEL);
	if(!page[0])
		return 0;
	page[1] = __get_free_page(GFP_KERNEL);
	if(!page[1]) {
		free_page(page[0]);
		return 0;
	}
	/* Skip the task-struct page and the redzone page. */
	saddr += (PAGE_SIZE << 1);
	sun4c_put_pte(saddr, BUCKET_PTE(page[0]));
	sun4c_put_pte(saddr + PAGE_SIZE, BUCKET_PTE(page[1]));
	return saddr;
}
+
+static void sun4c_free_kernel_stack(unsigned long stack)
+{
+ unsigned long page[2];
+
+ page[0] = BUCKET_PTE_PAGE(sun4c_get_pte(stack));
+ page[1] = BUCKET_PTE_PAGE(sun4c_get_pte(stack+PAGE_SIZE));
+ sun4c_flush_page(stack);
+ sun4c_flush_page(stack + PAGE_SIZE);
+ sun4c_put_pte(stack, 0);
+ sun4c_put_pte(stack + PAGE_SIZE, 0);
+ free_page(page[0]);
+ free_page(page[1]);
+}
+
/* Release TSK's task-struct page, mark its bucket free, and give the
 * backing segment back if the whole segment is now empty.
 */
static void sun4c_free_task_struct(struct task_struct *tsk)
{
	unsigned long tsaddr = (unsigned long) tsk;
	unsigned long page = BUCKET_PTE_PAGE(sun4c_get_pte(tsaddr));
	int entry = BUCKET_NUM(tsaddr);

	sun4c_flush_page(tsaddr);
	sun4c_put_pte(tsaddr, 0);
	sun4c_bucket[entry] = BUCKET_EMPTY;
	free_page(page);
	garbage_collect(entry);
}
+
+static void sun4c_init_buckets(void)
+{
+ int entry;
+
+ if(sizeof(struct task_bucket) != (PAGE_SIZE << 2)) {
+ prom_printf("task bucket not 4 pages!\n");
+ prom_halt();
+ }
+ for(entry = 0; entry < NR_TASKS; entry++)
+ sun4c_bucket[entry] = BUCKET_EMPTY;
+}
+
/* I/O buffer alias window: [start, end) address range, the high-water
 * mark of segments currently locked for it, and a page-granular
 * allocation bitmap of iobuffer_map_size bits.
 */
static unsigned long sun4c_iobuffer_start;
static unsigned long sun4c_iobuffer_end;
static unsigned long sun4c_iobuffer_high;
static unsigned long *sun4c_iobuffer_map;
static int iobuffer_map_size;
+
/*
 * Alias our pages so they do not cause a trap.
 * Also one page may be aliased into several I/O areas and we may
 * finish these I/O separately.
 *
 * Finds a run of free pages in the iobuffer bitmap, extends the
 * locked-segment high-water mark if needed, and maps the caller's
 * pages (non-cacheable) at the alias addresses.  Returns the alias
 * address of VADDR; panics if the table is exhausted.
 */
static char *sun4c_lockarea(char *vaddr, unsigned long size)
{
	unsigned long base, scan;
	unsigned long npages;
	unsigned long vpage;
	unsigned long pte;
	unsigned long apage;
	unsigned long high;
	unsigned long flags;

	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	scan = 0;
	save_and_cli(flags);
	for (;;) {
		/* First-fit search for npages consecutive clear bits. */
		scan = find_next_zero_bit(sun4c_iobuffer_map,
					  iobuffer_map_size, scan);
		if ((base = scan) + npages > iobuffer_map_size) goto abend;
		for (;;) {
			if (scan >= base + npages) goto found;
			if (test_bit(scan, sun4c_iobuffer_map)) break;
			scan++;
		}
	}

found:
	/* Lock more segments if the run extends past the current mark. */
	high = ((base + npages) << PAGE_SHIFT) + sun4c_iobuffer_start;
	high = SUN4C_REAL_PGDIR_ALIGN(high);
	while (high > sun4c_iobuffer_high) {
		get_locked_segment(sun4c_iobuffer_high);
		sun4c_iobuffer_high += SUN4C_REAL_PGDIR_SIZE;
	}

	vpage = ((unsigned long) vaddr) & PAGE_MASK;
	for (scan = base; scan < base+npages; scan++) {
		pte = ((vpage-PAGE_OFFSET) >> PAGE_SHIFT);
		pte |= pgprot_val(SUN4C_PAGE_KERNEL);
		pte |= _SUN4C_PAGE_NOCACHE;
		set_bit(scan, sun4c_iobuffer_map);
		apage = (scan << PAGE_SHIFT) + sun4c_iobuffer_start;
		/* Flush the cacheable alias before the uncached one is used. */
		sun4c_flush_page(vpage);
		sun4c_put_pte(apage, pte);
		vpage += PAGE_SIZE;
	}
	restore_flags(flags);
	return (char *) ((base << PAGE_SHIFT) + sun4c_iobuffer_start +
			 (((unsigned long) vaddr) & ~PAGE_MASK));

abend:
	restore_flags(flags);
	printk("DMA vaddr=0x%p size=%08lx\n", vaddr, size);
	panic("Out of iobuffer table");
	return 0;
}
+
/* Undo sun4c_lockarea(): unmap the alias pages, clear their bitmap
 * bits, then shrink the locked-segment high-water mark past any now
 * fully free 32-page words.
 * NOTE(review): the pte/bitmap teardown happens before save_and_cli();
 * only the high-water-mark pass is protected -- confirm callers hold
 * the needed exclusion.
 */
static void sun4c_unlockarea(char *vaddr, unsigned long size)
{
	unsigned long vpage, npages;
	unsigned long flags;
	int scan, high;

	vpage = (unsigned long)vaddr & PAGE_MASK;
	npages = (((unsigned long)vaddr & ~PAGE_MASK) +
		  size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	while (npages != 0) {
		--npages;
		sun4c_put_pte(vpage, 0);
		clear_bit((vpage - sun4c_iobuffer_start) >> PAGE_SHIFT,
			  sun4c_iobuffer_map);
		vpage += PAGE_SIZE;
	}

	/* garbage collect */
	save_and_cli(flags);
	/* Walk down 32 pages (one bitmap word) at a time while empty. */
	scan = (sun4c_iobuffer_high - sun4c_iobuffer_start) >> PAGE_SHIFT;
	while (scan >= 0 && !sun4c_iobuffer_map[scan >> 5])
		scan -= 32;
	scan += 32;
	high = sun4c_iobuffer_start + (scan << PAGE_SHIFT);
	high = SUN4C_REAL_PGDIR_ALIGN(high) + SUN4C_REAL_PGDIR_SIZE;
	while (high < sun4c_iobuffer_high) {
		sun4c_iobuffer_high -= SUN4C_REAL_PGDIR_SIZE;
		free_locked_segment(sun4c_iobuffer_high);
	}
	restore_flags(flags);
}
+
/* Note the scsi code at init time passes to here buffers
 * which sit on the kernel stack, those are already locked
 * by implication and fool the page locking code above
 * if passed to by mistake.
 *
 * NOTE(review): the `> max_mapnr' test (rather than `>=') treats the
 * page at exactly max_mapnr as lockable -- confirm against MAP_NR()
 * semantics for this kernel version.
 */
static char *sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
{
	unsigned long page;

	page = ((unsigned long) bufptr) & PAGE_MASK;
	if(MAP_NR(page) > max_mapnr)
		return bufptr; /* already locked */
	return sun4c_lockarea(bufptr, len);
}
+
+static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ while(sz >= 0) {
+ sg[sz].dvma_addr = sun4c_lockarea(sg[sz].addr, sg[sz].len);
+ sz--;
+ }
+}
+
+static void sun4c_release_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
+{
+ unsigned long page = (unsigned long) bufptr;
+
+ if(page < sun4c_iobuffer_start)
+ return; /* On kernel stack or similar, see above */
+ sun4c_unlockarea(bufptr, len);
+}
+
+static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+ while(sz >= 0) {
+ sun4c_unlockarea(sg[sz].dvma_addr, sg[sz].len);
+ sz--;
+ }
+}
+
#define TASK_ENTRY_SIZE    BUCKET_SIZE /* see above */
/* Round x up to the next multiple of sizeof(long). */
#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

/* VMA covering the locked task-stack window, registered on init_mm. */
struct vm_area_struct sun4c_kstack_vma;
+
/* Carve up the SUN4C_LOCK window: task-stack buckets first, then the
 * I/O buffer area with its allocation bitmap (taken from start_mem),
 * and register a VMA for the stack region on init_mm.  Returns the
 * advanced start_mem.
 */
static unsigned long sun4c_init_lock_areas(unsigned long start_mem)
{
	unsigned long sun4c_taskstack_start;
	unsigned long sun4c_taskstack_end;
	int bitmap_size;

	sun4c_init_buckets();
	sun4c_taskstack_start = SUN4C_LOCK_VADDR;
	sun4c_taskstack_end = (sun4c_taskstack_start +
			       (TASK_ENTRY_SIZE * NR_TASKS));
	if(sun4c_taskstack_end >= SUN4C_LOCK_END) {
		prom_printf("Too many tasks, decrease NR_TASKS please.\n");
		prom_halt();
	}

	sun4c_iobuffer_start = sun4c_iobuffer_high =
		SUN4C_REAL_PGDIR_ALIGN(sun4c_taskstack_end);
	sun4c_iobuffer_end = SUN4C_LOCK_END;
	/* One bit per page, rounded up to whole longs. */
	bitmap_size = (sun4c_iobuffer_end - sun4c_iobuffer_start) >> PAGE_SHIFT;
	bitmap_size = (bitmap_size + 7) >> 3;
	bitmap_size = LONG_ALIGN(bitmap_size);
	iobuffer_map_size = bitmap_size << 3;
	sun4c_iobuffer_map = (unsigned long *) start_mem;
	memset((void *) start_mem, 0, bitmap_size);
	start_mem += bitmap_size;

	/* Now get us some mmu entries for I/O maps. */
	/* sun4c_init_lock_area(sun4c_iobuffer_start, sun4c_iobuffer_end); */
	sun4c_kstack_vma.vm_mm = init_task.mm;
	sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
	sun4c_kstack_vma.vm_end = sun4c_taskstack_end;
	sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
	sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	insert_vm_struct(&init_mm, &sun4c_kstack_vma);
	return start_mem;
}
+
/* Cache flushing on the sun4c. */
static void sun4c_flush_cache_all(void)
{
	/* Clear all tags in the sun4c cache.
	 * The cache is write through so this is safe.
	 * Same 8-way unrolled loop as the flush helpers above, but
	 * storing to the tag array through ASI_CONTROL.
	 */
	flush_user_windows();
	__asm__ __volatile__("add %2, %2, %%g1\n\t"
			     "add %2, %%g1, %%g2\n\t"
			     "add %2, %%g2, %%g3\n\t"
			     "add %2, %%g3, %%g4\n\t"
			     "add %2, %%g4, %%g5\n\t"
			     "add %2, %%g5, %%o4\n\t"
			     "add %2, %%o4, %%o5\n"
			     "1:\n\t"
			     "subcc %1, %%o5, %1\n\t"
			     "sta %%g0, [%0] %3\n\t"
			     "sta %%g0, [%0 + %2] %3\n\t"
			     "sta %%g0, [%0 + %%g1] %3\n\t"
			     "sta %%g0, [%0 + %%g2] %3\n\t"
			     "sta %%g0, [%0 + %%g3] %3\n\t"
			     "sta %%g0, [%0 + %%g4] %3\n\t"
			     "sta %%g0, [%0 + %%g5] %3\n\t"
			     "sta %%g0, [%0 + %%o4] %3\n\t"
			     "bg 1b\n\t"
			     " add %0, %%o5, %0\n\t" : :
			     "r" (AC_CACHETAGS),
			     "r" (sun4c_vacinfo.num_bytes),
			     "r" (sun4c_vacinfo.linesize),
			     "i" (ASI_CONTROL) :
			     "g1", "g2", "g3", "g4", "g5", "o4", "o5");
}
+
+static void sun4c_flush_cache_mm(struct mm_struct *mm)
+{
+ int octx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+ sun4c_flush_context();
+ sun4c_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+
+static void sun4c_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ int size, size2, octx, i;
+ unsigned long start2,end2;
+ struct sun4c_mmu_entry *entry,*entry2;
+
+	/* don't flush kernel memory as it's always valid in
+	   all contexts */
+ if (start >= PAGE_OFFSET)
+ return;
+
+#if KGPROF_PROFILING
+ kgprof_profile();
+#endif
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ size = end - start;
+
+ octx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+
+ entry = sun4c_context_ring[mm->context].ringhd.next;
+ i = sun4c_context_ring[mm->context].num_entries;
+ while (i--) {
+ entry2 = entry->next;
+ if (entry->vaddr < start || entry->vaddr >= end)
+ goto next_entry;
+
+ start2 = MAX(start,entry->vaddr);
+ end2 = MIN(end,entry->vaddr+SUN4C_REAL_PGDIR_SIZE);
+ size2 = end2 - start2;
+
+ if (size2 <= (PAGE_SIZE << 3)) {
+ start2 &= PAGE_MASK;
+ while(start2 < end2) {
+ sun4c_flush_page(start2);
+ start2 += PAGE_SIZE;
+ }
+ } else {
+ start2 &= SUN4C_REAL_PGDIR_MASK;
+ sun4c_flush_segment(start2);
+ /* we are betting that the entry will not be
+ needed for a while */
+ sun4c_user_unmap(entry);
+ free_user_entry(mm->context, entry);
+ }
+
+ next_entry:
+ entry = entry2;
+ }
+ sun4c_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void sun4c_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int octx;
+ struct mm_struct *mm = vma->vm_mm;
+
+	/* don't flush kernel memory as it's always valid in
+	   all contexts */
+ if (page >= PAGE_OFFSET)
+ return;
+
+ /* Sun4c has no separate I/D caches so cannot optimize for non
+ * text page flushes.
+ */
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ octx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+ sun4c_flush_page(page);
+ sun4c_set_context(octx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+/* Sun4c cache is write-through, so no need to validate main memory
+ * during a page copy in kernel space.
+ */
+static void sun4c_flush_page_to_ram(unsigned long page)
+{
+}
+
+/* TLB flushing on the sun4c. These routines count on the cache
+ * flushing code to flush the user register windows so that we need
+ * not do so when we get here.
+ */
+
+static void sun4c_flush_tlb_all(void)
+{
+ unsigned long flags;
+ int savectx, ctx, entry;
+
+ save_and_cli(flags);
+ savectx = sun4c_get_context();
+ for (entry = 0; entry < SUN4C_KERNEL_BUCKETS; entry++) {
+ if (sun4c_kernel_next->vaddr) {
+ for(ctx = 0; ctx < num_contexts; ctx++) {
+ sun4c_set_context(ctx);
+ sun4c_put_segmap(sun4c_kernel_next->vaddr,
+ invalid_segment);
+ }
+ sun4c_kernel_next->vaddr = 0;
+ }
+ sun4c_next_kernel_bucket(&sun4c_kernel_next);
+ }
+ sun4c_set_context(savectx);
+ restore_flags(flags);
+}
+
+static void sun4c_flush_tlb_mm(struct mm_struct *mm)
+{
+ struct sun4c_mmu_entry *this_entry, *next_entry;
+ struct sun4c_mmu_ring *crp;
+ int savectx, ctx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ crp = &sun4c_context_ring[mm->context];
+ savectx = sun4c_get_context();
+ ctx = mm->context;
+ this_entry = crp->ringhd.next;
+ flush_user_windows();
+ sun4c_set_context(mm->context);
+ sun4c_flush_context();
+ while(crp->num_entries) {
+ next_entry = this_entry->next;
+ sun4c_user_unmap(this_entry);
+ free_user_entry(ctx, this_entry);
+ this_entry = next_entry;
+ }
+ sun4c_set_context(savectx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+static void sun4c_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ struct sun4c_mmu_entry *entry,*entry2;
+ unsigned char savectx;
+ int i;
+
+#ifndef __SMP__
+ if(mm->context == NO_CONTEXT)
+ return;
+#endif
+
+#if KGPROF_PROFILING
+ kgprof_profile();
+#endif
+
+ savectx = sun4c_get_context();
+ sun4c_set_context(mm->context);
+ start &= SUN4C_REAL_PGDIR_MASK;
+
+ entry = sun4c_context_ring[mm->context].ringhd.next;
+ i = sun4c_context_ring[mm->context].num_entries;
+ while (i--) {
+ entry2 = entry->next;
+ if (entry->vaddr >= start && entry->vaddr < end) {
+ sun4c_flush_segment(entry->vaddr);
+ sun4c_user_unmap(entry);
+ free_user_entry(mm->context, entry);
+ }
+ entry = entry2;
+ }
+ sun4c_set_context(savectx);
+}
+
+
+static void sun4c_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int savectx;
+
+#ifndef __SMP__
+ if(mm->context != NO_CONTEXT) {
+#endif
+ savectx = sun4c_get_context();
+ sun4c_set_context(mm->context);
+ page &= PAGE_MASK;
+ if(sun4c_get_pte(page) & _SUN4C_PAGE_VALID)
+ sun4c_put_pte(page, 0);
+ sun4c_set_context(savectx);
+#ifndef __SMP__
+ }
+#endif
+}
+
+/* Sun4c mmu hardware doesn't update the dirty bit in the pte's
+ * for us, so we do it in software.
+ */
+static void sun4c_set_pte(pte_t *ptep, pte_t pte)
+{
+
+ if((pte_val(pte) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_DIRTY)) ==
+ _SUN4C_PAGE_WRITE)
+ pte_val(pte) |= _SUN4C_PAGE_DIRTY;
+
+ *ptep = pte;
+}
+
+void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
+ int bus_type, int rdonly)
+{
+ unsigned long page_entry;
+
+ page_entry = ((physaddr >> PAGE_SHIFT) & 0xffff);
+ page_entry |= (_SUN4C_PAGE_VALID | _SUN4C_PAGE_WRITE |
+ _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_IO);
+ if(rdonly)
+ page_entry &= (~_SUN4C_PAGE_WRITE);
+ sun4c_flush_page(virt_addr);
+ sun4c_put_pte(virt_addr, page_entry);
+}
+
+void sun4c_unmapioaddr(unsigned long virt_addr)
+{
+ sun4c_flush_page(virt_addr); /* XXX P3: Is it necessary for I/O page? */
+ sun4c_put_pte(virt_addr, 0);
+}
+
+static inline void sun4c_alloc_context(struct mm_struct *mm)
+{
+ struct ctx_list *ctxp;
+
+ ctxp = ctx_free.next;
+ if(ctxp != &ctx_free) {
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ mm->context = ctxp->ctx_number;
+ ctxp->ctx_mm = mm;
+ return;
+ }
+ ctxp = ctx_used.next;
+ if(ctxp->ctx_mm == current->mm)
+ ctxp = ctxp->next;
+ if(ctxp == &ctx_used)
+ panic("out of mmu contexts");
+ remove_from_ctx_list(ctxp);
+ add_to_used_ctxlist(ctxp);
+ ctxp->ctx_mm->context = NO_CONTEXT;
+ ctxp->ctx_mm = mm;
+ mm->context = ctxp->ctx_number;
+ sun4c_demap_context(&sun4c_context_ring[ctxp->ctx_number],
+ ctxp->ctx_number);
+}
+
+#if some_day_soon /* We need some tweaking to start using this */
+extern void force_user_fault(unsigned long, int);
+
+void sun4c_switch_heuristic(struct pt_regs *regs)
+{
+ unsigned long sp = regs->u_regs[UREG_FP];
+ unsigned long sp2 = sp + REGWIN_SZ - 0x8;
+
+ force_user_fault(regs->pc, 0);
+ force_user_fault(sp, 0);
+ if((sp&PAGE_MASK) != (sp2&PAGE_MASK))
+ force_user_fault(sp2, 0);
+}
+#endif
+
+static void sun4c_switch_to_context(struct task_struct *tsk)
+{
+ struct ctx_list *ctx;
+
+ if(tsk->mm->context == NO_CONTEXT) {
+ sun4c_alloc_context(tsk->mm);
+ goto set_context;
+ }
+
+ /* Update the LRU ring of contexts. */
+ ctx = ctx_list_pool + tsk->mm->context;
+ remove_from_ctx_list(ctx);
+ add_to_used_ctxlist(ctx);
+
+set_context:
+ sun4c_set_context(tsk->mm->context);
+}
+
+static void sun4c_flush_hook(void)
+{
+ if(current->tss.flags & SPARC_FLAG_KTHREAD) {
+ sun4c_alloc_context(current->mm);
+ sun4c_set_context(current->mm->context);
+ }
+}
+
+static void sun4c_exit_hook(void)
+{
+ struct ctx_list *ctx_old;
+ struct mm_struct *mm = current->mm;
+
+ if(mm->context != NO_CONTEXT && mm->count == 1) {
+ sun4c_demap_context(&sun4c_context_ring[mm->context], mm->context);
+ ctx_old = ctx_list_pool + mm->context;
+ remove_from_ctx_list(ctx_old);
+ add_to_free_ctxlist(ctx_old);
+ mm->context = NO_CONTEXT;
+ }
+}
+
+#if KGPROF_PROFILING
+static char s4cinfo[10240];
+#else
+static char s4cinfo[512];
+#endif
+
+static char *sun4c_mmu_info(void)
+{
+ int used_user_entries, i;
+
+ used_user_entries = 0;
+ for(i=0; i < num_contexts; i++)
+ used_user_entries += sun4c_context_ring[i].num_entries;
+
+ sprintf(s4cinfo, "vacsize\t\t: %d bytes\n"
+ "vachwflush\t: %s\n"
+ "vaclinesize\t: %d bytes\n"
+ "mmuctxs\t\t: %d\n"
+ "mmupsegs\t: %d\n"
+ "kernelpsegs\t: %d\n"
+ "usedpsegs\t: %d\n"
+ "ufreepsegs\t: %d\n"
+ "context\t\t: %d flushes\n"
+ "segment\t\t: %d flushes\n"
+ "page\t\t: %d flushes\n",
+ sun4c_vacinfo.num_bytes,
+ (sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
+ sun4c_vacinfo.linesize,
+ num_contexts,
+ (invalid_segment + 1),
+ invalid_segment - used_user_entries -
+ sun4c_ufree_ring.num_entries + 1,
+ used_user_entries,
+ sun4c_ufree_ring.num_entries,
+ ctxflushes, segflushes, pageflushes);
+
+#if KGPROF_PROFILING
+ {
+ char *p = s4cinfo + strlen(s4cinfo);
+ int i,j;
+ sprintf(p,"kgprof profiling:\n"); p += strlen(p);
+ for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
+ sprintf(p,"%5d ",kgprof_counters[i].count); p += strlen(p);
+ for (j=0;j<KGPROF_DEPTH;j++) {
+ sprintf(p,"%08x ",kgprof_counters[i].addr[j]);
+ p += strlen(p);
+ }
+ sprintf(p,"\n"); p += strlen(p);
+ }
+ }
+#endif
+
+ return s4cinfo;
+}
+
+/* Nothing below here should touch the mmu hardware nor the mmu_entry
+ * data structures.
+ */
+
+static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
+static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
+
+/* First the functions which the mid-level code uses to directly
+ * manipulate the software page tables. Some defines since we are
+ * emulating the i386 page directory layout.
+ */
+#define PGD_PRESENT 0x001
+#define PGD_RW 0x002
+#define PGD_USER 0x004
+#define PGD_ACCESSED 0x020
+#define PGD_DIRTY 0x040
+#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
+
+static unsigned long sun4c_vmalloc_start(void)
+{
+ return SUN4C_VMALLOC_START;
+}
+
+static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
+static int sun4c_pte_present(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_VALID; }
+static void sun4c_pte_clear(pte_t *ptep) { pte_val(*ptep) = 0; }
+
+static int sun4c_pmd_none(pmd_t pmd) { return !pmd_val(pmd); }
+static int sun4c_pmd_bad(pmd_t pmd)
+{
+ return (pmd_val(pmd) & ~PAGE_MASK) != PGD_TABLE ||
+ MAP_NR(pmd_val(pmd)) > max_mapnr;
+}
+
+static int sun4c_pmd_present(pmd_t pmd) { return pmd_val(pmd) & PGD_PRESENT; }
+static void sun4c_pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+static int sun4c_pgd_none(pgd_t pgd) { return 0; }
+static int sun4c_pgd_bad(pgd_t pgd) { return 0; }
+static int sun4c_pgd_present(pgd_t pgd) { return 1; }
+static void sun4c_pgd_clear(pgd_t * pgdp) { }
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+static int sun4c_pte_write(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_WRITE; }
+static int sun4c_pte_dirty(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_DIRTY; }
+static int sun4c_pte_young(pte_t pte) { return pte_val(pte) & _SUN4C_PAGE_REF; }
+
+static pte_t sun4c_pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_SUN4C_PAGE_WRITE; return pte; }
+static pte_t sun4c_pte_mkclean(pte_t pte) { pte_val(pte) &= ~_SUN4C_PAGE_DIRTY; return pte; }
+static pte_t sun4c_pte_mkold(pte_t pte) { pte_val(pte) &= ~_SUN4C_PAGE_REF; return pte; }
+static pte_t sun4c_pte_mkwrite(pte_t pte) { pte_val(pte) |= _SUN4C_PAGE_WRITE; return pte; }
+static pte_t sun4c_pte_mkdirty(pte_t pte) { pte_val(pte) |= _SUN4C_PAGE_DIRTY; return pte; }
+static pte_t sun4c_pte_mkyoung(pte_t pte) { pte_val(pte) |= _SUN4C_PAGE_REF; return pte; }
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+static pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot)
+{
+ return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
+{
+ return __pte((phys_page >> PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
+{
+ return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
+}
+
+static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+static unsigned long sun4c_pte_page(pte_t pte)
+{
+ return (PAGE_OFFSET + ((pte_val(pte) & 0xffff) << (PAGE_SHIFT)));
+}
+
+static unsigned long sun4c_pmd_page(pmd_t pmd)
+{
+ return (pmd_val(pmd) & PAGE_MASK);
+}
+
+/* to find an entry in a page-table-directory */
+pgd_t *sun4c_pgd_offset(struct mm_struct * mm, unsigned long address)
+{
+ return mm->pgd + (address >> SUN4C_PGDIR_SHIFT);
+}
+
+/* Find an entry in the second-level page table.. */
+static pmd_t *sun4c_pmd_offset(pgd_t * dir, unsigned long address)
+{
+ return (pmd_t *) dir;
+}
+
+/* Find an entry in the third-level page table.. */
+pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
+{
+ return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
+}
+
+/* Update the root mmu directory. */
+static void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
+{
+}
+
+/* Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any, and marks the page tables reserved.
+ */
+static void sun4c_pte_free_kernel(pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
+ if (sun4c_pmd_none(*pmd)) {
+ pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
+ if (sun4c_pmd_none(*pmd)) {
+ if (page) {
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) page;
+ return page + address;
+ }
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
+ return NULL;
+ }
+ free_page((unsigned long) page);
+ }
+ if (sun4c_pmd_bad(*pmd)) {
+ printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
+ return NULL;
+ }
+ return (pte_t *) sun4c_pmd_page(*pmd) + address;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+static void sun4c_pmd_free_kernel(pmd_t *pmd)
+{
+ pmd_val(*pmd) = 0;
+}
+
+static pmd_t *sun4c_pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+static void sun4c_pte_free(pte_t *pte)
+{
+ free_page((unsigned long) pte);
+}
+
+static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
+{
+ address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
+ if (sun4c_pmd_none(*pmd)) {
+ pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
+ if (sun4c_pmd_none(*pmd)) {
+ if (page) {
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) page;
+ return page + address;
+ }
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
+ return NULL;
+ }
+ free_page((unsigned long) page);
+ }
+ if (sun4c_pmd_bad(*pmd)) {
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = PGD_TABLE | (unsigned long) BAD_PAGETABLE;
+ return NULL;
+ }
+ return (pte_t *) sun4c_pmd_page(*pmd) + address;
+}
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+static void sun4c_pmd_free(pmd_t * pmd)
+{
+ pmd_val(*pmd) = 0;
+}
+
+static pmd_t *sun4c_pmd_alloc(pgd_t * pgd, unsigned long address)
+{
+ return (pmd_t *) pgd;
+}
+
+static void sun4c_pgd_free(pgd_t *pgd)
+{
+ free_page((unsigned long) pgd);
+}
+
+static pgd_t *sun4c_pgd_alloc(void)
+{
+ return (pgd_t *) get_free_page(GFP_KERNEL);
+}
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+extern unsigned long sparc_context_init(unsigned long, int);
+extern unsigned long end;
+
+unsigned long sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
+{
+ int i, cnt;
+ unsigned long kernel_end;
+ extern unsigned long sparc_iobase_vaddr;
+
+ kernel_end = (unsigned long) &end;
+ kernel_end += (SUN4C_REAL_PGDIR_SIZE * 3);
+ kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
+ sun4c_probe_mmu();
+ invalid_segment = (num_segmaps - 1);
+ sun4c_init_mmu_entry_pool();
+ sun4c_init_rings(&start_mem);
+ sun4c_init_map_kernelprom(kernel_end);
+ sun4c_init_clean_mmu(kernel_end);
+ sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
+ sun4c_init_lock_area(sparc_iobase_vaddr, IOBASE_END);
+ sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
+ start_mem = sun4c_init_lock_areas(start_mem);
+ sun4c_init_fill_user_ring();
+
+ sun4c_set_context(0);
+ memset(swapper_pg_dir, 0, PAGE_SIZE);
+ memset(pg0, 0, PAGE_SIZE);
+ /* Save work later. */
+ pgd_val(swapper_pg_dir[SUN4C_VMALLOC_START>>SUN4C_PGDIR_SHIFT]) =
+ PGD_TABLE | (unsigned long) pg0;
+ sun4c_init_ss2_cache_bug();
+ start_mem = PAGE_ALIGN(start_mem);
+ /* start_mem = sun4c_init_alloc_dvma_pages(start_mem); */
+ start_mem = sparc_context_init(start_mem, num_contexts);
+ start_mem = free_area_init(start_mem, end_mem);
+ cnt = 0;
+ for(i = 0; i < num_segmaps; i++)
+ if(mmu_entry_pool[i].locked)
+ cnt++;
+ printk("SUN4C: %d mmu entries for the kernel\n", cnt);
+ return start_mem;
+}
+
+/* Load up routines and constants for sun4c mmu */
+void ld_mmu_sun4c(void)
+{
+ printk("Loading sun4c MMU routines\n");
+
+ /* First the constants */
+ pmd_shift = SUN4C_PMD_SHIFT;
+ pmd_size = SUN4C_PMD_SIZE;
+ pmd_mask = SUN4C_PMD_MASK;
+ pgdir_shift = SUN4C_PGDIR_SHIFT;
+ pgdir_size = SUN4C_PGDIR_SIZE;
+ pgdir_mask = SUN4C_PGDIR_MASK;
+
+ ptrs_per_pte = SUN4C_PTRS_PER_PTE;
+ ptrs_per_pmd = SUN4C_PTRS_PER_PMD;
+ ptrs_per_pgd = SUN4C_PTRS_PER_PGD;
+
+ page_none = SUN4C_PAGE_NONE;
+ page_shared = SUN4C_PAGE_SHARED;
+ page_copy = SUN4C_PAGE_COPY;
+ page_readonly = SUN4C_PAGE_READONLY;
+ page_kernel = SUN4C_PAGE_KERNEL;
+ pg_iobits = _SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_IO | _SUN4C_PAGE_VALID
+ | _SUN4C_PAGE_WRITE | _SUN4C_PAGE_DIRTY;
+
+ /* Functions */
+#ifndef __SMP__
+ flush_cache_all = sun4c_flush_cache_all;
+ flush_cache_mm = sun4c_flush_cache_mm;
+ flush_cache_range = sun4c_flush_cache_range;
+ flush_cache_page = sun4c_flush_cache_page;
+
+ flush_tlb_all = sun4c_flush_tlb_all;
+ flush_tlb_mm = sun4c_flush_tlb_mm;
+ flush_tlb_range = sun4c_flush_tlb_range;
+ flush_tlb_page = sun4c_flush_tlb_page;
+#else
+ local_flush_cache_all = sun4c_flush_cache_all;
+ local_flush_cache_mm = sun4c_flush_cache_mm;
+ local_flush_cache_range = sun4c_flush_cache_range;
+ local_flush_cache_page = sun4c_flush_cache_page;
+
+ local_flush_tlb_all = sun4c_flush_tlb_all;
+ local_flush_tlb_mm = sun4c_flush_tlb_mm;
+ local_flush_tlb_range = sun4c_flush_tlb_range;
+ local_flush_tlb_page = sun4c_flush_tlb_page;
+
+ flush_cache_all = smp_flush_cache_all;
+ flush_cache_mm = smp_flush_cache_mm;
+ flush_cache_range = smp_flush_cache_range;
+ flush_cache_page = smp_flush_cache_page;
+
+ flush_tlb_all = smp_flush_tlb_all;
+ flush_tlb_mm = smp_flush_tlb_mm;
+ flush_tlb_range = smp_flush_tlb_range;
+ flush_tlb_page = smp_flush_tlb_page;
+#endif
+
+ flush_page_to_ram = sun4c_flush_page_to_ram;
+
+ set_pte = sun4c_set_pte;
+ switch_to_context = sun4c_switch_to_context;
+ pmd_align = sun4c_pmd_align;
+ pgdir_align = sun4c_pgdir_align;
+ vmalloc_start = sun4c_vmalloc_start;
+
+ pte_page = sun4c_pte_page;
+ pmd_page = sun4c_pmd_page;
+
+ sparc_update_rootmmu_dir = sun4c_update_rootmmu_dir;
+
+ pte_none = sun4c_pte_none;
+ pte_present = sun4c_pte_present;
+ pte_clear = sun4c_pte_clear;
+
+ pmd_none = sun4c_pmd_none;
+ pmd_bad = sun4c_pmd_bad;
+ pmd_present = sun4c_pmd_present;
+ pmd_clear = sun4c_pmd_clear;
+
+ pgd_none = sun4c_pgd_none;
+ pgd_bad = sun4c_pgd_bad;
+ pgd_present = sun4c_pgd_present;
+ pgd_clear = sun4c_pgd_clear;
+
+ mk_pte = sun4c_mk_pte;
+ mk_pte_phys = sun4c_mk_pte_phys;
+ mk_pte_io = sun4c_mk_pte_io;
+ pte_modify = sun4c_pte_modify;
+ pgd_offset = sun4c_pgd_offset;
+ pmd_offset = sun4c_pmd_offset;
+ pte_offset = sun4c_pte_offset;
+ pte_free_kernel = sun4c_pte_free_kernel;
+ pmd_free_kernel = sun4c_pmd_free_kernel;
+ pte_alloc_kernel = sun4c_pte_alloc_kernel;
+ pmd_alloc_kernel = sun4c_pmd_alloc_kernel;
+ pte_free = sun4c_pte_free;
+ pte_alloc = sun4c_pte_alloc;
+ pmd_free = sun4c_pmd_free;
+ pmd_alloc = sun4c_pmd_alloc;
+ pgd_free = sun4c_pgd_free;
+ pgd_alloc = sun4c_pgd_alloc;
+
+ pte_write = sun4c_pte_write;
+ pte_dirty = sun4c_pte_dirty;
+ pte_young = sun4c_pte_young;
+ pte_wrprotect = sun4c_pte_wrprotect;
+ pte_mkclean = sun4c_pte_mkclean;
+ pte_mkold = sun4c_pte_mkold;
+ pte_mkwrite = sun4c_pte_mkwrite;
+ pte_mkdirty = sun4c_pte_mkdirty;
+ pte_mkyoung = sun4c_pte_mkyoung;
+ update_mmu_cache = sun4c_update_mmu_cache;
+ mmu_exit_hook = sun4c_exit_hook;
+ mmu_flush_hook = sun4c_flush_hook;
+ mmu_lockarea = sun4c_lockarea;
+ mmu_unlockarea = sun4c_unlockarea;
+
+ mmu_get_scsi_one = sun4c_get_scsi_one;
+ mmu_get_scsi_sgl = sun4c_get_scsi_sgl;
+ mmu_release_scsi_one = sun4c_release_scsi_one;
+ mmu_release_scsi_sgl = sun4c_release_scsi_sgl;
+
+ mmu_map_dma_area = sun4c_map_dma_area;
+
+ mmu_v2p = sun4c_v2p;
+ mmu_p2v = sun4c_p2v;
+
+ /* Task struct and kernel stack allocating/freeing. */
+ alloc_kernel_stack = sun4c_alloc_kernel_stack;
+ alloc_task_struct = sun4c_alloc_task_struct;
+ free_kernel_stack = sun4c_free_kernel_stack;
+ free_task_struct = sun4c_free_task_struct;
+
+ quick_kernel_fault = sun4c_quick_kernel_fault;
+ mmu_info = sun4c_mmu_info;
+
+ /* These should _never_ get called with two level tables. */
+ pgd_set = 0;
+ pgd_page = 0;
+}
diff --git a/arch/sparc/mm/vac-flush.c b/arch/sparc/mm/vac-flush.c
deleted file mode 100644
index 796366b53..000000000
--- a/arch/sparc/mm/vac-flush.c
+++ /dev/null
@@ -1,94 +0,0 @@
-/* vac.c: Routines for flushing various amount of the Sparc VAC
- (virtual address cache).
-
- Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
-*/
-
-#include <asm/vac-ops.h>
-#include <asm/page.h>
-
-/* Flush all VAC entries for the current context */
-
-extern int vac_do_hw_vac_flushes, vac_size, vac_linesize;
-extern int vac_entries_per_context, vac_entries_per_segment;
-extern int vac_entries_per_page;
-
-void
-flush_vac_context()
-{
- register int entries_left, offset;
- register char* address;
-
- entries_left = vac_entries_per_context;
- address = (char *) 0;
-
- if(vac_do_hw_vac_flushes)
- {
- while(entries_left-- >=0)
- {
- hw_flush_vac_context_entry(address);
- address += PAGE_SIZE;
- }
- }
- else
- {
- offset = vac_linesize;
- while(entries_left-- >=0)
- {
- sw_flush_vac_context_entry(address);
- address += offset;
- }
- }
-}
-
-void
-flush_vac_segment(register unsigned int segment)
-{
- register int entries_left, offset;
- register char* address = (char *) 0;
-
- entries_left = vac_entries_per_segment;
- __asm__ __volatile__("sll %0, 18, %1\n\t"
- "sra %1, 0x2, %1\n\t"
- : "=r" (segment) : "0" (address));
-
- if(vac_do_hw_vac_flushes)
- {
- while(entries_left-- >=0)
- {
- hw_flush_vac_segment_entry(address);
- address += PAGE_SIZE;
- }
- }
- else
- {
- offset = vac_linesize;
- while(entries_left-- >=0)
- {
- sw_flush_vac_segment_entry(address);
- address += offset;
- }
- }
-}
-
-void
-flush_vac_page(register unsigned int addr)
-{
- register int entries_left, offset;
-
- if(vac_do_hw_vac_flushes)
- {
- hw_flush_vac_page_entry((unsigned long *) addr);
- }
- else
- {
- entries_left = vac_entries_per_page;
- offset = vac_linesize;
- while(entries_left-- >=0)
- {
- sw_flush_vac_page_entry((unsigned long *) addr);
- addr += offset;
- }
- }
-}
-
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
new file mode 100644
index 000000000..9c820a006
--- /dev/null
+++ b/arch/sparc/prom/Makefile
@@ -0,0 +1,23 @@
+# $Id: Makefile,v 1.5 1995/11/25 00:59:48 davem Exp $
+# Makefile for the Sun Boot PROM interface library under
+# Linux.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+OBJS = bootstr.o devmap.o devops.o init.o memory.o misc.o mp.o \
+ palloc.o ranges.o segment.o tree.o console.o printf.o
+
+all: promlib.a
+
+promlib.a: $(OBJS)
+ $(AR) rcs promlib.a $(OBJS)
+ sync
+
+dep:
+ $(CPP) -M *.c > .depend
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc/prom/bootstr.c b/arch/sparc/prom/bootstr.c
new file mode 100644
index 000000000..84120be11
--- /dev/null
+++ b/arch/sparc/prom/bootstr.c
@@ -0,0 +1,67 @@
+/* $Id: bootstr.c,v 1.11 1996/07/27 05:02:06 zaitcev Exp $
+ * bootstr.c: Boot string/argument acquisition from the PROM.
+ *
+ * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <asm/oplib.h>
+
+#define BARG_LEN 256
+static char barg_buf[BARG_LEN];
+static char fetched = 0;
+
+char *
+prom_getbootargs(void)
+{
+ int iter;
+ char *cp, *arg;
+
+ /* This check saves us from a panic when bootfd patches args. */
+ if (fetched) {
+ return barg_buf;
+ }
+
+ switch(prom_vers) {
+ case PROM_V0:
+ cp = barg_buf;
+ /* Start from 1 and go over fd(0,0,0)kernel */
+ for(iter = 1; iter < 8; iter++) {
+ arg = (*(romvec->pv_v0bootargs))->argv[iter];
+ if(arg == 0) break;
+ while(*arg != 0) {
+ /* Leave place for space and null. */
+ if(cp >= barg_buf + BARG_LEN-2){
+ /* We might issue a warning here. */
+ break;
+ }
+ *cp++ = *arg++;
+ }
+ *cp++ = ' ';
+ }
+ *cp = 0;
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ /*
+		 * V3 PROM cannot supply us with more than 128 bytes
+		 * of an argument. But a smart bootstrap loader can.
+ */
+ strncpy(barg_buf, *romvec->pv_v2bootargs.bootargs, BARG_LEN-1);
+ break;
+ case PROM_AP1000:
+ /*
+ * Get message from host boot process.
+ */
+#if CONFIG_AP1000
+ ap_getbootargs(barg_buf, BARG_LEN);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ fetched = 1;
+ return barg_buf;
+}
diff --git a/arch/sparc/prom/console.c b/arch/sparc/prom/console.c
new file mode 100644
index 000000000..73f1799ab
--- /dev/null
+++ b/arch/sparc/prom/console.c
@@ -0,0 +1,220 @@
+/* $Id: console.c,v 1.9 1996/09/19 20:27:17 davem Exp $
+ * console.c: Routines that deal with sending and receiving IO
+ * to/from the current console device using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <linux/string.h>
+
+/* Non blocking get character from console input device, returns -1
+ * if no input was taken. This can be used for polling.
+ */
+int
+prom_nbgetchar(void)
+{
+ static char inc;
+ int i = -1;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ i = (*(romvec->pv_nbgetchar))();
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) {
+ i = inc;
+ } else {
+ i = -1;
+ }
+ break;
+ case PROM_AP1000:
+ i = -1;
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return i; /* Ugh, we could spin forever on unsupported proms ;( */
+}
+
+/* Non blocking put character to console device, returns -1 if
+ * unsuccessful.
+ */
+int
+prom_nbputchar(char c)
+{
+ static char outc;
+ unsigned long flags;
+ int i = -1;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ i = (*(romvec->pv_nbputchar))(c);
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ outc = c;
+ if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1)
+ i = 0;
+ else
+ i = -1;
+ break;
+ case PROM_AP1000:
+#if CONFIG_AP1000
+ {
+ extern void ap_putchar(char );
+ ap_putchar(c);
+ i = 0;
+ }
+#else
+ i = -1;
+#endif
+
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return i; /* Ugh, we could spin forever on unsupported proms ;( */
+}
+
+/* Blocking version of get character routine above. */
+char
+prom_getchar(void)
+{
+ int character;
+ while((character = prom_nbgetchar()) == -1) ;
+ return (char) character;
+}
+
+/* Blocking version of put character routine above. */
+void
+prom_putchar(char c)
+{
+ while(prom_nbputchar(c) == -1) ;
+ return;
+}
+
+/* Query for input device type */
+enum prom_input_device
+prom_query_input_device()
+{
+ unsigned long flags;
+ int st_p;
+ char propb[64];
+ char *p;
+
+ switch(prom_vers) {
+ case PROM_V0:
+ case PROM_V2:
+ default:
+ switch(*romvec->pv_stdin) {
+ case PROMDEV_KBD: return PROMDEV_IKBD;
+ case PROMDEV_TTYA: return PROMDEV_ITTYA;
+ case PROMDEV_TTYB: return PROMDEV_ITTYB;
+ default:
+ return PROMDEV_I_UNK;
+ };
+ case PROM_V3:
+ case PROM_P1275:
+ save_flags(flags); cli();
+ st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdin);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ if(prom_node_has_property(st_p, "keyboard"))
+ return PROMDEV_IKBD;
+ prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+ if(strncmp(propb, "serial", sizeof("serial")))
+ return PROMDEV_I_UNK;
+ prom_getproperty(prom_root_node, "stdin-path", propb, sizeof(propb));
+ p = propb;
+ while(*p) p++; p -= 2;
+ if(p[0] == ':') {
+ if(p[1] == 'a')
+ return PROMDEV_ITTYA;
+ else if(p[1] == 'b')
+ return PROMDEV_ITTYB;
+ }
+ return PROMDEV_I_UNK;
+ case PROM_AP1000:
+ return PROMDEV_I_UNK;
+ };
+}
+
+/* Query for output device type */
+
+enum prom_output_device
+prom_query_output_device()
+{
+ unsigned long flags;
+ int st_p;
+ char propb[64];
+ char *p;
+ int propl;
+
+ switch(prom_vers) {
+ case PROM_V0:
+ switch(*romvec->pv_stdin) {
+ case PROMDEV_SCREEN: return PROMDEV_OSCREEN;
+ case PROMDEV_TTYA: return PROMDEV_OTTYA;
+ case PROMDEV_TTYB: return PROMDEV_OTTYB;
+ };
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ save_flags(flags); cli();
+ st_p = (*romvec->pv_v2devops.v2_inst2pkg)(*romvec->pv_v2bootargs.fd_stdout);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+ if (propl >= 0 && propl == sizeof("display") &&
+ strncmp("display", propb, sizeof("display")) == 0)
+ {
+ return PROMDEV_OSCREEN;
+ }
+ if(prom_vers == PROM_V3) {
+ if(strncmp("serial", propb, sizeof("serial")))
+ return PROMDEV_O_UNK;
+ prom_getproperty(prom_root_node, "stdout-path", propb, sizeof(propb));
+ p = propb;
+ while(*p) p++; p -= 2;
+ if(p[0]==':') {
+ if(p[1] == 'a')
+ return PROMDEV_OTTYA;
+ else if(p[1] == 'b')
+ return PROMDEV_OTTYB;
+ }
+ return PROMDEV_O_UNK;
+ } else {
+ /* This works on SS-2 (an early OpenFirmware) still. */
+ switch(*romvec->pv_stdin) {
+ case PROMDEV_TTYA: return PROMDEV_OTTYA;
+ case PROMDEV_TTYB: return PROMDEV_OTTYB;
+ };
+ }
+ break;
+ case PROM_AP1000:
+ return PROMDEV_I_UNK;
+ };
+ return PROMDEV_O_UNK;
+}
diff --git a/arch/sparc/prom/devmap.c b/arch/sparc/prom/devmap.c
new file mode 100644
index 000000000..841d63a47
--- /dev/null
+++ b/arch/sparc/prom/devmap.c
@@ -0,0 +1,56 @@
+/* $Id: devmap.c,v 1.3 1996/09/19 20:27:19 davem Exp $
+ * promdevmap.c: Map device/IO areas to virtual addresses.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Just like the routines in palloc.c, these should not be used
+ * by the kernel at all. Bootloader facility mainly. And again,
+ * this is only available on V2 proms and above.
+ */
+
+/* Map physical device address 'paddr' in IO space 'ios' of size
+ * 'num_bytes' to a virtual address, with 'vhint' being a hint to
+ * the prom as to where you would prefer the mapping. We return
+ * where the prom actually mapped it.
+ */
+char *
+prom_mapio(char *vhint, int ios, unsigned int paddr, unsigned int num_bytes)
+{
+ unsigned long flags;
+ char *ret;
+
+ save_flags(flags); cli();
+ if((num_bytes == 0) || (paddr == 0)) ret = (char *) 0x0;
+ else
+ ret = (*(romvec->pv_v2devops.v2_dumb_mmap))(vhint, ios, paddr,
+ num_bytes);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Unmap an IO/device area that was mapped using the above routine. */
+void
+prom_unmapio(char *vaddr, unsigned int num_bytes)
+{
+ unsigned long flags;
+
+ if(num_bytes == 0x0) return;
+ save_flags(flags); cli();
+ (*(romvec->pv_v2devops.v2_dumb_munmap))(vaddr, num_bytes);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return;
+}
diff --git a/arch/sparc/prom/devops.c b/arch/sparc/prom/devops.c
new file mode 100644
index 000000000..69803c31b
--- /dev/null
+++ b/arch/sparc/prom/devops.c
@@ -0,0 +1,97 @@
+/* $Id: devops.c,v 1.6 1996/10/12 12:37:38 davem Exp $
+ * devops.c: Device operations using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Open the device described by the string 'dstr'. Returns the handle
+ * to that device used for subsequent operations on that device.
+ * Returns -1 on failure.
+ */
+int
+prom_devopen(char *dstr)
+{
+ int handle;
+ unsigned long flags;
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ handle = (*(romvec->pv_v0devops.v0_devopen))(dstr);
+ if(handle == 0) handle = -1;
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr);
+ break;
+ case PROM_AP1000:
+ default:
+ handle = -1;
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return handle;
+}
+
+/* Close the device described by device handle 'dhandle'. */
+int
+prom_devclose(int dhandle)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ (*(romvec->pv_v0devops.v0_devclose))(dhandle);
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ (*(romvec->pv_v2devops.v2_dev_close))(dhandle);
+ break;
+ case PROM_AP1000:
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return 0;
+}
+
+/* Seek to specified location described by 'seekhi' and 'seeklo'
+ * for device 'dhandle'.
+ */
+void
+prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ (*(romvec->pv_v0devops.v0_seekdev))(dhandle, seekhi, seeklo);
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo);
+ break;
+ case PROM_AP1000:
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return;
+}
diff --git a/arch/sparc/prom/init.c b/arch/sparc/prom/init.c
new file mode 100644
index 000000000..4cdc064af
--- /dev/null
+++ b/arch/sparc/prom/init.c
@@ -0,0 +1,86 @@
+/* $Id: init.c,v 1.7 1996/04/04 16:31:00 tridge Exp $
+ * init.c: Initialize internal variables used by the PROM
+ * library functions.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+struct linux_romvec *romvec;
+enum prom_major_version prom_vers;
+unsigned int prom_rev, prom_prev;
+
+/* The root node of the prom device tree. */
+int prom_root_node;
+
+/* Pointer to the device tree operations structure. */
+struct linux_nodeops *prom_nodeops;
+
+/* You must call prom_init() before you attempt to use any of the
+ * routines in the prom library. It returns 0 on success, 1 on
+ * failure. It gets passed the pointer to the PROM vector.
+ */
+
+extern void prom_meminit(void);
+extern void prom_ranges_init(void);
+
+void
+prom_init(struct linux_romvec *rp)
+{
+ romvec = rp;
+
+#if CONFIG_AP1000
+ prom_vers = PROM_AP1000;
+ prom_meminit();
+ prom_ranges_init();
+ return;
+#endif
+ switch(romvec->pv_romvers) {
+ case 0:
+ prom_vers = PROM_V0;
+ break;
+ case 2:
+ prom_vers = PROM_V2;
+ break;
+ case 3:
+ prom_vers = PROM_V3;
+ break;
+ case 4:
+ prom_vers = PROM_P1275;
+ prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n");
+ prom_halt();
+ break;
+ default:
+ prom_printf("PROMLIB: Bad PROM version %d\n",
+ romvec->pv_romvers);
+ prom_halt();
+ break;
+ };
+
+ prom_rev = romvec->pv_plugin_revision;
+ prom_prev = romvec->pv_printrev;
+ prom_nodeops = romvec->pv_nodeops;
+
+ prom_root_node = prom_getsibling(0);
+ if((prom_root_node == 0) || (prom_root_node == -1))
+ prom_halt();
+
+ if((((unsigned long) prom_nodeops) == 0) ||
+ (((unsigned long) prom_nodeops) == -1))
+ prom_halt();
+
+ prom_meminit();
+
+ prom_ranges_init();
+
+ printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n",
+ romvec->pv_romvers, prom_rev);
+
+ /* Initialization successful. */
+ return;
+}
diff --git a/arch/sparc/prom/memory.c b/arch/sparc/prom/memory.c
new file mode 100644
index 000000000..7ee8e2a97
--- /dev/null
+++ b/arch/sparc/prom/memory.c
@@ -0,0 +1,215 @@
+/* $Id: memory.c,v 1.8 1996/07/12 05:14:56 tridge Exp $
+ * memory.c: Prom routine for acquiring various bits of information
+ * about RAM on the machine, both virtual and physical.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* This routine, for consistency, returns the ram parameters in the
+ * V0 prom memory descriptor format. I choose this format because I
+ * think it was the easiest to work with. I feel the religious
+ * arguments now... ;) Also, I return the linked lists sorted to
+ * prevent paging_init() upset stomach as I have not yet written
+ * the pepto-bismol kernel module yet.
+ */
+
+struct linux_prom_registers prom_reg_memlist[64];
+struct linux_prom_registers prom_reg_tmp[64];
+
+struct linux_mlist_v0 prom_phys_total[64];
+struct linux_mlist_v0 prom_prom_taken[64];
+struct linux_mlist_v0 prom_phys_avail[64];
+
+struct linux_mlist_v0 *prom_ptot_ptr = prom_phys_total;
+struct linux_mlist_v0 *prom_ptak_ptr = prom_prom_taken;
+struct linux_mlist_v0 *prom_pavl_ptr = prom_phys_avail;
+
+struct linux_mem_v0 prom_memlist;
+
+
+/* Internal Prom library routine to sort a linux_mlist_v0 memory
+ * list. Used below in initialization.
+ */
+void
+prom_sortmemlist(struct linux_mlist_v0 *thislist)
+{
+ int swapi = 0;
+ int i, mitr, tmpsize;
+ char *tmpaddr;
+ char *lowest;
+
+ for(i=0; thislist[i].theres_more != 0; i++) {
+ lowest = thislist[i].start_adr;
+ for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
+ if(thislist[mitr].start_adr < lowest) {
+ lowest = thislist[mitr].start_adr;
+ swapi = mitr;
+ }
+ if(lowest == thislist[i].start_adr) continue;
+ tmpaddr = thislist[swapi].start_adr;
+ tmpsize = thislist[swapi].num_bytes;
+ for(mitr = swapi; mitr > i; mitr--) {
+ thislist[mitr].start_adr = thislist[mitr-1].start_adr;
+ thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
+ }
+ thislist[i].start_adr = tmpaddr;
+ thislist[i].num_bytes = tmpsize;
+ }
+
+ return;
+}
+
+/* Initialize the memory lists based upon the prom version. */
+void
+prom_meminit(void)
+{
+ int node = 0;
+ unsigned int iter, num_regs;
+ struct linux_mlist_v0 *mptr; /* ptr for traversal */
+
+ switch(prom_vers) {
+ case PROM_V0:
+ /* Nice, kind of easier to do in this case. */
+ /* First, the total physical descriptors. */
+ for(mptr = (*(romvec->pv_v0mem.v0_totphys)), iter=0;
+ mptr; mptr=mptr->theres_more, iter++) {
+ prom_phys_total[iter].start_adr = mptr->start_adr;
+ prom_phys_total[iter].num_bytes = mptr->num_bytes;
+ prom_phys_total[iter].theres_more = &prom_phys_total[iter+1];
+ }
+ prom_phys_total[iter-1].theres_more = 0x0;
+ /* Second, the total prom taken descriptors. */
+ for(mptr = (*(romvec->pv_v0mem.v0_prommap)), iter=0;
+ mptr; mptr=mptr->theres_more, iter++) {
+ prom_prom_taken[iter].start_adr = mptr->start_adr;
+ prom_prom_taken[iter].num_bytes = mptr->num_bytes;
+ prom_prom_taken[iter].theres_more = &prom_prom_taken[iter+1];
+ }
+ prom_prom_taken[iter-1].theres_more = 0x0;
+ /* Last, the available physical descriptors. */
+ for(mptr = (*(romvec->pv_v0mem.v0_available)), iter=0;
+ mptr; mptr=mptr->theres_more, iter++) {
+ prom_phys_avail[iter].start_adr = mptr->start_adr;
+ prom_phys_avail[iter].num_bytes = mptr->num_bytes;
+ prom_phys_avail[iter].theres_more = &prom_phys_avail[iter+1];
+ }
+ prom_phys_avail[iter-1].theres_more = 0x0;
+ /* Sort all the lists. */
+ prom_sortmemlist(prom_phys_total);
+ prom_sortmemlist(prom_prom_taken);
+ prom_sortmemlist(prom_phys_avail);
+ break;
+ case PROM_V2:
+ case PROM_V3:
+ case PROM_P1275:
+ /* Grrr, have to traverse the prom device tree ;( */
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(node, "memory");
+ num_regs = prom_getproperty(node, "available",
+ (char *) prom_reg_memlist,
+ sizeof(prom_reg_memlist));
+ num_regs = (num_regs/sizeof(struct linux_prom_registers));
+ for(iter=0; iter<num_regs; iter++) {
+ prom_phys_avail[iter].start_adr =
+ prom_reg_memlist[iter].phys_addr;
+ prom_phys_avail[iter].num_bytes =
+ (unsigned long) prom_reg_memlist[iter].reg_size;
+ prom_phys_avail[iter].theres_more =
+ &prom_phys_avail[iter+1];
+ }
+ prom_phys_avail[iter-1].theres_more = 0x0;
+
+ num_regs = prom_getproperty(node, "reg",
+ (char *) prom_reg_memlist,
+ sizeof(prom_reg_memlist));
+ num_regs = (num_regs/sizeof(struct linux_prom_registers));
+ for(iter=0; iter<num_regs; iter++) {
+ prom_phys_total[iter].start_adr =
+ prom_reg_memlist[iter].phys_addr;
+ prom_phys_total[iter].num_bytes =
+ (unsigned long) prom_reg_memlist[iter].reg_size;
+ prom_phys_total[iter].theres_more =
+ &prom_phys_total[iter+1];
+ }
+ prom_phys_total[iter-1].theres_more = 0x0;
+
+ node = prom_getchild(prom_root_node);
+ node = prom_searchsiblings(node, "virtual-memory");
+ num_regs = prom_getproperty(node, "available",
+ (char *) prom_reg_memlist,
+ sizeof(prom_reg_memlist));
+ num_regs = (num_regs/sizeof(struct linux_prom_registers));
+
+ /* Convert available virtual areas to taken virtual
+ * areas. First sort, then convert.
+ */
+ for(iter=0; iter<num_regs; iter++) {
+ prom_prom_taken[iter].start_adr =
+ prom_reg_memlist[iter].phys_addr;
+ prom_prom_taken[iter].num_bytes =
+ (unsigned long) prom_reg_memlist[iter].reg_size;
+ prom_prom_taken[iter].theres_more =
+ &prom_phys_total[iter+1];
+ }
+ prom_prom_taken[iter-1].theres_more = 0x0;
+
+ prom_sortmemlist(prom_prom_taken);
+
+ /* Finally, convert. */
+ for(iter=0; iter<num_regs; iter++) {
+ prom_prom_taken[iter].start_adr =
+ prom_prom_taken[iter].start_adr +
+ prom_prom_taken[iter].num_bytes;
+ prom_prom_taken[iter].num_bytes =
+ prom_prom_taken[iter+1].start_adr -
+ prom_prom_taken[iter].start_adr;
+ }
+ prom_prom_taken[iter-1].num_bytes =
+ 0xffffffff - (unsigned long) prom_prom_taken[iter-1].start_adr;
+
+ /* Sort the other two lists. */
+ prom_sortmemlist(prom_phys_total);
+ prom_sortmemlist(prom_phys_avail);
+ break;
+
+ case PROM_AP1000:
+#if CONFIG_AP1000
+ /* really simple memory map */
+ prom_phys_total[0].start_adr = 0x00000000;
+ prom_phys_total[0].num_bytes = ap_memory_size();
+ prom_phys_total[0].theres_more = 0x0;
+ prom_prom_taken[0].start_adr = 0x00000000;
+ prom_prom_taken[0].num_bytes = 0x00000000;
+ prom_prom_taken[0].theres_more = 0x0;
+ prom_phys_avail[0].start_adr = 0x00000000;
+ prom_phys_avail[0].num_bytes = prom_phys_total[0].num_bytes;
+ prom_phys_avail[0].theres_more = 0x0;
+ prom_sortmemlist(prom_phys_total);
+ prom_sortmemlist(prom_prom_taken);
+ prom_sortmemlist(prom_phys_avail);
+#endif
+ break;
+ };
+
+ /* Link all the lists into the top-level descriptor. */
+ prom_memlist.v0_totphys=&prom_ptot_ptr;
+ prom_memlist.v0_prommap=&prom_ptak_ptr;
+ prom_memlist.v0_available=&prom_pavl_ptr;
+
+ return;
+}
+
+/* This returns a pointer to our libraries internal v0 format
+ * memory descriptor.
+ */
+struct linux_mem_v0 *
+prom_meminfo(void)
+{
+ return &prom_memlist;
+}
diff --git a/arch/sparc/prom/misc.c b/arch/sparc/prom/misc.c
new file mode 100644
index 000000000..25f1028ac
--- /dev/null
+++ b/arch/sparc/prom/misc.c
@@ -0,0 +1,154 @@
+/* $Id: misc.c,v 1.11 1996/10/12 13:12:58 davem Exp $
+ * misc.c: Miscellaneous prom functions that don't belong
+ * anywhere else.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/auxio.h>
+
+/* Reset and reboot the machine with the command 'bcommand'. */
+void
+prom_reboot(char *bcommand)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ (*(romvec->pv_reboot))(bcommand);
+ /* Never get here. */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+}
+
+/* Forth evaluate the expression contained in 'fstring'. */
+void
+prom_feval(char *fstring)
+{
+ unsigned long flags;
+ if(!fstring || fstring[0] == 0)
+ return;
+ save_flags(flags); cli();
+ if(prom_vers == PROM_V0)
+ (*(romvec->pv_fortheval.v0_eval))(strlen(fstring), fstring);
+ else
+ (*(romvec->pv_fortheval.v2_eval))(fstring);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+}
+
+/* We want to do this more nicely some day. */
+#ifdef CONFIG_SUN_CONSOLE
+extern void console_restore_palette(void);
+extern void set_palette(void);
+extern int serial_console;
+#endif
+
+/* Drop into the prom, with the chance to continue with the 'go'
+ * prom command.
+ */
+void
+prom_cmdline(void)
+{
+ extern void kernel_enter_debugger(void);
+ extern void install_obp_ticker(void);
+ extern void install_linux_ticker(void);
+ unsigned long flags;
+
+ kernel_enter_debugger();
+#ifdef CONFIG_SUN_CONSOLE
+ if(!serial_console)
+ console_restore_palette ();
+#endif
+ install_obp_ticker();
+ save_flags(flags); cli();
+ (*(romvec->pv_abort))();
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ install_linux_ticker();
+#ifdef CONFIG_SUN_AUXIO
+ TURN_ON_LED;
+#endif
+#ifdef CONFIG_SUN_CONSOLE
+ if(!serial_console)
+ set_palette ();
+#endif
+}
+
+/* Drop into the prom, but completely terminate the program.
+ * No chance of continuing.
+ */
+void
+prom_halt(void)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ (*(romvec->pv_halt))();
+ /* Never get here. */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+}
+
+typedef void (*sfunc_t)(void);
+
+/* Set prom sync handler to call function 'funcp'. */
+void
+prom_setsync(sfunc_t funcp)
+{
+#ifdef CONFIG_AP1000
+ printk("not doing setsync\n");
+ return;
+#endif
+ if(!funcp) return;
+ *romvec->pv_synchook = funcp;
+}
+
+/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
+ * format type. 'num_bytes' is the number of bytes that your idbuf
+ * has space for. Returns 0xff on error.
+ */
+unsigned char
+prom_get_idprom(char *idbuf, int num_bytes)
+{
+ int len;
+
+ len = prom_getproplen(prom_root_node, "idprom");
+ if((len>num_bytes) || (len==-1)) return 0xff;
+ if(!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
+ return idbuf[0];
+
+ return 0xff;
+}
+
+/* Get the major prom version number. */
+int
+prom_version(void)
+{
+ return romvec->pv_romvers;
+}
+
+/* Get the prom plugin-revision. */
+int
+prom_getrev(void)
+{
+ return prom_rev;
+}
+
+/* Get the prom firmware print revision. */
+int
+prom_getprev(void)
+{
+ return prom_prev;
+}
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
new file mode 100644
index 000000000..f046db55a
--- /dev/null
+++ b/arch/sparc/prom/mp.c
@@ -0,0 +1,135 @@
+/* $Id: mp.c,v 1.6 1996/09/19 20:27:25 davem Exp $
+ * mp.c: OpenBoot Prom Multiprocessor support routines. Don't call
+ * these on a UP or else you will halt and catch fire. ;)
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Start cpu with prom-tree node 'cpunode' using context described
+ * by 'ctable_reg' in context 'ctx' at program counter 'pc'.
+ *
+ * XXX Have to look into what the return values mean. XXX
+ */
+int
+prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, char *pc)
+{
+ int ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ case PROM_V2:
+ case PROM_AP1000:
+ default:
+ ret = -1;
+ break;
+ case PROM_V3:
+ case PROM_P1275:
+ ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return ret;
+}
+
+/* Stop CPU with device prom-tree node 'cpunode'.
+ * XXX Again, what does the return value really mean? XXX
+ */
+int
+prom_stopcpu(int cpunode)
+{
+ int ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ case PROM_V2:
+ case PROM_AP1000:
+ default:
+ ret = -1;
+ break;
+ case PROM_V3:
+ case PROM_P1275:
+ ret = (*(romvec->v3_cpustop))(cpunode);
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return ret;
+}
+
+/* Make CPU with device prom-tree node 'cpunode' idle.
+ * XXX Return value, anyone? XXX
+ */
+int
+prom_idlecpu(int cpunode)
+{
+ int ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ case PROM_V2:
+ case PROM_AP1000:
+ default:
+ ret = -1;
+ break;
+ case PROM_V3:
+ case PROM_P1275:
+ ret = (*(romvec->v3_cpuidle))(cpunode);
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return ret;
+}
+
+/* Resume the execution of CPU with nodeid 'cpunode'.
+ * XXX Come on, somebody has to know... XXX
+ */
+int
+prom_restartcpu(int cpunode)
+{
+ int ret;
+ unsigned long flags;
+
+ save_flags(flags); cli();
+ switch(prom_vers) {
+ case PROM_V0:
+ case PROM_V2:
+ case PROM_AP1000:
+ default:
+ ret = -1;
+ break;
+ case PROM_V3:
+ case PROM_P1275:
+ ret = (*(romvec->v3_cpuresume))(cpunode);
+ break;
+ };
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+
+ return ret;
+}
diff --git a/arch/sparc/prom/palloc.c b/arch/sparc/prom/palloc.c
new file mode 100644
index 000000000..84ce8bc54
--- /dev/null
+++ b/arch/sparc/prom/palloc.c
@@ -0,0 +1,44 @@
+/* $Id: palloc.c,v 1.4 1996/04/25 06:09:48 davem Exp $
+ * palloc.c: Memory allocation from the Sun PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* You should not call these routines after memory management
+ * has been initialized in the kernel, if fact you should not
+ * use these if at all possible in the kernel. They are mainly
+ * to be used for a bootloader for temporary allocations which
+ * it will free before jumping into the kernel it has loaded.
+ *
+ * Also, these routines don't work on V0 proms, only V2 and later.
+ */
+
+/* Allocate a chunk of memory of size 'num_bytes' giving a suggestion
+ * of virtual_hint as the preferred virtual base address of this chunk.
+ * There are no guarantees that you will get the allocation, or that
+ * the prom will abide by your "hint". So check your return value.
+ */
+char *
+prom_alloc(char *virtual_hint, unsigned int num_bytes)
+{
+ if(prom_vers == PROM_V0) return (char *) 0x0;
+ if(num_bytes == 0x0) return (char *) 0x0;
+ return (*(romvec->pv_v2devops.v2_dumb_mem_alloc))(virtual_hint, num_bytes);
+}
+
+/* Free a previously allocated chunk back to the prom at virtual address
+ * 'vaddr' of size 'num_bytes'. NOTE: This vaddr is not the hint you
+ * used for the allocation, but the virtual address the prom actually
+ * returned to you. They may be have been the same, they may have not,
+ * doesn't matter.
+ */
+void
+prom_free(char *vaddr, unsigned int num_bytes)
+{
+ if((prom_vers == PROM_V0) || (num_bytes == 0x0)) return;
+ (*(romvec->pv_v2devops.v2_dumb_mem_free))(vaddr, num_bytes);
+ return;
+}
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
new file mode 100644
index 000000000..72a6cd6cc
--- /dev/null
+++ b/arch/sparc/prom/printf.c
@@ -0,0 +1,43 @@
+/* $Id: printf.c,v 1.5 1996/04/04 16:31:07 tridge Exp $
+ * printf.c: Internal prom library printf facility.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+/* This routine is internal to the prom library, no one else should know
+ * about or use it! It's simple and smelly anyway....
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+static char ppbuf[1024];
+
+void
+prom_printf(char *fmt, ...)
+{
+ va_list args;
+ char ch, *bptr;
+ int i;
+
+ va_start(args, fmt);
+ i = vsprintf(ppbuf, fmt, args);
+
+ bptr = ppbuf;
+
+#if CONFIG_AP1000
+ ap_write(1,bptr,strlen(bptr));
+#else
+ while((ch = *(bptr++)) != 0) {
+ if(ch == '\n')
+ prom_putchar('\r');
+
+ prom_putchar(ch);
+ }
+#endif
+ va_end(args);
+ return;
+}
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
new file mode 100644
index 000000000..8c7518578
--- /dev/null
+++ b/arch/sparc/prom/ranges.c
@@ -0,0 +1,145 @@
+/* $Id: ranges.c,v 1.6 1996/11/03 08:12:01 davem Exp $
+ * ranges.c: Handle ranges in newer proms for obio/sbus.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/sbus.h>
+#include <asm/system.h>
+
+struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
+int num_obio_ranges;
+
+/* Adjust register values based upon the ranges parameters. */
+void
+prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
+ struct linux_prom_ranges *rangep, int nranges)
+{
+ int regc, rngc;
+
+ for(regc=0; regc < nregs; regc++) {
+ for(rngc=0; rngc < nranges; rngc++)
+ if(regp[regc].which_io == rangep[rngc].ot_child_space)
+ break; /* Fount it */
+ if(rngc==nranges) /* oops */
+ prom_printf("adjust_regs: Could not find range with matching bus type...\n");
+ regp[regc].which_io = rangep[rngc].ot_parent_space;
+ regp[regc].phys_addr += rangep[rngc].ot_parent_base;
+ }
+}
+
+void
+prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
+ struct linux_prom_ranges *ranges2, int nranges2)
+{
+ int rng1c, rng2c;
+
+ for(rng1c=0; rng1c < nranges1; rng1c++) {
+ for(rng2c=0; rng2c < nranges2; rng2c++)
+ if(ranges1[rng1c].ot_child_space ==
+ ranges2[rng2c].ot_child_space) break;
+ if(rng2c == nranges2) /* oops */
+ prom_printf("adjust_ranges: Could not find matching bus type...\n");
+ ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
+ ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
+ }
+}
+
+/* Apply probed obio ranges to registers passed, if no ranges return. */
+void
+prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
+{
+ if(num_obio_ranges)
+ prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
+}
+
+/* Apply probed sbus ranges to registers passed, if no ranges return. */
+void
+prom_apply_sbus_ranges(struct linux_sbus *sbus, struct linux_prom_registers *regs, int nregs)
+{
+ if(sbus->num_sbus_ranges)
+ prom_adjust_regs(regs, nregs, sbus->sbus_ranges, sbus->num_sbus_ranges);
+}
+
+void
+prom_ranges_init(void)
+{
+ int node, obio_node;
+ int success;
+
+ num_obio_ranges = 0;
+
+ /* Check for obio and sbus ranges. */
+ node = prom_getchild(prom_root_node);
+ obio_node = prom_searchsiblings(node, "obio");
+
+ if(obio_node) {
+ success = prom_getproperty(obio_node, "ranges",
+ (char *) promlib_obio_ranges,
+ sizeof(promlib_obio_ranges));
+ if(success != -1)
+ num_obio_ranges = (success/sizeof(struct linux_prom_ranges));
+ }
+
+ if(num_obio_ranges)
+ prom_printf("PROMLIB: obio_ranges %d\n", num_obio_ranges);
+
+ return;
+}
+
+void
+prom_sbus_ranges_init(int parentnd, struct linux_sbus *sbus)
+{
+ int success;
+
+ sbus->num_sbus_ranges = 0;
+ if(sparc_cpu_model == sun4c)
+ return;
+ success = prom_getproperty(sbus->prom_node, "ranges",
+ (char *) sbus->sbus_ranges,
+ sizeof (sbus->sbus_ranges));
+ if (success != -1)
+ sbus->num_sbus_ranges = (success/sizeof(struct linux_prom_ranges));
+ if (sparc_cpu_model == sun4d) {
+ struct linux_prom_ranges iounit_ranges[PROMREG_MAX];
+ int num_iounit_ranges;
+
+ success = prom_getproperty(parentnd, "ranges",
+ (char *) iounit_ranges,
+ sizeof (iounit_ranges));
+ if (success != -1) {
+ num_iounit_ranges = (success/sizeof(struct linux_prom_ranges));
+ prom_adjust_ranges (sbus->sbus_ranges, sbus->num_sbus_ranges, iounit_ranges, num_iounit_ranges);
+ }
+ }
+}
+
+void
+prom_apply_generic_ranges (int node, int parent, struct linux_prom_registers *regs, int nregs)
+{
+ int success;
+ int num_ranges;
+ struct linux_prom_ranges ranges[PROMREG_MAX];
+
+ success = prom_getproperty(node, "ranges",
+ (char *) ranges,
+ sizeof (ranges));
+ if (success != -1) {
+ num_ranges = (success/sizeof(struct linux_prom_ranges));
+ if (parent) {
+ struct linux_prom_ranges parent_ranges[PROMREG_MAX];
+ int num_parent_ranges;
+
+ success = prom_getproperty(parent, "ranges",
+ (char *) parent_ranges,
+ sizeof (parent_ranges));
+ if (success != -1) {
+ num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
+ prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
+ }
+ }
+ prom_adjust_regs(regs, nregs, ranges, num_ranges);
+ }
+}
diff --git a/arch/sparc/prom/segment.c b/arch/sparc/prom/segment.c
new file mode 100644
index 000000000..cb1def0e1
--- /dev/null
+++ b/arch/sparc/prom/segment.c
@@ -0,0 +1,29 @@
+/* $Id: segment.c,v 1.3 1996/09/19 20:27:28 davem Exp $
+ * segment.c: Prom routine to map segments in other contexts before
+ * a standalone is completely mapped. This is for sun4 and
+ * sun4c architectures only.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Set physical segment 'segment' at virtual address 'vaddr' in
+ * context 'ctx'.
+ */
+void
+prom_putsegment(int ctx, unsigned long vaddr, int segment)
+{
+ unsigned long flags;
+ save_flags(flags); cli();
+ (*(romvec->pv_setctxt))(ctx, (char *) vaddr, segment);
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return;
+}
diff --git a/arch/sparc/prom/tree.c b/arch/sparc/prom/tree.c
new file mode 100644
index 000000000..4afe9908b
--- /dev/null
+++ b/arch/sparc/prom/tree.c
@@ -0,0 +1,368 @@
+/* $Id: tree.c,v 1.12 1996/10/12 12:37:40 davem Exp $
+ * tree.c: Basic device tree traversal/scanning for the Linux
+ * prom library.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+static char promlib_buf[128];
+
+/* Return the child of node 'node' or zero if this node has no
+ * direct descendant.
+ */
+int
+prom_getchild(int node)
+{
+ int cnode, ret;
+ unsigned long flags;
+
+ /* The PROM is single-threaded firmware: keep interrupts off for
+ * the whole call. */
+ save_flags(flags); cli();
+
+#if CONFIG_AP1000
+ /* The Fujitsu AP1000 has no OpenBoot PROM; stub out. */
+ printk("prom_getchild -> 0\n");
+ restore_flags(flags);
+ return 0;
+#else
+ if(node == -1) {
+ ret = 0;
+ } else {
+ cnode = prom_nodeops->no_child(node);
+ /* PROM reports "no child" as either 0 or -1; normalize to 0. */
+ if((cnode == 0) || (cnode == -1))
+ ret = 0;
+ else
+ ret = cnode;
+ }
+ /* Reload %g6 (current task pointer) after the PROM call --
+ * presumably clobbered by the firmware; the reload pattern repeats
+ * after every PROM entry in this file. */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+#endif
+}
+
+/* Return the next sibling of node 'node' or zero if no more siblings
+ * at this level of depth in the tree.
+ */
+int
+prom_getsibling(int node)
+{
+ int sibnode, ret;
+ unsigned long flags;
+
+ /* Interrupts off across the PROM call. */
+ save_flags(flags); cli();
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; stub out. */
+ printk("prom_getsibling -> 0\n");
+ restore_flags(flags);
+ return 0;
+#else
+ if(node == -1) {
+ ret = 0;
+ } else {
+ sibnode = prom_nodeops->no_nextnode(node);
+ /* PROM reports "no sibling" as either 0 or -1; normalize to 0. */
+ if((sibnode == 0) || (sibnode == -1))
+ ret = 0;
+ else
+ ret = sibnode;
+ }
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+#endif
+}
+
+/* Return the length in bytes of property 'prop' at node 'node'.
+ * Return -1 on error.
+ */
+int
+prom_getproplen(int node, char *prop)
+{
+ int ret;
+ unsigned long flags;
+
+ /* Interrupts off across the PROM call. */
+ save_flags(flags); cli();
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; always report failure. */
+ printk("prom_getproplen(%s) -> -1\n",prop);
+ restore_flags(flags);
+ return -1;
+#endif
+ /* NOTE(review): unlike prom_getchild/prom_getsibling this only
+ * rejects node == 0, not node == -1 -- confirm that is intended. */
+ if((!node) || (!prop))
+ ret = -1;
+ else
+ ret = prom_nodeops->no_proplen(node, prop);
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Acquire a property 'prop' at node 'node' and place it in
+ * 'buffer' which has a size of 'bufsize'. If the acquisition
+ * was successful the length will be returned, else -1 is returned.
+ */
+int
+prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+{
+ int plen, ret;
+ unsigned long flags;
+
+ /* Interrupts off across both PROM calls below; the nested
+ * save_flags/cli inside prom_getproplen() is harmless. */
+ save_flags(flags); cli();
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; always report failure. */
+ printk("prom_getproperty(%s) -> -1\n",prop);
+ restore_flags(flags);
+ return -1;
+#endif
+ /* Refuse properties that are missing, empty, or too big for the
+ * caller's buffer -- no_getprop() has no size argument. */
+ plen = prom_getproplen(node, prop);
+ if((plen > bufsize) || (plen == 0) || (plen == -1))
+ ret = -1;
+ else {
+ /* Ok, things seem all right. */
+ ret = prom_nodeops->no_getprop(node, prop, buffer);
+ }
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Acquire an integer property and return its value. Returns -1
+ * on failure (note -1 is also a possible property value, so callers
+ * cannot distinguish that case).
+ */
+int
+prom_getint(int node, char *prop)
+{
+ /* NOTE(review): 'static' makes this non-reentrant and not SMP-safe;
+ * an automatic variable would appear to suffice -- confirm before
+ * changing. */
+ static int intprop;
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; always report failure. */
+ printk("prom_getint(%s) -> -1\n",prop);
+ return -1;
+#endif
+ if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+ return intprop;
+
+ return -1;
+}
+
+/* Acquire an integer property, upon error return the passed default
+ * integer.
+ */
+
+int
+prom_getintdefault(int node, char *property, int deflt)
+{
+ int retval;
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; always report 0 (not 'deflt'). */
+ printk("prom_getintdefault(%s) -> 0\n",property);
+ return 0;
+#endif
+ /* prom_getint() signals failure with -1; a genuine -1 property
+ * value is therefore also replaced by the default. */
+ retval = prom_getint(node, property);
+ if(retval == -1) return deflt;
+
+ return retval;
+}
+
+/* Acquire a boolean property, 1=TRUE 0=FALSE. A boolean property is
+ * simply present (possibly zero-length) for TRUE and absent for FALSE,
+ * so only the property length is consulted.
+ */
+int
+prom_getbool(int node, char *prop)
+{
+ int retval;
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; everything reads as FALSE. */
+ printk("prom_getbool(%s) -> 0\n",prop);
+ return 0;
+#endif
+ retval = prom_getproplen(node, prop);
+ if(retval == -1) return 0;
+ return 1;
+}
+
+/* Acquire a property whose value is a string, returns a null
+ * string on error. The char pointer is the user supplied string
+ * buffer of size 'ubuf_size'.
+ */
+void
+prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+{
+ int len;
+
+#if CONFIG_AP1000
+ /* No PROM on the AP1000; NOTE(review): this early return leaves
+ * user_buf untouched rather than empty -- confirm callers cope. */
+ printk("prom_getstring(%s) -> .\n",prop);
+ return;
+#endif
+ len = prom_getproperty(node, prop, user_buf, ubuf_size);
+ if(len != -1) return;
+ /* Failure: hand back the empty string. */
+ user_buf[0] = 0;
+ return;
+}
+
+
+/* Does the device at node 'node' have name 'name'?
+ * YES = 1 NO = 0
+ */
+int
+prom_nodematch(int node, char *name)
+{
+ /* NOTE(review): static scratch buffer -- not reentrant/SMP-safe. */
+ static char namebuf[128];
+ /* Return value ignored: on failure namebuf keeps stale/garbage
+ * contents and the compare simply (most likely) fails. */
+ prom_getproperty(node, "name", namebuf, sizeof(namebuf));
+ if(strcmp(namebuf, name) == 0) return 1;
+ return 0;
+}
+
+/* Search siblings at 'node_start' for a node with name
+ * 'nodename'. Return node if successful, zero if not.
+ */
+int
+prom_searchsiblings(int node_start, char *nodename)
+{
+
+ int thisnode, error;
+
+ /* Walk the sibling chain; prom_getsibling() returns 0 at the end. */
+ for(thisnode = node_start; thisnode;
+ thisnode=prom_getsibling(thisnode)) {
+ error = prom_getproperty(thisnode, "name", promlib_buf,
+ sizeof(promlib_buf));
+ /* Should this ever happen? Skip nodes with no readable name. */
+ if(error == -1) continue;
+ if(strcmp(nodename, promlib_buf)==0) return thisnode;
+ }
+
+ return 0;
+}
+
+/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg))
+ * into 'buffer' of size 'len'. Returns 0 on success, -1 on error.
+ */
+int
+prom_getname (int node, char *buffer, int len)
+{
+ int i;
+ struct linux_prom_registers reg[PROMREG_MAX];
+
+ i = prom_getproperty (node, "name", buffer, len);
+ if (i <= 0) return -1;
+ /* NOTE(review): if i == len this NUL lands one byte past the
+ * caller's buffer -- verify callers always pass len > name length. */
+ buffer [i] = 0;
+ len -= i;
+ i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
+ /* No "reg" property: the bare name is the whole answer. */
+ if (i <= 0) return 0;
+ /* NOTE(review): "@%x,%x" can expand to up to 18 chars + NUL, so the
+ * len < 11 guard does not cover the worst case -- confirm bounds. */
+ if (len < 11) return -1;
+ /* Append "@<which_io>,<phys_addr>" after the name's terminator. */
+ buffer = strchr (buffer, 0);
+ sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
+ return 0;
+}
+
+/* Return the first property type for node 'node'.
+ * Returns the empty string for an invalid node.
+ */
+char *
+prom_firstprop(int node)
+{
+ unsigned long flags;
+ char *ret;
+
+ if(node == -1) return "";
+ /* Interrupts off across the PROM call; passing a NULL "previous
+ * property" asks the PROM for the first one. */
+ save_flags(flags); cli();
+ ret = prom_nodeops->no_nextprop(node, (char *) 0x0);
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Return the property type string after property type 'oprop'
+ * at node 'node' . Returns NULL string if no more
+ * property types for this node.
+ */
+char *
+prom_nextprop(int node, char *oprop)
+{
+ char *ret;
+ unsigned long flags;
+
+ if(node == -1) return "";
+ /* Interrupts off across the PROM call. */
+ save_flags(flags); cli();
+ ret = prom_nodeops->no_nextprop(node, oprop);
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Does node 'node' carry a property named 'prop'?
+ * YES = 1 NO = 0. Iterates the property list via prom_nextprop(),
+ * which returns "" when the list is exhausted.
+ */
+int
+prom_node_has_property(int node, char *prop)
+{
+ char *current_property = "";
+
+ do {
+ current_property = prom_nextprop(node, current_property);
+ if(!strcmp(current_property, prop))
+ return 1;
+ } while (*current_property);
+ return 0;
+}
+
+/* Set property 'pname' at node 'node' to value 'value' which has a length
+ * of 'size' bytes. Return the number of bytes the prom accepted.
+ * A zero size or NULL name/value is a no-op returning 0.
+ */
+int
+prom_setprop(int node, char *pname, char *value, int size)
+{
+ unsigned long flags;
+ int ret;
+
+ if(size == 0) return 0;
+ if((pname == 0) || (value == 0)) return 0;
+ /* Interrupts off across the PROM call. */
+ save_flags(flags); cli();
+ ret = prom_nodeops->no_setprop(node, pname, value, size);
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ return ret;
+}
+
+/* Translate a prom device instance handle 'inst' to its package
+ * (device tree node). Returns 0 on failure. V2+ proms only
+ * (uses the pv_v2devops vector).
+ */
+int
+prom_inst2pkg(int inst)
+{
+ int node;
+ unsigned long flags;
+
+ /* Interrupts off across the PROM call. */
+ save_flags(flags); cli();
+ node = (*romvec->pv_v2devops.v2_inst2pkg)(inst);
+ /* Reload %g6 (current task ptr, presumably PROM-clobbered). */
+ __asm__ __volatile__("ld [%0], %%g6\n\t" : :
+ "r" (&current_set[smp_processor_id()]) :
+ "memory");
+ restore_flags(flags);
+ /* Normalize the PROM's -1 failure code to 0. */
+ if (node == -1) return 0;
+ return node;
+}
+
+/* Return 'node' assigned to a particular prom 'path', or 0 on failure.
+ * Works by opening the device instance, asking the prom for its
+ * package node, then closing the instance again.
+ * FIXME: Should work for v0 as well
+ */
+int
+prom_pathtoinode(char *path)
+{
+ int node, inst;
+
+ inst = prom_devopen (path);
+ if (inst == -1) return 0;
+ node = prom_inst2pkg (inst);
+ prom_devclose (inst);
+ /* NOTE(review): prom_inst2pkg() already maps -1 to 0, so this
+ * check is redundant but harmless. */
+ if (node == -1) return 0;
+ return node;
+}