summaryrefslogtreecommitdiffstats
path: root/arch/sparc64
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1997-04-29 21:13:14 +0000
committer <ralf@linux-mips.org>1997-04-29 21:13:14 +0000
commit19c9bba94152148523ba0f7ef7cffe3d45656b11 (patch)
tree40b1cb534496a7f1ca0f5c314a523c69f1fee464 /arch/sparc64
parent7206675c40394c78a90e74812bbdbf8cf3cca1be (diff)
Import of Linux/MIPS 2.1.36
Diffstat (limited to 'arch/sparc64')
-rw-r--r--arch/sparc64/Makefile51
-rw-r--r--arch/sparc64/config.in157
-rw-r--r--arch/sparc64/defconfig162
-rw-r--r--arch/sparc64/kernel/Makefile55
-rw-r--r--arch/sparc64/kernel/auxio.c42
-rw-r--r--arch/sparc64/kernel/check_asm.sh3
-rw-r--r--arch/sparc64/kernel/cpu.c90
-rw-r--r--arch/sparc64/kernel/devices.c67
-rw-r--r--arch/sparc64/kernel/dtlb_miss.S80
-rw-r--r--arch/sparc64/kernel/dtlb_prot.S56
-rw-r--r--arch/sparc64/kernel/entry.S280
-rw-r--r--arch/sparc64/kernel/etrap.S114
-rw-r--r--arch/sparc64/kernel/hack.S214
-rw-r--r--arch/sparc64/kernel/head.S373
-rw-r--r--arch/sparc64/kernel/idprom.c49
-rw-r--r--arch/sparc64/kernel/ioport.c139
-rw-r--r--arch/sparc64/kernel/irq.c638
-rw-r--r--arch/sparc64/kernel/itlb_miss.S49
-rw-r--r--arch/sparc64/kernel/process.c594
-rw-r--r--arch/sparc64/kernel/rtrap.S109
-rw-r--r--arch/sparc64/kernel/setup.c435
-rw-r--r--arch/sparc64/kernel/signal32.c806
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c178
-rw-r--r--arch/sparc64/kernel/sparcelf32.c1281
-rw-r--r--arch/sparc64/kernel/sys_sparc.c270
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c1741
-rw-r--r--arch/sparc64/kernel/systbls.S222
-rw-r--r--arch/sparc64/kernel/time.c352
-rw-r--r--arch/sparc64/kernel/traps.c185
-rw-r--r--arch/sparc64/kernel/ttable.S252
-rw-r--r--arch/sparc64/lib/Makefile56
-rw-r--r--arch/sparc64/lib/blockops.S138
-rw-r--r--arch/sparc64/lib/checksum.S565
-rw-r--r--arch/sparc64/lib/copy_from_user.S456
-rw-r--r--arch/sparc64/lib/copy_to_user.S456
-rw-r--r--arch/sparc64/lib/locks.S77
-rw-r--r--arch/sparc64/lib/memcmp.S29
-rw-r--r--arch/sparc64/lib/memcpy.S526
-rw-r--r--arch/sparc64/lib/memscan.S116
-rw-r--r--arch/sparc64/lib/memset.S196
-rw-r--r--arch/sparc64/lib/strlen.S77
-rw-r--r--arch/sparc64/lib/strlen_user.S99
-rw-r--r--arch/sparc64/lib/strncmp.S31
-rw-r--r--arch/sparc64/lib/strncpy_from_user.S54
-rw-r--r--arch/sparc64/mm/Makefile13
-rw-r--r--arch/sparc64/mm/asyncd.c272
-rw-r--r--arch/sparc64/mm/extable.c69
-rw-r--r--arch/sparc64/mm/fault.c201
-rw-r--r--arch/sparc64/mm/generic.c124
-rw-r--r--arch/sparc64/mm/init.c730
-rw-r--r--arch/sparc64/prom/Makefile23
-rw-r--r--arch/sparc64/prom/bootstr.c23
-rw-r--r--arch/sparc64/prom/console.c128
-rw-r--r--arch/sparc64/prom/devops.c41
-rw-r--r--arch/sparc64/prom/init.c79
-rw-r--r--arch/sparc64/prom/memory.c152
-rw-r--r--arch/sparc64/prom/misc.c134
-rw-r--r--arch/sparc64/prom/p1275.c170
-rw-r--r--arch/sparc64/prom/printf.c46
-rw-r--r--arch/sparc64/prom/ranges.c107
-rw-r--r--arch/sparc64/prom/tree.c328
-rw-r--r--arch/sparc64/vmlinux.lds83
62 files changed, 14643 insertions, 0 deletions
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
new file mode 100644
index 000000000..bacf9b095
--- /dev/null
+++ b/arch/sparc64/Makefile
@@ -0,0 +1,51 @@
+# $Id: Makefile,v 1.15 1997/04/14 17:04:49 jj Exp $
+# sparc64/Makefile
+#
+# Makefile for the architecture dependent flags and dependencies on the
+# 64-bit Sparc.
+#
+# Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+#
+
+# If the solaris /bin/sh wasn't so broken, I wouldn't need the following
+# line...
+SHELL =/bin/bash
+
+CC = sparc64-linux-gcc -D__KERNEL__ -I$(TOPDIR)/include
+AS = sparc64-linux-as
+LD = sparc64-linux-ld
+NM = sparc64-linux-nm
+AR = sparc64-linux-ar
+RANLIB = sparc64-linux-ranlib
+ELF2AOUT64 = elf2aout64
+
+#
+# Uncomment the first CFLAGS if you are doing kgdb source level
+# debugging of the kernel to get the proper debugging information.
+
+#CFLAGS := $(CFLAGS) -g -pipe -fcall-used-g5 -fcall-used-g7
+CFLAGS := $(CFLAGS) -pipe \
+ -fcall-used-g5 -fcall-used-g7 -Wno-sign-compare
+
+LINKFLAGS = -T arch/sparc64/vmlinux.lds
+
+HEAD := arch/sparc64/kernel/head.o
+
+SUBDIRS := $(SUBDIRS) arch/sparc64/kernel arch/sparc64/lib arch/sparc64/mm \
+ arch/sparc64/prom
+
+CORE_FILES := arch/sparc64/kernel/kernel.o arch/sparc64/mm/mm.o $(CORE_FILES)
+
+LIBS := $(TOPDIR)/lib/lib.a $(LIBS) $(TOPDIR)/arch/sparc64/prom/promlib.a \
+ $(TOPDIR)/arch/sparc64/lib/lib.a
+
+vmlinux.aout: vmlinux
+ $(ELF2AOUT64) -o $(TOPDIR)/vmlinux.aout $(TOPDIR)/vmlinux
+
+archclean:
+ rm -f $(TOPDIR)/vmlinux.aout
+
+archdep:
+
+check_asm:
+ $(MAKE) -C arch/sparc64/kernel check_asm
diff --git a/arch/sparc64/config.in b/arch/sparc64/config.in
new file mode 100644
index 000000000..c8cdc0134
--- /dev/null
+++ b/arch/sparc64/config.in
@@ -0,0 +1,157 @@
+# $Id: config.in,v 1.6 1997/04/17 20:35:42 jj Exp $
+# For a description of the syntax of this configuration file,
+# see the Configure script.
+#
+mainmenu_name "Linux/SPARC Kernel Configuration"
+
+mainmenu_option next_comment
+comment 'Code maturity level options'
+bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+endmenu
+
+mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool 'Set version information on all symbols for modules' CONFIG_MODVERSIONS
+ bool 'Kernel daemon support (e.g. autoload of modules)' CONFIG_KERNELD
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'General setup'
+
+define_bool CONFIG_VT y
+define_bool CONFIG_VT_CONSOLE y
+
+bool 'Support for AP1000 multicomputer' CONFIG_AP1000
+
+if [ "$CONFIG_AP1000" = "y" ]; then
+ define_bool CONFIG_NO_KEYBOARD y
+ define_bool CONFIG_APFDDI y
+ define_bool CONFIG_APBLOCK y
+ define_bool CONFIG_APBIF y
+ tristate 'OPIU DDV Driver' CONFIG_DDV
+else
+ # Global things across all Sun machines.
+ define_bool CONFIG_SBUS y
+ define_bool CONFIG_SBUSCHAR y
+ define_bool CONFIG_SUN_MOUSE y
+ define_bool CONFIG_SERIAL y
+ define_bool CONFIG_SUN_SERIAL y
+ define_bool CONFIG_SUN_KEYBOARD y
+ define_bool CONFIG_SUN_CONSOLE y
+ define_bool CONFIG_SUN_AUXIO y
+ define_bool CONFIG_SUN_IO y
+ source drivers/sbus/char/Config.in
+fi
+
+tristate 'Openprom tree appears in /proc/openprom (EXPERIMENTAL)' CONFIG_SUN_OPENPROMFS
+bool 'Networking support' CONFIG_NET
+bool 'System V IPC' CONFIG_SYSVIPC
+bool 'Sysctl support' CONFIG_SYSCTL
+bool 'Kernel support for Linux/Sparc 32bit binary compatibility' CONFIG_SPARC32_COMPAT
+tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
+tristate 'Kernel support for 64-bit ELF binaries' CONFIG_BINFMT_ELF
+if [ "$CONFIG_SPARC32_COMPAT" != "n" ]; then
+ tristate 'Kernel support for 32-bit ELF binaries' CONFIG_BINFMT_ELF32
+fi
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA
+fi
+endmenu
+
+mainmenu_option next_comment
+comment 'Floppy, IDE, and other block devices'
+
+bool 'Normal floppy disk support' CONFIG_BLK_DEV_FD
+
+bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD
+if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
+ tristate ' Linear (append) mode' CONFIG_MD_LINEAR
+ tristate ' RAID-0 (striping) mode' CONFIG_MD_STRIPED
+# tristate ' RAID-1 (mirroring) mode' CONFIG_MD_MIRRORING
+fi
+
+tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
+if [ "$CONFIG_BLK_DEV_RAM" = "y" ]; then
+ bool ' Initial RAM disk (initrd) support' CONFIG_BLK_DEV_INITRD
+fi
+
+tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
+
+endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ source net/Config.in
+fi
+
+mainmenu_option next_comment
+comment 'SCSI support'
+
+tristate 'SCSI support' CONFIG_SCSI
+
+if [ "$CONFIG_SCSI" != "n" ]; then
+ comment 'SCSI support type (disk, tape, CDrom)'
+
+ dep_tristate 'SCSI disk support' CONFIG_BLK_DEV_SD $CONFIG_SCSI
+ dep_tristate 'SCSI tape support' CONFIG_CHR_DEV_ST $CONFIG_SCSI
+ dep_tristate 'SCSI CDROM support' CONFIG_BLK_DEV_SR $CONFIG_SCSI
+ if [ "$CONFIG_BLK_DEV_SR" != "n" ]; then
+ bool ' Enable vendor-specific extentions (for SCSI CDROM)' CONFIG_BLK_DEV_SR_VENDOR
+ fi
+ dep_tristate 'SCSI generic support' CONFIG_CHR_DEV_SG $CONFIG_SCSI
+
+ comment 'Some SCSI devices (e.g. CD jukebox) support multiple LUNs'
+
+ bool 'Probe all LUNs on each SCSI device' CONFIG_SCSI_MULTI_LUN
+
+ bool 'Verbose SCSI error reporting (kernel size +=12K)' CONFIG_SCSI_CONSTANTS
+
+ mainmenu_option next_comment
+ comment 'SCSI low-level drivers'
+
+ bool 'Sparc ESP Scsi Driver' CONFIG_SCSI_SUNESP $CONFIG_SCSI
+ tristate 'PTI Qlogic,ISP Driver' CONFIG_SCSI_QLOGICPTI $CONFIG_SCSI
+ endmenu
+fi
+endmenu
+
+if [ "$CONFIG_NET" = "y" ]; then
+ mainmenu_option next_comment
+ comment 'Network device support'
+
+ bool 'Network device support' CONFIG_NETDEVICES
+ if [ "$CONFIG_NETDEVICES" = "y" ]; then
+ tristate 'Dummy net driver support' CONFIG_DUMMY
+ tristate 'PPP (point-to-point) support' CONFIG_PPP
+ if [ ! "$CONFIG_PPP" = "n" ]; then
+ comment 'CCP compressors for PPP are only built as modules.'
+ fi
+ tristate 'SLIP (serial line) support' CONFIG_SLIP
+ if [ "$CONFIG_SLIP" != "n" ]; then
+ bool ' CSLIP compressed headers' CONFIG_SLIP_COMPRESSED
+ bool ' Keepalive and linefill' CONFIG_SLIP_SMART
+ bool ' Six bit SLIP encapsulation' CONFIG_SLIP_MODE_SLIP6
+ fi
+ bool 'Sun LANCE support' CONFIG_SUNLANCE
+ tristate 'Sun Happy Meal 10/100baseT support' CONFIG_HAPPYMEAL
+ tristate 'Sun QuadEthernet support' CONFIG_SUNQE
+ tristate 'MyriCOM Gigabit Ethernet support' CONFIG_MYRI_SBUS
+# bool 'FDDI driver support' CONFIG_FDDI
+# if [ "$CONFIG_FDDI" = "y" ]; then
+# fi
+ fi
+ endmenu
+fi
+
+source fs/Config.in
+
+mainmenu_option next_comment
+comment 'Kernel hacking'
+
+bool 'Kernel profiling support' CONFIG_PROFILE
+if [ "$CONFIG_PROFILE" = "y" ]; then
+ int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
+fi
+endmenu
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
new file mode 100644
index 000000000..9ecbf90b4
--- /dev/null
+++ b/arch/sparc64/defconfig
@@ -0,0 +1,162 @@
+#
+# Automatically generated make config: don't edit
+#
+
+#
+# Code maturity level options
+#
+# CONFIG_EXPERIMENTAL is not set
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# General setup
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+# CONFIG_AP1000 is not set
+CONFIG_SBUS=y
+CONFIG_SBUSCHAR=y
+CONFIG_SUN_MOUSE=y
+CONFIG_SERIAL=y
+CONFIG_SUN_SERIAL=y
+CONFIG_SUN_KEYBOARD=y
+CONFIG_SUN_CONSOLE=y
+CONFIG_SUN_AUXIO=y
+CONFIG_SUN_IO=y
+
+#
+# SBUS Frame Buffer support
+#
+SUN_FBS_IN_PROCFS=y
+CONFIG_SUN_FB_DISPLAY=y
+SUN_FB_CGSIX=y
+SUN_FB_TCX=y
+SUN_FB_CGTHREE=y
+SUN_FB_CGFOURTEEN=y
+SUN_FB_BWTWO=y
+SUN_FB_LEO=y
+TADPOLE_FB_WEITEK=y
+
+#
+# Misc Linux/SPARC drivers
+#
+# CONFIG_SUN_OPENPROMIO is not set
+# CONFIG_SUN_MOSTEK_RTC is not set
+# CONFIG_SUN_OPENPROMFS is not set
+CONFIG_NET=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSCTL=y
+CONFIG_SPARC32_COMPAT=y
+CONFIG_BINFMT_AOUT=y
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Floppy, IDE, and other block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# Networking options
+#
+# CONFIG_NETLINK is not set
+# CONFIG_FIREWALL is not set
+# CONFIG_NET_ALIAS is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ACCT is not set
+# CONFIG_IP_ROUTER is not set
+# CONFIG_NET_IPIP is not set
+
+#
+# (it is safe to leave these untouched)
+#
+# CONFIG_INET_PCTCP is not set
+# CONFIG_INET_RARP is not set
+CONFIG_PATH_MTU_DISCOVERY=y
+CONFIG_IP_NOSR=y
+CONFIG_SKB_LARGE=y
+
+#
+#
+#
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_AX25 is not set
+
+#
+# SCSI support
+#
+CONFIG_SCSI=y
+
+#
+# SCSI support type (disk, tape, CDrom)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+
+#
+# SCSI low-level drivers
+#
+CONFIG_SCSI_SUNESP=y
+# CONFIG_SCSI_QLOGICPTI is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+CONFIG_SUNLANCE=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNQE is not set
+# CONFIG_MYRI_SBUS is not set
+
+#
+# Filesystems
+#
+CONFIG_QUOTA=y
+CONFIG_MINIX_FS=y
+CONFIG_EXT2_FS=y
+# CONFIG_FAT_FS is not set
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_UMSDOS_FS is not set
+CONFIG_PROC_FS=y
+CONFIG_NFS_FS=y
+# CONFIG_ROOT_NFS is not set
+# CONFIG_NFSD is not set
+CONFIG_SUNRPC=y
+CONFIG_LOCKD=y
+# CONFIG_SMB_FS is not set
+CONFIG_ISO9660_FS=y
+# CONFIG_HPFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_UFS_FS=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_SMD_DISKLABEL=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PROFILE is not set
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
new file mode 100644
index 000000000..d66fa06e7
--- /dev/null
+++ b/arch/sparc64/kernel/Makefile
@@ -0,0 +1,55 @@
+# $Id: Makefile,v 1.16 1997/04/17 20:35:37 jj Exp $
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+.S.s:
+ $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
+
+all: kernel.o head.o
+
+O_TARGET := kernel.o
+O_OBJS := etrap.o rtrap.o hack.o process.o setup.o cpu.o idprom.o \
+ systbls.o traps.o entry.o devices.o auxio.o ioport.o \
+ irq.o time.o sys_sparc.o
+OX_OBJS := sparc64_ksyms.o
+
+ifdef CONFIG_SPARC32_COMPAT
+ O_OBJS += sys_sparc32.o signal32.o
+endif
+
+ifdef CONFIG_BINFMT_ELF32
+ O_OBJS += sparcelf32.o
+endif
+
+head.o: head.S ttable.S itlb_miss.S dtlb_miss.S dtlb_prot.S
+ $(CC) -D__ASSEMBLY__ -ansi -c $*.S -o $*.o
+
+check_asm: dummy
+ @echo "#include <linux/sched.h>" > tmp.c
+ $(CC) -E tmp.c -o tmp.i
+ @echo "/* Automatically generated. Do not edit. */" > check_asm.c; echo "#include <linux/sched.h>" >> check_asm.c; echo 'struct task_struct _task; struct mm_struct _mm; struct thread_struct _thread; int main(void) { printf ("/* Automatically generated. Do not edit. */\n#ifndef __ASM_OFFSETS_H__\n#define __ASM_OFFSETS_H__\n\n");' >> check_asm.c
+ $(SH) ./check_asm.sh task tmp.i check_asm.c
+ $(SH) ./check_asm.sh mm tmp.i check_asm.c
+ $(SH) ./check_asm.sh thread tmp.i check_asm.c
+ @echo 'printf ("\n#endif /* __ASM_OFFSETS_H__ */\n"); return 0; }' >> check_asm.c
+ @rm -f tmp.[ci]
+ #$(CC) -o check_asm check_asm.c
+ # <hack> Until we can do this natively, a hack has to take place
+ $(CC) -mmedlow -S -o check_asm.s check_asm.c
+ $(HOSTCC) -o check_asm check_asm.s
+ @rm -f check_asm.s
+ # </hack>
+ ./check_asm > asm_offsets.h
+ @if test -r $(HPATH)/asm/asm_offsets.h; then if cmp -s asm_offsets.h $(HPATH)/asm/asm_offsets.h; then echo $(HPATH)/asm/asm_offsets.h is unchanged; rm -f asm_offsets.h; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi; else mv -f asm_offsets.h $(HPATH)/asm/asm_offsets.h; fi
+ @rm -f check_asm check_asm.c
+
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/kernel/auxio.c b/arch/sparc64/kernel/auxio.c
new file mode 100644
index 000000000..6c3ff9174
--- /dev/null
+++ b/arch/sparc64/kernel/auxio.c
@@ -0,0 +1,42 @@
+/* auxio.c: Probing for the Sparc AUXIO register at boot time.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <asm/oplib.h>
+#include <asm/io.h>
+#include <asm/auxio.h>
+#include <asm/sbus.h>
+
+/* Probe and map in the Auxiliary I/O register */
+unsigned char *auxio_register;
+
+__initfunc(void auxio_probe(void))
+{
+ struct linux_sbus *bus;
+ struct linux_sbus_device *sdev = 0;
+ struct linux_prom_registers auxregs[1];
+
+ for_each_sbus(bus) {
+ for_each_sbusdev(sdev, bus) {
+ if(!strcmp(sdev->prom_name, "auxio")) {
+ break;
+ }
+ }
+ }
+
+ if (!sdev) {
+ prom_printf("Cannot find auxio node, cannot continue...\n");
+ prom_halt();
+ }
+ prom_getproperty(sdev->prom_node, "reg", (char *) auxregs, sizeof(auxregs));
+ prom_apply_sbus_ranges(sdev->my_bus, auxregs, 0x1, sdev);
+ /* Map the register both read and write */
+ auxio_register = (unsigned char *) sparc_alloc_io(auxregs[0].phys_addr, 0,
+ auxregs[0].reg_size,
+ "auxiliaryIO",
+ auxregs[0].which_io, 0x0);
+ TURN_ON_LED;
+}
diff --git a/arch/sparc64/kernel/check_asm.sh b/arch/sparc64/kernel/check_asm.sh
new file mode 100644
index 000000000..2d2fbd224
--- /dev/null
+++ b/arch/sparc64/kernel/check_asm.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+sed -n -e '/struct[ ]*'$1'_struct[ ]*{/,/};/p' < $2 | sed '/struct[ ]*'$1'_struct[ ]*{/d;/:[0-9]*[ ]*;/d;/^[ ]*$/d;/};/d;s/^[ ]*//;s/volatile[ ]*//;s/\(unsigned\|signed\|struct\)[ ]*//;s/\(\[\|__attribute__\).*;[ ]*$//;s/;[ ]*$//;s/^[^ ]*[ ]*//;s/,/\
+/g' | sed 's/^[ *]*//;s/[ ]*$//;s/^.*$/printf ("#define AOFF_'$1'_\0 0x%08x\\n#define ASIZ_'$1'_\0 0x%08x\\n", ((char *)\&_'$1'.\0) - ((char *)\&_'$1'), sizeof(_'$1'.\0));/' >> $3
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
new file mode 100644
index 000000000..695ad680e
--- /dev/null
+++ b/arch/sparc64/kernel/cpu.c
@@ -0,0 +1,90 @@
+/* cpu.c: Dinky routines to look for the kind of Sparc cpu
+ * we are on.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <asm/system.h>
+
+struct cpu_iu_info {
+ short manuf;
+ short impl;
+ char* cpu_name; /* should be enough I hope... */
+};
+
+struct cpu_fp_info {
+ short manuf;
+ short impl;
+ char fpu_vers;
+ char* fp_name;
+};
+
+/* In order to get the fpu type correct, you need to take the IDPROM's
+ * machine type value into consideration too. I will fix this.
+ */
+struct cpu_fp_info linux_sparc_fpu[] = {
+ { 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
+ { 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
+ { 0x17, 0x12, 0, "UltraSparc III integrated FPU"},
+};
+
+#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+
+struct cpu_iu_info linux_sparc_chips[] = {
+ { 0x17, 0x10, "TI UltraSparc I (SpitFire)"},
+ { 0x17, 0x11, "TI UltraSparc II (BlackBird)"},
+ { 0x17, 0x12, "TI UltraSparc III (Cheetah)"}, /* A guess... */
+};
+
+#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+
+char *sparc_cpu_type[NCPUS] = { "cpu-oops", "cpu-oops1", "cpu-oops2", "cpu-oops3" };
+char *sparc_fpu_type[NCPUS] = { "fpu-oops", "fpu-oops1", "fpu-oops2", "fpu-oops3" };
+
+unsigned int fsr_storage;
+
+__initfunc(void cpu_probe(void))
+{
+ int manuf, impl;
+ unsigned i, cpuid;
+ long ver, fpu_vers;
+
+ cpuid = get_cpuid();
+
+ __asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=r" (ver) : "r" (&fpu_vers));
+
+ manuf = ((ver >> 48)&0xffff);
+ impl = ((ver >> 32)&0xffff);
+
+ fpu_vers = ((fpu_vers>>17)&0x7);
+
+ for(i = 0; i<NSPARCCHIPS; i++) {
+ if(linux_sparc_chips[i].manuf == manuf)
+ if(linux_sparc_chips[i].impl == impl) {
+ sparc_cpu_type[cpuid] = linux_sparc_chips[i].cpu_name;
+ break;
+ }
+ }
+
+ if(i==NSPARCCHIPS) {
+ printk("DEBUG: manuf = 0x%x impl = 0x%x\n", manuf,
+ impl);
+ sparc_cpu_type[cpuid] = "Unknow CPU";
+ }
+
+ for(i = 0; i<NSPARCFPU; i++) {
+ if(linux_sparc_fpu[i].manuf == manuf && linux_sparc_fpu[i].impl == impl)
+ if(linux_sparc_fpu[i].fpu_vers == fpu_vers) {
+ sparc_fpu_type[cpuid] = linux_sparc_fpu[i].fp_name;
+ break;
+ }
+ }
+
+ if(i == NSPARCFPU) {
+ printk("DEBUG: manuf = 0x%x impl = 0x%x fsr.vers = 0x%x\n", manuf, impl,
+ (unsigned)fpu_vers);
+ sparc_fpu_type[cpuid] = "Unknown FPU";
+ }
+}
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
new file mode 100644
index 000000000..6aadd14e0
--- /dev/null
+++ b/arch/sparc64/kernel/devices.c
@@ -0,0 +1,67 @@
+/* devices.c: Initial scan of the prom device tree for important
+ * Sparc device nodes which we need to find.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/tasks.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <asm/page.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+
+struct prom_cpuinfo linux_cpus[NCPUS];
+int linux_num_cpus;
+
+extern void cpu_probe(void);
+
+__initfunc(unsigned long
+device_scan(unsigned long mem_start))
+{
+ char node_str[128];
+ int nd, prom_node_cpu, thismid;
+ int cpu_nds[NCPUS]; /* One node for each cpu */
+ int cpu_ctr = 0;
+
+ prom_getstring(prom_root_node, "device_type", node_str, sizeof(node_str));
+
+ if(strcmp(node_str, "cpu") == 0) {
+ cpu_nds[0] = prom_root_node;
+ linux_cpus[0].prom_node = prom_root_node;
+ linux_cpus[0].mid = 0;
+ cpu_ctr++;
+ } else {
+ int scan;
+ scan = prom_getchild(prom_root_node);
+ prom_printf("root child is %08x\n", (unsigned) scan);
+ nd = 0;
+ while((scan = prom_getsibling(scan)) != 0) {
+ prom_getstring(scan, "device_type", node_str, sizeof(node_str));
+ if(strcmp(node_str, "cpu") == 0) {
+ cpu_nds[cpu_ctr] = scan;
+ linux_cpus[cpu_ctr].prom_node = scan;
+ prom_getproperty(scan, "mid", (char *) &thismid, sizeof(thismid));
+ linux_cpus[cpu_ctr].mid = thismid;
+ prom_printf("Found CPU %d <node=%08x,mid=%d>\n",
+ cpu_ctr, (unsigned) scan,
+ thismid);
+ cpu_ctr++;
+ }
+ };
+ if(cpu_ctr == 0) {
+ printk("No CPU nodes found, cannot continue.\n");
+ halt();
+ }
+ printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
+ };
+ prom_node_cpu = cpu_nds[0];
+
+ linux_num_cpus = cpu_ctr;
+
+ cpu_probe();
+ return mem_start;
+}
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
new file mode 100644
index 000000000..31b87f3de
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -0,0 +1,80 @@
+/* $Id: dtlb_miss.S,v 1.11 1997/04/10 01:59:35 davem Exp $
+ * dtlb_miss.S: Data TLB miss code, this is included directly
+ * into the trap table.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/* The basic algorithm is:
+ *
+ * if(faulting_context != 0) {
+ * pgd = pgd_offset(current->mm.pgd, fault_address);
+ * page_table_walk_continue:
+ * pmd = pmd_offset(pgd, fault_address);
+ * pte = pte_offset(pmd, fault_address);
+ * if(pte & _PAGE_V) {
+ * tlb_load(pte, fault_address);
+ * return_from_trap();
+ * }
+ * goto longer_processing;
+ * } else {
+ * if(fault_address >= KERNBASE &&
+ * fault_address < VMALLOC_START) {
+ * tlb_load(__pa(fault_address) | PAGE_KERNEL);
+ * return_from_trap();
+ * } else {
+ * pgd = pgd_offset(swapper_pg_dir, fault_address);
+ * goto page_table_walk_continue;
+ * }
+ * }
+ *
+ * This is optimized for user TLB misses on purpose.
+ */
+
+#define KERN_HIGHBITS (_PAGE_VALID | _PAGE_SZ4MB)
+#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+#define KERN_LOWBITS_IO (_PAGE_E | _PAGE_P | _PAGE_W)
+
+ /* ICACHE line 1 */
+ /*0x00*/ ldxa [%g0] ASI_DMMU, %g1 ! Get TAG_TARGET
+ /*0x04*/ srlx %g1, 8, %g3 ! Position PGD offset
+ /*0x08*/ srlx %g1, 48, %g5 ! Shift down CONTEXT bits
+ /*0x0c*/ and %g3, %g2, %g3 ! Mask PGD offset
+ /*0x10*/ sllx %g1, 2, %g4 ! Position PMD offset
+ /*0x14*/ brz,pn %g5, 3f ! Context 0 == kernel
+ /*0x18*/ and %g4, %g2, %g4 ! Mask PMD offset
+ /*0x1c*/ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! For PTE offset
+
+ /* ICACHE line 2 */
+ /*0x20*/ ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5 ! Load PGD
+ /*0x24*/ srlx %g1, 1, %g1 ! PTE offset
+2:/*0x28*/ ldxa [%g5 + %g4] ASI_PHYS_USE_EC, %g3 ! Load PMD
+ /*0x2c*/ ldxa [%g3 + %g1] ASI_PHYS_USE_EC, %g5 ! Load PTE
+ /*0x30*/ brlz,a,pt %g5, 1f ! Valid set?
+ /*0x34*/ stxa %g5, [%g0] ASI_DTLB_DATA_IN ! TLB load
+ /*0x38*/ ba,a,pt %xcc, sparc64_dtlb_refbit_catch ! Nope...
+1:/*0x3c*/ retry ! Trap return
+
+3: /* ICACHE line 3 */
+ /*0x40*/ sllx %g1, 43, %g5 ! This gets >= VMALLOC_START...
+ /*0x44*/ brlz,pn %g5, 4f ! ...if now less than zero.
+ /*0x48*/ andncc %g1, 0x3ff, %g0 ! Slick trick...
+ /*0x4c*/ be,pn %xcc, 4f ! Yes, it is some PROM mapping
+ /*0x50*/ srlx %g5, 21, %g5 ! This is now physical page
+ /*0x54*/ sethi %uhi(KERN_HIGHBITS), %g1 ! Construct PTE
+ /*0x58*/ sllx %g1, 32, %g1 ! Move priv bits up
+ /*0x5c*/ or %g1, %g5, %g1 ! Or in the page
+
+ /* ICACHE line 4 */
+ /*0x60*/ or %g1, (KERN_LOWBITS), %g1 ! Set low priv bits
+ /*0x64*/ stxa %g1, [%g0] ASI_DTLB_DATA_IN ! TLB load
+ /*0x68*/ retry ! Trap return
+4:/*0x6c*/ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! For PTE offset
+ /*0x70*/ ldxa [%g6 + %g3] ASI_PHYS_USE_EC, %g5 ! Load kern PGD
+ /*0x74*/ ba,pt %xcc, 2b ! Go back up top
+ /*0x78*/ srlx %g1, 1, %g1
+ /*0x7c*/ nop
+
+#undef KERN_HIGHBITS
+#undef KERN_LOWBITS
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
new file mode 100644
index 000000000..8eec19260
--- /dev/null
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -0,0 +1,56 @@
+/* $Id: dtlb_prot.S,v 1.10 1997/03/25 09:47:13 davem Exp $
+ * dtlb_prot.S: Data TLB protection code, this is included directly
+ * into the trap table.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+ /* We know kernel never takes protection trap,
+ * this makes this routine a lot easier than it
+ * would be otherwise.
+ */
+
+#define MODIFIED_BITS (_PAGE_WRITE | _PAGE_W | _PAGE_MODIFIED | _PAGE_ACCESSED)
+
+ /* ICACHE line 1 */
+ /*0x00*/ ldxa [%g0] ASI_DMMU, %g1 ! Get TAG_TARGET
+ /*0x04*/ srlx %g1, 8, %g3 ! Position PGD offset
+ /*0x08*/ sllx %g1, 2, %g4 ! Position PMD offset
+ /*0x0c*/ and %g3, %g2, %g3 ! Mask PGD offset
+ /*0x10*/ and %g4, %g2, %g3 ! Mask PMD offset
+ /*0x14*/ ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5 ! Load PGD
+ /*0x18*/ ldxa [%g5 + %g3] ASI_PHYS_USE_EC, %g4 ! Load PMD
+ /*0x1c*/ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! For PTE offset
+
+ /* ICACHE line 2 */
+ /*0x20*/ srlx %g1, 1, %g1 ! PTE offset
+ /*0x24*/ ldxa [%g4 + %g1] ASI_PHYS_USE_EC, %g3 ! Load PTE
+ /*0x28*/ andcc %g3, _PAGE_WRITE, %g0 ! Writable?
+ /*0x2c*/ be,pt %xcc, sparc64_dtlb_fault ! Nope...
+ /*0x30*/ or %g3, (MODIFIED_BITS), %g3 ! Yes it is
+ /*0x34*/ mov TLB_TAG_ACCESS, %g5 ! Get the page
+ /*0x38*/ ldxa [%g5] ASI_DMMU, %g1 ! From MMU
+ /*0x3c*/ add %g2, 7, %g5 ! Compute mask
+
+ /* ICACHE line 3 */
+ /*0x40*/ andn %g1, %g5, %g1 ! Mask page
+ /*0x44*/ or %g1, 0x10, %g1 ! 2ndary Context
+ /*0x48*/ stxa %g0, [%g1] ASI_DMMU_DEMAP ! TLB flush page
+ /*0x4c*/ membar #Sync ! Synchronize
+ /*0x50*/ stxa %g3, [%g4 + %g1] ASI_PHYS_USE_EC ! Update sw PTE
+ /*0x54*/ stxa %g3, [%g0] ASI_DTLB_DATA_IN ! TLB load
+ /*0x58*/ retry ! Trap return
+ /*0x5c*/ nop
+
+ /* ICACHE line 4 */
+ /*0x60*/ nop
+ /*0x64*/ nop
+ /*0x68*/ nop
+ /*0x6c*/ nop
+ /*0x70*/ nop
+ /*0x74*/ nop
+ /*0x78*/ nop
+ /*0x7c*/ nop
+
+#undef MODIFIED_BITS
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
new file mode 100644
index 000000000..16fe5c8a0
--- /dev/null
+++ b/arch/sparc64/kernel/entry.S
@@ -0,0 +1,280 @@
+/* $Id: entry.S,v 1.14 1997/04/14 06:56:54 davem Exp $
+ * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/ptrace.h>
+#include <asm/page.h>
+#include <asm/signal.h>
+#include <asm/pgtable.h>
+
+#define curptr g6
+
+#define NR_SYSCALLS 256 /* Each OS is different... */
+
+ .text
+ .align 4
+/* FIXME: This is still debugging hack */
+ .globl sparc64_dtlb_fault, sparc64_dtlb_refbit_catch, sparc64_itlb_refbit_catch
+sparc64_dtlb_fault:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call sparc64_dtlb_fault_handler
+ nop
+
+sparc64_dtlb_refbit_catch:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call sparc64_dtlb_refbit_handler
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
+sparc64_itlb_refbit_catch:
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call sparc64_dtlb_refbit_handler
+ nop
+
+ /* Note check out head.h, this code isn't even used for UP,
+ * for SMP things will be different. In particular the data
+ * registers for cross calls will be:
+ *
+ * DATA 0: Address of function to call
+ * DATA 1: Argument 1, place in %g6
+ * DATA 2: Argument 2, place in %g7
+ *
+ * With this method we can do most of the cross-call tlb/cache
+ * flushing in very quickly.
+ */
+ .align 4
+ .globl do_ivec
+do_ivec:
+ ldxa [%g0] ASI_INTR_RECEIVE, %g1
+ andcc %g1, 0x20, %g0
+ be,pn %xcc, do_ivec_return
+ mov 0x40, %g2
+
+ /* Load up Interrupt Vector Data 0 register. */
+ sethi %uhi(ivector_to_mask), %g4
+ ldxa [%g2] ASI_UDB_INTR_R, %g3
+ or %g4, %ulo(ivector_to_mask), %g4
+ and %g3, 0x7ff, %g3
+ sllx %g4, 32, %g4
+ sethi %hi(ivector_to_mask), %g5
+ sllx %g3, 3, %g3
+ or %g5, %lo(ivector_to_mask), %g5
+ add %g5, %g4, %g4
+ ldx [%g4 + %g3], %g2
+ brz,pn %g2, do_ivec_spurious
+ nop
+
+ /* No branches, worse case we don't know about this interrupt
+ * yet, so we would just write a zero into the softint register
+ * which is completely harmless.
+ */
+ wr %g2, 0x0, %set_softint
+
+do_ivec_return:
+ /* Acknowledge the UPA */
+ stxa %g0, [%g0] ASI_INTR_RECEIVE
+ membar #Sync
+ retry
+
+do_ivec_spurious:
+ stxa %g0, [%g0] ASI_INTR_RECEIVE
+ rdpr %pstate, %g1
+ wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
+ ba,pt %xcc, etrap
+ rd %pc, %g7
+ call report_spurious_ivec
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ ba,pt %xcc, rtrap
+ nop
+
+breakpoint_t:
+ .asciz "Breakpoint Trap %lx\n"
+ .align 4
+ .globl breakpoint_trap
+breakpoint_trap:
+ mov %o0, %o1
+ sethi %hi(breakpoint_t), %o0
+ or %o0, %lo(breakpoint_t), %o0
+ call prom_printf
+ add %o0, %g4, %o0
+ call prom_cmdline
+ nop
+ ba,a,pt %xcc, rtrap
+
+ .globl sys_pipe, sys_execve, sys_sigpause, sys_nis_syscall
+
+sys_pipe:
+ sethi %hi(sparc_pipe), %g1
+ add %g1, %g4, %g1
+ jmpl %g1 + %lo(sparc_pipe), %g0
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
+sys_nis_syscall:
+ sethi %hi(c_sys_nis_syscall), %g1
+ add %g1, %g4, %g1
+ jmpl %g1 + %lo(c_sys_nis_syscall), %g0
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
+sys_execve:
+ sethi %hi(sparc_execve), %g1
+ add %g1, %g4, %g1
+ jmpl %g1 + %lo(sparc_execve), %g0
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
+sys_sigpause:
+ /* NOTE: %o0 has a correct value already */
+ call do_sigpause
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+
+ ld [%curptr + AOFF_task_flags], %l5
+ andcc %l5, 0x20, %g0
+ be,pt %icc, ret_sys_call
+ clr %o0
+ call syscall_trace
+ nop
+ ba,pt %xcc, ret_sys_call
+ clr %o0
+
+ /* This is how fork() was meant to be done, 11 instruction entry. -DaveM */
+ .globl sys_fork, sys_vfork, sys_clone
+sys_fork:
+sys_vfork:
+ mov SIGCHLD, %o0
+ clr %o1
+sys_clone:
+ mov %o7, %l5
+ flushw
+ rdpr %cwp, %o4
+ add %sp, STACK_BIAS + REGWIN_SZ, %o2
+ brz,a %o1, 1f
+ mov %fp, %o1
+1:
+ /* Don't try this at home. */
+ stx %o4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G0]
+ call do_fork
+ add %l5, 8, %o7
+
+linux_sparc_ni_syscall:
+ sethi %hi(sys_ni_syscall), %l7
+ or %l7, %lo(sys_ni_syscall), %l7
+ ba,pt %xcc,syscall_is_too_hard
+ add %l7, %g4, %l7
+
+linux_fast_syscall:
+ andn %l7, 3, %l7
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ jmpl %l7 + %g0, %g0
+ mov %i3, %o3
+
+linux_syscall_trace:
+ call syscall_trace
+ nop
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+ ba,pt %xcc, 2f
+ mov %i4, %o4
+
+ .globl ret_from_syscall
+ret_from_syscall:
+ ba,pt %xcc, ret_sys_call
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_I0], %o0
+
+ /* Linux native and SunOS system calls enter here... */
+ .align 4
+ .globl linux_sparc_syscall
+linux_sparc_syscall:
+ /* Direct access to user regs, must faster. */
+ cmp %g1, NR_SYSCALLS
+ add %l7, %g4, %l7
+ bgeu,pn %xcc, linux_sparc_ni_syscall
+ sll %g1, 3, %l4
+ ldx [%l7 + %l4], %l7
+ andcc %l7, 1, %g0
+ bne,pn %icc, linux_fast_syscall
+ /* Just do the next insn in the delay slot */
+
+ .globl syscall_is_too_hard
+syscall_is_too_hard:
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+
+ ldx [%curptr + AOFF_task_flags], %l5
+ mov %i3, %o3
+ mov %i4, %o4
+ andcc %l5, 0x20, %g0
+ bne,pn %icc, linux_syscall_trace
+ mov %i0, %l5
+2:
+ call %l7
+ mov %i5, %o5
+
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_I0]
+
+ .globl ret_sys_call
+ret_sys_call:
+ ldx [%curptr + AOFF_task_flags], %l6
+ mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE], %g3
+ cmp %o0, -ENOIOCTLCMD
+ sllx %g2, 32, %g2
+ bgeu,pn %xcc, 1f
+ andcc %l6, 0x20, %l6
+
+ /* System call success, clear Carry condition code. */
+ andn %g3, %g2, %g3
+ clr %l6
+ stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
+ ba,pt %xcc, rtrap
+ stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
+1:
+ /* System call failure, set Carry condition code.
+ * Also, get abs(errno) to return to the process.
+ */
+ sub %g0, %o0, %o0
+ or %g3, %g2, %g3
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_I0]
+ mov 1, %l6
+ stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_TSTATE]
+ bne,pn %icc, linux_syscall_trace2
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC], %l1 /* pc = npc */
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
+ ba,pt %xcc, rtrap
+ stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
+
+linux_syscall_trace2:
+ call syscall_trace
+ add %l1, 0x4, %l2 /* npc = npc+4 */
+ stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_TPC]
+ ba,pt %xcc, rtrap
+ stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_TNPC]
+
+/* End of entry.S */
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
new file mode 100644
index 000000000..7d293a88b
--- /dev/null
+++ b/arch/sparc64/kernel/etrap.S
@@ -0,0 +1,114 @@
+/* $Id: etrap.S,v 1.11 1997/04/14 17:04:45 jj Exp $
+ * etrap.S: Preparing for entry into the kernel on Sparc V9.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/head.h>
+
+/* We assume that pstate, when entering this, has AG and IE bits set, MG and IG clear */
+
+ .text
+ .align 32
+ .globl etrap, etrap_irq
+etrap:
+	rdpr %pil, %g4
+etrap_irq:
+	/* Build the %tstate image to be saved in %g1.  The current %pil
+	 * is folded into bits 20+ of the image (a software convention;
+	 * the trap-return path is expected to undo this), while %g2/%g3
+	 * pick up the trap pc and npc.
+	 */
+	rdpr %tstate, %g1
+	sllx %g4, 20, %g4
+	rdpr %tpc, %g2
+	or %g1, %g4, %g1
+	rdpr %tnpc, %g3
+
+	/* What happens more often? etrap when already in priv or from userland? */
+	andcc %g1, TSTATE_PRIV, %g0
+	bne,a,pn %xcc, 1f
+	 sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g5
+
+	/* Just when going from userland to privileged mode,
+	 * we have to change this stuff.
+	 *
+	 * Setup to run in NUCLEUS context, stash user context in
+	 * secondary for later trap return. Note we must not change
+	 * trap level until PRIMARY_CONTEXT is set to zero, else
+	 * we fall out of NUCLEUS too soon and crash hard.
+	 */
+	rdpr %wstate, %g5
+	mov PRIMARY_CONTEXT, %g7
+	ldxa [%g7] ASI_DMMU, %g4
+	mov SECONDARY_CONTEXT, %g6
+	stxa %g0, [%g7] ASI_DMMU
+	stxa %g4, [%g6] ASI_DMMU
+	wrpr %g0, 0x0, %tl
+
+	/* Shift the user window state up into the OTHER field of
+	 * %wstate so spills/fills of the user's windows take the
+	 * user-context handlers (assumes the OTHER field is bits 5:3
+	 * -- confirm against asm/pstate.h).  %g4 is loaded with the
+	 * upper half of KERNBASE, added to 32-bit kernel symbol
+	 * addresses to form full kernel virtual addresses below.
+	 */
+	sll %g5, 3, %g5
+	sethi %uhi(KERNBASE), %g4
+	or %g4, %ulo(KERNBASE), %g4
+	sethi %hi(current_set), %g6
+	or %g6, %lo(current_set), %g6
+	sllx %g4, 32, %g4
+	wrpr %g5, %wstate
+	rdpr %canrestore, %g5
+	ldx [%g6 + %g4], %g6
+#ifdef __SMP__
+/* FIXME: Fix the above insn for SMP */
+#endif
+	/* Turn every restorable user window into an "other" window,
+	 * then fetch this task's saved kernel stack pointer.
+	 */
+	wrpr %g0, 0, %canrestore
+	wrpr %g5, 0, %otherwin
+	ba,pt %xcc, 2f
+	 ldx [%g6 + AOFF_task_saved_kernel_stack], %g5
+1:
+	/* Already-privileged path: %g5 (trap frame carved below the
+	 * current %sp) was computed in the annulled delay slot above.
+	 */
+	wrpr %g0, 0x0, %tl
+2:
+	/* Dump the captured trap state into the pt_regs area that sits
+	 * above the register window of the new kernel frame.
+	 */
+	rd %y, %g4
+	stx %g1, [%g5 + REGWIN_SZ + PT_V9_TSTATE]
+	stx %g2, [%g5 + REGWIN_SZ + PT_V9_TPC]
+	stx %g3, [%g5 + REGWIN_SZ + PT_V9_TNPC]
+	stx %g4, [%g5 + REGWIN_SZ + PT_V9_Y]
+	rdpr %pstate, %g1
+	save %g5, -STACK_BIAS, %sp
+	mov %g1, %l1
+	mov %g7, %l2
+	/* wrpr XORs its immediate into the first operand, so this
+	 * clears PSTATE_AG: switch to the normal global set in order
+	 * to save the user's %g1-%g7 into the trap frame.  The old
+	 * pstate and the caller handle in %g7 were parked in locals
+	 * (%l1/%l2) to survive the switch.
+	 */
+	wrpr %l1, PSTATE_AG, %pstate
+	stx %g1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G1]
+	stx %g2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G2]
+	stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G3]
+	stx %g4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G4]
+	stx %g5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G5]
+	stx %g6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G6]
+	stx %g7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G7]
+	stx %i0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
+	stx %i1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+	stx %i2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
+	stx %i3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3]
+	stx %i4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4]
+	stx %i5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5]
+	stx %i6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6]
+	stx %i7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7]
+	/* XOR again: back onto the alternate globals, and toggle
+	 * PSTATE_IE (per the comment at the top of this file IE was
+	 * set on entry, so interrupts end up disabled here).
+	 */
+	wrpr %l1, (PSTATE_IE | PSTATE_AG), %pstate
+	sethi %uhi(KERNBASE), %g4
+	or %g4, %ulo(KERNBASE), %g4
+	sethi %hi(current_set), %g6
+	or %g6, %lo(current_set), %g6
+	sllx %g4, 32, %g4
+	/* Reload the current task pointer into %g6 and return to the
+	 * trap vector: by convention the caller left its address in
+	 * %g7 (preserved in %l2 across the global-set switches above)
+	 * -- verify against the users of etrap in the trap table.
+	 */
+	jmpl %l2 + 0x4, %g0
+	 ldx [%g6 + %g4], %g6
+#ifdef __SMP__
+/* FIXME: Fix the above insn for SMP */
+#endif
+
+	/* Trap entry for traps taken at trap-level > 0.  Capture the
+	 * %tstate/%tpc/%tnpc of the current trap level, then drop %tl
+	 * by one and join etrap's already-privileged path at 1b, with
+	 * the trap frame pointer computed in the branch delay slot.
+	 * (Note: unlike etrap, %pil is not folded into the %tstate
+	 * image on this path.)
+	 */
+	.globl etraptl1
+etraptl1:
+	rdpr %tl, %g4
+	rdpr %tstate, %g1
+	sub %g4, 1, %g4
+	rdpr %tpc, %g2
+	rdpr %tnpc, %g3
+	wrpr %g4, 0x0, %tl
+	ba,pt %xcc, 1b
+	 sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g5
diff --git a/arch/sparc64/kernel/hack.S b/arch/sparc64/kernel/hack.S
new file mode 100644
index 000000000..0aca22a77
--- /dev/null
+++ b/arch/sparc64/kernel/hack.S
@@ -0,0 +1,214 @@
+/* <hack>
+   This is just a huge ugly hack to get things compiled.
+   Hopefully will disappear quickly, once we get everything
+   to compile... */
+	.text
+	.align 8
+/* Every symbol below is a do-nothing stub: it simply returns to the
+ * caller (retl with a nop in the delay slot), producing no result.
+ */
+	.globl _sigpause_common
+_sigpause_common: retl;nop
+	.globl breakpoint
+breakpoint: retl;nop
+/* Trap-handler entry points (each with its trap-level-1 variant). */
+	.globl do_cee
+do_cee: retl;nop
+	.globl do_cee_tl1
+do_cee_tl1: retl;nop
+	.globl do_dae
+do_dae: retl;nop
+	.globl do_dae_tl1
+do_dae_tl1: retl;nop
+	.globl do_div0
+do_div0: retl;nop
+	.globl do_div0_tl1
+do_div0_tl1: retl;nop
+	.globl do_fpdis
+do_fpdis: retl;nop
+	.globl do_fpdis_tl1
+do_fpdis_tl1: retl;nop
+	.globl do_fpieee
+do_fpieee: retl;nop
+	.globl do_fpieee_tl1
+do_fpieee_tl1: retl;nop
+	.globl do_fpother
+do_fpother: retl;nop
+	.globl do_fpother_tl1
+do_fpother_tl1: retl;nop
+	.globl do_iae
+do_iae: retl;nop
+	.globl do_iae_tl1
+do_iae_tl1: retl;nop
+	.globl do_ill
+do_ill: retl;nop
+	.globl do_ill_tl1
+do_ill_tl1: retl;nop
+	.globl do_irq
+do_irq: retl;nop
+	.globl do_irq_tl1
+do_irq_tl1: retl;nop
+	.globl do_lddfmna
+do_lddfmna: retl;nop
+	.globl do_lddfmna_tl1
+do_lddfmna_tl1: retl;nop
+	.globl do_mna
+do_mna: retl;nop
+	.globl do_mna_tl1
+do_mna_tl1: retl;nop
+	.globl do_paw
+do_paw: retl;nop
+	.globl do_paw_tl1
+do_paw_tl1: retl;nop
+	.globl do_privact
+do_privact: retl;nop
+	.globl do_privop
+do_privop: retl;nop
+	.globl do_signal
+do_signal: retl;nop
+	.globl do_stdfmna
+do_stdfmna: retl;nop
+	.globl do_stdfmna_tl1
+do_stdfmna_tl1: retl;nop
+	.globl do_tof
+do_tof: retl;nop
+	.globl do_tof_tl1
+do_tof_tl1: retl;nop
+	.globl do_vaw
+do_vaw: retl;nop
+	.globl do_vaw_tl1
+do_vaw_tl1: retl;nop
+/* Assorted kernel helpers not yet ported to sparc64. */
+	.globl floppy_hardint
+floppy_hardint: retl;nop
+	.globl get_cpuid
+get_cpuid: retl;nop
+	.globl getcc
+getcc: retl;nop
+	.globl halt
+halt: retl;nop
+	.globl indirect_syscall
+indirect_syscall: retl;nop
+	.globl install_linux_ticker
+install_linux_ticker: retl;nop
+	.globl install_obp_ticker
+install_obp_ticker: retl;nop
+	.globl linux_dbvec
+linux_dbvec: retl;nop
+	.globl linux_num_cpus
+linux_num_cpus: retl;nop
+	.globl netbsd_syscall
+netbsd_syscall: retl;nop
+	.globl setcc
+setcc: retl;nop
+	.globl solaris_syscall
+solaris_syscall: retl;nop
+	.globl sunos_mmap
+sunos_mmap: retl;nop
+	.globl sunos_syscall
+sunos_syscall: retl;nop
+	.globl svr4_getcontext
+svr4_getcontext: retl;nop
+	.globl svr4_setcontext
+svr4_setcontext: retl;nop
+/* SunOS binary-compatibility system call stubs. */
+	.globl sunos_accept
+sunos_accept: retl;nop
+	.globl sunos_audit
+sunos_audit: retl;nop
+	.globl sunos_brk
+sunos_brk: retl;nop
+	.globl sunos_execv
+sunos_execv: retl;nop
+	.globl sunos_fpathconf
+sunos_fpathconf: retl;nop
+	.globl sunos_getdents
+sunos_getdents: retl;nop
+	.globl sunos_getdirentries
+sunos_getdirentries: retl;nop
+	.globl sunos_getdomainname
+sunos_getdomainname: retl;nop
+	.globl sunos_getdtablesize
+sunos_getdtablesize: retl;nop
+	.globl sunos_getgid
+sunos_getgid: retl;nop
+	.globl sunos_gethostid
+sunos_gethostid: retl;nop
+	.globl sunos_getpid
+sunos_getpid: retl;nop
+	.globl sunos_getsockopt
+sunos_getsockopt: retl;nop
+	.globl sunos_getuid
+sunos_getuid: retl;nop
+	.globl sunos_indir
+sunos_indir: retl;nop
+	.globl sunos_ioctl
+sunos_ioctl: retl;nop
+	.globl sunos_killpg
+sunos_killpg: retl;nop
+	.globl sunos_madvise
+sunos_madvise: retl;nop
+	.globl sunos_mctl
+sunos_mctl: retl;nop
+	.globl sunos_mincore
+sunos_mincore: retl;nop
+	.globl sunos_mount
+sunos_mount: retl;nop
+	.globl sunos_nop
+sunos_nop: retl;nop
+	.globl sunos_nosys
+sunos_nosys: retl;nop
+	.globl sunos_open
+sunos_open: retl;nop
+	.globl sunos_pathconf
+sunos_pathconf: retl;nop
+	.globl sunos_poll
+sunos_poll: retl;nop
+	.globl sunos_read
+sunos_read: retl;nop
+	.globl sunos_readv
+sunos_readv: retl;nop
+	.globl sunos_recv
+sunos_recv: retl;nop
+	.globl sunos_sbrk
+sunos_sbrk: retl;nop
+	.globl sunos_select
+sunos_select: retl;nop
+	.globl sunos_semsys
+sunos_semsys: retl;nop
+	.globl sunos_send
+sunos_send: retl;nop
+	.globl sunos_setpgrp
+sunos_setpgrp: retl;nop
+	.globl sunos_setsockopt
+sunos_setsockopt: retl;nop
+	.globl sunos_shmsys
+sunos_shmsys: retl;nop
+	.globl sunos_sigaction
+sunos_sigaction: retl;nop
+	.globl sunos_sigblock
+sunos_sigblock: retl;nop
+	.globl sunos_sigsetmask
+sunos_sigsetmask: retl;nop
+	.globl sunos_sstk
+sunos_sstk: retl;nop
+	.globl sunos_sysconf
+sunos_sysconf: retl;nop
+	.globl sunos_uname
+sunos_uname: retl;nop
+	.globl sunos_vadvise
+sunos_vadvise: retl;nop
+	.globl sunos_wait4
+sunos_wait4: retl;nop
+	.globl sunos_write
+sunos_write: retl;nop
+	.globl sunos_writev
+sunos_writev: retl;nop
+/* Native Linux entry points still unimplemented on sparc64. */
+	.globl sys_ptrace
+sys_ptrace: retl;nop
+	.globl sys_sigreturn
+sys_sigreturn: retl;nop
+	.globl sys_sigstack
+sys_sigstack: retl;nop
+	.globl sys_sigsuspend
+sys_sigsuspend: retl;nop
+	.globl syscall_trace
+syscall_trace: retl;nop
+	.globl sys32_ptrace
+sys32_ptrace: retl;nop
+	.globl do_sigpause
+do_sigpause: retl;nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
new file mode 100644
index 000000000..fdbe87aa3
--- /dev/null
+++ b/arch/sparc64/kernel/head.S
@@ -0,0 +1,373 @@
+/* $Id: head.S,v 1.27 1997/04/04 00:49:49 davem Exp $
+ * head.S: Initial boot code for the Sparc64 port of Linux.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/version.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/errno.h>
+#include <asm/lsu.h>
+#include <asm/head.h>
+
+/* This section from _start to sparc64_boot_end should fit into
+ * 0xffff.f800.0000.4000 to 0xffff.f800.0000.8000 and will be sharing space
+ * with bootup_user_stack, which is from 0xffff.f800.0000.4000 to
+ * 0xffff.f800.0000.6000 and bootup_kernel_stack, which is from
+ * 0xffff.f800.0000.6000 to 0xffff.f800.0000.8000.
+ */
+
+	.text
+	.globl start, _start, stext, _stext
+_start:
+start:
+_stext:
+stext:
+bootup_user_stack:
+! 0xfffff80000004000
+	/* The entry point shares its 8K with the bootup user stack
+	 * (see the comment at the top of this file and the .skip
+	 * directives near the end).
+	 */
+	b sparc64_boot
+	 flushw /* Flush register file. */
+
+/* This stuff has to be in sync with SILO and other potential boot loaders
+ * Fields should be kept upward compatible and whenever any change is made,
+ * HdrS version should be incremented.
+ */
+	.global root_flags, ram_flags, root_dev
+	.global ramdisk_image, ramdisk_size
+
+	.ascii "HdrS"
+	.word LINUX_VERSION_CODE
+	.half 0x0201 /* HdrS version */
+root_flags:
+	.half 1
+root_dev:
+	.half 0
+ram_flags:
+	.half 0
+ramdisk_image:
+	.word 0
+ramdisk_size:
+	.word 0
+	.word reboot_command
+
+	/* We must be careful, 32-bit OpenBOOT will get confused if it
+	 * tries to save away a register window to a 64-bit kernel
+	 * stack address. Flush all windows, disable interrupts,
+	 * remap if necessary, jump onto kernel trap table, then kernel
+	 * stack, or else we die.
+	 *
+	 * PROM entry point is on %o4
+	 */
+sparc64_boot:
+	/* Typically PROM has already enabled both MMU's and both on-chip
+	 * caches, but we do it here anyway just to be paranoid.
+	 */
+	mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
+	stxa %g1, [%g0] ASI_LSU_CONTROL
+
+	/*
+	 * Make sure we are in privileged mode, have address masking,
+	 * using the ordinary globals and have enabled floating
+	 * point.
+	 *
+	 * Again, typically PROM has left %pil at 13 or similar, and
+	 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
+	 */
+	wrpr %g0, 0xf, %pil /* Interrupts off. */
+	wrpr %g0, (PSTATE_PRIV|PSTATE_PEF), %pstate
+
+	/* Check if we are mapped where we expect to be in virtual
+	 * memory. The Solaris /boot elf format bootloader
+	 * will peek into our elf header and load us where
+	 * we want to be, otherwise we have to re-map.
+	 */
+current_pc:
+	rd %pc, %g3
+	sethi %uhi(KERNBASE), %g4
+	sllx %g4, 32, %g4
+
+	/* Check the run time program counter. */
+
+	set current_pc, %g5
+	add %g5, %g4, %g5
+	cmp %g3, %g5
+	/* Already executing at the KERNBASE link address: no TLB
+	 * mappings to build, go straight to common init.
+	 */
+	be %xcc, sun4u_init
+	 nop
+
+create_mappings:
+	/* %g5 holds the tlb data: a valid, locked, writable, global,
+	 * cacheable 4MB page TTE -- the physical address gets OR'd in
+	 * once we discover it below.
+	 */
+	sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
+	sllx %g5, 32, %g5
+	or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
+
+	/* Base of physical memory cannot reliably be assumed to be
+	 * at 0x0! Figure out where it happens to be. -DaveM
+	 */
+
+	/* Put PADDR tlb data mask into %g3. */
+	sethi %uhi(_PAGE_PADDR), %g3
+	or %g3, %ulo(_PAGE_PADDR), %g3
+	sllx %g3, 32, %g3
+	sethi %hi(_PAGE_PADDR), %g7
+	or %g7, %lo(_PAGE_PADDR), %g7
+	or %g3, %g7, %g3
+
+	/* Walk through entire ITLB, looking for entry which maps
+	 * our %pc currently, stick PADDR from there into %g5 tlb data.
+	 */
+	clr %l0 /* TLB entry walker. */
+	set 0x1fff, %l2 /* Page mask. */
+	rd %pc, %l3
+	andn %l3, %l2, %g2 /* vaddr comparator */
+1:
+	/* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+	ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+	nop
+	nop
+	nop
+	andn %g1, %l2, %g1 /* Get vaddr */
+	cmp %g1, %g2
+	be,a,pn %xcc, got_tlbentry
+	 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
+	/* BUG FIX: the loop bound must test the walker register %l0.
+	 * The original compared %l1, which is never written on this
+	 * path, so the scan either stopped after the first entry or
+	 * never terminated, depending on boot-time register junk.
+	 */
+	cmp %l0, (63 << 3) /* Entry 63 is the last one. */
+	blu,pt %xcc, 1b
+	 add %l0, (1 << 3), %l0
+
+boot_failed:
+	/* No ITLB entry maps our current %pc: nothing sane we can do,
+	 * trigger a trap with a recognizable register value.
+	 */
+	/* Debugging 8-) */
+	set 0xdeadbeef, %g1
+	t 0x11
+
+got_tlbentry:
+	/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
+	nop
+	nop
+	nop
+	and %g1, %g3, %g1 /* Mask to just get paddr bits. */
+	sub %g1, %g2, %g1 /* Get rid of %pc offset to get base. */
+
+	/* NOTE: We hold on to %g1 paddr base as we need it below to lock
+	 * NOTE: the PROM cif code into the TLB.
+	 */
+
+	or %g5, %g1, %g5 /* Or it into TAG being built. */
+
+	/* PROM never puts any TLB entries into the MMU with the lock bit
+	 * set. So we gladly use tlb entry 63 for KERNBASE, 62 for
+	 * boot time locked PROM CIF handler page, we remove the locked
+	 * bit for the CIF page in paging_init().
+	 */
+	mov TLB_TAG_ACCESS, %g3
+	mov (63 << 3), %g7
+	/* %g4 still holds the upper-half KERNBASE bits computed at
+	 * current_pc: write the tag, then the TTE, to install the
+	 * locked kernel mapping.
+	 */
+	stxa %g4, [%g3] ASI_IMMU /* KERNBASE into TLB TAG */
+	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
+	membar #Sync
+
+	/* Same for DTLB */
+	stxa %g4, [%g3] ASI_DMMU /* KERNBASE into TLB TAG */
+	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
+	membar #Sync
+
+	/* Kill instruction prefetch queues. */
+	flush %g4
+	membar #Sync
+
+	ba,pt %xcc, go_to_highmem
+	 nop
+
+go_to_highmem:
+	/* Now do a non-relative jump so that PC is in high-memory */
+	set go_to_highmem, %g2 is wrong -- keep original below.
+	set sun4u_init, %g2
+	jmpl %g2 + %g4, %g0
+	 nop
+
+sun4u_init:
+	/* Set ctx 0 */
+	mov PRIMARY_CONTEXT, %g7
+	stxa %g0, [%g7] ASI_DMMU
+	membar #Sync
+
+	mov SECONDARY_CONTEXT, %g7
+	stxa %g0, [%g7] ASI_DMMU
+	membar #Sync
+
+	/* The lock bit has to be removed from this page later on,
+	 * but before firing up init we will use PROM a lot, so we
+	 * lock it there now...
+	 */
+
+	/* Compute PROM CIF interface page TTE.  %g3 gets the virtual
+	 * address 0x8000 at which the CIF page is mapped (presumably a
+	 * convention shared with the p1275 code -- verify against
+	 * __p1275_loc users).
+	 */
+	sethi %hi(__p1275_loc), %g7
+	or %g7, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L), %g7
+	sethi %uhi(_PAGE_VALID), %g5
+	sethi %hi(0x8000), %g3
+	sllx %g5, 32, %g5
+	mov TLB_TAG_ACCESS, %g6
+	or %g5, %g7, %g5
+	add %g5, %g1, %g5 /* Add in physbase. */
+
+	mov (62 << 3), %g7 /* TLB entry 62 */
+	stxa %g3, [%g6] ASI_IMMU /* CIF page into TLB TAG */
+	stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
+	membar #Sync
+
+	/* Same for DTLB */
+	stxa %g3, [%g6] ASI_DMMU /* CIF page into TLB TAG */
+	stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
+	membar #Sync
+
+	/* Kill instruction prefetch queues. */
+	flush %g3
+	membar #Sync
+
+	/* We are now safely (we hope) in Nucleus context (0), rewrite
+	 * the KERNBASE TTE's so they no longer have the global bit set.
+	 * Don't forget to setup TAG_ACCESS first 8-)
+	 */
+	mov TLB_TAG_ACCESS, %g2
+	stxa %g4, [%g2] ASI_IMMU
+	stxa %g4, [%g2] ASI_DMMU
+
+	mov (63 << 3), %g7
+	ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
+	andn %g1, (_PAGE_G), %g1
+	stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
+	membar #Sync
+
+	ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
+	andn %g1, (_PAGE_G), %g1
+	stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
+	membar #Sync
+
+	/* Kill instruction prefetch queues. */
+	flush %g4
+	membar #Sync
+
+	/* Compute the number of windows in this machine
+	 * store this in nwindows and nwindowsm1
+	 */
+	rdpr %ver, %g1 /* Get VERSION register. */
+	sethi %hi(nwindows), %g2
+	and %g1, VERS_MAXWIN, %g5 /* maxwin field == nwindows - 1 */
+	or %g2,%lo(nwindows),%g2
+	add %g5, 1, %g6
+	/* BUG FIX: nwindowsm1 is declared 8 bytes _after_ nwindows in
+	 * the .data section below, so the offset from one symbol to
+	 * the other is (nwindowsm1 - nwindows).  The original used
+	 * (nwindows - nwindowsm1), i.e. -8, which pointed %g3 eight
+	 * bytes before nwindows: it clobbered whatever lives there
+	 * and left nwindowsm1 zero.
+	 */
+	add %g2, (nwindowsm1 - nwindows), %g3
+	stx %g6, [%g2 + %g4] /* nwindows = maxwin + 1 (KERNBASE high half in %g4) */
+	stx %g5, [%g3 + %g4] /* nwindowsm1 = maxwin */
+
+	/* %g6 permanently carries the current task pointer; point it
+	 * at init_task (KERNBASE high half added to form the full VA).
+	 * The PROM stack and CIF entry are parked in locals so they
+	 * survive the bss clearing below.
+	 */
+	sethi %hi(init_task), %g6
+	or %g6, %lo(init_task), %g6
+	add %g6, %g4, %g6 ! g6 usage is fixed as well
+	mov %sp, %l6
+	mov %o4, %l7
+
+	/* Switch onto the bootup kernel stack (biased V9 frame). */
+	sethi %hi(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5
+	or %g5, %lo(bootup_kernel_stack + 0x2000 - STACK_BIAS - REGWIN_SZ), %g5
+	add %g5, %g4, %sp
+	mov 0, %fp
+	wrpr %g0, 0, %wstate
+	wrpr %g0, 0x0, %tl
+
+	/* Clear the bss: round _end up to an 8K boundary and wipe
+	 * from __bss_start in 8K steps via bzero_1page.  Note each
+	 * call's delay slot advances the cursor before the callee
+	 * runs; %o0 was preloaded for the first iteration.
+	 */
+	sethi %hi(8191), %l2
+	or %l2, %lo(8191), %l2
+	sethi %hi(__bss_start), %l0
+	or %l0, %lo(__bss_start), %l0
+	sethi %hi(_end), %l1
+	or %l1, %lo(_end), %l1
+	add %l1, %l2, %l1
+	andn %l1, %l2, %l1
+	add %l2, 1, %l2
+	add %l0, %g4, %o0
+1:
+	call bzero_1page
+	 add %l0, %l2, %l0
+	cmp %l0, %l1
+	blu,pt %xcc, 1b
+	 add %l0, %g4, %o0
+
+	/* Now clear empty_zero_page -- the page at KERNBASE itself
+	 * (%g4), presumably; confirm against the linker script.
+	 */
+	call bzero_1page
+	 mov %g4, %o0
+
+	mov %l6, %o1 ! OpenPROM stack
+	call prom_init
+	 mov %l7, %o0 ! OpenPROM cif handler
+
+	/* Off we go.... */
+	call start_kernel
+	 nop
+	/* Not reached... */
+
+	/* Install the kernel trap table and pre-load the MMU and
+	 * interrupt alternate-global register sets.  Caller passes the
+	 * kernel PGD pointer in %o0; assumes %g4 still carries the
+	 * KERNBASE high-half bits set up during boot.
+	 */
+	.globl setup_tba
+setup_tba:
+	sethi %hi(sparc64_ttable_tl0), %g5
+	add %g5, %g4, %g5
+	wrpr %g5, %tba
+
+	/* Set up MMU globals (wrpr XORs its immediate, so this flips
+	 * PSTATE_MG on, selecting the MMU global register set).
+	 */
+	rdpr %pstate, %o1
+	wrpr %o1, PSTATE_MG, %pstate
+
+	/* PGD/PMD offset mask, used by TLB miss handlers. */
+	sethi %hi(0x1ff8), %g2
+	or %g2, %lo(0x1ff8), %g2
+
+	/* Kernel PGDIR used by TLB miss handlers. */
+	mov %o0, %g6
+
+	/* To catch bootup bugs, this is user PGDIR for TLB miss handlers. */
+	clr %g7
+
+	/* Setup Interrupt globals (another XOR: MG off, IG on). */
+	wrpr %o1, PSTATE_IG, %pstate
+	sethi %uhi(ivector_to_mask), %g4
+	or %g4, %ulo(ivector_to_mask), %g4
+	sethi %hi(ivector_to_mask), %g5
+	or %g5, %lo(ivector_to_mask), %g5
+	or %g5, %g4, %g1 /* IVECTOR table */
+	mov 0x40, %g2 /* INTR data 0 register */
+
+	/* Back to the normal globals with interrupts disabled. */
+	andn %o1, PSTATE_IE, %o1
+	wrpr %g0, %g0, %wstate
+	wrpr %o1, %g0, %pstate
+
+	/* Zap TSB BASE to zero with TSB_size==1. */
+	mov TSB_REG, %o4
+	mov 1, %o5
+	stxa %o5, [%o4] ASI_DMMU
+	stxa %o5, [%o4] ASI_IMMU
+
+	membar #Sync
+
+	retl
+	 nop
+
+sparc64_boot_end:
+	/* Pad so bootup_user_stack_end lands at _start + 0x2000: the
+	 * boot code above shares the bootup user stack area (see the
+	 * comment at the top of this file).
+	 */
+	.skip 0x2000 + _start - sparc64_boot_end
+bootup_user_stack_end:
+
+bootup_kernel_stack:
+	.skip 0x2000
+
+! 0xfffff80000008000
+
+#include "ttable.S"
+
+	.data
+	.align 8
+	/* Number of register windows on this cpu, and that value minus
+	 * one; filled in by the boot code above from the %ver register.
+	 */
+	.globl nwindows, nwindowsm1
+nwindows: .xword 0
+nwindowsm1: .xword 0
+	.section ".fixup",#alloc,#execinstr
+	/* Common landing pad for user-access fault fixups: returns
+	 * -EFAULT to the faulting function's caller via the restore.
+	 */
+	.globl __ret_efault
+__ret_efault:
+	ret
+	 restore %g0, -EFAULT, %o0
diff --git a/arch/sparc64/kernel/idprom.c b/arch/sparc64/kernel/idprom.c
new file mode 100644
index 000000000..08a4a6b6a
--- /dev/null
+++ b/arch/sparc64/kernel/idprom.c
@@ -0,0 +1,49 @@
+/* $Id: idprom.c,v 1.2 1997/04/17 02:28:10 miguel Exp $
+ * idprom.c: Routines to load the idprom into kernel addresses and
+ * interpret the data contained within.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/idprom.h>
+
+/* Public pointer, aimed at the private kernel copy by idprom_init(). */
+struct idprom *idprom;
+static struct idprom idprom_buffer;
+
+/* Calculate the IDPROM checksum (xor of the data bytes). */
+/* XOR together the first 15 bytes (offsets 0x00-0x0E) of the IDPROM;
+ * the result must match the checksum byte stored in the prom itself.
+ */
+__initfunc(static unsigned char calc_idprom_cksum(struct idprom *idprom))
+{
+	unsigned char *byte = (unsigned char *) idprom;
+	unsigned char sum = 0;
+	int n;
+
+	for (n = 0; n < 0x0F; n++)
+		sum ^= byte[n];
+
+	return sum;
+}
+
+/* Create a local IDPROM copy and verify integrity. */
+/* Create a local IDPROM copy from the firmware, verify its format byte
+ * and checksum (warning only on mismatch), then report the ethernet
+ * address it carries.
+ */
+__initfunc(void idprom_init(void))
+{
+	unsigned char cksum;
+
+	prom_get_idprom((char *) &idprom_buffer, sizeof(idprom_buffer));
+
+	idprom = &idprom_buffer;
+
+	/* Format 0x01 is the only layout we know how to parse. */
+	if (idprom->id_format != 0x01) {
+		prom_printf("IDPROM: Warning, unknown format type!\n");
+	}
+
+	/* Compute the checksum once rather than twice as before. */
+	cksum = calc_idprom_cksum(idprom);
+	if (idprom->id_cksum != cksum) {
+		prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
+			    idprom->id_cksum, cksum);
+	}
+
+	printk("Ethernet address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       idprom->id_ethaddr[0], idprom->id_ethaddr[1],
+	       idprom->id_ethaddr[2], idprom->id_ethaddr[3],
+	       idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
+}
diff --git a/arch/sparc64/kernel/ioport.c b/arch/sparc64/kernel/ioport.c
new file mode 100644
index 000000000..2f94e9102
--- /dev/null
+++ b/arch/sparc64/kernel/ioport.c
@@ -0,0 +1,139 @@
+/* $Id: ioport.c,v 1.7 1997/04/10 05:13:01 davem Exp $
+ * ioport.c: Simple io mapping allocator.
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/vaddrs.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+/* This points to the next to use virtual memory for io mappings */
+static unsigned long dvma_next_free = DVMA_VADDR;
+/* Bottom of the IO virtual area handed to occupy_region() below. */
+unsigned long sparc_iobase_vaddr = IOBASE_VADDR;
+
+extern void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr);
+
+/*
+ * sparc_alloc_io:
+ * Maps and allocates an obio device.
+ * Implements a simple linear allocator, you can force the function
+ * to use your own mapping, but in practice this should not be used.
+ *
+ * Input:
+ * address: Physical address to map
+ * virtual: if non zero, specifies a fixed virtual address where
+ * the mapping should take place.
+ * len: the length of the mapping
+ * bus_type: Optional high word of physical address.
+ *
+ * Returns:
+ * The virtual address where the mapping actually took place.
+ */
+
+void *sparc_alloc_io (u32 address, void *virtual, int len, char *name,
+		      u32 bus_type, int rdonly)
+{
+	unsigned long vaddr, base_address;
+	/* Full 64-bit physical address: bus_type supplies bits 32+. */
+	unsigned long addr = ((unsigned long) address) + (((unsigned long) bus_type) << 32);
+	unsigned long offset = (addr & (~PAGE_MASK));
+
+	if (virtual) {
+		vaddr = (unsigned long) virtual;
+
+		/* Widen by the sub-page offset so whole pages are
+		 * checked, reserved and mapped below.
+		 */
+		len += offset;
+		if(((unsigned long) virtual + len) > (IOBASE_VADDR + IOBASE_LEN)) {
+			prom_printf("alloc_io: Mapping outside IOBASE area\n");
+			prom_halt();
+		}
+		if(check_region ((vaddr | offset), len)) {
+			prom_printf("alloc_io: 0x%lx is already in use\n", vaddr);
+			prom_halt();
+		}
+
+		/* Tell Linux resource manager about the mapping */
+		request_region ((vaddr | offset), len, name);
+	} else {
+		/* Carve a fresh page-aligned range out of the IO area.
+		 * NOTE(review): on this path len is NOT advanced by
+		 * offset, so with a non-zero offset the mapping loop
+		 * below can cover one page fewer than occupy_region
+		 * reserved -- verify against callers.
+		 */
+		vaddr = occupy_region(sparc_iobase_vaddr, IOBASE_END,
+		    (offset + len + PAGE_SIZE-1) & PAGE_MASK, PAGE_SIZE, name);
+		if (vaddr == 0) {
+			/* Usually we cannot see printks in this case. */
+			prom_printf("alloc_io: cannot occupy %d region\n", len);
+			prom_halt();
+		}
+	}
+
+	base_address = vaddr;
+	/* Do the actual mapping */
+	for (; len > 0; len -= PAGE_SIZE) {
+		mapioaddr(addr, vaddr, bus_type, rdonly);
+		vaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+	}
+
+	/* Hand back the caller's address, sub-page offset included. */
+	return (void *) (base_address | offset);
+}
+
+/* Undo a sparc_alloc_io() mapping: give the (page-rounded) region back
+ * to the resource manager and tear down every page mapping it covered.
+ */
+void sparc_free_io (void *virtual, int len)
+{
+	unsigned long base = (unsigned long) virtual & PAGE_MASK;
+	unsigned long page_off = (unsigned long) virtual & ~PAGE_MASK;
+	unsigned long span = (page_off + len + PAGE_SIZE-1) & PAGE_MASK;
+
+	release_region(base, span);
+
+	/* Unmap from the last page back down to the first. */
+	while (span != 0) {
+		span -= PAGE_SIZE;
+		unmapioaddr(base + span);
+	}
+}
+
+/* Does DVMA allocations with PAGE_SIZE granularity. How this basically
+ * works is that the ESP chip can do DVMA transfers at ANY address with
+ * certain size and boundary restrictions. But other devices that are
+ * attached to it and would like to do DVMA have to set things up in
+ * a special way, if the DVMA sees a device attached to it transfer data
+ * at addresses above DVMA_VADDR it will grab them, this way it no
+ * longer has to know the peculiarities of where to read the Lance data
+ * from. (for example)
+ *
+ * Returns CPU visible address for the buffer returned, dvma_addr is
+ * set to the DVMA visible address.
+ */
+void *sparc_dvma_malloc (int len, char *name, __u32 *dvma_addr)
+{
+	unsigned long vaddr, base_address;
+
+	/* Linear allocator: grab the next free slot, dying via the
+	 * PROM if it is taken or if it would run past the DVMA area.
+	 */
+	vaddr = dvma_next_free;
+	if(check_region (vaddr, len)) {
+		prom_printf("alloc_dma: 0x%lx is already in use\n", vaddr);
+		prom_halt();
+	}
+	if(vaddr + len > (DVMA_VADDR + DVMA_LEN)) {
+		prom_printf("alloc_dvma: out of dvma memory\n");
+		prom_halt();
+	}
+
+	/* Basically these can be mapped just like any old
+	 * IO pages, cacheable bit off, etc. The physical
+	 * pages are now mapped dynamically to save space.
+	 */
+	base_address = vaddr;
+	/* mmu_map_dma_area fills in *dvma_addr with the bus address. */
+	mmu_map_dma_area(base_address, len, dvma_addr);
+
+	/* Assign the memory area. */
+	dvma_next_free = PAGE_ALIGN(dvma_next_free+len);
+
+	request_region(base_address, len, name);
+
+	return (void *) base_address;
+}
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
new file mode 100644
index 000000000..bc9a8053e
--- /dev/null
+++ b/arch/sparc64/kernel/irq.c
@@ -0,0 +1,638 @@
+/* $Id: irq.c,v 1.12 1997/04/16 05:56:20 davem Exp $
+ * irq.c: UltraSparc IRQ handling/init/registry.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/config.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/malloc.h>
+#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/sbus.h>
+#include <asm/iommu.h>
+#include <asm/upa.h>
+#include <asm/oplib.h>
+#include <asm/smp.h>
+#include <asm/hardirq.h>
+#include <asm/softirq.h>
+
+/* Internal flag, should not be visible elsewhere at all. */
+#define SA_SYSIO_MASKED 0x100
+
+/* UPA nodes send interrupt packet to UltraSparc with first data reg value
+ * low 5 bits holding the IRQ identifier being delivered. We must translate
+ * this into a non-vector IRQ so we can set the softint on this cpu. To
+ * make things even more swift we store the complete mask here.
+ */
+
+#define NUM_IVECS 2048 /* XXX may need more on sunfire/wildfire */
+
+unsigned long ivector_to_mask[NUM_IVECS];
+
+/* This is based upon code in the 32-bit Sparc kernel written mostly by
+ * David Redman (djhr@tadpole.co.uk).
+ */
+#define MAX_STATIC_ALLOC 4
+static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
+static int static_irq_count = 0;
+
+static struct irqaction *irq_action[NR_IRQS+1] = {
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
+};
+
+/* Render the /proc/interrupts listing into buf: one line per slot in
+ * irq_action with its interrupt count, a '+' marking SA_INTERRUPT
+ * (fast) handlers, and any shared handlers appended after commas.
+ * Returns the number of bytes written.
+ */
+int get_irq_list(char *buf)
+{
+ int i, len = 0;
+ struct irqaction *action;
+
+ for(i = 0; i < (NR_IRQS + 1); i++) {
+ if(!(action = *(i + irq_action)))
+ continue;
+ len += sprintf(buf + len, "%2d: %8d %c %s",
+ i, kstat.interrupts[i],
+ (action->flags & SA_INTERRUPT) ? '+' : ' ',
+ action->name);
+ /* Walk the SA_SHIRQ chain and append the sharers' names. */
+ for(action = action->next; action; action = action->next) {
+ len += sprintf(buf+len, ",%s %s",
+ (action->flags & SA_INTERRUPT) ? " +" : "",
+ action->name);
+ }
+ len += sprintf(buf + len, "\n");
+ }
+ return len;
+}
+
+/* INO number to Sparc PIL level. */
+static unsigned char ino_to_pil[] = {
+ 0, 1, 2, 3, 5, 7, 8, 9, /* SBUS slot 0 */
+ 0, 1, 2, 3, 5, 7, 8, 9, /* SBUS slot 1 */
+ 0, 1, 2, 3, 5, 7, 8, 9, /* SBUS slot 2 */
+ 0, 1, 2, 3, 5, 7, 8, 9, /* SBUS slot 3 */
+ 3, /* Onboard SCSI */
+ 5, /* Onboard Ethernet */
+/*XXX*/ 8, /* Onboard BPP */
+ 0, /* Bogon */
+ 13, /* Audio */
+/*XXX*/15, /* PowerFail */
+ 0, /* Bogon */
+ 0, /* Bogon */
+ 12, /* Zilog Serial Channels (incl. Keyboard/Mouse lines) */
+ 11, /* Floppy */
+ 0, /* Spare Hardware (bogon for now) */
+ 0, /* Keyboard (bogon for now) */
+ 0, /* Mouse (bogon for now) */
+ 0, /* Serial (bogon for now) */
+ 0, 0, /* Bogon, Bogon */
+ 10, /* Timer 0 */
+ 11, /* Timer 1 */
+ 0, 0, /* Bogon, Bogon */
+ 15, /* Uncorrectable SBUS Error */
+ 15, /* Correctable SBUS Error */
+ 15, /* SBUS Error */
+/*XXX*/ 0, /* Power Management (bogon for now) */
+};
+
+/* INO number to IMAP register offset for SYSIO external IRQ's.
+ * This should conform to both Sunfire/Wildfire server and Fusion
+ * desktop designs.
+ */
+#define offset(x) ((unsigned long)(&(((struct sysio_regs *)0)->x)))
+#define bogon ((unsigned long) -1)
+static unsigned long irq_offsets[] = {
+/* SBUS Slot 0 --> 3, level 1 --> 7 */
+offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
+offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),offset(imap_slot0),
+offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
+offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),offset(imap_slot1),
+offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
+offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),offset(imap_slot2),
+offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
+offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),offset(imap_slot3),
+/* Onboard devices (not relevant/used on SunFire). */
+offset(imap_scsi), offset(imap_eth), offset(imap_bpp), bogon,
+offset(imap_audio), offset(imap_pfail), bogon, bogon,
+offset(imap_kms), offset(imap_flpy), offset(imap_shw),
+offset(imap_kbd), offset(imap_ms), offset(imap_ser), bogon, bogon,
+offset(imap_tim0), offset(imap_tim1), bogon, bogon,
+offset(imap_ue), offset(imap_ce), offset(imap_sberr),
+offset(imap_pmgmt),
+};
+
+#undef bogon
+
+#define NUM_IRQ_ENTRIES (sizeof(irq_offsets) / sizeof(irq_offsets[0]))
+
+/* Convert an "interrupts" property IRQ level to an SBUS/SYSIO
+ * Interrupt Mapping register pointer, or NULL if none exists.
+ */
+static unsigned int *irq_to_imap(unsigned int irq)
+{
+ unsigned long offset;
+ struct sysio_regs *sregs;
+
+ /* IRQ 14 is the locally generated timer interrupt and has no
+  * SYSIO mapping register; out-of-range and bogon (-1) table
+  * entries likewise yield NULL.
+  */
+ if((irq == 14) ||
+ (irq >= NUM_IRQ_ENTRIES) ||
+ ((offset = irq_offsets[irq]) == ((unsigned long)-1)))
+ return NULL;
+ sregs = SBus_chain->iommu->sysio_regs;
+ offset += ((unsigned long) sregs);
+ /* NOTE(review): the +1 addresses the second 32-bit word of the
+  * 64-bit IMAP register -- presumably the live half; confirm
+  * against the SYSIO register layout.
+  */
+ return ((unsigned int *)offset) + 1;
+}
+
+/* Convert Interrupt Mapping register pointer to associated
+ * Interrupt Clear register pointer.  The IMAP and ICLR banks are
+ * laid out in parallel inside struct sysio_regs, so a single
+ * constant displacement converts one to the other.
+ */
+static unsigned int *imap_to_iclr(unsigned int *imap)
+{
+ unsigned long diff;
+
+ diff = offset(iclr_unused0) - offset(imap_slot0);
+ return (unsigned int *) (((unsigned long)imap) + diff);
+}
+
+#undef offset
+
+/* For non-SBUS IRQ's we do nothing, else we must enable them in the
+ * appropriate SYSIO interrupt map registers.
+ */
+void enable_irq(unsigned int irq)
+{
+ unsigned long tid;
+ unsigned int *imap;
+
+ /* If this is for the tick interrupt, just ignore, note
+ * that this is the one and only locally generated interrupt
+ * source, all others come from external sources (essentially
+ * any UPA device which is an interruptor). (actually, on
+ * second thought Ultra can generate local interrupts for
+ * async memory errors and we may setup handlers for those
+ * at some point as well)
+ *
+ * XXX See commentary below in request_irq() this assumption
+ * XXX is broken and needs to be fixed.
+ */
+ if(irq == 14)
+ return;
+
+ /* Check for bogons. */
+ imap = irq_to_imap(irq);
+ if(imap == NULL)
+ goto do_the_stb_watoosi;
+
+ /* We send it to our UPA MID, for SMP this will be different. */
+ __asm__ __volatile__("ldxa [%%g0] %1, %0" : "=r" (tid) : "i" (ASI_UPA_CONFIG));
+ /* Shift our module ID into position for the IMAP TID field
+  * (masked with SYSIO_IMAP_TID below).
+  */
+ tid = ((tid & UPA_CONFIG_MID) << 9);
+
+ /* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
+ * of this SYSIO's preconfigured IGN in the SYSIO Control
+ * Register, the hardware just mirrors that value here.
+ * However for Graphics and UPA Slave devices the full
+ * SYSIO_IMAP_INR field can be set by the programmer here.
+ * (XXX we will have to handle those for FFB etc. XXX)
+ */
+ *imap = SYSIO_IMAP_VALID | (tid & SYSIO_IMAP_TID);
+ return;
+
+do_the_stb_watoosi:
+ printk("Cannot enable irq(%d), doing the \"STB Watoosi\" instead.", irq);
+ panic("Trying to enable bogon IRQ");
+}
+
+/* Disable an IRQ by clearing the VALID bit in its SYSIO interrupt
+ * mapping register.  The locally generated timer IRQ (14) is silently
+ * ignored; an IRQ with no mapping register is a fatal bogon.
+ */
+void disable_irq(unsigned int irq)
+{
+ unsigned int *imap;
+
+ /* XXX Grrr, I know this is broken... */
+ if(irq == 14)
+ return;
+
+ /* Check for bogons. */
+ imap = irq_to_imap(irq);
+ if(imap == NULL)
+ goto do_the_stb_watoosi;
+
+ /* NOTE: We do not want to futz with the IRQ clear registers
+ * and move the state to IDLE, the SCSI code does call
+ * disable_irq() to assure atomicity in the queue cmd
+ * SCSI adapter driver code. Thus we'd lose interrupts.
+ */
+ *imap &= ~(SYSIO_IMAP_VALID);
+ return;
+
+do_the_stb_watoosi:
+ printk("Cannot disable irq(%d), doing the \"STB Watoosi\" instead.", irq);
+ /* Fixed copy-paste from enable_irq(): this is the disable path. */
+ panic("Trying to disable bogon IRQ");
+}
+
+/* Register a handler for irq (a SYSIO INO, or 14 for the local timer
+ * tick).  The irq_action table is indexed by the Sparc PIL that the
+ * INO maps to via ino_to_pil[]; SA_SHIRQ handlers are chained at the
+ * tail.  Returns 0 on success or a negative errno.
+ */
+int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char *name, void *dev_cookie)
+{
+ struct irqaction *action, *tmp = NULL;
+ unsigned long flags;
+ unsigned int cpu_irq, *imap, *iclr;
+
+ /* XXX This really is not the way to do it, the "right way"
+ * XXX is to have drivers set SA_SBUS or something like that
+ * XXX in irqflags and we base our decision here on whether
+ * XXX that flag bit is set or not.
+ */
+ if(irq == 14)
+ cpu_irq = irq;
+ else
+ cpu_irq = ino_to_pil[irq];
+
+ if(!handler)
+ return -EINVAL;
+
+ imap = irq_to_imap(irq);
+
+ /* Sharing is only allowed when both the existing chain and the new
+  * request ask for it, and fast/slow handlers may not be mixed.
+  */
+ action = *(cpu_irq + irq_action);
+ if(action) {
+ if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
+ for (tmp = action; tmp->next; tmp = tmp->next)
+ ;
+ else
+ return -EBUSY;
+
+ if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
+ printk("Attempt to mix fast and slow interrupts on IRQ%d "
+ "denied\n", irq);
+ return -EBUSY;
+ }
+ action = NULL; /* Or else! */
+ }
+
+ save_and_cli(flags);
+
+ /* If this is flagged as statically allocated then we use our
+ * private struct which is never freed.
+ */
+ if(irqflags & SA_STATIC_ALLOC)
+ if(static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
+ "using kmalloc\n", irq, name);
+
+ if(action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+
+ if(!action) {
+ restore_flags(flags);
+ return -ENOMEM;
+ }
+
+ /* SYSIO-routed IRQ: record the softint mask for its vector and
+  * stash the ICLR register pointer in action->mask so handler_irq()
+  * can write SYSIO_ICLR_IDLE after servicing.
+  */
+ if(imap) {
+ int ivindex = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO));
+
+ ivector_to_mask[ivindex] = (1<<cpu_irq);
+ iclr = imap_to_iclr(imap);
+ action->mask = (unsigned long) iclr;
+ irqflags |= SA_SYSIO_MASKED;
+ } else {
+ action->mask = 0;
+ }
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->name = name;
+ action->next = NULL;
+ action->dev_id = dev_cookie;
+
+ if(tmp)
+ tmp->next = action;
+ else
+ *(cpu_irq + irq_action) = action;
+
+ enable_irq(irq);
+ restore_flags(flags);
+ return 0;
+}
+
+/* Unregister a handler.  dev_cookie selects the entry to remove from
+ * a shared chain; removing the last handler on a PIL also disables
+ * the IRQ.  Statically allocated actions are never freed.
+ * NOTE(review): 'action' is dereferenced (action->handler) before a
+ * NULL check -- freeing an IRQ with no registered handler at that PIL
+ * would dereference NULL; confirm callers never do that.
+ */
+void free_irq(unsigned int irq, void *dev_cookie)
+{
+ struct irqaction *action;
+ struct irqaction *tmp = NULL;
+ unsigned long flags;
+ unsigned int cpu_irq;
+
+ if(irq == 14)
+ cpu_irq = irq;
+ else
+ cpu_irq = ino_to_pil[irq];
+ action = *(cpu_irq + irq_action);
+ if(!action->handler) {
+ printk("Freeing free IRQ %d\n", irq);
+ return;
+ }
+ if(dev_cookie) {
+ /* Locate the chain entry owned by dev_cookie; tmp trails
+  * as its predecessor for the unlink below.
+  */
+ for( ; action; action = action->next) {
+ if(action->dev_id == dev_cookie)
+ break;
+ tmp = action;
+ }
+ if(!action) {
+ printk("Trying to free free shared IRQ %d\n", irq);
+ return;
+ }
+ } else if(action->flags & SA_SHIRQ) {
+ printk("Trying to free shared IRQ %d with NULL device cookie\n", irq);
+ return;
+ }
+
+ if(action->flags & SA_STATIC_ALLOC) {
+ printk("Attempt to free statically allocated IRQ %d (%s)\n",
+ irq, action->name);
+ return;
+ }
+
+ save_and_cli(flags);
+ if(action && tmp)
+ tmp->next = action->next;
+ else
+ *(cpu_irq + irq_action) = action->next;
+
+ /* Drop the softint mask for the SYSIO interrupt vector. */
+ if(action->flags & SA_SYSIO_MASKED) {
+ unsigned int *imap = irq_to_imap(irq);
+ if(imap != NULL)
+ ivector_to_mask[*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO)] = 0;
+ else
+ printk("free_irq: WHeee, SYSIO_MASKED yet no imap reg.\n");
+ }
+
+ kfree(action);
+ if(!*(cpu_irq + irq_action))
+ disable_irq(irq);
+
+ restore_flags(flags);
+}
+
+/* Per-processor IRQ locking depth, both SMP and non-SMP code use this. */
+unsigned int local_irq_count[NR_CPUS];
+atomic_t __sparc64_bh_counter = ATOMIC_INIT(0);
+
+#ifdef __SMP__
+#error SMP not supported on sparc64 just yet
+#else
+
+#define irq_enter(cpu, irq) (local_irq_count[cpu]++)
+#define irq_exit(cpu, irq) (local_irq_count[cpu]--)
+
+#endif /* __SMP__ */
+
+/* Log an interrupt vector that arrived with no registered consumer;
+ * called from the trap path with the interrupted PC in regs->tpc.
+ */
+void report_spurious_ivec(struct pt_regs *regs)
+{
+ printk("IVEC: Spurious interrupt vector received at (%016lx)\n",
+ regs->tpc);
+ return;
+}
+
+/* Fatal path for an IRQ with no (valid) handler: dump trap state and
+ * the registered expectations, then drop into the PROM command line.
+ * NOTE(review): 'irq & NR_IRQS' is only a correct table index if
+ * NR_IRQS is a power of two minus one -- verify.
+ * NOTE(review): the loop below prints the same 'action' 16 times;
+ * presumably it was meant to walk the chain or the table -- confirm.
+ */
+void unexpected_irq(int irq, void *dev_cookie, struct pt_regs *regs)
+{
+ int i;
+ struct irqaction *action;
+ unsigned int cpu_irq;
+
+ cpu_irq = irq & NR_IRQS;
+ action = *(cpu_irq + irq_action);
+
+ prom_printf("Unexpected IRQ[%d]: ", irq);
+ prom_printf("PC[%016lx] NPC[%016lx] FP[%016lx]\n",
+ regs->tpc, regs->tnpc, regs->u_regs[14]);
+
+ if(action) {
+ prom_printf("Expecting: ");
+ for(i = 0; i < 16; i++) {
+ if(action->handler)
+ prom_printf("[%s:%d:0x%016lx] ", action->name,
+ i, (unsigned long) action->handler);
+ }
+ }
+ prom_printf("AIEEE\n");
+ prom_printf("bogus interrupt received\n");
+ prom_cmdline ();
+}
+
+/* Main IRQ dispatch: clear the softint bit, then run every handler on
+ * the chain for this PIL, writing ICLR_IDLE to the SYSIO clear
+ * register after each SYSIO-masked handler.
+ */
+void handler_irq(int irq, struct pt_regs *regs)
+{
+ struct irqaction *action;
+ int cpu = smp_processor_id();
+
+ /* XXX */
+ if(irq != 14)
+ clear_softint(1 << irq);
+
+ irq_enter(cpu, irq);
+ action = *(irq + irq_action);
+ kstat.interrupts[irq]++;
+ do {
+ /* NOTE(review): when no action is registered we call
+  * unexpected_irq() but still fall through and dereference
+  * 'action'; this relies on unexpected_irq() never
+  * returning (it ends in prom_cmdline()).
+  */
+ if(!action || !action->handler)
+ unexpected_irq(irq, 0, regs);
+ action->handler(irq, action->dev_id, regs);
+ if(action->flags & SA_SYSIO_MASKED)
+ *((unsigned int *)action->mask) = SYSIO_ICLR_IDLE;
+ } while((action = action->next) != NULL);
+ irq_exit(cpu, irq);
+}
+
+#ifdef CONFIG_BLK_DEV_FD
+extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
+
+/* Dedicated wrapper for the floppy IRQ: bypasses the irq_action chain
+ * walk, calls floppy_interrupt() directly, then acknowledges the
+ * SYSIO clear register just as handler_irq() would.
+ */
+void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
+{
+ struct irqaction *action = *(irq + irq_action);
+ int cpu = smp_processor_id();
+
+ irq_enter(cpu, irq);
+ floppy_interrupt(irq, dev_cookie, regs);
+ if(action->flags & SA_SYSIO_MASKED)
+ *((unsigned int *)action->mask) = SYSIO_ICLR_IDLE;
+ irq_exit(cpu, irq);
+}
+#endif
+
+/* XXX This needs to be written for floppy driver, and soon will be necessary
+ * XXX for serial driver as well.
+ */
+/* Not implemented yet: always fails with -1. */
+int request_fast_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char *name)
+{
+ return -1;
+}
+
+/* We really don't need these at all on the Sparc. We only have
+ * stubs here because they are exported to modules.
+ */
+/* IRQ autoprobe stub: nothing to probe, report no IRQs pending. */
+unsigned long probe_irq_on(void)
+{
+ return 0;
+}
+
+/* IRQ autoprobe stub: companion to probe_irq_on(), always 0. */
+int probe_irq_off(unsigned long mask)
+{
+ return 0;
+}
+
+/* XXX This is a hack, make it per-cpu so that SMP port will work correctly
+ * XXX with mixed MHZ Ultras in the machine. -DaveM
+ */
+static unsigned long cpu_cfreq;
+static unsigned long tick_offset;
+
+/* XXX This doesn't belong here, just do this cruft in the timer.c handler code. */
+/* Level-14 tick handler: validates the softint, runs the generic
+ * timer_interrupt(), acknowledges, and re-arms %tick_cmpr one
+ * tick_offset into the future.
+ */
+static void timer_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ extern void timer_interrupt(int, void *, struct pt_regs *);
+ unsigned long compare;
+
+ if (!(get_softint () & 1)) {
+ /* Just to be sure... */
+ clear_softint(1 << 14);
+ printk("Spurious level14 at %016lx\n", regs->tpc);
+ return;
+ }
+
+ timer_interrupt(irq, dev_id, regs);
+
+ /* Acknowledge INT_TIMER */
+ clear_softint(1 << 0);
+
+ /* Set up for next timer tick. */
+ __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+ "add %0, %1, %0\n\t"
+ "wr %0, 0x0, %%tick_cmpr"
+ : "=r" (compare)
+ : "r" (tick_offset));
+}
+
+/* This is called from time_init() to get the jiffies timer going. */
+/* Reads the CPU clock-frequency from the PROM to compute the per-HZ
+ * tick_offset, registers timer_handler() on IRQ 14, zeroes
+ * %tick/%tick_cmpr, and enables interrupts.
+ * NOTE(review): the cfunc argument is accepted but never used here --
+ * timer_handler calls timer_interrupt() directly; confirm intended.
+ */
+void init_timers(void (*cfunc)(int, void *, struct pt_regs *))
+{
+ int node, err;
+
+ /* XXX FIX this for SMP -JJ */
+ node = linux_cpus [0].prom_node;
+ cpu_cfreq = prom_getint(node, "clock-frequency");
+ tick_offset = cpu_cfreq / HZ;
+ err = request_irq(14, timer_handler, (SA_INTERRUPT|SA_STATIC_ALLOC),
+ "timer", NULL);
+ if(err) {
+ prom_printf("Serious problem, cannot register timer interrupt\n");
+ prom_halt();
+ } else {
+ unsigned long flags;
+
+ save_and_cli(flags);
+
+ /* Start the tick counter at zero with the first compare
+  * value one tick_offset away.
+  */
+ __asm__ __volatile__("wr %0, 0x0, %%tick_cmpr\n\t"
+ "wrpr %%g0, 0x0, %%tick"
+ : /* No outputs */
+ : "r" (tick_offset));
+
+ clear_softint (get_softint ());
+
+ restore_flags(flags);
+ }
+ sti();
+}
+
+/* We use this nowhere else, so only define it's layout here. */
+struct sun5_timer {
+ volatile u32 count0, _unused0;
+ volatile u32 limit0, _unused1;
+ volatile u32 count1, _unused2;
+ volatile u32 limit1, _unused3;
+} *prom_timers;
+
+/* Locate the PROM's counter-timer node and record its (PROM-mapped)
+ * register address in prom_timers, or leave prom_timers NULL when no
+ * such node/mapping exists.
+ */
+static void map_prom_timers(void)
+{
+ unsigned int addr[3];
+ int tnode, err;
+
+ /* PROM timer node hangs out in the top level of device siblings... */
+ tnode = prom_finddevice("/counter-timer");
+
+ /* Assume if node is not present, PROM uses different tick mechanism
+ * which we should not care about.
+ */
+ if(tnode == 0) {
+ prom_timers = (struct sun5_timer *) 0;
+ prom_printf("AIEEE, no timers\n");
+ return;
+ }
+
+ /* If PROM is really using this, it must be mapped by him. */
+ err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));
+ if(err == -1) {
+ prom_printf("PROM does not have timer mapped, trying to continue.\n");
+ prom_timers = (struct sun5_timer *) 0;
+ return;
+ }
+ prom_timers = (struct sun5_timer *) addr[0];
+}
+
+/* Stop the PROM's own timer (if map_prom_timers() found one) by
+ * zeroing both limit registers.
+ */
+static void kill_prom_timer(void)
+{
+ if(!prom_timers)
+ return;
+
+ /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
+ * We turn both off here just to be paranoid.
+ */
+ prom_timers->limit0 = 0;
+ prom_timers->limit1 = 0;
+}
+
+#if 0 /* Unused at this time. -DaveM */
+/* Re-arm the PROM timer; the limit value encodes a 9999 (0x270f)
+ * count -- per the comment below, a 10ms period.
+ */
+static void enable_prom_timer(void)
+{
+ if(!prom_timers)
+ return;
+
+ /* Set it to fire off every 10ms. */
+ prom_timers->limit1 = 0xa000270f;
+ prom_timers->count1 = 0;
+}
+#endif
+
+/* Boot-time IRQ setup: silence the PROM timer, clear the interrupt
+ * vector mask table and any pending softints, then enable interrupt
+ * delivery by setting PSTATE_IE.
+ */
+__initfunc(void init_IRQ(void))
+{
+ int i;
+
+ map_prom_timers();
+ kill_prom_timer();
+ for(i = 0; i < NUM_IVECS; i++)
+ ivector_to_mask[i] = 0;
+
+ /* We need to clear any IRQ's pending in the soft interrupt
+ * registers, a spurious one could be left around from the
+ * PROM timer which we just disabled.
+ */
+ clear_softint(get_softint());
+
+ /* Now that ivector table is initialized, it is safe
+ * to receive IRQ vector traps. We will normally take
+ * one or two right now, in case some device PROM used
+ * to boot us wants to speak to us. We just ignore them.
+ */
+ __asm__ __volatile__("rdpr %%pstate, %%g1\n\t"
+ "or %%g1, %0, %%g1\n\t"
+ "wrpr %%g1, 0x0, %%pstate"
+ : /* No outputs */
+ : "i" (PSTATE_IE)
+ : "g1");
+}
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
new file mode 100644
index 000000000..61233125c
--- /dev/null
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -0,0 +1,49 @@
+/* $Id: itlb_miss.S,v 1.10 1997/03/26 12:24:18 davem Exp $
+ * itlb_miss.S: Instruction TLB miss code, this is included directly
+ * into the trap table.
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/* Instruction TLB miss: a two-level software page-table walk
+ * (PGD -> PMD -> PTE) using physical loads (ASI_PHYS_USE_EC).
+ * Context 0 (kernel) branches to 3f and walks from %g6, user
+ * contexts walk from %g7.  A PTE with its high (valid) bit set is
+ * stored straight into the ITLB and we retry; otherwise control
+ * falls through to sparc64_itlb_refbit_catch.  The code is packed
+ * to fit I-cache lines, hence the padding nops.
+ */
+
+ /* ICACHE line 1 */
+ /*0x00*/ ldxa [%g0] ASI_IMMU, %g1 ! Get TAG_TARGET
+ /*0x04*/ srlx %g1, 8, %g3 ! Position PGD offset
+ /*0x08*/ srlx %g1, 48, %g5 ! Shift down CONTEXT bits
+ /*0x0c*/ and %g3, %g2, %g3 ! Mask PGD offset
+ /*0x10*/ sllx %g1, 2, %g4 ! Position PMD offset
+ /*0x14*/ ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! For PTE offset
+ /*0x18*/ brz,pn %g5, 3f ! Context 0 == kernel
+ /*0x1c*/ and %g4, %g2, %g4 ! Mask PMD offset
+
+ /* ICACHE line 2 */
+ /*0x20*/ ldxa [%g7 + %g3] ASI_PHYS_USE_EC, %g5 ! Load user PGD
+ /*0x24*/ srlx %g1, 1, %g1 ! PTE offset
+ /*0x28*/ ldxa [%g5 + %g4] ASI_PHYS_USE_EC, %g3 ! Load PMD
+2:/*0x2c*/ ldxa [%g3 + %g1] ASI_PHYS_USE_EC, %g5 ! Load PTE
+ /*0x30*/ brlz,a,pt %g5, 1f ! Valid set?
+ /*0x34*/ stxa %g5, [%g0] ASI_ITLB_DATA_IN ! TLB load
+ /*0x38*/ ba,a,pt %xcc, sparc64_itlb_refbit_catch ! Nope...
+1:/*0x3c*/ retry ! Trap return
+
+3: /* ICACHE line 3 */
+ /*0x40*/ ldxa [%g6 + %g3] ASI_PHYS_USE_EC, %g5 ! Load kern PGD
+ /*0x44*/ srlx %g1, 1, %g1 ! PTE offset
+ /*0x48*/ ba,pt %xcc, 2b ! Continue above
+ /*0x4c*/ ldxa [%g5 + %g4] ASI_PHYS_USE_EC, %g3 ! Load PMD
+ /*0x50*/ nop
+ /*0x54*/ nop
+ /*0x58*/ nop
+ /*0x5c*/ nop
+
+ /* ICACHE line 4 */
+ /*0x60*/ nop
+ /*0x64*/ nop
+ /*0x68*/ nop
+ /*0x6c*/ nop
+ /*0x70*/ nop
+ /*0x74*/ nop
+ /*0x78*/ nop
+ /*0x7c*/ nop
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
new file mode 100644
index 000000000..5ec62a6b6
--- /dev/null
+++ b/arch/sparc64/kernel/process.c
@@ -0,0 +1,594 @@
+/* $Id: process.c,v 1.6 1997/04/07 18:57:07 jj Exp $
+ * arch/sparc64/kernel/process.c
+ *
+ * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <stdarg.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/config.h>
+#include <linux/reboot.h>
+
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/delay.h>
+#include <asm/processor.h>
+#include <asm/pstate.h>
+#include <asm/elf.h>
+#include <asm/fpumacro.h>
+
+#ifndef __SMP__
+
+/*
+ * the idle loop on a Sparc... ;)
+ */
+/* Only the idle task (pid 0) may enter; pins counter at -100 so the
+ * scheduler always prefers any runnable task, then loops forever.
+ */
+asmlinkage int sys_idle(void)
+{
+ if (current->pid != 0)
+ return -EPERM;
+
+ /* endless idle loop with no priority at all */
+ current->counter = -100;
+ for (;;)
+ schedule();
+ return 0; /* not reached */
+}
+
+#else
+
+/*
+ * the idle loop on a UltraMultiPenguin...
+ */
+/* SMP variant: schedules once and returns; the per-cpu idling itself
+ * happens in cpu_idle() below.
+ */
+asmlinkage int sys_idle(void)
+{
+ if (current->pid != 0)
+ return -EPERM;
+
+ /* endless idle loop with no priority at all */
+ current->counter = -100;
+ schedule();
+ return 0;
+}
+
+/* This is being executed in task 0 'user space'. */
+/* Spins on smp_process_available; smp_swap(spap, -1) atomically claims
+ * the counter (-1 acts as a lock value other CPUs spin on), then a
+ * nonzero count is decremented and idle() is invoked.
+ */
+int cpu_idle(void *unused)
+{
+ volatile int *spap = &smp_process_available;
+ volatile int cval;
+
+ while(1) {
+ if(0==*spap)
+ continue;
+ cli();
+ /* Acquire exclusive access. */
+ while((cval = smp_swap(spap, -1)) == -1)
+ while(*spap == -1)
+ ;
+ if (0==cval) {
+ /* ho hum, release it. */
+ *spap = 0;
+ sti();
+ continue;
+ }
+ /* Something interesting happened, whee... */
+ *spap = (cval - 1);
+ sti();
+ idle();
+ }
+}
+
+#endif
+
+extern char reboot_command [];
+
+#ifdef CONFIG_SUN_CONSOLE
+extern void console_restore_palette (void);
+extern int serial_console;
+#endif
+
+/* Halt the machine via the PROM.  Interrupts are briefly enabled with
+ * a short delay first -- presumably to let pending work/console output
+ * drain -- and the console palette is restored unless on serial.
+ */
+void machine_halt(void)
+{
+ sti();
+ udelay(8000);
+ cli();
+#ifdef CONFIG_SUN_CONSOLE
+ if (!serial_console)
+ console_restore_palette ();
+#endif
+ prom_halt();
+ panic("Halt failed!"); /* prom_halt() should never return */
+}
+
+/* Reboot via the PROM: prefer the explicit cmd argument, then the
+ * saved reboot_command (first line only), finally a plain "reset".
+ */
+void machine_restart(char * cmd)
+{
+ char *p;
+
+ sti();
+ udelay(8000);
+ cli();
+
+ /* Truncate reboot_command at the first newline. */
+ p = strchr (reboot_command, '\n');
+ if (p) *p = 0;
+#ifdef CONFIG_SUN_CONSOLE
+ if (!serial_console)
+ console_restore_palette ();
+#endif
+ if (cmd)
+ prom_reboot(cmd);
+ if (*reboot_command)
+ prom_reboot(reboot_command);
+ prom_feval ("reset");
+ panic("Reboot failed!"); /* none of the above should return */
+}
+
+/* No distinct power-off path: treat it as a halt. */
+void machine_power_off(void)
+{
+ machine_halt();
+}
+
+/* Dump the 8 local and 8 in registers of a 64-bit register window. */
+void show_regwindow(struct reg_window *rw)
+{
+ printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3]);
+ printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+ rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
+ printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
+ rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3]);
+ printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
+ rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
+}
+
+/* Dump the 8 local and 8 in registers of a 32-bit (compat) window. */
+void show_regwindow32(struct reg_window32 *rw)
+{
+ printk("l0: %08x l1: %08x l2: %08x l3: %08x\n"
+ "l4: %08x l5: %08x l6: %08x l7: %08x\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+ rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
+ printk("i0: %08x i1: %08x i2: %08x i3: %08x\n"
+ "i4: %08x i5: %08x i6: %08x i7: %08x\n",
+ rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
+ rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
+}
+
+/* Dump a 64-bit stack frame: window registers, frame/arg slots, then
+ * every word between the frame header and the saved frame pointer.
+ * NOTE(review): the do/while below assumes fp - sf > STACKFRAME_SZ;
+ * a minimal frame (size 0 after the subtraction) would underflow --
+ * confirm callers only pass full frames.
+ */
+void show_stackframe(struct sparc_stackf *sf)
+{
+ unsigned long size;
+ unsigned long *stk;
+ int i;
+
+ printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
+ "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
+ sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
+ sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+ printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
+ "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
+ sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
+ sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
+ printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
+ "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
+ (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
+ sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+ sf->xxargs[0]);
+ size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+ size -= STACKFRAME_SZ;
+ stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
+ i = 0;
+ do {
+ printk("s%d: %016lx\n", i++, *stk++);
+ } while ((size -= sizeof(unsigned long)));
+}
+
+/* 32-bit (compat) variant of show_stackframe().
+ * NOTE(review): same do/while underflow caveat as show_stackframe()
+ * when fp - sf == STACKFRAME32_SZ.
+ */
+void show_stackframe32(struct sparc_stackf32 *sf)
+{
+ unsigned long size;
+ unsigned *stk;
+ int i;
+
+ printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
+ sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
+ printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
+ sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
+ printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
+ sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
+ printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
+ sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
+ printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
+ "x3: %08x x4: %08x x5: %08x xx: %08x\n",
+ sf->structptr, sf->xargs[0], sf->xargs[1],
+ sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
+ sf->xxargs[0]);
+ size = ((unsigned long)sf->fp) - ((unsigned long)sf);
+ size -= STACKFRAME32_SZ;
+ stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
+ i = 0;
+ do {
+ printk("s%d: %08x\n", i++, *stk++);
+ } while ((size -= sizeof(unsigned)));
+}
+
+/* Dump trap state (TSTATE/TPC/TNPC/Y) and the global/out registers
+ * from a 64-bit pt_regs.
+ */
+void show_regs(struct pt_regs * regs)
+{
+#if __MPP__
+ printk("CID: %d\n",mpp_cid());
+#endif
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %016lx\n", regs->tstate,
+ regs->tpc, regs->tnpc, regs->y);
+ printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+ printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
+ regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+ regs->u_regs[7]);
+ printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
+ regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+ regs->u_regs[11]);
+ printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+#if 0
+ show_regwindow((struct reg_window *)(regs->u_regs[14] + STACK_BIAS));
+#endif
+}
+
+/* 32-bit (compat) register dump: PSR/PC/NPC/Y, globals, outs, and the
+ * register window at the saved stack pointer.
+ */
+void show_regs32(struct pt_regs32 *regs)
+{
+#if __MPP__
+ printk("CID: %d\n",mpp_cid());
+#endif
+ printk("PSR: %08x PC: %08x NPC: %08x Y: %08x\n", regs->psr,
+ regs->pc, regs->npc, regs->y);
+ printk("g0: %08x g1: %08x g2: %08x g3: %08x\n",
+ regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
+ regs->u_regs[3]);
+ printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
+ regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
+ regs->u_regs[7]);
+ printk("o0: %08x o1: %08x o2: %08x o3: %08x\n",
+ regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
+ regs->u_regs[11]);
+ printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
+ regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
+ regs->u_regs[15]);
+ show_regwindow32((struct reg_window32 *)((unsigned long)regs->u_regs[14]));
+}
+
+/* Dump the interesting fields of a thread_struct: signal state,
+ * kernel sp/pc, any user windows buffered in the thread (one per
+ * non-zero rwbuf_stkptrs entry), FPU status, sigstack and flags.
+ */
+void show_thread(struct thread_struct *tss)
+{
+ int i;
+
+#if 0
+ printk("kregs: 0x%016lx\n", (unsigned long)tss->kregs);
+ show_regs(tss->kregs);
+#endif
+ printk("sig_address: 0x%016lx\n", tss->sig_address);
+ printk("sig_desc: 0x%016lx\n", tss->sig_desc);
+ printk("ksp: 0x%016lx\n", tss->ksp);
+ printk("kpc: 0x%016lx\n", tss->kpc);
+
+ for (i = 0; i < NSWINS; i++) {
+ if (!tss->rwbuf_stkptrs[i])
+ continue;
+ printk("reg_window[%d]:\n", i);
+ printk("stack ptr: 0x%016lx\n", tss->rwbuf_stkptrs[i]);
+ show_regwindow(&tss->reg_window[i]);
+ }
+ printk("w_saved: 0x%08lx\n", tss->w_saved);
+
+ /* XXX missing: float_regs */
+ printk("fsr: 0x%016lx\n", tss->fsr);
+
+ printk("sstk_info.stack: 0x%016lx\n",
+ (unsigned long)tss->sstk_info.the_stack);
+ printk("sstk_info.status: 0x%016lx\n",
+ (unsigned long)tss->sstk_info.cur_status);
+ printk("flags: 0x%016lx\n", tss->flags);
+ printk("current_ds: 0x%016x\n", tss->current_ds);
+
+ /* XXX missing: core_exec */
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+/* On exit, only drops the FPU ownership marker: last_task_used_math
+ * on UP, the PF_USEDFPU flag on SMP.
+ */
+void exit_thread(void)
+{
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+}
+
+/* Reset per-thread state for exec: clear buffered register windows and
+ * sigstack info, drop FPU ownership, and demote the task from kernel
+ * thread to a normal user task (USER_DS).
+ */
+void flush_thread(void)
+{
+ current->tss.w_saved = 0;
+ current->tss.sstk_info.cur_status = 0;
+ current->tss.sstk_info.the_stack = 0;
+
+ /* No new signal delivery by default */
+ current->tss.new_signal = 0;
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+#ifndef __SMP__
+ last_task_used_math = NULL;
+#else
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+
+ /* Now, this task is no longer a kernel thread. */
+ current->tss.flags &= ~SPARC_FLAG_KTHREAD;
+ current->tss.current_ds = USER_DS;
+}
+
+/* Copy the first 0x50 bytes of a pt_regs with ldd/std pairs, clobbering
+ * g2-g5 and o4-o5.
+ * NOTE(review): uses 32-bit-era 8-byte ldd/std and stops at 0x50 --
+ * verify this still covers the sparc64 pt_regs layout.
+ */
+static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+ __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
+ "ldd\t[%1 + 0x08], %%g4\n\t"
+ "ldd\t[%1 + 0x10], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x00]\n\t"
+ "std\t%%g4, [%0 + 0x08]\n\t"
+ "std\t%%o4, [%0 + 0x10]\n\t"
+ "ldd\t[%1 + 0x18], %%g2\n\t"
+ "ldd\t[%1 + 0x20], %%g4\n\t"
+ "ldd\t[%1 + 0x28], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x18]\n\t"
+ "std\t%%g4, [%0 + 0x20]\n\t"
+ "std\t%%o4, [%0 + 0x28]\n\t"
+ "ldd\t[%1 + 0x30], %%g2\n\t"
+ "ldd\t[%1 + 0x38], %%g4\n\t"
+ "ldd\t[%1 + 0x40], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x30]\n\t"
+ "std\t%%g4, [%0 + 0x38]\n\t"
+ "ldd\t[%1 + 0x48], %%g2\n\t"
+ "std\t%%o4, [%0 + 0x40]\n\t"
+ "std\t%%g2, [%0 + 0x48]\n\t" : :
+ "r" (dst), "r" (src) :
+ "g2", "g3", "g4", "g5", "o4", "o5");
+}
+
+/* Copy the first 0x40 bytes of a reg_window with ldd/std pairs,
+ * clobbering g2-g5 and o4-o5.
+ * NOTE(review): a sparc64 reg_window is 16 x 8 bytes; this copies
+ * only 0x40 -- verify against the actual structure layout.
+ */
+static __inline__ void copy_regwin(struct reg_window *dst, struct reg_window *src)
+{
+ __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
+ "ldd\t[%1 + 0x08], %%g4\n\t"
+ "ldd\t[%1 + 0x10], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x00]\n\t"
+ "std\t%%g4, [%0 + 0x08]\n\t"
+ "std\t%%o4, [%0 + 0x10]\n\t"
+ "ldd\t[%1 + 0x18], %%g2\n\t"
+ "ldd\t[%1 + 0x20], %%g4\n\t"
+ "ldd\t[%1 + 0x28], %%o4\n\t"
+ "std\t%%g2, [%0 + 0x18]\n\t"
+ "std\t%%g4, [%0 + 0x20]\n\t"
+ "std\t%%o4, [%0 + 0x28]\n\t"
+ "ldd\t[%1 + 0x30], %%g2\n\t"
+ "ldd\t[%1 + 0x38], %%g4\n\t"
+ "std\t%%g2, [%0 + 0x30]\n\t"
+ "std\t%%g4, [%0 + 0x38]\n\t" : :
+ "r" (dst), "r" (src) :
+ "g2", "g3", "g4", "g5", "o4", "o5");
+}
+
+/* Copy a user stack frame from src below dst for clone(); the real
+ * body is currently compiled out (#if 0), so this always reports
+ * failure.  Returns the new frame pointer, or NULL on failure --
+ * previously 'sp' was returned uninitialized (undefined behavior)
+ * when the body was disabled; it now defaults to NULL, the failure
+ * value the (also disabled) caller in copy_thread() tests for.
+ */
+static __inline__ struct sparc_stackf *
+clone_stackframe(struct sparc_stackf *dst, struct sparc_stackf *src)
+{
+ struct sparc_stackf *sp = NULL;
+
+#if 0
+ unsigned long size;
+ size = ((unsigned long)src->fp) - ((unsigned long)src);
+ sp = (struct sparc_stackf *)(((unsigned long)dst) - size);
+
+ if (copy_to_user(sp, src, size))
+ return 0;
+ if (put_user(dst, &sp->fp))
+ return 0;
+#endif
+ return sp;
+}
+
+
+/* Copy a Sparc thread. The fork() return value conventions
+ * under SunOS are nothing short of bletcherous:
+ * Parent --> %o0 == childs pid, %o1 == 0
+ * Child --> %o0 == parents pid, %o1 == 1
+ *
+ * NOTE: We have a separate fork kpsr/kwim because
+ * the parent could change these values between
+ * sys_fork invocation and when we reach here
+ * if the parent should sleep while trying to
+ * allocate the task_struct and kernel stack in
+ * do_fork().
+ */
+extern void ret_from_syscall(void);
+
+/* Set up the child's kernel stack for fork/clone: place a copy of the
+ * parent's pt_regs (and, for kernel threads, an extra register window)
+ * at the top of the child's kernel stack page, point tss.ksp/kpc at
+ * it so the child resumes in ret_from_syscall, and arrange the SunOS
+ * fork return convention described in the comment above.
+ */
+int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
+ struct task_struct *p, struct pt_regs *regs)
+{
+ struct pt_regs *childregs;
+ struct reg_window *new_stack, *old_stack;
+ unsigned long stack_offset;
+
+#if 0
+#ifndef __SMP__
+ if(last_task_used_math == current) {
+#else
+ if(current->flags & PF_USEDFPU) {
+#endif
+ put_psr(get_psr() | PSR_EF);
+ fpsave(&p->tss.float_regs[0], &p->tss.fsr);
+#ifdef __SMP__
+ current->flags &= ~PF_USEDFPU;
+#endif
+ }
+#endif
+
+ /* Calculate offset to stack_frame & pt_regs */
+ stack_offset = (PAGE_SIZE - TRACEREG_SZ);
+
+ if(regs->tstate & TSTATE_PRIV)
+ stack_offset -= REGWIN_SZ;
+
+ /* Copy the parent's trap frame and the register window just
+  * below it onto the child's kernel stack.
+  */
+ childregs = ((struct pt_regs *) (p->kernel_stack_page + stack_offset));
+ *childregs = *regs;
+ new_stack = (((struct reg_window *) childregs) - 1);
+ old_stack = (((struct reg_window *) regs) - 1);
+ *new_stack = *old_stack;
+
+ p->saved_kernel_stack = ((unsigned long) new_stack);
+ p->tss.ksp = p->saved_kernel_stack - STACK_BIAS;
+ p->tss.kpc = ((unsigned long) ret_from_syscall) - 0x8;
+ p->tss.kregs = childregs;
+
+ /* Don't look... */
+ p->tss.cwp = regs->u_regs[UREG_G0];
+
+ /* tss.wstate was copied by do_fork() */
+
+ if(regs->tstate & TSTATE_PRIV) {
+ /* Kernel thread: runs on its own kernel stack with
+  * KERNEL_DS and %g6 pointing at its task struct.
+  */
+ childregs->u_regs[UREG_FP] = p->tss.ksp;
+ p->tss.flags |= SPARC_FLAG_KTHREAD;
+ p->tss.current_ds = KERNEL_DS;
+ childregs->u_regs[UREG_G6] = (unsigned long) p;
+ } else {
+ childregs->u_regs[UREG_FP] = sp;
+ p->tss.flags &= ~SPARC_FLAG_KTHREAD;
+ p->tss.current_ds = USER_DS;
+
+#if 0
+ if (sp != current->tss.kregs->u_regs[UREG_FP]) {
+ struct sparc_stackf *childstack;
+ struct sparc_stackf *parentstack;
+
+ /*
+ * This is a clone() call with supplied user stack.
+ * Set some valid stack frames to give to the child.
+ */
+ childstack = (struct sparc_stackf *)sp;
+ parentstack = (struct sparc_stackf *)
+ current->tss.kregs->u_regs[UREG_FP];
+
+#if 0
+ printk("clone: parent stack:\n");
+ show_stackframe(parentstack);
+#endif
+
+ childstack = clone_stackframe(childstack, parentstack);
+ if (!childstack)
+ return -EFAULT;
+
+#if 0
+ printk("clone: child stack:\n");
+ show_stackframe(childstack);
+#endif
+
+ childregs->u_regs[UREG_FP] = (unsigned long)childstack;
+ }
+#endif
+ }
+
+ /* Set the return value for the child. */
+ childregs->u_regs[UREG_I0] = current->pid;
+ childregs->u_regs[UREG_I1] = 1;
+
+ /* Set the return value for the parent. */
+ regs->u_regs[UREG_I1] = 0;
+#if 0
+ printk("CHILD register dump\n");
+ show_regs(childregs);
+ show_regwindow(new_stack);
+ while(1)
+ barrier();
+#endif
+ return 0;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+/* Currently a no-op: the whole SunOS-style core dump fill is #if 0'd
+ * out pending the 64-bit port.
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+#if 0
+ unsigned long first_stack_page;
+ dump->magic = SUNOS_CORE_MAGIC;
+ dump->len = sizeof(struct user);
+ dump->regs.psr = regs->psr;
+ dump->regs.pc = regs->pc;
+ dump->regs.npc = regs->npc;
+ dump->regs.y = regs->y;
+ /* Copy general registers %g1..%o7 into the dump. */
+ memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
+ dump->uexec = current->tss.core_exec;
+ dump->u_tsize = (((unsigned long) current->mm->end_code) -
+ ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_dsize &= ~(PAGE_SIZE - 1);
+ first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
+ dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
+ memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->tss.float_regs[0], (sizeof(unsigned long) * 32));
+ dump->fpu.fpstatus.fsr = current->tss.fsr;
+ dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
+ dump->sigcode = current->tss.sig_desc;
+#endif
+}
+
+/*
+ * fill in the fpu structure for a core dump.
+ */
+int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
+{
+ /* Currently we report that we couldn't dump the fpu structure */
+ return 0;
+}
+
+/*
+ * sparc_execve() executes a new program after the asm stub has set
+ * things up for us. This should basically do what I want it to.
+ */
+asmlinkage int sparc_execve(struct pt_regs *regs)
+{
+ int error, base = 0;
+ char *filename;
+
+ /* Check for indirect call: with %g1 == 0 the real arguments
+  * start one register later.
+  */
+ if(regs->u_regs[UREG_G1] == 0)
+ base = 1;
+
+ error = getname((char *) regs->u_regs[base + UREG_I0], &filename);
+ if(error)
+ return error;
+ error = do_execve(filename, (char **) regs->u_regs[base + UREG_I1],
+ (char **) regs->u_regs[base + UREG_I2], regs);
+ putname(filename);
+ return error;
+}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
new file mode 100644
index 000000000..dfebd7ad8
--- /dev/null
+++ b/arch/sparc64/kernel/rtrap.S
@@ -0,0 +1,109 @@
+/* $Id: rtrap.S,v 1.11 1997/04/03 13:03:50 davem Exp $
+ * rtrap.S: Preparing for return from trap on Sparc V9.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/asi.h>
+#include <asm/pstate.h>
+#include <asm/ptrace.h>
+#include <asm/spitfire.h>
+
+/* We assume here this is entered with AG, MG and IG bits in pstate clear */
+
+ .text
+ .align 4
+ .globl rtrap
+ /* Common return-from-trap path.  First drain pending bottom halves,
+  * then restore the trap state saved in the pt_regs area on the
+  * kernel stack and 'retry' back to the interrupted instruction.
+  */
+rtrap:
+ sethi %hi(bh_active), %l2
+ or %l2, %lo(bh_active), %l2
+ sethi %hi(bh_mask), %l1
+ or %l1, %lo(bh_mask), %l1
+ ldx [%l2 + %g4], %l3
+ ldx [%l1 + %g4], %l4
+ andcc %l3, %l4, %g0
+ be,pt %xcc, 2f
+ nop
+ call do_bottom_half
+ nop
+2:
+ /* %l1 = saved %tstate, %l2 = saved %tpc, %l4 = PIL field mask. */
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %l1
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC], %l2
+ sethi %hi(0xf << 20), %l4
+ andcc %l1, TSTATE_PRIV, %l3
+ and %l1, %l4, %l4
+ rdpr %pstate, %l7
+ andn %l1, %l4, %l1 /* XXX May not be needed -DaveM */
+ /* Trap came from user mode (TSTATE_PRIV clear)?  Then first check
+  * for rescheduling and pending signals.  Delay slot masks IE so
+  * interrupts stay off during the restore sequence below.
+  */
+ be,pt %icc, to_user
+ andn %l7, PSTATE_IE, %l7
+3:
+ /* Reload all global and in registers from the saved pt_regs. */
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G1], %g1
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G2], %g2
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G3], %g3
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G4], %g4
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G5], %g5
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G6], %g6
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G7], %g7
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %i0
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1], %i1
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2], %i2
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3], %i3
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4], %i4
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5], %i5
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6], %i6
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7], %i7
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_Y], %o3
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %o2
+ rdpr %tl, %o4
+ wr %o3, %g0, %y
+ add %o4, 1, %o4
+
+ /* Bump trap level and rewrite %pil/%tstate/%tpc/%tnpc so the
+  * final 'retry' resumes exactly where the trap occurred.
+  */
+ srl %l4, 20, %l4
+ wrpr %l7, %g0, %pstate
+ wrpr %l4, 0x0, %pil
+ wrpr %o4, %g0, %tl
+ wrpr %l1, %g0, %tstate
+ wrpr %l2, %g0, %tpc
+ brnz,pn %l3, 1f
+ wrpr %o2, %g0, %tnpc
+
+ /* We came here from to_user, ie. we have now AG.
+ * Also have to push user context back into primary.
+ */
+ restore
+
+ mov SECONDARY_CONTEXT, %g6
+ mov PRIMARY_CONTEXT, %g7
+ ldxa [%g6] ASI_DMMU, %g4
+ stxa %g4, [%g7] ASI_DMMU
+
+ /* Give back the register windows we borrowed (%otherwin) and
+  * restore the user window state before returning to user mode.
+  */
+ rdpr %wstate, %g1
+ rdpr %otherwin, %g2
+ srl %g1, 3, %g1
+ wrpr %g2, %g0, %canrestore
+ wrpr %g1, %g0, %wstate
+ wrpr %g0, %g0, %otherwin
+ retry
+1:
+ /* Kernel-to-kernel return: just pop the window and retry. */
+ restore
+ retry
+to_user:
+ /* Returning to user space: run schedule() if need_resched is set,
+  * then deliver any pending unblocked signals via do_signal().
+  */
+ sethi %hi(need_resched), %l0
+ or %l0, %lo(need_resched), %l0
+ ld [%l0 + %g4], %l0
+ wrpr %l7, PSTATE_IE, %pstate
+ brz,pt %l0, 1f
+ ldx [%g6 + AOFF_task_signal], %l0
+ call schedule
+ nop
+1:
+ ldx [%g6 + AOFF_task_blocked], %o0
+ or %l7, PSTATE_AG, %l7 ! Will need this for setting back wstate
+ andncc %l0, %o0, %g0
+ be,pt %xcc, 3b
+ mov %l5, %o2
+ mov %l6, %o3
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+ /* Tail-call trick: fix %o7 so do_signal returns to label 3 above. */
+ call do_signal
+ add %o7, 3b-.-4, %o7
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
new file mode 100644
index 000000000..170e5563e
--- /dev/null
+++ b/arch/sparc64/kernel/setup.c
@@ -0,0 +1,435 @@
+/* $Id: setup.c,v 1.5 1997/04/04 00:49:52 davem Exp $
+ * linux/arch/sparc64/kernel/setup.c
+ *
+ * Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <asm/smp.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/blk.h>
+#include <linux/init.h>
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/idprom.h>
+
+/* Static console screen description: there is no PC BIOS on sparc64
+ * to fill this in, so fixed 128x54 text dimensions are supplied.
+ */
+struct screen_info screen_info = {
+ 0, 0, /* orig-x, orig-y */
+ { 0, 0, }, /* unused */
+ 0, /* orig-video-page */
+ 0, /* orig-video-mode */
+ 128, /* orig-video-cols */
+ 0,0,0, /* ega_ax, ega_bx, ega_cx */
+ 54, /* orig-video-lines */
+ 0, /* orig-video-isVGA */
+ 16 /* orig-video-points */
+};
+
+unsigned int phys_bytes_of_ram, end_of_phys_memory;
+
+/* No PC BIOS32 services on sparc64 -- stub that allocates nothing
+ * and returns memory_start unchanged.
+ */
+unsigned long bios32_init(unsigned long memory_start, unsigned long memory_end)
+{
+ return memory_start;
+}
+
+/* Typing sync at the prom prompt calls the function pointed to by
+ * the sync callback which I set to the following function.
+ * This should sync all filesystems and return, for now it just
+ * prints out pretty messages and returns.
+ */
+
+extern unsigned long sparc64_ttable_tl0;
+extern void breakpoint(void);
+#if CONFIG_SUN_CONSOLE
+extern void console_restore_palette(void);
+#endif
+asmlinkage void sys_sync(void); /* it's really int */
+
+/* PROM 'sync' callback.  Temporarily swaps the kernel trap table back
+ * in (the PROM left its own %tba installed), syncs filesystems if we
+ * are in process context, then restores the PROM trap table before
+ * returning control to the firmware.
+ */
+void prom_sync_me(long *args)
+{
+ unsigned long prom_tba, flags;
+
+ save_and_cli(flags);
+ /* Save the PROM's trap base address and install ours. */
+ __asm__ __volatile__("flushw; rdpr %%tba, %0\n\t" : "=r" (prom_tba));
+ __asm__ __volatile__("wrpr %0, 0x0, %%tba\n\t" : : "r" (&sparc64_ttable_tl0));
+
+#ifdef CONFIG_SUN_CONSOLE
+ console_restore_palette ();
+#endif
+ prom_printf("PROM SYNC COMMAND...\n");
+ show_free_areas();
+ /* pid 0 is the idle/swapper task -- it must not sleep in sys_sync. */
+ if(current->pid != 0) {
+ sti();
+ sys_sync();
+ cli();
+ }
+ prom_printf("Returning to prom\n");
+
+ /* Put the PROM trap table back before we return to it. */
+ __asm__ __volatile__("flushw; wrpr %0, 0x0, %%tba\n\t" : : "r" (prom_tba));
+ restore_flags(flags);
+
+ return;
+}
+
+extern void rs_kgdb_hook(int tty_num); /* sparc/serial.c */
+
+unsigned int boot_flags = 0;
+#define BOOTME_DEBUG 0x1
+#define BOOTME_SINGLE 0x2
+#define BOOTME_KGDB 0x4
+
+#ifdef CONFIG_SUN_CONSOLE
+extern char *console_fb_path;
+static int console_fb = 0;
+#endif
+static unsigned long memory_size = 0;
+
+/* Hook for dropping into KGDB from kernel code.  Currently disabled
+ * (#if 0) -- a no-op until the debugger support is wired up.
+ */
+void kernel_enter_debugger(void)
+{
+#if 0
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: Entered\n");
+ breakpoint();
+ }
+#endif
+}
+
+/* Handle a system interrupt coming from OBP (the PROM).  Enters the
+ * debugger or drops back to the PROM depending on boot flags.
+ * Returns 1 if the interrupt was consumed here, 0 otherwise.
+ */
+int obp_system_intr(void)
+{
+ if (boot_flags & BOOTME_KGDB) {
+ printk("KGDB: system interrupted\n");
+ breakpoint();
+ return 1;
+ }
+ if (boot_flags & BOOTME_DEBUG) {
+ printk("OBP: system interrupted\n");
+ prom_halt();
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Process kernel command line switches that are specific to the
+ * SPARC or that require special low-level processing.
+ * Each single-character switch sets a bit in boot_flags ('d' debug,
+ * 's' single-user) or halts immediately ('h').
+ */
+__initfunc(static void process_switch(char c))
+{
+ switch (c) {
+ case 'd':
+ boot_flags |= BOOTME_DEBUG;
+ break;
+ case 's':
+ boot_flags |= BOOTME_SINGLE;
+ break;
+ case 'h':
+ prom_printf("boot_flags_init: Halt!\n");
+ halt();
+ break;
+ default:
+ printk("Unknown boot switch (-%c)\n", c);
+ break;
+ }
+}
+
+/* Parse the PROM boot command line.  Recognizes '-' switch groups
+ * (handed to process_switch), 'kgdb=tty{a,b}', 'console=...' and
+ * 'mem=XXX[kKmM]'; everything else is skipped word by word.
+ */
+__initfunc(static void boot_flags_init(char *commands))
+{
+ while (*commands) {
+ /* Move to the start of the next "argument". */
+ while (*commands && *commands == ' ')
+ commands++;
+
+ /* Process any command switches, otherwise skip it. */
+ if (*commands == '\0')
+ break;
+ else if (*commands == '-') {
+ commands++;
+ while (*commands && *commands != ' ')
+ process_switch(*commands++);
+ } else if (strlen(commands) >= 9
+ && !strncmp(commands, "kgdb=tty", 8)) {
+ boot_flags |= BOOTME_KGDB;
+ /* commands[8] selects the serial line: 'a' or 'b'. */
+ switch (commands[8]) {
+#ifdef CONFIG_SUN_SERIAL
+ case 'a':
+ rs_kgdb_hook(0);
+ prom_printf("KGDB: Using serial line /dev/ttya.\n");
+ break;
+ case 'b':
+ rs_kgdb_hook(1);
+ prom_printf("KGDB: Using serial line /dev/ttyb.\n");
+ break;
+#endif
+ default:
+ printk("KGDB: Unknown tty line.\n");
+ boot_flags &= ~BOOTME_KGDB;
+ break;
+ }
+ commands += 9;
+ } else {
+#if CONFIG_SUN_CONSOLE
+ if (!strncmp(commands, "console=", 8)) {
+ commands += 8;
+ /* console_fb: 1 = framebuffer, 2 = ttya, 3 = ttyb. */
+ if (!strncmp (commands, "ttya", 4)) {
+ console_fb = 2;
+ prom_printf ("Using /dev/ttya as console.\n");
+ } else if (!strncmp (commands, "ttyb", 4)) {
+ console_fb = 3;
+ prom_printf ("Using /dev/ttyb as console.\n");
+ } else {
+ console_fb = 1;
+ console_fb_path = commands;
+ }
+ } else
+#endif
+ if (!strncmp(commands, "mem=", 4)) {
+ /*
+ * "mem=XXX[kKmM]" overrides the PROM-reported
+ * memory size.
+ */
+ memory_size = simple_strtoul(commands + 4,
+ &commands, 0);
+ if (*commands == 'K' || *commands == 'k') {
+ memory_size <<= 10;
+ commands++;
+ } else if (*commands=='M' || *commands=='m') {
+ memory_size <<= 20;
+ commands++;
+ }
+ }
+ /* Skip the rest of this word. */
+ while (*commands && *commands != ' ')
+ commands++;
+ }
+ }
+}
+
+extern int prom_probe_memory(void);
+extern unsigned long start, end;
+extern void panic_setup(char *, int *);
+extern unsigned long sun_serial_setup(unsigned long);
+
+extern unsigned short root_flags;
+extern unsigned short root_dev;
+extern unsigned short ram_flags;
+extern unsigned ramdisk_image;
+extern unsigned ramdisk_size;
+#define RAMDISK_IMAGE_START_MASK 0x07FF
+#define RAMDISK_PROMPT_FLAG 0x8000
+#define RAMDISK_LOAD_FLAG 0x4000
+
+extern int root_mountflags;
+
+extern void register_console(void (*proc)(const char *));
+
+char saved_command_line[256];
+char reboot_command[256];
+
+unsigned long phys_base;
+
+static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
+
+/* Architecture-specific boot-time setup: parse the PROM command line,
+ * probe physical memory banks, compute the usable memory range, set up
+ * the ramdisk/initrd parameters, and pick the console device.
+ */
+__initfunc(void setup_arch(char **cmdline_p,
+ unsigned long * memory_start_p, unsigned long * memory_end_p))
+{
+ unsigned long lowest_paddr;
+ int total, i;
+
+ /* Initialize PROM console and command line. */
+ *cmdline_p = prom_getbootargs();
+ strcpy(saved_command_line, *cmdline_p);
+
+ printk("ARCH: SUN4U\n");
+
+ boot_flags_init(*cmdline_p);
+#if 0
+ if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
+ ((*(short *)linux_dbvec) != -1)) {
+ printk("Booted under KADB. Syncing trap table.\n");
+ (*(linux_dbvec->teach_debugger))();
+ }
+ if((boot_flags & BOOTME_KGDB)) {
+ set_debug_traps();
+ prom_printf ("Breakpoint!\n");
+ breakpoint();
+ }
+#endif
+
+ idprom_init();
+ total = prom_probe_memory();
+
+ /* Walk the PROM-reported memory banks: find the lowest physical
+  * address and the end of memory, trimming banks if a "mem=" limit
+  * was given (the ++i entry terminates the sp_banks list early).
+  */
+ lowest_paddr = 0xffffffffffffffffUL;
+ for(i=0; sp_banks[i].num_bytes != 0; i++) {
+ if(sp_banks[i].base_addr < lowest_paddr)
+ lowest_paddr = sp_banks[i].base_addr;
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ if (memory_size) {
+ if (end_of_phys_memory > memory_size) {
+ sp_banks[i].num_bytes -=
+ (end_of_phys_memory - memory_size);
+ end_of_phys_memory = memory_size;
+ sp_banks[++i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+ }
+ }
+ }
+ prom_setsync(prom_sync_me);
+
+ /* In paging_init() we tip off this value to see if we need
+ * to change init_mm.pgd to point to the real alias mapping.
+ */
+ phys_base = lowest_paddr;
+
+ /* Usable memory runs from just past the kernel image (&end) to the
+  * end of physical memory mapped at PAGE_OFFSET.
+  */
+ *memory_start_p = PAGE_ALIGN(((unsigned long) &end));
+ *memory_end_p = (end_of_phys_memory + PAGE_OFFSET);
+
+#ifndef NO_DAVEM_DEBUGGING
+ prom_printf("phys_base[%016lx] memory_start[%016lx] memory_end[%016lx]\n",
+ phys_base, *memory_start_p, *memory_end_p);
+#endif
+
+ if (!root_flags)
+ root_mountflags &= ~MS_RDONLY;
+ ROOT_DEV = to_kdev_t(root_dev);
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (ramdisk_image) {
+ initrd_start = ramdisk_image;
+ if (initrd_start < PAGE_OFFSET) initrd_start += PAGE_OFFSET;
+ initrd_end = initrd_start + ramdisk_size;
+ if (initrd_end > *memory_end_p) {
+ printk(KERN_CRIT "initrd extends beyond end of memory "
+ "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+ initrd_end,*memory_end_p);
+ initrd_start = 0;
+ }
+ /* If the initrd sits right at memory_start, allow allocation
+  * below it and bump memory_start past its end.
+  */
+ if (initrd_start >= *memory_start_p && initrd_start < *memory_start_p + 2 * PAGE_SIZE) {
+ initrd_below_start_ok = 1;
+ *memory_start_p = PAGE_ALIGN (initrd_end);
+ }
+ }
+#endif
+
+ /* Due to stack alignment restrictions and assumptions... */
+ init_task.mm->mmap->vm_page_prot = PAGE_SHARED;
+ init_task.mm->mmap->vm_start = PAGE_OFFSET;
+ init_task.mm->mmap->vm_end = *memory_end_p;
+ init_task.mm->context = (unsigned long) NO_CONTEXT;
+ init_task.tss.kregs = &fake_swapper_regs;
+
+#ifdef CONFIG_SUN_SERIAL
+ *memory_start_p = sun_serial_setup(*memory_start_p); /* set this up ASAP */
+#endif
+ /* Choose the console: 0 = ask the PROM which I/O devices it uses,
+  * otherwise honour the console= override recorded in console_fb.
+  */
+ {
+ extern int serial_console; /* in console.c, of course */
+#if !CONFIG_SUN_SERIAL
+ serial_console = 0;
+#else
+ switch (console_fb) {
+ case 0: /* Let's get our io devices from prom */
+ {
+ int idev = prom_query_input_device();
+ int odev = prom_query_output_device();
+ if (idev == PROMDEV_IKBD && odev == PROMDEV_OSCREEN) {
+ serial_console = 0;
+ } else if (idev == PROMDEV_ITTYA && odev == PROMDEV_OTTYA) {
+ serial_console = 1;
+ } else if (idev == PROMDEV_ITTYB && odev == PROMDEV_OTTYB) {
+ serial_console = 2;
+ } else {
+ prom_printf("Inconsistent console\n");
+ prom_halt();
+ }
+ }
+ break;
+ case 1: serial_console = 0; break; /* Force one of the framebuffers as console */
+ case 2: serial_console = 1; break; /* Force ttya as console */
+ case 3: serial_console = 2; break; /* Force ttyb as console */
+ }
+#endif
+ }
+}
+
+/* x86-style I/O port permissions do not exist on sparc64. */
+asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
+{
+ return -EIO;
+}
+
+/* BUFFER is PAGE_SIZE bytes long. */
+
+extern char *sparc_cpu_type[];
+extern char *sparc_fpu_type[];
+
+extern char *smp_info(void);
+extern char *mmu_info(void);
+
+/* Produce the /proc/cpuinfo text into BUFFER (PAGE_SIZE bytes).
+ * Returns the number of characters written (sprintf return value).
+ * The SMP variant hard-codes four CPU BogoMIPS lines.
+ */
+int get_cpuinfo(char *buffer)
+{
+ int cpuid=get_cpuid();
+
+ return sprintf(buffer, "cpu\t\t: %s\n"
+ "fpu\t\t: %s\n"
+ "promlib\t\t: Version 3 Revision %d\n"
+ "prom\t\t: %d.%d.%d\n"
+ "type\t\t: sun4u\n"
+ "ncpus probed\t: %d\n"
+ "ncpus active\t: %d\n"
+#ifndef __SMP__
+ "BogoMips\t: %lu.%02lu\n"
+#else
+ "Cpu0Bogo\t: %lu.%02lu\n"
+ "Cpu1Bogo\t: %lu.%02lu\n"
+ "Cpu2Bogo\t: %lu.%02lu\n"
+ "Cpu3Bogo\t: %lu.%02lu\n"
+#endif
+ "%s"
+#ifdef __SMP__
+ "%s"
+#endif
+ ,
+ sparc_cpu_type[cpuid],
+ sparc_fpu_type[cpuid],
+ prom_rev, prom_prev >> 16, (prom_prev >> 8) & 0xff, prom_prev & 0xff,
+ linux_num_cpus, smp_num_cpus,
+#ifndef __SMP__
+ loops_per_sec/500000, (loops_per_sec/5000) % 100,
+#else
+ cpu_data[0].udelay_val/500000, (cpu_data[0].udelay_val/5000)%100,
+ cpu_data[1].udelay_val/500000, (cpu_data[1].udelay_val/5000)%100,
+ cpu_data[2].udelay_val/500000, (cpu_data[2].udelay_val/5000)%100,
+ cpu_data[3].udelay_val/500000, (cpu_data[3].udelay_val/5000)%100,
+#endif
+ mmu_info()
+#ifdef __SMP__
+ , smp_info()
+#endif
+ );
+
+}
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
new file mode 100644
index 000000000..e1129dfd6
--- /dev/null
+++ b/arch/sparc64/kernel/signal32.c
@@ -0,0 +1,806 @@
+/* $Id: signal32.c,v 1.6 1997/04/16 10:27:17 jj Exp $
+ * arch/sparc64/kernel/signal32.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/mm.h>
+
+#include <asm/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/ptrace.h>
+#include <asm/svr4.h>
+#include <asm/pgtable.h>
+#include <asm/psrcompat.h>
+#include <asm/fpumacro.h>
+#include <asm/smp_lock.h>
+
+#define _S(nr) (1<<((nr)-1))
+
+#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
+
+#define synchronize_user_stack() do { } while (0)
+
+asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
+ int options, unsigned long *ru);
+
+asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
+ unsigned long orig_o0, int ret_from_syscall);
+
+/* This turned off for production... */
+/* #define DEBUG_SIGNALS 1 */
+
+/* Signal frames: the original one (compatible with SunOS):
+ *
+ * Set up a signal frame... Make the stack look the way SunOS
+ * expects it to look which is basically:
+ *
+ * ---------------------------------- <-- %sp at signal time
+ * Struct sigcontext
+ * Signal address
+ * Ptr to sigcontext area above
+ * Signal code
+ * The signal number itself
+ * One register window
+ * ---------------------------------- <-- New %sp
+ */
+/* SunOS-compatible signal frame for 32-bit tasks; pointer fields are
+ * u32 because this frame lives on a 32-bit process's user stack.
+ */
+struct signal_sframe32 {
+ struct reg_window32 sig_window;
+ int sig_num;
+ int sig_code;
+ /* struct sigcontext32 * */ u32 sig_scptr;
+ int sig_address;
+ struct sigcontext32 sig_context;
+};
+
+/*
+ * And the new one, intended to be used for Linux applications only
+ * (we have enough in there to work with clone).
+ * All the interesting bits are in the info field.
+ */
+
+/* Linux-native 32-bit signal frame: register state in 'info', optional
+ * FPU state, and a two-instruction sigreturn trampoline in 'insns'.
+ */
+struct new_signal_frame32 {
+ struct sparc_stackf32 ss;
+ __siginfo32_t info;
+ /* __siginfo_fpu32_t * */ u32 fpu_save;
+ unsigned int insns [2];
+ __siginfo_fpu32_t fpu_state;
+};
+
+/* Align macros */
+#define SF_ALIGNEDSZ (((sizeof(struct signal_sframe32) + 7) & (~7)))
+#define NF_ALIGNEDSZ (((sizeof(struct new_signal_frame32) + 7) & (~7)))
+
+/*
+ * atomically swap in the new signal mask, and wait for a signal.
+ * This is really tricky on the Sparc, watch out...
+ * Loops in TASK_INTERRUPTIBLE until do_signal32 delivers a signal;
+ * the carry bit + EINTR in %i0 make the interrupted syscall return
+ * -EINTR to user space.
+ */
+asmlinkage void _sigpause32_common(unsigned int set, struct pt_regs *regs)
+{
+ unsigned int mask;
+
+ spin_lock_irq(&current->sigmask_lock);
+ mask = current->blocked;
+ current->blocked = set & _BLOCKABLE;
+ spin_unlock_irq(&current->sigmask_lock);
+
+ /* Advance past the trap instruction so we do not re-execute it. */
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+
+ /* Condition codes and return value where set here for sigpause,
+ * and so got used by setup_frame, which again causes sigreturn()
+ * to return -EINTR.
+ */
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ /*
+ * Return -EINTR and set condition code here,
+ * so the interrupted system call actually returns
+ * these.
+ */
+ regs->tstate |= TSTATE_ICARRY;
+ regs->u_regs[UREG_I0] = EINTR;
+ if (do_signal32(mask, regs, 0, 0))
+ return;
+ }
+}
+
+/* sigpause(2) entry point: the mask comes in as the first argument. */
+asmlinkage void do_sigpause32(unsigned int set, struct pt_regs *regs)
+{
+ _sigpause32_common(set, regs);
+}
+
+/* sigsuspend(2) entry point: the mask is fetched from user %o0. */
+asmlinkage void do_sigsuspend32(struct pt_regs *regs)
+{
+ _sigpause32_common(regs->u_regs[UREG_I0], regs);
+}
+
+
+/* Reload the task's FPU state from the user-space save area FPU.
+ * Clears TSTATE_PEF so the next FP use re-traps and reloads the
+ * hardware registers from tss.float_regs.
+ */
+static inline void
+restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu32_t *fpu)
+{
+#ifdef __SMP__
+ if (current->flags & PF_USEDFPU)
+ regs->tstate &= ~(TSTATE_PEF);
+#else
+ /* UP: drop ownership of the lazily-switched FPU if we hold it. */
+ if (current == last_task_used_math) {
+ last_task_used_math = 0;
+ regs->tstate &= ~(TSTATE_PEF);
+ }
+#endif
+ current->used_math = 1;
+ current->flags &= ~PF_USEDFPU;
+
+ copy_from_user(&current->tss.float_regs[0],
+ &fpu->si_float_regs[0],
+ (sizeof(unsigned int) * 32));
+ __get_user(current->tss.fsr, &fpu->si_fsr);
+}
+
+/* sigreturn for the Linux-native frame: validate the frame the handler
+ * ran on, then restore pc/npc, %y, the 16 saved registers, condition
+ * codes, optional FPU state and the signal mask.  Any inconsistency
+ * kills the task with SIGSEGV.
+ */
+void do_new_sigreturn32(struct pt_regs *regs)
+{
+ struct new_signal_frame32 *sf;
+ unsigned int psr, i;
+ unsigned pc, npc, fpu_save, mask;
+
+ sf = (struct new_signal_frame32 *) regs->u_regs [UREG_FP];
+ /* 1. Make sure we are not getting garbage from the user */
+ if (verify_area (VERIFY_READ, sf, sizeof (*sf))){
+ goto segv;
+ }
+ if (((unsigned long) sf) & 3){
+ goto segv;
+ }
+ get_user(pc, &sf->info.si_regs.pc);
+ __get_user(npc, &sf->info.si_regs.npc);
+ /* Instruction addresses must be 4-byte aligned. */
+ if ((pc | npc) & 3){
+ goto segv;
+ }
+ regs->tpc = pc;
+ regs->tnpc = npc;
+
+ /* 2. Restore the state */
+ __get_user(regs->y, &sf->info.si_regs.y);
+ __get_user(psr, &sf->info.si_regs.psr);
+ for (i = 0; i < 16; i++)
+ __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+
+ /* User can only change condition codes and FPU enabling in %tstate. */
+ regs->tstate &= ~(TSTATE_ICC | TSTATE_PEF);
+ regs->tstate |= psr_to_tstate_icc(psr);
+ if (psr & PSR_EF) regs->tstate |= TSTATE_PEF;
+
+ __get_user(fpu_save, &sf->fpu_save);
+ if (fpu_save)
+ restore_fpu_state32(regs, &sf->fpu_state);
+ __get_user(mask, &sf->info.si_mask);
+ current->blocked = mask & _BLOCKABLE;
+ return;
+segv:
+ lock_kernel();
+ do_exit(SIGSEGV);
+}
+
+/* sigreturn for the SunOS-compatible frame.  Dispatches to
+ * do_new_sigreturn32 for tasks using the new frame layout, otherwise
+ * restores state from the sigcontext32 pointed to by %o0.
+ */
+asmlinkage void do_sigreturn32(struct pt_regs *regs)
+{
+ struct sigcontext32 *scptr;
+ unsigned pc, npc, psr;
+ unsigned long mask;
+
+ synchronize_user_stack();
+ if (current->tss.new_signal)
+ return do_new_sigreturn32(regs);
+
+ scptr = (struct sigcontext32 *) regs->u_regs[UREG_I0];
+ /* Check sanity of the user arg. */
+ if(verify_area(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
+ (((unsigned long) scptr) & 3)) {
+ goto segv;
+ }
+ __get_user(pc, &scptr->sigc_pc);
+ __get_user(npc, &scptr->sigc_npc);
+ if((pc | npc) & 3)
+ goto segv; /* Nice try. */
+
+ __get_user(mask, &scptr->sigc_mask);
+ current->blocked = (mask & _BLOCKABLE);
+ __get_user(current->tss.sstk_info.cur_status, &scptr->sigc_onstack);
+ current->tss.sstk_info.cur_status &= 1;
+ regs->tpc = pc;
+ regs->tnpc = npc;
+ __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
+ __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
+ __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
+
+ /* User can only change condition codes in %tstate. */
+ __get_user(psr, &scptr->sigc_psr);
+ regs->tstate &= ~(TSTATE_ICC);
+ regs->tstate |= psr_to_tstate_icc(psr);
+ return;
+segv:
+ lock_kernel ();
+ do_exit (SIGSEGV);
+}
+
+/* Checks if the fp is valid: must be 8-byte aligned and the whole
+ * FPLEN-byte frame must fit below the 4GB limit of a 32-bit task.
+ * Returns 1 if the pointer is unusable, 0 if OK.
+ */
+static int invalid_frame_pointer(void *fp, int fplen)
+{
+ if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
+ return 1;
+ return 0;
+}
+
+/* Build a SunOS-compatible signal frame on the 32-bit user stack and
+ * redirect the task to SA's handler.  The current register window is
+ * copied to the base of the new frame so the handler's caller context
+ * is preserved.
+ */
+static void
+setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
+ struct pt_regs *regs, int signr, unsigned long oldmask)
+{
+ struct signal_sframe32 *sframep;
+ struct sigcontext32 *sc;
+ int window = 0;
+ int old_status = current->tss.sstk_info.cur_status;
+ unsigned psr;
+ int i;
+ u32 temp;
+
+ synchronize_user_stack();
+ sframep = (struct signal_sframe32 *) regs->u_regs[UREG_FP];
+ sframep = (struct signal_sframe32 *) (((unsigned long) sframep)-SF_ALIGNEDSZ);
+ if (invalid_frame_pointer (sframep, sizeof(*sframep))){
+#ifdef DEBUG_SIGNALS /* fills up the console logs during crashme runs, yuck... */
+ printk("%s [%d]: User has trashed signal stack\n",
+ current->comm, current->pid);
+ printk("Sigstack ptr %p handler at pc<%016lx> for sig<%d>\n",
+ sframep, pc, signr);
+#endif
+ /* Don't change signal code and address, so that
+ * post mortem debuggers can have a look.
+ */
+ lock_kernel ();
+ do_exit(SIGILL);
+ }
+
+ sc = &sframep->sig_context;
+
+ /* We've already made sure frame pointer isn't in kernel space... */
+ __put_user(old_status, &sc->sigc_onstack);
+ __put_user(oldmask, &sc->sigc_mask);
+ __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
+ __put_user(pc, &sc->sigc_pc);
+ __put_user(npc, &sc->sigc_npc);
+ psr = tstate_to_psr (regs->tstate);
+ __put_user(psr, &sc->sigc_psr);
+ __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
+ __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
+ __put_user(current->tss.w_saved, &sc->sigc_oswins);
+#if 0
+/* w_saved is not currently used... */
+ if(current->tss.w_saved)
+ for(window = 0; window < current->tss.w_saved; window++) {
+ sc->sigc_spbuf[window] =
+ (char *)current->tss.rwbuf_stkptrs[window];
+ copy_to_user(&sc->sigc_wbuf[window],
+ &current->tss.reg_window[window],
+ sizeof(struct reg_window));
+ }
+ else
+#endif
+ /* Copy the caller's register window (16 x u32) into the frame. */
+ for (i = 0; i < 16; i++) {
+ get_user (temp, (((u32 *)(regs->u_regs[UREG_FP]))+i));
+ put_user (temp, (((u32 *)sframep)+i));
+ }
+
+ current->tss.w_saved = 0; /* So process is allowed to execute. */
+ __put_user(signr, &sframep->sig_num);
+ /* Fault-style signals also carry the trap code and faulting address. */
+ if(signr == SIGSEGV ||
+ signr == SIGILL ||
+ signr == SIGFPE ||
+ signr == SIGBUS ||
+ signr == SIGEMT) {
+ __put_user(current->tss.sig_desc, &sframep->sig_code);
+ __put_user(current->tss.sig_address, &sframep->sig_address);
+ } else {
+ __put_user(0, &sframep->sig_code);
+ __put_user(0, &sframep->sig_address);
+ }
+ /* NOTE(review): sig_scptr is a u32 slot but (u64)sc is stored --
+  * relies on the frame living below 4GB; confirm truncation is benign.
+  */
+ __put_user((u64)sc, &sframep->sig_scptr);
+ regs->u_regs[UREG_FP] = (unsigned long) sframep;
+ regs->tpc = (unsigned long) sa->sa_handler;
+ regs->tnpc = (regs->tpc + 4);
+}
+
+
+/* Dump the task's FPU state into the user-space save area FPU and
+ * disable FP (clear TSTATE_PEF) so state is reloaded after sigreturn.
+ */
+static inline void
+save_fpu_state32(struct pt_regs *regs, __siginfo_fpu32_t *fpu)
+{
+#ifdef __SMP__
+ if (current->flags & PF_USEDFPU) {
+ fpsave32(&current->tss.float_regs[0], &current->tss.fsr);
+ regs->tstate &= ~(TSTATE_PEF);
+ current->flags &= ~(PF_USEDFPU);
+ }
+#else
+ /* UP lazy-FPU: flush hardware registers only if we own the FPU. */
+ if (current == last_task_used_math) {
+ fpsave32((unsigned long *)&current->tss.float_regs[0], &current->tss.fsr);
+ last_task_used_math = 0;
+ regs->tstate &= ~(TSTATE_PEF);
+ }
+#endif
+ copy_to_user(&fpu->si_float_regs[0], &current->tss.float_regs[0],
+ (sizeof(unsigned int) * 32));
+ __put_user(current->tss.fsr, &fpu->si_fsr);
+ current->used_math = 0;
+}
+
+/* Build a Linux-native 32-bit signal frame: save registers (and FPU
+ * state if used), copy the caller's register window, write a two
+ * instruction sigreturn trampoline, and point the task at SA's handler.
+ */
+static inline void
+new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
+ int signo, unsigned long oldmask)
+{
+ struct new_signal_frame32 *sf;
+ int sigframe_size;
+ u32 psr, tmp;
+ int i;
+
+ /* 1. Make sure everything is clean */
+ synchronize_user_stack();
+ /* FPU area at the frame's tail is omitted if no FP was used. */
+ sigframe_size = NF_ALIGNEDSZ;
+ if (!current->used_math)
+ sigframe_size -= sizeof(__siginfo_fpu32_t);
+
+ sf = (struct new_signal_frame32 *)(regs->u_regs[UREG_FP] - sigframe_size);
+
+ if (invalid_frame_pointer (sf, sigframe_size)){
+ lock_kernel ();
+ do_exit(SIGILL);
+ }
+
+ if (current->tss.w_saved != 0){
+ printk ("%s[%d]: Invalid user stack frame for "
+ "signal delivery.\n", current->comm, current->pid);
+ lock_kernel ();
+ do_exit (SIGILL);
+ }
+
+ /* 2. Save the current process state */
+ put_user(regs->tpc, &sf->info.si_regs.pc);
+ __put_user(regs->tnpc, &sf->info.si_regs.npc);
+ __put_user(regs->y, &sf->info.si_regs.y);
+ psr = tstate_to_psr (regs->tstate);
+ __put_user(psr, &sf->info.si_regs.psr);
+ for (i = 0; i < 16; i++)
+ __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
+
+ if (current->used_math) {
+ save_fpu_state32(regs, &sf->fpu_state);
+ /* NOTE(review): fpu_save is u32; the (u64) address is truncated
+  * -- fine only because the frame sits below 4GB.
+  */
+ __put_user((u64)&sf->fpu_state, &sf->fpu_save);
+ } else {
+ __put_user(0, &sf->fpu_save);
+ }
+
+ __put_user(oldmask, &sf->info.si_mask);
+ /* Copy the caller's register window to the base of the new frame. */
+ for (i = 0; i < sizeof(struct reg_window32)/4; i++) {
+ __get_user(tmp, (((u32 *)regs->u_regs[UREG_FP])+i));
+ __put_user(tmp, (((u32 *)sf)+i));
+ }
+
+ /* 3. return to kernel instructions */
+ __put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
+ __put_user(0x91d02010, &sf->insns[1]); /* t 0x10 */
+
+ /* 4. signal handler back-trampoline and parameters */
+ regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_I0] = signo;
+ regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
+ /* %i7 = insns - 8 bytes, so the handler's 'ret' (addr+8) hits insns[0]. */
+ regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
+
+ /* 5. signal handler */
+ regs->tpc = (unsigned long) sa->sa_handler;
+ regs->tnpc = (regs->tpc + 4);
+
+ /* Flush instruction space. */
+ __asm__ __volatile__ ("flush %0; flush %0 + 4" : : "r" (&(sf->insns[0])));
+
+}
+
+/* Setup a Solaris (SVR4) stack frame: siginfo + ucontext + gwindows on
+ * the user stack, registers stored into the mcontext gregset, then the
+ * task is redirected to SA's handler with the SVR4 argument convention.
+ */
+static inline void
+setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
+ struct pt_regs *regs, int signr, unsigned long oldmask)
+{
+ svr4_signal_frame_t *sfp;
+ svr4_gregset_t *gr;
+ svr4_siginfo_t *si;
+ svr4_mcontext_t *mc;
+ svr4_gwindows_t *gw;
+ svr4_ucontext_t *uc;
+ int window = 0;
+ unsigned psr;
+ int i;
+
+ synchronize_user_stack();
+ /* NOTE(review): pointer arithmetic -- '- REGWIN_SZ' subtracts
+  * REGWIN_SZ * sizeof(*sfp) bytes, not REGWIN_SZ bytes; confirm this
+  * over-reservation is intentional.
+  */
+ sfp = (svr4_signal_frame_t *) regs->u_regs[UREG_FP] - REGWIN_SZ;
+ sfp = (svr4_signal_frame_t *) (((unsigned long) sfp)-SVR4_SF_ALIGNED);
+
+ if (invalid_frame_pointer (sfp, sizeof (*sfp))){
+#ifdef DEBUG_SIGNALS
+ printk ("Invalid stack frame\n");
+#endif
+ lock_kernel ();
+ do_exit(SIGILL);
+ }
+
+ /* Start with a clean frame pointer and fill it */
+ clear_user(sfp, sizeof (*sfp));
+
+ /* Setup convenience variables */
+ si = &sfp->si;
+ uc = &sfp->uc;
+ gw = &sfp->gw;
+ mc = &uc->mcontext;
+ gr = &mc->greg;
+
+ /* FIXME: where am I supposed to put this?
+ * sc->sigc_onstack = old_status;
+ * anyways, it does not look like it is used for anything at all.
+ */
+ __put_user(oldmask, &uc->sigmask.sigbits [0]);
+
+ /* Store registers */
+ __put_user(regs->tpc, &((*gr) [SVR4_PC]));
+ __put_user(regs->tnpc, &((*gr) [SVR4_NPC]));
+ psr = tstate_to_psr (regs->tstate);
+ __put_user(psr, &((*gr) [SVR4_PSR]));
+ __put_user(regs->y, &((*gr) [SVR4_Y]));
+
+ /* Copy g [1..7] and o [0..7] registers */
+ for (i = 0; i < 7; i++)
+ __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+ for (i = 0; i < 8; i++)
+ __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+ /* Setup sigaltstack, FIXME */
+ __put_user(0xdeadbeef, &uc->stack.sp);
+ __put_user(0, &uc->stack.size);
+ __put_user(0, &uc->stack.flags); /* Possible: ONSTACK, DISABLE */
+
+ /* Save the currently window file: */
+
+ /* 1. Link sfp->uc->gwins to our windows */
+ __put_user(gw, &mc->gwin);
+
+ /* 2. Number of windows to restore at setcontext (): */
+ __put_user(current->tss.w_saved, &gw->count);
+
+ /* 3. Save each valid window
+ * Currently, it makes a copy of the windows from the kernel copy.
+ * David's code for SunOS, makes the copy but keeps the pointer to
+ * the kernel. My version makes the pointer point to a userland
+ * copy of those. Mhm, I wonder if I shouldn't just ignore those
+ * on setcontext and use those that are on the kernel, the signal
+ * handler should not be modyfing those, mhm.
+ *
+ * These windows are just used in case synchronize_user_stack failed
+ * to flush the user windows.
+ */
+#if 0
+ for(window = 0; window < current->tss.w_saved; window++) {
+ __put_user((int *) &(gw->win [window]), &gw->winptr [window]);
+ copy_to_user(&gw->win [window], &current->tss.reg_window [window], sizeof (svr4_rwindow_t));
+ __put_user(0, gw->winptr [window]);
+ }
+#endif
+
+ /* 4. We just pay attention to the gw->count field on setcontext */
+ current->tss.w_saved = 0; /* So process is allowed to execute. */
+
+ /* Setup the signal information. Solaris expects a bunch of
+ * information to be passed to the signal handler, we don't provide
+ * that much currently, should use those that David already
+ * is providing with tss.sig_desc
+ */
+ __put_user(signr, &si->siginfo.signo);
+ __put_user(SVR4_SINOINFO, &si->siginfo.code);
+
+ regs->u_regs[UREG_FP] = (unsigned long) sfp;
+ regs->tpc = (unsigned long) sa->sa_handler;
+ regs->tnpc = (regs->tpc + 4);
+
+#ifdef DEBUG_SIGNALS
+ printk ("Solaris-frame: %x %x\n", (int) regs->tpc, (int) regs->tnpc);
+#endif
+ /* Arguments passed to signal handler */
+ /* NOTE(review): (u64) values stored into 32-bit ins[] slots --
+  * relies on the frame living below 4GB for the truncation to be safe.
+  */
+ if (regs->u_regs [14]){
+ struct reg_window32 *rw = (struct reg_window32 *) regs->u_regs [14];
+
+ __put_user(signr, &rw->ins [0]);
+ __put_user((u64)si, &rw->ins [1]);
+ __put_user((u64)uc, &rw->ins [2]);
+ __put_user((u64)sfp, &rw->ins [6]); /* frame pointer */
+ regs->u_regs[UREG_I0] = signr;
+ regs->u_regs[UREG_I1] = (u32)(u64) si;
+ regs->u_regs[UREG_I2] = (u32)(u64) uc;
+ }
+}
+
+/* SVR4 getcontext: store the current register context into the
+ * user-supplied ucontext UC.  Returns 0 on success, -EFAULT if the
+ * user buffer cannot be cleared.
+ */
+asmlinkage int
+svr4_getcontext32(svr4_ucontext_t *uc, struct pt_regs *regs)
+{
+ svr4_gregset_t *gr;
+ svr4_mcontext_t *mc;
+ int i;
+
+ synchronize_user_stack();
+ if (current->tss.w_saved){
+ printk ("Uh oh, w_saved is not zero (%ld)\n", current->tss.w_saved);
+ do_exit (SIGSEGV);
+ }
+ if(clear_user(uc, sizeof (*uc)))
+ return -EFAULT;
+
+ /* Setup convenience variables */
+ mc = &uc->mcontext;
+ gr = &mc->greg;
+
+ /* We only have < 32 signals, fill the first slot only */
+ __put_user(current->blocked, &uc->sigmask.sigbits [0]);
+
+ /* Store registers */
+ __put_user(regs->tpc, &uc->mcontext.greg [SVR4_PC]);
+ __put_user(regs->tnpc, &uc->mcontext.greg [SVR4_NPC]);
+ __put_user(tstate_to_psr(regs->tstate), &uc->mcontext.greg [SVR4_PSR]);
+ __put_user(regs->y, &uc->mcontext.greg [SVR4_Y]);
+
+ /* Copy g [1..7] and o [0..7] registers */
+ for (i = 0; i < 7; i++)
+ __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+ for (i = 0; i < 8; i++)
+ __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+ /* Setup sigaltstack, FIXME */
+ __put_user(0xdeadbeef, &uc->stack.sp);
+ __put_user(0, &uc->stack.size);
+ __put_user(0, &uc->stack.flags); /* Possible: ONSTACK, DISABLE */
+
+ /* The register file is not saved
+ * we have already stuffed all of it with sync_user_stack
+ */
+ return 0;
+}
+
+
+/* Set the context for a svr4 application, this is Solaris way to sigreturn.
+ * Validates the user-supplied ucontext C (alignment, accessibility,
+ * aligned PC/nPC), then loads the signal mask, PC/nPC, %y, condition
+ * codes and the g[1..7]/o[0..7] registers from it into REGS.
+ * Returns -EINTR, the conventional result of an interrupted syscall.
+ * Any validation failure kills the task with SIGSEGV.
+ *
+ * BUGFIX: the register-restore loops below previously used __put_user,
+ * i.e. they stored the *current* kernel register values out into the
+ * user's ucontext instead of loading the saved values from it -- so
+ * setcontext never actually restored g/o registers and corrupted the
+ * caller's context structure.  They must be __get_user (user -> regs),
+ * mirroring the 32-bit sparc svr4_setcontext.
+ */
+asmlinkage int svr4_setcontext32(svr4_ucontext_t *c, struct pt_regs *regs)
+{
+ struct thread_struct *tp = &current->tss;
+ svr4_gregset_t *gr;
+ u32 pc, npc, psr;
+ int i;
+
+ /* Fixme: restore windows, or is this already taken care of in
+ * svr4_setup_frame when sync_user_windows is done?
+ */
+ flush_user_windows();
+
+ if (tp->w_saved){
+ printk ("Uh oh, w_saved is: 0x%lx\n", tp->w_saved);
+ do_exit(SIGSEGV);
+ }
+ if (((unsigned long) c) & 3){
+ printk ("Unaligned structure passed\n");
+ do_exit (SIGSEGV);
+ }
+
+ if(!__access_ok((unsigned long)c, sizeof(*c))) {
+ /* Miguel, add nice debugging msg _here_. ;-) */
+ do_exit(SIGSEGV);
+ }
+
+ /* Check for valid PC and nPC */
+ gr = &c->mcontext.greg;
+ __get_user(pc, &((*gr)[SVR4_PC]));
+ __get_user(npc, &((*gr)[SVR4_NPC]));
+ if((pc | npc) & 3) {
+ printk ("setcontext, PC or nPC were bogus\n");
+ do_exit (SIGSEGV);
+ }
+ /* Retrieve information from passed ucontext */
+ /* note that nPC is ored a 1, this is used to inform entry.S */
+ /* that we don't want it to mess with our PC and nPC */
+ __get_user(current->blocked, &c->sigmask.sigbits [0]);
+ current->blocked &= _BLOCKABLE;
+ regs->tpc = pc;
+ regs->tnpc = npc | 1;
+ __get_user(regs->y, &((*gr) [SVR4_Y]));
+ /* User may only change the condition codes within %tstate. */
+ __get_user(psr, &((*gr) [SVR4_PSR]));
+ regs->tstate &= ~(TSTATE_ICC);
+ regs->tstate |= psr_to_tstate_icc(psr);
+
+ /* Restore g[1..7] and o[0..7] registers (load FROM the ucontext) */
+ for (i = 0; i < 7; i++)
+ __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+ for (i = 0; i < 8; i++)
+ __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+
+ return -EINTR;
+}
+
+/* Deliver one signal to a 32-bit task: choose the signal frame format
+ * (SVR4, new-style or old-style) and push it, then apply the
+ * SA_ONESHOT and SA_NOMASK sigaction semantics.
+ */
+static inline void handle_signal32(unsigned long signr, struct sigaction *sa,
+				   unsigned long oldmask, struct pt_regs *regs,
+				   int svr4_signal)
+{
+	if(svr4_signal)
+		setup_svr4_frame32(sa, regs->tpc, regs->tnpc, regs, signr, oldmask);
+	else {
+		/* new_signal distinguishes new-style from old-style frames. */
+		if (current->tss.new_signal)
+			new_setup_frame32(sa, regs, signr, oldmask);
+		else
+			setup_frame32(sa, regs->tpc, regs->tnpc, regs, signr, oldmask);
+	}
+	/* SA_ONESHOT: reset the handler after one delivery. */
+	if(sa->sa_flags & SA_ONESHOT)
+		sa->sa_handler = NULL;
+	/* Unless SA_NOMASK, block this signal (and sa_mask) while the
+	 * handler runs.
+	 */
+	if(!(sa->sa_flags & SA_NOMASK))
+		current->blocked |= (sa->sa_mask | _S(signr)) & _BLOCKABLE;
+}
+
+/* Decide whether the interrupted system call is restarted after the
+ * signal handler returns.  regs->u_regs[UREG_I0] holds the (positive)
+ * ERESTART* code left by the interrupted syscall.
+ */
+static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
+				     struct sigaction *sa)
+{
+	switch(regs->u_regs[UREG_I0]) {
+	case ERESTARTNOHAND:
+	no_system_call_restart:
+		/* Fail the syscall with EINTR; the carry bit flags the
+		 * error return to userland.
+		 */
+		regs->u_regs[UREG_I0] = EINTR;
+		regs->tstate |= TSTATE_ICARRY;
+		break;
+	case ERESTARTSYS:
+		/* ERESTARTSYS restarts only when the handler asked for
+		 * SA_RESTART.
+		 */
+		if(!(sa->sa_flags & SA_RESTART))
+			goto no_system_call_restart;
+	/* fallthrough */
+	case ERESTARTNOINTR:
+		/* Re-issue: restore the original first argument and back
+		 * PC/nPC up one instruction so the trap re-executes.
+		 */
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+}
+
+/* Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+/* Main 32-bit signal dispatch loop.  Dequeues pending unblocked signals
+ * one at a time, consults ptrace and the sigaction table, and either
+ * ignores, stops, dumps/exits, or delivers via handle_signal32.
+ * Returns 1 if a handler frame was pushed, 0 otherwise.
+ */
+asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
+			   unsigned long orig_i0, int restart_syscall)
+{
+	unsigned long signr, mask = ~current->blocked;
+	struct sigaction *sa;
+	int svr4_signal = current->personality == PER_SVR4;
+
+	while ((signr = current->signal & mask) != 0) {
+		/* Lowest pending unblocked signal bit first. */
+		signr = ffz(~signr);
+		clear_bit(signr, &current->signal);
+		sa = current->sig->action + signr;
+		signr++;
+		/* Traced tasks stop here so the tracer can observe and
+		 * possibly rewrite the signal (via exit_code).
+		 */
+		if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+			current->exit_code = signr;
+			current->state = TASK_STOPPED;
+			notify_parent(current);
+			schedule();
+			if (!(signr = current->exit_code))
+				continue;
+			current->exit_code = 0;
+			if (signr == SIGSTOP)
+				continue;
+			/* Tracer substituted a blocked signal: requeue it. */
+			if (_S(signr) & current->blocked) {
+				current->signal |= _S(signr);
+				continue;
+			}
+			sa = current->sig->action + signr - 1;
+		}
+		if(sa->sa_handler == SIG_IGN) {
+			if(signr != SIGCHLD)
+				continue;
+
+			/* sys_wait4() grabs the master kernel lock, so
+			 * we need not do so, that sucker should be
+			 * threaded and would not be that difficult to
+			 * do anyways.
+			 */
+			while(sys_wait4(-1, NULL, WNOHANG, NULL) > 0)
+				;
+			continue;
+		}
+		if(sa->sa_handler == SIG_DFL) {
+			/* init never takes default-fatal signals. */
+			if(current->pid == 1)
+				continue;
+			switch(signr) {
+			case SIGCONT: case SIGCHLD: case SIGWINCH:
+				continue;
+
+			case SIGTSTP: case SIGTTIN: case SIGTTOU:
+				if (is_orphaned_pgrp(current->pgrp))
+					continue;
+				/* fall through to the stop logic below */
+
+			case SIGSTOP:
+				if (current->flags & PF_PTRACED)
+					continue;
+				current->state = TASK_STOPPED;
+				current->exit_code = signr;
+				if(!(current->p_pptr->sig->action[SIGCHLD-1].sa_flags &
+				     SA_NOCLDSTOP))
+					notify_parent(current);
+				schedule();
+				continue;
+
+			case SIGQUIT: case SIGILL: case SIGTRAP:
+			case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
+				/* Core-dumping signals: bit 0x80 in the exit
+				 * code records that a dump was written.
+				 */
+				if(current->binfmt && current->binfmt->core_dump) {
+					if(current->binfmt->core_dump(signr, regs))
+						signr |= 0x80;
+				}
+#ifdef DEBUG_SIGNALS
+				/* Very useful to debug dynamic linker problems */
+				printk ("Sig ILL going...\n");
+				show_regs (regs);
+#endif
+				/* fall through */
+			default:
+				current->signal |= _S(signr & 0x7f);
+				current->flags |= PF_SIGNALED;
+				do_exit(signr);
+			}
+		}
+		/* A real handler will run: fix up any interrupted syscall
+		 * first, then push the signal frame.
+		 */
+		if(restart_syscall)
+			syscall_restart32(orig_i0, regs, sa);
+		handle_signal32(signr, sa, oldmask, regs, svr4_signal);
+		return 1;
+	}
+	/* No handler ran: restart the interrupted syscall ourselves. */
+	if(restart_syscall &&
+	   (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
+	    regs->u_regs[UREG_I0] == ERESTARTSYS ||
+	    regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
+		/* replay the system call when we are done */
+		regs->u_regs[UREG_I0] = orig_i0;
+		regs->tpc -= 4;
+		regs->tnpc -= 4;
+	}
+	return 0;
+}
+
+/* 32-bit userland view of the SunOS-style sigstack argument of
+ * sys32_sigstack below.
+ */
+struct sigstack32 {
+	u32 the_stack;	/* 32-bit user address of the signal stack */
+	int cur_status;	/* presumably non-zero while on the signal stack -- TODO confirm */
+};
+
+/* sigstack system call for 32-bit tasks: optionally report the current
+ * signal-stack state to *ossptr and/or install new state from *ssptr.
+ * Returns 0 on success, -EFAULT on a bad user pointer.
+ */
+asmlinkage int
+sys32_sigstack(struct sigstack32 *ssptr, struct sigstack32 *ossptr)
+{
+	int ret = -EFAULT;
+
+	lock_kernel();
+	/* First see if old state is wanted. */
+	if(ossptr) {
+		if (put_user ((u64)current->tss.sstk_info.the_stack, &ossptr->the_stack) ||
+		    __put_user (current->tss.sstk_info.cur_status, &ossptr->cur_status))
+			goto out;
+	}
+
+	/* Now see if we want to update the new state. */
+	if(ssptr) {
+		u32 new_stack;
+
+		/* Installing new state means READING from userland: the
+		 * old code used get_user into a cast rvalue and __put_user
+		 * here, so the new state was never actually fetched.
+		 */
+		if (get_user (new_stack, &ssptr->the_stack) ||
+		    __get_user (current->tss.sstk_info.cur_status, &ssptr->cur_status))
+			goto out;
+		current->tss.sstk_info.the_stack =
+			(__typeof__(current->tss.sstk_info.the_stack)) (u64) new_stack;
+	}
+	ret = 0;
+out:
+	unlock_kernel();
+	return ret;
+}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
new file mode 100644
index 000000000..91426c814
--- /dev/null
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -0,0 +1,178 @@
+/* $Id: sparc64_ksyms.c,v 1.4 1997/04/14 17:04:43 jj Exp $
+ * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#define PROMLIB_INTERNAL
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+
+#include <asm/oplib.h>
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/idprom.h>
+#include <asm/svr4.h>
+#include <asm/head.h>
+#include <asm/smp.h>
+#include <asm/mostek.h>
+#include <asm/ptrace.h>
+#include <asm/user.h>
+#include <asm/uaccess.h>
+#ifdef CONFIG_SBUS
+#include <asm/sbus.h>
+#include <asm/dma.h>
+#endif
+#include <asm/a.out.h>
+
+/* Layout matching the userland pollfd triple (fd, events, revents).
+ * NOTE(review): nothing in this file references it -- apparently kept
+ * for type information only; verify before removing.
+ */
+struct poll {
+	int fd;
+	short events;
+	short revents;
+};
+
+extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
+extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
+extern unsigned long sunos_mmap(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+void _sigpause_common (unsigned int set, struct pt_regs *);
+extern void __copy_1page(void *, const void *);
+extern void *bzero_1page(void *);
+extern void *__bzero(void *, size_t);
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern int __memcmp(const void *, const void *, __kernel_size_t);
+extern int __strncmp(const char *, const char *, __kernel_size_t);
+extern unsigned int __csum_partial_copy_sparc_generic (const char *, char *);
+
+extern void bcopy (const char *, char *, int);
+extern int __ashrdi3(int, int);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+/* One thing to note is that the way the symbols of the mul/div
+ * support routines are named is a mess, they all start with
+ * a '.' which makes it a bitch to export, here is the trick:
+ */
+
+/* ANSI adjacent string-literal concatenation ("__" #sym) replaces the
+ * old "__" ## #sym form: the ## operator may not paste two string
+ * literals (a gcc-2.x-only extension), while adjacent literals
+ * concatenate automatically in standard C.
+ */
+#define EXPORT_SYMBOL_PRIVATE(sym) \
+extern int __sparc_priv_ ## sym (int) __asm__("__" #sym); \
+const struct module_symbol __export_priv_##sym \
+__attribute__((section("__ksymtab"))) = \
+{ (unsigned long) &__sparc_priv_ ## sym, "__" #sym }
+
+/* used by various drivers */
+#ifdef __SMP__
+EXPORT_SYMBOL(klock_info);
+#endif
+EXPORT_SYMBOL_PRIVATE(_lock_kernel);
+EXPORT_SYMBOL_PRIVATE(_unlock_kernel);
+
+EXPORT_SYMBOL(mstk48t02_regs);
+EXPORT_SYMBOL(request_fast_irq);
+EXPORT_SYMBOL(sparc_alloc_io);
+EXPORT_SYMBOL(sparc_free_io);
+#if 0
+EXPORT_SYMBOL(io_remap_page_range);
+EXPORT_SYMBOL(mmu_unlockarea);
+EXPORT_SYMBOL(mmu_lockarea);
+EXPORT_SYMBOL(mmu_get_scsi_sgl);
+EXPORT_SYMBOL(mmu_get_scsi_one);
+EXPORT_SYMBOL(mmu_release_scsi_sgl);
+EXPORT_SYMBOL(mmu_release_scsi_one);
+#endif
+EXPORT_SYMBOL(sparc_dvma_malloc);
+#if 0
+EXPORT_SYMBOL(sun4c_unmapioaddr);
+EXPORT_SYMBOL(srmmu_unmapioaddr);
+#endif
+#if CONFIG_SBUS
+EXPORT_SYMBOL(SBus_chain);
+EXPORT_SYMBOL(dma_chain);
+#endif
+
+/* Solaris/SunOS binary compatibility */
+EXPORT_SYMBOL(svr4_setcontext);
+EXPORT_SYMBOL(svr4_getcontext);
+EXPORT_SYMBOL(_sigpause_common);
+EXPORT_SYMBOL(sunos_mmap);
+
+/* Should really be in linux/kernel/ksyms.c */
+EXPORT_SYMBOL(dump_thread);
+
+/* prom symbols */
+EXPORT_SYMBOL(idprom);
+EXPORT_SYMBOL(prom_root_node);
+EXPORT_SYMBOL(prom_getchild);
+EXPORT_SYMBOL(prom_getsibling);
+EXPORT_SYMBOL(prom_searchsiblings);
+EXPORT_SYMBOL(prom_firstprop);
+EXPORT_SYMBOL(prom_nextprop);
+EXPORT_SYMBOL(prom_getproplen);
+EXPORT_SYMBOL(prom_getproperty);
+EXPORT_SYMBOL(prom_node_has_property);
+EXPORT_SYMBOL(prom_setprop);
+EXPORT_SYMBOL(prom_getbootargs);
+EXPORT_SYMBOL(prom_getname);
+EXPORT_SYMBOL(prom_feval);
+EXPORT_SYMBOL(prom_getstring);
+EXPORT_SYMBOL(prom_apply_sbus_ranges);
+EXPORT_SYMBOL(prom_getint);
+EXPORT_SYMBOL(prom_getintdefault);
+EXPORT_SYMBOL(__prom_getchild);
+EXPORT_SYMBOL(__prom_getsibling);
+
+/* sparc library symbols */
+EXPORT_SYMBOL(bcopy);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strtok);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strspn);
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(__copy_1page);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(bzero_1page);
+EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__memscan_zero);
+EXPORT_SYMBOL(__memscan_generic);
+EXPORT_SYMBOL(__memcmp);
+EXPORT_SYMBOL(__strncmp);
+EXPORT_SYMBOL(__memmove);
+
+EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
+
+/* Moving data to/from userspace. */
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/* No version information on this, heavily used in inline asm,
+ * and will always be 'void __ret_efault(void)'.
+ */
+EXPORT_SYMBOL_NOVERS(__ret_efault);
+
+/* No version information on these, as gcc produces such symbols. */
+EXPORT_SYMBOL_NOVERS(memcmp);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memset);
+EXPORT_SYMBOL_NOVERS(memmove);
diff --git a/arch/sparc64/kernel/sparcelf32.c b/arch/sparc64/kernel/sparcelf32.c
new file mode 100644
index 000000000..855c636e4
--- /dev/null
+++ b/arch/sparc64/kernel/sparcelf32.c
@@ -0,0 +1,1281 @@
+/* sparcelf32.c: Support 32-bit Sparc ELF binaries on Ultra.
+ *
+ * This is just binfmt_elf.c with hooks so that the types are those
+ * for a 32-bit ELF binaries.
+ */
+
+/* This makes it work. */
+#define ELF_ARCH	EM_SPARC
+#define ELF_CLASS	ELFCLASS32
+#define ELF_DATA	ELFDATA2MSB	/* no trailing ';' -- the macro is used inside expressions */
+
+/*
+ * linux/fs/binfmt_elf.c
+ *
+ * These are the functions used to load ELF format executables as used
+ * on SVr4 machines. Information on the format may be found in the book
+ * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
+ * Tools".
+ *
+ * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/stat.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/binfmts.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/malloc.h>
+#include <linux/shm.h>
+#include <linux/personality.h>
+#include <linux/elfcore.h>
+#include <linux/init.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include <linux/config.h>
+
+#define DLINFO_ITEMS 12
+
+#include <linux/elf.h>
+
+static int load_elf32_binary(struct linux_binprm * bprm, struct pt_regs * regs);
+static int load_elf32_library(int fd);
+extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);
+extern void dump_thread(struct pt_regs *, struct user *);
+
+extern unsigned long get_unmapped_area(unsigned long addr, unsigned long len);
+
+/*
+ * If we don't support core dumping, then supply a NULL so we
+ * don't even try.
+ */
+#ifdef USE_ELF_CORE_DUMP
+static int elf32_core_dump(long signr, struct pt_regs * regs);
+#else
+#define elf32_core_dump NULL
+#endif
+
+#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_EXEC_PAGESIZE-1))
+#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_EXEC_PAGESIZE-1))
+
+/* Registration record for the 32-bit ELF loader: load/library/core-dump
+ * entry points, with the module owner filled in only for modular builds.
+ */
+static struct linux_binfmt elf32_format = {
+#ifndef MODULE
+	NULL, NULL, load_elf32_binary, load_elf32_library, elf32_core_dump
+#else
+	NULL, &__this_module, load_elf32_binary, load_elf32_library, elf32_core_dump
+#endif
+};
+
+/* Map anonymous zero pages covering [start, end) -- used to provide the
+ * bss and brk area.  Both ends are page-aligned first; an empty or
+ * inverted range is a no-op.
+ */
+static void set_brk(unsigned long start, unsigned long end)
+{
+	start = PAGE_ALIGN(start);
+	end = PAGE_ALIGN(end);
+	if (end <= start)
+		return;
+	do_mmap(NULL, start, end - start,
+		PROT_READ | PROT_WRITE | PROT_EXEC,
+		MAP_FIXED | MAP_PRIVATE, 0);
+}
+
+
+/* We need to explicitly zero any fractional pages
+ after the data section (i.e. bss). This would
+ contain the junk from the file that should not
+ be in memory */
+
+
+/* Zero the tail of the last file-backed page after elf_bss, so junk
+ * mapped in from the file does not leak into the bss.
+ */
+static void padzero(unsigned long elf_bss)
+{
+	unsigned long nbyte;
+
+	nbyte = elf_bss & (PAGE_SIZE-1);
+	if (nbyte) {
+		nbyte = PAGE_SIZE - nbyte;
+		clear_user((void *) elf_bss, nbyte);
+	}
+}
+
+/* Build the 32-bit argc/argv/envp/auxv layout on the new user stack.
+ * p points at the argument strings already copied in; exec selects
+ * whether the full DLINFO aux vector is emitted; ibcs selects the SVR4
+ * layout (no extra argv/envp pointer pair pushed).  Returns the final
+ * 32-bit stack pointer.
+ */
+__u32 *create_elf32_tables(char *p, int argc, int envc,
+			   struct elfhdr * exec,
+			   unsigned long load_addr,
+			   unsigned long interp_load_addr, int ibcs)
+{
+	__u32 *argv, *envp;
+	__u32 *sp, *csp;
+
+	/*
+	 * Force 16 byte alignment here for generality.
+	 */
+	sp = (__u32 *) (~15UL & (unsigned long) p);
+	/* csp simulates the final stack depth so we can decide whether one
+	 * pad word keeps the eventual sp 8-byte aligned.
+	 */
+	csp = sp;
+	csp -= exec ? DLINFO_ITEMS*2 : 2;
+	csp -= envc+1;
+	csp -= argc+1;
+	if (!(((unsigned long) csp) & 4))
+		sp--;
+
+	/*
+	 * Put the ELF interpreter info on the stack
+	 */
+#define NEW_AUX_ENT(nr, id, val) \
+	  __put_user ((id), sp+(nr*2)); \
+	  __put_user ((val), sp+(nr*2+1)); \
+
+	sp -= 2;
+	NEW_AUX_ENT(0, AT_NULL, 0);
+
+	if (exec) {
+		sp -= 11*2;
+
+		NEW_AUX_ENT (0, AT_PHDR, load_addr + exec->e_phoff);
+		NEW_AUX_ENT (1, AT_PHENT, sizeof (struct elf_phdr));
+		NEW_AUX_ENT (2, AT_PHNUM, exec->e_phnum);
+		NEW_AUX_ENT (3, AT_PAGESZ, PAGE_SIZE);
+		NEW_AUX_ENT (4, AT_BASE, interp_load_addr);
+		NEW_AUX_ENT (5, AT_FLAGS, 0);
+		NEW_AUX_ENT (6, AT_ENTRY, (__u32) exec->e_entry);
+		NEW_AUX_ENT (7, AT_UID, (__u32) current->uid);
+		NEW_AUX_ENT (8, AT_EUID, (__u32) current->euid);
+		NEW_AUX_ENT (9, AT_GID, (__u32) current->gid);
+		NEW_AUX_ENT (10, AT_EGID, (__u32) current->egid);
+	}
+#undef NEW_AUX_ENT
+
+	sp -= envc+1;
+	envp = (__u32 *) sp;
+	sp -= argc+1;
+	argv = (__u32 *) sp;
+	if (!ibcs) {
+		__put_user(((__u32)(long) envp),--sp);
+		__put_user(((__u32)(long) argv),--sp);
+	}
+
+	__put_user((__u32)argc,--sp);
+	current->mm->arg_start = (unsigned long) p;
+	while (argc-->0) {
+		__put_user(((__u32)(long)p),argv++);
+		p += strlen_user(p);
+	}
+	/* NOTE(review): NULL (a pointer constant) stored into a 32-bit
+	 * slot here and below; a plain 0 would be cleaner.
+	 */
+	__put_user(NULL, argv);
+	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+	while (envc-->0) {
+		__put_user(((__u32)(long)p),envp++);
+		p += strlen_user(p);
+	}
+	__put_user(NULL, envp);
+	current->mm->env_end = (unsigned long) p;
+	return sp;
+}
+
+
+/* This is much more generalized than the library routine read function,
+ so we keep this separate. Technically the library read function
+ is only provided so that we can read a.out libraries that have
+ an ELF header */
+
+/* Map a 32-bit ELF interpreter (dynamic linker) into the new image.
+ * On success returns its entry point and sets *interp_load_addr; on
+ * failure returns ~0UL.
+ * NOTE(review): the read_exec failure path returns the negative retval
+ * instead of ~0UL, while the caller tests only for ~0UL -- confirm this
+ * is benign before relying on it.
+ */
+static unsigned long load_elf32_interp(struct elfhdr * interp_elf_ex,
+				       struct inode * interpreter_inode,
+				       unsigned long *interp_load_addr)
+{
+	struct file * file;
+	struct elf_phdr *elf_phdata = NULL;
+	struct elf_phdr *eppnt;
+	unsigned long load_addr;
+	int load_addr_set = 0;
+	int elf_exec_fileno;
+	int retval;
+	unsigned long last_bss, elf_bss;
+	unsigned long error;
+	int i;
+
+	elf_bss = 0;
+	last_bss = 0;
+	error = load_addr = 0;
+
+	/* First of all, some simple consistency checks */
+	if ((interp_elf_ex->e_type != ET_EXEC &&
+	     interp_elf_ex->e_type != ET_DYN) ||
+	    !elf_check_arch(interp_elf_ex->e_machine) ||
+	    (!interpreter_inode->i_op ||
+	     !interpreter_inode->i_op->default_file_ops->mmap)){
+		return ~0UL;
+	}
+
+	/* Now read in all of the header information */
+
+	if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE)
+		return ~0UL;
+
+	elf_phdata = (struct elf_phdr *)
+		kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
+			GFP_KERNEL);
+	if (!elf_phdata)
+		return ~0UL;
+
+	/*
+	 * If the size of this structure has changed, then punt, since
+	 * we will be doing the wrong thing.
+	 */
+	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
+	{
+		kfree(elf_phdata);
+		return ~0UL;
+	}
+
+	retval = read_exec(interpreter_inode, interp_elf_ex->e_phoff,
+			   (char *) elf_phdata,
+			   sizeof(struct elf_phdr) * interp_elf_ex->e_phnum, 1);
+
+	if (retval < 0) {
+		kfree (elf_phdata);
+		return retval;
+	}
+
+	elf_exec_fileno = open_inode(interpreter_inode, O_RDONLY);
+	if (elf_exec_fileno < 0) {
+		kfree(elf_phdata);
+		return ~0UL;
+	}
+
+	file = current->files->fd[elf_exec_fileno];
+
+	/* Map each PT_LOAD segment; ET_DYN interpreters get a base picked
+	 * by get_unmapped_area on the first segment, then fixed mappings.
+	 */
+	eppnt = elf_phdata;
+	for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
+		if (eppnt->p_type == PT_LOAD) {
+			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
+			int elf_prot = 0;
+			unsigned long vaddr = 0;
+			unsigned long k;
+
+			if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
+			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
+			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
+				elf_type |= MAP_FIXED;
+				vaddr = eppnt->p_vaddr;
+			} else {
+				load_addr = get_unmapped_area(0, eppnt->p_filesz +
+							      ELF_PAGEOFFSET(eppnt->p_vaddr));
+			}
+
+			error = do_mmap(file,
+					load_addr + ELF_PAGESTART(vaddr),
+					eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr),
+					elf_prot,
+					elf_type,
+					eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
+
+			if (error > -1024UL) {
+				/* Real error */
+				sys_close(elf_exec_fileno);
+				kfree(elf_phdata);
+				return ~0UL;
+			}
+
+			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
+				load_addr = error;
+				load_addr_set = 1;
+			}
+
+			/*
+			 * Find the end of the file mapping for this phdr, and keep
+			 * track of the largest address we see for this.
+			 */
+			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
+			if (k > elf_bss) elf_bss = k;
+
+			/*
+			 * Do the same thing for the memory mapping - between
+			 * elf_bss and last_bss is the bss section.
+			 */
+			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+			if (k > last_bss) last_bss = k;
+		}
+
+	/* Now use mmap to map the library into memory. */
+
+	sys_close(elf_exec_fileno);
+
+	/*
+	 * Now fill out the bss section.  First pad the last page up
+	 * to the page boundary, and then perform a mmap to make sure
+	 * that there are zeromapped pages up to and including the last
+	 * bss page.
+	 */
+	padzero(elf_bss);
+	elf_bss = ELF_PAGESTART(elf_bss + ELF_EXEC_PAGESIZE - 1); /* What we have mapped so far */
+
+	/* Map the last of the bss segment */
+	if (last_bss > elf_bss)
+		do_mmap(NULL, elf_bss, last_bss-elf_bss,
+			PROT_READ|PROT_WRITE|PROT_EXEC,
+			MAP_FIXED|MAP_PRIVATE, 0);
+	kfree(elf_phdata);
+
+	*interp_load_addr = load_addr;
+	return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
+}
+
+/* Map a 32-bit a.out interpreter (OMAGIC/ZMAGIC/QMAGIC) into the new
+ * image and set the mm text/data/brk bounds from its header.  Returns
+ * the interpreter entry point, or ~0UL on failure.
+ */
+static unsigned long load_aout32_interp(struct exec * interp_ex,
+					struct inode * interpreter_inode)
+{
+	int retval;
+	unsigned long elf_entry;
+
+	current->mm->brk = interp_ex->a_bss +
+		(current->mm->end_data = interp_ex->a_data +
+		 (current->mm->end_code = interp_ex->a_text));
+	elf_entry = interp_ex->a_entry;
+
+
+	if (N_MAGIC(*interp_ex) == OMAGIC) {
+		/* OMAGIC: text+data read as one blob from offset 32. */
+		do_mmap(NULL, 0, interp_ex->a_text+interp_ex->a_data,
+			PROT_READ|PROT_WRITE|PROT_EXEC,
+			MAP_FIXED|MAP_PRIVATE, 0);
+		retval = read_exec(interpreter_inode, 32, (char *) 0,
+				   interp_ex->a_text+interp_ex->a_data, 0);
+	} else if (N_MAGIC(*interp_ex) == ZMAGIC || N_MAGIC(*interp_ex) == QMAGIC) {
+		do_mmap(NULL, 0, interp_ex->a_text+interp_ex->a_data,
+			PROT_READ|PROT_WRITE|PROT_EXEC,
+			MAP_FIXED|MAP_PRIVATE, 0);
+		retval = read_exec(interpreter_inode,
+				   N_TXTOFF(*interp_ex) ,
+				   (char *) N_TXTADDR(*interp_ex),
+				   interp_ex->a_text+interp_ex->a_data, 0);
+	} else
+		retval = -1;
+
+	/* Anonymous zero pages for the interpreter's bss. */
+	if (retval >= 0)
+		do_mmap(NULL, ELF_PAGESTART(interp_ex->a_text + interp_ex->a_data + ELF_EXEC_PAGESIZE - 1),
+			interp_ex->a_bss,
+			PROT_READ|PROT_WRITE|PROT_EXEC,
+			MAP_FIXED|MAP_PRIVATE, 0);
+	if (retval < 0) return ~0UL;
+	return elf_entry;
+}
+
+/*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+ */
+
+#define INTERPRETER_NONE 0
+#define INTERPRETER_AOUT 1
+#define INTERPRETER_ELF 2
+
+
+/* Core loader for 32-bit SPARC ELF executables.  Validates the ELF and
+ * program headers in bprm, maps the PT_LOAD segments, loads a PT_INTERP
+ * interpreter (ELF or a.out) when present, builds the new user stack
+ * with create_elf32_tables and starts the thread via start_thread32.
+ * Failures after flush_old_exec() kill the process (the old image is
+ * already gone) instead of returning an error.
+ */
+static inline int
+do_load_elf32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	struct elfhdr elf_ex;
+	struct elfhdr interp_elf_ex;
+	struct file * file;
+	struct exec interp_ex;
+	struct inode *interpreter_inode;
+	unsigned long load_addr;
+	int load_addr_set = 0;
+	unsigned int interpreter_type = INTERPRETER_NONE;
+	unsigned char ibcs2_interpreter;
+	int i;
+	int old_fs;
+	int error;
+	struct elf_phdr * elf_ppnt, *elf_phdata;
+	int elf_exec_fileno;
+	unsigned long elf_bss, k, elf_brk;
+	int retval;
+	char * elf_interpreter;
+	unsigned long elf_entry, interp_load_addr = 0;
+	int status;
+	unsigned long start_code, end_code, end_data;
+	unsigned long elf_stack;
+	char passed_fileno[6];
+
+	ibcs2_interpreter = 0;
+	status = 0;
+	load_addr = 0;
+	elf_ex = *((struct elfhdr *) bprm->buf);	  /* exec-header */
+
+	if (elf_ex.e_ident[0] != 0x7f ||
+	    strncmp(&elf_ex.e_ident[1], "ELF",3) != 0) {
+		return -ENOEXEC;
+	}
+
+
+	/* First of all, some simple consistency checks */
+	if ((elf_ex.e_type != ET_EXEC &&
+	     elf_ex.e_type != ET_DYN) ||
+	    (! elf_check_arch(elf_ex.e_machine)) ||
+	    (!bprm->inode->i_op || !bprm->inode->i_op->default_file_ops ||
+	     !bprm->inode->i_op->default_file_ops->mmap)){
+		return -ENOEXEC;
+	}
+
+	/* Now read in all of the header information */
+
+	elf_phdata = (struct elf_phdr *) kmalloc(elf_ex.e_phentsize *
+						 elf_ex.e_phnum, GFP_KERNEL);
+	if (elf_phdata == NULL) {
+		return -ENOMEM;
+	}
+
+	retval = read_exec(bprm->inode, elf_ex.e_phoff, (char *) elf_phdata,
+			   elf_ex.e_phentsize * elf_ex.e_phnum, 1);
+	if (retval < 0) {
+		kfree (elf_phdata);
+		return retval;
+	}
+
+	elf_ppnt = elf_phdata;
+
+	elf_bss = 0;
+	elf_brk = 0;
+
+	elf_exec_fileno = open_inode(bprm->inode, O_RDONLY);
+
+	if (elf_exec_fileno < 0) {
+		kfree (elf_phdata);
+		return elf_exec_fileno;
+	}
+
+	file = current->files->fd[elf_exec_fileno];
+
+	elf_stack = ~0UL;
+	elf_interpreter = NULL;
+	start_code = ~0UL;
+	end_code = 0;
+	end_data = 0;
+
+	/* Scan program headers for a PT_INTERP entry and, if found, read
+	 * the interpreter's own header into bprm->buf for later.
+	 */
+	for(i=0;i < elf_ex.e_phnum; i++){
+		if (elf_ppnt->p_type == PT_INTERP) {
+			/* A second PT_INTERP is malformed. */
+			if ( elf_interpreter != NULL )
+			{
+				kfree (elf_phdata);
+				kfree(elf_interpreter);
+				sys_close(elf_exec_fileno);
+				return -EINVAL;
+			}
+
+			/* This is the program interpreter used for
+			 * shared libraries - for now assume that this
+			 * is an a.out format binary
+			 */
+
+			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
+							   GFP_KERNEL);
+			if (elf_interpreter == NULL) {
+				kfree (elf_phdata);
+				sys_close(elf_exec_fileno);
+				return -ENOMEM;
+			}
+
+			retval = read_exec(bprm->inode,elf_ppnt->p_offset,
+					   elf_interpreter,
+					   elf_ppnt->p_filesz, 1);
+			/* If the program interpreter is one of these two,
+			   then assume an iBCS2 image. Otherwise assume
+			   a native linux image. */
+			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
+			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
+				ibcs2_interpreter = 1;
+#if 0
+			printk("Using ELF interpreter %s\n", elf_interpreter);
+#endif
+			if (retval >= 0) {
+				old_fs = get_fs(); /* This could probably be optimized */
+				set_fs(get_ds());
+				retval = open_namei(elf_interpreter, 0, 0,
+						    &interpreter_inode, NULL);
+				set_fs(old_fs);
+			}
+
+			if (retval >= 0)
+				retval = read_exec(interpreter_inode,0,bprm->buf,128, 1);
+
+			if (retval >= 0) {
+				interp_ex = *((struct exec *) bprm->buf);		/* exec-header */
+				interp_elf_ex = *((struct elfhdr *) bprm->buf);	  /* exec-header */
+
+			}
+			if (retval < 0) {
+				kfree (elf_phdata);
+				kfree(elf_interpreter);
+				sys_close(elf_exec_fileno);
+				return retval;
+			}
+		}
+		elf_ppnt++;
+	}
+
+	/* Some simple consistency checks for the interpreter */
+	if (elf_interpreter){
+		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
+
+		/* Now figure out which format our binary is */
+		if ((N_MAGIC(interp_ex) != OMAGIC) &&
+		    (N_MAGIC(interp_ex) != ZMAGIC) &&
+		    (N_MAGIC(interp_ex) != QMAGIC))
+			interpreter_type = INTERPRETER_ELF;
+
+		if (interp_elf_ex.e_ident[0] != 0x7f ||
+		    strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0)
+			interpreter_type &= ~INTERPRETER_ELF;
+
+		if (!interpreter_type)
+		{
+			kfree(elf_interpreter);
+			kfree(elf_phdata);
+			sys_close(elf_exec_fileno);
+			return -ELIBBAD;
+		}
+	}
+
+	/* OK, we are done with that, now set up the arg stuff,
+	   and then start this sucker up */
+
+	if (!bprm->sh_bang) {
+		char * passed_p;
+
+		/* An a.out interpreter is passed the exec fd as an extra
+		 * string argument.
+		 */
+		if (interpreter_type == INTERPRETER_AOUT) {
+			sprintf(passed_fileno, "%d", elf_exec_fileno);
+			passed_p = passed_fileno;
+
+			if (elf_interpreter) {
+				bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p,2);
+				bprm->argc++;
+			}
+		}
+		if (!bprm->p) {
+			if (elf_interpreter) {
+				kfree(elf_interpreter);
+			}
+			kfree (elf_phdata);
+			sys_close(elf_exec_fileno);
+			return -E2BIG;
+		}
+	}
+
+	/* OK, This is the point of no return */
+	flush_old_exec(bprm);
+
+	current->mm->end_data = 0;
+	current->mm->end_code = 0;
+	current->mm->start_mmap = ELF_START_MMAP;
+	current->mm->mmap = NULL;
+	elf_entry = (unsigned long) elf_ex.e_entry;
+
+	/* Do this so that we can load the interpreter, if need be.  We will
+	   change some of these later */
+	current->mm->rss = 0;
+	bprm->p = setup_arg_pages(bprm->p, bprm);
+	current->mm->start_stack = bprm->p;
+
+	/* Now we do a little grungy work by mmaping the ELF image into
+	   the correct location in memory.  At this point, we assume that
+	   the image should be loaded at fixed address, not at a variable
+	   address. */
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
+		if (elf_ppnt->p_type == PT_LOAD) {
+			int elf_prot = 0;
+			if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
+			if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
+			if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
+
+			error = do_mmap(file,
+					ELF_PAGESTART(elf_ppnt->p_vaddr),
+					(elf_ppnt->p_filesz +
+					 ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
+					elf_prot,
+					(MAP_FIXED | MAP_PRIVATE |
+					 MAP_DENYWRITE | MAP_EXECUTABLE),
+					(elf_ppnt->p_offset -
+					 ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
+
+#ifdef LOW_ELF_STACK
+			if (ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
+				elf_stack = ELF_PAGESTART(elf_ppnt->p_vaddr);
+#endif
+
+			if (!load_addr_set) {
+				load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
+				load_addr_set = 1;
+			}
+			/* Track code/data/bss extents across all segments. */
+			k = elf_ppnt->p_vaddr;
+			if (k < start_code) start_code = k;
+			k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
+			if (k > elf_bss) elf_bss = k;
+#if 1
+			if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
+#else
+			if ( !(elf_ppnt->p_flags & PF_W) && end_code <  k)
+#endif
+				end_code = k;
+			if (end_data < k) end_data = k;
+			k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
+			if (k > elf_brk) elf_brk = k;
+		}
+	}
+	set_fs(old_fs);
+
+	if (elf_interpreter) {
+		if (interpreter_type & 1)
+			elf_entry = load_aout32_interp(&interp_ex,
+						       interpreter_inode);
+		else if (interpreter_type & 2)
+			elf_entry = load_elf32_interp(&interp_elf_ex,
+						      interpreter_inode,
+						      &interp_load_addr);
+
+		iput(interpreter_inode);
+		kfree(elf_interpreter);
+
+		if (elf_entry == ~0UL) {
+			printk("Unable to load interpreter\n");
+			kfree(elf_phdata);
+			send_sig(SIGSEGV, current, 0);
+			return 0;
+		}
+	}
+
+	kfree(elf_phdata);
+
+	if (interpreter_type != INTERPRETER_AOUT) sys_close(elf_exec_fileno);
+	current->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
+
+	/* Swap exec domain / binfmt module refcounts over to the new image. */
+	if (current->exec_domain && current->exec_domain->module)
+		__MOD_DEC_USE_COUNT(current->exec_domain->module);
+	if (current->binfmt && current->binfmt->module)
+		__MOD_DEC_USE_COUNT(current->binfmt->module);
+	current->exec_domain = lookup_exec_domain(current->personality);
+	current->binfmt = &elf32_format;
+	if (current->exec_domain && current->exec_domain->module)
+		__MOD_INC_USE_COUNT(current->exec_domain->module);
+	if (current->binfmt && current->binfmt->module)
+		__MOD_INC_USE_COUNT(current->binfmt->module);
+
+#ifndef VM_STACK_FLAGS
+	current->executable = bprm->inode;
+	bprm->inode->i_count++;
+#endif
+#ifdef LOW_ELF_STACK
+	current->start_stack = bprm->p = elf_stack - 4;
+#endif
+	current->suid = current->euid = current->fsuid = bprm->e_uid;
+	current->sgid = current->egid = current->fsgid = bprm->e_gid;
+	current->flags &= ~PF_FORKNOEXEC;
+	bprm->p = (unsigned long)
+		create_elf32_tables((char *)bprm->p,
+				    bprm->argc,
+				    bprm->envc,
+				    (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
+				    load_addr,
+				    interp_load_addr,
+				    (interpreter_type == INTERPRETER_AOUT ? 0 : 1));
+	if (interpreter_type == INTERPRETER_AOUT)
+		current->mm->arg_start += strlen(passed_fileno) + 1;
+	current->mm->start_brk = current->mm->brk = elf_brk;
+	current->mm->end_code = end_code;
+	current->mm->start_code = start_code;
+	current->mm->end_data = end_data;
+	current->mm->start_stack = bprm->p;
+
+	/* Calling set_brk effectively mmaps the pages that we need for the bss and break
+	   sections */
+	set_brk(elf_bss, elf_brk);
+
+	padzero(elf_bss);
+
+#if 0
+	printk("(start_brk) %x\n" , current->mm->start_brk);
+	printk("(end_code) %x\n" , current->mm->end_code);
+	printk("(start_code) %x\n" , current->mm->start_code);
+	printk("(end_data) %x\n" , current->mm->end_data);
+	printk("(start_stack) %x\n" , current->mm->start_stack);
+	printk("(brk) %x\n" , current->mm->brk);
+#endif
+
+	if ( current->personality == PER_SVR4 )
+	{
+		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
+		   and some applications "depend" upon this behavior.
+		   Since we do not have the power to recompile these, we
+		   emulate the SVr4 behavior.  Sigh.  */
+		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
+				MAP_FIXED | MAP_PRIVATE, 0);
+	}
+
+#ifdef ELF_PLAT_INIT
+	/*
+	 * The ABI may specify that certain registers be set up in special
+	 * ways (on i386 %edx is the address of a DT_FINI function, for
+	 * example.  This macro performs whatever initialization to
+	 * the regs structure is required.
+	 */
+	ELF_PLAT_INIT(regs);
+#endif
+
+
+	start_thread32(regs, elf_entry, bprm->p);
+	if (current->flags & PF_PTRACED)
+		send_sig(SIGTRAP, current, 0);
+	return 0;
+}
+
+static int
+load_elf32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+	int status;
+
+	/* Pin this module while the real loader runs, then drop the
+	 * reference and hand its result straight back.
+	 */
+	MOD_INC_USE_COUNT;
+	status = do_load_elf32_binary(bprm, regs);
+	MOD_DEC_USE_COUNT;
+	return status;
+}
+
+/* This is really simpleminded and specialized - we are loading an
+ a.out library that is given an ELF header. */
+
+static inline int
+do_load_elf32_library(int fd){
+ struct file * file;
+ struct elfhdr elf_ex;
+ struct elf_phdr *elf_phdata = NULL;
+ struct inode * inode;
+ unsigned long len;
+ int elf_bss;
+ int retval;
+ unsigned long bss;
+ int error;
+ int i,j, k;
+
+ len = 0;
+ file = current->files->fd[fd];
+ inode = file->f_inode;
+ elf_bss = 0;
+
+ if (!file || !file->f_op)
+ return -EACCES;
+
+ /* seek to the beginning of the file */
+ if (file->f_op->llseek) {
+ if ((error = file->f_op->llseek(inode, file, 0, 0)) != 0)
+ return -ENOEXEC;
+ } else
+ file->f_pos = 0;
+
+ set_fs(KERNEL_DS);
+ error = file->f_op->read(inode, file, (char *) &elf_ex, sizeof(elf_ex));
+ set_fs(USER_DS);
+ if (error != sizeof(elf_ex))
+ return -ENOEXEC;
+
+ if (elf_ex.e_ident[0] != 0x7f ||
+ strncmp(&elf_ex.e_ident[1], "ELF",3) != 0)
+ return -ENOEXEC;
+
+ /* First of all, some simple consistency checks */
+ if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
+ !elf_check_arch(elf_ex.e_machine) ||
+ (!inode->i_op || !inode->i_op->default_file_ops->mmap))
+ return -ENOEXEC;
+
+ /* Now read in all of the header information */
+
+ if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
+ return -ENOEXEC;
+
+ elf_phdata = (struct elf_phdr *)
+ kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
+ if (elf_phdata == NULL)
+ return -ENOMEM;
+
+ retval = read_exec(inode, elf_ex.e_phoff, (char *) elf_phdata,
+ sizeof(struct elf_phdr) * elf_ex.e_phnum, 1);
+
+ j = 0;
+ for(i=0; i<elf_ex.e_phnum; i++)
+ if ((elf_phdata + i)->p_type == PT_LOAD) j++;
+
+ if (j != 1) {
+ kfree(elf_phdata);
+ return -ENOEXEC;
+ }
+
+ while(elf_phdata->p_type != PT_LOAD) elf_phdata++;
+
+ /* Now use mmap to map the library into memory. */
+ error = do_mmap(file,
+ ELF_PAGESTART(elf_phdata->p_vaddr),
+ (elf_phdata->p_filesz +
+ ELF_PAGEOFFSET(elf_phdata->p_vaddr)),
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+ (elf_phdata->p_offset -
+ ELF_PAGEOFFSET(elf_phdata->p_vaddr)));
+
+ k = elf_phdata->p_vaddr + elf_phdata->p_filesz;
+ if (k > elf_bss) elf_bss = k;
+
+ if (error != ELF_PAGESTART(elf_phdata->p_vaddr)) {
+ kfree(elf_phdata);
+ return error;
+ }
+
+ padzero(elf_bss);
+
+ len = ELF_PAGESTART(elf_phdata->p_filesz + elf_phdata->p_vaddr+ ELF_EXEC_PAGESIZE - 1);
+ bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
+ if (bss > len)
+ do_mmap(NULL, len, bss-len,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ kfree(elf_phdata);
+ return 0;
+}
+
+static int load_elf32_library(int fd)
+{
+ int retval;
+
+ MOD_INC_USE_COUNT;
+ retval = do_load_elf32_library(fd);
+ MOD_DEC_USE_COUNT;
+ return retval;
+}
+
+/*
+ * Note that some platforms still use traditional core dumps and not
+ * the ELF core dump. Each platform can select it as appropriate.
+ */
+#ifdef USE_ELF_CORE_DUMP
+
+/*
+ * ELF core dumper
+ *
+ * Modelled on fs/exec.c:aout_core_dump()
+ * Jeremy Fitzhardinge <jeremy@sw.oz.au>
+ */
+/*
+ * These are the only things you should do on a core-file: use only these
+ * functions to write out all the necessary info.
+ */
+static int dump_write(struct file *file, const void *addr, int nr)
+{
+ return file->f_op->write(file->f_inode, file, addr, nr) == nr;
+}
+
+static int dump_seek(struct file *file, off_t off)
+{
+ if (file->f_op->llseek) {
+ if (file->f_op->llseek(file->f_inode, file, off, 0) != off)
+ return 0;
+ } else
+ file->f_pos = off;
+ return 1;
+}
+
+/*
+ * Decide whether a segment is worth dumping; default is yes to be
+ * sure (missing info is worse than too much; etc).
+ * Personally I'd include everything, and use the coredump limit...
+ *
+ * I think we should skip something. But I am not sure how. H.J.
+ */
+static inline int maydump(struct vm_area_struct *vma)
+{
+ if (!(vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC)))
+ return 0;
+#if 1
+ if (vma->vm_flags & (VM_WRITE|VM_GROWSUP|VM_GROWSDOWN))
+ return 1;
+ if (vma->vm_flags & (VM_READ|VM_EXEC|VM_EXECUTABLE|VM_SHARED))
+ return 0;
+#endif
+ return 1;
+}
+
+#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
+
+/* An ELF note in memory */
+struct memelfnote
+{
+ const char *name;
+ int type;
+ unsigned int datasz;
+ void *data;
+};
+
+static int notesize(struct memelfnote *en)
+{
+ int sz;
+
+ sz = sizeof(struct elf_note);
+ sz += roundup(strlen(en->name), 4);
+ sz += roundup(en->datasz, 4);
+
+ return sz;
+}
+
+/* #define DEBUG */
+
+#define DUMP_WRITE(addr, nr) \
+ do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
+#define DUMP_SEEK(off) \
+ do { if (!dump_seek(file, (off))) return 0; } while(0)
+
+static int writenote(struct memelfnote *men, struct file *file)
+{
+ struct elf_note en;
+
+ en.n_namesz = strlen(men->name);
+ en.n_descsz = men->datasz;
+ en.n_type = men->type;
+
+ DUMP_WRITE(&en, sizeof(en));
+ DUMP_WRITE(men->name, en.n_namesz);
+ /* XXX - cast from long long to long to avoid need for libgcc.a */
+ DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
+ DUMP_WRITE(men->data, men->datasz);
+ DUMP_SEEK(roundup((unsigned long)file->f_pos, 4)); /* XXX */
+
+ return 1;
+}
+#undef DUMP_WRITE
+#undef DUMP_SEEK
+
+#define DUMP_WRITE(addr, nr) \
+ if (!dump_write(&file, (addr), (nr))) \
+ goto close_coredump;
+#define DUMP_SEEK(off) \
+ if (!dump_seek(&file, (off))) \
+ goto close_coredump;
+/*
+ * Actual dumper
+ *
+ * This is a two-pass process; first we find the offsets of the bits,
+ * and then they are actually written out. If we run out of core limit
+ * we just truncate.
+ */
+static int elf32_core_dump(long signr, struct pt_regs * regs)
+{
+ int has_dumped = 0;
+ struct file file;
+ struct inode *inode;
+ unsigned short fs;
+ char corefile[6+sizeof(current->comm)];
+ int segs;
+ int i;
+ size_t size;
+ struct vm_area_struct *vma;
+ struct elfhdr elf;
+ off_t offset = 0, dataoff;
+ int limit = current->rlim[RLIMIT_CORE].rlim_cur;
+ int numnote = 4;
+ struct memelfnote notes[4];
+ struct elf_prstatus prstatus; /* NT_PRSTATUS */
+ elf_fpregset_t fpu; /* NT_PRFPREG */
+ struct elf_prpsinfo psinfo; /* NT_PRPSINFO */
+
+ if (!current->dumpable || limit < PAGE_SIZE || current->mm->count != 1)
+ return 0;
+ current->dumpable = 0;
+
+#ifndef CONFIG_BINFMT_ELF32
+ MOD_INC_USE_COUNT;
+#endif
+
+ /* Count what's needed to dump, up to the limit of coredump size */
+ segs = 0;
+ size = 0;
+ for(vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
+ if (maydump(vma))
+ {
+ int sz = vma->vm_end-vma->vm_start;
+
+ if (size+sz >= limit)
+ break;
+ else
+ size += sz;
+ }
+
+ segs++;
+ }
+#ifdef DEBUG
+ printk("elf_core_dump: %d segs taking %d bytes\n", segs, size);
+#endif
+
+ /* Set up header */
+ memcpy(elf.e_ident, ELFMAG, SELFMAG);
+ elf.e_ident[EI_CLASS] = ELF_CLASS;
+ elf.e_ident[EI_DATA] = ELF_DATA;
+ elf.e_ident[EI_VERSION] = EV_CURRENT;
+ memset(elf.e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
+
+ elf.e_type = ET_CORE;
+ elf.e_machine = ELF_ARCH;
+ elf.e_version = EV_CURRENT;
+ elf.e_entry = 0;
+ elf.e_phoff = sizeof(elf);
+ elf.e_shoff = 0;
+ elf.e_flags = 0;
+ elf.e_ehsize = sizeof(elf);
+ elf.e_phentsize = sizeof(struct elf_phdr);
+ elf.e_phnum = segs+1; /* Include notes */
+ elf.e_shentsize = 0;
+ elf.e_shnum = 0;
+ elf.e_shstrndx = 0;
+
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ memcpy(corefile,"core.",5);
+#if 0
+ memcpy(corefile+5,current->comm,sizeof(current->comm));
+#else
+ corefile[4] = '\0';
+#endif
+ if (open_namei(corefile,O_CREAT | 2 | O_TRUNC,0600,&inode,NULL)) {
+ inode = NULL;
+ goto end_coredump;
+ }
+ if (!S_ISREG(inode->i_mode))
+ goto end_coredump;
+ if (!inode->i_op || !inode->i_op->default_file_ops)
+ goto end_coredump;
+ file.f_mode = 3;
+ file.f_flags = 0;
+ file.f_count = 1;
+ file.f_inode = inode;
+ file.f_pos = 0;
+ file.f_reada = 0;
+ file.f_op = inode->i_op->default_file_ops;
+ if (file.f_op->open)
+ if (file.f_op->open(inode,&file))
+ goto end_coredump;
+ if (!file.f_op->write)
+ goto close_coredump;
+ has_dumped = 1;
+ current->flags |= PF_DUMPCORE;
+
+ DUMP_WRITE(&elf, sizeof(elf));
+ offset += sizeof(elf); /* Elf header */
+ offset += (segs+1) * sizeof(struct elf_phdr); /* Program headers */
+
+ /*
+ * Set up the notes in similar form to SVR4 core dumps made
+ * with info from their /proc.
+ */
+ memset(&psinfo, 0, sizeof(psinfo));
+ memset(&prstatus, 0, sizeof(prstatus));
+
+ notes[0].name = "CORE";
+ notes[0].type = NT_PRSTATUS;
+ notes[0].datasz = sizeof(prstatus);
+ notes[0].data = &prstatus;
+ prstatus.pr_info.si_signo = prstatus.pr_cursig = signr;
+ prstatus.pr_sigpend = current->signal;
+ prstatus.pr_sighold = current->blocked;
+ psinfo.pr_pid = prstatus.pr_pid = current->pid;
+ psinfo.pr_ppid = prstatus.pr_ppid = current->p_pptr->pid;
+ psinfo.pr_pgrp = prstatus.pr_pgrp = current->pgrp;
+ psinfo.pr_sid = prstatus.pr_sid = current->session;
+ prstatus.pr_utime.tv_sec = CT_TO_SECS(current->utime);
+ prstatus.pr_utime.tv_usec = CT_TO_USECS(current->utime);
+ prstatus.pr_stime.tv_sec = CT_TO_SECS(current->stime);
+ prstatus.pr_stime.tv_usec = CT_TO_USECS(current->stime);
+ prstatus.pr_cutime.tv_sec = CT_TO_SECS(current->cutime);
+ prstatus.pr_cutime.tv_usec = CT_TO_USECS(current->cutime);
+ prstatus.pr_cstime.tv_sec = CT_TO_SECS(current->cstime);
+ prstatus.pr_cstime.tv_usec = CT_TO_USECS(current->cstime);
+
+ /*
+ * This transfers the registers from regs into the standard
+ * coredump arrangement, whatever that is.
+ */
+#ifdef ELF_CORE_COPY_REGS
+ ELF_CORE_COPY_REGS(prstatus.pr_reg, regs)
+#else
+ if (sizeof(elf_gregset_t) != sizeof(struct pt_regs))
+ {
+ printk("sizeof(elf_gregset_t) (%ld) != sizeof(struct pt_regs) (%ld)\n",
+ sizeof(elf_gregset_t), sizeof(struct pt_regs));
+ }
+ else
+ *(struct pt_regs *)&prstatus.pr_reg = *regs;
+#endif
+
+ notes[1].name = "CORE";
+ notes[1].type = NT_PRPSINFO;
+ notes[1].datasz = sizeof(psinfo);
+ notes[1].data = &psinfo;
+ psinfo.pr_state = current->state;
+ psinfo.pr_sname = (current->state < 0 || current->state > 5) ? '.' : "RSDZTD"[current->state];
+ psinfo.pr_zomb = psinfo.pr_sname == 'Z';
+ psinfo.pr_nice = current->priority-15;
+ psinfo.pr_flag = current->flags;
+ psinfo.pr_uid = current->uid;
+ psinfo.pr_gid = current->gid;
+ {
+ int i, len;
+
+ set_fs(fs);
+
+ len = current->mm->arg_end - current->mm->arg_start;
+ len = len >= ELF_PRARGSZ ? ELF_PRARGSZ : len;
+ copy_from_user(&psinfo.pr_psargs,
+ (const char *)current->mm->arg_start, len);
+ for(i = 0; i < len; i++)
+ if (psinfo.pr_psargs[i] == 0)
+ psinfo.pr_psargs[i] = ' ';
+ psinfo.pr_psargs[len] = 0;
+
+ set_fs(KERNEL_DS);
+ }
+ strncpy(psinfo.pr_fname, current->comm, sizeof(psinfo.pr_fname));
+
+ notes[2].name = "CORE";
+ notes[2].type = NT_TASKSTRUCT;
+ notes[2].datasz = sizeof(*current);
+ notes[2].data = current;
+
+ /* Try to dump the fpu. */
+ prstatus.pr_fpvalid = dump_fpu (regs, &fpu);
+ if (!prstatus.pr_fpvalid)
+ {
+ numnote--;
+ }
+ else
+ {
+ notes[3].name = "CORE";
+ notes[3].type = NT_PRFPREG;
+ notes[3].datasz = sizeof(fpu);
+ notes[3].data = &fpu;
+ }
+
+ /* Write notes phdr entry */
+ {
+ struct elf_phdr phdr;
+ int sz = 0;
+
+ for(i = 0; i < numnote; i++)
+ sz += notesize(&notes[i]);
+
+ phdr.p_type = PT_NOTE;
+ phdr.p_offset = offset;
+ phdr.p_vaddr = 0;
+ phdr.p_paddr = 0;
+ phdr.p_filesz = sz;
+ phdr.p_memsz = 0;
+ phdr.p_flags = 0;
+ phdr.p_align = 0;
+
+ offset += phdr.p_filesz;
+ DUMP_WRITE(&phdr, sizeof(phdr));
+ }
+
+ /* Page-align dumped data */
+ dataoff = offset = roundup(offset, PAGE_SIZE);
+
+ /* Write program headers for segments dump */
+ for(vma = current->mm->mmap, i = 0;
+ i < segs && vma != NULL; vma = vma->vm_next) {
+ struct elf_phdr phdr;
+ size_t sz;
+
+ i++;
+
+ sz = vma->vm_end - vma->vm_start;
+
+ phdr.p_type = PT_LOAD;
+ phdr.p_offset = offset;
+ phdr.p_vaddr = vma->vm_start;
+ phdr.p_paddr = 0;
+ phdr.p_filesz = maydump(vma) ? sz : 0;
+ phdr.p_memsz = sz;
+ offset += phdr.p_filesz;
+ phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
+ if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
+ if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
+ phdr.p_align = PAGE_SIZE;
+
+ DUMP_WRITE(&phdr, sizeof(phdr));
+ }
+
+ for(i = 0; i < numnote; i++)
+ if (!writenote(&notes[i], &file))
+ goto close_coredump;
+
+ set_fs(fs);
+
+ DUMP_SEEK(dataoff);
+
+ for(i = 0, vma = current->mm->mmap;
+ i < segs && vma != NULL;
+ vma = vma->vm_next) {
+ unsigned long addr = vma->vm_start;
+ unsigned long len = vma->vm_end - vma->vm_start;
+
+ i++;
+ if (!maydump(vma))
+ continue;
+#ifdef DEBUG
+ printk("elf_core_dump: writing %08lx %lx\n", addr, len);
+#endif
+ DUMP_WRITE((void *)addr, len);
+ }
+
+ if ((off_t) file.f_pos != offset) {
+ /* Sanity check */
+ printk("elf_core_dump: file.f_pos (%ld) != offset (%ld)\n",
+ (off_t) file.f_pos, offset);
+ }
+
+ close_coredump:
+ if (file.f_op->release)
+ file.f_op->release(inode,&file);
+
+ end_coredump:
+ set_fs(fs);
+ iput(inode);
+#ifndef CONFIG_BINFMT_ELF32
+ MOD_DEC_USE_COUNT;
+#endif
+ return has_dumped;
+}
+#endif /* USE_ELF_CORE_DUMP */
+
+__initfunc(int init_elf32_binfmt(void))
+{
+ return register_binfmt(&elf32_format);
+}
+
+#ifdef MODULE
+
+int init_module(void)
+{
+ /* Install the COFF, ELF and XOUT loaders.
+ * N.B. We *rely* on the table being the right size with the
+ * right number of free slots...
+ */
+ return init_elf32_binfmt();
+}
+
+
+void cleanup_module( void)
+{
+ /* Remove the COFF and ELF loaders. */
+ unregister_binfmt(&elf32_format);
+}
+#endif
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
new file mode 100644
index 000000000..851d1550c
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -0,0 +1,270 @@
+/* $Id: sys_sparc.c,v 1.1 1997/04/09 08:25:18 jj Exp $
+ * linux/arch/sparc64/kernel/sys_sparc.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/sparc
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+#include <asm/ipc.h>
+
+/* XXX Make this per-binary type, this way we can detect the type of
+ * XXX a binary. Every Sparc executable calls this very early on.
+ */
+asmlinkage unsigned long sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
+
+extern asmlinkage unsigned long sys_brk(unsigned long brk);
+
+asmlinkage unsigned long sparc_brk(unsigned long brk)
+{
+ unsigned long ret;
+
+ lock_kernel();
+ if(brk >= 0x80000000000ULL) { /* VM hole */
+ ret = current->mm->brk;
+ goto out;
+ }
+ ret = sys_brk(brk);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sparc_pipe(struct pt_regs *regs)
+{
+ int fd[2];
+ int error;
+
+ lock_kernel();
+ error = do_pipe(fd);
+ if (error)
+ goto out;
+ regs->u_regs[UREG_I1] = fd[1];
+ error = fd[0];
+out:
+ unlock_kernel();
+ return error;
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+
+asmlinkage int sys_ipc (unsigned call, int first, int second, unsigned long third, void *ptr, long fifth)
+{
+ int err;
+
+ lock_kernel();
+ /* No need for backward compatibility. We can start fresh... */
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ err = sys_semop (first, (struct sembuf *)ptr, second);
+ goto out;
+ case SEMGET:
+ err = sys_semget (first, second, (int)third);
+ goto out;
+ case SEMCTL: {
+ union semun fourth;
+ err = -EINVAL;
+ if (!ptr)
+ goto out;
+ err = -EFAULT;
+ if(get_user(fourth.__pad, (void **)ptr))
+ goto out;
+ err = sys_semctl (first, second, (int)third, fourth);
+ goto out;
+ }
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ err = sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, (int)third);
+ goto out;
+ case MSGRCV:
+ err = sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, (int)third);
+ goto out;
+ case MSGGET:
+ err = sys_msgget ((key_t) first, second);
+ goto out;
+ case MSGCTL:
+ err = sys_msgctl (first, second, (struct msqid_ds *) ptr);
+ goto out;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ err = sys_shmat (first, (char *) ptr, second, (ulong *) third);
+ goto out;
+ case SHMDT:
+ err = sys_shmdt ((char *)ptr);
+ goto out;
+ case SHMGET:
+ err = sys_shmget (first, second, (int)third);
+ goto out;
+ case SHMCTL:
+ err = sys_shmctl (first, second, (struct shmid_ds *) ptr);
+ goto out;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ else
+ err = -EINVAL;
+out:
+ unlock_kernel();
+ return err;
+}
+
+extern unsigned long get_unmapped_area(unsigned long addr, unsigned long len);
+
+/* Linux version of mmap */
+asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long fd,
+ unsigned long off)
+{
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
+ lock_kernel();
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (fd >= NR_OPEN || !(file = current->files->fd[fd])){
+ goto out;
+ }
+ }
+ retval = -ENOMEM;
+ if(!(flags & MAP_FIXED) && !addr) {
+ addr = get_unmapped_area(addr, len);
+ if(!addr){
+ goto out;
+ }
+ }
+
+ /* See asm-sparc64/uaccess.h */
+ retval = -EINVAL;
+ if((len > (TASK_SIZE - PAGE_SIZE)) || (addr > (TASK_SIZE-len-PAGE_SIZE)))
+ goto out;
+
+ if(addr >= 0x80000000000ULL) {
+ retval = current->mm->brk;
+ goto out;
+ }
+
+ retval = do_mmap(file, addr, len, prot, flags, off);
+out:
+ unlock_kernel();
+ return retval;
+}
+
+/* we come to here via sys_nis_syscall so it can setup the regs argument */
+asmlinkage unsigned long
+c_sys_nis_syscall (struct pt_regs *regs)
+{
+ lock_kernel();
+ printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
+ show_regs (regs);
+ unlock_kernel();
+ return -ENOSYS;
+}
+
+/* #define DEBUG_SPARC_BREAKPOINT */
+
+asmlinkage void
+sparc_breakpoint (struct pt_regs *regs)
+{
+ lock_kernel();
+#ifdef DEBUG_SPARC_BREAKPOINT
+ printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
+#endif
+ force_sig(SIGTRAP, current);
+#ifdef DEBUG_SPARC_BREAKPOINT
+ printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
+#endif
+ unlock_kernel();
+}
+
+extern void check_pending(int signum);
+
+asmlinkage int
+sparc_sigaction (int signum, const struct sigaction *action, struct sigaction *oldaction)
+{
+ struct sigaction new_sa, *p;
+ int err = -EINVAL;
+
+ lock_kernel();
+ if(signum < 0) {
+ current->tss.new_signal = 1;
+ signum = -signum;
+ }
+
+ if (signum<1 || signum>32)
+ goto out;
+ p = signum - 1 + current->sig->action;
+ if (action) {
+ err = -EINVAL;
+ if (signum==SIGKILL || signum==SIGSTOP)
+ goto out;
+ err = -EFAULT;
+ if(copy_from_user(&new_sa, action, sizeof(struct sigaction)))
+ goto out;
+ if (new_sa.sa_handler != SIG_DFL && new_sa.sa_handler != SIG_IGN) {
+ err = verify_area(VERIFY_READ, new_sa.sa_handler, 1);
+ if (err)
+ goto out;
+ }
+ }
+
+ if (oldaction) {
+ err = -EFAULT;
+ if (copy_to_user(oldaction, p, sizeof(struct sigaction)))
+ goto out;
+ }
+
+ if (action) {
+ *p = new_sa;
+ check_pending(signum);
+ }
+
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
+}
+
+/* only AP+ systems have sys_aplib */
+asmlinkage int sys_aplib(void)
+{
+ return -ENOSYS;
+}
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
new file mode 100644
index 000000000..e9911daed
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -0,0 +1,1741 @@
+/* $Id: sys_sparc32.c,v 1.9 1997/04/21 08:34:24 jj Exp $
+ * sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * These routines maintain argument size conversion between 32bit and 64bit
+ * environment.
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/utime.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/times.h>
+#include <linux/utime.h>
+#include <linux/utsname.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/malloc.h>
+#include <linux/uio.h>
+#include <linux/nfs_fs.h>
+#include <linux/smb_fs.h>
+#include <linux/ncp_fs.h>
+
+#include <asm/types.h>
+#include <asm/poll.h>
+#include <asm/ipc.h>
+#include <asm/uaccess.h>
+
+/* As gcc will warn about casting u32 to some ptr, we have to cast it to unsigned long first, and that's what is A() for.
+ * You just do (void *)A(x), instead of having to type (void *)((unsigned long)x) or instead of just (void *)x, which will
+ * produce warnings */
+#define A(x) ((unsigned long)x)
+
+extern asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on);
+extern asmlinkage unsigned long sys_brk(unsigned long brk);
+extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off);
+extern asmlinkage int sys_bdflush(int func, long data);
+extern asmlinkage int sys_uselib(const char * library);
+extern asmlinkage long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg);
+extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+extern asmlinkage int sys_mknod(const char * filename, int mode, dev_t dev);
+extern asmlinkage int sys_mkdir(const char * pathname, int mode);
+extern asmlinkage int sys_rmdir(const char * pathname);
+extern asmlinkage int sys_unlink(const char * pathname);
+extern asmlinkage int sys_symlink(const char * oldname, const char * newname);
+extern asmlinkage int sys_link(const char * oldname, const char * newname);
+extern asmlinkage int sys_rename(const char * oldname, const char * newname);
+extern asmlinkage int sys_quotactl(int cmd, const char *special, int id, caddr_t addr);
+extern asmlinkage int sys_statfs(const char * path, struct statfs * buf);
+extern asmlinkage int sys_fstatfs(unsigned int fd, struct statfs * buf);
+extern asmlinkage int sys_truncate(const char * path, unsigned long length);
+extern asmlinkage int sys_ftruncate(unsigned int fd, unsigned long length);
+extern asmlinkage int sys_utime(char * filename, struct utimbuf * times);
+extern asmlinkage int sys_utimes(char * filename, struct timeval * utimes);
+extern asmlinkage int sys_access(const char * filename, int mode);
+extern asmlinkage int sys_chdir(const char * filename);
+extern asmlinkage int sys_chroot(const char * filename);
+extern asmlinkage int sys_chmod(const char * filename, mode_t mode);
+extern asmlinkage int sys_chown(const char * filename, uid_t user, gid_t group);
+extern asmlinkage int sys_open(const char * filename,int flags,int mode);
+extern asmlinkage int sys_creat(const char * pathname, int mode);
+extern asmlinkage long sys_lseek(unsigned int fd, off_t offset, unsigned int origin);
+extern asmlinkage int sys_llseek(unsigned int fd, unsigned long offset_high, unsigned long offset_low, loff_t *result, unsigned int origin);
+extern asmlinkage long sys_read(unsigned int fd, char * buf, unsigned long count);
+extern asmlinkage long sys_write(unsigned int fd, const char * buf, unsigned long count);
+extern asmlinkage long sys_readv(unsigned long fd, const struct iovec * vector, unsigned long count);
+extern asmlinkage long sys_writev(unsigned long fd, const struct iovec * vector, unsigned long count);
+extern asmlinkage int sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp);
+extern asmlinkage int sys_poll(struct pollfd * ufds, unsigned int nfds, int timeout);
+extern asmlinkage int sys_newstat(char * filename, struct stat * statbuf);
+extern asmlinkage int sys_newlstat(char * filename, struct stat * statbuf);
+extern asmlinkage int sys_newfstat(unsigned int fd, struct stat * statbuf);
+extern asmlinkage int sys_readlink(const char * path, char * buf, int bufsiz);
+extern asmlinkage int sys_sysfs(int option, ...);
+extern asmlinkage int sys_ustat(dev_t dev, struct ustat * ubuf);
+extern asmlinkage int sys_umount(char * name);
+extern asmlinkage int sys_mount(char * dev_name, char * dir_name, char * type, unsigned long new_flags, void *data);
+extern asmlinkage int sys_syslog(int type, char * bug, int count);
+extern asmlinkage int sys_personality(unsigned long personality);
+extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
+extern asmlinkage int sys_waitpid(pid_t pid,unsigned int * stat_addr, int options);
+extern asmlinkage int sys_sysinfo(struct sysinfo *info);
+extern asmlinkage int sys_getitimer(int which, struct itimerval *value);
+extern asmlinkage int sys_setitimer(int which, struct itimerval *value, struct itimerval *ovalue);
+extern asmlinkage int sys_sched_setscheduler(pid_t pid, int policy, struct sched_param *param);
+extern asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param);
+extern asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param);
+extern asmlinkage int sys_sched_rr_get_interval(pid_t pid, struct timespec *interval);
+extern asmlinkage int sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp);
+extern asmlinkage int sys_sigprocmask(int how, sigset_t *set, sigset_t *oset);
+extern asmlinkage int sys_sigpending(sigset_t *set);
+extern asmlinkage unsigned long sys_signal(int signum, __sighandler_t handler);
+extern asmlinkage int sys_reboot(int magic1, int magic2, int cmd, void * arg);
+extern asmlinkage int sys_acct(const char *name);
+extern asmlinkage int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
+extern asmlinkage long sys_times(struct tms * tbuf);
+extern asmlinkage int sys_getgroups(int gidsetsize, gid_t *grouplist);
+extern asmlinkage int sys_setgroups(int gidsetsize, gid_t *grouplist);
+extern asmlinkage int sys_newuname(struct new_utsname * name);
+extern asmlinkage int sys_olduname(struct oldold_utsname * name);
+extern asmlinkage int sys_sethostname(char *name, int len);
+extern asmlinkage int sys_gethostname(char *name, int len);
+extern asmlinkage int sys_setdomainname(char *name, int len);
+extern asmlinkage int sys_getrlimit(unsigned int resource, struct rlimit *rlim);
+extern asmlinkage int sys_setrlimit(unsigned int resource, struct rlimit *rlim);
+extern asmlinkage int sys_getrusage(int who, struct rusage *ru);
+extern asmlinkage int sys_time(int * tloc);
+extern asmlinkage int sys_gettimeofday(struct timeval *tv, struct timezone *tz);
+extern asmlinkage int sys_settimeofday(struct timeval *tv, struct timezone *tz);
+extern asmlinkage int sys_adjtimex(struct timex *txc_p);
+extern asmlinkage int sys_msync(unsigned long start, size_t len, int flags);
+extern asmlinkage int sys_mlock(unsigned long start, size_t len);
+extern asmlinkage int sys_munlock(unsigned long start, size_t len);
+extern asmlinkage int sys_munmap(unsigned long addr, size_t len);
+extern asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot);
+extern asmlinkage unsigned long sys_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags);
+extern asmlinkage int sys_swapoff(const char * specialfile);
+extern asmlinkage int sys_swapon(const char * specialfile, int swap_flags);
+extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
+extern asmlinkage int sys_accept(int fd, struct sockaddr *upeer_sockaddr, int *upeer_addrlen);
+extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen);
+extern asmlinkage int sys_getsockname(int fd, struct sockaddr *usockaddr, int *usockaddr_len);
+extern asmlinkage int sys_getpeername(int fd, struct sockaddr *usockaddr, int *usockaddr_len);
+extern asmlinkage int sys_send(int fd, void * buff, size_t len, unsigned flags);
+extern asmlinkage int sys_sendto(int fd, void * buff, size_t len, unsigned flags, struct sockaddr *addr, int addr_len);
+extern asmlinkage int sys_recv(int fd, void * ubuf, size_t size, unsigned flags);
+extern asmlinkage int sys_recvfrom(int fd, void * ubuf, size_t size, unsigned flags, struct sockaddr *addr, int *addr_len);
+extern asmlinkage int sys_setsockopt(int fd, int level, int optname, char *optval, int optlen);
+extern asmlinkage int sys_getsockopt(int fd, int level, int optname, char *optval, int *optlen);
+extern asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags);
+extern asmlinkage int sys_recvmsg(int fd, struct msghdr *msg, unsigned int flags);
+extern asmlinkage int sys_socketcall(int call, unsigned long *args);
+extern asmlinkage int sys_nfsservctl(int cmd, void *argp, void *resp);
+
+asmlinkage int sys32_ioperm(u32 from, u32 num, int on)
+{
+ return sys_ioperm((unsigned long)from, (unsigned long)num, on);
+}
+
+struct msgbuf32 { s32 mtype; char mtext[1]; };
+
+struct ipc_perm32
+{
+ key_t key;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_uid_t32 cuid;
+ __kernel_gid_t32 cgid;
+ __kernel_mode_t32 mode;
+ unsigned short seq;
+};
+
+struct msqid_ds32
+{
+ struct ipc_perm32 msg_perm;
+ u32 msg_first;
+ u32 msg_last;
+ __kernel_time_t32 msg_stime;
+ __kernel_time_t32 msg_rtime;
+ __kernel_time_t32 msg_ctime;
+ u32 wwait;
+ u32 rwait;
+ unsigned short msg_cbytes;
+ unsigned short msg_qnum;
+ unsigned short msg_qbytes;
+ __kernel_ipc_pid_t32 msg_lspid;
+ __kernel_ipc_pid_t32 msg_lrpid;
+};
+
+struct shmid_ds32 {
+ struct ipc_perm32 shm_perm;
+ int shm_segsz;
+ __kernel_time_t32 shm_atime;
+ __kernel_time_t32 shm_dtime;
+ __kernel_time_t32 shm_ctime;
+ __kernel_ipc_pid_t32 shm_cpid;
+ __kernel_ipc_pid_t32 shm_lpid;
+ unsigned short shm_nattch;
+ unsigned short shm_npages;
+ u32 shm_pages;
+ u32 attaches;
+};
+
+/*
+ * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation..
+ *
+ * This is really horribly ugly.
+ */
+
+asmlinkage int sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+{
+ int version, err;
+
+ lock_kernel();
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ /* struct sembuf is the same on 32 and 64bit :)) */
+ err = sys_semop (first, (struct sembuf *)A(ptr), second);
+ goto out;
+ case SEMGET:
+ err = sys_semget (first, second, third);
+ goto out;
+ case SEMCTL: {
+ /* XXX union semun32 to union semun64 and back conversion */
+ union semun fourth;
+ err = -EINVAL;
+ if (!ptr)
+ goto out;
+ err = -EFAULT;
+ if(get_user(fourth.__pad, (void **)A(ptr)))
+ goto out;
+ err = sys_semctl (first, second, third, fourth);
+ goto out;
+ }
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ {
+ struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf), GFP_KERNEL);
+
+ if (!p) err = -ENOMEM;
+ else {
+ if (get_user(p->mtype, &(((struct msgbuf32 *)A(ptr))->mtype)) ||
+ __copy_from_user(p->mtext, &(((struct msgbuf32 *)A(ptr))->mtext), second))
+ err = -EFAULT;
+ else {
+ unsigned long old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_msgsnd (first, p, second, third);
+ set_fs (old_fs);
+ }
+ kfree (p);
+ }
+ }
+ goto out;
+ case MSGRCV:
+ {
+ struct msgbuf *p;
+ unsigned long old_fs;
+
+ if (!version) {
+ struct ipc_kludge tmp;
+ err = -EINVAL;
+ if (!ptr)
+ goto out;
+ err = -EFAULT;
+ if(copy_from_user(&tmp,(struct ipc_kludge *)A(ptr), sizeof (tmp)))
+ goto out;
+ ptr = tmp.msgp;
+ fifth = tmp.msgtyp;
+ }
+ p = kmalloc (second + sizeof (struct msgbuf), GFP_KERNEL);
+ if (!p) {
+ err = -EFAULT;
+ goto out;
+ }
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_msgrcv (first, p, second, fifth, third);
+ set_fs (old_fs);
+ if (put_user (p->mtype, &(((struct msgbuf32 *)A(ptr))->mtype)) ||
+ __copy_to_user(&(((struct msgbuf32 *)A(ptr))->mtext), p->mtext, second))
+ err = -EFAULT;
+ kfree (p);
+ goto out;
+ }
+ case MSGGET:
+ err = sys_msgget ((key_t) first, second);
+ goto out;
+ case MSGCTL:
+ {
+ struct msqid_ds m;
+ unsigned long old_fs;
+
+ switch (second) {
+ case IPC_INFO:
+ case MSG_INFO:
+ /* struct msginfo is the same */
+ case IPC_RMID:
+ /* and this doesn't care about ptr */
+ err = sys_msgctl (first, second, (struct msqid_ds *)A(ptr));
+ goto out;
+
+ case IPC_SET:
+ if (get_user (m.msg_perm.uid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.uid)) ||
+ __get_user (m.msg_perm.gid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.gid)) ||
+ __get_user (m.msg_perm.mode, &(((struct msqid_ds32 *)A(ptr))->msg_perm.mode)) ||
+ __get_user (m.msg_qbytes, &(((struct msqid_ds32 *)A(ptr))->msg_qbytes))) {
+ err = -EFAULT;
+ goto out;
+ }
+ default:
+ break;
+ }
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_msgctl (first, second, &m);
+ set_fs (old_fs);
+ switch (second) {
+ case MSG_STAT:
+ case IPC_STAT:
+ if (put_user (m.msg_perm.key, &(((struct msqid_ds32 *)A(ptr))->msg_perm.key)) ||
+ __put_user (m.msg_perm.uid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.uid)) ||
+ __put_user (m.msg_perm.gid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.gid)) ||
+ __put_user (m.msg_perm.cuid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.cuid)) ||
+ __put_user (m.msg_perm.cgid, &(((struct msqid_ds32 *)A(ptr))->msg_perm.cgid)) ||
+ __put_user (m.msg_perm.mode, &(((struct msqid_ds32 *)A(ptr))->msg_perm.mode)) ||
+ __put_user (m.msg_perm.seq, &(((struct msqid_ds32 *)A(ptr))->msg_perm.seq)) ||
+ __put_user (m.msg_stime, &(((struct msqid_ds32 *)A(ptr))->msg_stime)) ||
+ __put_user (m.msg_rtime, &(((struct msqid_ds32 *)A(ptr))->msg_rtime)) ||
+ __put_user (m.msg_ctime, &(((struct msqid_ds32 *)A(ptr))->msg_ctime)) ||
+ __put_user (m.msg_cbytes, &(((struct msqid_ds32 *)A(ptr))->msg_cbytes)) ||
+ __put_user (m.msg_qnum, &(((struct msqid_ds32 *)A(ptr))->msg_qnum)) ||
+ __put_user (m.msg_qbytes, &(((struct msqid_ds32 *)A(ptr))->msg_qbytes)) ||
+ __put_user (m.msg_lspid, &(((struct msqid_ds32 *)A(ptr))->msg_lspid)) ||
+ __put_user (m.msg_lrpid, &(((struct msqid_ds32 *)A(ptr))->msg_lrpid)))
+ err = -EFAULT;
+ break;
+ default:
+ break;
+ }
+ }
+ goto out;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ case 0: default: {
+ unsigned long raddr;
+ err = sys_shmat (first, (char *)A(ptr), second, &raddr);
+ if (err)
+ goto out;
+ err = -EFAULT;
+ if(put_user (raddr, ((u32 *)A(third))))
+ goto out;
+ err = 0;
+ goto out;
+ }
+ case 1: /* If iBCS2 should ever run, then for sure in 64bit mode, not 32bit... */
+ err = -EINVAL;
+ goto out;
+ }
+ case SHMDT:
+ err = sys_shmdt ((char *)A(ptr));
+ goto out;
+ case SHMGET:
+ err = sys_shmget (first, second, third);
+ goto out;
+ case SHMCTL:
+ {
+ struct shmid_ds s;
+ unsigned long old_fs;
+
+ switch (second) {
+ case IPC_INFO:
+ /* struct shminfo is the same */
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ case IPC_RMID:
+ /* and these three aren't using ptr at all */
+ err = sys_shmctl (first, second, (struct shmid_ds *)A(ptr));
+ goto out;
+
+ case IPC_SET:
+ if (get_user (s.shm_perm.uid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.uid)) ||
+ __get_user (s.shm_perm.gid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.gid)) ||
+ __get_user (s.shm_perm.mode, &(((struct shmid_ds32 *)A(ptr))->shm_perm.mode))) {
+ err = -EFAULT;
+ goto out;
+ }
+ default:
+ break;
+ }
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ err = sys_shmctl (first, second, &s);
+ set_fs (old_fs);
+ switch (second) {
+ case SHM_INFO:
+ {
+ struct shm_info32 { int used_ids; u32 shm_tot; u32 shm_rss; u32 shm_swp; u32 swap_attempts; u32 swap_successes; };
+ struct shm_info *si = (struct shm_info *)&s;
+
+ if (put_user (si->used_ids, &(((struct shm_info32 *)A(ptr))->used_ids)) ||
+ __put_user (si->shm_tot, &(((struct shm_info32 *)A(ptr))->shm_tot)) ||
+ __put_user (si->shm_rss, &(((struct shm_info32 *)A(ptr))->shm_rss)) ||
+ __put_user (si->shm_swp, &(((struct shm_info32 *)A(ptr))->shm_swp)) ||
+ __put_user (si->swap_attempts, &(((struct shm_info32 *)A(ptr))->swap_attempts)) ||
+ __put_user (si->swap_successes, &(((struct shm_info32 *)A(ptr))->swap_successes)))
+ err = -EFAULT;
+ }
+ break;
+ case SHM_STAT:
+ case IPC_STAT:
+ if (put_user (s.shm_perm.key, &(((struct shmid_ds32 *)A(ptr))->shm_perm.key)) ||
+ __put_user (s.shm_perm.uid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.uid)) ||
+ __put_user (s.shm_perm.gid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.gid)) ||
+ __put_user (s.shm_perm.cuid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.cuid)) ||
+ __put_user (s.shm_perm.cgid, &(((struct shmid_ds32 *)A(ptr))->shm_perm.cgid)) ||
+ __put_user (s.shm_perm.mode, &(((struct shmid_ds32 *)A(ptr))->shm_perm.mode)) ||
+ __put_user (s.shm_perm.seq, &(((struct shmid_ds32 *)A(ptr))->shm_perm.seq)) ||
+ __put_user (s.shm_atime, &(((struct shmid_ds32 *)A(ptr))->shm_atime)) ||
+ __put_user (s.shm_dtime, &(((struct shmid_ds32 *)A(ptr))->shm_dtime)) ||
+ __put_user (s.shm_ctime, &(((struct shmid_ds32 *)A(ptr))->shm_ctime)) ||
+ __put_user (s.shm_segsz, &(((struct shmid_ds32 *)A(ptr))->shm_segsz)) ||
+ __put_user (s.shm_nattch, &(((struct shmid_ds32 *)A(ptr))->shm_nattch)) ||
+ __put_user (s.shm_lpid, &(((struct shmid_ds32 *)A(ptr))->shm_cpid)) ||
+ __put_user (s.shm_cpid, &(((struct shmid_ds32 *)A(ptr))->shm_lpid)))
+ err = -EFAULT;
+ break;
+ default:
+ break;
+ }
+ }
+ goto out;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+ else
+ err = -EINVAL;
+out:
+ unlock_kernel();
+ return err;
+}
+
+/* 32-bit mmap(2) entry: zero-extend all six user arguments to 64 bits
+ * and hand them to the native handler unchanged. */
+asmlinkage unsigned long sys32_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
+{
+	return sys_mmap((unsigned long)addr, (unsigned long)len, (unsigned long)prot, (unsigned long)flags,
+			(unsigned long)fd, (unsigned long)off);
+}
+
+/* 32-bit bdflush(2): `data' is sign-extended, since the native call
+ * takes a (signed) long. */
+asmlinkage int sys32_bdflush(int func, s32 data)
+{
+	return sys_bdflush(func, (long)data);
+}
+
+/* 32-bit uselib(2): A() widens the 32-bit user pointer to a full
+ * 64-bit user address. */
+asmlinkage int sys32_uselib(u32 library)
+{
+	return sys_uselib((const char *)A(library));
+}
+
+/* 32-bit fcntl(2).  The lock commands take a struct flock whose layout
+ * differs between the 32-bit and 64-bit ABIs, so convert it field by
+ * field in both directions around the native call; everything else is
+ * passed through untouched. */
+asmlinkage long sys32_fcntl(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	switch (cmd) {
+	case F_GETLK:
+	case F_SETLK:
+	case F_SETLKW:
+		{
+			struct flock f;
+			unsigned long old_fs;
+			long ret;
+
+			/* Pull the 32-bit flock in; the first get_user also
+			 * validates the user pointer for the __get_user's. */
+			if (get_user (f.l_type, &(((struct flock32 *)A(arg))->l_type)) ||
+			    __get_user (f.l_whence, &(((struct flock32 *)A(arg))->l_whence)) ||
+			    __get_user (f.l_start, &(((struct flock32 *)A(arg))->l_start)) ||
+			    __get_user (f.l_len, &(((struct flock32 *)A(arg))->l_len)) ||
+			    __get_user (f.l_pid, &(((struct flock32 *)A(arg))->l_pid)))
+				return -EFAULT;
+			old_fs = get_fs(); set_fs (KERNEL_DS);
+			ret = sys_fcntl(fd, cmd, (unsigned long)&f);
+			set_fs (old_fs);
+			/* NOTE(review): the flock is written back for SETLK/
+			 * SETLKW and even when sys_fcntl failed; only F_GETLK
+			 * actually modifies it.  Harmless, but confirm. */
+			if (__put_user (f.l_type, &(((struct flock32 *)A(arg))->l_type)) ||
+			    __put_user (f.l_whence, &(((struct flock32 *)A(arg))->l_whence)) ||
+			    __put_user (f.l_start, &(((struct flock32 *)A(arg))->l_start)) ||
+			    __put_user (f.l_len, &(((struct flock32 *)A(arg))->l_len)) ||
+			    __put_user (f.l_pid, &(((struct flock32 *)A(arg))->l_pid)))
+				return -EFAULT;
+			return ret;
+		}
+	default:
+		/* All other commands take scalars; zero-extend and pass on. */
+		return sys_fcntl(fd, cmd, (unsigned long)arg);
+	}
+}
+
+/* Conversion of args should be probably done in all the locations where it is handled,
+   using if (current->tss.flags & SPARC_FLAG_32BIT */
+asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
+{
+	return sys_ioctl(fd, cmd, (unsigned long)arg);
+}
+
+/* The following wrappers only widen 32-bit user pointers (via A()) and
+ * scalar arguments; the native syscalls do the real work. */
+
+asmlinkage int sys32_mknod(u32 filename, int mode, __kernel_dev_t32 dev)
+{
+	return sys_mknod((const char *)A(filename), mode, dev);
+}
+
+asmlinkage int sys32_mkdir(u32 pathname, int mode)
+{
+	return sys_mkdir((const char *)A(pathname), mode);
+}
+
+asmlinkage int sys32_rmdir(u32 pathname)
+{
+	return sys_rmdir((const char *)A(pathname));
+}
+
+asmlinkage int sys32_unlink(u32 pathname)
+{
+	return sys_unlink((const char *)A(pathname));
+}
+
+asmlinkage int sys32_symlink(u32 oldname, u32 newname)
+{
+	return sys_symlink((const char *)A(oldname), (const char *)A(newname));
+}
+
+asmlinkage int sys32_link(u32 oldname, u32 newname)
+{
+	return sys_link((const char *)A(oldname), (const char *)A(newname));
+}
+
+asmlinkage int sys32_rename(u32 oldname, u32 newname)
+{
+	return sys_rename((const char *)A(oldname), (const char *)A(newname));
+}
+
+/* XXX: Play with the addr, it will be ugly :(( */
+asmlinkage int sys32_quotactl(int cmd, u32 special, int id, u32 addr)
+{
+	return sys_quotactl(cmd, (const char *)A(special), id, (caddr_t)A(addr));
+}
+
+/* Translate a kernel struct statfs into the 32-bit struct statfs32 at
+ * user address `buf', field by field.  Returns 0 on success or -EFAULT
+ * if any store faults; the initial put_user validates the pointer for
+ * the subsequent __put_user's. */
+static int put_statfs (u32 buf, struct statfs *s)
+{
+	if (put_user (s->f_type, &(((struct statfs32 *)A(buf))->f_type)) ||
+	    __put_user (s->f_bsize, &(((struct statfs32 *)A(buf))->f_bsize)) ||
+	    __put_user (s->f_blocks, &(((struct statfs32 *)A(buf))->f_blocks)) ||
+	    __put_user (s->f_bfree, &(((struct statfs32 *)A(buf))->f_bfree)) ||
+	    __put_user (s->f_bavail, &(((struct statfs32 *)A(buf))->f_bavail)) ||
+	    __put_user (s->f_files, &(((struct statfs32 *)A(buf))->f_files)) ||
+	    __put_user (s->f_ffree, &(((struct statfs32 *)A(buf))->f_ffree)) ||
+	    __put_user (s->f_namelen, &(((struct statfs32 *)A(buf))->f_namelen)) ||
+	    __put_user (s->f_fsid.val[0], &(((struct statfs32 *)A(buf))->f_fsid.val[0])) ||
+	    __put_user (s->f_fsid.val[1], &(((struct statfs32 *)A(buf))->f_fsid.val[1])))
+		return -EFAULT;
+	return 0;
+}
+
+/* 32-bit statfs(2): run the native call against a kernel-resident
+ * struct statfs, then translate it into the user's statfs32. */
+asmlinkage int sys32_statfs(u32 path, u32 buf)
+{
+	int ret;
+	struct statfs s;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_statfs((const char *)A(path), &s);
+	set_fs (old_fs);
+	/* Only copy out on success: on failure `s' is uninitialized
+	 * kernel stack and must not reach user space (and the real
+	 * errno must not be replaced by -EFAULT). */
+	if (!ret && put_statfs(buf, &s)) return -EFAULT;
+	return ret;
+}
+
+/* 32-bit fstatfs(2): same scheme as sys32_statfs, keyed by fd. */
+asmlinkage int sys32_fstatfs(unsigned int fd, u32 buf)
+{
+	int ret;
+	struct statfs s;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_fstatfs(fd, &s);
+	set_fs (old_fs);
+	/* Copy out only on success; `s' is garbage otherwise. */
+	if (!ret && put_statfs(buf, &s)) return -EFAULT;
+	return ret;
+}
+
+/* truncate(2)/ftruncate(2): the 32-bit length is zero-extended. */
+asmlinkage int sys32_truncate(u32 path, u32 length)
+{
+	return sys_truncate((const char *)A(path), (unsigned long)length);
+}
+
+asmlinkage int sys32_ftruncate(unsigned int fd, u32 length)
+{
+	return sys_ftruncate(fd, (unsigned long)length);
+}
+
+/* 32-bit utime(2): convert the 32-bit utimbuf to the kernel one around
+ * the native call. */
+asmlinkage int sys32_utime(u32 filename, u32 times)
+{
+	struct utimbuf32 { __kernel_time_t32 actime, modtime; };
+	struct utimbuf t;
+	unsigned long old_fs;
+	int ret;
+
+	/* A NULL `times' is valid and means "set both times to now";
+	 * don't fault trying to read through it. */
+	if (!times)
+		return sys_utime((char *)A(filename), NULL);
+	if (get_user (t.actime, &(((struct utimbuf32 *)A(times))->actime)) ||
+	    __get_user (t.modtime, &(((struct utimbuf32 *)A(times))->modtime)))
+		return -EFAULT;
+	old_fs = get_fs();
+	set_fs (KERNEL_DS);
+	ret = sys_utime((char *)A(filename), &t);
+	set_fs (old_fs);
+	return ret;
+}
+
+/* More straight pass-through wrappers: pointer widening only, because
+ * the underlying structures (where any) have identical 32/64 layouts. */
+
+asmlinkage int sys32_utimes(u32 filename, u32 utimes)
+{
+	/* struct timeval is the same :)) */
+	return sys_utimes((char *)A(filename), (struct timeval *)A(utimes));
+}
+
+asmlinkage int sys32_access(u32 filename, int mode)
+{
+	return sys_access((const char *)A(filename), mode);
+}
+
+asmlinkage int sys32_chdir(u32 filename)
+{
+	return sys_chdir((const char *)A(filename));
+}
+
+asmlinkage int sys32_chroot(u32 filename)
+{
+	return sys_chroot((const char *)A(filename));
+}
+
+asmlinkage int sys32_chmod(u32 filename, __kernel_mode_t32 mode)
+{
+	return sys_chmod((const char *)A(filename), mode);
+}
+
+asmlinkage int sys32_chown(u32 filename, __kernel_uid_t32 user, __kernel_gid_t32 group)
+{
+	return sys_chown((const char *)A(filename), user, group);
+}
+
+asmlinkage int sys32_open(u32 filename, int flags, int mode)
+{
+	return sys_open((const char *)A(filename), flags, mode);
+}
+
+asmlinkage int sys32_creat(u32 pathname, int mode)
+{
+	return sys_creat((const char *)A(pathname), mode);
+}
+
+/* lseek(2): the 32-bit offset is sign-extended into off_t. */
+asmlinkage long sys32_lseek(unsigned int fd, s32 offset, unsigned int origin)
+{
+	return sys_lseek(fd, (off_t)offset, origin);
+}
+
+asmlinkage int sys32_llseek(unsigned int fd, u32 offset_high, u32 offset_low, u32 result, unsigned int origin)
+{
+	/* loff_t is the same :)) */
+	return sys_llseek(fd, (unsigned long)offset_high, (unsigned long)offset_low, (loff_t *)A(result), origin);
+}
+
+asmlinkage long sys32_read(unsigned int fd, u32 buf, u32 count)
+{
+	return sys_read(fd, (char *)A(buf), (unsigned long)count);
+}
+
+asmlinkage long sys32_write(unsigned int fd, u32 buf, u32 count)
+{
+	return sys_write(fd, (const char *)A(buf), (unsigned long)count);
+}
+
+/* 32-bit iovec: 32-bit base pointer and size. */
+struct iovec32 { u32 iov_base; __kernel_size_t32 iov_len; };
+
+/* 32-bit readv(2): expand the user's iovec32 array into a kernel
+ * iovec array (on the stack for <= UIO_FASTIOV entries, kmalloc'd and
+ * BKL-protected otherwise), then run the native call under KERNEL_DS. */
+asmlinkage long sys32_readv(u32 fd, u32 vector, u32 count)
+{
+	struct iovec *v;
+	struct iovec vf[UIO_FASTIOV];
+	u32 i;
+	long ret;
+	unsigned long old_fs;
+
+	if (!count) return 0; if (count > UIO_MAXIOV) return -EINVAL;
+	if (count <= UIO_FASTIOV)
+		v = vf;
+	else {
+		lock_kernel ();
+		v = kmalloc (count * sizeof (struct iovec), GFP_KERNEL);
+		if (!v) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+	/* NOTE(review): __get_user is used with no prior verify_area on
+	 * `vector'; confirm the pointer is range-checked elsewhere.
+	 * The (unsigned long) cast on the lvalue is a GNU C extension. */
+	for (i = 0; i < count; i++) {
+		if (__get_user ((unsigned long)(v[i].iov_base), &((((struct iovec32 *)A(vector))+i)->iov_base)) ||
+		    __get_user (v[i].iov_len, &((((struct iovec32 *)A(vector))+i)->iov_len))) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+	old_fs = get_fs();
+	set_fs (KERNEL_DS);
+	ret = sys_readv((unsigned long)fd, v, (unsigned long)count);
+	set_fs (old_fs);
+out:
+	/* kmalloc/lock happened only on the slow path; mirror that here. */
+	if (count > UIO_FASTIOV) {
+		kfree (v);
+		unlock_kernel ();
+	}
+	return ret;
+}
+
+/* 32-bit writev(2): identical structure to sys32_readv above — expand
+ * iovec32[] into a kernel iovec[], call the native handler under
+ * KERNEL_DS, free/unlock only on the slow (kmalloc) path. */
+asmlinkage long sys32_writev(u32 fd, u32 vector, u32 count)
+{
+	struct iovec *v;
+	struct iovec vf[UIO_FASTIOV];
+	u32 i;
+	long ret;
+	unsigned long old_fs;
+
+	if (!count) return 0; if (count > UIO_MAXIOV) return -EINVAL;
+	if (count <= UIO_FASTIOV)
+		v = vf;
+	else {
+		lock_kernel ();
+		v = kmalloc (count * sizeof (struct iovec), GFP_KERNEL);
+		if (!v) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+	for (i = 0; i < count; i++) {
+		if (__get_user ((unsigned long)(v[i].iov_base), &((((struct iovec32 *)A(vector))+i)->iov_base)) ||
+		    __get_user (v[i].iov_len, &((((struct iovec32 *)A(vector))+i)->iov_len))) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+	old_fs = get_fs();
+	set_fs (KERNEL_DS);
+	ret = sys_writev((unsigned long)fd, v, (unsigned long)count);
+	set_fs (old_fs);
+out:
+	if (count > UIO_FASTIOV) {
+		kfree (v);
+		unlock_kernel ();
+	}
+	return ret;
+}
+
+/* readdir & getdents */
+
+/* Byte offset of d_name within a dirent structure. */
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+/* Round x up to the next multiple of sizeof(long). */
+#define ROUND_UP(x) (((x)+sizeof(long)-1) & ~(sizeof(long)-1))
+
+/* 32-bit layout of the ancient one-entry readdir(2) dirent. */
+struct old_linux_dirent32 {
+	u32 d_ino;
+	u32 d_offset;
+	unsigned short d_namlen;
+	char d_name[1];
+};
+
+/* Cookie passed through the fs readdir iterator to fillonedir. */
+struct readdir_callback32 {
+	struct old_linux_dirent32 * dirent;
+	int count;
+};
+
+/* Iterator callback for old32_readdir: store exactly one entry into
+ * the user's old_linux_dirent32 and stop (returns -EINVAL on the
+ * second entry so the fs layer terminates the walk). */
+static int fillonedir(void * __buf, const char * name, int namlen, off_t offset, ino_t ino)
+{
+	struct readdir_callback32 * buf = (struct readdir_callback32 *) __buf;
+	struct old_linux_dirent32 * dirent;
+
+	if (buf->count)
+		return -EINVAL;
+	buf->count++;
+	dirent = buf->dirent;
+	/* NOTE(review): the put_user/copy_to_user results are ignored;
+	 * the buffer was verify_area'd by the caller, but only for the
+	 * fixed-size header, not d_name[namlen]. */
+	put_user(ino, &dirent->d_ino);
+	put_user(offset, &dirent->d_offset);
+	put_user(namlen, &dirent->d_namlen);
+	copy_to_user(dirent->d_name, name, namlen);
+	put_user(0, dirent->d_name + namlen);
+	return 0;
+}
+
+/* 32-bit old readdir(2): return a single directory entry.  `count' is
+ * ignored, matching the native call. */
+asmlinkage int old32_readdir(unsigned int fd, u32 dirent, unsigned int count)
+{
+	int error = -EBADF;
+	struct file * file;
+	struct readdir_callback32 buf;
+
+	lock_kernel();
+	if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+		goto out;
+	error = -ENOTDIR;
+	if (!file->f_op || !file->f_op->readdir)
+		goto out;
+	error = verify_area(VERIFY_WRITE, (void *)A(dirent), sizeof(struct old_linux_dirent32));
+	if (error)
+		goto out;
+	buf.count = 0;
+	buf.dirent = (struct old_linux_dirent32 *)A(dirent);
+	error = file->f_op->readdir(file->f_inode, file, &buf, fillonedir);
+	if (error < 0)
+		goto out;
+	/* 0 or 1 entries delivered. */
+	error = buf.count;
+out:
+	unlock_kernel();
+	return error;
+}
+
+/* 32-bit layout of the getdents(2) dirent. */
+struct linux_dirent32 {
+	u32 d_ino;
+	u32 d_off;
+	unsigned short d_reclen;
+	char d_name[1];
+};
+
+/* Cookie threaded through the fs readdir iterator for sys32_getdents. */
+struct getdents_callback32 {
+	struct linux_dirent32 * current_dir;
+	struct linux_dirent32 * previous;
+	int count;
+	int error;
+};
+
+/* Iterator callback for sys32_getdents: append one 32-bit dirent to
+ * the user buffer, back-patching d_off of the previous entry. */
+static int filldir(void * __buf, const char * name, int namlen, off_t offset, ino_t ino)
+{
+	struct linux_dirent32 * dirent;
+	struct getdents_callback32 * buf = (struct getdents_callback32 *) __buf;
+	/* NAME_OFFSET only computes a field offset, so using the still
+	 * uninitialized `dirent' here is deliberate (classic idiom). */
+	int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+	buf->error = -EINVAL;	/* only used if we fail.. */
+	if (reclen > buf->count)
+		return -EINVAL;
+	dirent = buf->previous;
+	if (dirent)
+		put_user(offset, &dirent->d_off);
+	dirent = buf->current_dir;
+	buf->previous = dirent;
+	put_user(ino, &dirent->d_ino);
+	put_user(reclen, &dirent->d_reclen);
+	copy_to_user(dirent->d_name, name, namlen);
+	put_user(0, dirent->d_name + namlen);
+	/* Cast-as-lvalue is a GNU C extension: advance past this record. */
+	((char *) dirent) += reclen;
+	buf->current_dir = dirent;
+	buf->count -= reclen;
+	return 0;
+}
+
+/* 32-bit getdents(2): fill the user buffer with as many 32-bit dirents
+ * as fit; returns the number of bytes written, 0 at EOF, or -errno. */
+asmlinkage int sys32_getdents(unsigned int fd, u32 dirent, unsigned int count)
+{
+	struct file * file;
+	struct linux_dirent32 * lastdirent;
+	struct getdents_callback32 buf;
+	int error = -EBADF;
+
+	lock_kernel();
+	if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+		goto out;
+	error = -ENOTDIR;
+	if (!file->f_op || !file->f_op->readdir)
+		goto out;
+	error = verify_area(VERIFY_WRITE, (void *)A(dirent), count);
+	if (error)
+		goto out;
+	buf.current_dir = (struct linux_dirent32 *) A(dirent);
+	buf.previous = NULL;
+	buf.count = count;
+	buf.error = 0;
+	error = file->f_op->readdir(file->f_inode, file, &buf, filldir);
+	if (error < 0)
+		goto out;
+	lastdirent = buf.previous;
+	if (!lastdirent) {
+		/* Nothing emitted: report why filldir bailed (or 0 at EOF). */
+		error = buf.error;
+	} else {
+		/* Terminate the last record with the current file position. */
+		put_user(file->f_pos, &lastdirent->d_off);
+		error = count - buf.count;
+	}
+out:
+	unlock_kernel();
+	return error;
+}
+
+/* end of readdir & getdents */
+
+/* 32-bit select(2).  A 32-bit fd_set is an array of u32 bit-words; the
+ * 64-bit kernel fd_set is an array of 64-bit words.  On big-endian
+ * sparc64 this means swapping the two u32 halves of every 64-bit word
+ * while copying.  The three converted sets live in one scratch page at
+ * byte offsets 0, PAGE_SIZE/4 and PAGE_SIZE/2 (n <= PAGE_SIZE*2 bits
+ * == PAGE_SIZE/4 bytes per set, so they fit).
+ * NOTE(review): NULL fd_set pointers (legal for select) still fault
+ * here, as they did before — left unchanged. */
+asmlinkage int sys32_select(int n, u32 inp, u32 outp, u32 exp, u32 tvp)
+{
+	unsigned long old_fs;
+	char *p;
+	u32 *q;
+	int i, ret, nn;
+	u32 *Inp, *Outp, *Exp;
+
+	if (n < 0 || n > PAGE_SIZE*2) return -EINVAL;
+	lock_kernel ();
+	ret = -ENOMEM;
+	p = (char *)__get_free_page (GFP_KERNEL);
+	if (!p) goto out_unlock;
+	q = (u32 *)p;
+	/* Number of 64-bit fd_set words covering n bits. */
+	nn = (n + 8 * sizeof(unsigned long) - 1) / (8 * sizeof (unsigned long));
+	Inp = (u32 *)A(inp); Outp = (u32 *)A(outp); Exp = (u32 *)A(exp);
+	ret = -EFAULT;
+	/* Copy all nn words of each set in.  The old code looped to
+	 * `ret' (== -EFAULT, so never) and indexed q with byte offsets
+	 * (PAGE_SIZE/4, PAGE_SIZE/2) instead of u32 indices
+	 * (PAGE_SIZE/16, PAGE_SIZE/8), overrunning the page. */
+	for (i = 0; i < nn; i++, Inp += 2, Outp += 2, Exp += 2, q += 2) {
+		if (__get_user (q[1], Inp) ||
+		    __get_user (q[0], Inp+1) ||
+		    __get_user (q[1+PAGE_SIZE/16], Outp) ||
+		    __get_user (q[PAGE_SIZE/16], Outp+1) ||
+		    __get_user (q[1+PAGE_SIZE/8], Exp) ||
+		    __get_user (q[PAGE_SIZE/8], Exp+1))
+			goto out;
+	}
+	old_fs = get_fs ();
+	set_fs (KERNEL_DS);
+	/* struct timeval has identical 32/64-bit layout here. */
+	ret = sys_select(n, (fd_set *)p, (fd_set *)(p + PAGE_SIZE/4), (fd_set *)(p + PAGE_SIZE/2), (struct timeval *)A(tvp));
+	set_fs (old_fs);
+	q = (u32 *)p;
+	Inp = (u32 *)A(inp); Outp = (u32 *)A(outp); Exp = (u32 *)A(exp);
+	/* Copy all nn words back out (the old code looped to `ret',
+	 * the ready-fd count, which is unrelated to the word count). */
+	for (i = 0; i < nn; i++, Inp += 2, Outp += 2, Exp += 2, q += 2) {
+		if (__put_user (q[1], Inp) ||
+		    __put_user (q[0], Inp+1) ||
+		    __put_user (q[1+PAGE_SIZE/16], Outp) ||
+		    __put_user (q[PAGE_SIZE/16], Outp+1) ||
+		    __put_user (q[1+PAGE_SIZE/8], Exp) ||
+		    __put_user (q[PAGE_SIZE/8], Exp+1)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+out:
+	free_page ((unsigned long)p);
+out_unlock:
+	/* The old code leaked the big kernel lock on every path. */
+	unlock_kernel ();
+	return ret;
+}
+
+/* 32-bit poll(2): struct pollfd has identical 32/64-bit layout, so
+ * only the array pointer needs widening. */
+asmlinkage int sys32_poll(u32 ufds, unsigned int nfds, int timeout)
+{
+	return sys_poll((struct pollfd *)A(ufds), nfds, timeout);
+}
+
+/* Translate a kernel struct stat into the user's 32-bit struct stat32.
+ * Returns 0 or -EFAULT; the leading put_user validates the pointer. */
+static inline int putstat(u32 statbuf, struct stat *s)
+{
+	if (put_user (s->st_dev, &(((struct stat32 *)A(statbuf))->st_dev)) ||
+	    __put_user (s->st_ino, &(((struct stat32 *)A(statbuf))->st_ino)) ||
+	    __put_user (s->st_mode, &(((struct stat32 *)A(statbuf))->st_mode)) ||
+	    __put_user (s->st_nlink, &(((struct stat32 *)A(statbuf))->st_nlink)) ||
+	    __put_user (s->st_uid, &(((struct stat32 *)A(statbuf))->st_uid)) ||
+	    __put_user (s->st_gid, &(((struct stat32 *)A(statbuf))->st_gid)) ||
+	    __put_user (s->st_rdev, &(((struct stat32 *)A(statbuf))->st_rdev)) ||
+	    __put_user (s->st_size, &(((struct stat32 *)A(statbuf))->st_size)) ||
+	    __put_user (s->st_atime, &(((struct stat32 *)A(statbuf))->st_atime)) ||
+	    __put_user (s->st_mtime, &(((struct stat32 *)A(statbuf))->st_mtime)) ||
+	    __put_user (s->st_ctime, &(((struct stat32 *)A(statbuf))->st_ctime)) ||
+	    __put_user (s->st_blksize, &(((struct stat32 *)A(statbuf))->st_blksize)) ||
+	    __put_user (s->st_blocks, &(((struct stat32 *)A(statbuf))->st_blocks)))
+		return -EFAULT;
+	return 0;
+}
+
+/* 32-bit stat(2): native call into a kernel struct stat, then
+ * translated out via putstat. */
+asmlinkage int sys32_newstat(u32 filename, u32 statbuf)
+{
+	int ret;
+	struct stat s;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_newstat((char *)A(filename), &s);
+	set_fs (old_fs);
+	/* Copy out only on success — on error `s' is uninitialized
+	 * kernel stack, and the real errno must survive. */
+	if (!ret && putstat (statbuf, &s)) return -EFAULT;
+	return ret;
+}
+
+/* 32-bit lstat(2): as sys32_newstat, but does not follow symlinks. */
+asmlinkage int sys32_newlstat(u32 filename, u32 statbuf)
+{
+	int ret;
+	struct stat s;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_newlstat((char *)A(filename), &s);
+	set_fs (old_fs);
+	/* Copy out only on success; `s' is garbage otherwise. */
+	if (!ret && putstat (statbuf, &s)) return -EFAULT;
+	return ret;
+}
+
+/* 32-bit fstat(2): as sys32_newstat, keyed by fd. */
+asmlinkage int sys32_newfstat(unsigned int fd, u32 statbuf)
+{
+	int ret;
+	struct stat s;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_newfstat(fd, &s);
+	set_fs (old_fs);
+	/* Copy out only on success; `s' is garbage otherwise. */
+	if (!ret && putstat (statbuf, &s)) return -EFAULT;
+	return ret;
+}
+
+asmlinkage int sys32_readlink(u32 path, u32 buf, int bufsiz)
+{
+	return sys_readlink((const char *)A(path), (char *)A(buf), bufsiz);
+}
+
+/* 32-bit sysfs(2): the argument count depends on `option', hence the
+ * varargs dance to widen the right number of 32-bit pointers. */
+asmlinkage int sys32_sysfs(int option, ...)
+{
+	va_list args;
+	unsigned int x;
+	int ret = -EINVAL;
+
+	va_start(args, option);
+	switch (option) {
+	case 1:
+		ret = sys_sysfs(option, (const char *)A(va_arg(args, u32)));
+		break;
+	case 2:
+		x = va_arg(args, unsigned int);
+		ret = sys_sysfs(option, x, (char *)A(va_arg(args, u32)));
+		break;
+	case 3:
+		/* option 3 takes no further arguments; sys_sysfs ignores
+		 * the missing ones at the C calling-convention level. */
+		ret = sys_sysfs(option);
+		break;
+	}
+	va_end(args);
+	return ret;
+}
+
+asmlinkage int sys32_ustat(dev_t dev, u32 ubuf)
+{
+	/* ustat is the same :)) */
+	return sys_ustat(dev, (struct ustat *)A(ubuf));
+}
+
+asmlinkage int sys32_umount(u32 name)
+{
+	return sys_umount((char *)A(name));
+}
+
+asmlinkage int sys32_mount(u32 dev_name, u32 dir_name, u32 type, u32 new_flags, u32 data)
+{
+	return sys_mount((char *)A(dev_name), (char *)A(dir_name), (char *)A(type),
+			 (unsigned long)new_flags, (void *)A(data));
+}
+
+/* `bug' is a typo for `buf' — the user buffer for the log text. */
+asmlinkage int sys32_syslog(int type, u32 bug, int count)
+{
+	return sys_syslog(type, (char *)A(bug), count);
+}
+
+asmlinkage int sys32_personality(u32 personality)
+{
+	return sys_personality((unsigned long)personality);
+}
+
+/* 32-bit layout of struct rusage: timevals match, all counters are
+ * narrowed from long to s32. */
+struct rusage32 {
+	struct timeval ru_utime;
+	struct timeval ru_stime;
+	s32 ru_maxrss;
+	s32 ru_ixrss;
+	s32 ru_idrss;
+	s32 ru_isrss;
+	s32 ru_minflt;
+	s32 ru_majflt;
+	s32 ru_nswap;
+	s32 ru_inblock;
+	s32 ru_oublock;
+	s32 ru_msgsnd;
+	s32 ru_msgrcv;
+	s32 ru_nsignals;
+	s32 ru_nvcsw;
+	s32 ru_nivcsw;
+};
+
+/* Translate a kernel struct rusage into the user's rusage32.
+ * Returns 0 or -EFAULT; the first put_user validates the pointer. */
+static int put_rusage (u32 ru, struct rusage *r)
+{
+	if (put_user (r->ru_utime.tv_sec, &(((struct rusage32 *)A(ru))->ru_utime.tv_sec)) ||
+	    __put_user (r->ru_utime.tv_usec, &(((struct rusage32 *)A(ru))->ru_utime.tv_usec)) ||
+	    __put_user (r->ru_stime.tv_sec, &(((struct rusage32 *)A(ru))->ru_stime.tv_sec)) ||
+	    __put_user (r->ru_stime.tv_usec, &(((struct rusage32 *)A(ru))->ru_stime.tv_usec)) ||
+	    __put_user (r->ru_maxrss, &(((struct rusage32 *)A(ru))->ru_maxrss)) ||
+	    __put_user (r->ru_ixrss, &(((struct rusage32 *)A(ru))->ru_ixrss)) ||
+	    __put_user (r->ru_idrss, &(((struct rusage32 *)A(ru))->ru_idrss)) ||
+	    __put_user (r->ru_isrss, &(((struct rusage32 *)A(ru))->ru_isrss)) ||
+	    __put_user (r->ru_minflt, &(((struct rusage32 *)A(ru))->ru_minflt)) ||
+	    __put_user (r->ru_majflt, &(((struct rusage32 *)A(ru))->ru_majflt)) ||
+	    __put_user (r->ru_nswap, &(((struct rusage32 *)A(ru))->ru_nswap)) ||
+	    __put_user (r->ru_inblock, &(((struct rusage32 *)A(ru))->ru_inblock)) ||
+	    __put_user (r->ru_oublock, &(((struct rusage32 *)A(ru))->ru_oublock)) ||
+	    __put_user (r->ru_msgsnd, &(((struct rusage32 *)A(ru))->ru_msgsnd)) ||
+	    __put_user (r->ru_msgrcv, &(((struct rusage32 *)A(ru))->ru_msgrcv)) ||
+	    __put_user (r->ru_nsignals, &(((struct rusage32 *)A(ru))->ru_nsignals)) ||
+	    __put_user (r->ru_nvcsw, &(((struct rusage32 *)A(ru))->ru_nvcsw)) ||
+	    __put_user (r->ru_nivcsw, &(((struct rusage32 *)A(ru))->ru_nivcsw)))
+		return -EFAULT;
+	return 0;
+}
+
+/* 32-bit wait4(2): struct rusage needs translation, the status word
+ * does not (sys_wait4 stores an int either way). */
+asmlinkage int sys32_wait4(__kernel_pid_t32 pid, u32 stat_addr, int options, u32 ru)
+{
+	if (!ru)
+		return sys_wait4(pid, (unsigned int *)A(stat_addr), options, NULL);
+	else {
+		struct rusage r;
+		int ret;
+		unsigned long old_fs = get_fs();
+
+		set_fs (KERNEL_DS);
+		ret = sys_wait4(pid, (unsigned int *)A(stat_addr), options, &r);
+		set_fs (old_fs);
+		/* Translate the rusage only on success: on error `r' is
+		 * uninitialized stack and the errno must be preserved. */
+		if (ret >= 0 && put_rusage (ru, &r)) return -EFAULT;
+		return ret;
+	}
+}
+
+/* 32-bit waitpid(2): status is a plain int, nothing to convert. */
+asmlinkage int sys32_waitpid(__kernel_pid_t32 pid, u32 stat_addr, int options)
+{
+	return sys_waitpid(pid, (unsigned int *)A(stat_addr), options);
+}
+
+/* 32-bit layout of struct sysinfo; the padding keeps the overall size
+ * the applications expect. */
+struct sysinfo32 {
+	s32 uptime;
+	u32 loads[3];
+	u32 totalram;
+	u32 freeram;
+	u32 sharedram;
+	u32 bufferram;
+	u32 totalswap;
+	u32 freeswap;
+	unsigned short procs;
+	char _f[22];
+};
+
+/* 32-bit sysinfo(2): gather into a kernel struct sysinfo and narrow
+ * each field into the user's sysinfo32. */
+asmlinkage int sys32_sysinfo(u32 info)
+{
+	struct sysinfo s;
+	int ret;
+	unsigned long old_fs = get_fs ();
+
+	set_fs (KERNEL_DS);
+	ret = sys_sysinfo(&s);
+	set_fs (old_fs);
+	/* NOTE(review): `s' is copied out even when ret != 0 — relies on
+	 * sys_sysinfo never failing; confirm. */
+	if (put_user (s.uptime, &(((struct sysinfo32 *)A(info))->uptime)) ||
+	    __put_user (s.loads[0], &(((struct sysinfo32 *)A(info))->loads[0])) ||
+	    __put_user (s.loads[1], &(((struct sysinfo32 *)A(info))->loads[1])) ||
+	    __put_user (s.loads[2], &(((struct sysinfo32 *)A(info))->loads[2])) ||
+	    __put_user (s.totalram, &(((struct sysinfo32 *)A(info))->totalram)) ||
+	    __put_user (s.freeram, &(((struct sysinfo32 *)A(info))->freeram)) ||
+	    __put_user (s.sharedram, &(((struct sysinfo32 *)A(info))->sharedram)) ||
+	    __put_user (s.bufferram, &(((struct sysinfo32 *)A(info))->bufferram)) ||
+	    __put_user (s.totalswap, &(((struct sysinfo32 *)A(info))->totalswap)) ||
+	    __put_user (s.freeswap, &(((struct sysinfo32 *)A(info))->freeswap)) ||
+	    __put_user (s.procs, &(((struct sysinfo32 *)A(info))->procs)))
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage int sys32_getitimer(int which, u32 value)
+{
+	/* itimerval is the same :)) */
+	return sys_getitimer(which, (struct itimerval *)A(value));
+}
+
+asmlinkage int sys32_setitimer(int which, u32 value, u32 ovalue)
+{
+	return sys_setitimer(which, (struct itimerval *)A(value), (struct itimerval *)A(ovalue));
+}
+
+asmlinkage int sys32_sched_setscheduler(__kernel_pid_t32 pid, int policy, u32 param)
+{
+	/* sched_param is the same :)) */
+	return sys_sched_setscheduler(pid, policy, (struct sched_param *)A(param));
+}
+
+asmlinkage int sys32_sched_setparam(__kernel_pid_t32 pid, u32 param)
+{
+	return sys_sched_setparam(pid, (struct sched_param *)A(param));
+}
+
+asmlinkage int sys32_sched_getparam(__kernel_pid_t32 pid, u32 param)
+{
+	return sys_sched_getparam(pid, (struct sched_param *)A(param));
+}
+
+/* 32-bit layout of struct timespec: both fields narrow to s32. */
+struct timespec32 {
+	s32 tv_sec;
+	s32 tv_nsec;
+};
+
+/* 32-bit sched_rr_get_interval(2): native call into a kernel timespec,
+ * then narrow the two fields into the user's timespec32. */
+asmlinkage int sys32_sched_rr_get_interval(__kernel_pid_t32 pid, u32 interval)
+{
+	struct timespec t;
+	int ret;
+	unsigned long old_fs = get_fs ();
+
+	set_fs (KERNEL_DS);
+	ret = sys_sched_rr_get_interval(pid, &t);
+	set_fs (old_fs);
+	if (put_user (t.tv_sec, &(((struct timespec32 *)A(interval))->tv_sec)) ||
+	    __put_user (t.tv_nsec, &(((struct timespec32 *)A(interval))->tv_nsec)))
+		return -EFAULT;
+	return ret;
+}
+
+/* 32-bit nanosleep(2): convert the request in, reuse the same kernel
+ * timespec for the remaining-time result, and copy that back only when
+ * the sleep was interrupted (the only case the native call fills it). */
+asmlinkage int sys32_nanosleep(u32 rqtp, u32 rmtp)
+{
+	struct timespec t;
+	int ret;
+	unsigned long old_fs = get_fs ();
+
+	if (get_user (t.tv_sec, &(((struct timespec32 *)A(rqtp))->tv_sec)) ||
+	    __get_user (t.tv_nsec, &(((struct timespec32 *)A(rqtp))->tv_nsec)))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_nanosleep(&t, rmtp ? &t : NULL);
+	set_fs (old_fs);
+	if (rmtp && ret == -EINTR) {
+		if (__put_user (t.tv_sec, &(((struct timespec32 *)A(rmtp))->tv_sec)) ||
+		    __put_user (t.tv_nsec, &(((struct timespec32 *)A(rmtp))->tv_nsec)))
+			return -EFAULT;
+	}
+	return ret;
+}
+
+/* 32-bit sigprocmask(2): read/write the mask through a sigset_t32
+ * pointer (get_user/put_user size off the pointed-to type), run the
+ * native call on a kernel sigset_t. */
+asmlinkage int sys32_sigprocmask(int how, u32 set, u32 oset)
+{
+	sigset_t s;
+	int ret;
+	unsigned long old_fs = get_fs();
+
+	if (set && get_user (s, (sigset_t32 *)A(set))) return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_sigprocmask(how, set ? &s : NULL, oset ? &s : NULL);
+	set_fs (old_fs);
+	if (oset && put_user (s, (sigset_t32 *)A(oset))) return -EFAULT;
+	return ret;
+}
+
+/* 32-bit sigpending(2): same sigset narrowing scheme as above. */
+asmlinkage int sys32_sigpending(u32 set)
+{
+	sigset_t s;
+	int ret;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_sigpending(&s);
+	set_fs (old_fs);
+	if (put_user (s, (sigset_t32 *)A(set))) return -EFAULT;
+	return ret;
+}
+
+/* signal(2): the handler is a 32-bit user code address, widened. */
+asmlinkage unsigned long sys32_signal(int signum, u32 handler)
+{
+	return sys_signal(signum, (__sighandler_t)A(handler));
+}
+
+asmlinkage int sys32_reboot(int magic1, int magic2, int cmd, u32 arg)
+{
+	return sys_reboot(magic1, magic2, cmd, (void *)A(arg));
+}
+
+asmlinkage int sys32_acct(u32 name)
+{
+	return sys_acct((const char *)A(name));
+}
+
+/* 32-bit getresuid(2): fetch into kernel uid_t's, then narrow each to
+ * __kernel_uid_t32 in the three user slots. */
+asmlinkage int sys32_getresuid(u32 ruid, u32 euid, u32 suid)
+{
+	uid_t a, b, c;
+	int ret;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_getresuid(&a, &b, &c);
+	set_fs (old_fs);
+	if (put_user (a, (__kernel_uid_t32 *)A(ruid)) ||
+	    put_user (b, (__kernel_uid_t32 *)A(euid)) ||
+	    put_user (c, (__kernel_uid_t32 *)A(suid)))
+		return -EFAULT;
+	return ret;
+}
+
+/* 32-bit layout of struct tms: all four clock_t fields narrow. */
+struct tms32 {
+	__kernel_clock_t32 tms_utime;
+	__kernel_clock_t32 tms_stime;
+	__kernel_clock_t32 tms_cutime;
+	__kernel_clock_t32 tms_cstime;
+};
+
+/* 32-bit times(2): native call into a kernel struct tms, fields
+ * narrowed out; a NULL tbuf skips the copy, as in the native call. */
+asmlinkage long sys32_times(u32 tbuf)
+{
+	struct tms t;
+	long ret;
+	unsigned long old_fs = get_fs ();
+
+	set_fs (KERNEL_DS);
+	ret = sys_times(tbuf ? &t : NULL);
+	set_fs (old_fs);
+	if (tbuf && (
+	    put_user (t.tms_utime, &(((struct tms32 *)A(tbuf))->tms_utime)) ||
+	    __put_user (t.tms_stime, &(((struct tms32 *)A(tbuf))->tms_stime)) ||
+	    __put_user (t.tms_cutime, &(((struct tms32 *)A(tbuf))->tms_cutime)) ||
+	    __put_user (t.tms_cstime, &(((struct tms32 *)A(tbuf))->tms_cstime))))
+		return -EFAULT;
+	return ret;
+}
+
+/* 32-bit getgroups(2): fetch into a kernel gid_t array, then narrow
+ * each entry to __kernel_gid_t32 in the user list. */
+asmlinkage int sys32_getgroups(int gidsetsize, u32 grouplist)
+{
+	gid_t gl[NGROUPS];
+	int ret, i;
+	unsigned long old_fs = get_fs ();
+
+	set_fs (KERNEL_DS);
+	ret = sys_getgroups(gidsetsize, gl);
+	set_fs (old_fs);
+	if (ret > 0 && ret <= NGROUPS)
+		for (i = 0; i < ret; i++, grouplist += sizeof(__kernel_gid_t32))
+			if (__put_user (gl[i], (__kernel_gid_t32 *)A(grouplist)))
+				return -EFAULT;
+	return ret;
+}
+
+/* 32-bit setgroups(2): widen each 32-bit gid into a kernel array.
+ * NOTE(review): __get_user is used with no preceding verify_area on
+ * grouplist — confirm the access check happens elsewhere. */
+asmlinkage int sys32_setgroups(int gidsetsize, u32 grouplist)
+{
+	gid_t gl[NGROUPS];
+	int ret, i;
+	unsigned long old_fs = get_fs ();
+
+	if ((unsigned) gidsetsize > NGROUPS)
+		return -EINVAL;
+	for (i = 0; i < gidsetsize; i++, grouplist += sizeof(__kernel_gid_t32))
+		if (__get_user (gl[i], (__kernel_gid_t32 *)A(grouplist)))
+			return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_setgroups(gidsetsize, gl);
+	set_fs (old_fs);
+	return ret;
+}
+
+asmlinkage int sys32_newuname(u32 name)
+{
+	/* utsname is the same :)) */
+	return sys_newuname((struct new_utsname *)A(name));
+}
+
+asmlinkage int sys32_olduname(u32 name)
+{
+	return sys_olduname((struct oldold_utsname *)A(name));
+}
+
+asmlinkage int sys32_sethostname(u32 name, int len)
+{
+	return sys_sethostname((char *)A(name), len);
+}
+
+asmlinkage int sys32_gethostname(u32 name, int len)
+{
+	return sys_gethostname((char *)A(name), len);
+}
+
+asmlinkage int sys32_setdomainname(u32 name, int len)
+{
+	return sys_setdomainname((char *)A(name), len);
+}
+
+/* 32-bit layout of struct rlimit: both limits narrow to s32. */
+struct rlimit32 {
+	s32 rlim_cur;
+	s32 rlim_max;
+};
+
+/* 32-bit getrlimit(2): native call into a kernel struct rlimit, both
+ * fields narrowed out on success only. */
+asmlinkage int sys32_getrlimit(unsigned int resource, u32 rlim)
+{
+	struct rlimit r;
+	int ret;
+	unsigned long old_fs = get_fs ();
+
+	set_fs (KERNEL_DS);
+	ret = sys_getrlimit(resource, &r);
+	set_fs (old_fs);
+	if (!ret && (
+	    put_user (r.rlim_cur, &(((struct rlimit32 *)A(rlim))->rlim_cur)) ||
+	    __put_user (r.rlim_max, &(((struct rlimit32 *)A(rlim))->rlim_max))))
+		return -EFAULT;
+	return ret;
+}
+
+/* 32-bit setrlimit(2): widen the two limits in; the resource range is
+ * checked here before touching user memory. */
+asmlinkage int sys32_setrlimit(unsigned int resource, u32 rlim)
+{
+	struct rlimit r;
+	int ret;
+	unsigned long old_fs = get_fs ();
+
+	if (resource >= RLIM_NLIMITS) return -EINVAL;
+	if (get_user (r.rlim_cur, &(((struct rlimit32 *)A(rlim))->rlim_cur)) ||
+	    __get_user (r.rlim_max, &(((struct rlimit32 *)A(rlim))->rlim_max)))
+		return -EFAULT;
+	set_fs (KERNEL_DS);
+	ret = sys_setrlimit(resource, &r);
+	set_fs (old_fs);
+	return ret;
+}
+
+/* 32-bit getrusage(2): native call into a kernel struct rusage, then
+ * translated out via put_rusage. */
+asmlinkage int sys32_getrusage(int who, u32 ru)
+{
+	struct rusage r;
+	int ret;
+	unsigned long old_fs = get_fs();
+
+	set_fs (KERNEL_DS);
+	ret = sys_getrusage(who, &r);
+	set_fs (old_fs);
+	/* Copy out only on success: on failure (e.g. bad `who') `r' is
+	 * uninitialized stack and the errno must be preserved. */
+	if (!ret && put_rusage (ru, &r)) return -EFAULT;
+	return ret;
+}
+
+/* time(2): the native call stores an int through tloc, so only the
+ * pointer needs widening. */
+asmlinkage int sys32_time(u32 tloc)
+{
+	return sys_time((int *)A(tloc));
+}
+
+asmlinkage int sys32_gettimeofday(u32 tv, u32 tz)
+{
+	/* both timeval and timezone are ok :)) */
+	return sys_gettimeofday((struct timeval *)A(tv), (struct timezone *)A(tz));
+}
+
+asmlinkage int sys32_settimeofday(u32 tv, u32 tz)
+{
+	return sys_settimeofday((struct timeval *)A(tv), (struct timezone *)A(tz));
+}
+
+struct timex32 {
+ unsigned int modes;
+ s32 offset;
+ s32 freq;
+ s32 maxerror;
+ s32 esterror;
+ int status;
+ s32 constant;
+ s32 precision;
+ s32 tolerance;
+ struct timeval time;
+ s32 tick;
+ s32 ppsfreq;
+ s32 jitter;
+ int shift;
+ s32 stabil;
+ s32 jitcnt;
+ s32 calcnt;
+ s32 errcnt;
+ s32 stbcnt;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+};
+
+asmlinkage int sys32_adjtimex(u32 txc_p)
+{
+ struct timex t;
+ int ret;
+ unsigned long old_fs = get_fs ();
+
+ if (get_user (t.modes, &(((struct timex32 *)A(txc_p))->modes)) ||
+ __get_user (t.offset, &(((struct timex32 *)A(txc_p))->offset)) ||
+ __get_user (t.freq, &(((struct timex32 *)A(txc_p))->freq)) ||
+ __get_user (t.maxerror, &(((struct timex32 *)A(txc_p))->maxerror)) ||
+ __get_user (t.esterror, &(((struct timex32 *)A(txc_p))->esterror)) ||
+ __get_user (t.status, &(((struct timex32 *)A(txc_p))->status)) ||
+ __get_user (t.constant, &(((struct timex32 *)A(txc_p))->constant)) ||
+ __get_user (t.tick, &(((struct timex32 *)A(txc_p))->tick)) ||
+ __get_user (t.shift, &(((struct timex32 *)A(txc_p))->shift)))
+ return -EFAULT;
+ set_fs (KERNEL_DS);
+ ret = sys_adjtimex(&t);
+ set_fs (old_fs);
+ if ((unsigned)ret >= 0 && (
+ __put_user (t.modes, &(((struct timex32 *)A(txc_p))->modes)) ||
+ __put_user (t.offset, &(((struct timex32 *)A(txc_p))->offset)) ||
+ __put_user (t.freq, &(((struct timex32 *)A(txc_p))->freq)) ||
+ __put_user (t.maxerror, &(((struct timex32 *)A(txc_p))->maxerror)) ||
+ __put_user (t.esterror, &(((struct timex32 *)A(txc_p))->esterror)) ||
+ __put_user (t.status, &(((struct timex32 *)A(txc_p))->status)) ||
+ __put_user (t.constant, &(((struct timex32 *)A(txc_p))->constant)) ||
+ __put_user (t.precision, &(((struct timex32 *)A(txc_p))->precision)) ||
+ __put_user (t.tolerance, &(((struct timex32 *)A(txc_p))->tolerance)) ||
+ __put_user (t.time.tv_sec, &(((struct timex32 *)A(txc_p))->time.tv_sec)) ||
+ __put_user (t.time.tv_usec, &(((struct timex32 *)A(txc_p))->time.tv_usec)) ||
+ __put_user (t.tick, &(((struct timex32 *)A(txc_p))->tick)) ||
+ __put_user (t.ppsfreq, &(((struct timex32 *)A(txc_p))->ppsfreq)) ||
+ __put_user (t.jitter, &(((struct timex32 *)A(txc_p))->jitter)) ||
+ __put_user (t.shift, &(((struct timex32 *)A(txc_p))->shift)) ||
+ __put_user (t.stabil, &(((struct timex32 *)A(txc_p))->stabil)) ||
+ __put_user (t.jitcnt, &(((struct timex32 *)A(txc_p))->jitcnt)) ||
+ __put_user (t.calcnt, &(((struct timex32 *)A(txc_p))->calcnt)) ||
+ __put_user (t.errcnt, &(((struct timex32 *)A(txc_p))->errcnt)) ||
+ __put_user (t.stbcnt, &(((struct timex32 *)A(txc_p))->stbcnt))))
+ return -EFAULT;
+ return ret;
+}
+
+asmlinkage int sys32_msync(u32 start, __kernel_size_t32 len, int flags)
+{
+ return sys_msync((unsigned long)start, (size_t)len, flags);
+}
+
+asmlinkage int sys32_mlock(u32 start, __kernel_size_t32 len)
+{
+ return sys_mlock((unsigned long)start, (size_t)len);
+}
+
+asmlinkage int sys32_munlock(u32 start, __kernel_size_t32 len)
+{
+ return sys_munlock((unsigned long)start, (size_t)len);
+}
+
+asmlinkage unsigned long sparc32_brk(u32 brk)
+{
+ return sys_brk((unsigned long)brk);
+}
+
+asmlinkage int sys32_munmap(u32 addr, __kernel_size_t32 len)
+{
+ return sys_munmap((unsigned long)addr, (size_t)len);
+}
+
+asmlinkage int sys32_mprotect(u32 start, __kernel_size_t32 len, u32 prot)
+{
+ return sys_mprotect((unsigned long)start, (size_t)len, (unsigned long)prot);
+}
+
+asmlinkage unsigned long sys32_mremap(u32 addr, u32 old_len, u32 new_len, u32 flags)
+{
+ return sys_mremap((unsigned long)addr, (unsigned long)old_len, (unsigned long)new_len, (unsigned long)flags);
+}
+
+asmlinkage int sys32_swapoff(u32 specialfile)
+{
+ return sys_swapoff((const char *)A(specialfile));
+}
+
+asmlinkage int sys32_swapon(u32 specialfile, int swap_flags)
+{
+ return sys_swapon((const char *)A(specialfile), swap_flags);
+}
+
+asmlinkage int sys32_bind(int fd, u32 umyaddr, int addrlen)
+{
+ /* sockaddr is the same :)) */
+ return sys_bind(fd, (struct sockaddr *)A(umyaddr), addrlen);
+}
+
+asmlinkage int sys32_accept(int fd, u32 upeer_sockaddr, u32 upeer_addrlen)
+{
+ return sys_accept(fd, (struct sockaddr *)A(upeer_sockaddr), (int *)A(upeer_addrlen));
+}
+
+asmlinkage int sys32_connect(int fd, u32 uservaddr, int addrlen)
+{
+ return sys_connect(fd, (struct sockaddr *)A(uservaddr), addrlen);
+}
+
+asmlinkage int sys32_getsockname(int fd, u32 usockaddr, u32 usockaddr_len)
+{
+ return sys_getsockname(fd, (struct sockaddr *)A(usockaddr), (int *)A(usockaddr_len));
+}
+
+asmlinkage int sys32_getpeername(int fd, u32 usockaddr, u32 usockaddr_len)
+{
+ return sys_getpeername(fd, (struct sockaddr *)A(usockaddr), (int *)A(usockaddr_len));
+}
+
+asmlinkage int sys32_send(int fd, u32 buff, __kernel_size_t32 len, unsigned flags)
+{
+ return sys_send(fd, (void *)A(buff), (size_t)len, flags);
+}
+
+asmlinkage int sys32_sendto(int fd, u32 buff, __kernel_size_t32 len, unsigned flags, u32 addr, int addr_len)
+{
+ return sys_sendto(fd, (void *)A(buff), (size_t)len, flags, (struct sockaddr *)A(addr), addr_len);
+}
+
+asmlinkage int sys32_recv(int fd, u32 ubuf, __kernel_size_t32 size, unsigned flags)
+{
+ return sys_recv(fd, (void *)A(ubuf), (size_t)size, flags);
+}
+
+asmlinkage int sys32_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size, unsigned flags, u32 addr, u32 addr_len)
+{
+ return sys_recvfrom(fd, (void *)A(ubuf), (size_t)size, flags, (struct sockaddr *)A(addr), (int *)A(addr_len));
+}
+
+asmlinkage int sys32_setsockopt(int fd, int level, int optname, u32 optval, int optlen)
+{
+ /* XXX handle ip_fw32->ip_fw conversion for IP firewalling and accounting.
+ Do it using some macro in ip_sockglue.c
+ Other optval arguments are mostly just ints or 32<->64bit transparent */
+ return sys_setsockopt(fd, level, optname, (char *)A(optval), optlen);
+}
+
+asmlinkage int sys32_getsockopt(int fd, int level, int optname, u32 optval, u32 optlen)
+{
+ return sys_getsockopt(fd, level, optname, (char *)A(optval), (int *)A(optlen));
+}
+
+/* Continue here */
+asmlinkage int sys32_sendmsg(int fd, u32 msg, unsigned flags)
+{
+ return sys_sendmsg(fd, (struct msghdr *)A(msg), flags);
+}
+
+asmlinkage int sys32_recvmsg(int fd, u32 msg, unsigned int flags)
+{
+ return sys_recvmsg(fd, (struct msghdr *)A(msg), flags);
+}
+
+asmlinkage int sys32_socketcall(int call, u32 args)
+{
+ return sys_socketcall(call, (unsigned long *)A(args));
+}
+
+extern void check_pending(int signum);
+
+asmlinkage int sparc32_sigaction (int signum, u32 action, u32 oldaction)
+{
+ struct sigaction32 new_sa, old_sa;
+ struct sigaction *p;
+ int err = -EINVAL;
+
+ lock_kernel();
+ if(signum < 0) {
+ current->tss.new_signal = 1;
+ signum = -signum;
+ }
+
+ if (signum<1 || signum>32)
+ goto out;
+ p = signum - 1 + current->sig->action;
+ if (action) {
+ err = -EINVAL;
+ if (signum==SIGKILL || signum==SIGSTOP)
+ goto out;
+ err = -EFAULT;
+ if(copy_from_user(&new_sa, A(action), sizeof(struct sigaction32)))
+ goto out;
+ if (((__sighandler_t)A(new_sa.sa_handler)) != SIG_DFL &&
+ ((__sighandler_t)A(new_sa.sa_handler)) != SIG_IGN) {
+ err = verify_area(VERIFY_READ, (__sighandler_t)A(new_sa.sa_handler), 1);
+ if (err)
+ goto out;
+ }
+ }
+
+ if (oldaction) {
+ err = -EFAULT;
+ old_sa.sa_handler = (unsigned)(u64)(p->sa_handler);
+ old_sa.sa_mask = (sigset_t32)(p->sa_mask);
+ old_sa.sa_flags = (unsigned)(p->sa_flags);
+ old_sa.sa_restorer = (unsigned)(u64)(p->sa_restorer);
+ if (copy_to_user(A(oldaction), p, sizeof(struct sigaction32)))
+ goto out;
+ }
+
+ if (action) {
+ p->sa_handler = (__sighandler_t)A(new_sa.sa_handler);
+ p->sa_mask = (sigset_t)(new_sa.sa_mask);
+ p->sa_flags = new_sa.sa_flags;
+ p->sa_restorer = (void (*)(void))A(new_sa.sa_restorer);
+ check_pending(signum);
+ }
+
+ err = 0;
+out:
+ unlock_kernel();
+ return err;
+}
+
+asmlinkage int sys32_nfsservctl(int cmd, u32 argp, u32 resp)
+{
+ /* XXX handle argp and resp args */
+ return sys_nfsservctl(cmd, (void *)A(argp), (void *)A(resp));
+}
+
+struct ncp_mount_data32 {
+ int version;
+ unsigned int ncp_fd;
+ __kernel_uid_t32 mounted_uid;
+ __kernel_pid_t32 wdog_pid;
+ unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
+ unsigned int time_out;
+ unsigned int retry_count;
+ unsigned int flags;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
+
+void *do_ncp_super_data_conv(void *raw_data)
+{
+ struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
+ struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
+
+ n->dir_mode = n32->dir_mode;
+ n->file_mode = n32->file_mode;
+ n->gid = n32->gid;
+ n->uid = n32->uid;
+ memmove (n->mounted_vol, n32->mounted_vol, (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
+ n->wdog_pid = n32->wdog_pid;
+ n->mounted_uid = n32->mounted_uid;
+ return raw_data;
+}
+
+struct smb_mount_data32 {
+ int version;
+ unsigned int fd;
+ __kernel_uid_t32 mounted_uid;
+ struct sockaddr_in addr;
+ char server_name[17];
+ char client_name[17];
+ char service[64];
+ char root_path[64];
+ char username[64];
+ char password[64];
+ char domain[64];
+ unsigned short max_xmit;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
+
+void *do_smb_super_data_conv(void *raw_data)
+{
+ struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
+ struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
+
+ s->dir_mode = s32->dir_mode;
+ s->file_mode = s32->file_mode;
+ s->gid = s32->gid;
+ s->uid = s32->uid;
+ memmove (&s->addr, &s32->addr, (((long)&s->uid) - ((long)&s->addr)));
+ s->mounted_uid = s32->mounted_uid;
+ return raw_data;
+}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
new file mode 100644
index 000000000..8ff04f02a
--- /dev/null
+++ b/arch/sparc64/kernel/systbls.S
@@ -0,0 +1,222 @@
+/* $Id: systbls.S,v 1.8 1997/04/21 08:34:23 jj Exp $
+ * systbls.S: System call entry point tables for OS compatibility.
+ * The native Linux system call table lives here also.
+ *
+ * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+ .data
+ .align 8
+
+ /* First, the 32-bit Linux native syscall table. */
+
+ .globl sys_call_table32
+sys_call_table32:
+/*0*/ .xword sys_setup, sys_exit, sys_fork, sys32_read, sys32_write
+/*5*/ .xword sys32_open, sys_close, sys32_wait4, sys32_creat, sys32_link
+/*10*/ .xword sys32_unlink, sunos_execv, sys32_chdir, sys_nis_syscall, sys32_mknod
+/*15*/ .xword sys32_chmod, sys32_chown, sparc32_brk, sys_nis_syscall, sys32_lseek
+/*20*/ .xword sys_getpid, sys_nis_syscall, sys_nis_syscall, sys_setuid, sys_getuid
+/*25*/ .xword sys32_time, sys32_ptrace, sys_alarm, sys_nis_syscall, sys_pause
+/*30*/ .xword sys32_utime, sys_stty, sys_gtty, sys32_access, sys_nice
+ .xword sys_ftime, sys_sync, sys_kill, sys32_newstat, sys_nis_syscall
+/*40*/ .xword sys32_newlstat, sys_dup, sys_pipe, sys32_times, sys_profil
+ .xword sys_nis_syscall, sys_setgid, sys_getgid, sys32_signal, sys_geteuid
+/*50*/ .xword sys_getegid, sys32_acct, sys_nis_syscall, sys_nis_syscall, sys32_ioctl
+ .xword sys32_reboot, sys_nis_syscall, sys32_symlink, sys32_readlink, sys_execve
+/*60*/ .xword sys_umask, sys32_chroot, sys32_newfstat, sys_nis_syscall, sys_getpagesize
+ .xword sys_nis_syscall, sys_vfork, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*70*/ .xword sys_nis_syscall, sys32_mmap, sys_nis_syscall, sys32_munmap, sys32_mprotect
+ .xword sys_nis_syscall, sys_vhangup, sys_nis_syscall, sys_nis_syscall, sys32_getgroups
+/*80*/ .xword sys32_setgroups, sys_getpgrp, sys_nis_syscall, sys32_setitimer, sys_nis_syscall
+ .xword sys32_swapon, sys32_getitimer, sys_nis_syscall, sys32_sethostname, sys_nis_syscall
+/*90*/ .xword sys_dup2, sys_nis_syscall, sys32_fcntl, sys32_select, sys_nis_syscall
+ .xword sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*100*/ .xword sys_getpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*110*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_nis_syscall
+/*120*/ .xword sys32_readv, sys32_writev, sys32_settimeofday, sys_fchown, sys_fchmod
+ .xword sys_nis_syscall, sys_setreuid, sys_setregid, sys32_rename, sys32_truncate
+/*130*/ .xword sys32_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys32_mkdir, sys32_rmdir, sys_nis_syscall, sys_nis_syscall
+/*140*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getrlimit
+ .xword sys32_setrlimit, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*150*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_nis_syscall, sys32_statfs, sys32_fstatfs, sys32_umount
+/*160*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_setdomainname, sys_nis_syscall
+ .xword sys32_quotactl, sys_nis_syscall, sys32_mount, sys32_ustat, sys_nis_syscall
+/*170*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getdents
+ .xword sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_sigpending, sys_nis_syscall
+ .xword sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_newuname
+/*190*/ .xword sys_nis_syscall, sys32_personality, sys_prof, sys_break, sys_lock
+ .xword sys_mpx, sys_ulimit, sys_getppid, sparc32_sigaction, sys_sgetmask
+/*200*/ .xword sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys32_uselib, old32_readdir
+ .xword sys_nis_syscall, sys32_socketcall, sys32_syslog, sys32_olduname, sys_nis_syscall
+/*210*/ .xword sys_idle, sys_nis_syscall, sys32_waitpid, sys32_swapoff, sys32_sysinfo
+ .xword sys32_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
+/*220*/ .xword sys_sigprocmask, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getpgid
+ .xword sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
+/*230*/ .xword sys32_llseek, sys32_time, sys_nis_syscall, sys_stime, sys_nis_syscall
+ .xword sys_nis_syscall, sys32_llseek, sys32_mlock, sys32_munlock, sys_mlockall
+/*240*/ .xword sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_sched_get_priority_max, sys_sched_get_priority_min, sys32_sched_rr_get_interval, sys_nanosleep
+/*250*/ .xword sys32_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys32_nfsservctl
+ .xword sys_aplib, sys_nis_syscall
+
+ /* Now the 64-bit native Linux syscall table. */
+
+ .globl sys_call_table64, sys_call_table
+sys_call_table64:
+sys_call_table:
+/*0*/ .xword sys_setup, sys_exit, sys_fork, sys_read, sys_write
+/*5*/ .xword sys_open, sys_close, sys_wait4, sys_creat, sys_link
+/*10*/ .xword sys_unlink, sunos_execv, sys_chdir, sys_nis_syscall, sys_mknod
+/*15*/ .xword sys_chmod, sys_chown, sparc_brk, sys_nis_syscall, sys_lseek
+/*20*/ .xword sys_getpid, sys_nis_syscall, sys_nis_syscall, sys_setuid, sys_getuid
+/*25*/ .xword sys_time, sys_ptrace, sys_alarm, sys_nis_syscall, sys_pause
+/*30*/ .xword sys_utime, sys_stty, sys_gtty, sys_access, sys_nice
+ .xword sys_ftime, sys_sync, sys_kill, sys_newstat, sys_nis_syscall
+/*40*/ .xword sys_newlstat, sys_dup, sys_pipe, sys_times, sys_profil
+ .xword sys_nis_syscall, sys_setgid, sys_getgid, sys_signal, sys_geteuid
+/*50*/ .xword sys_getegid, sys_acct, sys_nis_syscall, sys_nis_syscall, sys_ioctl
+ .xword sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve
+/*60*/ .xword sys_umask, sys_chroot, sys_newfstat, sys_nis_syscall, sys_getpagesize
+ .xword sys_nis_syscall, sys_vfork, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*70*/ .xword sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_munmap, sys_mprotect
+ .xword sys_nis_syscall, sys_vhangup, sys_nis_syscall, sys_nis_syscall, sys_getgroups
+/*80*/ .xword sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
+ .xword sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
+/*90*/ .xword sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
+ .xword sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*100*/ .xword sys_getpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*110*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_nis_syscall
+/*120*/ .xword sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
+ .xword sys_nis_syscall, sys_setreuid, sys_setregid, sys_rename, sys_truncate
+/*130*/ .xword sys_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_mkdir, sys_rmdir, sys_nis_syscall, sys_nis_syscall
+/*140*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getrlimit
+ .xword sys_setrlimit, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*150*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_umount
+/*160*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_setdomainname, sys_nis_syscall
+ .xword sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
+/*170*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
+ .xword sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_nis_syscall
+ .xword sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_newuname
+/*190*/ .xword sys_init_module, sys_personality, sys_prof, sys_break, sys_lock
+ .xword sys_mpx, sys_ulimit, sys_getppid, sparc_sigaction, sys_sgetmask
+/*200*/ .xword sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_nis_syscall
+ .xword sys_nis_syscall, sys_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
+/*210*/ .xword sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
+ .xword sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
+/*220*/ .xword sys_sigprocmask, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
+ .xword sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
+/*230*/ .xword sys_llseek, sys_time, sys_nis_syscall, sys_stime, sys_nis_syscall
+ .xword sys_nis_syscall, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
+/*240*/ .xword sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
+/*250*/ .xword sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
+ .xword sys_aplib, sys_nis_syscall
+
+ /* Now the 32-bit SunOS syscall table. */
+
+ .align 4
+ .globl sunos_sys_table
+sunos_sys_table:
+/*0*/ .xword sunos_indir, sys_exit, sys_fork
+ .xword sunos_read, sunos_write, sunos_open
+ .xword sys_close, sunos_wait4, sys_creat
+ .xword sys_link, sys_unlink, sunos_execv
+ .xword sys_chdir, sunos_nosys, sys_mknod
+ .xword sys_chmod, sys_chown, sunos_brk
+ .xword sunos_nosys, sys_lseek, sunos_getpid
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_getuid, sunos_nosys, sys_ptrace
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sys_access, sunos_nosys, sunos_nosys
+ .xword sys_sync, sys_kill, sys_newstat
+ .xword sunos_nosys, sys_newlstat, sys_dup
+ .xword sys_pipe, sunos_nosys, sys_profil
+ .xword sunos_nosys, sunos_nosys, sunos_getgid
+ .xword sunos_nosys, sunos_nosys
+/*50*/ .xword sunos_nosys, sys_acct, sunos_nosys
+ .xword sunos_mctl, sunos_ioctl, sys_reboot
+ .xword sunos_nosys, sys_symlink, sys_readlink
+ .xword sys_execve, sys_umask, sys_chroot
+ .xword sys_newfstat, sunos_nosys, sys_getpagesize
+ .xword sys_msync, sys_vfork, sunos_nosys
+ .xword sunos_nosys, sunos_sbrk, sunos_sstk
+ .xword sunos_mmap, sunos_vadvise, sys_munmap
+ .xword sys_mprotect, sunos_madvise, sys_vhangup
+ .xword sunos_nosys, sunos_mincore, sys_getgroups
+ .xword sys_setgroups, sys_getpgrp, sunos_setpgrp
+ .xword sys_setitimer, sunos_nosys, sys_swapon
+ .xword sys_getitimer, sys_gethostname, sys_sethostname
+ .xword sunos_getdtablesize, sys_dup2, sunos_nop
+ .xword sys_fcntl, sunos_select, sunos_nop
+ .xword sys_fsync, sys_setpriority, sys_socket
+ .xword sys_connect, sunos_accept
+/*100*/ .xword sys_getpriority, sunos_send, sunos_recv
+ .xword sunos_nosys, sys_bind, sunos_setsockopt
+ .xword sys_listen, sunos_nosys, sunos_sigaction
+ .xword sunos_sigblock, sunos_sigsetmask, sys_sigpause
+ .xword sys_sigstack, sys_recvmsg, sys_sendmsg
+ .xword sunos_nosys, sys_gettimeofday, sys_getrusage
+ .xword sunos_getsockopt, sunos_nosys, sunos_readv
+ .xword sunos_writev, sys_settimeofday, sys_fchown
+ .xword sys_fchmod, sys_recvfrom, sys_setreuid
+ .xword sys_setregid, sys_rename, sys_truncate
+ .xword sys_ftruncate, sys_flock, sunos_nosys
+ .xword sys_sendto, sys_shutdown, sys_socketpair
+ .xword sys_mkdir, sys_rmdir, sys_utimes
+ .xword sys_sigreturn, sunos_nosys, sys_getpeername
+ .xword sunos_gethostid, sunos_nosys, sys_getrlimit
+ .xword sys_setrlimit, sunos_killpg, sunos_nosys
+ .xword sunos_nosys, sunos_nosys
+/*150*/ .xword sys_getsockname, sunos_nosys, sunos_nosys
+ .xword sunos_poll, sunos_nosys, sunos_nosys
+ .xword sunos_getdirentries, sys_statfs, sys_fstatfs
+ .xword sys_umount, sunos_nosys, sunos_nosys
+ .xword sunos_getdomainname, sys_setdomainname
+ .xword sunos_nosys, sys_quotactl, sunos_nosys
+ .xword sunos_mount, sys_ustat, sunos_semsys
+ .xword sunos_nosys, sunos_shmsys, sunos_audit
+ .xword sunos_nosys, sunos_getdents, sys_setsid
+ .xword sys_fchdir, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sys_sigpending, sunos_nosys
+ .xword sys_setpgid, sunos_pathconf, sunos_fpathconf
+ .xword sunos_sysconf, sunos_uname, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+/*200*/ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys
+/*250*/ .xword sunos_nosys, sunos_nosys, sunos_nosys
+ .xword sunos_nosys, sunos_nosys, sys_aplib
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
new file mode 100644
index 000000000..3f15fcb54
--- /dev/null
+++ b/arch/sparc64/kernel/time.c
@@ -0,0 +1,352 @@
+/* $Id: time.c,v 1.2 1997/04/10 03:02:35 davem Exp $
+ * time.c: UltraSparc timer and TOD clock support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ *
+ * Based largely on code which is:
+ *
+ * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+
+#include <asm/oplib.h>
+#include <asm/mostek.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+struct mostek48t02 *mstk48t02_regs = 0;
+struct mostek48t08 *mstk48t08_regs = 0;
+struct mostek48t59 *mstk48t59_regs = 0;
+
+static int set_rtc_mmss(unsigned long);
+
+/* timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ *
+ * NOTE: On SUN5 systems the ticker interrupt comes in using 2
+ * interrupts, one at level14 and one with softint bit 0.
+ */
+void timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+{
+ /* last time the cmos clock got updated */
+ static long last_rtc_update=0;
+
+ do_timer(regs);
+
+ /* Determine when to update the Mostek clock. */
+ if (time_state != TIME_BAD && xtime.tv_sec > last_rtc_update + 660 &&
+ xtime.tv_usec > 500000 - (tick >> 1) &&
+ xtime.tv_usec < 500000 + (tick >> 1))
+ if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+}
+
+/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
+ * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
+ * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
+ *
+ * [For the Julian calendar (which was used in Russia before 1917,
+ * Britain & colonies before 1752, anywhere else before 1582,
+ * and is still in use by some communities) leave out the
+ * -year/100+year/400 terms, and add 10.]
+ *
+ * This algorithm was first published by Gauss (I think).
+ *
+ * WARNING: this function will overflow on 2106-02-07 06:28:16 on
+ * machines were long is 32-bit! (However, as time_t is signed, we
+ * will already get problems at other places on 2038-01-19 03:14:08)
+ */
+static inline unsigned long mktime(unsigned int year, unsigned int mon,
+ unsigned int day, unsigned int hour,
+ unsigned int min, unsigned int sec)
+{
+ if (0 >= (int) (mon -= 2)) { /* 1..12 -> 11,12,1..10 */
+ mon += 12; /* Puts Feb last since it has leap day */
+ year -= 1;
+ }
+ return (((
+ (unsigned long)(year/4 - year/100 + year/400 + 367*mon/12 + day) +
+ year*365 - 719499
+ )*24 + hour /* now have hours */
+ )*60 + min /* now have minutes */
+ )*60 + sec; /* finally seconds */
+}
+
+/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
+static void kick_start_clock(void)
+{
+ register struct mostek48t02 *regs = mstk48t02_regs;
+ unsigned char sec;
+ int i, count;
+
+ prom_printf("CLOCK: Clock was stopped. Kick start ");
+
+ /* Turn on the kick start bit to start the oscillator. */
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->sec &= ~MSTK_STOP;
+ regs->hour |= MSTK_KICK_START;
+ regs->creg &= ~MSTK_CREG_WRITE;
+
+ /* Delay to allow the clock oscillator to start. */
+ sec = MSTK_REG_SEC(regs);
+ for (i = 0; i < 3; i++) {
+ while (sec == MSTK_REG_SEC(regs))
+ for (count = 0; count < 100000; count++)
+ /* nothing */ ;
+ prom_printf(".");
+ sec = regs->sec;
+ }
+ prom_printf("\n");
+
+ /* Turn off kick start and set a "valid" time and date. */
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->hour &= ~MSTK_KICK_START;
+ MSTK_SET_REG_SEC(regs,0);
+ MSTK_SET_REG_MIN(regs,0);
+ MSTK_SET_REG_HOUR(regs,0);
+ MSTK_SET_REG_DOW(regs,5);
+ MSTK_SET_REG_DOM(regs,1);
+ MSTK_SET_REG_MONTH(regs,8);
+ MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
+ regs->creg &= ~MSTK_CREG_WRITE;
+
+ /* Ensure the kick start bit is off. If it isn't, turn it off. */
+ while (regs->hour & MSTK_KICK_START) {
+ prom_printf("CLOCK: Kick start still on!\n");
+ regs->creg |= MSTK_CREG_WRITE;
+ regs->hour &= ~MSTK_KICK_START;
+ regs->creg &= ~MSTK_CREG_WRITE;
+ }
+
+ prom_printf("CLOCK: Kick start procedure successful.\n");
+}
+
+/* Return nonzero if the clock chip battery is low. */
+static int has_low_battery(void)
+{
+ register struct mostek48t02 *regs = mstk48t02_regs;
+ unsigned char data1, data2;
+
+ data1 = regs->eeprom[0]; /* Read some data. */
+ regs->eeprom[0] = ~data1; /* Write back the complement. */
+ data2 = regs->eeprom[0]; /* Read back the complement. */
+ regs->eeprom[0] = data1; /* Restore the original value. */
+
+ return (data1 == data2); /* Was the write blocked? */
+}
+
+/* XXX HACK HACK HACK, delete me soon */
+static struct linux_prom_ranges XXX_sbus_ranges[PROMREG_MAX];
+static int XXX_sbus_nranges;
+
+/* Probe for the real time clock chip. */
+__initfunc(static void clock_probe(void))
+{
+ struct linux_prom_registers clk_reg[2];
+ char model[128];
+ int node, sbusnd, err;
+
+ node = prom_getchild(prom_root_node);
+ sbusnd = prom_searchsiblings(node, "sbus");
+ node = prom_getchild(sbusnd);
+
+ if(node == 0 || node == -1) {
+ prom_printf("clock_probe: Serious problem can't find sbus PROM node.\n");
+ prom_halt();
+ }
+
+ /* XXX FIX ME */
+ err = prom_getproperty(sbusnd, "ranges", (char *) XXX_sbus_ranges,
+ sizeof(XXX_sbus_ranges));
+ if(err == -1) {
+ prom_printf("clock_probe: Cannot get XXX sbus ranges\n");
+ prom_halt();
+ }
+ XXX_sbus_nranges = (err / sizeof(struct linux_prom_ranges));
+
+ while(1) {
+ prom_getstring(node, "model", model, sizeof(model));
+ if(strcmp(model, "mk48t02") &&
+ strcmp(model, "mk48t08") &&
+ strcmp(model, "mk48t59")) {
+ node = prom_getsibling(node);
+ if(node == 0) {
+ prom_printf("clock_probe: Cannot find timer chip\n");
+ prom_halt();
+ }
+ continue;
+ }
+
+ err = prom_getproperty(node, "reg", (char *)clk_reg,
+ sizeof(clk_reg));
+ if(err == -1) {
+ prom_printf("clock_probe: Cannot make Mostek\n");
+ prom_halt();
+ }
+
+ /* XXX fix me badly */
+ prom_adjust_regs(clk_reg, 1, XXX_sbus_ranges, XXX_sbus_nranges);
+
+ if(model[5] == '0' && model[6] == '2') {
+ mstk48t02_regs = (struct mostek48t02 *)
+ sparc_alloc_io(clk_reg[0].phys_addr,
+ (void *) 0, sizeof(*mstk48t02_regs),
+ "clock", clk_reg[0].which_io, 0x0);
+ } else if(model[5] == '0' && model[6] == '8') {
+ mstk48t08_regs = (struct mostek48t08 *)
+ sparc_alloc_io(clk_reg[0].phys_addr,
+ (void *) 0, sizeof(*mstk48t08_regs),
+ "clock", clk_reg[0].which_io, 0x0);
+ mstk48t02_regs = &mstk48t08_regs->regs;
+ } else {
+ mstk48t59_regs = (struct mostek48t59 *)
+ sparc_alloc_io(clk_reg[0].phys_addr,
+ (void *) 0, sizeof(*mstk48t59_regs),
+ "clock", clk_reg[0].which_io, 0x0);
+ mstk48t02_regs = &mstk48t59_regs->regs;
+ }
+ break;
+ }
+
+ /* Report a low battery voltage condition. */
+ if (has_low_battery())
+ prom_printf("NVRAM: Low battery voltage!\n");
+
+ /* Kick start the clock if it is completely stopped. */
+ if (mstk48t02_regs->sec & MSTK_STOP)
+ kick_start_clock();
+}
+
+#ifndef BCD_TO_BIN
+#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10)
+#endif
+
+#ifndef BIN_TO_BCD
+#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10)
+#endif
+
+__initfunc(void time_init(void))
+{
+ extern void init_timers(void (*func)(int, void *, struct pt_regs *));
+ unsigned int year, mon, day, hour, min, sec;
+ struct mostek48t02 *mregs;
+
+ do_get_fast_time = do_gettimeofday;
+
+ clock_probe();
+ init_timers(timer_interrupt);
+
+ mregs = mstk48t02_regs;
+ if(!mregs) {
+ prom_printf("Something wrong, clock regs not mapped yet.\n");
+ prom_halt();
+ }
+
+ mregs->creg |= MSTK_CREG_READ;
+ sec = MSTK_REG_SEC(mregs);
+ min = MSTK_REG_MIN(mregs);
+ hour = MSTK_REG_HOUR(mregs);
+ day = MSTK_REG_DOM(mregs);
+ mon = MSTK_REG_MONTH(mregs);
+ year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
+ xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
+ xtime.tv_usec = 0;
+ mregs->creg &= ~MSTK_CREG_READ;
+}
+
+static __inline__ unsigned long do_gettimeoffset(void)
+{
+ unsigned long offset = 0;
+ unsigned int count;
+
+ /* XXX -DaveM */
+#if 0
+ count = (*master_l10_counter >> 10) & 0x1fffff;
+#else
+ count = 0;
+#endif
+
+ if(test_bit(TIMER_BH, &bh_active))
+ offset = 1000000;
+
+ return offset + count;
+}
+
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+
+ save_and_cli(flags);
+ *tv = xtime;
+ tv->tv_usec += do_gettimeoffset();
+ if(tv->tv_usec >= 1000000) {
+ tv->tv_usec -= 1000000;
+ tv->tv_sec++;
+ }
+ restore_flags(flags);
+}
+
+void do_settimeofday(struct timeval *tv)
+{
+ cli();
+
+ tv->tv_usec -= do_gettimeoffset();
+ if(tv->tv_usec < 0) {
+ tv->tv_usec += 1000000;
+ tv->tv_sec--;
+ }
+
+ xtime = *tv;
+ time_state = TIME_BAD;
+ time_maxerror = 0x70000000;
+ time_esterror = 0x70000000;
+ sti();
+}
+
+static int set_rtc_mmss(unsigned long nowtime)
+{
+ int real_seconds, real_minutes, mostek_minutes;
+ struct mostek48t02 *regs = mstk48t02_regs;
+
+ /* Not having a register set can lead to trouble. */
+ if (!regs)
+ return -1;
+
+ /* Read the current RTC minutes. */
+ regs->creg |= MSTK_CREG_READ;
+ mostek_minutes = MSTK_REG_MIN(regs);
+ regs->creg &= ~MSTK_CREG_READ;
+
+ /*
+ * since we're only adjusting minutes and seconds,
+ * don't interfere with hour overflow. This avoids
+ * messing with unknown time zones but requires your
+ * RTC not to be off by more than 15 minutes
+ */
+ real_seconds = nowtime % 60;
+ real_minutes = nowtime / 60;
+ if (((abs(real_minutes - mostek_minutes) + 15)/30) & 1)
+ real_minutes += 30; /* correct for half hour time zone */
+ real_minutes %= 60;
+
+ if (abs(real_minutes - mostek_minutes) < 30) {
+ regs->creg |= MSTK_CREG_WRITE;
+ MSTK_SET_REG_SEC(regs,real_seconds);
+ MSTK_SET_REG_MIN(regs,real_minutes);
+ regs->creg &= ~MSTK_CREG_WRITE;
+ } else
+ return -1;
+
+ return 0;
+}
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
new file mode 100644
index 000000000..48648c39d
--- /dev/null
+++ b/arch/sparc64/kernel/traps.c
@@ -0,0 +1,185 @@
+/* $Id: traps.c,v 1.5 1997/04/14 06:56:55 davem Exp $
+ * arch/sparc/kernel/traps.c
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/*
+ * I hate traps on the sparc, grrr...
+ */
+
+#include <linux/sched.h> /* for jiffies */
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/delay.h>
+#include <asm/system.h>
+#include <asm/ptrace.h>
+#include <asm/oplib.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/unistd.h>
+
+/* #define TRAP_DEBUG */
+
+/* One record of the trap trace ring buffer (debugging aid). */
+struct trap_trace_entry {
+ unsigned long pc; /* PC at which the trap was taken */
+ unsigned long type; /* trap type/vector number */
+};
+
+/* Next free slot in trapbuf. */
+int trap_curbuf = 0;
+struct trap_trace_entry trapbuf[1024];
+
+/* Debug hook called on syscall entry: print the task and the syscall
+ * number. %g1 normally carries the number; %i0 is printed too since
+ * indirect syscalls pass it there.
+ */
+void syscall_trace_entry(struct pt_regs *regs)
+{
+ printk("%s[%d]: ", current->comm, current->pid);
+ printk("scall<%ld> (could be %ld)\n", (long) regs->u_regs[UREG_G1],
+ (long) regs->u_regs[UREG_I0]);
+}
+
+/* Debug hook called on syscall exit; intentionally a no-op for now. */
+void syscall_trace_exit(struct pt_regs *regs)
+{
+}
+
+/* Unimplemented D-TLB fault path: announce and hang the CPU. */
+void sparc64_dtlb_fault_handler (void)
+{
+ printk ("sparc64_dtlb_fault_handler\n");
+ while (1);
+ /* Die for now... */
+}
+
+/* Unimplemented D-TLB reference-bit path: print trap PC and hang. */
+void sparc64_dtlb_refbit_handler (struct pt_regs *regs)
+{
+ printk ("sparc64_dtlb_refbit_handler[%016lx]\n", regs->tpc);
+ while (1);
+ /* Die for now... */
+}
+
+/* Unimplemented I-TLB reference-bit path: announce and hang the CPU. */
+void sparc64_itlb_refbit_handler (void)
+{
+ printk ("sparc64_itlb_refbit_handler\n");
+ while (1);
+ /* Die for now... */
+}
+
+/* Catch-all for unhandled TL0 trap vectors. `lvl` is the trap
+ * vector number pushed by the table stub. No recovery yet: dump
+ * the trap state and spin forever.
+ * Fix: `lvl` is a long, so it must be printed with %ld -- %d would
+ * consume only 32 bits of the 64-bit vararg and shift every later
+ * argument, corrupting the whole diagnostic line.
+ */
+void bad_trap (struct pt_regs *regs, long lvl)
+{
+ printk ("Bad trap %ld (tstate %016lx tpc %016lx tnpc %016lx)\n", lvl, regs->tstate, regs->tpc, regs->tnpc);
+ while (1);
+ /* Die for now... */
+}
+
+/* Catch-all for unhandled trap vectors taken at TL >= 1.
+ * Fix: print the long `lvl` with %ld instead of %d -- the 32/64-bit
+ * vararg mismatch misaligned all following printk arguments.
+ */
+void bad_trap_tl1 (struct pt_regs *regs, long lvl)
+{
+ printk ("Bad trap %ld at tl1+ (tstate %016lx tpc %016lx tnpc %016lx)\n", lvl, regs->tstate, regs->tpc, regs->tnpc);
+ while (1);
+ /* Die for now... */
+}
+
+/* Data access exception: dump the synchronous fault status/address
+ * registers and die if it came from kernel mode.
+ */
+void data_access_exception (struct pt_regs *regs)
+{
+ printk ("Unhandled data access exception sfsr %016lx sfar %016lx\n", spitfire_get_dsfsr(), spitfire_get_sfar());
+ die_if_kernel("Data access exception", regs);
+}
+
+/* Instruction access exception: dump the I-side fault status register
+ * and die if it came from kernel mode.
+ */
+void instruction_access_exception (struct pt_regs *regs)
+{
+ printk ("Unhandled instruction access exception sfsr %016lx\n", spitfire_get_isfsr());
+ die_if_kernel("Instruction access exception", regs);
+}
+
+/* Print the instruction words around `pc` (pc[-3]..pc[5]), marking the
+ * faulting word with angle brackets. Bails out if pc is not 4-byte
+ * aligned. NOTE(review): reads 3 words before pc without a bounds
+ * check -- could fault near an unmapped page boundary; confirm callers
+ * only pass mapped kernel text.
+ */
+void instruction_dump (unsigned int *pc)
+{
+ int i;
+
+ if((((unsigned long) pc) & 3))
+ return;
+
+ for(i = -3; i < 6; i++)
+ printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
+ printk("\n");
+}
+
+/* Terminal error report. Despite the name it always kills the current
+ * task: SIGKILL if the trap came from privileged (kernel) mode,
+ * SIGSEGV otherwise. Prints registers and an instruction dump first.
+ */
+void die_if_kernel(char *str, struct pt_regs *regs)
+{
+ /* Amuse the user. */
+ printk(
+" \\|/ ____ \\|/\n"
+" \"@'/ .` \\`@\"\n"
+" /_| \\__/ |_\\\n"
+" \\__U_/\n");
+
+ printk("%s(%d): %s\n", current->comm, current->pid, str);
+ show_regs(regs);
+ printk("Instruction DUMP:");
+ instruction_dump ((unsigned int *) regs->tpc);
+ if(regs->tstate & TSTATE_PRIV)
+ do_exit(SIGKILL);
+ do_exit(SIGSEGV);
+}
+
+/* Illegal-instruction trap: fatal in kernel mode, otherwise record the
+ * fault address/cause in the thread struct and deliver SIGILL.
+ */
+void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long tstate)
+{
+ lock_kernel();
+ if(tstate & TSTATE_PRIV)
+ die_if_kernel("Kernel illegal instruction", regs);
+#ifdef TRAP_DEBUG
+ printk("Ill instr. at pc=%016lx instruction is %08x\n",
+ regs->tpc, *(unsigned int *)regs->tpc);
+#endif
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_ILLINST;
+ send_sig(SIGILL, current, 1);
+ unlock_kernel();
+}
+
+/* Privileged-opcode trap from user mode: record the fault in the
+ * thread struct and deliver SIGILL. A privileged opcode trapping in
+ * privileged mode is nonsensical, hence the panic message.
+ */
+void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long tstate)
+{
+ lock_kernel();
+ if(tstate & TSTATE_PRIV)
+ die_if_kernel("Penguin instruction from Penguin mode??!?!", regs);
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGILL, current, 1);
+ unlock_kernel();
+}
+
+/* XXX User may want to be allowed to do this. XXX */
+
+/* Mis-aligned memory access trap: fatal in kernel mode, otherwise
+ * deliver SIGBUS. NOTE(review): sig_desc is set to SUBSIG_PRIVINST,
+ * which looks copy-pasted from do_priv_instruction -- confirm whether
+ * an alignment-specific subsig was intended. The disabled debug code
+ * also casts to (unsigned long *) where instruction_dump() takes
+ * (unsigned int *).
+ */
+void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long tstate)
+{
+ lock_kernel();
+ if(regs->tstate & TSTATE_PRIV) {
+ printk("KERNEL MNA at pc %016lx npc %016lx called by %016lx\n", pc, npc,
+ regs->u_regs[UREG_RETPC]);
+ die_if_kernel("BOGUS", regs);
+ /* die_if_kernel("Kernel MNA access", regs); */
+ }
+ current->tss.sig_address = pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+#if 0
+ show_regs (regs);
+ instruction_dump ((unsigned long *) regs->tpc);
+ printk ("do_MNA!\n");
+#endif
+ send_sig(SIGBUS, current, 1);
+ unlock_kernel();
+}
+
+/* Hardware integer divide-by-zero trap: deliver SIGILL to the task.
+ * (Last parameter is named psr but carries tstate on sparc64, matching
+ * the other handlers' calling convention.)
+ */
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc,
+ unsigned long psr)
+{
+ lock_kernel();
+ send_sig(SIGILL, current, 1);
+ unlock_kernel();
+}
+
+/* Nothing to set up at boot: the trap tables are laid out statically
+ * in ttable.S, so this init hook is intentionally empty.
+ */
+void trap_init(void)
+{
+}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
new file mode 100644
index 000000000..f22d85014
--- /dev/null
+++ b/arch/sparc64/kernel/ttable.S
@@ -0,0 +1,252 @@
+/* $Id: ttable.S,v 1.11 1997/03/25 09:47:21 davem Exp $
+ * ttable.S: Sparc V9 Trap Table(s) with SpitFire extensions.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+ .globl sparc64_ttable_tl0, sparc64_ttable_tl1
+
+/* Trap table used at trap-level 0 (normal execution). Each label
+ * names the hardware vector; the entries expand from macros defined
+ * elsewhere. BTRAP() marks vectors that are unexpected or not yet
+ * supported and bounce to bad_trap().
+ */
+sparc64_ttable_tl0:
+tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
+tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
+tl0_iax: ACCESS_EXCEPTION_TRAP(instruction_access_exception)
+tl0_resv009: BTRAP(0x9)
+tl0_iae: TRAP(do_iae)
+tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
+tl0_ill: TRAP(do_ill)
+tl0_privop: TRAP(do_privop)
+tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
+tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
+tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
+tl0_fpdis: TRAP(do_fpdis)
+tl0_fpieee: TRAP(do_fpieee)
+tl0_fpother: TRAP(do_fpother)
+tl0_tof: TRAP(do_tof)
+tl0_cwin: CLEAN_WINDOW
+tl0_div0: TRAP(do_div0)
+tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
+tl0_resv02f: BTRAP(0x2f)
+tl0_dax: ACCESS_EXCEPTION_TRAP(data_access_exception)
+tl0_resv031: BTRAP(0x31)
+tl0_dae: TRAP(do_dae)
+tl0_resv033: BTRAP(0x33)
+tl0_mna: TRAP(do_mna)
+tl0_lddfmna: TRAP(do_lddfmna)
+tl0_stdfmna: TRAP(do_stdfmna)
+tl0_privact: TRAP(do_privact)
+tl0_resv038: BTRAP(0x38) BTRAP(0x39) BTRAP(0x3a) BTRAP(0x3b) BTRAP(0x3c) BTRAP(0x3d)
+tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
+/* Device interrupt vectors 0x41-0x4f, one per PIL 1-15. */
+tl0_irq1: TRAP_IRQ(handler_irq, 1) TRAP_IRQ(handler_irq, 2)
+tl0_irq3: TRAP_IRQ(handler_irq, 3) TRAP_IRQ(handler_irq, 4)
+tl0_irq5: TRAP_IRQ(handler_irq, 5) TRAP_IRQ(handler_irq, 6)
+tl0_irq7: TRAP_IRQ(handler_irq, 7) TRAP_IRQ(handler_irq, 8)
+tl0_irq9: TRAP_IRQ(handler_irq, 9) TRAP_IRQ(handler_irq, 10)
+tl0_irq11: TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
+tl0_irq13: TRAP_IRQ(handler_irq, 13) TRAP_IRQ(handler_irq, 14)
+tl0_irq15: TRAP_IRQ(handler_irq, 15)
+tl0_resv050: BTRAP(0x50) BTRAP(0x51) BTRAP(0x52) BTRAP(0x53) BTRAP(0x54) BTRAP(0x55)
+tl0_resv056: BTRAP(0x56) BTRAP(0x57) BTRAP(0x58) BTRAP(0x59) BTRAP(0x5a) BTRAP(0x5b)
+tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
+tl0_ivec: TRAP_IVEC
+tl0_paw: TRAP(do_paw)
+tl0_vaw: TRAP(do_vaw)
+tl0_cee: TRAP(do_cee)
+/* Fast MMU miss/protection handlers are inlined into the table. */
+tl0_iamiss:
+#include "itlb_miss.S"
+tl0_damiss:
+#include "dtlb_miss.S"
+tl0_daprot:
+#include "dtlb_prot.S"
+tl0_resv070: BTRAP(0x70) BTRAP(0x71) BTRAP(0x72) BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
+tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
+tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
+/* Register-window spill/fill handlers, normal and "other" window sets. */
+tl0_s0n: SPILL_0_NORMAL
+tl0_s1n: SPILL_1_NORMAL
+tl0_s2n: SPILL_2_NORMAL
+tl0_s3n: SPILL_3_NORMAL
+tl0_s4n: SPILL_4_NORMAL
+tl0_s5n: SPILL_5_NORMAL
+tl0_s6n: SPILL_6_NORMAL
+tl0_s7n: SPILL_7_NORMAL
+tl0_s0o: SPILL_0_OTHER
+tl0_s1o: SPILL_1_OTHER
+tl0_s2o: SPILL_2_OTHER
+tl0_s3o: SPILL_3_OTHER
+tl0_s4o: SPILL_4_OTHER
+tl0_s5o: SPILL_5_OTHER
+tl0_s6o: SPILL_6_OTHER
+tl0_s7o: SPILL_7_OTHER
+tl0_f0n: FILL_0_NORMAL
+tl0_f1n: FILL_1_NORMAL
+tl0_f2n: FILL_2_NORMAL
+tl0_f3n: FILL_3_NORMAL
+tl0_f4n: FILL_4_NORMAL
+tl0_f5n: FILL_5_NORMAL
+tl0_f6n: FILL_6_NORMAL
+tl0_f7n: FILL_7_NORMAL
+tl0_f0o: FILL_0_OTHER
+tl0_f1o: FILL_1_OTHER
+tl0_f2o: FILL_2_OTHER
+tl0_f3o: FILL_3_OTHER
+tl0_f4o: FILL_4_OTHER
+tl0_f5o: FILL_5_OTHER
+tl0_f6o: FILL_6_OTHER
+tl0_f7o: FILL_7_OTHER
+/* Software trap vectors (0x100+): system call entry points for the
+ * various ABIs/personalities.
+ */
+tl0_sunos: SUNOS_SYSCALL_TRAP
+tl0_bkpt: BREAKPOINT_TRAP
+tl0_resv102: BTRAP(0x102)
+tl0_flushw: FLUSH_WINDOW_TRAP
+tl0_resv104: BTRAP(0x104) BTRAP(0x105) BTRAP(0x106) BTRAP(0x107)
+tl0_solaris: SOLARIS_SYSCALL_TRAP
+tl0_netbsd: NETBSD_SYSCALL_TRAP
+tl0_resv10a: BTRAP(0x10a) BTRAP(0x10b) BTRAP(0x10c) BTRAP(0x10d) BTRAP(0x10e)
+tl0_resv10f: BTRAP(0x10f)
+tl0_linux32: LINUX_32BIT_SYSCALL_TRAP
+tl0_linux64: LINUX_64BIT_SYSCALL_TRAP
+tl0_resv112: BTRAP(0x112) BTRAP(0x113) BTRAP(0x114) BTRAP(0x115) BTRAP(0x116)
+tl0_resv117: BTRAP(0x117) BTRAP(0x118) BTRAP(0x119) BTRAP(0x11a) BTRAP(0x11b)
+tl0_resv11c: BTRAP(0x11c) BTRAP(0x11d) BTRAP(0x11e) BTRAP(0x11f)
+tl0_getcc: GETCC_TRAP
+tl0_setcc: SETCC_TRAP
+tl0_resv122: BTRAP(0x122) BTRAP(0x123) BTRAP(0x124) BTRAP(0x125) BTRAP(0x126)
+tl0_solindir: INDIRECT_SOLARIS_SYSCALL(156)
+tl0_resv128: BTRAP(0x128) BTRAP(0x129) BTRAP(0x12a) BTRAP(0x12b) BTRAP(0x12c)
+tl0_resv12d: BTRAP(0x12d) BTRAP(0x12e) BTRAP(0x12f) BTRAP(0x130) BTRAP(0x131)
+tl0_resv132: BTRAP(0x132) BTRAP(0x133) BTRAP(0x134) BTRAP(0x135) BTRAP(0x136)
+tl0_resv137: BTRAP(0x137) BTRAP(0x138) BTRAP(0x139) BTRAP(0x13a) BTRAP(0x13b)
+tl0_resv13c: BTRAP(0x13c) BTRAP(0x13d) BTRAP(0x13e) BTRAP(0x13f) BTRAP(0x140)
+tl0_resv141: BTRAP(0x141) BTRAP(0x142) BTRAP(0x143) BTRAP(0x144) BTRAP(0x145)
+tl0_resv146: BTRAP(0x146) BTRAP(0x147) BTRAP(0x148) BTRAP(0x149) BTRAP(0x14a)
+tl0_resv14b: BTRAP(0x14b) BTRAP(0x14c) BTRAP(0x14d) BTRAP(0x14e) BTRAP(0x14f)
+tl0_resv150: BTRAP(0x150) BTRAP(0x151) BTRAP(0x152) BTRAP(0x153) BTRAP(0x154)
+tl0_resv155: BTRAP(0x155) BTRAP(0x156) BTRAP(0x157) BTRAP(0x158) BTRAP(0x159)
+tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
+tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
+tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
+tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c) BTRAP(0x16d)
+tl0_resv16e: BTRAP(0x16e) BTRAP(0x16f) BTRAP(0x170) BTRAP(0x171) BTRAP(0x172)
+tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
+tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
+tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
+/* Eight consecutive bounce entries per expansion. */
+#define BTRAPS(x) BTRAP(x) BTRAP(x+1) BTRAP(x+2) BTRAP(x+3) BTRAP(x+4) BTRAP(x+5) BTRAP(x+6) BTRAP(x+7)
+tl0_resv180: BTRAPS(0x180) BTRAPS(0x188)
+tl0_resv190: BTRAPS(0x190) BTRAPS(0x198)
+tl0_resv1a0: BTRAPS(0x1a0) BTRAPS(0x1a8)
+tl0_resv1b0: BTRAPS(0x1b0) BTRAPS(0x1b8)
+tl0_resv1c0: BTRAPS(0x1c0) BTRAPS(0x1c8)
+tl0_resv1d0: BTRAPS(0x1d0) BTRAPS(0x1d8)
+tl0_resv1e0: BTRAPS(0x1e0) BTRAPS(0x1e8)
+tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
+
+/* Trap table used while already at trap-level >= 1 (a trap taken
+ * inside a trap handler). Most vectors bounce to bad_trap_tl1();
+ * software-trap vectors (0x100+) are compiled out below to save 8KB.
+ */
+sparc64_ttable_tl1:
+tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
+tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
+tl1_iax: ACCESS_EXCEPTION_TRAPTL1(instruction_access_exception)
+tl1_resv009: BTRAPTL1(0x9)
+tl1_iae: TRAPTL1(do_iae_tl1)
+tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
+tl1_ill: TRAPTL1(do_ill_tl1)
+tl1_privop: BTRAPTL1(0x11)
+tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
+tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
+tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
+tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
+tl1_fpdis: TRAPTL1(do_fpdis_tl1)
+tl1_fpieee: TRAPTL1(do_fpieee_tl1)
+tl1_fpother: TRAPTL1(do_fpother_tl1)
+tl1_tof: TRAPTL1(do_tof_tl1)
+tl1_cwin: CLEAN_WINDOW
+tl1_div0: TRAPTL1(do_div0_tl1)
+tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
+tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
+tl1_dax: ACCESS_EXCEPTION_TRAPTL1(data_access_exception)
+tl1_resv031: BTRAPTL1(0x31)
+tl1_dae: TRAPTL1(do_dae_tl1)
+tl1_resv033: BTRAPTL1(0x33)
+tl1_mna: TRAPTL1(do_mna_tl1)
+tl1_lddfmna: TRAPTL1(do_lddfmna_tl1)
+tl1_stdfmna: TRAPTL1(do_stdfmna_tl1)
+tl1_privact: BTRAPTL1(0x37)
+tl1_resv038: BTRAPTL1(0x38) BTRAPTL1(0x39) BTRAPTL1(0x3a) BTRAPTL1(0x3b)
+tl1_resv03c: BTRAPTL1(0x3c) BTRAPTL1(0x3d) BTRAPTL1(0x3e) BTRAPTL1(0x3f)
+tl1_resv040: BTRAPTL1(0x40)
+/* Interrupts arriving while already in a trap handler. */
+tl1_irq1: TRAP_IRQ(do_irq_tl1, 1) TRAP_IRQ(do_irq_tl1, 2) TRAP_IRQ(do_irq_tl1, 3)
+tl1_irq4: TRAP_IRQ(do_irq_tl1, 4) TRAP_IRQ(do_irq_tl1, 5) TRAP_IRQ(do_irq_tl1, 6)
+tl1_irq7: TRAP_IRQ(do_irq_tl1, 7) TRAP_IRQ(do_irq_tl1, 8) TRAP_IRQ(do_irq_tl1, 9)
+tl1_irq10: TRAP_IRQ(do_irq_tl1, 10) TRAP_IRQ(do_irq_tl1, 11)
+tl1_irq12: TRAP_IRQ(do_irq_tl1, 12) TRAP_IRQ(do_irq_tl1, 13)
+tl1_irq14: TRAP_IRQ(do_irq_tl1, 14) TRAP_IRQ(do_irq_tl1, 15)
+tl1_resv050: BTRAPTL1(0x50) BTRAPTL1(0x51) BTRAPTL1(0x52) BTRAPTL1(0x53)
+tl1_resv054: BTRAPTL1(0x54) BTRAPTL1(0x55) BTRAPTL1(0x56) BTRAPTL1(0x57)
+tl1_resv058: BTRAPTL1(0x58) BTRAPTL1(0x59) BTRAPTL1(0x5a) BTRAPTL1(0x5b)
+tl1_resv05c: BTRAPTL1(0x5c) BTRAPTL1(0x5d) BTRAPTL1(0x5e) BTRAPTL1(0x5f)
+tl1_ivec: TRAP_IVEC
+tl1_paw: TRAPTL1(do_paw_tl1)
+tl1_vaw: TRAPTL1(do_vaw_tl1)
+tl1_cee: TRAPTL1(do_cee_tl1)
+/* Same inline MMU miss/protection handlers as the TL0 table. */
+tl1_iamiss:
+#include "itlb_miss.S"
+tl1_damiss:
+#include "dtlb_miss.S"
+tl1_daprot:
+#include "dtlb_prot.S"
+tl1_resv070: BTRAPTL1(0x70) BTRAPTL1(0x71) BTRAPTL1(0x72) BTRAPTL1(0x73)
+tl1_resv074: BTRAPTL1(0x74) BTRAPTL1(0x75) BTRAPTL1(0x76) BTRAPTL1(0x77)
+tl1_resv078: BTRAPTL1(0x78) BTRAPTL1(0x79) BTRAPTL1(0x7a) BTRAPTL1(0x7b)
+tl1_resv07c: BTRAPTL1(0x7c) BTRAPTL1(0x7d) BTRAPTL1(0x7e) BTRAPTL1(0x7f)
+tl1_s0n: SPILL_0_NORMAL
+tl1_s1n: SPILL_1_NORMAL
+tl1_s2n: SPILL_2_NORMAL
+tl1_s3n: SPILL_3_NORMAL
+tl1_s4n: SPILL_4_NORMAL
+tl1_s5n: SPILL_5_NORMAL
+tl1_s6n: SPILL_6_NORMAL
+tl1_s7n: SPILL_7_NORMAL
+tl1_s0o: SPILL_0_OTHER
+tl1_s1o: SPILL_1_OTHER
+tl1_s2o: SPILL_2_OTHER
+tl1_s3o: SPILL_3_OTHER
+tl1_s4o: SPILL_4_OTHER
+tl1_s5o: SPILL_5_OTHER
+tl1_s6o: SPILL_6_OTHER
+tl1_s7o: SPILL_7_OTHER
+tl1_f0n: FILL_0_NORMAL
+tl1_f1n: FILL_1_NORMAL
+tl1_f2n: FILL_2_NORMAL
+tl1_f3n: FILL_3_NORMAL
+tl1_f4n: FILL_4_NORMAL
+tl1_f5n: FILL_5_NORMAL
+tl1_f6n: FILL_6_NORMAL
+tl1_f7n: FILL_7_NORMAL
+tl1_f0o: FILL_0_OTHER
+tl1_f1o: FILL_1_OTHER
+tl1_f2o: FILL_2_OTHER
+tl1_f3o: FILL_3_OTHER
+tl1_f4o: FILL_4_OTHER
+tl1_f5o: FILL_5_OTHER
+tl1_f6o: FILL_6_OTHER
+tl1_f7o: FILL_7_OTHER
+
+#if 0
+/* Unless we are going to have software trap insns in the kernel code, we
+ * don't need this. For now we just save 8KB.
+ */
+
+#define BTRAPSTL1(x) BTRAPTL1(x) BTRAPTL1(x+1) BTRAPTL1(x+2) BTRAPTL1(x+3) BTRAPTL1(x+4) BTRAPTL1(x+5) BTRAPTL1(x+6) BTRAPTL1(x+7)
+
+tl1_sunos: BTRAPTL1(0x100)
+tl1_bkpt: BREAKPOINT_TRAP
+tl1_resv102: BTRAPTL1(0x102)
+tl1_flushw: FLUSH_WINDOW_TRAP
+tl1_resv104: BTRAPTL1(0x104) BTRAPTL1(0x105) BTRAPTL1(0x106)
+tl1_resv107: BTRAPTL1(0x107) BTRAPTL1(0x108) BTRAPTL1(0x109) BTRAPTL1(0x10a)
+tl1_resv10b: BTRAPTL1(0x10b) BTRAPTL1(0x10c) BTRAPTL1(0x10d) BTRAPTL1(0x10e)
+tl1_resv10f: BTRAPTL1(0x10f)
+tl1_resv110: BTRAPSTL1(0x110) BTRAPSTL1(0x118)
+tl1_resv120: BTRAPSTL1(0x120) BTRAPSTL1(0x128)
+tl1_resv130: BTRAPSTL1(0x130) BTRAPSTL1(0x138)
+tl1_resv140: BTRAPSTL1(0x140) BTRAPSTL1(0x148)
+tl1_resv150: BTRAPSTL1(0x150) BTRAPSTL1(0x158)
+tl1_resv160: BTRAPSTL1(0x160) BTRAPSTL1(0x168)
+tl1_resv170: BTRAPSTL1(0x170) BTRAPSTL1(0x178)
+#endif
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
new file mode 100644
index 000000000..56c506507
--- /dev/null
+++ b/arch/sparc64/lib/Makefile
@@ -0,0 +1,56 @@
+# $Id: Makefile,v 1.7 1997/04/07 18:57:05 jj Exp $
+# Makefile for Sparc library files..
+#
+
+CFLAGS := $(CFLAGS) -ansi
+
+# All members of lib.a; every source here is hand-written assembly.
+OBJS = memset.o blockops.o locks.o memcpy.o strlen.o strncmp.o \
+ memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
+ copy_to_user.o copy_from_user.o
+
+lib.a: $(OBJS)
+ $(AR) rcs lib.a $(OBJS)
+ sync
+
+# NOTE(review): unlike every other .S rule below, blockops.o is built
+# without -D__ASSEMBLY__ -- confirm this is intentional.
+blockops.o: blockops.S
+ $(CC) -ansi -c -o blockops.o blockops.S
+
+# Explicit per-file rules follow; a single '%.o: %.S' pattern rule
+# would collapse them, they are kept explicit to match the other
+# sparc Makefiles of this tree.
+memset.o: memset.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
+
+copy_to_user.o: copy_to_user.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o copy_to_user.o copy_to_user.S
+
+copy_from_user.o: copy_from_user.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o copy_from_user.o copy_from_user.S
+
+memcpy.o: memcpy.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S
+
+strlen.o: strlen.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strlen.o strlen.S
+
+strncmp.o: strncmp.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strncmp.o strncmp.S
+
+memcmp.o: memcmp.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memcmp.o memcmp.S
+
+locks.o: locks.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o locks.o locks.S
+
+checksum.o: checksum.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o checksum.o checksum.S
+
+memscan.o: memscan.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o memscan.o memscan.S
+
+strncpy_from_user.o: strncpy_from_user.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S
+
+strlen_user.o: strlen_user.S
+ $(CC) -D__ASSEMBLY__ -ansi -c -o strlen_user.o strlen_user.S
+
+# Dependency generation is handled by Rules.make; nothing to do here.
+dep:
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/lib/blockops.S b/arch/sparc64/lib/blockops.S
new file mode 100644
index 000000000..b3f06c18d
--- /dev/null
+++ b/arch/sparc64/lib/blockops.S
@@ -0,0 +1,138 @@
+/* $Id: blockops.S,v 1.5 1997/03/26 18:34:28 jj Exp $
+ * arch/sparc64/lib/blockops.S: UltraSparc block zero optimized routines.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+ /* Zero out 256 bytes of memory at (buf + offset).
+ * Each stda through %asi issues one 64-byte block store from
+ * %f48-%f62 (which the callers pre-load with the fill pattern).
+ */
+#define BLAST_BLOCK(buf, offset) \
+ stda %f48, [buf + offset + 0x00] %asi; \
+ stda %f48, [buf + offset + 0x40] %asi; \
+ stda %f48, [buf + offset + 0x80] %asi; \
+ stda %f48, [buf + offset + 0xc0] %asi;
+
+ /* Copy 256 bytes of memory at (src + offset) to
+ * (dst + offset). The membar between loads and stores orders
+ * the block transfers (sync type supplied by the caller).
+ */
+#define MIRROR_BLOCK(dst, src, offset, sync) \
+ ldda [src + offset + 0x000] %asi, %f0; \
+ ldda [src + offset + 0x040] %asi, %f16; \
+ ldda [src + offset + 0x080] %asi, %f32; \
+ ldda [src + offset + 0x0c0] %asi, %f48; \
+ membar sync; \
+ stda %f0, [dst + offset + 0x000] %asi; \
+ stda %f16, [dst + offset + 0x040] %asi; \
+ stda %f32, [dst + offset + 0x080] %asi; \
+ stda %f48, [dst + offset + 0x0c0] %asi;
+
+ .text
+ .align 4
+
+ .globl bzero_2page, bzero_1page
+/* Zero 16KB (0x10 iterations x 0x400 bytes) at %o0 using FP block
+ * stores; %f48-%f62 are cleared as the 64-byte source pattern.
+ * Returns the original buffer pointer in %o0.
+ */
+bzero_2page:
+ /* %o0 = buf */
+ mov %o0, %o1
+ wr %g0, ASI_BLK_P, %asi
+ mov 0x10, %g2
+
+ membar #Sync|#StoreLoad
+
+ fzero %f48
+ fzero %f50
+ fzero %f52
+ fzero %f54
+ fzero %f56
+ fzero %f58
+ fzero %f60
+ fzero %f62
+1:
+ BLAST_BLOCK(%o0, 0x000)
+ BLAST_BLOCK(%o0, 0x100)
+ BLAST_BLOCK(%o0, 0x200)
+ BLAST_BLOCK(%o0, 0x300)
+ subcc %g2, 1, %g2
+ bne,pt %icc, 1b
+ add %o0, 0x400, %o0
+
+ membar #Sync|#LoadStore|#StoreStore
+
+ retl
+ mov %o1, %o0
+
+
+/* Zero 8KB (8 iterations x 0x400 bytes) at %o0; identical to
+ * bzero_2page except for the iteration count. Returns buf in %o0.
+ */
+bzero_1page:
+ /* %o0 = buf */
+ mov %o0, %o1
+ wr %g0, ASI_BLK_P, %asi
+ mov 0x08, %g2
+ membar #Sync|#StoreLoad
+ fzero %f48
+ fzero %f50
+ fzero %f52
+ fzero %f54
+ fzero %f56
+ fzero %f58
+ fzero %f60
+ fzero %f62
+1:
+ BLAST_BLOCK(%o0, 0x000)
+ BLAST_BLOCK(%o0, 0x100)
+ BLAST_BLOCK(%o0, 0x200)
+ BLAST_BLOCK(%o0, 0x300)
+ subcc %g2, 1, %g2
+ bne,pt %icc, 1b
+ add %o0, 0x400, %o0
+
+ membar #Sync|#LoadStore|#StoreStore
+
+ retl
+ mov %o1, %o0
+
+
+ .globl __bfill64
+/* Fill 8KB at %o0 with the 64-bit pattern in %o1. The pattern is
+ * spilled to the stack, reloaded into %f48, then replicated across
+ * %f48-%f62 so each stda writes 64 bytes of it.
+ */
+__bfill64:
+ /* %o0 = buf */
+ stx %o1, [%sp + 0x7ff + 128]
+ wr %g0, ASI_BLK_P, %asi
+ mov 0x08, %g2
+ ldd [%sp + 0x7ff + 128], %f48
+ membar #Sync|#StoreLoad
+ fmovd %f48, %f50
+ fmovd %f48, %f52
+ fmovd %f48, %f54
+ fmovd %f48, %f56
+ fmovd %f48, %f58
+ fmovd %f48, %f60
+ fmovd %f48, %f62
+1:
+ BLAST_BLOCK(%o0, 0x000)
+ BLAST_BLOCK(%o0, 0x100)
+ BLAST_BLOCK(%o0, 0x200)
+ BLAST_BLOCK(%o0, 0x300)
+ subcc %g2, 1, %g2
+ bne,pt %icc, 1b
+ add %o0, 0x400, %o0
+
+ retl
+ membar #Sync|#LoadStore|#StoreStore
+
+
+ .globl __copy_1page
+/* Copy 8KB (8 iterations x 0x400 bytes) from %o1 to %o0 using FP
+ * block loads/stores via MIRROR_BLOCK.
+ */
+__copy_1page:
+ /* %o0 = dst, %o1 = src */
+ or %g0, 0x08, %g1
+ wr %g0, ASI_BLK_P, %asi
+ membar #Sync|#StoreLoad
+1:
+ MIRROR_BLOCK(%o0, %o1, 0x000, #Sync)
+ MIRROR_BLOCK(%o0, %o1, 0x100, #Sync)
+ MIRROR_BLOCK(%o0, %o1, 0x200, #Sync)
+ MIRROR_BLOCK(%o0, %o1, 0x300, #Sync)
+ subcc %g1, 1, %g1
+ add %o0, 0x400, %o0
+ bne,pt %icc, 1b
+ add %o1, 0x400, %o1
+
+ retl
+ membar #Sync|#LoadStore|#StoreStore
+
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
new file mode 100644
index 000000000..8a06003ee
--- /dev/null
+++ b/arch/sparc64/lib/checksum.S
@@ -0,0 +1,565 @@
+/* checksum.S: Sparc V9 optimized checksum code.
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1995 Miguel de Icaza
+ * Copyright(C) 1996 David S. Miller
+ * Copyright(C) 1997 Jakub Jelinek
+ *
+ * derived from:
+ * Linux/Alpha checksum c-code
+ * Linux/ix86 inline checksum assembly
+ * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
+ * David Mosberger-Tang for optimized reference c-code
+ * BSD4.4 portable checksum routine
+ */
+
+#include <asm/errno.h>
+#include <asm/head.h>
+#include <asm/ptrace.h>
+#include <asm/asi.h>
+
+/* Sum 32 bytes at buf+offset into `sum` with carry-chained adds
+ * (addccc folds the carry of each add into the next).
+ */
+#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
+ ldd [buf + offset + 0x00], t0; \
+ ldd [buf + offset + 0x08], t2; \
+ addccc t0, sum, sum; \
+ addccc t1, sum, sum; \
+ ldd [buf + offset + 0x10], t4; \
+ addccc t2, sum, sum; \
+ addccc t3, sum, sum; \
+ ldd [buf + offset + 0x18], t0; \
+ addccc t4, sum, sum; \
+ addccc t5, sum, sum; \
+ addccc t0, sum, sum; \
+ addccc t1, sum, sum;
+
+/* Sum 16 bytes at buf-offset into `sum`; used by the jump table that
+ * handles the 0x10..0x70 byte tail.
+ */
+#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \
+ ldd [buf - offset - 0x08], t0; \
+ ldd [buf - offset - 0x00], t2; \
+ addccc t0, sum, sum; \
+ addccc t1, sum, sum; \
+ addccc t2, sum, sum; \
+ addccc t3, sum, sum;
+
+ /* Do end cruft out of band to get better cache patterns.
+ * Handles the final <16 bytes of csum_partial: an optional
+ * dword, word, halfword and trailing byte, folding each into
+ * %o2 and returning the summed value in %o0.
+ */
+csum_partial_end_cruft:
+ andcc %o1, 8, %g0 ! check how much
+ be,pn %icc, 1f ! caller asks %o1 & 0x8
+ and %o1, 4, %g3 ! nope, check for word remaining
+ ldd [%o0], %g2 ! load two
+ addcc %g2, %o2, %o2 ! add first word to sum
+ addccc %g3, %o2, %o2 ! add second word as well
+ add %o0, 8, %o0 ! advance buf ptr
+ addc %g0, %o2, %o2 ! add in final carry
+1: brz,pn %g3, 1f ! nope, skip this code
+ andcc %o1, 3, %o1 ! check for trailing bytes
+ ld [%o0], %g2 ! load it
+ addcc %g2, %o2, %o2 ! add to sum
+ add %o0, 4, %o0 ! advance buf ptr
+ addc %g0, %o2, %o2 ! add in final carry
+1: brz,pn %o1, 1f ! no trailing bytes, return
+ addcc %o1, -1, %g0 ! only one byte remains?
+ bne,pn %icc, 2f ! at least two bytes more
+ subcc %o1, 2, %o1 ! only two bytes more?
+ ba,pt %xcc, 4f ! only one byte remains
+ clr %o4 ! clear fake hword value
+2: lduh [%o0], %o4 ! get hword
+ be,pn %icc, 6f ! jmp if only hword remains
+ add %o0, 2, %o0 ! advance buf ptr either way
+ sll %o4, 16, %o4 ! create upper hword
+4: ldub [%o0], %o5 ! get final byte
+ sll %o5, 8, %o5 ! put into place
+ or %o5, %o4, %o4 ! coalese with hword (if any)
+6: addcc %o4, %o2, %o2 ! add to sum
+1: sllx %g4, 32, %g4 ! give gfp back
+ retl ! get outta here
+ addc %g0, %o2, %o0 ! add final carry into retval
+
+ /* Also do alignment out of band to get better cache patterns.
+ * NOTE(review): this label is empty and falls straight into
+ * csum_partial below -- looks vestigial; the live alignment path
+ * is the inline code jumping to csum_partial_fix_aligned.
+ */
+csum_partial_fix_alignment:
+
+ /* The common case is to get called with a nicely aligned
+ * buffer of size 0x20. Follow the code path for that case.
+ */
+ .globl csum_partial
+csum_partial: /* %o0=buf, %o1=len, %o2=sum */
+ andcc %o0, 0x7, %g0 ! alignment problems?
+ be,pt %icc, csum_partial_fix_aligned ! yep, handle it
+ andn %o1, 0x7f, %o3 ! num loop iterations
+ cmp %o1, 6
+ bl,pn %icc, cpte - 0x4
+ andcc %o0, 0x2, %g0
+ be,pn %icc, 1f
+ and %o0, 0x4, %g7
+ lduh [%o0 + 0x00], %g2
+ sub %o1, 2, %o1
+ add %o0, 2, %o0
+ sll %g2, 16, %g2
+ addcc %g2, %o2, %o2
+ srl %o2, 16, %g3
+ addc %g0, %g3, %g2
+ sll %o2, 16, %o2
+ sll %g2, 16, %g3
+ srl %o2, 16, %o2
+ or %g3, %o2, %o2
+1: brz,pn %g7, csum_partial_fix_aligned
+ nop
+ ld [%o0 + 0x00], %g2
+ sub %o1, 4, %o1
+ addcc %g2, %o2, %o2
+ add %o0, 4, %o0
+ addc %g0, %o2, %o2
+csum_partial_fix_aligned:
+ brz,pt %o3, 3f ! none to do
+ andcc %o1, 0x70, %g1 ! clears carry flag too
+5: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
+ sub %o3, 128, %o3 ! detract from loop iters
+ addc %g0, %o2, %o2 ! sink in final carry
+ brnz,pt %o3, 5b ! more to do
+ add %o0, 128, %o0 ! advance buf ptr
+3: brz,pn %g1, cpte ! nope
+ andcc %o1, 0xf, %o3 ! anything left at all?
+10: rd %pc, %g7 ! get pc
+ srl %g1, 1, %o4 ! compute offset
+ sub %g7, %g1, %g7 ! adjust jmp ptr
+ sub %g7, %o4, %g7 ! final jmp ptr adjust
+ jmp %g7 + (cpte - 8 - 10b) ! enter the table
+ add %o0, %g1, %o0 ! advance buf ptr
+cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
+ CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
+ addc %g0, %o2, %o2 ! fetch final carry
+ andcc %o1, 0xf, %g0 ! anything left at all?
+cpte: brnz,pn %o3, csum_partial_end_cruft ! yep, handle it
+ sethi %uhi(KERNBASE), %g4
+ mov %o2, %o0 ! return computed csum
+ retl ! get outta here
+ sllx %g4, 32, %g4 ! give gfp back
+
+ .globl __csum_partial_copy_start, __csum_partial_copy_end
+__csum_partial_copy_start:
+
+/* Exception-table helpers for the copying checksum routines. The
+ * trailing `z` argument carries `#` so that "#alloc"/"#execinstr"
+ * section flags survive cpp. EX records a fixup that computes the
+ * remaining byte count into %o3 before branching to 30f; EX2/EX3
+ * branch straight to a local fixup label (30f / 96f).
+ */
+#define EX(x,y,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: ba,pt %xcc, 30f; \
+ a, b, %o3; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EX2(x,y,z) \
+98: x,y; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 30f; \
+ .text; \
+ .align 4
+
+#define EX3(x,y,z) \
+98: x,y; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 96f; \
+ .text; \
+ .align 4
+
+/* Register a fixup handler for a whole [start,end) instruction range. */
+#define EXT(start,end,handler,z) \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word start, 0, end, handler; \
+ .text; \
+ .align 4
+
+ /* This aligned version executes typically in 8.5 superscalar cycles, this
+ * is the best I can do. I say 8.5 because the final add will pair with
+ * the next ldd in the main unrolled loop. Thus the pipe is always full.
+ * If you change these macros (including order of instructions),
+ * please check the fixup code below as well.
+ */
+#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [src + off + 0x00], t0; \
+ ldd [src + off + 0x08], t2; \
+ addccc t0, sum, sum; \
+ ldd [src + off + 0x10], t4; \
+ addccc t1, sum, sum; \
+ ldd [src + off + 0x18], t6; \
+ addccc t2, sum, sum; \
+ std t0, [dst + off + 0x00]; \
+ addccc t3, sum, sum; \
+ std t2, [dst + off + 0x08]; \
+ addccc t4, sum, sum; \
+ std t4, [dst + off + 0x10]; \
+ addccc t5, sum, sum; \
+ std t6, [dst + off + 0x18]; \
+ addccc t6, sum, sum; \
+ addccc t7, sum, sum;
+
+ /* 12 superscalar cycles seems to be the limit for this case,
+ * because of this we thus do all the ldd's together to get
+ * Viking MXCC into streaming mode. Ho hum...
+ */
+#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [src + off + 0x00], t0; \
+ ldd [src + off + 0x08], t2; \
+ ldd [src + off + 0x10], t4; \
+ ldd [src + off + 0x18], t6; \
+ st t0, [dst + off + 0x00]; \
+ addccc t0, sum, sum; \
+ st t1, [dst + off + 0x04]; \
+ addccc t1, sum, sum; \
+ st t2, [dst + off + 0x08]; \
+ addccc t2, sum, sum; \
+ st t3, [dst + off + 0x0c]; \
+ addccc t3, sum, sum; \
+ st t4, [dst + off + 0x10]; \
+ addccc t4, sum, sum; \
+ st t5, [dst + off + 0x14]; \
+ addccc t5, sum, sum; \
+ st t6, [dst + off + 0x18]; \
+ addccc t6, sum, sum; \
+ st t7, [dst + off + 0x1c]; \
+ addccc t7, sum, sum;
+
+ /* Yuck, 6 superscalar cycles...
+ * Copy-and-sum 16 bytes at src-off; used by the tail jump table.
+ */
+#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
+ ldd [src - off - 0x08], t0; \
+ ldd [src - off - 0x00], t2; \
+ addccc t0, sum, sum; \
+ st t0, [dst - off - 0x08]; \
+ addccc t1, sum, sum; \
+ st t1, [dst - off - 0x04]; \
+ addccc t2, sum, sum; \
+ st t2, [dst - off - 0x00]; \
+ addccc t3, sum, sum; \
+ st t3, [dst - off + 0x04];
+
+ /* Handle the end cruft code out of band for better cache patterns.
+ * Copies and sums the final <16 bytes (%o3): optional dword, word,
+ * halfword and byte; sum accumulates in %g7, returned in %o0.
+ * Every user-memory access goes through the EX/EX2 fixup macros.
+ */
+cc_end_cruft:
+ andcc %o3, 8, %g0 ! begin checks for that code
+ be,pn %icc, 1f
+ and %o3, 4, %g5
+ EX(ldd [%o0 + 0x00], %g2, and %o3, 0xf,#)
+ add %o1, 8, %o1
+ addcc %g2, %g7, %g7
+ add %o0, 8, %o0
+ addccc %g3, %g7, %g7
+ EX2(st %g2, [%o1 - 0x08],#)
+ addc %g0, %g7, %g7
+ EX2(st %g3, [%o1 - 0x04],#)
+1: brz,pt %g5, 1f
+ andcc %o3, 3, %o3
+ EX(ld [%o0 + 0x00], %g2, add %o3, 4,#)
+ add %o1, 4, %o1
+ addcc %g2, %g7, %g7
+ EX2(st %g2, [%o1 - 0x04],#)
+ addc %g0, %g7, %g7
+ add %o0, 4, %o0
+1: brz,pn %o3, 1f
+ addcc %o3, -1, %g0
+ bne,pn %icc, 2f
+ subcc %o3, 2, %o3
+ ba,pt %xcc, 4f
+ clr %o4
+2: EX(lduh [%o0 + 0x00], %o4, add %o3, 2,#)
+ add %o0, 2, %o0
+ EX2(sth %o4, [%o1 + 0x00],#)
+ be,pn %icc, 6f
+ add %o1, 2, %o1
+ sll %o4, 16, %o4
+4: EX(ldub [%o0 + 0x00], %o5, add %g0, 1,#)
+ EX2(stb %o5, [%o1 + 0x00],#)
+ sll %o5, 8, %o5
+ or %o5, %o4, %o4
+6: addcc %o4, %g7, %g7
+1: sllx %g4, 32, %g4
+ retl
+ addc %g0, %g7, %o0
+
+ /* Sun, you just can't beat me, you just can't. Stop trying,
+ * give up. I'm serious, I am going to kick the living shit
+ * out of you, game over, lights out.
+ */
+ .align 8
+ .globl __csum_partial_copy_sparc_generic
+/* Copy %g1 bytes from %o0 to %o1 while accumulating the checksum in
+ * %g7; result returned in %o0. Mismatched src/dst alignment or odd
+ * src falls back to the byte-wise ccslow path; otherwise the buffer
+ * is aligned up front, then processed in unrolled 128-byte chunks
+ * (word stores, or dword stores via ccdbl when dst is 8-byte
+ * aligned), a 16-byte-granular jump table, and cc_end_cruft.
+ */
+__csum_partial_copy_sparc_generic:
+ /* %o0=src, %o1=dest, %g1=len, %g7=sum */
+ xor %o0, %o1, %o4 ! get changing bits
+ andcc %o4, 3, %g0 ! check for mismatched alignment
+ bne,pn %icc, ccslow ! better this than unaligned/fixups
+ andcc %o0, 7, %g0 ! need to align things?
+ be,pt %icc, cc_dword_aligned ! yes, we check for short lengths there
+ andn %g1, 0x7f, %g2 ! can we use unrolled loop?
+ cmp %g1, 6
+ bl,a,pn %icc, ccte
+ andcc %g1, 0xf, %o3
+ andcc %o0, 0x1, %g0
+ bne,pn %icc, ccslow
+ andcc %o0, 0x2, %g0
+ be,pn %icc, 1f
+ andcc %o0, 0x4, %g0
+ EX(lduh [%o0 + 0x00], %g4, add %g1, 0,#)
+ sub %g1, 2, %g1
+ EX2(sth %g4, [%o1 + 0x00],#)
+ add %o0, 2, %o0
+ sll %g4, 16, %g4
+ addcc %g4, %g7, %g7
+ add %o1, 2, %o1
+ srl %g7, 16, %g3
+ addc %g0, %g3, %g4
+ sll %g7, 16, %g7
+ sll %g4, 16, %g3
+ srl %g7, 16, %g7
+ andcc %o0, 0x4, %g0
+ or %g3, %g7, %g7
+1: be,pt %icc, 3f
+ andn %g1, 0x7f, %g0
+ EX(ld [%o0 + 0x00], %g4, add %g1, 0,#)
+ sub %g1, 4, %g1
+ EX2(st %g4, [%o1 + 0x00],#)
+ add %o0, 4, %o0
+ addcc %g4, %g7, %g7
+ add %o1, 4, %o1
+ addc %g0, %g7, %g7
+cc_dword_aligned:
+3: brz,pn %g2, 3f ! nope, less than one loop remains
+ andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundry?
+ be,pn %icc, ccdbl + 4 ! 8 byte aligned, kick ass
+5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+10: EXT(5b, 10b, 20f,#) ! note for exception handling
+ sub %g1, 128, %g1 ! detract from length
+ addc %g0, %g7, %g7 ! add in last carry bit
+ andncc %g1, 0x7f, %g0 ! more to csum?
+ add %o0, 128, %o0 ! advance src ptr
+ bne,pt %icc, 5b ! we did not go negative, continue looping
+ add %o1, 128, %o1 ! advance dest ptr
+3: andcc %g1, 0x70, %o2 ! can use table?
+ccmerge:be,pn %icc, ccte ! nope, go and check for end cruft
+ andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw)
+ srl %o2, 1, %o4 ! begin negative offset computation
+13: rd %pc, %o5 ! set up table ptr end
+ add %o0, %o2, %o0 ! advance src ptr
+ sub %o5, %o4, %o5 ! continue table calculation
+ sll %o2, 1, %g2 ! constant multiplies are fun...
+ sub %o5, %g2, %o5 ! some more adjustments
+ jmpl %o5 + (12f-13b), %g0 ! jump into it, duff style, wheee...
+ add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
+cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
+12: EXT(cctbl, 12b, 22f,#) ! note for exception table handling
+ addc %g0, %g7, %g7
+ andcc %o3, 0xf, %g0 ! check for low bits set
+ccte: bne,pn %icc, cc_end_cruft ! something left, handle it out of band
+ sethi %uhi(KERNBASE), %g4 ! restore gfp
+ mov %g7, %o0 ! give em the computed checksum
+ retl ! return
+ sllx %g4, 32, %g4 ! finish gfp restoration
+ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+11: EXT(ccdbl, 11b, 21f,#) ! note for exception table handling
+ sub %g1, 128, %g1 ! detract from length
+ addc %g0, %g7, %g7 ! add in last carry bit
+ andncc %g1, 0x7f, %g0 ! more to csum?
+ add %o0, 128, %o0 ! advance src ptr
+ bne,pt %icc, ccdbl ! we did not go negative, continue looping
+ add %o1, 128, %o1 ! advance dest ptr
+ ba,pt %xcc, ccmerge ! finish it off, above
+ andcc %g1, 0x70, %o2 ! can use table? (clears carry btw)
+
+ccslow: mov 0, %g5
+ brlez,pn %g1, 4f
+ andcc %o0, 1, %o5
+ be,a,pt %icc, 1f
+ srl %g1, 1, %o3
+ sub %g1, 1, %g1
+ EX(ldub [%o0], %g5, add %g1, 1,#)
+ add %o0, 1, %o0
+ EX2(stb %g5, [%o1],#)
+ srl %g1, 1, %o3
+ add %o1, 1, %o1
+1: brz,a,pn %o3, 3f
+ andcc %g1, 1, %g0
+ andcc %o0, 2, %g0
+ be,a,pt %icc, 1f
+ srl %o3, 1, %o3
+ EX(lduh [%o0], %o4, add %g1, 0,#)
+ sub %g1, 2, %g1
+ srl %o4, 8, %g2
+ sub %o3, 1, %o3
+ EX2(stb %g2, [%o1],#)
+ add %o4, %g5, %g5
+ EX2(stb %o4, [%o1 + 1],#)
+ add %o0, 2, %o0
+ srl %o3, 1, %o3
+ add %o1, 2, %o1
+1: brz,a,pn %o3, 2f
+ andcc %g1, 2, %g0
+ EX3(ld [%o0], %o4,#)
+5: srl %o4, 24, %g2
+ srl %o4, 16, %g3
+ EX2(stb %g2, [%o1],#)
+ srl %o4, 8, %g2
+ EX2(stb %g3, [%o1 + 1],#)
+ add %o0, 4, %o0
+ EX2(stb %g2, [%o1 + 2],#)
+ addcc %o4, %g5, %g5
+ EX2(stb %o4, [%o1 + 3],#)
+ addc %g5, %g0, %g5 ! I am now to lazy to optimize this (question is if it
+ add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
+ subcc %o3, 1, %o3 ! tricks
+ bne,a,pt %icc, 5b
+ EX3(ld [%o0], %o4,#)
+ sll %g5, 16, %g2
+ srl %g5, 16, %g5
+ srl %g2, 16, %g2
+ andcc %g1, 2, %g0
+ add %g2, %g5, %g5
+2: be,a,pt %icc, 3f
+ andcc %g1, 1, %g0
+ EX(lduh [%o0], %o4, and %g1, 3,#)
+ andcc %g1, 1, %g0
+ srl %o4, 8, %g2
+ add %o0, 2, %o0
+ EX2(stb %g2, [%o1],#)
+ add %g5, %o4, %g5
+ EX2(stb %o4, [%o1 + 1],#)
+ add %o1, 2, %o1
+3: be,a,pt %icc, 1f
+ sll %g5, 16, %o4
+ EX(ldub [%o0], %g2, add %g0, 1,#)
+ sll %g2, 8, %o4
+ EX2(stb %g2, [%o1],#)
+ add %g5, %o4, %g5
+ sll %g5, 16, %o4
+1: addcc %o4, %g5, %g5
+ srl %g5, 16, %o4
+ addc %g0, %o4, %g5
+ brz,pt %o5, 4f
+ srl %g5, 8, %o4
+ and %g5, 0xff, %g2
+ and %o4, 0xff, %o4
+ sll %g2, 8, %g2
+ or %g2, %o4, %g5
+4: addcc %g7, %g5, %g7
+ retl
+ addc %g0, %g7, %o0
+__csum_partial_copy_end:
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+/* We do these strange calculations for the csum_*_from_user case only, ie.
+ * we only bother with faults on loads... */
+
+/* o2 = ((g2%20)&3)*8
+ * o3 = g1 - (g2/20)*32 - o2 */
+20:					! fault inside unaligned BIGCHUNK loop (registered by EXT(5b, 10b, 20f))
+	cmp %g2, 20			! %g2 = faulting insn index; reduce mod 20 (insns per 32-byte chunk)
+	blu,a,pn %icc, 1f
+	and %g2, 3, %o2
+	sub %g1, 32, %g1		! one full 32-byte chunk was csummed before the fault
+	ba,pt %xcc, 20b
+	sub %g2, 20, %g2
+1:
+	sll %o2, 3, %o2
+	ba,pt %xcc, 31f			! hand off to common copy-and-zero fixup
+	sub %g1, %o2, %o3
+
+/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
+ * o3 = g1 - (g2/16)*32 - o2 */
+21:					! fault inside aligned BIGCHUNK loop (registered by EXT(ccdbl, 11b, 21f))
+	andcc %g2, 15, %o3
+	srl %g2, 4, %g2			! 16 insns per 32-byte aligned chunk
+	be,a,pn %icc, 1f
+	clr %o2
+	add %o3, 1, %o3
+	and %o3, 14, %o3
+	sll %o3, 3, %o2
+1:
+	sll %g2, 5, %g2			! completed chunks * 32 bytes
+	sub %g1, %g2, %o3
+	ba,pt %xcc, 31f			! hand off to common copy-and-zero fixup
+	sub %o3, %o2, %o3
+
+/* o0 += (g2/10)*16 - 0x70
+ * o1 += (g2/10)*16 - 0x70
+ * o2 = (g2 % 10) ? 8 : 0
+ * o3 += 0x70 - (g2/10)*16 - o2 */
+22:					! fault inside LASTCHUNK table (registered by EXT(cctbl, 12b, 22f))
+	cmp %g2, 10			! 10 insns per 16-byte LASTCHUNK entry
+	blu,a,pt %xcc, 1f
+	sub %o0, 0x70, %o0
+	add %o0, 16, %o0
+	add %o1, 16, %o1
+	sub %o3, 16, %o3
+	ba,pt %xcc, 22b
+	sub %g2, 10, %g2
+1:
+	sub %o1, 0x70, %o1
+	add %o3, 0x70, %o3
+	clr %o2
+	movrnz %g2, 8, %o2		! partway into an entry: 8 more bytes to copy
+	ba,pt %xcc, 31f
+	sub %o3, %o2, %o3
+96:					! fault in the slow byte/halfword path
+	and %g1, 3, %g1			! leftover bytes (mod 4)
+	sll %o3, 2, %o3			! remaining words -> bytes
+	add %g1, %o3, %o3		! %o3 = total bytes left to zero
+30:
+/* %o1 is dst
+ * %o3 is # bytes to zero out
+ * %o4 is faulting address
+ * %o5 is %pc where fault occured */
+	clr %o2				! nothing left to copy, only zero out
+31:
+/* %o0 is src
+ * %o1 is dst
+ * %o2 is # of bytes to copy from src to dst
+ * %o3 is # bytes to zero out
+ * %o4 is faulting address
+ * %o5 is %pc where fault occured */
+	save %sp, -136, %sp		! fresh register window for the calls below
+	mov %i5, %o0			! arg0: %pc of the fault
+	mov %i7, %o1			! arg1: return pc (NOTE(review): presumably lookup_fault's ret_pc)
+	mov %i4, %o2			! arg2: faulting address
+	call lookup_fault
+	mov %g7, %i4			! delay slot: preserve checksum accumulator %g7
+	cmp %o0, 2			! 2 = fault covered by exception table?
+	bne,pn %icc, 1f			! no - skip straight to reporting -EFAULT
+	add %g0, -EFAULT, %i5		! delay slot: error code to report
+	brz,pn %i2, 2f			! no bytes left to copy? only zero the tail
+	mov %i0, %o1			! delay slot: src (user) for __copy_from_user
+	mov %i1, %o0			! dst (kernel)
+	call __copy_from_user		! copy the part the fast path never reached
+	mov %i2, %o2			! delay slot: length
+	brnz,a,pn %o0, 2f		! copy itself faulted partway:
+	add %i3, %i2, %i3		! annulled unless taken - zero that part too
+	add %i1, %i2, %i1		! copy succeeded: advance dst past it
+2:
+	mov %i1, %o0			! start of area to clear
+	wr %g0, ASI_S, %asi		! user ASI for __bzero_noasi (was "%%g0"/"%%asi": C-inline-asm escapes, invalid in a .S file)
+	call __bzero_noasi
+	mov %i3, %o1			! delay slot: byte count to clear
+1:
+	ldx [%sp + STACK_BIAS + 264], %o2 ! struct_ptr of parent
+	st %i5, [%o2]			! report -EFAULT through it (presumably caller's err pointer)
+	ret
+	restore
diff --git a/arch/sparc64/lib/copy_from_user.S b/arch/sparc64/lib/copy_from_user.S
new file mode 100644
index 000000000..ba26a1c01
--- /dev/null
+++ b/arch/sparc64/lib/copy_from_user.S
@@ -0,0 +1,456 @@
+/* copy_user.S: Sparc optimized copy_from_user code.
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1996 David S. Miller
+ * Copyright(C) 1996 Eddie C. Dost
+ * Copyright(C) 1996,1997 Jakub Jelinek
+ *
+ * derived from:
+ * e-mail between David and Eddie.
+ *
+ * Returns 0 if successful, otherwise count of bytes not copied yet
+ *
+ * FIXME: This code should be optimized for sparc64... -jj
+ */
+
+#include <asm/ptrace.h>
+#include <asm/asi.h>
+
+#define EX(x,y,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: retl; \
+ a, b, %o0; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EX2(x,y,c,d,e,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: c, d, e; \
+ retl; \
+ a, b, %o0; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EXO2(x,y,z) \
+98: x,##y; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 97f; \
+ .text; \
+ .align 4
+
+#define EXT(start,end,handler,z) \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word start, 0, end, handler; \
+ .text; \
+ .align 4
+
+/* Please do not change following macros unless you change logic used
+ * in .fixup at the end of this file as well
+ */
+
+/* Both these macros have to start with exactly the same insn */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldda [%src + offset + 0x00] %asi, %t0; \
+ ldda [%src + offset + 0x08] %asi, %t2; \
+ ldda [%src + offset + 0x10] %asi, %t4; \
+ ldda [%src + offset + 0x18] %asi, %t6; \
+ st %t0, [%dst + offset + 0x00]; \
+ st %t1, [%dst + offset + 0x04]; \
+ st %t2, [%dst + offset + 0x08]; \
+ st %t3, [%dst + offset + 0x0c]; \
+ st %t4, [%dst + offset + 0x10]; \
+ st %t5, [%dst + offset + 0x14]; \
+ st %t6, [%dst + offset + 0x18]; \
+ st %t7, [%dst + offset + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldda [%src + offset + 0x00] %asi, %t0; \
+ ldda [%src + offset + 0x08] %asi, %t2; \
+ ldda [%src + offset + 0x10] %asi, %t4; \
+ ldda [%src + offset + 0x18] %asi, %t6; \
+ std %t0, [%dst + offset + 0x00]; \
+ std %t2, [%dst + offset + 0x08]; \
+ std %t4, [%dst + offset + 0x10]; \
+ std %t6, [%dst + offset + 0x18];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldda [%src - offset - 0x10] %asi, %t0; \
+ ldda [%src - offset - 0x08] %asi, %t2; \
+ st %t0, [%dst - offset - 0x10]; \
+ st %t1, [%dst - offset - 0x0c]; \
+ st %t2, [%dst - offset - 0x08]; \
+ st %t3, [%dst - offset - 0x04];
+
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lduha [%src + offset + 0x00] %asi, %t0; \
+ lduha [%src + offset + 0x02] %asi, %t1; \
+ lduha [%src + offset + 0x04] %asi, %t2; \
+ lduha [%src + offset + 0x06] %asi, %t3; \
+ sth %t0, [%dst + offset + 0x00]; \
+ sth %t1, [%dst + offset + 0x02]; \
+ sth %t2, [%dst + offset + 0x04]; \
+ sth %t3, [%dst + offset + 0x06];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ lduba [%src - offset - 0x02] %asi, %t0; \
+ lduba [%src - offset - 0x01] %asi, %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
+
+ .text
+ .align 4
+
+ .globl __copy_from_user
+dword_align:				! src not 4-byte aligned: copy 1-3 leading bytes, rejoin main path at 3:
+	andcc %o1, 1, %g0		! src on an odd address?
+	be 4f				! no - only a halfword fixup needed
+	andcc %o1, 2, %g0		! delay slot: test bit 1 of the *pre-increment* address
+
+	EXO2(lduba [%o1] %asi, %g2,#)	! copy one user byte (fault -> 97f)
+	add %o1, 1, %o1
+	stb %g2, [%o0]
+	sub %o2, 1, %o2
+	bne 3f				! (addr&2)!=0 means addr%4==3: the byte made us aligned
+	add %o0, 1, %o0			! delay slot
+
+	EXO2(lduha [%o1] %asi, %g2,#)	! addr%4 was 1: one more halfword reaches alignment
+	add %o1, 2, %o1
+	sth %g2, [%o0]
+	sub %o2, 2, %o2
+	ba,pt %xcc, 3f			! rejoin main copy path
+	add %o0, 2, %o0			! delay slot
+4:
+	EXO2(lduha [%o1] %asi, %g2,#)	! even address, halfword-misaligned: copy one halfword
+	add %o1, 2, %o1
+	sth %g2, [%o0]
+	sub %o2, 2, %o2
+	ba,pt %xcc, 3f
+	add %o0, 2, %o0
+
+__copy_from_user: /* %o0=dst %o1=src %o2=len */
+ wr %g0, ASI_S, %asi
+ xor %o0, %o1, %o4
+1:
+ andcc %o4, 3, %o5
+2:
+ bne,pn %icc, cannot_optimize
+ cmp %o2, 15
+
+ bleu,pn %xcc, short_aligned_end
+ andcc %o1, 3, %g0
+
+ bne,pn %icc, dword_align
+3:
+ andcc %o1, 4, %g0
+
+ be,pt %icc, 2f
+ mov %o2, %g1
+
+ EXO2(lda [%o1] %asi, %o4,#)
+ sub %g1, 4, %g1
+ st %o4, [%o0]
+ add %o1, 4, %o1
+ add %o0, 4, %o0
+2:
+ andcc %g1, 0xffffffffffffff80, %g7
+ be,pn %xcc, 3f
+ andcc %o0, 4, %g0
+
+ be,pn %icc, ldd_std + 4
+5:
+ MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+80:
+ EXT(5b, 80b, 50f,#)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, 5b
+ add %o0, 128, %o0
+3:
+ andcc %g1, 0x70, %g7
+ be,pn %icc, copy_user_table_end
+ andcc %g1, 8, %g0
+100:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + (copy_user_table_end - 100b), %g0
+ add %o0, %g7, %o0
+
+copy_user_table:
+ MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+copy_user_table_end:
+ EXT(copy_user_table, copy_user_table_end, 51f,#)
+ be,pt %icc, copy_user_last7
+ andcc %g1, 4, %g0
+
+ EX(ldda [%o1] %asi, %g2, and %g1, 0xf,#)
+ add %o0, 8, %o0
+ add %o1, 8, %o1
+ st %g2, [%o0 - 0x08]
+ st %g3, [%o0 - 0x04]
+copy_user_last7:
+ be,pn %icc, 1f
+ andcc %g1, 2, %g0
+
+ EX(lda [%o1] %asi, %g2, and %g1, 7,#)
+ add %o1, 4, %o1
+ st %g2, [%o0]
+ add %o0, 4, %o0
+1:
+ be,pn %icc, 1f
+ andcc %g1, 1, %g0
+
+ EX(lduha [%o1] %asi, %g2, and %g1, 3,#)
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ add %o0, 2, %o0
+1:
+ be,pn %icc, 1f
+ nop
+
+ EX(lduba [%o1] %asi, %g2, add %g0, 1,#)
+ stb %g2, [%o0]
+1:
+ retl
+ clr %o0
+
+ldd_std:
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+81:
+ EXT(ldd_std, 81b, 52f,#)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, ldd_std
+ add %o0, 128, %o0
+
+ andcc %g1, 0x70, %g7
+ be,pn %icc, copy_user_table_end
+ andcc %g1, 8, %g0
+101:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + (copy_user_table_end - 101b), %g0
+ add %o0, %g7, %o0
+
+cannot_optimize:
+ bleu short_end
+ cmp %o5, 2
+
+ bne byte_chunk
+ and %o2, 0xfffffffffffffff0, %o3
+
+ andcc %o1, 1, %g0
+ be 10f
+ nop
+
+ EXO2(lduba [%o1] %asi, %g2,#)
+ add %o1, 1, %o1
+ stb %g2, [%o0]
+ sub %o2, 1, %o2
+ andcc %o2, 0xfffffffffffffff0, %o3
+ be short_end
+ add %o0, 1, %o0
+10:
+ MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+ MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
+82:
+ EXT(10b, 82b, 53f,#)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne 10b
+ add %o0, 0x10, %o0
+ ba,pt %xcc, 2f
+ and %o2, 0xe, %o3
+
+byte_chunk:
+ MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
+83:
+ EXT(byte_chunk, 83b, 54f,#)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne,pt %xcc, byte_chunk
+ add %o0, 0x10, %o0
+
+short_end:
+ and %o2, 0xe, %o3
+2:
+ rd %pc, %o5
+ sll %o3, 3, %o4
+ add %o0, %o3, %o0
+ sub %o5, %o4, %o5
+ add %o1, %o3, %o1
+ jmpl %o5 + (short_table_end - 2b), %g0
+ andcc %o2, 1, %g0
+84:
+ MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+short_table_end:
+ EXT(84b, short_table_end, 55f,#)
+ be 1f
+ nop
+ EX(lduba [%o1] %asi, %g2, add %g0, 1,#)
+ stb %g2, [%o0]
+1:
+ retl
+ clr %o0
+
+short_aligned_end:
+ bne short_end
+ andcc %o2, 8, %g0
+
+ be 1f
+ andcc %o2, 4, %g0
+
+ EXO2(lda [%o1 + 0x00] %asi, %g2,#)
+ EX(lda [%o1 + 0x04] %asi, %g3, sub %o2, 4,#)
+ add %o1, 8, %o1
+ st %g2, [%o0 + 0x00]
+ st %g3, [%o0 + 0x04]
+ add %o0, 8, %o0
+1:
+ ba,pt %xcc, copy_user_last7
+ mov %o2, %g1
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+97:
+ retl
+ mov %o2, %o0
+/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
+50:
+/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
+ * happens. This is derived from the amount ldd reads, st stores, etc.
+ * x = g2 % 12;
+ * o0 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? x * 8 : (x - 4) * 4)
+ */
+ cmp %g2, 12
+ bcs 1f
+ cmp %g2, 24
+ bcs 2f
+ cmp %g2, 36
+ bcs 3f
+ nop
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+3:
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+2:
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+1:
+ cmp %g2, 4
+ bcs,a 1f
+ sll %g2, 3, %g2
+ sub %g2, 4, %g2
+ sll %g2, 2, %g2
+1:
+ and %g1, 0x7f, %o0
+ add %o0, %g7, %o0
+ retl
+ sub %o0, %g2, %o0
+51:
+/* i = 41 - g2; j = i % 6;
+ * o0 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : (j - 3) * 8;
+ */
+ neg %g2
+ and %g1, 0xf, %g1
+ add %g2, 41, %g2
+1:
+ cmp %g2, 6
+ bcs,a 2f
+ cmp %g2, 4
+ add %g1, 16, %g1
+ b 1b
+ sub %g2, 6, %g2
+2:
+ bcs,a 3f
+ inc %g2
+ sub %g2, 3, %g2
+ b 2f
+ sll %g2, 3, %g2
+3:
+ sll %g2, 2, %g2
+2:
+ retl
+ add %g1, %g2, %o0
+52:
+/* o0 = g1 + g7 - (g2 / 8) * 32 + (x & 3) * 8 */
+ and %g2, 0xfffffffffffffff8, %g4
+ and %g2, 3, %g2
+ sll %g4, 2, %g4
+ sll %g2, 3, %g2
+ add %g2, %g4, %g2
+ b,a 1b
+53:
+/* o0 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 3) * 2 */
+ and %g2, 3, %g4
+ and %g2, 0xfffffffffffffff8, %g2
+ sll %g4, 1, %g4
+ add %g2, %g4, %g2
+ and %o2, 0xf, %o0
+ add %o0, %o3, %o0
+ retl
+ sub %o0, %g2, %o0
+54:
+/* o0 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 1) */
+ srl %g2, 2, %o4
+ and %g2, 1, %o1
+ sll %o4, 1, %o4
+ and %o2, 0xf, %o2
+ sub %o3, %o1, %o3
+ sub %o2, %o4, %o2
+ retl
+ add %o2, %o3, %o0
+55:
+/* o0 = (o2 & 1) + (27 - g2)/4 * 2 + ((27 - g2) & 1) */
+ neg %g2
+ and %o2, 1, %o2
+ add %g2, 27, %g2
+ srl %g2, 2, %o1
+ and %g2, 1, %g2
+ sll %o1, 1, %o1
+ add %o2, %g2, %o0
+ retl
+ add %o0, %o1, %o0
diff --git a/arch/sparc64/lib/copy_to_user.S b/arch/sparc64/lib/copy_to_user.S
new file mode 100644
index 000000000..47a6bd337
--- /dev/null
+++ b/arch/sparc64/lib/copy_to_user.S
@@ -0,0 +1,456 @@
+/* copy_user.S: Sparc optimized copy_to_user code.
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1996 David S. Miller
+ * Copyright(C) 1996 Eddie C. Dost
+ * Copyright(C) 1996,1997 Jakub Jelinek
+ *
+ * derived from:
+ * e-mail between David and Eddie.
+ *
+ * Returns 0 if successful, otherwise count of bytes not copied yet
+ *
+ * FIXME: This code should be optimized for sparc64... -jj
+ */
+
+#include <asm/ptrace.h>
+#include <asm/asi.h>
+
+#define EX(x,y,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: retl; \
+ a, b, %o0; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EX2(x,y,c,d,e,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: c, d, e; \
+ retl; \
+ a, b, %o0; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EXO2(x,y,z) \
+98: x,##y; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 97f; \
+ .text; \
+ .align 4
+
+#define EXT(start,end,handler,z) \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word start, 0, end, handler; \
+ .text; \
+ .align 4
+
+/* Please do not change following macros unless you change logic used
+ * in .fixup at the end of this file as well
+ */
+
+/* Both these macros have to start with exactly the same insn */
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ sta %t0, [%dst + offset + 0x00] %asi; \
+ sta %t1, [%dst + offset + 0x04] %asi; \
+ sta %t2, [%dst + offset + 0x08] %asi; \
+ sta %t3, [%dst + offset + 0x0c] %asi; \
+ sta %t4, [%dst + offset + 0x10] %asi; \
+ sta %t5, [%dst + offset + 0x14] %asi; \
+ sta %t6, [%dst + offset + 0x18] %asi; \
+ sta %t7, [%dst + offset + 0x1c] %asi;
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ stda %t0, [%dst + offset + 0x00] %asi; \
+ stda %t2, [%dst + offset + 0x08] %asi; \
+ stda %t4, [%dst + offset + 0x10] %asi; \
+ stda %t6, [%dst + offset + 0x18] %asi;
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ sta %t0, [%dst - offset - 0x10] %asi; \
+ sta %t1, [%dst - offset - 0x0c] %asi; \
+ sta %t2, [%dst - offset - 0x08] %asi; \
+ sta %t3, [%dst - offset - 0x04] %asi;
+
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lduh [%src + offset + 0x00], %t0; \
+ lduh [%src + offset + 0x02], %t1; \
+ lduh [%src + offset + 0x04], %t2; \
+ lduh [%src + offset + 0x06], %t3; \
+ stha %t0, [%dst + offset + 0x00] %asi; \
+ stha %t1, [%dst + offset + 0x02] %asi; \
+ stha %t2, [%dst + offset + 0x04] %asi; \
+ stha %t3, [%dst + offset + 0x06] %asi;
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stba %t0, [%dst - offset - 0x02] %asi; \
+ stba %t1, [%dst - offset - 0x01] %asi;
+
+ .text
+ .align 4
+
+ .globl __copy_to_user
+dword_align:
+ andcc %o1, 1, %g0
+ be 4f
+ andcc %o1, 2, %g0
+
+ ldub [%o1], %g2
+ add %o1, 1, %o1
+ EXO2(stba %g2, [%o0] %asi,#)
+ sub %o2, 1, %o2
+ bne 3f
+ add %o0, 1, %o0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ EXO2(stha %g2, [%o0] %asi,#)
+ sub %o2, 2, %o2
+ ba,pt %xcc, 3f
+ add %o0, 2, %o0
+4:
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ EXO2(stha %g2, [%o0] %asi,#)
+ sub %o2, 2, %o2
+ ba,pt %xcc, 3f
+ add %o0, 2, %o0
+
+__copy_to_user: /* %o0=dst %o1=src %o2=len */
+ wr %g0, ASI_S, %asi
+ xor %o0, %o1, %o4
+1:
+ andcc %o4, 3, %o5
+2:
+ bne,pn %icc, cannot_optimize
+ cmp %o2, 15
+
+ bleu,pn %xcc, short_aligned_end
+ andcc %o1, 3, %g0
+
+ bne,pn %icc, dword_align
+3:
+ andcc %o1, 4, %g0
+
+ be,pt %icc, 2f
+ mov %o2, %g1
+
+ ld [%o1], %o4
+ sub %g1, 4, %g1
+ EXO2(sta %o4, [%o0] %asi,#)
+ add %o1, 4, %o1
+ add %o0, 4, %o0
+2:
+ andcc %g1, 0xffffffffffffff80, %g7
+ be,pn %xcc, 3f
+ andcc %o0, 4, %g0
+
+ be,pn %icc, ldd_std + 4
+5:
+ MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+80:
+ EXT(5b, 80b, 50f,#)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, 5b
+ add %o0, 128, %o0
+3:
+ andcc %g1, 0x70, %g7
+ be,pn %icc, copy_user_table_end
+ andcc %g1, 8, %g0
+100:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + (copy_user_table_end - 100b), %g0
+ add %o0, %g7, %o0
+
+copy_user_table:
+ MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+copy_user_table_end:
+ EXT(copy_user_table, copy_user_table_end, 51f,#)
+ be,pt %icc, copy_user_last7
+ andcc %g1, 4, %g0
+
+ ldd [%o1], %g2
+ add %o0, 8, %o0
+ add %o1, 8, %o1
+ EX(sta %g2, [%o0 - 0x08] %asi, and %g1, 0xf,#)
+ EX2(sta %g3, [%o0 - 0x04] %asi, and %g1, 0xf, %g1, sub %g1, 4,#)
+copy_user_last7:
+ be,pn %icc, 1f
+ andcc %g1, 2, %g0
+
+ ld [%o1], %g2
+ add %o1, 4, %o1
+ EX(sta %g2, [%o0] %asi, and %g1, 7,#)
+ add %o0, 4, %o0
+1:
+ be,pn %icc, 1f
+ andcc %g1, 1, %g0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ EX(stha %g2, [%o0] %asi, and %g1, 3,#)
+ add %o0, 2, %o0
+1:
+ be,pn %icc, 1f
+ nop
+
+ ldub [%o1], %g2
+ EX(stba %g2, [%o0] %asi, add %g0, 1,#)
+1:
+ retl
+ clr %o0
+
+ldd_std:
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+81:
+ EXT(ldd_std, 81b, 52f,#)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, ldd_std
+ add %o0, 128, %o0
+
+ andcc %g1, 0x70, %g7
+ be,pn %icc, copy_user_table_end
+ andcc %g1, 8, %g0
+101:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + (copy_user_table_end - 101b), %g0
+ add %o0, %g7, %o0
+
+cannot_optimize:
+ bleu short_end
+ cmp %o5, 2
+
+ bne byte_chunk
+ and %o2, 0xfffffffffffffff0, %o3
+
+ andcc %o1, 1, %g0
+ be 10f
+ nop
+
+ ldub [%o1], %g2
+ add %o1, 1, %o1
+ EXO2(stba %g2, [%o0] %asi,#)
+ sub %o2, 1, %o2
+ andcc %o2, 0xfffffffffffffff0, %o3
+ be short_end
+ add %o0, 1, %o0
+10:
+ MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+ MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
+82:
+ EXT(10b, 82b, 53f,#)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne 10b
+ add %o0, 0x10, %o0
+ ba,pt %xcc, 2f
+ and %o2, 0xe, %o3
+
+byte_chunk:
+ MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
+83:
+ EXT(byte_chunk, 83b, 54f,#)
+ subcc %o3, 0x10, %o3
+ add %o1, 0x10, %o1
+ bne,pt %xcc, byte_chunk
+ add %o0, 0x10, %o0
+
+short_end:
+ and %o2, 0xe, %o3
+2:
+ rd %pc, %o5
+ sll %o3, 3, %o4
+ add %o0, %o3, %o0
+ sub %o5, %o4, %o5
+ add %o1, %o3, %o1
+ jmpl %o5 + (short_table_end - 2b), %g0
+ andcc %o2, 1, %g0
+84:
+ MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+short_table_end:
+ EXT(84b, short_table_end, 55f,#)
+ be 1f
+ nop
+ ldub [%o1], %g2
+ EX(stba %g2, [%o0] %asi, add %g0, 1,#)
+1:
+ retl
+ clr %o0
+
+short_aligned_end:
+ bne short_end
+ andcc %o2, 8, %g0
+
+ be 1f
+ andcc %o2, 4, %g0
+
+ ld [%o1 + 0x00], %g2
+ ld [%o1 + 0x04], %g3
+ add %o1, 8, %o1
+ EXO2(sta %g2, [%o0 + 0x00] %asi,#)
+ EX(sta %g3, [%o0 + 0x04] %asi, sub %o2, 4,#)
+ add %o0, 8, %o0
+1:
+ ba,pt %xcc, copy_user_last7
+ mov %o2, %g1
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+97:
+ retl
+ mov %o2, %o0
+/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
+50:
+/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
+ * happens. This is derived from the amount ldd reads, st stores, etc.
+ * x = g2 % 12;
+ * o0 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? x * 8 : (x - 4) * 4)
+ */
+ cmp %g2, 12
+ bcs 1f
+ cmp %g2, 24
+ bcs 2f
+ cmp %g2, 36
+ bcs 3f
+ nop
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+3:
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+2:
+ sub %g2, 12, %g2
+ sub %g7, 32, %g7
+1:
+ cmp %g2, 4
+ bcs,a 1f
+ sll %g2, 3, %g2
+ sub %g2, 4, %g2
+ sll %g2, 2, %g2
+1:
+ and %g1, 0x7f, %o0
+ add %o0, %g7, %o0
+ retl
+ sub %o0, %g2, %o0
+51:
+/* i = 41 - g2; j = i % 6;
+ * o0 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : (j - 3) * 8;
+ */
+ neg %g2
+ and %g1, 0xf, %g1
+ add %g2, 41, %g2
+1:
+ cmp %g2, 6
+ bcs,a 2f
+ cmp %g2, 4
+ add %g1, 16, %g1
+ b 1b
+ sub %g2, 6, %g2
+2:
+ bcs,a 3f
+ inc %g2
+ sub %g2, 3, %g2
+ b 2f
+ sll %g2, 3, %g2
+3:
+ sll %g2, 2, %g2
+2:
+ retl
+ add %g1, %g2, %o0
+52:
+/* o0 = g1 + g7 - (g2 / 8) * 32 + (x & 3) * 8 */
+ and %g2, 0xfffffffffffffff8, %g4
+ and %g2, 3, %g2
+ sll %g4, 2, %g4
+ sll %g2, 3, %g2
+ add %g2, %g4, %g2
+ b,a 1b
+53:
+/* o0 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 3) * 2 */
+ and %g2, 3, %g4
+ and %g2, 0xfffffffffffffff8, %g2
+ sll %g4, 1, %g4
+ add %g2, %g4, %g2
+ and %o2, 0xf, %o0
+ add %o0, %o3, %o0
+ retl
+ sub %o0, %g2, %o0
+54:
+/* o0 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 1) */
+ srl %g2, 2, %o4
+ and %g2, 1, %o1
+ sll %o4, 1, %o4
+ and %o2, 0xf, %o2
+ sub %o3, %o1, %o3
+ sub %o2, %o4, %o2
+ retl
+ add %o2, %o3, %o0
+55:
+/* o0 = (o2 & 1) + (27 - g2)/4 * 2 + ((27 - g2) & 1) */
+ neg %g2
+ and %o2, 1, %o2
+ add %g2, 27, %g2
+ srl %g2, 2, %o1
+ and %g2, 1, %g2
+ sll %o1, 1, %o1
+ add %o2, %g2, %o0
+ retl
+ add %o0, %o1, %o0
diff --git a/arch/sparc64/lib/locks.S b/arch/sparc64/lib/locks.S
new file mode 100644
index 000000000..a1154cb6d
--- /dev/null
+++ b/arch/sparc64/lib/locks.S
@@ -0,0 +1,77 @@
+/* $Id: locks.S,v 1.2 1997/03/10 12:28:02 jj Exp $
+ * locks.S: SMP low-level lock primitives on Sparc64.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/ptrace.h>
+
+ .text
+ .align 4
+
+	.globl __spinlock_waitfor
+__spinlock_waitfor:			! out-of-line spin: wait for lock byte at [%g1] to clear
+1:	orcc %g2, 0x0, %g0		! lock byte still set?
+	bne 1b				! yes - keep polling with plain loads
+	ldub [%g1], %g2			! delay slot: re-read lock byte
+	ldstub [%g1], %g2		! looked free: atomic test-and-set attempt
+	jmpl %o7 - 12, %g0		! nonstandard return: 12 bytes *before* the call insn (caller re-checks %g2)
+	mov %g5, %o7			! delay slot: restore caller's %o7, saved in %g5 by convention
+
+	.globl ___become_idt
+___become_idt:				! currently a no-op on sparc64 (see #if 0 below)
+#if 0 /* Don't know how to do this on the Ultra yet... */
+#endif
+	jmpl %o7 + 8, %g0		! plain return
+	mov %g5, %o7			! delay slot: restore caller's %o7 from %g5
+
+___lk_busy_spin:			! kernel-lock contention path: spin until byte clears
+	orcc %g2, 0, %g0
+	bne ___lk_busy_spin		! still held - keep polling
+	ldub [%g1 + 0], %g2		! delay slot: re-read lock byte
+	b 1f				! looked free: retry acquisition in ___lock_kernel
+	ldstub [%g1 + 0], %g2		! delay slot: atomic test-and-set
+
+	.globl ___lock_kernel
+___lock_kernel:				! acquire big kernel lock; entry: %g1=lock addr, %g2=lock_depth, %g6=current
+	addcc %g2, -1, %g2		! decrement depth (0 = not held; held depths count negative)
+	rdpr %pil, %g3			! save current interrupt level
+	bcs,a 9f			! carry set iff depth was nonzero: recursive acquire,
+	st %g2, [%g6 + AOFF_task_lock_depth]	! annulled unless taken - just store new depth
+	wrpr 15, %pil			! block interrupts while taking the lock
+	ldstub [%g1 + 0], %g2		! atomic test-and-set of lock byte
+1:	orcc %g2, 0, %g0		! got it? (0 = byte was clear)
+	bne,a ___lk_busy_spin		! no - spin out of line
+	ldub [%g1 + 0], %g2		! annulled unless taken: prime the spin loop
+	ldub [%g1 + 2], %g2		! byte 2: previous holder tag
+	cmp %g2, %g5			! same as ours? (NOTE(review): %g5 also carries saved %o7 - verify tag convention)
+	be 2f
+	stb %g5, [%g1 + 1]		! delay slot: byte 1 = current holder tag
+	stb %g5, [%g1 + 2]		! record ourselves as most recent holder
+#ifdef __SMP__
+	/* XXX Figure out how to become interrupt receiver in SMP system. */
+#endif
+2:	mov -1, %g2
+	st %g2, [%g6 + AOFF_task_lock_depth]	! depth = -1: first-level hold
+	wrpr %g3, %pil			! restore saved interrupt level
+9:	jmpl %o7 + 0x8, %g0		! return
+	mov %g5, %o7			! delay slot: restore caller's %o7 from %g5
+
+#undef NO_PROC_ID
+#define NO_PROC_ID 0xff
+
+	.globl ___unlock_kernel
+___unlock_kernel:			! release big kernel lock; entry: %g1=lock addr, %g2=lock_depth, %g6=current
+	addcc %g2, 1, %g2		! increment depth toward 0
+	rdpr %pil, %g3			! save interrupt level
+	bne,a 1f			! still negative: nested hold,
+	st %g2, [%g6 + AOFF_task_lock_depth]	! annulled unless taken - just store new depth
+	wrpr 15, %pil			! block interrupts for the final release
+	mov NO_PROC_ID, %g2
+	stb %g2, [%g1 + 1]		! holder byte = NO_PROC_ID (0xff)
+	stb %g0, [%g1 + 0]		! clear lock byte - lock is free
+	st %g0, [%g6 + AOFF_task_lock_depth]	! depth = 0: not held
+	wrpr %g3, %pil			! restore interrupt level
+1:	jmpl %o7 + 0x8, %g0		! return
+	mov %g5, %o7			! delay slot: restore caller's %o7 from %g5
+
diff --git a/arch/sparc64/lib/memcmp.S b/arch/sparc64/lib/memcmp.S
new file mode 100644
index 000000000..4c08d57c3
--- /dev/null
+++ b/arch/sparc64/lib/memcmp.S
@@ -0,0 +1,29 @@
+/* $Id: memcmp.S,v 1.2 1997/04/01 03:43:18 davem Exp $
+ * Sparc64 optimized memcmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+ .text
+ .align 4
+ .globl __memcmp, memcmp
+__memcmp:				! memcmp(s1=%o0, s2=%o1, n=%o2): first byte difference in %o0, 0 if equal
+memcmp:
+	brlez,pn %o2, 2f		! n <= 0: buffers compare equal
+	sub %g0, %o2, %o3		! %o3 = -n: negative index counting up to 0
+	add %o0, %o2, %o0		! point %o0 one past the end of s1
+	add %o1, %o2, %o1		! point %o1 one past the end of s2
+	ldub [%o0 + %o3], %o4		! preload first byte of s1
+1:
+	ldub [%o1 + %o3], %o5		! matching byte of s2
+	sub %o4, %o5, %o4		! s1[i] - s2[i]
+	brnz,pn %o4, 3f			! mismatch - return the difference
+	addcc %o3, 1, %o3		! delay slot (always executes): bump index, set codes
+	bne,a,pt %xcc, 1b		! index not yet 0: more bytes to compare
+	ldub [%o0 + %o3], %o4		! annulled unless branch taken: next s1 byte
+2:
+	retl				! all n bytes matched
+	clr %o0				! return 0
+3:
+	retl				! return difference of the first
+	mov %o4, %o0			! mismatching bytes
diff --git a/arch/sparc64/lib/memcpy.S b/arch/sparc64/lib/memcpy.S
new file mode 100644
index 000000000..e9462345a
--- /dev/null
+++ b/arch/sparc64/lib/memcpy.S
@@ -0,0 +1,526 @@
+/* memcpy.S: Sparc optimized memcpy, bcopy and memmove code
+ * Hand optimized from GNU libc's memcpy, bcopy and memmove
+ * for UltraSparc
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/head.h>
+
+#ifdef __KERNEL__
+
+#define FUNC(x) \
+ .globl x; \
+ .type x,@function; \
+ .align 4; \
+x:
+
+#define FASTER_ALIGNED
+
+/* In kernel these functions don't return a value.
+ * One should use macros in asm/string.h for that purpose.
+ * We return 0, so that bugs are more apparent.
+ */
+#define SETUP_RETL
+#define PRE_RETL sethi %uhi(KERNBASE), %g4; clr %o0
+#define RETL_INSN sllx %g4, 32, %g4
+
+#else
+
+/* libc */
+
+#define FASTER_ALIGNED
+
+#ifdef DEBUG
+#define FUNC(x) \
+ .globl jj##x##1; \
+ .type jj##x##1,@function; \
+ .align 4; \
+jj##x##1:
+#else
+#include "DEFS.h"
+#endif
+
+#define SETUP_RETL mov %o0, %g6
+#define PRE_RETL
+#define RETL_INSN mov %g6, %o0
+
+#endif
+
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldd [%src + offset + 0x00], %t0; \
+ ldd [%src + offset + 0x08], %t2; \
+ ldd [%src + offset + 0x10], %t4; \
+ ldd [%src + offset + 0x18], %t6; \
+ stw %t0, [%dst + offset + 0x00]; \
+ stw %t1, [%dst + offset + 0x04]; \
+ stw %t2, [%dst + offset + 0x08]; \
+ stw %t3, [%dst + offset + 0x0c]; \
+ stw %t4, [%dst + offset + 0x10]; \
+ stw %t5, [%dst + offset + 0x14]; \
+ stw %t6, [%dst + offset + 0x18]; \
+ stw %t7, [%dst + offset + 0x1c];
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldx [%src + offset + 0x00], %t0; \
+ ldx [%src + offset + 0x08], %t1; \
+ ldx [%src + offset + 0x10], %t2; \
+ ldx [%src + offset + 0x18], %t3; \
+ ldx [%src + offset + 0x20], %t4; \
+ ldx [%src + offset + 0x28], %t5; \
+ ldx [%src + offset + 0x30], %t6; \
+ ldx [%src + offset + 0x38], %t7; \
+ stx %t0, [%dst + offset + 0x00]; \
+ stx %t1, [%dst + offset + 0x08]; \
+ stx %t2, [%dst + offset + 0x10]; \
+ stx %t3, [%dst + offset + 0x18]; \
+ stx %t4, [%dst + offset + 0x20]; \
+ stx %t5, [%dst + offset + 0x28]; \
+ stx %t6, [%dst + offset + 0x30]; \
+ stx %t7, [%dst + offset + 0x38];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ldd [%src - offset - 0x10], %t0; \
+ ldd [%src - offset - 0x08], %t2; \
+ stw %t0, [%dst - offset - 0x10]; \
+ stw %t1, [%dst - offset - 0x0c]; \
+ stw %t2, [%dst - offset - 0x08]; \
+ stw %t3, [%dst - offset - 0x04];
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
+ ldx [%src - offset - 0x10], %t0; \
+ ldx [%src - offset - 0x08], %t1; \
+ stx %t0, [%dst - offset - 0x10]; \
+ stx %t1, [%dst - offset - 0x08];
+
+#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
+ ldub [%src - offset - 0x02], %t0; \
+ ldub [%src - offset - 0x01], %t1; \
+ stb %t0, [%dst - offset - 0x02]; \
+ stb %t1, [%dst - offset - 0x01];
+
+ .text
+ .align 4
+
+FUNC(bcopy)
+
+ mov %o0, %o3
+ mov %o1, %o0
+ mov %o3, %o1
+ brgez,a,pt %o2, 1f
+ cmp %o0, %o1
+
+ retl
+	 nop	! Only bcopy returns here and it returns void...
+
+#ifdef __KERNEL__
+FUNC(amemmove)
+FUNC(__memmove)
+#endif
+FUNC(memmove)
+
+ cmp %o0, %o1
+1:
+ SETUP_RETL
+ bleu,pt %xcc, 9f
+ sub %o0, %o1, %o4
+
+ add %o1, %o2, %o3
+ cmp %o3, %o0
+ bleu,pt %xcc, 0f
+ andcc %o4, 3, %o5
+
+ add %o1, %o2, %o1
+ add %o0, %o2, %o0
+ sub %o1, 1, %o1
+ sub %o0, 1, %o0
+
+1:
+ ldub [%o1], %o4
+ subcc %o2, 1, %o2
+ sub %o1, 1, %o1
+ stb %o4, [%o0]
+ bne,pt %icc, 1b
+ sub %o0, 1, %o0
+
+ PRE_RETL
+ retl
+ RETL_INSN
+
+#ifdef __KERNEL__
+FUNC(__memcpy)
+#endif
+FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
+
+ sub %o0, %o1, %o4
+ SETUP_RETL
+9:
+ andcc %o4, 3, %o5
+0:
+ bne,pn %icc, 86f
+ cmp %o2, 15
+
+ bleu,pn %xcc, 90f
+ andcc %o1, 3, %g0
+
+ be,a,pt %icc, 3f ! check if we need to align
+ andcc %o1, 4, %g0
+
+ andcc %o1, 1, %g0
+ be,pn %icc, 4f
+ andcc %o1, 2, %g0
+
+ ldub [%o1], %g2
+ add %o1, 1, %o1
+ sub %o2, 1, %o2
+ stb %g2, [%o0]
+ bne,pn %icc, 5f
+ add %o0, 1, %o0
+4:
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sub %o2, 2, %o2
+ sth %g2, [%o0]
+ add %o0, 2, %o0
+5:
+ andcc %o1, 4, %g0
+3:
+ be,pn %icc, 2f
+ mov %o2, %g1
+
+ lduw [%o1], %o4
+ sub %g1, 4, %g1
+ stw %o4, [%o0]
+ add %o1, 4, %o1
+ add %o0, 4, %o0
+2:
+ andcc %g1, -128, %g7
+ be,pn %xcc, 3f
+ andcc %o0, 4, %g0
+
+ be,a,pn %icc, 82f + 4
+ ldx [%o1], %o2
+5:
+ MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, 5b
+ add %o0, 128, %o0
+3:
+ andcc %g1, 0x70, %g7
+ be,pn %icc, 80f
+ andcc %g1, 8, %g0
+79:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + %lo(80f-79b), %g0
+ add %o0, %g7, %o0
+
+ MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
+ MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
+
+80: /* memcpy_table_end */
+ be,pt %icc, 81f
+ andcc %g1, 4, %g0
+
+ ldd [%o1], %g2
+ add %o0, 8, %o0
+ stw %g2, [%o0 - 0x08]
+ add %o1, 8, %o1
+ stw %g3, [%o0 - 0x04]
+
+81: /* memcpy_last7 */
+
+ be,pt %icc, 1f
+ andcc %g1, 2, %g0
+
+ lduw [%o1], %g2
+ add %o1, 4, %o1
+ stw %g2, [%o0]
+ add %o0, 4, %o0
+1:
+ be,pt %icc, 1f
+ andcc %g1, 1, %g0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ add %o0, 2, %o0
+1:
+ be,pt %icc, 1f
+ nop
+
+ ldub [%o1], %g2
+ stb %g2, [%o0]
+1:
+ PRE_RETL
+ retl
+ RETL_INSN
+
+82: /* ldx_stx */
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
+ subcc %g7, 128, %g7
+ add %o1, 128, %o1
+ bne,pt %xcc, 82b
+ add %o0, 128, %o0
+
+#ifndef FASTER_ALIGNED
+
+ andcc %g1, 0x70, %g7
+ be,pn %icc, 80b
+ andcc %g1, 8, %g0
+83:
+ rd %pc, %o5
+ srl %g7, 1, %o4
+ add %g7, %o4, %o4
+ add %o1, %g7, %o1
+ sub %o5, %o4, %o5
+ jmpl %o5 + %lo(80b - 83b), %g0
+ add %o0, %g7, %o0
+
+#else /* FASTER_ALIGNED */
+
+ andcc %g1, 0x70, %g7
+ be,pn %icc, 84f
+ andcc %g1, 8, %g0
+83:
+ rd %pc, %o5
+ add %o1, %g7, %o1
+ sub %o5, %g7, %o5
+ jmpl %o5 + %lo(84f - 83b), %g0
+ add %o0, %g7, %o0
+
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
+
+84: /* amemcpy_table_end */
+ be,pt %icc, 85f
+ andcc %g1, 4, %g0
+
+ ldx [%o1], %g2
+ add %o1, 8, %o1
+ stx %g2, [%o0]
+ add %o0, 8, %o0
+85: /* amemcpy_last7 */
+ be,pt %icc, 1f
+ andcc %g1, 2, %g0
+
+ lduw [%o1], %g2
+ add %o1, 4, %o1
+ stw %g2, [%o0]
+ add %o0, 4, %o0
+1:
+ be,pt %icc, 1f
+ andcc %g1, 1, %g0
+
+ lduh [%o1], %g2
+ add %o1, 2, %o1
+ sth %g2, [%o0]
+ add %o0, 2, %o0
+1:
+ be,pt %icc, 1f
+ nop
+
+ ldub [%o1], %g2
+ stb %g2, [%o0]
+1:
+ PRE_RETL
+ retl
+ RETL_INSN
+
+#endif /* FASTER_ALIGNED */
+
+86: /* non_aligned */
+ cmp %o2, 15
+ bleu,pn %xcc, 88f
+
+ andcc %o0, 3, %g0
+ be,pn %icc, 61f
+ andcc %o0, 1, %g0
+ be,pn %icc, 60f
+ andcc %o0, 2, %g0
+
+ ldub [%o1], %g5
+ add %o1, 1, %o1
+ stb %g5, [%o0]
+ sub %o2, 1, %o2
+ bne,pn %icc, 61f
+ add %o0, 1, %o0
+60:
+ ldub [%o1], %g3
+ add %o1, 2, %o1
+ stb %g3, [%o0]
+ sub %o2, 2, %o2
+ ldub [%o1 - 1], %g3
+ add %o0, 2, %o0
+ stb %g3, [%o0 - 1]
+61:
+ and %o1, 3, %g2
+ and %o2, 0xc, %g3
+ and %o1, -4, %o1
+ cmp %g3, 4
+ sll %g2, 3, %g4
+ mov 32, %g2
+ be,pn %icc, 4f
+ sub %g2, %g4, %g7
+
+ blu,pn %icc, 3f
+ cmp %g3, 0x8
+
+ be,pn %icc, 2f
+ srl %o2, 2, %g3
+
+ lduw [%o1], %o3
+ add %o0, -8, %o0
+ lduw [%o1 + 4], %o4
+ ba,pt %xcc, 8f
+ add %g3, 1, %g3
+2:
+ lduw [%o1], %o4
+ add %o0, -12, %o0
+ lduw [%o1 + 4], %o5
+ add %g3, 2, %g3
+ ba,pt %xcc, 9f
+ add %o1, -4, %o1
+3:
+ lduw [%o1], %g1
+ add %o0, -4, %o0
+ lduw [%o1 + 4], %o3
+ srl %o2, 2, %g3
+ ba,pt %xcc, 7f
+ add %o1, 4, %o1
+4:
+ lduw [%o1], %o5
+ cmp %o2, 7
+ lduw [%o1 + 4], %g1
+ srl %o2, 2, %g3
+ bleu,pn %xcc, 10f
+ add %o1, 8, %o1
+
+ lduw [%o1], %o3
+ add %g3, -1, %g3
+5:
+ sll %o5, %g4, %g2
+ srl %g1, %g7, %g5
+ or %g2, %g5, %g2
+ stw %g2, [%o0]
+7:
+ lduw [%o1 + 4], %o4
+ sll %g1, %g4, %g2
+ srl %o3, %g7, %g5
+ or %g2, %g5, %g2
+ stw %g2, [%o0 + 4]
+8:
+ lduw [%o1 + 8], %o5
+ sll %o3, %g4, %g2
+ srl %o4, %g7, %g5
+ or %g2, %g5, %g2
+ stw %g2, [%o0 + 8]
+9:
+ lduw [%o1 + 12], %g1
+ sll %o4, %g4, %g2
+ srl %o5, %g7, %g5
+ addcc %g3, -4, %g3
+ or %g2, %g5, %g2
+ add %o1, 16, %o1
+ stw %g2, [%o0 + 12]
+ add %o0, 16, %o0
+ bne,a,pt %xcc, 5b
+ lduw [%o1], %o3
+10:
+ sll %o5, %g4, %g2
+ srl %g1, %g7, %g5
+ srl %g7, 3, %g3
+ or %g2, %g5, %g2
+ sub %o1, %g3, %o1
+ andcc %o2, 2, %g0
+ stw %g2, [%o0]
+ be,pt %icc, 1f
+ andcc %o2, 1, %g0
+
+ ldub [%o1], %g2
+ add %o1, 2, %o1
+ stb %g2, [%o0 + 4]
+ add %o0, 2, %o0
+ ldub [%o1 - 1], %g2
+ stb %g2, [%o0 + 3]
+1:
+ be,pt %icc, 1f
+ nop
+
+ ldub [%o1], %g2
+ stb %g2, [%o0 + 4]
+1:
+ PRE_RETL
+ retl
+ RETL_INSN
+
+88: /* short_end */
+
+ and %o2, 0xe, %o3
+20:
+ rd %pc, %o5
+ sll %o3, 3, %o4
+ add %o0, %o3, %o0
+ sub %o5, %o4, %o5
+ add %o1, %o3, %o1
+ jmpl %o5 + %lo(89f - 20b), %g0
+ andcc %o2, 1, %g0
+
+ MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+
+89: /* short_table_end */
+
+ be,pt %icc, 1f
+ nop
+
+ ldub [%o1], %g2
+ stb %g2, [%o0]
+1:
+ PRE_RETL
+ retl
+ RETL_INSN
+
+90: /* short_aligned_end */
+ bne,pn %xcc, 88b
+ andcc %o2, 8, %g0
+
+ be,pt %icc, 1f
+ andcc %o2, 4, %g0
+
+ lduw [%o1 + 0x00], %g2
+ lduw [%o1 + 0x04], %g3
+ add %o1, 8, %o1
+ stw %g2, [%o0 + 0x00]
+ stw %g3, [%o0 + 0x04]
+ add %o0, 8, %o0
+1:
+ ba,pt %xcc, 81b
+ mov %o2, %g1
diff --git a/arch/sparc64/lib/memscan.S b/arch/sparc64/lib/memscan.S
new file mode 100644
index 000000000..83abe4040
--- /dev/null
+++ b/arch/sparc64/lib/memscan.S
@@ -0,0 +1,116 @@
+/* $Id: memscan.S,v 1.1 1997/03/14 21:04:24 jj Exp $
+ * memscan.S: Optimized memscan for the Sparc64.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/* In essence, this is just a fancy strlen. */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .text
+ .align 4
+ .globl __memscan_zero, __memscan_generic
+ .globl memscan
+__memscan_zero:
+ /* %o0 = addr, %o1 = size */
+ brlez,pn %o1, 0f
+ andcc %o0, 3, %g0
+ be,pt %icc, 9f
+ sethi %hi(HI_MAGIC), %o4
+ ldub [%o0], %o5
+ subcc %o1, 1, %o1
+ brz,pn %o5, 10f
+ add %o0, 1, %o0
+ be,pn %xcc, 0f
+ andcc %o0, 3, %g0
+ be,pn %icc, 4f
+ or %o4, %lo(HI_MAGIC), %o3
+ ldub [%o0], %o5
+ subcc %o1, 1, %o1
+ brz,pn %o5, 10f
+ add %o0, 1, %o0
+ be,pn %xcc, 0f
+ andcc %o0, 3, %g0
+ be,pt %icc, 5f
+ sethi %hi(LO_MAGIC), %o4
+ ldub [%o0], %o5
+ subcc %o1, 1, %o1
+ brz,pn %o5, 10f
+ add %o0, 1, %o0
+ be,pn %xcc, 0f
+ or %o4, %lo(LO_MAGIC), %o2
+ ba,pt %xcc, 2f
+ ld [%o0], %o5
+9:
+ or %o4, %lo(HI_MAGIC), %o3
+4:
+ sethi %hi(LO_MAGIC), %o4
+5:
+ or %o4, %lo(LO_MAGIC), %o2
+ ld [%o0], %o5
+2:
+ sub %o5, %o2, %o4
+ sub %o1, 4, %o1
+ andcc %o4, %o3, %g0
+ be,pn %icc, 1f
+ add %o0, 4, %o0
+ brgz,pt %o1, 2b
+ ld [%o0], %o5
+
+ retl
+ add %o0, %o1, %o0
+1:
+ /* Check every byte. */
+ srl %o5, 24, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o0, -4, %o4
+ srl %o5, 16, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ srl %o5, 8, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ andcc %o5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ brgz,pt %o1, 2b
+ ld [%o0], %o5
+1:
+ add %o0, %o1, %o0
+ cmp %o4, %o0
+ retl
+ movle %xcc, %o4, %o0
+0:
+ retl
+ nop
+10:
+ retl
+ sub %o0, 1, %o0
+
+memscan:
+__memscan_generic:
+ /* %o0 = addr, %o1 = c, %o2 = size */
+ brz,pn %o2, 3f
+ add %o0, %o2, %o3
+ ldub [%o0], %o5
+ sub %g0, %o2, %o4
+1:
+ cmp %o5, %o1
+ be,pn %icc, 2f
+ addcc %o4, 1, %o4
+ bne,a,pt %xcc, 1b
+ ldub [%o3 + %o4], %o5
+ retl
+ /* The delay slot is the same as the next insn, this is just to make it look more awful */
+2:
+ add %o3, %o4, %o0
+ retl
+ sub %o0, 1, %o0
+3:
+ retl
+ nop
diff --git a/arch/sparc64/lib/memset.S b/arch/sparc64/lib/memset.S
new file mode 100644
index 000000000..55de4ea9d
--- /dev/null
+++ b/arch/sparc64/lib/memset.S
@@ -0,0 +1,196 @@
+/* linux/arch/sparc64/lib/memset.S: Sparc optimized memset, bzero and clear_user code
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Returns 0, if ok, and number of bytes not yet set if exception
+ * occurs and we were called as clear_user.
+ */
+
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+
+#define EX(x,y,a,b,z) \
+98: x,y; \
+ .section .fixup,z##alloc,z##execinstr; \
+ .align 4; \
+99: ba,pt %xcc, 30f; \
+ a, b, %o0; \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word 98b, 99b; \
+ .text; \
+ .align 4
+
+#define EXT(start,end,handler,z) \
+ .section __ex_table,z##alloc; \
+ .align 4; \
+ .word start, 0, end, handler; \
+ .text; \
+ .align 4
+
+/* Please don't change these macros, unless you change the logic
+ * in the .fixup section below as well.
+ * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
+#define ZERO_BIG_BLOCK(base, offset, source) \
+ stxa source, [base + offset + 0x00] %asi; \
+ stxa source, [base + offset + 0x08] %asi; \
+ stxa source, [base + offset + 0x10] %asi; \
+ stxa source, [base + offset + 0x18] %asi; \
+ stxa source, [base + offset + 0x20] %asi; \
+ stxa source, [base + offset + 0x28] %asi; \
+ stxa source, [base + offset + 0x30] %asi; \
+ stxa source, [base + offset + 0x38] %asi;
+
+#define ZERO_LAST_BLOCKS(base, offset, source) \
+ stxa source, [base - offset - 0x38] %asi; \
+ stxa source, [base - offset - 0x30] %asi; \
+ stxa source, [base - offset - 0x28] %asi; \
+ stxa source, [base - offset - 0x20] %asi; \
+ stxa source, [base - offset - 0x18] %asi; \
+ stxa source, [base - offset - 0x10] %asi; \
+ stxa source, [base - offset - 0x08] %asi; \
+ stxa source, [base - offset - 0x00] %asi;
+
+ .text
+ .align 4
+
+ .globl __bzero, __memset, __bzero_noasi
+ .globl memset, __memset_start, __memset_end
+__memset_start:
+__memset:
+memset:
+ and %o1, 0xff, %g3
+ sll %g3, 8, %g2
+ or %g3, %g2, %g3
+ sll %g3, 16, %g2
+ or %g3, %g2, %g3
+ mov %o2, %o1
+ wr %g0, ASI_P, %asi
+ sllx %g3, 32, %g2
+ ba,pt %xcc, 1f
+ or %g3, %g2, %g3
+__bzero:
+ wr %g0, ASI_P, %asi
+__bzero_noasi:
+ mov %g0, %g3
+1:
+ cmp %o1, 7
+ bleu,pn %xcc, 7f
+ andcc %o0, 3, %o2
+
+ be,a,pt %icc, 4f
+ andcc %o0, 4, %g0
+
+ cmp %o2, 3
+ be,pn %icc, 2f
+ EX(stba %g3, [%o0] %asi, sub %o1, 0,#)
+
+ cmp %o2, 2
+ be,pt %icc, 2f
+ EX(stba %g3, [%o0 + 0x01] %asi, sub %o1, 1,#)
+
+ EX(stba %g3, [%o0 + 0x02] %asi, sub %o1, 2,#)
+2:
+ sub %o2, 4, %o2
+ sub %o0, %o2, %o0
+ add %o1, %o2, %o1
+ andcc %o0, 4, %g0
+4:
+ be,a,pt %icc, 2f
+ andncc %o1, 0x7f, %o3
+
+ EX(sta %g3, [%o0] %asi, sub %o1, 0,#)
+ sub %o1, 4, %o1
+ add %o0, 4, %o0
+ andncc %o1, 0x7f, %o3 ! Now everything is 8 aligned and o1 is len to run
+2:
+ be,pn %xcc, 9f
+ andcc %o1, 0x78, %o2
+10:
+ ZERO_BIG_BLOCK(%o0, 0x00, %g3)
+ subcc %o3, 128, %o3
+ ZERO_BIG_BLOCK(%o0, 0x40, %g3)
+11:
+ EXT(10b, 11b, 20f,#)
+ bne,pt %xcc, 10b
+ add %o0, 128, %o0
+
+ tst %o2
+9:
+ be,pn %xcc, 13f
+ andcc %o1, 7, %o1
+14:
+ rd %pc, %o4
+ srl %o2, 1, %o3
+ sub %o4, %o3, %o4
+ jmpl %o4 + (13f - 14b), %g0
+ add %o0, %o2, %o0
+12:
+ ZERO_LAST_BLOCKS(%o0, 0x48, %g3)
+ ZERO_LAST_BLOCKS(%o0, 0x08, %g3)
+13:
+ be,pn %icc, 8f
+ andcc %o1, 4, %g0
+
+ be,pn %icc, 1f
+ andcc %o1, 2, %g0
+
+ EX(sta %g3, [%o0] %asi, and %o1, 7,#)
+ add %o0, 4, %o0
+1:
+ be,pn %icc, 1f
+ andcc %o1, 1, %g0
+
+ EX(stha %g3, [%o0] %asi, and %o1, 3,#)
+ add %o0, 2, %o0
+1:
+ bne,a,pn %icc, 8f
+ EX(stba %g3, [%o0] %asi, and %o1, 1,#)
+8:
+ retl
+ clr %o0
+7:
+ be,pn %icc, 13b
+ orcc %o1, 0, %g0
+
+ be,pn %icc, 0f
+8:
+ add %o0, 1, %o0
+ subcc %o1, 1, %o1
+ bne,a,pt %icc, 8b
+ EX(stba %g3, [%o0 - 1] %asi, add %o1, 1,#)
+0:
+ retl
+ clr %o0
+__memset_end:
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+20:
+ cmp %g2, 8
+ bleu,pn %xcc, 1f
+ and %o1, 0x7f, %o1
+ sub %g2, 9, %g2
+ add %o3, 64, %o3
+1:
+ sll %g2, 3, %g2
+ add %o3, %o1, %o0
+ ba,pt %xcc, 30f
+ sub %o0, %g2, %o0
+21:
+ mov 8, %o0
+ and %o1, 7, %o1
+ sub %o0, %g2, %o0
+ sll %o0, 3, %o0
+ ba,pt %xcc, 30f
+ add %o0, %o1, %o0
+30:
+/* %o4 is faulting address, %o5 is %pc where fault occurred */
+ save %sp, -160, %sp
+ mov %i5, %o0
+ mov %i7, %o1
+ call lookup_fault
+ mov %i4, %o2
+ ret
+ restore
diff --git a/arch/sparc64/lib/strlen.S b/arch/sparc64/lib/strlen.S
new file mode 100644
index 000000000..5f2ec6bb4
--- /dev/null
+++ b/arch/sparc64/lib/strlen.S
@@ -0,0 +1,77 @@
+/* strlen.S: Sparc64 optimized strlen code
+ * Hand optimized from GNU libc's strlen
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .align 4
+ .global strlen
+strlen:
+ mov %o0, %o1
+ andcc %o0, 3, %g0
+ be,pt %icc, 9f
+ sethi %hi(HI_MAGIC), %o4
+ ldub [%o0], %o5
+ brz,pn %o5, 11f
+ add %o0, 1, %o0
+ andcc %o0, 3, %g0
+ be,pn %icc, 4f
+ or %o4, %lo(HI_MAGIC), %o3
+ ldub [%o0], %o5
+ brz,pn %o5, 12f
+ add %o0, 1, %o0
+ andcc %o0, 3, %g0
+ be,pt %icc, 5f
+ sethi %hi(LO_MAGIC), %o4
+ ldub [%o0], %o5
+ brz,pn %o5, 13f
+ add %o0, 1, %o0
+ ba,pt %icc, 8f
+ or %o4, %lo(LO_MAGIC), %o2
+9:
+ or %o4, %lo(HI_MAGIC), %o3
+4:
+ sethi %hi(LO_MAGIC), %o4
+5:
+ or %o4, %lo(LO_MAGIC), %o2
+8:
+ ld [%o0], %o5
+2:
+ sub %o5, %o2, %o4
+ andcc %o4, %o3, %g0
+ be,pt %icc, 8b
+ add %o0, 4, %o0
+
+ /* Check every byte. */
+ srl %o5, 24, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o0, -4, %o4
+ srl %o5, 16, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ srl %o5, 8, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ andcc %o5, 0xff, %g0
+ bne,a,pt %icc, 2b
+ ld [%o0], %o5
+ add %o4, 1, %o4
+1:
+ retl
+ sub %o4, %o1, %o0
+11:
+ retl
+ mov 0, %o0
+12:
+ retl
+ mov 1, %o0
+13:
+ retl
+ mov 2, %o0
diff --git a/arch/sparc64/lib/strlen_user.S b/arch/sparc64/lib/strlen_user.S
new file mode 100644
index 000000000..24bea73fd
--- /dev/null
+++ b/arch/sparc64/lib/strlen_user.S
@@ -0,0 +1,99 @@
+/* strlen_user.S: Sparc64 optimized strlen_user code
+ *
+ * Return length of string in userspace including terminating 0
+ * or 0 for error
+ *
+ * Copyright (C) 1991,1996 Free Software Foundation
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#define LO_MAGIC 0x01010101
+#define HI_MAGIC 0x80808080
+
+ .align 4
+ .global __strlen_user
+__strlen_user:
+ mov %o0, %o1
+ andcc %o0, 3, %g0
+ be,pt %icc, 9f
+ sethi %hi(HI_MAGIC), %o4
+10:
+ ldub [%o0], %o5
+ brz,pn %o5, 21f
+ add %o0, 1, %o0
+ andcc %o0, 3, %g0
+ be,pn %icc, 4f
+ or %o4, %lo(HI_MAGIC), %o3
+11:
+ ldub [%o0], %o5
+ brz,pn %o5, 22f
+ add %o0, 1, %o0
+ andcc %o0, 3, %g0
+ be,pt %icc, 5f
+ sethi %hi(LO_MAGIC), %o4
+12:
+ ldub [%o0], %o5
+ brz,pn %o5, 23f
+ add %o0, 1, %o0
+ ba,pt %icc, 13f
+ or %o4, %lo(LO_MAGIC), %o2
+9:
+ or %o4, %lo(HI_MAGIC), %o3
+4:
+ sethi %hi(LO_MAGIC), %o4
+5:
+ or %o4, %lo(LO_MAGIC), %o2
+13:
+ ld [%o0], %o5
+2:
+ sub %o5, %o2, %o4
+ andcc %o4, %o3, %g0
+ be,pt %icc, 13b
+ add %o0, 4, %o0
+
+ /* Check every byte. */
+ srl %o5, 24, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o0, -3, %o4
+ srl %o5, 16, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ srl %o5, 8, %g5
+ andcc %g5, 0xff, %g0
+ be,pn %icc, 1f
+ add %o4, 1, %o4
+ andcc %o5, 0xff, %g0
+ bne,a,pt %icc, 2b
+14:
+ ld [%o0], %o5
+ add %o4, 1, %o4
+1:
+ retl
+ sub %o4, %o1, %o0
+21:
+ retl
+ mov 1, %o0
+22:
+ retl
+ mov 2, %o0
+23:
+ retl
+ mov 3, %o0
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+30:
+ retl
+ clr %o0
+
+ .section __ex_table,#alloc
+ .align 4
+
+ .word 10b, 30b
+ .word 11b, 30b
+ .word 12b, 30b
+ .word 13b, 30b
+ .word 14b, 30b
diff --git a/arch/sparc64/lib/strncmp.S b/arch/sparc64/lib/strncmp.S
new file mode 100644
index 000000000..474ba7296
--- /dev/null
+++ b/arch/sparc64/lib/strncmp.S
@@ -0,0 +1,31 @@
+/* $Id: strncmp.S,v 1.2 1997/03/11 17:51:44 jj Exp $
+ * Sparc64 optimized strncmp code.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+
+ .text
+ .align 4
+ .global __strncmp, strncmp
+__strncmp:
+strncmp:
+ brlez,pn %o2, 3f
+ lduba [%o0] (ASI_PNF), %o3
+1:
+ add %o0, 1, %o0
+ ldub [%o1], %o4
+ brz,pn %o3, 2f
+ add %o1, 1, %o1
+ cmp %o3, %o4
+ bne,pn %icc, 2f
+ subcc %o2, 1, %o2
+ bne,a,pt %xcc, 1b
+ ldub [%o0], %o3
+2:
+ retl
+ sub %o3, %o4, %o0
+3:
+ retl
+ clr %o0
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
new file mode 100644
index 000000000..05a48eb5a
--- /dev/null
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -0,0 +1,54 @@
+/* strncpy_from_user.S: Sparc64 strncpy from userspace.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/asi.h>
+#include <asm/errno.h>
+
+ .text
+ .align 4
+
+ /* Must return:
+ *
+ * -EFAULT for an exception
+ * count if we hit the buffer limit
+ * bytes copied if we hit a null byte
+ */
+
+ .globl __strncpy_from_user
+__strncpy_from_user:
+ /* %o0=dest, %o1=src, %o2=count */
+ brlez,pn %o2, 3f
+ add %o1, %o2, %o1
+ sub %g0, %o2, %o3
+ add %o0, %o2, %o0
+10:
+ ldub [%o1 + %o3], %o4
+1:
+ brz,pn %o4, 2f
+ stb %o4, [%o0 + %o3]
+ addcc %o3, 1, %o3
+ bne,pt %xcc, 1b
+11:
+ ldub [%o1 + %o3], %o4
+ retl
+ mov %o2, %o0
+2:
+ add %o3, 1, %o3
+ retl
+ add %o2, %o3, %o0
+3:
+ retl
+ clr %o0
+
+ .section .fixup,#alloc,#execinstr
+ .align 4
+4:
+ retl
+ mov -EFAULT, %o0
+
+ .section __ex_table,#alloc
+ .align 4
+ .word 10b, 4b
+ .word 11b, 4b
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
new file mode 100644
index 000000000..c41c7a938
--- /dev/null
+++ b/arch/sparc64/mm/Makefile
@@ -0,0 +1,13 @@
+# $Id: Makefile,v 1.1 1996/12/26 10:24:22 davem Exp $
+# Makefile for the linux Sparc64-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mm.o
+O_OBJS := fault.o init.o generic.o asyncd.o extable.o
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/mm/asyncd.c b/arch/sparc64/mm/asyncd.c
new file mode 100644
index 000000000..4e7de16fb
--- /dev/null
+++ b/arch/sparc64/mm/asyncd.c
@@ -0,0 +1,272 @@
+/* $Id: asyncd.c,v 1.1 1996/12/26 10:24:24 davem Exp $
+ * The asyncd kernel daemon. This handles paging on behalf of
+ * processes that receive page faults due to remote (async) memory
+ * accesses.
+ *
+ * Idea and skeleton code courtesy of David Miller (bless his cotton socks)
+ *
+ * Implemented by tridge
+ */
+
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/sched.h>
+#include <linux/head.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/config.h>
+#include <linux/interrupt.h>
+
+#include <asm/dma.h>
+#include <asm/system.h> /* for cli()/sti() */
+#include <asm/segment.h> /* for memcpy_to/fromfs */
+#include <asm/bitops.h>
+#include <asm/pgtable.h>
+
+#define DEBUG 0
+
+#define WRITE_LIMIT 100
+#define LOOP_LIMIT 200
+
+static struct {
+ int faults, read, write, success, failure, errors;
+} stats;
+
+/*
+ * The wait queue for waking up the async daemon:
+ */
+static struct wait_queue * asyncd_wait = NULL;
+
+struct async_job {
+ volatile struct async_job *next;
+ int taskid;
+ struct mm_struct *mm;
+ unsigned long address;
+ int write;
+ void (*callback)(int,unsigned long,int,int);
+};
+
+static volatile struct async_job *async_queue = NULL;
+static volatile struct async_job *async_queue_end = NULL;
+
+static void add_to_async_queue(int taskid,
+ struct mm_struct *mm,
+ unsigned long address,
+ int write,
+ void (*callback)(int,unsigned long,int,int))
+{
+ struct async_job *a = kmalloc(sizeof(*a),GFP_ATOMIC);
+
+ if (!a) {
+ printk("ERROR: out of memory in asyncd\n");
+ a->callback(taskid,address,write,1);
+ return;
+ }
+
+ if (write)
+ stats.write++;
+ else
+ stats.read++;
+
+ a->next = NULL;
+ a->taskid = taskid;
+ a->mm = mm;
+ a->address = address;
+ a->write = write;
+ a->callback = callback;
+
+ if (!async_queue) {
+ async_queue = a;
+ } else {
+ async_queue_end->next = a;
+ }
+ async_queue_end = a;
+}
+
+
+void async_fault(unsigned long address, int write, int taskid,
+ void (*callback)(int,unsigned long,int,int))
+{
+ struct task_struct *tsk = task[taskid];
+ struct mm_struct *mm = tsk->mm;
+
+ stats.faults++;
+
+#if 0
+ printk("paging in %x for task=%d\n",address,taskid);
+#endif
+
+ add_to_async_queue(taskid, mm, address, write, callback);
+ wake_up(&asyncd_wait);
+ mark_bh(TQUEUE_BH);
+}
+
+static int fault_in_page(int taskid,
+ struct vm_area_struct *vma,
+ unsigned address,int write)
+{
+ static unsigned last_address;
+ static int last_task, loop_counter;
+ struct task_struct *tsk = task[taskid];
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (!tsk || !tsk->mm)
+ return 1;
+
+ if (!vma || (write && !(vma->vm_flags & VM_WRITE)))
+ goto bad_area;
+ if (vma->vm_start > address)
+ goto bad_area;
+
+ if (address == last_address && taskid == last_task) {
+ loop_counter++;
+ } else {
+ loop_counter = 0;
+ last_address = address;
+ last_task = taskid;
+ }
+
+ if (loop_counter == WRITE_LIMIT && !write) {
+ printk("MSC bug? setting write request\n");
+ stats.errors++;
+ write = 1;
+ }
+
+ if (loop_counter == LOOP_LIMIT) {
+ printk("MSC bug? failing request\n");
+ stats.errors++;
+ return 1;
+ }
+
+ pgd = pgd_offset(vma->vm_mm, address);
+ pmd = pmd_alloc(pgd,address);
+ if(!pmd)
+ goto no_memory;
+ pte = pte_alloc(pmd, address);
+ if(!pte)
+ goto no_memory;
+ if(!pte_present(*pte)) {
+ do_no_page(tsk, vma, address, write);
+ goto finish_up;
+ }
+ set_pte(pte, pte_mkyoung(*pte));
+ flush_tlb_page(vma, address);
+ if(!write)
+ goto finish_up;
+ if(pte_write(*pte)) {
+ set_pte(pte, pte_mkdirty(*pte));
+ flush_tlb_page(vma, address);
+ goto finish_up;
+ }
+ do_wp_page(tsk, vma, address, write);
+
+ /* Fall through for do_wp_page */
+finish_up:
+ stats.success++;
+ update_mmu_cache(vma, address, *pte);
+ return 0;
+
+no_memory:
+ stats.failure++;
+ oom(tsk);
+ return 1;
+
+bad_area:
+ stats.failure++;
+ tsk->tss.sig_address = address;
+ tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ send_sig(SIGSEGV, tsk, 1);
+ return 1;
+}
+
+
+/* Note the semaphore operations must be done here, and _not_
+ * in async_fault().
+ */
+static void run_async_queue(void)
+{
+ int ret;
+ unsigned flags;
+
+ while (async_queue) {
+ volatile struct async_job *a;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+
+ save_flags(flags); cli();
+ a = async_queue;
+ async_queue = async_queue->next;
+ restore_flags(flags);
+
+ mm = a->mm;
+
+ down(&mm->mmap_sem);
+ vma = find_vma(mm, a->address);
+ ret = fault_in_page(a->taskid,vma,a->address,a->write);
+#if DEBUG
+ printk("fault_in_page(task=%d addr=%x write=%d) = %d\n",
+ a->taskid,a->address,a->write,ret);
+#endif
+ a->callback(a->taskid,a->address,a->write,ret);
+ up(&mm->mmap_sem);
+ kfree_s((void *)a,sizeof(*a));
+ }
+}
+
+
+#if CONFIG_AP1000
+static void asyncd_info(void)
+{
+ printk("CID(%d) faults: total=%d read=%d write=%d success=%d fail=%d err=%d\n",
+ mpp_cid(),stats.faults, stats.read, stats.write, stats.success,
+ stats.failure, stats.errors);
+}
+#endif
+
+
+/*
+ * The background async daemon.
+ * Started as a kernel thread from the init process.
+ */
+int asyncd(void *unused)
+{
+ current->session = 1;
+ current->pgrp = 1;
+ sprintf(current->comm, "asyncd");
+ current->blocked = ~0UL; /* block all signals */
+
+ /* Give asyncd a realtime priority. */
+ current->policy = SCHED_FIFO;
+ current->priority = 32; /* Fixme --- we need to standardise our
+ namings for POSIX.4 realtime scheduling
+ priorities. */
+
+ printk("Started asyncd\n");
+
+#if CONFIG_AP1000
+ bif_add_debug_key('a',asyncd_info,"stats on asyncd");
+#endif
+
+ while (1) {
+ unsigned flags;
+
+ save_flags(flags); cli();
+
+ while (!async_queue) {
+ current->signal = 0;
+ interruptible_sleep_on(&asyncd_wait);
+ }
+
+ restore_flags(flags);
+
+ run_async_queue();
+ }
+}
+
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
new file mode 100644
index 000000000..b2df0e169
--- /dev/null
+++ b/arch/sparc64/mm/extable.c
@@ -0,0 +1,69 @@
+/*
+ * linux/arch/sparc64/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry __start___ex_table[];
+extern const struct exception_table_entry __stop___ex_table[];
+
+static unsigned long
+search_one_table(const struct exception_table_entry *start,
+ const struct exception_table_entry *last,
+ unsigned long value, unsigned long *g2)
+{
+ const struct exception_table_entry *first = start;
+ const struct exception_table_entry *mid;
+ long diff = 0;
+ while (first <= last) {
+ mid = (last - first) / 2 + first;
+ diff = mid->insn - value;
+ if (diff == 0) {
+ if (!mid->fixup) {
+ *g2 = 0;
+ return (mid + 1)->fixup;
+ } else
+ return mid->fixup;
+ } else if (diff < 0)
+ first = mid+1;
+ else
+ last = mid-1;
+ }
+ if (last->insn < value && !last->fixup && last[1].insn > value) {
+ *g2 = (value - last->insn)/4;
+ return last[1].fixup;
+ }
+ if (first > start && first[-1].insn < value
+ && !first[-1].fixup && first->insn < value) {
+ *g2 = (value - first[-1].insn)/4;
+ return first->fixup;
+ }
+ return 0;
+}
+
+unsigned long
+search_exception_table(unsigned long addr, unsigned long *g2)
+{
+ unsigned long ret;
+
+#ifndef CONFIG_MODULES
+ /* There is only the kernel to search. */
+ ret = search_one_table(__start___ex_table,
+ __stop___ex_table-1, addr, g2);
+ if (ret) return ret;
+#else
+ /* The kernel is the last "module" -- no need to treat it special. */
+ struct module *mp;
+ for (mp = module_list; mp != NULL; mp = mp->next) {
+ if (mp->ex_table_start == NULL)
+ continue;
+ ret = search_one_table(mp->ex_table_start,
+ mp->ex_table_end-1, addr, g2);
+ if (ret) return ret;
+ }
+#endif
+
+ return 0;
+}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
new file mode 100644
index 000000000..0dd118c8e
--- /dev/null
+++ b/arch/sparc64/mm/fault.c
@@ -0,0 +1,201 @@
+/* $Id: fault.c,v 1.4 1997/03/11 17:37:07 jj Exp $
+ * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/uaccess.h>
+
+#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
+
+extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+/* Nice, simple, prom library does all the sweating for us. ;) */
+/* Walk the PROM "available" memory list, record each physical bank in
+ * sp_banks[], terminate the array with a 0xdeadbeef/0 sentinel, mask
+ * every bank size down to a page boundary, and return the total number
+ * of bytes found.
+ */
+unsigned long prom_probe_memory (void)
+{
+	register struct linux_mlist_p1275 *mlist;
+	register unsigned long bytes, base_paddr, tally;
+	register int i;
+
+	i = 0;
+	mlist = *prom_meminfo()->p1275_available;
+	bytes = tally = mlist->num_bytes;
+	/* NOTE(review): the (unsigned int) cast truncates a 64-bit
+	 * physical address to 32 bits -- confirm this is intentional.
+	 */
+	base_paddr = (unsigned int) mlist->start_adr;
+
+	sp_banks[0].base_addr = base_paddr;
+	sp_banks[0].num_bytes = bytes;
+
+	while (mlist->theres_more != (void *) 0){
+		i++;
+		mlist = mlist->theres_more;
+		bytes = mlist->num_bytes;
+		tally += bytes;
+		if (i >= SPARC_PHYS_BANKS-1) {
+			printk ("The machine has more banks than "
+				"this kernel can support\n"
+				"Increase the SPARC_PHYS_BANKS "
+				"setting (currently %d)\n",
+				SPARC_PHYS_BANKS);
+			i = SPARC_PHYS_BANKS-1;
+			break;
+		}
+
+		sp_banks[i].base_addr = (unsigned long) mlist->start_adr;
+		sp_banks[i].num_bytes = mlist->num_bytes;
+	}
+
+	i++;
+	/* NOTE(review): if the bank limit was hit above, i is now
+	 * SPARC_PHYS_BANKS, so this sentinel is written one slot past
+	 * the declared end of sp_banks[] -- verify the array has a
+	 * spare terminator slot.
+	 */
+	sp_banks[i].base_addr = 0xdeadbeef;
+	sp_banks[i].num_bytes = 0;
+
+	/* Now mask all bank sizes on a page boundary, it is all we can
+	 * use anyways.
+	 */
+	for(i=0; sp_banks[i].num_bytes != 0; i++)
+		sp_banks[i].num_bytes &= PAGE_MASK;
+
+	return tally;
+}
+
+/* Traverse the memory lists in the prom to see how much physical we
+ * have.
+ */
+/* Return the total amount of physical memory reported by the PROM.
+ * Thin wrapper: all the work happens in prom_probe_memory() above.
+ */
+unsigned long
+probe_memory(void)
+{
+	unsigned long total;
+
+	total = prom_probe_memory();
+
+	/* Oh man, much nicer, keep the dirt in promlib. */
+	return total;
+}
+
+/* Report a kernel fault that could not be resolved (no vma, no
+ * exception-table fixup): print diagnostics about the faulting address
+ * and the current task's MM context, then oops via die_if_kernel().
+ */
+void unhandled_fault(unsigned long address, struct task_struct *tsk,
+		     struct pt_regs *regs)
+{
+	/* Addresses inside the first page are treated as NULL derefs. */
+	if((unsigned long) address < PAGE_SIZE) {
+		printk(KERN_ALERT "Unable to handle kernel NULL "
+		       "pointer dereference");
+	} else {
+		printk(KERN_ALERT "Unable to handle kernel paging request "
+		       "at virtual address %016lx\n", (unsigned long)address);
+	}
+	printk(KERN_ALERT "tsk->mm->context = %016lx\n",
+	       (unsigned long) tsk->mm->context);
+	printk(KERN_ALERT "tsk->mm->pgd = %016lx\n",
+	       (unsigned long) tsk->mm->pgd);
+	die_if_kernel("Oops", regs);
+}
+
+/* Classify a fault taken inside a user-access macro.  Consults the
+ * exception table for ret_pc and returns the table's class code (1-3)
+ * when the fault is fixable; otherwise declares the fault unhandled.
+ * Class 3: both loads and stores have fixups.  Class 1 (_to_ macros):
+ * only stores are fixable.  Class 2 (_from_ macros): only loads are
+ * fixable.  The checks on bits 21 and 19-24 of the instruction word
+ * decode load vs. store -- NOTE(review): assumed per the SPARC opcode
+ * encoding, confirm against the V9 manual.
+ */
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+			    unsigned long address)
+{
+	unsigned long g2;
+	int i;
+	unsigned insn;
+	struct pt_regs regs;
+
+	i = search_exception_table (ret_pc, &g2);
+	switch (i) {
+	/* load & store will be handled by fixup */
+	case 3: return 3;
+	/* store will be handled by fixup, load will bump out */
+	/* for _to_ macros */
+	case 1: insn = *(unsigned *)pc; if ((insn >> 21) & 1) return 1; break;
+	/* load will be handled by fixup, store will bump out */
+	/* for _from_ macros */
+	case 2: insn = *(unsigned *)pc; 
+		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15) return 2; 
+		break; 
+	default: break;
+	}
+	/* Not fixable: fabricate a minimal pt_regs and oops. */
+	memset (&regs, 0, sizeof (regs));
+	regs.tpc = pc;
+	regs.tnpc = pc + 4;
+	/* FIXME: Should set up regs->tstate? */
+	unhandled_fault (address, current, &regs);
+	/* Not reached */
+	return 0;
+}
+
+/* Main page-fault entry for sparc64.  Finds the vma covering
+ * 'address', grows the stack if the access falls just below a
+ * VM_GROWSDOWN mapping, checks read/write permissions, and hands the
+ * real work to handle_mm_fault().  On failure, kernel faults are
+ * redirected through the exception table when possible; user faults
+ * get SIGSEGV; anything else oopses via unhandled_fault().
+ * NOTE(review): the 'text_fault' parameter is never used here.
+ */
+asmlinkage void do_sparc64_fault(struct pt_regs *regs, int text_fault, int write,
+				 unsigned long address)
+{
+	struct vm_area_struct *vma;
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	unsigned long fixup;
+	unsigned long g2;
+	/* TSTATE_PRIV clear => trap came from user mode. */
+	int from_user = !(regs->tstate & TSTATE_PRIV);
+
+	lock_kernel ();
+	down(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if(!vma)
+		goto bad_area;
+	if(vma->vm_start <= address)
+		goto good_area;
+	if(!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if(expand_stack(vma, address))
+		goto bad_area;
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	if(write) {
+		if(!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		/* Allow reads even for write-only mappings */
+		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	handle_mm_fault(vma, address, write);
+	up(&mm->mmap_sem);
+	goto out;
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up(&mm->mmap_sem);
+	/* Is this in ex_table? */
+
+	g2 = regs->u_regs[UREG_G2];
+	if (!from_user && (fixup = search_exception_table (regs->tpc, &g2))) {
+		printk("Exception: PC<%016lx> faddr<%016lx>\n", regs->tpc, address);
+		printk("EX_TABLE: insn<%016lx> fixup<%016lx> g2<%016lx>\n",
+			regs->tpc, fixup, g2);
+		/* Resume at the fixup stub with %g2 holding the offset. */
+		regs->tpc = fixup;
+		regs->tnpc = regs->tpc + 4;
+		regs->u_regs[UREG_G2] = g2;
+		goto out;
+	}
+	if(from_user) {
+		tsk->tss.sig_address = address;
+		tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+		send_sig(SIGSEGV, tsk, 1);
+		goto out;
+	}
+	unhandled_fault (address, tsk, regs);
+out:
+	unlock_kernel();
+}
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
new file mode 100644
index 000000000..289ddd411
--- /dev/null
+++ b/arch/sparc64/mm/generic.c
@@ -0,0 +1,124 @@
+/* $Id: generic.c,v 1.1 1996/12/26 10:24:23 davem Exp $
+ * generic.c: Generic Sparc mm routines that are not dependent upon
+ * MMU type but are Sparc specific.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+
+/* Allocate a block of RAM which is aligned to its size.
+ * This procedure can be used until the call to mem_init().
+ */
+/* Allocate a block of RAM which is aligned to its size.
+ * This procedure can be used until the call to mem_init().
+ *
+ * 'kbrk' is the boot-time break pointer: it is rounded up to 'size'
+ * alignment and advanced past the allocation.  'size' must be a power
+ * of two (checked via the mask test below); the returned memory is
+ * zeroed.  Returns NULL for a zero-sized request.
+ */
+void *sparc_init_alloc(unsigned long *kbrk, unsigned long size)
+{
+	unsigned long mask = size - 1;
+	unsigned long ret;
+
+	if(!size)
+		return 0x0;
+	if(size & mask) {
+		/* Non-power-of-two size is a caller bug: stop the boot. */
+		prom_printf("panic: sparc_init_alloc botch\n");
+		prom_halt();
+	}
+	ret = (*kbrk + mask) & ~mask;
+	*kbrk = ret + size;
+	memset((void*) ret, 0, size);
+	return (void*) ret;
+}
+
+/* Release whatever 'page' referenced before it was overwritten:
+ * free the backing page (and drop the rss count) for present,
+ * non-reserved pages, or release the swap slot for swapped-out ones.
+ * Empty ptes are ignored.
+ */
+static inline void forget_pte(pte_t page)
+{
+	if (pte_none(page))
+		return;
+	if (pte_present(page)) {
+		unsigned long addr = pte_page(page);
+		/* Reserved or out-of-range pages are not ours to free. */
+		if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
+			return;
+		free_page(addr);
+		if (current->mm->rss <= 0)
+			return;
+		current->mm->rss--;
+		return;
+	}
+	swap_free(pte_val(page));
+}
+
+/* Remap IO memory, the same way as remap_page_range(), but use
+ * the obio memory space.
+ *
+ * They use a pgprot that sets PAGE_IO and does not check the
+ * mem_map table as this is independent of normal memory.
+ */
+/* Fill one pte table segment with I/O mappings: for each page in
+ * [address, address+size) within the current pmd span, drop whatever
+ * mapping was there (via forget_pte) and install an I/O pte built by
+ * mk_pte_io() from 'offset' in bus space 'space'.
+ */
+static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
+	unsigned long offset, pgprot_t prot, int space)
+{
+	unsigned long end;
+
+	/* Reduce 'address' to its offset within this pmd's span. */
+	address &= ~PMD_MASK;
+	end = address + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;
+	do {
+		pte_t oldpage = *pte;
+		pte_clear(pte);
+		set_pte(pte, mk_pte_io(offset, prot, space));
+		forget_pte(oldpage);
+		address += PAGE_SIZE;
+		offset += PAGE_SIZE;
+		pte++;
+	} while (address < end);
+}
+
+/* Walk/allocate the pte tables under one pgd entry and hand each
+ * pmd-sized piece of [address, address+size) to io_remap_pte_range().
+ * Returns -ENOMEM if a pte table cannot be allocated, else 0.
+ */
+static inline int io_remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
+	unsigned long offset, pgprot_t prot, int space)
+{
+	unsigned long end;
+
+	/* Reduce 'address' to its offset within this pgd's span. */
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	/* Bias offset so (address + offset) stays the true bus offset
+	 * as 'address' advances below.
+	 */
+	offset -= address;
+	do {
+		pte_t * pte = pte_alloc(pmd, address);
+		if (!pte)
+			return -ENOMEM;
+		io_remap_pte_range(pte, address, end - address, address + offset, prot, space);
+		address = (address + PMD_SIZE) & PMD_MASK;
+		pmd++;
+	} while (address < end);
+	return 0;
+}
+
+/* Map 'size' bytes of bus space 'space' starting at bus offset
+ * 'offset' into the current task's address space at 'from', the
+ * obio-space analogue of remap_page_range().  Returns 0 or -ENOMEM.
+ * NOTE(review): the 'prot' argument is immediately overwritten with
+ * pg_iobits below, so callers' protection bits are ignored -- confirm
+ * this is intended.
+ */
+int io_remap_page_range(unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
+{
+	int error = 0;
+	pgd_t * dir;
+	unsigned long beg = from;
+	unsigned long end = from + size;
+
+	prot = __pgprot(pg_iobits);
+	offset -= from;
+	dir = pgd_offset(current->mm, from);
+	flush_cache_range(current->mm, beg, end);
+	while (from < end) {
+		pmd_t *pmd = pmd_alloc(dir, from);
+		error = -ENOMEM;
+		if (!pmd)
+			break;
+		error = io_remap_pmd_range(pmd, from, end - from, offset + from, prot, space);
+		if (error)
+			break;
+		from = (from + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	}
+	/* Old translations for the range must not linger in the TLB. */
+	flush_tlb_range(current->mm, beg, end);
+	return error;
+}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
new file mode 100644
index 000000000..57ca5eb92
--- /dev/null
+++ b/arch/sparc64/mm/init.c
@@ -0,0 +1,730 @@
+/* $Id: init.c,v 1.24 1997/04/17 21:49:41 jj Exp $
+ * arch/sparc64/mm/init.c
+ *
+ * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/blk.h>
+#include <linux/swap.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/iommu.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/vaddrs.h>
+
+extern void show_net_buffers(void);
+extern unsigned long device_scan(unsigned long);
+
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+
+/* Ugly, but necessary... -DaveM */
+unsigned long phys_base, null_pmd_table, null_pte_table;
+
+extern unsigned long empty_null_pmd_table;
+extern unsigned long empty_null_pte_table;
+
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
+
+/* References to section boundaries */
+extern char __init_begin, __init_end, etext, __p1275_loc, __bss_start;
+
+/*
+ * BAD_PAGE is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * for a process dying in kernel mode, possibly leaving an inode
+ * unused etc..
+ *
+ * BAD_PAGETABLE is the accompanying page-table: it is initialized
+ * to point to BAD_PAGE entries.
+ *
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+/* Return the shared "bad" pmd table, (re)initialized so every entry
+ * points at the bad pte table.  Used when a real pmd cannot be
+ * allocated.
+ */
+pmd_t *__bad_pmd(void)
+{
+	pmd_t *pmdp = (pmd_t *) &empty_bad_pmd_table;
+
+	__init_pmd(pmdp);
+	return pmdp;
+}
+
+/* Return the shared "bad" pte table, zeroed, adjusted by phys_base so
+ * the address is valid under the aliased kernel mapping.
+ */
+pte_t *__bad_pte(void)
+{
+	memset((void *) &empty_bad_pte_table, 0, PAGE_SIZE);
+	return (pte_t *) (((unsigned long)&empty_bad_pte_table) + phys_base);
+}
+
+/* Return a dirty, shared-protection pte for the zeroed "bad" page,
+ * handed out when the system is out of memory during a fault.
+ */
+pte_t __bad_page(void)
+{
+	memset((void *) &empty_bad_page, 0, PAGE_SIZE);
+	return pte_mkdirty(mk_pte((((unsigned long) &empty_bad_page)+phys_base),
+				  PAGE_SHARED));
+}
+
+/* Dump a memory-usage summary to the console: free areas, swap, and a
+ * census of mem_map[] pages (total / free / reserved / shared), plus
+ * buffer and (if configured) network buffer statistics.
+ */
+void show_mem(void)
+{
+	int i,free = 0,total = 0,reserved = 0;
+	int shared = 0;
+
+	printk("\nMem-info:\n");
+	show_free_areas();
+	printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+	i = max_mapnr;
+	while (i-- > 0)  {
+		total++;
+		if (PageReserved(mem_map + i))
+			reserved++;
+		else if (!atomic_read(&mem_map[i].count))
+			free++;
+		else
+			/* count > 1 means the page is shared by count-1 extra users. */
+			shared += atomic_read(&mem_map[i].count) - 1;
+	}
+	printk("%d pages of RAM\n",total);
+	printk("%d free pages\n",free);
+	printk("%d reserved pages\n",reserved);
+	printk("%d pages shared\n",shared);
+	show_buffers();
+#ifdef CONFIG_NET
+	show_net_buffers();
+#endif
+}
+
+/* IOMMU support, the ideas are right, the code should be cleaned a bit still... */
+
+/* XXX Also, play with the streaming buffers at some point;
+ * XXX apparently both Fusion and Sunfire have them... -DaveM
+ */
+
+/* This keeps track of pages used in sparc_alloc_dvma() invocations. */
+static unsigned long dvma_map_pages[0x10000000 >> 16] = { 0, };
+static unsigned long dvma_pages_current_offset = 0;
+static int dvma_pages_current_index = 0;
+
+/* Boot-time IOMMU setup for one SBUS: map the SYSIO control registers
+ * (located via the PROM "reg" property), carve an iommu_struct and a
+ * 64K-entry TSB out of the boot memory pool, pre-fill the page table
+ * with identity 64K mappings for most of the 32-bit DVMA space, and
+ * enable the IOMMU.  Returns the advanced memory_start.
+ */
+__initfunc(unsigned long iommu_init(int iommu_node, unsigned long memory_start,
+		unsigned long memory_end, struct linux_sbus *sbus))
+{
+	struct iommu_struct *iommu;
+	struct sysio_regs *sregs;
+	struct linux_prom_registers rprop[2];
+	unsigned long impl, vers;
+	unsigned long control, tsbbase;
+	unsigned long *iopte;
+	int err, i;
+
+	err = prom_getproperty(iommu_node, "reg", (char *)rprop,
+			       sizeof(rprop));
+	if(err == -1) {
+		prom_printf("iommu_init: Cannot map SYSIO control registers.\n");
+		prom_halt();
+	}
+	sregs = (struct sysio_regs *) sparc_alloc_io(rprop[0].phys_addr,
+						     (void *)0,
+						     sizeof(struct sysio_regs),
+						     "SYSIO Regs",
+						     rprop[0].which_io, 0x0);
+
+	/* 8-byte align the iommu_struct allocation. */
+	memory_start = (memory_start + 7) & ~7;
+	iommu = (struct iommu_struct *) memory_start;
+	memory_start += sizeof(struct iommu_struct);
+	iommu->sysio_regs = sregs;
+	sbus->iommu = iommu;
+
+	control = sregs->iommu_control;
+	impl = (control & IOMMU_CTRL_IMPL) >> 60;
+	vers = (control & IOMMU_CTRL_VERS) >> 56;
+	printk("IOMMU: IMPL[%x] VERS[%x] SYSIO mapped at %016lx\n",
+	       (unsigned int) impl, (unsigned int)vers, (unsigned long) sregs);
+	
+	control &= ~(IOMMU_CTRL_TSBSZ);
+	control |= (IOMMU_TSBSZ_64K | IOMMU_CTRL_TBWSZ | IOMMU_CTRL_ENAB);
+
+	/* Use only 64k pages, things are laid out in the 32-bit SBUS
+	 * address space like this:
+	 *
+	 * 0x00000000	----------------------------------------
+	 *		| Direct physical mappings for most    |
+	 *		| DVMA to paddr's within this range    |
+	 * 0xf0000000	----------------------------------------
+	 * 		| For mappings requested via           |
+	 *		| sparc_alloc_dvma()		       |
+	 * 0xffffffff	----------------------------------------
+	 */
+	tsbbase = PAGE_ALIGN(memory_start);
+	/* 64K ptes of 8 bytes each. */
+	memory_start = (tsbbase + ((64 * 1024) * 8));
+	iommu->page_table = (iopte_t *) tsbbase;
+	iopte = (unsigned long *) tsbbase;
+
+	/* Setup aliased mappings... */
+	for(i = 0; i < (65536 - 4096); i++) {
+		*iopte  = (IOPTE_VALID | IOPTE_64K | IOPTE_CACHE | IOPTE_WRITE);
+		*iopte |= (i << 16);
+		iopte++;
+	}
+
+	/* Clear all sparc_alloc_dvma() maps. */
+	for( ; i < 65536; i++)
+		*iopte++ = 0;
+
+	sregs->iommu_tsbbase = __pa(tsbbase);
+	sregs->iommu_control = control;
+
+	return memory_start;
+}
+
+/* Back a sparc_alloc_dvma() request: allocate fresh 64K chunks when
+ * the current chunk is absent or too small, install them in the
+ * IOMMU's upper (0xf0000000+) region, map the same pages into the
+ * kernel's page tables at 'addr', and return the 32-bit DVMA address
+ * through *dvma_addr.
+ */
+void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr)
+{
+	struct iommu_struct *iommu = SBus_chain->iommu; /* GROSS ME OUT! */
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	/* Find out if we need to grab some pages. */
+	if(!dvma_map_pages[dvma_pages_current_index] ||
+	   ((dvma_pages_current_offset + len) > (1 << 16))) {
+		unsigned long *iopte;
+		/* Order-3 allocation: 8 pages = one 64K IOMMU page. */
+		unsigned long newpages = __get_free_pages(GFP_KERNEL, 3, 0);
+		int i;
+
+		if(!newpages)
+			panic("AIEEE cannot get DVMA pages.");
+
+		memset((char *)newpages, 0, (1 << 16));
+
+		if(!dvma_map_pages[dvma_pages_current_index]) {
+			dvma_map_pages[dvma_pages_current_index] = newpages;
+			i = dvma_pages_current_index;
+		} else {
+			dvma_map_pages[dvma_pages_current_index + 1] = newpages;
+			i = dvma_pages_current_index + 1;
+		}
+
+		/* Stick it in the IOMMU. */
+		i = (65536 - 4096) + i;
+		iopte = (unsigned long *)(iommu->page_table + i);
+		*iopte = (IOPTE_VALID | IOPTE_64K | IOPTE_CACHE | IOPTE_WRITE);
+		*iopte |= __pa(newpages);
+	}
+
+	/* Get this out of the way. */
+	*dvma_addr = (__u32) ((0xf0000000) +
+			      (dvma_pages_current_index << 16) +
+			      (dvma_pages_current_offset));
+
+	while(len > 0) {
+		while((len > 0) && (dvma_pages_current_offset < (1 << 16))) {
+			pte_t pte;
+			unsigned long the_page =
+				dvma_map_pages[dvma_pages_current_index] +
+				dvma_pages_current_offset;
+
+			/* Map the CPU's view. */
+			pgdp = pgd_offset(init_task.mm, addr);
+			pmdp = pmd_alloc_kernel(pgdp, addr);
+			ptep = pte_alloc_kernel(pmdp, addr);
+			pte = mk_pte(the_page, PAGE_KERNEL);
+			set_pte(ptep, pte);
+
+			dvma_pages_current_offset += PAGE_SIZE;
+			addr += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+		dvma_pages_current_index++;
+		dvma_pages_current_offset = 0;
+	}
+}
+
+/* Translate one CPU buffer to a 32-bit SBUS DVMA address.  Thanks to
+ * the identity IOMMU mappings set up in iommu_init(), the physical
+ * address works directly as long as it stays below 0xf0000000; GFP_DMA
+ * allocations guarantee that, so anything higher is a hard bug.
+ */
+__u32 mmu_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+{
+	__u32 sbus_addr = (__u32) __pa(vaddr);
+
+	if((sbus_addr < 0xf0000000) &&
+	   ((sbus_addr + len) < 0xf0000000))
+		return sbus_addr;
+
+	/* "can't happen"... GFP_DMA assures this. */
+	panic("Very high scsi_one mappings should never happen.");
+        return (__u32)0;
+}
+
+/* Fill in the DVMA address for each entry of a scatter-gather list,
+ * walking it from index sz down to 0.  Same identity-map rule as
+ * mmu_get_scsi_one(): physical addresses below 0xf0000000 are used
+ * as-is; anything else panics ("can't happen" with GFP_DMA).
+ */
+void mmu_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+{
+	while(sz >= 0) {
+		__u32 page = (__u32) __pa(((unsigned long) sg[sz].addr));
+		if((page < 0xf0000000) &&
+		   (page + sg[sz].len) < 0xf0000000) {
+			sg[sz].dvma_addr = page;
+		} else {
+			/* "can't happen"... GFP_DMA assures this. */
+			panic("scsi_sgl high mappings should never happen.");
+		}
+		sz--;
+	}
+}
+
+/* Return a static description of the MMU for /proc reporting. */
+char *mmu_info(void)
+{
+	/* XXX */
+	return "MMU Type: Spitfire\n\tFIXME: Write this\n";
+}
+
+static unsigned long mempool;
+
+struct linux_prom_translation {
+ unsigned long virt;
+ unsigned long size;
+ unsigned long data;
+};
+
+#define MAX_TRANSLATIONS 64
+/* Read the PROM's "translations" property from /virtual-memory and
+ * replicate every mapping in the 0xf0000000-0xffffffff window into the
+ * kernel page tables, allocating pmd/pte tables from the boot mempool
+ * as needed.  This keeps PROM-visible virtual addresses usable after
+ * we take over the MMU.
+ */
+static void inherit_prom_mappings(void)
+{
+	struct linux_prom_translation transl[MAX_TRANSLATIONS];
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	int node, n, i;
+
+	node = prom_finddevice("/virtual-memory");
+	if ((n = prom_getproperty(node, "translations", (char *) transl,
+				  sizeof(transl))) == -1) {
+		prom_printf("Couldn't get translation property\n");
+		prom_halt();
+	}
+	n = n / sizeof(transl[0]);
+
+	for (i = 0; i < n; i++) {
+		unsigned long vaddr;
+		
+		/* Only the PROM's reserved upper window is inherited. */
+		if (transl[i].virt >= 0xf0000000 && transl[i].virt < 0x100000000) {
+			for (vaddr = transl[i].virt;
+			     vaddr < transl[i].virt + transl[i].size;
+			     vaddr += PAGE_SIZE) {
+				pgdp = pgd_offset(init_task.mm, vaddr);
+				if (pgd_none(*pgdp)) {
+					pmdp = sparc_init_alloc(&mempool,
+								PMD_TABLE_SIZE);
+					__init_pmd(pmdp);
+					pgd_set(pgdp, pmdp);
+				}
+				pmdp = pmd_offset(pgdp, vaddr);
+				if (pmd_none(*pmdp)) {
+					ptep = sparc_init_alloc(&mempool,
+								PTE_TABLE_SIZE);
+					pmd_set(pmdp, ptep);
+				}
+				ptep = pte_offset(pmdp, vaddr);
+				set_pte (ptep, __pte(transl[i].data | _PAGE_MODIFIED));
+				transl[i].data += PAGE_SIZE;
+			}
+		}
+	}
+}
+
+/* Relocate the PROM's locked TLB entries out of the way: scan DTLB and
+ * ITLB slots 0-61 for a locked (_PAGE_L) entry, and move the first one
+ * found in each TLB up to slot 62 so our own use of the low slots
+ * cannot evict it.  At most one locked entry per TLB is handled.
+ */
+static void inherit_locked_prom_mappings(void)
+{
+	int i;
+	int dtlb_seen = 0;
+	int itlb_seen = 0;
+
+	/* Fucking losing PROM has more mappings in the TLB, but
+	 * it (conveniently) fails to mention any of these in the
+	 * translations property.  The only ones that matter are
+	 * the locked PROM tlb entries, so we impose the following
+	 * irrecoverable rule on the PROM, it is allowed 1 locked
+	 * entry in the ITLB and 1 in the DTLB.  We move those
+	 * (if necessary) up into tlb entry 62.
+	 *
+	 * Supposedly the upper 16GB of the address space is
+	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
+	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
+	 * used between the client program and the firmware on sun5
+	 * systems to coordinate mmu mappings is also COMPLETELY
+	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
+	 */
+	for(i = 0; i < 62; i++) {
+		unsigned long data;
+
+		data = spitfire_get_dtlb_data(i);
+		if(!dtlb_seen && (data & _PAGE_L)) {
+			unsigned long tag = spitfire_get_dtlb_tag(i);
+			/* Clear the old slot: zap the tag-access register
+			 * and the data word.
+			 */
+			__asm__ __volatile__("stxa %%g0, [%0] %1"
+					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+			membar("#Sync");
+			spitfire_put_dtlb_data(i, 0x0UL);
+			membar("#Sync");
+
+			/* Re-install it. */
+			__asm__ __volatile__("stxa %0, [%1] %2"
+					     : : "r" (tag), "r" (TLB_TAG_ACCESS),
+					     "i" (ASI_DMMU));
+			membar("#Sync");
+			spitfire_put_dtlb_data(62, data);
+			membar("#Sync");
+			dtlb_seen = 1;
+			if(itlb_seen)
+				break;
+		}
+		data = spitfire_get_itlb_data(i);
+		if(!itlb_seen && (data & _PAGE_L)) {
+			unsigned long tag = spitfire_get_itlb_tag(i);
+			__asm__ __volatile__("stxa %%g0, [%0] %1"
+					     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+			membar("#Sync");
+			spitfire_put_itlb_data(i, 0x0UL);
+			membar("#Sync");
+
+			/* Re-install it. */
+			__asm__ __volatile__("stxa %0, [%1] %2"
+					     : : "r" (tag), "r" (TLB_TAG_ACCESS),
+					     "i" (ASI_IMMU));
+			membar("#Sync");
+			spitfire_put_itlb_data(62, data);
+			membar("#Sync");
+			itlb_seen = 1;
+			if(dtlb_seen)
+				break;
+		}
+	}
+}
+
+/* Pre-allocate (from the boot mempool) the pmd and pte tables needed
+ * to cover the virtual range [start, end), without installing any
+ * actual pte mappings.  Later code can then set_pte() into the range
+ * without allocating.
+ */
+__initfunc(static void
+allocate_ptable_skeleton(unsigned long start, unsigned long end))
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	while (start < end) {
+		pgdp = pgd_offset(init_task.mm, start);
+		if (pgd_none(*pgdp)) {
+			pmdp = sparc_init_alloc(&mempool,
+						PMD_TABLE_SIZE);
+			__init_pmd(pmdp);
+			pgd_set(pgdp, pmdp);
+		}
+		pmdp = pmd_offset(pgdp, start);
+		if (pmd_none(*pmdp)) {
+			ptep = sparc_init_alloc(&mempool,
+						PTE_TABLE_SIZE);
+			pmd_set(pmdp, ptep);
+		}
+		start = (start + PMD_SIZE) & PMD_MASK;
+	}
+}
+
+/*
+ * Create a mapping for an I/O register. Have to make sure the side-effect
+ * bit is set.
+ */
+
+/* Install a single-page kernel mapping for an I/O register at
+ * virt_addr -> physaddr, using pg_iobits protections (read-only unless
+ * 'rdonly' is clear, in which case the dirty/writable bits are added).
+ * Assumes the pmd/pte tables for virt_addr already exist.
+ * NOTE(review): the 'bus' argument is unused here.
+ */
+void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
+			   int bus, int rdonly)
+{
+	pgd_t *pgdp = pgd_offset(init_task.mm, virt_addr);
+	pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
+	pte_t *ptep = pte_offset(pmdp, virt_addr);
+	pte_t pte;
+
+	physaddr &= PAGE_MASK;
+
+	if(rdonly)
+		pte = mk_pte_phys(physaddr, __pgprot(pg_iobits));
+	else
+		pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __DIRTY_BITS));
+
+	set_pte(ptep, pte);
+}
+
+/* Tear down a single-page I/O register mapping installed by
+ * sparc_ultra_mapioaddr(): clear the pte for virt_addr.
+ */
+void sparc_ultra_unmapioaddr(unsigned long virt_addr)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pgdp = pgd_offset(init_task.mm, virt_addr);
+	pmdp = pmd_offset(pgdp, virt_addr);
+	ptep = pte_offset(pmdp, virt_addr);
+
+	/* No need to flush uncacheable page. */
+	pte_clear(ptep);
+}
+
+#ifdef DEBUG_MMU
+/* Debug helper: print all 64 ITLB entries (tag,data pairs), two per
+ * line, via the PROM console.
+ */
+void sparc_ultra_dump_itlb(void)
+{
+        int slot;
+
+        prom_printf ("Contents of itlb:\n");
+        for (slot = 0; slot < 64; slot+=2) {
+        	prom_printf ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
+        		slot, spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
+        		slot+1, spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1));
+        }
+}
+
+/* Debug helper: print all 64 DTLB entries (tag,data pairs), two per
+ * line, via the PROM console.
+ */
+void sparc_ultra_dump_dtlb(void)
+{
+        int slot;
+
+        prom_printf ("Contents of dtlb:\n");
+        for (slot = 0; slot < 64; slot+=2) {
+        	prom_printf ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", 
+        		slot, spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
+        		slot+1, spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1));
+        }
+}
+#endif
+
+/* paging_init() sets up the page tables */
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+
+/* Set up the kernel page tables and take over the MMU from the PROM.
+ * Steps, in order-critical sequence: create a locked 4MB DTLB alias
+ * entry when RAM starts at/above 4MB (so PAGE_OFFSET references work
+ * before the real tables are live); initialize the null/bad pmd and
+ * pte tables; pre-build table skeletons for the I/O, DVMA, and low
+ * PROM-interface regions; inherit PROM translations; map the PROM
+ * interface page; install our trap table via setup_tba(); flush the
+ * temporary mappings; inherit locked PROM TLB entries; then hand free
+ * memory to free_area_init() and finish with device_scan().
+ */
+__initfunc(unsigned long 
+paging_init(unsigned long start_mem, unsigned long end_mem))
+{
+	extern unsigned long phys_base;
+	extern void setup_tba(unsigned long kpgdir);
+	extern void __bfill64(void *, unsigned long);
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep, pte;
+	int i;
+
+	/* Must create 2nd locked DTLB entry if physical ram starts at
+	 * 4MB absolute or higher, kernel image has been placed in the
+	 * right place at PAGE_OFFSET but references to start_mem and pages
+	 * will be to the perfect alias mapping, so set it up now.
+	 */
+	if(phys_base >= (4 * 1024 * 1024)) {
+		unsigned long alias_base = phys_base + PAGE_OFFSET;
+		unsigned long pte;
+		unsigned long flags;
+
+		/* We assume physical memory starts at some 4mb multiple,
+		 * if this were not true we wouldn't boot up to this point
+		 * anyways.
+		 */
+		pte  = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
+		pte |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
+		save_flags(flags); cli();
+		/* Write the tag-access register, then stuff the locked
+		 * entry directly into DTLB slot 61.
+		 */
+		__asm__ __volatile__("
+		stxa	%1, [%0] %3
+		stxa	%2, [%5] %4
+		membar	#Sync
+		flush	%%g4
+		nop
+		nop
+		nop"
+		: /* No outputs */
+		: "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pte),
+		  "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
+		: "memory");
+		restore_flags(flags);
+
+		/* Now set kernel pgd to upper alias so physical page computations
+		 * work.
+		 */
+		init_mm.pgd += (phys_base / (sizeof(pgd_t *)));
+	}
+
+	null_pmd_table = __pa(((unsigned long)&empty_null_pmd_table) + phys_base);
+	null_pte_table = __pa(((unsigned long)&empty_null_pte_table) + phys_base);
+
+	/* Every null-pmd entry points at the (empty) null pte table. */
+	pmdp = (pmd_t *) &empty_null_pmd_table;
+	for(i = 0; i < 1024; i++)
+		pmd_val(pmdp[i]) = null_pte_table;
+
+	memset((void *) &empty_null_pte_table, 0, PAGE_SIZE);
+
+	/* Now can init the kernel/bad page tables. */
+	__bfill64((void *)swapper_pg_dir, null_pmd_table);
+	__bfill64((void *)&empty_bad_pmd_table, null_pte_table);
+
+	/* We use mempool to create page tables, therefore adjust it up
+	 * such that __pa() macros etc. work.
+	 */
+	mempool = PAGE_ALIGN(start_mem) + phys_base;
+
+	/* FIXME: This should be done much nicer.
+	 * Just now we allocate 64M for each.
+	 */
+	allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_VADDR + 0x4000000);
+	allocate_ptable_skeleton(DVMA_VADDR, DVMA_VADDR + 0x4000000);
+	inherit_prom_mappings();
+	allocate_ptable_skeleton(0, 0x8000 + PAGE_SIZE);
+
+	/* Map prom interface page. */
+	pgdp = pgd_offset(init_task.mm, 0x8000);
+	pmdp = pmd_offset(pgdp, 0x8000);
+	ptep = pte_offset(pmdp, 0x8000);
+	pte = mk_pte(((unsigned long)&__p1275_loc)+phys_base, PAGE_KERNEL);
+	set_pte(ptep, pte);
+
+	/* Ok, we can use our TLB miss and window trap handlers safely. */
+	setup_tba((unsigned long)init_mm.pgd);
+
+	/* Kill locked PROM interface page mapping, the mapping will
+	 * re-enter on the next PROM interface call via our TLB miss
+	 * handlers.
+	 */
+	spitfire_flush_dtlb_primary_page(0x8000);
+	membar("#Sync");
+	spitfire_flush_itlb_primary_page(0x8000);
+	membar("#Sync");
+
+	/* Really paranoid. */
+	flushi(PAGE_OFFSET);
+	membar("#Sync");
+
+	/* Cleanup the extra locked TLB entry we created since we have the
+	 * nice TLB miss handlers of ours installed now.
+	 */
+	if(phys_base >= (4 * 1024 * 1024)) {
+		/* We only created DTLB mapping of this stuff. */
+		spitfire_flush_dtlb_nucleus_page(phys_base + PAGE_OFFSET);
+		membar("#Sync");
+
+		/* Paranoid */
+		flushi(PAGE_OFFSET);
+		membar("#Sync");
+	}
+
+	inherit_locked_prom_mappings();
+
+	flush_tlb_all();
+
+	start_mem = free_area_init(PAGE_ALIGN(mempool), end_mem);
+
+	return device_scan (PAGE_ALIGN (start_mem));
+}
+
+extern int min_free_pages;
+extern int free_pages_low;
+extern int free_pages_high;
+
+/* Clear PG_reserved on every page in [start_mem, end_mem) whose
+ * physical address actually lies inside one of the sp_banks[] memory
+ * banks (with the full page fitting below the bank limit); pages in
+ * address holes keep their reserved bit and are never freed.
+ * Note: the 'addr = start_mem' inside the loop skips straight over
+ * the kernel image region below start_mem on the first iteration.
+ */
+__initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
+{
+	unsigned long addr, tmp2 = 0;
+
+	for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+		if(addr >= PAGE_OFFSET && addr < start_mem)
+			addr = start_mem;
+		for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
+			unsigned long phys_addr = __pa(addr);
+			unsigned long base = sp_banks[tmp2].base_addr;
+			unsigned long limit = base + sp_banks[tmp2].num_bytes;
+
+			if((phys_addr >= base) && (phys_addr < limit) &&
+			   ((phys_addr + PAGE_SIZE) < limit))
+				mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+		}
+	}
+}
+
+/* Final memory-bootstrap step: set max_mapnr/high_memory, reserve all
+ * pages below start_mem (except a below-start initrd), un-reserve real
+ * RAM via taint_real_pages(), free every non-reserved page into the
+ * allocator while counting code/data/init/prom pages, print the memory
+ * banner, and derive the free-page watermarks.
+ */
+__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+{
+	int codepages = 0;
+	int datapages = 0;
+	int initpages = 0;
+	int prompages = 0;
+	unsigned long tmp2, addr;
+	unsigned long data_end;
+
+	end_mem &= PAGE_MASK;
+	max_mapnr = MAP_NR(end_mem);
+	high_memory = (void *) end_mem;
+
+	start_mem = PAGE_ALIGN(start_mem);
+	num_physpages = (start_mem - phys_base - PAGE_OFFSET) >> PAGE_SHIFT;
+
+	/* Everything below start_mem (kernel image + boot tables) stays
+	 * reserved, except pages of an initrd loaded below start_mem.
+	 */
+	addr = PAGE_OFFSET;
+	while(addr < start_mem) {
+#ifdef CONFIG_BLK_DEV_INITRD
+		if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
+			mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
+		else
+#endif	
+			mem_map[MAP_NR(addr)].flags |= (1<<PG_reserved);
+		addr += PAGE_SIZE;
+	}
+
+	taint_real_pages(start_mem, end_mem);
+	data_end = start_mem - phys_base;
+	for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
+		if(PageReserved(mem_map + MAP_NR(addr))) {
+			/* Attribute each reserved page to a section by
+			 * its position relative to the linker symbols.
+			 */
+			if ((addr < (unsigned long) &etext) && (addr >= PAGE_OFFSET))
+				codepages++;
+			else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
+				initpages++;
+			else if((addr >= (unsigned long)&__p1275_loc && addr < (unsigned long)&__bss_start))
+				prompages++;
+			else if((addr < data_end) && (addr >= PAGE_OFFSET))
+				datapages++;
+			continue;
+		}
+		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+		num_physpages++;
+#ifdef CONFIG_BLK_DEV_INITRD
+		if (!initrd_start ||
+		    (addr < initrd_start || addr >= initrd_end))
+#endif
+			free_page(addr);
+	}
+
+	tmp2 = nr_free_pages << PAGE_SHIFT;
+
+	printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %dk prom) [%016lx,%016lx]\n",
+	       tmp2 >> 10,
+	       codepages << (PAGE_SHIFT-10),
+	       datapages << (PAGE_SHIFT-10), 
+	       initpages << (PAGE_SHIFT-10), 
+	       prompages << (PAGE_SHIFT-10), 
+	       PAGE_OFFSET, end_mem);
+
+	/* Watermarks: min is 1/128th of free memory, floor of 16 pages. */
+	min_free_pages = nr_free_pages >> 7;
+	if(min_free_pages < 16)
+		min_free_pages = 16;
+	free_pages_low = min_free_pages + (min_free_pages >> 1);
+	free_pages_high = min_free_pages + min_free_pages;
+}
+
+/* Release the __init section back to the page allocator once boot-time
+ * initialization code is no longer needed: un-reserve each page, give
+ * it a use count of 1, and free it.
+ */
+void free_initmem (void)
+{
+	unsigned long addr;
+	
+	addr = (unsigned long)(&__init_begin);
+	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
+		free_page(addr);
+	}
+}
+
+/* Fill in the sysinfo memory fields for sysinfo(2): total/shared RAM
+ * derived from a mem_map[] census (reserved pages excluded), free RAM
+ * from nr_free_pages, and buffer memory from buffermem.  Counts are
+ * converted from pages to bytes at the end.
+ */
+void si_meminfo(struct sysinfo *val)
+{
+	int i;
+
+	i = MAP_NR(high_memory);
+	val->totalram = 0;
+	val->sharedram = 0;
+	val->freeram = nr_free_pages << PAGE_SHIFT;
+	val->bufferram = buffermem;
+	while (i-- > 0)  {
+		if (PageReserved(mem_map + i))
+			continue;
+		val->totalram++;
+		if (!atomic_read(&mem_map[i].count))
+			continue;
+		/* count-1 extra users of the page are "shared". */
+		val->sharedram += atomic_read(&mem_map[i].count) - 1;
+	}
+	val->totalram <<= PAGE_SHIFT;
+	val->sharedram <<= PAGE_SHIFT;
+}
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
new file mode 100644
index 000000000..1cec2111e
--- /dev/null
+++ b/arch/sparc64/prom/Makefile
@@ -0,0 +1,23 @@
+# $Id: Makefile,v 1.2 1997/02/25 12:40:25 jj Exp $
+# Makefile for the Sun Boot PROM interface library under
+# Linux.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+# Objects composing the PROM interface library.
+OBJS = bootstr.o devops.o init.o memory.o misc.o \
+	ranges.o tree.o console.o printf.o p1275.o
+
+all: promlib.a
+
+# Archive all objects into promlib.a; sync flushes the freshly
+# written archive to disk.
+promlib.a: $(OBJS)
+	$(AR) rcs promlib.a $(OBJS)
+	sync
+
+# Regenerate header dependencies into .depend (consumed by Rules.make).
+dep:
+	$(CPP) -M *.c > .depend
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/prom/bootstr.c b/arch/sparc64/prom/bootstr.c
new file mode 100644
index 000000000..e226c6e95
--- /dev/null
+++ b/arch/sparc64/prom/bootstr.c
@@ -0,0 +1,23 @@
+/* $Id: bootstr.c,v 1.3 1997/03/04 16:27:06 jj Exp $
+ * bootstr.c: Boot string/argument acquisition from the PROM.
+ *
+ * Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright(C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/string.h>
+#include <asm/oplib.h>
+
+#define BARG_LEN 256
+static char barg_buf[BARG_LEN];
+static char fetched = 0;
+
+/* Return the kernel boot argument string from the PROM "bootargs"
+ * property of the chosen node.  The string is fetched once into a
+ * static buffer and cached; later calls return the cached copy.
+ */
+char *
+prom_getbootargs(void)
+{
+	/* This check saves us from a panic when bootfd patches args. */
+	if (fetched) return barg_buf;
+	prom_getstring(prom_chosen_node, "bootargs", barg_buf, BARG_LEN);
+	fetched = 1;
+	return barg_buf;
+}
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
new file mode 100644
index 000000000..a4aea63b8
--- /dev/null
+++ b/arch/sparc64/prom/console.c
@@ -0,0 +1,128 @@
+/* $Id: console.c,v 1.6 1997/03/18 17:59:59 jj Exp $
+ * console.c: Routines that deal with sending and receiving IO
+ * to/from the current console device using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <linux/string.h>
+
+extern int prom_stdin, prom_stdout;
+
+/* Non blocking get character from console input device, returns -1
+ * if no input was taken. This can be used for polling.
+ */
+__inline__ int
+prom_nbgetchar(void)
+{
+	char inc;
+
+	/* P1275 "read": 3 in-args (ihandle, buf, len), 1 return value
+	 * (actual byte count); a return of 1 means one byte arrived.
+	 */
+	if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)|
+		      P1275_INOUT(3,1),
+		      prom_stdin, &inc, P1275_SIZE(1)) == 1)
+		return inc;
+	else
+		return -1;
+}
+
+/* Non blocking put character to console device, returns -1 if
+ * unsuccessful.
+ */
+__inline__ int
+prom_nbputchar(char c)
+{
+	char outc;
+
+	/* Copy into a local so the marshalled IN_BUF has a stable
+	 * address; "write" returns the number of bytes consumed.
+	 */
+	outc = c;
+	if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+		      P1275_INOUT(3,1),
+		      prom_stdout, &outc, P1275_SIZE(1)) == 1)
+		return 0;
+	else
+		return -1;
+}
+
+/* Blocking version of get character routine above.
+ * Spins on the non-blocking poll until a byte arrives.
+ */
+char
+prom_getchar(void)
+{
+	int character;
+	while((character = prom_nbgetchar()) == -1) ;
+	return (char) character;
+}
+
+/* Blocking version of put character routine above.
+ * Note: a failed write is silently dropped, not retried.
+ */
+void
+prom_putchar(char c)
+{
+	prom_nbputchar(c);
+	return;
+}
+
+/* Write 'len' raw bytes from 's' to the console in one PROM call;
+ * no newline translation is done here (see prom_printf for that).
+ */
+void
+prom_puts(char *s, int len)
+{
+	p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
+		  P1275_INOUT(3,1),
+		  prom_stdout, s, P1275_SIZE(len));
+}
+
+/* Query for input device type.
+ * Classifies the console input as keyboard, serial ttya/ttyb, or
+ * unknown, by inspecting the stdin package node and, for serial
+ * devices, the /options "input-device" property.
+ */
+enum prom_input_device
+prom_query_input_device()
+{
+	int st_p;
+	char propb[64];
+
+	st_p = prom_inst2pkg(prom_stdin);
+	if(prom_node_has_property(st_p, "keyboard"))
+		return PROMDEV_IKBD;
+	/* sizeof("serial") includes the trailing NUL, so this demands
+	 * an exact "serial" device_type, not just a prefix match.
+	 */
+	prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+	if(strncmp(propb, "serial", sizeof("serial")))
+		return PROMDEV_I_UNK;
+	/* FIXME: Is there any better way how to find out? */
+	/* Accept only exactly "ttya" or "ttyb": propb[3] must exist
+	 * and propb[4] must be the terminating NUL.
+	 */
+	st_p = prom_finddevice ("/options");
+	prom_getproperty(st_p, "input-device", propb, sizeof(propb));
+	if (strncmp (propb, "tty", 3) || !propb[3] || propb[4])
+		return PROMDEV_I_UNK;
+	switch (propb[3]) {
+	case 'a': return PROMDEV_ITTYA;
+	case 'b': return PROMDEV_ITTYB;
+	default: return PROMDEV_I_UNK;
+	}
+}
+
+/* Query for output device type.
+ * Mirror image of the input query: screen, serial ttya/ttyb, or
+ * unknown, based on the stdout package and /options "output-device".
+ */
+enum prom_output_device
+prom_query_output_device()
+{
+	int st_p;
+	char propb[64];
+	int propl;
+
+	st_p = prom_inst2pkg(prom_stdout);
+	/* Property length must equal sizeof("display") (8, including
+	 * the NUL) for a frame-buffer console.
+	 */
+	propl = prom_getproperty(st_p, "device_type", propb, sizeof(propb));
+	if (propl >= 0 && propl == sizeof("display") &&
+	    strncmp("display", propb, sizeof("display")) == 0)
+		return PROMDEV_OSCREEN;
+	if(strncmp("serial", propb, sizeof("serial")))
+		return PROMDEV_O_UNK;
+	/* FIXME: Is there any better way how to find out? */
+	st_p = prom_finddevice ("/options");
+	prom_getproperty(st_p, "output-device", propb, sizeof(propb));
+	if (strncmp (propb, "tty", 3) || !propb[3] || propb[4])
+		return PROMDEV_O_UNK;
+	switch (propb[3]) {
+	case 'a': return PROMDEV_OTTYA;
+	case 'b': return PROMDEV_OTTYB;
+	default: return PROMDEV_O_UNK;
+	}
+}
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
new file mode 100644
index 000000000..980717928
--- /dev/null
+++ b/arch/sparc64/prom/devops.c
@@ -0,0 +1,41 @@
+/* $Id: devops.c,v 1.2 1997/02/25 12:40:20 jj Exp $
+ * devops.c: Device operations using the PROM.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Open the device described by the string 'dstr'. Returns the handle
+ * to that device used for subsequent operations on that device.
+ * Returns -1 on failure.
+ */
+int
+prom_devopen(char *dstr)
+{
+	/* P1275 "open" takes a device path string, returns an ihandle. */
+	return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
+				  P1275_INOUT(1,1),
+				  dstr);
+}
+
+/* Close the device described by device handle 'dhandle'.
+ * Always reports success; the PROM "close" service returns nothing.
+ */
+int
+prom_devclose(int dhandle)
+{
+	p1275_cmd ("close", P1275_INOUT(1,0), dhandle);
+	return 0;
+}
+
+/* Seek to specified location described by 'seekhi' and 'seeklo'
+ * for device 'dhandle'.  The 64-bit offset is passed as two 32-bit
+ * halves; the PROM's result cell is ignored here.
+ */
+void
+prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo)
+{
+	p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo);
+}
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
new file mode 100644
index 000000000..7dcef7642
--- /dev/null
+++ b/arch/sparc64/prom/init.c
@@ -0,0 +1,79 @@
+/* $Id: init.c,v 1.7 1997/03/24 17:43:59 jj Exp $
+ * init.c: Initialize internal variables used by the PROM
+ * library functions.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Major PROM flavour; on sparc64 this is always PROM_P1275. */
+enum prom_major_version prom_vers;
+/* prom_rev: minor revision digit; prom_prev: packed x.y.z revision. */
+unsigned int prom_rev, prom_prev;
+
+/* The root node of the prom device tree. */
+int prom_root_node;
+/* ihandles of the console streams, from /chosen stdin/stdout. */
+int prom_stdin, prom_stdout;
+/* phandle of the /chosen node, needed e.g. for "bootargs". */
+int prom_chosen_node;
+
+/* You must call prom_init() before you attempt to use any of the
+ * routines in the prom library. It returns 0 on success, 1 on
+ * failure. It gets passed the pointer to the PROM vector.
+ */
+
+extern void prom_meminit(void);
+extern void prom_ranges_init(void);
+extern void prom_cif_init(void *, void *);
+
+__initfunc(void prom_init(void *cif_handler, void *cif_stack))
+{
+	char buffer[80];
+	int node;
+
+	prom_vers = PROM_P1275;
+
+	/* Wire up the client-interface trampoline before any other
+	 * prom_* call -- everything below goes through p1275_cmd().
+	 */
+	prom_cif_init(cif_handler, cif_stack);
+
+	/* "peer" of node 0 is the root of the device tree. */
+	prom_root_node = prom_getsibling(0);
+	if((prom_root_node == 0) || (prom_root_node == -1))
+		prom_halt();
+
+	prom_chosen_node = prom_finddevice("/chosen");
+	if (!prom_chosen_node || prom_chosen_node == -1)
+		prom_halt();
+
+	prom_stdin = prom_getint (prom_chosen_node, "stdin");
+	prom_stdout = prom_getint (prom_chosen_node, "stdout");
+
+	node = prom_finddevice("/openprom");
+	if (!node || node == -1)
+		prom_halt();
+
+	prom_getstring (node, "version", buffer, sizeof (buffer));
+
+	prom_printf ("\n");
+
+	/* Version field is expected to be 'OBP x.y.z date...'.
+	 * NOTE(review): the fixed indices below assume each of x, y
+	 * and z is a single digit -- confirm for newer firmware.
+	 */
+	if (strncmp (buffer, "OBP ", 4) || buffer[5] != '.' || buffer[7] != '.') {
+		prom_printf ("Strange OBP version `%s'.\n", buffer);
+		prom_halt ();
+	}
+
+	/* prom_rev = y; prom_prev = (x << 16) | (y << 8) | z. */
+	prom_rev = buffer[6] - '0';
+	prom_prev = ((buffer[4] - '0') << 16) |
+		    ((buffer[6] - '0') << 8) |
+		    (buffer[8] - '0');
+
+	printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + 4);
+
+	prom_meminit();
+
+	prom_ranges_init();
+
+	/* Initialization successful. */
+}
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
new file mode 100644
index 000000000..bb3e50a92
--- /dev/null
+++ b/arch/sparc64/prom/memory.c
@@ -0,0 +1,152 @@
+/* $Id: memory.c,v 1.3 1997/03/04 16:27:10 jj Exp $
+ * memory.c: Prom routine for acquiring various bits of information
+ * about RAM on the machine, both virtual and physical.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* This routine, for consistency, returns the ram parameters in the
+ * V0 prom memory descriptor format. I choose this format because I
+ * think it was the easiest to work with. I feel the religious
+ * arguments now... ;) Also, I return the linked lists sorted to
+ * prevent paging_init() upset stomach as I have not yet written
+ * the pepto-bismol kernel module yet.
+ */
+
+/* Scratch buffers for raw "reg"/"available" property data; 64
+ * entries is the assumed upper bound on regions reported by the PROM.
+ */
+struct linux_prom64_registers prom_reg_memlist[64];
+struct linux_prom64_registers prom_reg_tmp[64];
+
+/* The three lists exported through prom_meminfo(): total physical
+ * RAM, ranges the PROM keeps for itself, and free physical RAM.
+ * Each entry's theres_more doubles as next-pointer and end marker
+ * (NULL terminates the list).
+ */
+struct linux_mlist_p1275 prom_phys_total[64];
+struct linux_mlist_p1275 prom_prom_taken[64];
+struct linux_mlist_p1275 prom_phys_avail[64];
+
+struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
+struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
+struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
+
+struct linux_mem_p1275 prom_memlist;
+
+
+/* Internal Prom library routine to sort a linux_mlist_p1275 memory
+ * list. Used below in initialization.
+ * In-place selection sort by ascending start_adr: for each slot i,
+ * find the lowest start address in the tail, then rotate entries
+ * i..swapi right by one so list order (and the theres_more
+ * termination flags) stay intact.
+ */
+__initfunc(static void
+prom_sortmemlist(struct linux_mlist_p1275 *thislist))
+{
+	int swapi = 0;
+	int i, mitr, tmpsize;
+	unsigned long tmpaddr;
+	unsigned long lowest;
+
+	for(i=0; thislist[i].theres_more != 0; i++) {
+		lowest = thislist[i].start_adr;
+		/* Scan the rest of the list; theres_more of the
+		 * previous entry tells us when the list ends.
+		 */
+		for(mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
+			if(thislist[mitr].start_adr < lowest) {
+				lowest = thislist[mitr].start_adr;
+				swapi = mitr;
+			}
+		/* Slot i already holds the minimum; nothing to move. */
+		if(lowest == thislist[i].start_adr) continue;
+		tmpaddr = thislist[swapi].start_adr;
+		tmpsize = thislist[swapi].num_bytes;
+		/* Shift i..swapi-1 up one slot, then drop the saved
+		 * minimum into slot i.
+		 */
+		for(mitr = swapi; mitr > i; mitr--) {
+			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
+			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
+		}
+		thislist[i].start_adr = tmpaddr;
+		thislist[i].num_bytes = tmpsize;
+	}
+}
+
+/* Initialize the memory lists based upon the prom version.
+ * Builds three NULL-terminated arrays-as-lists from PROM properties:
+ *   prom_phys_avail  <- /memory "available"
+ *   prom_phys_total  <- /memory "reg"
+ *   prom_prom_taken  <- gaps between /virtual-memory "available"
+ * NOTE(review): each loop assumes the PROM reports at least one
+ * region; num_regs == 0 would index [iter-1] out of bounds.
+ */
+__initfunc(void prom_meminit(void))
+{
+	int node = 0;
+	unsigned int iter, num_regs;
+
+	/* Free physical memory. */
+	node = prom_finddevice("/memory");
+	num_regs = prom_getproperty(node, "available",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+	for(iter=0; iter<num_regs; iter++) {
+		prom_phys_avail[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_phys_avail[iter].num_bytes =
+			(unsigned long) prom_reg_memlist[iter].reg_size;
+		prom_phys_avail[iter].theres_more =
+			&prom_phys_avail[iter+1];
+	}
+	prom_phys_avail[iter-1].theres_more = 0x0;
+
+	/* Total installed physical memory. */
+	num_regs = prom_getproperty(node, "reg",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+	for(iter=0; iter<num_regs; iter++) {
+		prom_phys_total[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_phys_total[iter].num_bytes =
+			(unsigned long) prom_reg_memlist[iter].reg_size;
+		prom_phys_total[iter].theres_more =
+			&prom_phys_total[iter+1];
+	}
+	prom_phys_total[iter-1].theres_more = 0x0;
+
+	node = prom_finddevice("/virtual-memory");
+	num_regs = prom_getproperty(node, "available",
+				    (char *) prom_reg_memlist,
+				    sizeof(prom_reg_memlist));
+	num_regs = (num_regs/sizeof(struct linux_prom64_registers));
+
+	/* Convert available virtual areas to taken virtual
+	 * areas. First sort, then convert.
+	 */
+	for(iter=0; iter<num_regs; iter++) {
+		prom_prom_taken[iter].start_adr =
+			prom_reg_memlist[iter].phys_addr;
+		prom_prom_taken[iter].num_bytes =
+			(unsigned long) prom_reg_memlist[iter].reg_size;
+		/* BUG FIX: this used to link into prom_phys_total[],
+		 * making prom_sortmemlist() below terminate on the
+		 * wrong list's flags and corrupting any traversal of
+		 * the taken-list.  Link within prom_prom_taken[].
+		 */
+		prom_prom_taken[iter].theres_more =
+			&prom_prom_taken[iter+1];
+	}
+	prom_prom_taken[iter-1].theres_more = 0x0;
+
+	prom_sortmemlist(prom_prom_taken);
+
+	/* Finally, convert: each taken region starts where an
+	 * available one ends and runs to the start of the next
+	 * available one.
+	 */
+	for(iter=0; iter<num_regs; iter++) {
+		prom_prom_taken[iter].start_adr =
+			prom_prom_taken[iter].start_adr +
+			prom_prom_taken[iter].num_bytes;
+		prom_prom_taken[iter].num_bytes =
+			prom_prom_taken[iter+1].start_adr -
+			prom_prom_taken[iter].start_adr;
+	}
+	/* Last taken region extends to the top of the address space. */
+	prom_prom_taken[iter-1].num_bytes =
+		((unsigned long)-1) - (unsigned long) prom_prom_taken[iter-1].start_adr;
+
+	/* Sort the other two lists. */
+	prom_sortmemlist(prom_phys_total);
+	prom_sortmemlist(prom_phys_avail);
+
+	/* Link all the lists into the top-level descriptor. */
+	prom_memlist.p1275_totphys=&prom_ptot_ptr;
+	prom_memlist.p1275_prommap=&prom_ptak_ptr;
+	prom_memlist.p1275_available=&prom_pavl_ptr;
+}
+
+/* This returns a pointer to our libraries internal p1275 format
+ * memory descriptor.  Only valid after prom_meminit() has filled
+ * in prom_memlist.
+ */
+struct linux_mem_p1275 *
+prom_meminfo(void)
+{
+	return &prom_memlist;
+}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
new file mode 100644
index 000000000..fe9bf9c6b
--- /dev/null
+++ b/arch/sparc64/prom/misc.c
@@ -0,0 +1,134 @@
+/* $Id: misc.c,v 1.6 1997/04/10 05:13:05 davem Exp $
+ * misc.c: Miscellaneous prom functions that don't belong
+ * anywhere else.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Reset and reboot the machine with the command 'bcommand'.
+ * Does not return if the PROM honours the request.
+ */
+void
+prom_reboot(char *bcommand)
+{
+	p1275_cmd ("boot", P1275_ARG(0,P1275_ARG_IN_STRING)|
+		   P1275_INOUT(1,0), bcommand);
+}
+
+/* Forth evaluate the expression contained in 'fstring'.
+ * Empty/NULL strings are ignored; the result cell is discarded.
+ */
+void
+prom_feval(char *fstring)
+{
+	if(!fstring || fstring[0] == 0)
+		return;
+	p1275_cmd ("interpret", P1275_ARG(0,P1275_ARG_IN_STRING)|
+		   P1275_INOUT(1,1), fstring);
+}
+
+/* We want to do this more nicely some day. */
+#ifdef CONFIG_SUN_CONSOLE
+extern void console_restore_palette(void);
+extern void set_palette(void);
+extern int serial_console;
+#endif
+
+/* Drop into the prom, with the chance to continue with the 'go'
+ * prom command.
+ */
+/* XXX Fix the pre and post calls as it locks up my Ultra at the moment -DaveM */
+void
+prom_cmdline(void)
+{
+	extern void kernel_enter_debugger(void);
+	extern void install_obp_ticker(void);
+	extern void install_linux_ticker(void);
+	unsigned long flags;
+
+	/* The palette save/restore and ticker hooks are disabled
+	 * pending the lockup noted above; only irq-off + "enter"
+	 * is live here.
+	 */
+	/* kernel_enter_debugger(); */
+#ifdef CONFIG_SUN_CONSOLE
+#if 0
+	if(!serial_console)
+		console_restore_palette ();
+#endif
+#endif
+	/* install_obp_ticker(); */
+	save_flags(flags); cli();
+	p1275_cmd ("enter", P1275_INOUT(0,0));
+	restore_flags(flags);
+	/* install_linux_ticker(); */
+#ifdef CONFIG_SUN_CONSOLE
+#if 0
+	if(!serial_console)
+		set_palette ();
+#endif
+#endif
+}
+
+/* Drop into the prom, but completely terminate the program.
+ * No chance of continuing.  The loop guards against a PROM that
+ * returns from "exit".
+ */
+void
+prom_halt(void)
+{
+again:
+	p1275_cmd ("exit", P1275_INOUT(0,0));
+	goto again; /* PROM is out to get me -DaveM */
+}
+
+/* Set prom sync handler to call function 'funcp'.
+ * The IN_FUNCTION marshalling in p1275_cmd routes the callback
+ * through the trampoline at 0x8038.
+ */
+void
+prom_setsync(sync_func_t funcp)
+{
+	if(!funcp) return;
+	p1275_cmd ("set-callback", P1275_ARG(0,P1275_ARG_IN_FUNCTION)|
+		   P1275_INOUT(1,1), funcp);
+}
+
+/* Get the idprom and stuff it into buffer 'idbuf'. Returns the
+ * format type. 'num_bytes' is the number of bytes that your idbuf
+ * has space for. Returns 0xff on error.
+ * NOTE(review): prom_getproperty() returns the property length
+ * (non-zero) on success and -1 on failure, so the !... test below
+ * appears never to take the idbuf[0] branch; callers seem to read
+ * idbuf directly regardless -- confirm intended semantics.
+ */
+unsigned char
+prom_get_idprom(char *idbuf, int num_bytes)
+{
+	int len;
+
+	len = prom_getproplen(prom_root_node, "idprom");
+	if((len>num_bytes) || (len==-1)) return 0xff;
+	if(!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes))
+		return idbuf[0];
+
+	return 0xff;
+}
+
+/* Get the major prom version number.  Always P1275 on sparc64. */
+int
+prom_version(void)
+{
+	return PROM_P1275;
+}
+
+/* Get the prom plugin-revision.  Set up by prom_init(). */
+int
+prom_getrev(void)
+{
+	return prom_rev;
+}
+
+/* Get the prom firmware print revision.  Set up by prom_init(). */
+int
+prom_getprev(void)
+{
+	return prom_prev;
+}
+
+/* Install Linux trap table so PROM uses that instead of it's own. */
+void prom_set_trap_table(unsigned long tba)
+{
+	p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
+}
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
new file mode 100644
index 000000000..3eb0311df
--- /dev/null
+++ b/arch/sparc64/prom/p1275.c
@@ -0,0 +1,170 @@
+/* $Id: p1275.c,v 1.8 1997/04/03 09:29:21 davem Exp $
+ * p1275.c: Sun IEEE 1275 PROM low level interface routines
+ *
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/system.h>
+#include <asm/spitfire.h>
+#include <asm/pstate.h>
+
+/* If you change layout of this structure, please change the prom_doit
+   function below as well.
+   This struct overlays a fixed page at virtual address 0x8000 (the
+   commented hex values are the absolute addresses of each field);
+   prom_buffer is scratch space used by p1275_cmd() to marshal
+   strings/buffers where the 32-bit PROM can address them. */
+typedef struct {
+	unsigned prom_doit_code [24];		/* 0x8000 */
+	long prom_sync_routine;			/* 0x8060 */
+	void (*prom_cif_handler)(long *);	/* 0x8068 */
+	unsigned long prom_cif_stack;		/* 0x8070 */
+	unsigned long prom_args [23];		/* 0x8078 */
+	char prom_buffer [7888];
+} at0x8000;
+
+/* Entry point of the trampoline code; set to 0x8000 by prom_cif_init. */
+static void (*prom_do_it)(void);
+
+void prom_cif_interface (void) __attribute__ ((__section__ (".p1275")));
+
+/* At most 14 insns */
+/* Trampoline body, placed in the .p1275 section (presumably copied
+ * to 0x8000 at boot -- the callers above invoke it via prom_do_it).
+ * Two entry points:
+ *   +0x00: main CIF call -- switches to the PROM stack, enables
+ *          PSTATE.AM (32-bit address masking for the PROM), saves
+ *          and restores %g4/%g6, and calls prom_cif_handler with
+ *          the prom_args array.
+ *   +0x38: sync-callback shim -- same AM dance around a call to
+ *          prom_sync_routine (see P1275_ARG_IN_FUNCTION / 0x8038).
+ */
+void prom_cif_interface (void)
+{
+	__asm__ __volatile__ ("
+	sethi	%%hi(0x8000), %%o0
+	ldx	[%%o0 + 0x070], %%o1	! prom_cif_stack
+	save	%%o1, -0xc0, %%sp
+	ldx	[%%i0 + 0x068], %%l2	! prom_cif_handler
+	rdpr	%%pstate, %%l4
+	mov	%%g4, %%l0
+	mov	%%g6, %%l1
+	wrpr	%%l4, %0, %%pstate	! turn on address masking
+	call	%%l2
+	or	%%i0, 0x078, %%o0	! prom_args
+	wrpr	%%l4, 0, %%pstate	! put pstate back
+	mov	%%l0, %%g4
+	ret
+	restore %%l1, 0, %%g6
+	save	%%sp, -0xc0, %%sp	! If you change the offset of the save
+	rdpr	%%pstate, %%l4		! here, please change the 0x8038
+	andn	%%l4, %0, %%l3		! constant below as well
+	wrpr	%%l3, %%pstate
+	ldx	[%%o0 + 0x060], %%l2
+	call	%%l2
+	nop
+	wrpr	%%l4, 0, %%pstate
+	ret
+	restore
+	" : : "i" (PSTATE_AM));
+}
+
+/* Issue one IEEE 1275 client-interface call named 'service'.
+ * 'fmt' encodes the calling convention:
+ *   bits 0-3   : number of input arguments (nargs)
+ *   bits 4-7   : number of return cells (nrets)
+ *   bits 8+    : 3-bit type attribute per argument (P1275_ARG_*)
+ * Varargs are marshalled into the low-memory page at 0x8000 so the
+ * 32-bit PROM can address them, the trampoline is invoked, and OUT
+ * buffers are copied back.  Returns the first result cell.
+ * Runs with interrupts off and MMU primary context forced to 0.
+ */
+long p1275_cmd (char *service, long fmt, ...)
+{
+	char *p, *q;
+	unsigned long flags;
+	int nargs, nrets, i;
+	va_list list;
+	long attrs, x;
+	long ctx = 0;
+	at0x8000 *low = (at0x8000 *)(0x8000);
+
+	p = low->prom_buffer;
+	save_and_cli(flags);
+	/* PROM expects context 0; flush user windows first if we
+	 * were running in a user context.
+	 */
+	ctx = spitfire_get_primary_context ();
+	if (ctx) {
+		flushw_user ();
+		spitfire_set_primary_context (0);
+	}
+	low->prom_args[0] = (unsigned long)p;			/* service */
+	strcpy (p, service);
+	/* Bump p past the string, 8-byte aligned. */
+	p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+	low->prom_args[1] = nargs = (fmt & 0x0f);		/* nargs */
+	low->prom_args[2] = nrets = ((fmt & 0xf0) >> 4); 	/* nrets */
+	attrs = fmt >> 8;
+	va_start(list, fmt);
+	/* Marshal inputs.  Buffer-typed args consume two varargs
+	 * (pointer + size) and two attribute slots.
+	 */
+	for (i = 0; i < nargs; i++, attrs >>= 3) {
+		switch (attrs & 0x7) {
+		case P1275_ARG_NUMBER:
+			low->prom_args[i + 3] = (unsigned)va_arg(list, long); break;
+		case P1275_ARG_IN_STRING:
+			strcpy (p, va_arg(list, char *));
+			low->prom_args[i + 3] = (unsigned long)p;
+			p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
+			break;
+		case P1275_ARG_OUT_BUF:
+			(void) va_arg(list, char *);
+			low->prom_args[i + 3] = (unsigned long)p;
+			x = va_arg(list, long);
+			i++; attrs >>= 3;
+			p = (char *)(((long)(p + (int)x + 7)) & ~7);
+			low->prom_args[i + 3] = x;
+			break;
+		case P1275_ARG_IN_BUF:
+			q = va_arg(list, char *);
+			low->prom_args[i + 3] = (unsigned long)p;
+			x = va_arg(list, long);
+			i++; attrs >>= 3;
+			memcpy (p, q, (int)x);
+			p = (char *)(((long)(p + (int)x + 7)) & ~7);
+			low->prom_args[i + 3] = x;
+			break;
+		case P1275_ARG_OUT_32B:
+			(void) va_arg(list, char *);
+			low->prom_args[i + 3] = (unsigned long)p;
+			p += 32;
+			break;
+		case P1275_ARG_IN_FUNCTION:
+			/* Pass the sync-callback trampoline (second
+			 * entry of prom_cif_interface) to the PROM and
+			 * stash the real handler for it to call.
+			 */
+			low->prom_args[i + 3] = 0x8038;
+			low->prom_sync_routine = va_arg(list, long); break;
+		}
+	}
+	va_end(list);
+
+	(*prom_do_it)();
+
+	/* Walk the varargs a second time in the same order to copy
+	 * OUT buffers from low memory back to the caller.
+	 */
+	attrs = fmt >> 8;
+	va_start(list, fmt);
+	for (i = 0; i < nargs; i++, attrs >>= 3) {
+		switch (attrs & 0x7) {
+		case P1275_ARG_NUMBER:
+			(void) va_arg(list, long); break;
+		case P1275_ARG_IN_STRING:
+			(void) va_arg(list, char *); break;
+		case P1275_ARG_IN_FUNCTION:
+			(void) va_arg(list, long); break;
+		case P1275_ARG_IN_BUF:
+			(void) va_arg(list, char *);
+			(void) va_arg(list, long);
+			i++; attrs >>= 3;
+			break;
+		case P1275_ARG_OUT_BUF:
+			p = va_arg(list, char *);
+			x = va_arg(list, long);
+			memcpy (p, (char *)(low->prom_args[i + 3]), (int)x);
+			i++; attrs >>= 3;
+			break;
+		case P1275_ARG_OUT_32B:
+			p = va_arg(list, char *);
+			memcpy (p, (char *)(low->prom_args[i + 3]), 32);
+			break;
+		}
+	}
+	va_end(list);
+	/* First return cell follows the input args in the array. */
+	x = low->prom_args [nargs + 3];
+
+	if (ctx)
+		spitfire_set_primary_context (ctx);
+	restore_flags(flags);
+	return x;
+}
+
+/* Record the PROM's client-interface entry point and stack in the
+ * low-memory control block and point prom_do_it at the trampoline
+ * (at 0x8000).  Must run before any p1275_cmd() call.
+ */
+void prom_cif_init(void *cif_handler, void *cif_stack)
+{
+	at0x8000 *low = (at0x8000 *)(0x8000);
+
+	low->prom_cif_handler = (void (*)(long *))cif_handler;
+	low->prom_cif_stack = (unsigned long)cif_stack;
+	prom_do_it = (void (*)(void))(0x8000);
+}
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
new file mode 100644
index 000000000..33428e9f6
--- /dev/null
+++ b/arch/sparc64/prom/printf.c
@@ -0,0 +1,46 @@
+/* $Id: printf.c,v 1.3 1997/03/18 18:00:00 jj Exp $
+ * printf.c: Internal prom library printf facility.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+/* This routine is internal to the prom library, no one else should know
+ * about or use it! It's simple and smelly anyway....
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Shared format buffer -- prom_printf is not reentrant. */
+static char ppbuf[1024];
+
+extern void prom_puts (char *, int);
+
+/* printf-style output to the PROM console.  Formats into ppbuf,
+ * then emits it in chunks, inserting a '\r' before every '\n'
+ * (the '\n' itself goes out with the following chunk), so "\n"
+ * reaches the console as "\r\n".
+ */
+void
+prom_printf(char *fmt, ...)
+{
+	va_list args;
+	char ch, *bptr, *last;
+	int i;
+
+	va_start(args, fmt);
+	/* vsprintf's length result is kept but not used below. */
+	i = vsprintf(ppbuf, fmt, args);
+
+	bptr = ppbuf;
+	last = ppbuf;
+
+	while((ch = *(bptr++)) != 0) {
+		if(ch == '\n') {
+			/* Flush text preceding the newline, then CR;
+			 * 'last' now points at the '\n' so it is sent
+			 * with the next flush.
+			 */
+			if (last < bptr - 1)
+				prom_puts (last, bptr - 1 - last);
+			prom_putchar('\r');
+			last = bptr - 1;
+		}
+	}
+	/* Flush whatever remains after the final newline. */
+	if (last < bptr - 1)
+		prom_puts (last, bptr - 1 - last);
+	va_end(args);
+	return;
+}
diff --git a/arch/sparc64/prom/ranges.c b/arch/sparc64/prom/ranges.c
new file mode 100644
index 000000000..323539237
--- /dev/null
+++ b/arch/sparc64/prom/ranges.c
@@ -0,0 +1,107 @@
+/* $Id: ranges.c,v 1.3 1997/03/21 12:33:36 jj Exp $
+ * ranges.c: Handle ranges in newer proms for obio/sbus.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/init.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/sbus.h>
+#include <asm/system.h>
+
+/* Cached obio ranges; not filled in by this file's init path. */
+struct linux_prom_ranges promlib_obio_ranges[PROMREG_MAX];
+int num_obio_ranges;
+
+/* Adjust register values based upon the ranges parameters.
+ * For each reg, find the range whose child space matches and rebase
+ * the reg into the parent's space/address.
+ * NOTE(review): when no range matches, this prints an error but then
+ * still indexes rangep[nranges] -- one past the end of the array.
+ */
+inline void
+prom_adjust_regs(struct linux_prom_registers *regp, int nregs,
+		 struct linux_prom_ranges *rangep, int nranges)
+{
+	int regc, rngc;
+
+	for(regc=0; regc < nregs; regc++) {
+		for(rngc=0; rngc < nranges; rngc++)
+			if(regp[regc].which_io == rangep[rngc].ot_child_space)
+				break; /* Fount it */
+		if(rngc==nranges) /* oops */
+			prom_printf("adjust_regs: Could not find range with matching bus type...\n");
+		regp[regc].which_io = rangep[rngc].ot_parent_space;
+		regp[regc].phys_addr += rangep[rngc].ot_parent_base;
+	}
+}
+
+/* Compose two levels of ranges: rebase each entry of ranges1 through
+ * the matching entry of ranges2 (its parent bus).
+ * NOTE(review): same past-the-end indexing as above on a failed match.
+ */
+inline void
+prom_adjust_ranges(struct linux_prom_ranges *ranges1, int nranges1,
+		   struct linux_prom_ranges *ranges2, int nranges2)
+{
+	int rng1c, rng2c;
+
+	for(rng1c=0; rng1c < nranges1; rng1c++) {
+		for(rng2c=0; rng2c < nranges2; rng2c++)
+			if(ranges1[rng1c].ot_child_space ==
+			   ranges2[rng2c].ot_child_space) break;
+		if(rng2c == nranges2) /* oops */
+			prom_printf("adjust_ranges: Could not find matching bus type...\n");
+		ranges1[rng1c].ot_parent_space = ranges2[rng2c].ot_parent_space;
+		ranges1[rng1c].ot_parent_base += ranges2[rng2c].ot_parent_base;
+	}
+}
+
+/* Apply probed sbus ranges to registers passed, if no ranges return.
+ * The ranges_applied flag on the device guards against rebasing the
+ * same registers twice.
+ */
+void prom_apply_sbus_ranges(struct linux_sbus *sbus, struct linux_prom_registers *regs,
+			    int nregs, struct linux_sbus_device *sdev)
+{
+	if(sbus->num_sbus_ranges) {
+		if(sdev && (sdev->ranges_applied == 0)) {
+			sdev->ranges_applied = 1;
+			prom_adjust_regs(regs, nregs, sbus->sbus_ranges,
+					 sbus->num_sbus_ranges);
+		}
+	}
+}
+
+/* Nothing to do at library init time on sparc64; kept for parity
+ * with the sparc32 promlib interface.
+ */
+__initfunc(void prom_ranges_init(void))
+{
+}
+
+/* Cache the "ranges" property of an SBUS node on its descriptor.
+ * Leaves num_sbus_ranges at 0 when the node has no such property.
+ */
+__initfunc(void prom_sbus_ranges_init(int iommund, struct linux_sbus *sbus))
+{
+	int success;
+
+	sbus->num_sbus_ranges = 0;
+	success = prom_getproperty(sbus->prom_node, "ranges",
+				   (char *) sbus->sbus_ranges,
+				   sizeof (sbus->sbus_ranges));
+	if (success != -1)
+		sbus->num_sbus_ranges = (success/sizeof(struct linux_prom_ranges));
+}
+
+/* Rebase 'regs' of 'node' through the node's own "ranges" and,
+ * when 'parent' is given, through the parent's "ranges" as well
+ * (composed first via prom_adjust_ranges).  A node without a
+ * "ranges" property is left untouched.
+ */
+void
+prom_apply_generic_ranges (int node, int parent, struct linux_prom_registers *regs, int nregs)
+{
+	int success;
+	int num_ranges;
+	struct linux_prom_ranges ranges[PROMREG_MAX];
+
+	success = prom_getproperty(node, "ranges",
+				   (char *) ranges,
+				   sizeof (ranges));
+	if (success != -1) {
+		num_ranges = (success/sizeof(struct linux_prom_ranges));
+		if (parent) {
+			struct linux_prom_ranges parent_ranges[PROMREG_MAX];
+			int num_parent_ranges;
+
+			success = prom_getproperty(parent, "ranges",
+						   (char *) parent_ranges,
+						   sizeof (parent_ranges));
+			if (success != -1) {
+				num_parent_ranges = (success/sizeof(struct linux_prom_ranges));
+				prom_adjust_ranges (ranges, num_ranges, parent_ranges, num_parent_ranges);
+			}
+		}
+		prom_adjust_regs(regs, nregs, ranges, num_ranges);
+	}
+}
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
new file mode 100644
index 000000000..ae4baf858
--- /dev/null
+++ b/arch/sparc64/prom/tree.c
@@ -0,0 +1,328 @@
+/* $Id: tree.c,v 1.5 1997/03/24 17:44:01 jj Exp $
+ * tree.c: Basic device tree traversal/scanning for the Linux
+ * prom library.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+
+/* Return the child of node 'node' or zero if no this node has no
+ * direct descendent.  The raw variant returns the PROM result
+ * unfiltered (-1 on error); the wrapper normalizes 0/-1 to 0.
+ */
+__inline__ int
+__prom_getchild(int node)
+{
+	return p1275_cmd ("child", P1275_INOUT(1, 1), node);
+}
+
+__inline__ int
+prom_getchild(int node)
+{
+	long cnode;
+
+	if(node == -1) return 0;
+	cnode = __prom_getchild(node);
+	if(cnode == -1) return 0;
+	return (int)cnode;
+}
+
+/* Return the parent of 'node', or zero on error / at the root. */
+__inline__ int
+prom_getparent(int node)
+{
+	long cnode;
+
+	if(node == -1) return 0;
+	cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node);
+	if(cnode == -1) return 0;
+	return (int)cnode;
+}
+
+/* Return the next sibling of node 'node' or zero if no more siblings
+ * at this level of depth in the tree.
+ */
+__inline__ int
+__prom_getsibling(int node)
+{
+	return p1275_cmd ("peer", P1275_INOUT(1, 1), node);
+}
+
+__inline__ int
+prom_getsibling(int node)
+{
+	long sibnode;
+
+	if(node == -1) return 0;
+	sibnode = __prom_getsibling(node);
+	if(sibnode == -1) return 0;
+	return (int)sibnode;
+}
+
+/* Return the length in bytes of property 'prop' at node 'node'.
+ * Return -1 on error.
+ */
+__inline__ int
+prom_getproplen(int node, char *prop)
+{
+	if((!node) || (!prop)) return -1;
+	return p1275_cmd ("getproplen",
+			  P1275_ARG(1,P1275_ARG_IN_STRING)|
+			  P1275_INOUT(2, 1),
+			  node, prop);
+}
+
+/* Acquire a property 'prop' at node 'node' and place it in
+ * 'buffer' which has a size of 'bufsize'. If the acquisition
+ * was successful the length will be returned, else -1 is returned.
+ * Note: a zero-length or oversized property is also reported as -1.
+ */
+__inline__ int
+prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+{
+	int plen;
+
+	plen = prom_getproplen(node, prop);
+	if((plen > bufsize) || (plen == 0) || (plen == -1))
+		return -1;
+	else {
+		/* Ok, things seem all right. */
+		return p1275_cmd ("getprop",
+				  P1275_ARG(1,P1275_ARG_IN_STRING)|
+				  P1275_ARG(2,P1275_ARG_OUT_BUF)|
+				  P1275_INOUT(4, 1),
+				  node, prop, buffer, P1275_SIZE(plen));
+	}
+}
+
+/* Acquire an integer property and return its value. Returns -1
+ * on failure.  Only works for properties that fit in sizeof(int);
+ * a larger property is reported as failure by prom_getproperty.
+ */
+__inline__ int
+prom_getint(int node, char *prop)
+{
+	int intprop;
+
+	if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1)
+		return intprop;
+
+	return -1;
+}
+
+/* Acquire an integer property, upon error return the passed default
+ * integer.
+ */
+
+int
+prom_getintdefault(int node, char *property, int deflt)
+{
+	int retval;
+
+	retval = prom_getint(node, property);
+	if(retval == -1) return deflt;
+
+	return retval;
+}
+
+/* Acquire a boolean property, 1=TRUE 0=FALSE.
+ * Mere existence of the property means TRUE, per OBP convention.
+ */
+int
+prom_getbool(int node, char *prop)
+{
+	int retval;
+
+	retval = prom_getproplen(node, prop);
+	if(retval == -1) return 0;
+	return 1;
+}
+
+/* Acquire a property whose value is a string, returns a null
+ * string on error. The char pointer is the user supplied string
+ * buffer.
+ */
+void
+prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+{
+	int len;
+
+	len = prom_getproperty(node, prop, user_buf, ubuf_size);
+	if(len != -1) return;
+	/* Failure: hand back an empty string rather than garbage. */
+	user_buf[0] = 0;
+	return;
+}
+
+
+/* Does the device at node 'node' have name 'name'?
+ * YES = 1   NO = 0
+ */
+int
+prom_nodematch(int node, char *name)
+{
+	char namebuf[128];
+	prom_getproperty(node, "name", namebuf, sizeof(namebuf));
+	if(strcmp(namebuf, name) == 0) return 1;
+	return 0;
+}
+
+/* Search siblings at 'node_start' for a node with name
+ * 'nodename'. Return node if successful, zero if not.
+ */
+int
+prom_searchsiblings(int node_start, char *nodename)
+{
+
+	int thisnode, error;
+	char promlib_buf[128];
+
+	/* Walk the peer list; prom_getsibling returns 0 at the end. */
+	for(thisnode = node_start; thisnode;
+	    thisnode=prom_getsibling(thisnode)) {
+		error = prom_getproperty(thisnode, "name", promlib_buf,
+					 sizeof(promlib_buf));
+		/* Should this ever happen? */
+		if(error == -1) continue;
+		if(strcmp(nodename, promlib_buf)==0) return thisnode;
+	}
+
+	return 0;
+}
+
+/* Gets name in the {name@x,yyyyy|name (if no reg)} form.
+ * Walks the ancestors first to learn whether the node hangs off an
+ * SBUS (32-bit reg cells) or not (64-bit reg cells), which decides
+ * how the unit address after '@' is formatted.  Returns 0 on
+ * success, -1 on failure.
+ * NOTE(review): writes buffer[i] after reading at most 'len' bytes;
+ * if the name exactly fills the buffer this is one past the end --
+ * callers presumably pass generous buffers, confirm.
+ */
+int
+prom_getname (int node, char *buffer, int len)
+{
+	int i, sbus = 0;
+	struct linux_prom_registers *reg;
+	struct linux_prom64_registers reg64[PROMREG_MAX];
+
+	for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
+		i = prom_getproperty (sbus, "name", buffer, len);
+		if (i > 0) {
+			buffer [i] = 0;
+			if (!strcmp (buffer, "sbus"))
+				break;
+		}
+	}
+	i = prom_getproperty (node, "name", buffer, len);
+	if (i <= 0) {
+		buffer [0] = 0;
+		return -1;
+	}
+	buffer [i] = 0;
+	len -= i;
+	i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
+	if (i <= 0) return 0;
+	if (len < 16) return -1;
+	/* Append "@hi,lo" after the NUL-terminated name. */
+	buffer = strchr (buffer, 0);
+	if (sbus) {
+		/* SBUS regs are 32-bit cells; reinterpret the buffer. */
+		reg = (struct linux_prom_registers *)reg64;
+		sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
+	} else {
+		sprintf (buffer, "@%x,%x", (unsigned int)(reg64[0].phys_addr >> 36), (unsigned int)(reg64[0].phys_addr));
+	}
+	return 0;
+}
+
+/* Return the first property type for node 'node'.
+ * buffer should be at least 32B in length
+ * ("nextprop" with a NULL previous-property yields the first one).
+ */
+__inline__ char *
+prom_firstprop(int node, char *buffer)
+{
+	*buffer = 0;
+	if(node == -1) return buffer;
+	p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)|
+		   P1275_INOUT(3, 0),
+		   node, (char *) 0x0, buffer);
+	return buffer;
+}
+
+/* Return the property type string after property type 'oprop'
+ * at node 'node' . Returns NULL string if no more
+ * property types for this node.
+ */
+__inline__ char *
+prom_nextprop(int node, char *oprop, char *buffer)
+{
+	char buf[32];
+
+	if(node == -1) {
+		*buffer = 0;
+		return buffer;
+	}
+	/* Callers may pass the same buffer as both input and output;
+	 * copy oprop aside so the PROM's write doesn't clobber it.
+	 */
+	if (oprop == buffer) {
+		strcpy (buf, oprop);
+		oprop = buf;
+	}
+	p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+		   P1275_ARG(2,P1275_ARG_OUT_32B)|
+		   P1275_INOUT(3, 0),
+		   node, oprop, buffer);
+	return buffer;
+}
+
+/* Translate a full device path to its phandle, 0 on failure. */
+int
+prom_finddevice(char *name)
+{
+	if(!name) return 0;
+	return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)|
+			  P1275_INOUT(1, 1),
+			  name);
+}
+
+/* Return 1 if 'node' carries property 'prop', else 0; implemented
+ * by enumerating all property names with nextprop.
+ */
+int prom_node_has_property(int node, char *prop)
+{
+	char buf [32];
+
+	*buf = 0;
+	do {
+		prom_nextprop(node, buf, buf);
+		if(!strcmp(buf, prop))
+			return 1;
+	} while (*buf);
+	return 0;
+}
+
+/* Set property 'pname' at node 'node' to value 'value' which has a length
+ * of 'size' bytes. Return the number of bytes the prom accepted.
+ */
+int
+prom_setprop(int node, char *pname, char *value, int size)
+{
+	if(size == 0) return 0;
+	if((pname == 0) || (value == 0)) return 0;
+
+	return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)|
+			  P1275_ARG(2,P1275_ARG_IN_BUF)|
+			  P1275_INOUT(4, 1),
+			  node, pname, value, P1275_SIZE(size));
+}
+
+/* Map an open-instance handle (ihandle) to its package (phandle);
+ * 0 on failure.
+ */
+__inline__ int
+prom_inst2pkg(int inst)
+{
+	int node;
+
+	node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst);
+	if (node == -1) return 0;
+	return node;
+}
+
+/* Return 'node' assigned to a particular prom 'path'
+ * FIXME: Should work for v0 as well
+ * Implemented by opening the device, mapping the instance to a
+ * package, and closing it again; 0 on failure.
+ */
+int
+prom_pathtoinode(char *path)
+{
+	int node, inst;
+
+	inst = prom_devopen (path);
+	if (inst == -1) return 0;
+	node = prom_inst2pkg (inst);
+	prom_devclose (inst);
+	if (node == -1) return 0;
+	return node;
+}
diff --git a/arch/sparc64/vmlinux.lds b/arch/sparc64/vmlinux.lds
new file mode 100644
index 000000000..f8ba23528
--- /dev/null
+++ b/arch/sparc64/vmlinux.lds
@@ -0,0 +1,83 @@
+/* ld script to make UltraLinux kernel */
+OUTPUT_FORMAT("elf64-sparc", "elf64-sparc", "elf64-sparc")
+OUTPUT_ARCH(sparc:v9a)
+ENTRY(_start)
+
+SECTIONS
+{
+  /* Two fixed 8K pages sit below the kernel text in the physically
+   * mapped region; text itself starts at offset 0x4000.
+   */
+  empty_zero_page = 0xfffff80000000000;
+  swapper_pg_dir = 0xfffff80000002000;
+  . = 0x4000;
+  .text 0xfffff80000004000 :
+  {
+    *(.text)
+    *(.gnu.warning)
+  } =0
+  _etext = .;
+  PROVIDE (etext = .);
+  .rodata : { *(.rodata) }
+  .rodata1 : { *(.rodata1) }
+  .data :
+  {
+    *(.data)
+    CONSTRUCTORS
+  }
+  .data1 : { *(.data1) }
+  _edata = .;
+  PROVIDE (edata = .);
+  /* Exception fixup and table, bracketed by symbols the fault
+   * handler searches between.
+   */
+  .fixup : { *(.fixup) }
+  __start___ex_table = .;
+  __ex_table : { *(__ex_table) }
+  __stop___ex_table = .;
+  __start___ksymtab = .;
+  __ksymtab : { *(__ksymtab) }
+  __stop___ksymtab = .;
+  __kstrtab : { *(.kstrtab) }
+  /* Init text/data, page (8K) aligned so it can be freed after boot. */
+  . = ALIGN(8192);
+  __init_begin = .;
+  .text.init : { *(.text.init) }
+  .data.init : { *(.data.init) }
+  . = ALIGN(8192);
+  __init_end = .;
+  /* PROM client-interface trampoline (see prom/p1275.c); kept in
+   * its own page-aligned section.
+   */
+  __p1275_loc = .;
+  .p1275 :
+  {
+    *(.p1275)
+    . = ALIGN(8192);
+  }
+  __bss_start = .;
+  .sbss : { *(.sbss) *(.scommon) }
+  .bss :
+  {
+    *(.dynbss)
+    *(.bss)
+    *(COMMON)
+    /* Five reserved 8K pages used by the MM layer as fallback
+     * page tables and a bad-page sink.
+     */
+    . = ALIGN(8192);
+    empty_bad_pmd_table = .;
+    . += 8192;
+    empty_bad_pte_table = .;
+    . += 8192;
+    empty_null_pmd_table = .;
+    . += 8192;
+    empty_null_pte_table = .;
+    . += 8192;
+    empty_bad_page = .;
+    . += 8192;
+  }
+  _end = . ;
+  PROVIDE (end = .);
+  /* Stabs debugging sections. */
+  .stab 0 : { *(.stab) }
+  .stabstr 0 : { *(.stabstr) }
+  .stab.excl 0 : { *(.stab.excl) }
+  .stab.exclstr 0 : { *(.stab.exclstr) }
+  .stab.index 0 : { *(.stab.index) }
+  .stab.indexstr 0 : { *(.stab.indexstr) }
+  .comment 0 : { *(.comment) }
+  .debug 0 : { *(.debug) }
+  .debug_srcinfo 0 : { *(.debug_srcinfo) }
+  .debug_aranges 0 : { *(.debug_aranges) }
+  .debug_pubnames 0 : { *(.debug_pubnames) }
+  .debug_sfnames 0 : { *(.debug_sfnames) }
+  .line 0 : { *(.line) }
+}