summaryrefslogtreecommitdiffstats
path: root/arch/sparc64
diff options
context:
space:
mode:
Diffstat (limited to 'arch/sparc64')
-rw-r--r--arch/sparc64/Makefile9
-rw-r--r--arch/sparc64/boot/Makefile23
-rw-r--r--arch/sparc64/boot/piggyback.c109
-rw-r--r--arch/sparc64/config.in5
-rw-r--r--arch/sparc64/defconfig97
-rw-r--r--arch/sparc64/kernel/Makefile18
-rw-r--r--arch/sparc64/kernel/binfmt_aout32.c482
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c2
-rw-r--r--arch/sparc64/kernel/cpu.c19
-rw-r--r--arch/sparc64/kernel/devices.c5
-rw-r--r--arch/sparc64/kernel/dtlb_miss.S33
-rw-r--r--arch/sparc64/kernel/entry.S697
-rw-r--r--arch/sparc64/kernel/etrap.S189
-rw-r--r--arch/sparc64/kernel/hack.S170
-rw-r--r--arch/sparc64/kernel/head.S255
-rw-r--r--arch/sparc64/kernel/ioctl32.c205
-rw-r--r--arch/sparc64/kernel/ioport.c13
-rw-r--r--arch/sparc64/kernel/irq.c161
-rw-r--r--arch/sparc64/kernel/process.c354
-rw-r--r--arch/sparc64/kernel/ptrace.c370
-rw-r--r--arch/sparc64/kernel/rtrap.S217
-rw-r--r--arch/sparc64/kernel/setup.c53
-rw-r--r--arch/sparc64/kernel/signal.c276
-rw-r--r--arch/sparc64/kernel/signal32.c200
-rw-r--r--arch/sparc64/kernel/smp.c347
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c35
-rw-r--r--arch/sparc64/kernel/sunos_ioctl32.c281
-rw-r--r--arch/sparc64/kernel/sys32.S427
-rw-r--r--arch/sparc64/kernel/sys_sparc.c3
-rw-r--r--arch/sparc64/kernel/sys_sparc32.c1090
-rw-r--r--arch/sparc64/kernel/sys_sunos32.c1511
-rw-r--r--arch/sparc64/kernel/systbls.S120
-rw-r--r--arch/sparc64/kernel/time.c9
-rw-r--r--arch/sparc64/kernel/traps.c396
-rw-r--r--arch/sparc64/kernel/ttable.S16
-rw-r--r--arch/sparc64/kernel/unaligned.c517
-rw-r--r--arch/sparc64/kernel/winfixup.S123
-rw-r--r--arch/sparc64/lib/Makefile48
-rw-r--r--arch/sparc64/lib/VIS.h113
-rw-r--r--arch/sparc64/lib/VISbzero.S246
-rw-r--r--arch/sparc64/lib/VIScopy.S1060
-rw-r--r--arch/sparc64/lib/VIScsum.S436
-rw-r--r--arch/sparc64/lib/VISmemset.S228
-rw-r--r--arch/sparc64/lib/blockops.S178
-rw-r--r--arch/sparc64/lib/checksum.S871
-rw-r--r--arch/sparc64/lib/copy_from_user.S469
-rw-r--r--arch/sparc64/lib/copy_to_user.S469
-rw-r--r--arch/sparc64/lib/memcpy.S526
-rw-r--r--arch/sparc64/lib/memset.S196
-rw-r--r--arch/sparc64/lib/strlen_user.S47
-rw-r--r--arch/sparc64/mm/Makefile10
-rw-r--r--arch/sparc64/mm/fault.c161
-rw-r--r--arch/sparc64/mm/generic.c36
-rw-r--r--arch/sparc64/mm/init.c235
-rw-r--r--arch/sparc64/mm/modutil.c66
-rw-r--r--arch/sparc64/mm/ultra.S226
-rw-r--r--arch/sparc64/prom/bootstr.c20
-rw-r--r--arch/sparc64/prom/misc.c14
-rw-r--r--arch/sparc64/prom/p1275.c84
-rw-r--r--arch/sparc64/vmlinux.lds12
60 files changed, 9654 insertions, 4934 deletions
diff --git a/arch/sparc64/Makefile b/arch/sparc64/Makefile
index a70f9ebf8..b8cf06878 100644
--- a/arch/sparc64/Makefile
+++ b/arch/sparc64/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.16 1997/05/04 07:21:08 davem Exp $
+# $Id: Makefile,v 1.20 1997/07/11 11:05:29 jj Exp $
# sparc64/Makefile
#
# Makefile for the architecture dependent flags and dependencies on the
@@ -24,8 +24,8 @@ ELF2AOUT64 = elf2aout64
# debugging of the kernel to get the proper debugging information.
#CFLAGS := $(CFLAGS) -g -pipe -fcall-used-g5 -fcall-used-g7
-CFLAGS := $(CFLAGS) -pipe \
- -fcall-used-g5 -fcall-used-g7 -Wno-sign-compare
+CFLAGS := $(CFLAGS) -pipe -mno-fpu -mtune=ultrasparc -mmedlow \
+ -ffixed-g4 -fcall-used-g5 -fcall-used-g7 -Wno-sign-compare
LINKFLAGS = -T arch/sparc64/vmlinux.lds
@@ -49,3 +49,6 @@ archdep:
check_asm:
$(MAKE) -C arch/sparc64/kernel check_asm
+
+tftpboot.img:
+ $(MAKE) -C arch/sparc64/boot tftpboot.img
diff --git a/arch/sparc64/boot/Makefile b/arch/sparc64/boot/Makefile
new file mode 100644
index 000000000..ed3fc6cdb
--- /dev/null
+++ b/arch/sparc64/boot/Makefile
@@ -0,0 +1,23 @@
+# $Id: Makefile,v 1.1 1997/07/18 06:26:30 ralf Exp $
+# Makefile for the Sparc64 boot stuff.
+#
+# Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+# Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+
+ROOT_IMG =/usr/src/root.img
+ELFTOAOUT =elftoaout
+
+all: boot
+
+boot:
+ @echo "Nothing special to be done for 'boot' on Linux/UltraSPARC."
+
+tftpboot.img: piggyback
+ $(ELFTOAOUT) $(TOPDIR)/vmlinux -o tftpboot.img
+ ./piggyback tftpboot.img $(TOPDIR)/System.map $(ROOT_IMG)
+
+piggyback: piggyback.c
+ $(HOSTCC) $(HOSTCFLAGS) -o piggyback piggyback.c
+
+dep:
+
diff --git a/arch/sparc64/boot/piggyback.c b/arch/sparc64/boot/piggyback.c
new file mode 100644
index 000000000..869b3492c
--- /dev/null
+++ b/arch/sparc64/boot/piggyback.c
@@ -0,0 +1,109 @@
+/* $Id: piggyback.c,v 1.1 1997/07/18 06:26:30 ralf Exp $
+ Simple utility to make a single-image install kernel with initial ramdisk
+ for Sparc64 tftpbooting without need to set up nfs.
+
+ Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+/* Note: run this on an a.out kernel (use elftoaout for it), as PROM looks for a.out image only
+ usage: piggyback vmlinux System.map tail, where tail is gzipped fs of the initial ramdisk */
+
+void die(char *str)
+{
+ perror (str);
+ exit(1);
+}
+
+int main(int argc,char **argv)
+{
+ char buffer [1024], *q, *r;
+ unsigned int i, j, k, start, end, offset;
+ FILE *map;
+ struct stat s;
+ int image, tail;
+
+ if (stat (argv[3], &s) < 0) die (argv[3]);
+ map = fopen (argv[2], "r");
+ if (!map) die(argv[2]);
+ while (fgets (buffer, 1024, map)) {
+ if (!strcmp (buffer + 19, "start\n"))
+ start = strtoul (buffer + 8, NULL, 16);
+ else if (!strcmp (buffer + 19, "end\n"))
+ end = strtoul (buffer + 8, NULL, 16);
+ }
+ fclose (map);
+ if ((image = open(argv[1],O_RDWR)) < 0) die(argv[1]);
+ if (read(image,buffer,512) != 512) die(argv[1]);
+ if (!memcmp (buffer, "\177ELF", 4)) {
+ unsigned int *p = (unsigned int *)(buffer + *(unsigned int *)(buffer + 28));
+
+ i = p[1] + *(unsigned int *)(buffer + 24) - p[2];
+ if (lseek(image,i,0) < 0) die("lseek");
+ if (read(image,buffer,512) != 512) die(argv[1]);
+ j = 0;
+ } else if (*(unsigned int *)buffer == 0x01030107) {
+ i = j = 32;
+ } else {
+ fprintf (stderr, "Not ELF nor a.out. Don't blame me.\n");
+ exit(1);
+ }
+ k = i;
+ if (j == 32 && buffer[40] == 'H' && buffer[41] == 'd' && buffer[42] == 'r' && buffer[43] == 'S') {
+ offset = 40 + 10;
+ } else {
+ i += ((*(unsigned short *)(buffer + j + 2))<<2) - 512;
+ if (lseek(image,i,0) < 0) die("lseek");
+ if (read(image,buffer,1024) != 1024) die(argv[1]);
+ for (q = buffer, r = q + 512; q < r; q += 4) {
+ if (*q == 'H' && q[1] == 'd' && q[2] == 'r' && q[3] == 'S')
+ break;
+ }
+ if (q == r) {
+ fprintf (stderr, "Couldn't find headers signature in the kernel.\n");
+ exit(1);
+ }
+ offset = i + (q - buffer) + 10;
+ }
+ if (lseek(image, offset, 0) < 0) die ("lseek");
+ *(unsigned *)buffer = 0;
+ *(unsigned *)(buffer + 4) = 0x01000000;
+ *(unsigned *)(buffer + 8) = ((end + 32 + 8191) & ~8191);
+ *(unsigned *)(buffer + 12) = s.st_size;
+ if (write(image,buffer+2,14) != 14) die (argv[1]);
+ if (lseek(image, 4, 0) < 0) die ("lseek");
+ *(unsigned *)buffer = ((end + 32 + 8191) & ~8191) - (start & ~0x3fffffUL) + s.st_size;
+ *(unsigned *)(buffer + 4) = 0;
+ *(unsigned *)(buffer + 8) = 0;
+ if (write(image,buffer,12) != 12) die (argv[1]);
+ if (lseek(image, k - start + ((end + 32 + 8191) & ~8191), 0) < 0) die ("lseek");
+ if ((tail = open(argv[3],O_RDONLY)) < 0) die(argv[3]);
+ while ((i = read (tail,buffer,1024)) > 0)
+ if (write(image,buffer,i) != i) die (argv[1]);
+ if (close(image) < 0) die("close");
+ if (close(tail) < 0) die("close");
+ return 0;
+}
diff --git a/arch/sparc64/config.in b/arch/sparc64/config.in
index 6354edded..fcbac5c1a 100644
--- a/arch/sparc64/config.in
+++ b/arch/sparc64/config.in
@@ -1,4 +1,4 @@
-# $Id: config.in,v 1.6 1997/04/17 20:35:42 jj Exp $
+# $Id: config.in,v 1.9 1997/07/04 11:33:05 davem Exp $
# For a description of the syntax of this configuration file,
# see the Configure script.
#
@@ -51,10 +51,10 @@ bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
bool 'Sysctl support' CONFIG_SYSCTL
bool 'Kernel support for Linux/Sparc 32bit binary compatibility' CONFIG_SPARC32_COMPAT
-tristate 'Kernel support for a.out binaries' CONFIG_BINFMT_AOUT
tristate 'Kernel support for 64-bit ELF binaries' CONFIG_BINFMT_ELF
if [ "$CONFIG_SPARC32_COMPAT" != "n" ]; then
tristate 'Kernel support for 32-bit ELF binaries' CONFIG_BINFMT_ELF32
+ bool 'Kernel support for 32-bit (ie. SunOS) a.out binaries' CONFIG_BINFMT_AOUT32
fi
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
tristate 'Kernel support for JAVA binaries' CONFIG_BINFMT_JAVA
@@ -157,4 +157,5 @@ bool 'Kernel profiling support' CONFIG_PROFILE
if [ "$CONFIG_PROFILE" = "y" ]; then
int ' Profile shift count' CONFIG_PROFILE_SHIFT 2
fi
+bool 'ECache flush trap support at ta 0x72' CONFIG_EC_FLUSH_TRAP
endmenu
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index fbc4a5073..92a003853 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -5,12 +5,14 @@
#
# Code maturity level options
#
-# CONFIG_EXPERIMENTAL is not set
+CONFIG_EXPERIMENTAL=y
#
# Loadable module support
#
-# CONFIG_MODULES is not set
+CONFIG_MODULES=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_KERNELD is not set
#
# General setup
@@ -45,24 +47,31 @@ SUN_FB_CREATOR=y
#
# Misc Linux/SPARC drivers
#
-# CONFIG_SUN_OPENPROMIO is not set
-# CONFIG_SUN_MOSTEK_RTC is not set
-# CONFIG_SUN_OPENPROMFS is not set
+CONFIG_SUN_OPENPROMIO=m
+CONFIG_SUN_MOSTEK_RTC=y
+# CONFIG_SUN_BPP is not set
+# CONFIG_SUN_VIDEOPIX is not set
+CONFIG_SUN_OPENPROMFS=m
CONFIG_NET=y
CONFIG_SYSVIPC=y
CONFIG_SYSCTL=y
CONFIG_SPARC32_COMPAT=y
-CONFIG_BINFMT_AOUT=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_ELF32=y
+CONFIG_BINFMT_AOUT32=y
+CONFIG_BINFMT_JAVA=m
+CONFIG_BINFMT_MISC=m
#
# Floppy, IDE, and other block devices
#
# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_MD is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_BLK_DEV_LOOP is not set
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_STRIPED=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_LOOP=m
#
# Networking options
@@ -75,6 +84,7 @@ CONFIG_INET=y
# CONFIG_IP_ACCT is not set
# CONFIG_IP_ROUTER is not set
# CONFIG_NET_IPIP is not set
+# CONFIG_SYN_COOKIES is not set
#
# (it is safe to leave these untouched)
@@ -84,13 +94,22 @@ CONFIG_INET=y
CONFIG_PATH_MTU_DISCOVERY=y
CONFIG_IP_NOSR=y
CONFIG_SKB_LARGE=y
+CONFIG_IPV6=m
#
#
#
-# CONFIG_IPX is not set
-# CONFIG_ATALK is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+# CONFIG_IPX_PPROP_ROUTING is not set
+CONFIG_ATALK=m
+# CONFIG_IPDDP is not set
# CONFIG_AX25 is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_LLC is not set
+# CONFIG_WAN_ROUTER is not set
#
# SCSI support
@@ -101,10 +120,10 @@ CONFIG_SCSI=y
# SCSI support type (disk, tape, CDrom)
#
CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
+CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
-# CONFIG_CHR_DEV_SG is not set
+CONFIG_CHR_DEV_SG=m
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
@@ -116,46 +135,59 @@ CONFIG_SCSI_CONSTANTS=y
# SCSI low-level drivers
#
CONFIG_SCSI_SUNESP=y
-# CONFIG_SCSI_QLOGICPTI is not set
+CONFIG_SCSI_QLOGICPTI=m
#
# Network device support
#
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_PPP is not set
-# CONFIG_SLIP is not set
+CONFIG_DUMMY=m
+CONFIG_PPP=m
+
+#
+# CCP compressors for PPP are only built as modules.
+#
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
CONFIG_SUNLANCE=y
-# CONFIG_HAPPYMEAL is not set
-# CONFIG_SUNQE is not set
-# CONFIG_MYRI_SBUS is not set
+CONFIG_HAPPYMEAL=y
+CONFIG_SUNQE=m
+CONFIG_MYRI_SBUS=m
#
# Filesystems
#
# CONFIG_QUOTA is not set
-# CONFIG_MINIX_FS is not set
+# CONFIG_DCACHE_PRELOAD is not set
+# CONFIG_OMIRR is not set
+# CONFIG_TRANS_NAMES is not set
+CONFIG_MINIX_FS=m
CONFIG_EXT2_FS=y
-# CONFIG_FAT_FS is not set
-# CONFIG_MSDOS_FS is not set
-# CONFIG_VFAT_FS is not set
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
# CONFIG_UMSDOS_FS is not set
CONFIG_PROC_FS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_RNFS_BOOTP=y
# CONFIG_RNFS_RARP is not set
-# CONFIG_NFSD is not set
+CONFIG_NFSD=m
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
-# CONFIG_SMB_FS is not set
+CONFIG_SMB_FS=m
+CONFIG_SMB_WIN95=y
+CONFIG_NCP_FS=m
CONFIG_ISO9660_FS=y
-# CONFIG_HPFS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_AFFS_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_AUTOFS_FS is not set
-CONFIG_UFS_FS=y
+CONFIG_HPFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_AUTOFS_FS=m
+CONFIG_AMIGA_PARTITION=y
+CONFIG_UFS_FS=m
CONFIG_BSD_DISKLABEL=y
CONFIG_SMD_DISKLABEL=y
@@ -163,3 +195,4 @@ CONFIG_SMD_DISKLABEL=y
# Kernel hacking
#
# CONFIG_PROFILE is not set
+# CONFIG_EC_FLUSH_TRAP is not set
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 199360a5f..9e9013735 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.22 1997/05/27 19:30:17 jj Exp $
+# $Id: Makefile,v 1.28 1997/07/05 09:52:20 davem Exp $
# Makefile for the linux kernel.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -16,20 +16,26 @@
all: kernel.o head.o init_task.o
O_TARGET := kernel.o
-O_OBJS := etrap.o rtrap.o hack.o process.o setup.o cpu.o idprom.o \
- systbls.o traps.o entry.o devices.o auxio.o ioport.o \
- irq.o ptrace.o time.o sys_sparc.o signal.o winfixup.o
+O_OBJS := process.o setup.o cpu.o idprom.o \
+ systbls.o traps.o devices.o auxio.o ioport.o \
+ irq.o ptrace.o time.o sys_sparc.o signal.o \
+ unaligned.o sys_sunos32.o sunos_ioctl32.o
OX_OBJS := sparc64_ksyms.o
ifdef CONFIG_SPARC32_COMPAT
- O_OBJS += sys_sparc32.o signal32.o ioctl32.o
+ O_OBJS += sys32.o sys_sparc32.o signal32.o ioctl32.o
endif
ifdef CONFIG_BINFMT_ELF32
O_OBJS += binfmt_elf32.o
endif
-head.o: head.S ttable.S itlb_miss.S dtlb_miss.S dtlb_prot.S
+ifdef CONFIG_BINFMT_AOUT32
+ O_OBJS += binfmt_aout32.o
+endif
+
+head.o: head.S ttable.S itlb_miss.S dtlb_miss.S dtlb_prot.S etrap.S rtrap.S \
+ winfixup.S entry.S
$(CC) -D__ASSEMBLY__ -ansi -c $*.S -o $*.o
#
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
new file mode 100644
index 000000000..215aaf06f
--- /dev/null
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -0,0 +1,482 @@
+/*
+ * linux/fs/binfmt_aout.c
+ *
+ * Copyright (C) 1991, 1992, 1996 Linus Torvalds
+ *
+ * Hacked a bit by DaveM to make it work with 32-bit SunOS
+ * binaries on the sparc64 port.
+ */
+
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/a.out.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/malloc.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+static int load_aout32_binary(struct linux_binprm *, struct pt_regs * regs);
+static int load_aout32_library(int fd);
+static int aout32_core_dump(long signr, struct pt_regs * regs);
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+static struct linux_binfmt aout32_format = {
+ NULL, NULL, load_aout32_binary, load_aout32_library, aout32_core_dump
+};
+
+static void set_brk(unsigned long start, unsigned long end)
+{
+ start = PAGE_ALIGN(start);
+ end = PAGE_ALIGN(end);
+ if (end <= start)
+ return;
+ do_mmap(NULL, start, end - start,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE, 0);
+}
+
+/*
+ * These are the only things you should do on a core-file: use only these
+ * macros to write out all the necessary info.
+ */
+#define DUMP_WRITE(addr,nr) \
+while (file.f_op->write(inode,&file,(char *)(addr),(nr)) != (nr)) goto close_coredump
+
+#define DUMP_SEEK(offset) \
+if (file.f_op->llseek) { \
+ if (file.f_op->llseek(inode,&file,(offset),0) != (offset)) \
+ goto close_coredump; \
+} else file.f_pos = (offset)
+
+/*
+ * Routine writes a core dump image in the current directory.
+ * Currently only a stub-function.
+ *
+ * Note that setuid/setgid files won't make a core-dump if the uid/gid
+ * changed due to the set[u|g]id. It's enforced by the "current->dumpable"
+ * field, which also makes sure the core-dumps won't be recursive if the
+ * dumping of the process results in another error..
+ */
+
+static inline int
+do_aout32_core_dump(long signr, struct pt_regs * regs)
+{
+ struct dentry * dentry = NULL;
+ struct inode * inode = NULL;
+ struct file file;
+ unsigned short fs;
+ int has_dumped = 0;
+ char corefile[6+sizeof(current->comm)];
+ unsigned long dump_start, dump_size;
+ struct user dump;
+# define START_DATA(u) (u.u_tsize)
+# define START_STACK(u) ((regs->u_regs[UREG_FP]) & ~(PAGE_SIZE - 1))
+
+ if (!current->dumpable || current->mm->count != 1)
+ return 0;
+ current->dumpable = 0;
+
+/* See if we have enough room to write the upage. */
+ if (current->rlim[RLIMIT_CORE].rlim_cur < PAGE_SIZE)
+ return 0;
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ memcpy(corefile,"core.",5);
+#if 0
+ memcpy(corefile+5,current->comm,sizeof(current->comm));
+#else
+ corefile[4] = '\0';
+#endif
+ dentry = open_namei(corefile,O_CREAT | 2 | O_TRUNC, 0600);
+ if (IS_ERR(dentry)) {
+ dentry = NULL;
+ goto end_coredump;
+ }
+ inode = dentry->d_inode;
+ if (!S_ISREG(inode->i_mode))
+ goto end_coredump;
+ if (!inode->i_op || !inode->i_op->default_file_ops)
+ goto end_coredump;
+ if (get_write_access(inode))
+ goto end_coredump;
+ file.f_mode = 3;
+ file.f_flags = 0;
+ file.f_count = 1;
+ file.f_dentry = dentry;
+ file.f_pos = 0;
+ file.f_reada = 0;
+ file.f_op = inode->i_op->default_file_ops;
+ if (file.f_op->open)
+ if (file.f_op->open(inode,&file))
+ goto done_coredump;
+ if (!file.f_op->write)
+ goto close_coredump;
+ has_dumped = 1;
+ current->flags |= PF_DUMPCORE;
+ strncpy(dump.u_comm, current->comm, sizeof(current->comm));
+ dump.signal = signr;
+ dump_thread(regs, &dump);
+
+/* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
+ if ((dump.u_dsize+dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+
+/* Make sure we have enough room to write the stack and data areas. */
+ if ((dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+
+/* make sure we actually have a data and stack area to dump */
+ set_fs(USER_DS);
+ if (verify_area(VERIFY_READ, (void *) START_DATA(dump), dump.u_dsize))
+ dump.u_dsize = 0;
+ if (verify_area(VERIFY_READ, (void *) START_STACK(dump), dump.u_ssize))
+ dump.u_ssize = 0;
+
+ set_fs(KERNEL_DS);
+/* struct user */
+ DUMP_WRITE(&dump,sizeof(dump));
+/* now we start writing out the user space info */
+ set_fs(USER_DS);
+/* Dump the data area */
+ if (dump.u_dsize != 0) {
+ dump_start = START_DATA(dump);
+ dump_size = dump.u_dsize;
+ DUMP_WRITE(dump_start,dump_size);
+ }
+/* Now prepare to dump the stack area */
+ if (dump.u_ssize != 0) {
+ dump_start = START_STACK(dump);
+ dump_size = dump.u_ssize;
+ DUMP_WRITE(dump_start,dump_size);
+ }
+/* Finally dump the task struct. Not be used by gdb, but could be useful */
+ set_fs(KERNEL_DS);
+ DUMP_WRITE(current,sizeof(*current));
+close_coredump:
+ if (file.f_op->release)
+ file.f_op->release(inode,&file);
+done_coredump:
+ put_write_access(inode);
+end_coredump:
+ set_fs(fs);
+ dput(dentry);
+ return has_dumped;
+}
+
+static int
+aout32_core_dump(long signr, struct pt_regs * regs)
+{
+ int retval;
+
+ MOD_INC_USE_COUNT;
+ retval = do_aout32_core_dump(signr, regs);
+ MOD_DEC_USE_COUNT;
+ return retval;
+}
+
+/*
+ * create_aout32_tables() parses the env- and arg-strings in new user
+ * memory and creates the pointer tables from them, and puts their
+ * addresses on the "stack", returning the new stack pointer value.
+ */
+#define A(x) ((unsigned long)x)
+static u32 *create_aout32_tables(char * p, struct linux_binprm * bprm)
+{
+ u32 *argv, *envp;
+ u32 *sp;
+ int argc = bprm->argc;
+ int envc = bprm->envc;
+
+ sp = (u32 *) ((-(unsigned long)sizeof(char *)) & (unsigned long) p);
+
+ /* This imposes the proper stack alignment for a new process. */
+ sp = (u32 *) (((unsigned long) sp) & ~7);
+ if ((envc+argc+3)&1)
+ --sp;
+
+ sp -= envc+1;
+ envp = (u32 *) sp;
+ sp -= argc+1;
+ argv = (u32 *) sp;
+ put_user(argc,--sp);
+ current->mm->arg_start = (unsigned long) p;
+ while (argc-->0) {
+ char c;
+ put_user(((u32)A(p)),argv++);
+ do {
+ get_user(c,p++);
+ } while (c);
+ }
+ put_user(NULL,argv);
+ current->mm->arg_end = current->mm->env_start = (unsigned long) p;
+ while (envc-->0) {
+ char c;
+ put_user(((u32)A(p)),envp++);
+ do {
+ get_user(c,p++);
+ } while (c);
+ }
+ put_user(NULL,envp);
+ current->mm->env_end = (unsigned long) p;
+ return sp;
+}
+
+/*
+ * These are the functions used to load a.out style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+ */
+
+static inline int do_load_aout32_binary(struct linux_binprm * bprm,
+ struct pt_regs * regs)
+{
+ struct exec ex;
+ struct file * file;
+ int fd;
+ unsigned long error;
+ unsigned long p = bprm->p;
+ unsigned long fd_offset;
+ unsigned long rlim;
+
+ ex = *((struct exec *) bprm->buf); /* exec-header */
+ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
+ N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
+ N_TRSIZE(ex) || N_DRSIZE(ex) ||
+ bprm->dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ return -ENOEXEC;
+ }
+
+ current->personality = PER_LINUX;
+ fd_offset = N_TXTOFF(ex);
+
+ /* Check initial limits. This avoids letting people circumvent
+ * size limits imposed on them by creating programs with large
+ * arrays in the data or bss.
+ */
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+ /* OK, This is the point of no return */
+ flush_old_exec(bprm);
+ memcpy(&current->tss.core_exec, &ex, sizeof(struct exec));
+
+ current->mm->end_code = ex.a_text +
+ (current->mm->start_code = N_TXTADDR(ex));
+ current->mm->end_data = ex.a_data +
+ (current->mm->start_data = N_DATADDR(ex));
+ current->mm->brk = ex.a_bss +
+ (current->mm->start_brk = N_BSSADDR(ex));
+
+ current->mm->rss = 0;
+ current->mm->mmap = NULL;
+ current->suid = current->euid = current->fsuid = bprm->e_uid;
+ current->sgid = current->egid = current->fsgid = bprm->e_gid;
+ current->flags &= ~PF_FORKNOEXEC;
+ if (N_MAGIC(ex) == NMAGIC) {
+ /* Fuck me plenty... */
+ error = do_mmap(NULL, N_TXTADDR(ex), ex.a_text,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
+ ex.a_text, 0);
+ error = do_mmap(NULL, N_DATADDR(ex), ex.a_data,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ read_exec(bprm->dentry, fd_offset + ex.a_text, (char *) N_DATADDR(ex),
+ ex.a_data, 0);
+ goto beyond_if;
+ }
+
+ if (N_MAGIC(ex) == OMAGIC) {
+ do_mmap(NULL, N_TXTADDR(ex) & PAGE_MASK,
+ ex.a_text+ex.a_data + PAGE_SIZE - 1,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ read_exec(bprm->dentry, fd_offset, (char *) N_TXTADDR(ex),
+ ex.a_text+ex.a_data, 0);
+ } else {
+ if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
+ (N_MAGIC(ex) != NMAGIC))
+ printk(KERN_NOTICE "executable not page aligned\n");
+
+ fd = open_dentry(bprm->dentry, O_RDONLY);
+
+ if (fd < 0)
+ return fd;
+ file = current->files->fd[fd];
+ if (!file->f_op || !file->f_op->mmap) {
+ sys_close(fd);
+ do_mmap(NULL, 0, ex.a_text+ex.a_data,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ read_exec(bprm->dentry, fd_offset,
+ (char *) N_TXTADDR(ex), ex.a_text+ex.a_data, 0);
+ goto beyond_if;
+ }
+
+ error = do_mmap(file, N_TXTADDR(ex), ex.a_text,
+ PROT_READ | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset);
+
+ if (error != N_TXTADDR(ex)) {
+ sys_close(fd);
+ send_sig(SIGKILL, current, 0);
+ return error;
+ }
+
+ error = do_mmap(file, N_DATADDR(ex), ex.a_data,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ sys_close(fd);
+ if (error != N_DATADDR(ex)) {
+ send_sig(SIGKILL, current, 0);
+ return error;
+ }
+ }
+beyond_if:
+ if (current->exec_domain && current->exec_domain->module)
+ __MOD_DEC_USE_COUNT(current->exec_domain->module);
+ if (current->binfmt && current->binfmt->module)
+ __MOD_DEC_USE_COUNT(current->binfmt->module);
+ current->exec_domain = lookup_exec_domain(current->personality);
+ current->binfmt = &aout32_format;
+ if (current->exec_domain && current->exec_domain->module)
+ __MOD_INC_USE_COUNT(current->exec_domain->module);
+ if (current->binfmt && current->binfmt->module)
+ __MOD_INC_USE_COUNT(current->binfmt->module);
+
+ set_brk(current->mm->start_brk, current->mm->brk);
+
+ p = setup_arg_pages(p, bprm);
+
+ p = (unsigned long) create_aout32_tables((char *)p, bprm);
+ current->mm->start_stack = p;
+ start_thread32(regs, ex.a_entry, p);
+ if (current->flags & PF_PTRACED)
+ send_sig(SIGTRAP, current, 0);
+ return 0;
+}
+
+
+static int
+load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
+{
+ int retval;
+
+ MOD_INC_USE_COUNT;
+ retval = do_load_aout32_binary(bprm, regs);
+ MOD_DEC_USE_COUNT;
+ return retval;
+}
+
+static inline int
+do_load_aout32_library(int fd)
+{
+ struct file * file;
+ struct exec ex;
+ struct dentry * dentry;
+ struct inode * inode;
+ unsigned int len;
+ unsigned int bss;
+ unsigned int start_addr;
+ unsigned long error;
+
+ file = current->files->fd[fd];
+
+ if (!file || !file->f_op)
+ return -EACCES;
+
+ dentry = file->f_dentry;
+ inode = dentry->d_inode;
+
+ /* Seek into the file */
+ if (file->f_op->llseek) {
+ if ((error = file->f_op->llseek(inode, file, 0, 0)) != 0)
+ return -ENOEXEC;
+ } else
+ file->f_pos = 0;
+
+ set_fs(KERNEL_DS);
+ error = file->f_op->read(inode, file, (char *) &ex, sizeof(ex));
+ set_fs(USER_DS);
+ if (error != sizeof(ex))
+ return -ENOEXEC;
+
+ /* We come in here for the regular a.out style of shared libraries */
+ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
+ N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
+ inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+ return -ENOEXEC;
+ }
+ if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
+ (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
+ printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
+ return -ENOEXEC;
+ }
+
+ if (N_FLAGS(ex)) return -ENOEXEC;
+
+ /* For QMAGIC, the starting address is 0x20 into the page. We mask
+ this off to get the starting address for the page */
+
+ start_addr = ex.a_entry & 0xfffff000;
+
+ /* Now use mmap to map the library into memory. */
+ error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
+ N_TXTOFF(ex));
+ if (error != start_addr)
+ return error;
+ len = PAGE_ALIGN(ex.a_text + ex.a_data);
+ bss = ex.a_text + ex.a_data + ex.a_bss;
+ if (bss > len) {
+ error = do_mmap(NULL, start_addr + len, bss-len,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_PRIVATE|MAP_FIXED, 0);
+ if (error != start_addr + len)
+ return error;
+ }
+ return 0;
+}
+
+static int
+load_aout32_library(int fd)
+{
+ int retval;
+
+ MOD_INC_USE_COUNT;
+ retval = do_load_aout32_library(fd);
+ MOD_DEC_USE_COUNT;
+ return retval;
+}
+
+
+__initfunc(int init_aout32_binfmt(void))
+{
+ return register_binfmt(&aout32_format);
+}
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index 05d50fe56..9ab2b7aca 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -6,6 +6,8 @@
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB;
+#define elf_check_arch(x) (((x) == EM_SPARC) || ((x) == EM_SPARC32PLUS))
+
#include <asm/processor.h>
#include <linux/module.h>
#include <linux/config.h>
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 695ad680e..d6cdf9162 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -6,7 +6,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <asm/asi.h>
#include <asm/system.h>
+#include <asm/fpumacro.h>
struct cpu_iu_info {
short manuf;
@@ -26,6 +28,7 @@ struct cpu_fp_info {
*/
struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x17, 0x10, 0, "UltraSparc I integrated FPU"},
+ { 0x22, 0x10, 0, "UltraSparc II integrated FPU"},
{ 0x17, 0x11, 0, "UltraSparc II integrated FPU"},
{ 0x17, 0x12, 0, "UltraSparc III integrated FPU"},
};
@@ -34,6 +37,7 @@ struct cpu_fp_info linux_sparc_fpu[] = {
struct cpu_iu_info linux_sparc_chips[] = {
{ 0x17, 0x10, "TI UltraSparc I (SpitFire)"},
+ { 0x22, 0x10, "TI UltraSparc II (BlackBird)"},
{ 0x17, 0x11, "TI UltraSparc II (BlackBird)"},
{ 0x17, 0x12, "TI UltraSparc III (Cheetah)"}, /* A guess... */
};
@@ -50,11 +54,20 @@ __initfunc(void cpu_probe(void))
int manuf, impl;
unsigned i, cpuid;
long ver, fpu_vers;
-
- cpuid = get_cpuid();
+ long fprs;
+#ifndef __SMP__
+ cpuid = 0;
+#else
+#error SMP not supported on sparc64 yet
+ /* cpuid = get_cpuid(); */
+#endif
+
+ fprs = fprs_read ();
+ fprs_write (FPRS_FEF);
__asm__ __volatile__ ("rdpr %%ver, %0; stx %%fsr, [%1]" : "=r" (ver) : "r" (&fpu_vers));
-
+ fprs_write (fprs);
+
manuf = ((ver >> 48)&0xffff);
impl = ((ver >> 32)&0xffff);
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index 6aadd14e0..5e6705896 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -6,7 +6,6 @@
#include <linux/kernel.h>
#include <linux/tasks.h>
-#include <linux/config.h>
#include <linux/init.h>
#include <asm/page.h>
@@ -15,7 +14,7 @@
#include <asm/smp.h>
struct prom_cpuinfo linux_cpus[NCPUS];
-int linux_num_cpus;
+int linux_num_cpus = 0;
extern void cpu_probe(void);
@@ -54,7 +53,7 @@ device_scan(unsigned long mem_start))
};
if(cpu_ctr == 0) {
printk("No CPU nodes found, cannot continue.\n");
- halt();
+ prom_halt();
}
printk("Found %d CPU prom device tree node(s).\n", cpu_ctr);
};
diff --git a/arch/sparc64/kernel/dtlb_miss.S b/arch/sparc64/kernel/dtlb_miss.S
index 31b87f3de..b034ef407 100644
--- a/arch/sparc64/kernel/dtlb_miss.S
+++ b/arch/sparc64/kernel/dtlb_miss.S
@@ -1,4 +1,4 @@
-/* $Id: dtlb_miss.S,v 1.11 1997/04/10 01:59:35 davem Exp $
+/* $Id: dtlb_miss.S,v 1.12 1997/06/26 12:47:08 jj Exp $
* dtlb_miss.S: Data TLB miss code, this is included directly
* into the trap table.
*
@@ -19,9 +19,11 @@
* }
* goto longer_processing;
* } else {
- * if(fault_address >= KERNBASE &&
- * fault_address < VMALLOC_START) {
- * tlb_load(__pa(fault_address) | PAGE_KERNEL);
+ * if(fault_address >= PAGE_OFFSET) {
+ * pte_val = PAGE_KERNEL;
+ * if (fault_address & 0x10000000000)
+ * pte_val = PAGE_KERNEL_IO;
+ * tlb_load(__pa(fault_address) | pte_val);
* return_from_trap();
* } else {
* pgd = pgd_offset(swapper_pg_dir, fault_address);
@@ -32,9 +34,9 @@
* This is optimized for user TLB misses on purpose.
*/
-#define KERN_HIGHBITS (_PAGE_VALID | _PAGE_SZ4MB)
+#define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-#define KERN_LOWBITS_IO (_PAGE_E | _PAGE_P | _PAGE_W)
+#define KERN_LOWBITS_IO ((_PAGE_E | _PAGE_P | _PAGE_W) ^ KERN_LOWBITS)
/* ICACHE line 1 */
/*0x00*/ ldxa [%g0] ASI_DMMU, %g1 ! Get TAG_TARGET
@@ -57,17 +59,17 @@
1:/*0x3c*/ retry ! Trap return
3: /* ICACHE line 3 */
- /*0x40*/ sllx %g1, 43, %g5 ! This gets >= VMALLOC_START...
- /*0x44*/ brlz,pn %g5, 4f ! ...if now less than zero.
- /*0x48*/ andncc %g1, 0x3ff, %g0 ! Slick trick...
- /*0x4c*/ be,pn %xcc, 4f ! Yes, it is some PROM mapping
- /*0x50*/ srlx %g5, 21, %g5 ! This is now physical page
- /*0x54*/ sethi %uhi(KERN_HIGHBITS), %g1 ! Construct PTE
- /*0x58*/ sllx %g1, 32, %g1 ! Move priv bits up
- /*0x5c*/ or %g1, %g5, %g1 ! Or in the page
+ /*0x40*/ sllx %g1, 22, %g5 ! This is now physical page + PAGE_OFFSET
+ /*0x44*/ brgez,pn %g5, 4f ! If >= 0, then walk down page tables
+ /*0x48*/ sethi %uhi(KERN_HIGHBITS), %g1 ! Construct PTE ^ PAGE_OFFSET
+ /*0x4c*/ andcc %g3, 0x80, %g0 ! Slick trick...
+ /*0x50*/ sllx %g1, 32, %g1 ! Move high bits up
+ /*0x54*/ or %g1, (KERN_LOWBITS), %g1 ! Assume not IO
+ /*0x58*/ bne,a,pn %icc, 5f ! Is it an IO page?
+ /*0x5c*/ xor %g1, (KERN_LOWBITS_IO), %g1 ! Aha, it is IO...
/* ICACHE line 4 */
- /*0x60*/ or %g1, (KERN_LOWBITS), %g1 ! Set low priv bits
+5:/*0x60*/ xor %g1, %g5, %g1 ! Slick trick II...
/*0x64*/ stxa %g1, [%g0] ASI_DTLB_DATA_IN ! TLB load
/*0x68*/ retry ! Trap return
4:/*0x6c*/ ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! For PTE offset
@@ -78,3 +80,4 @@
#undef KERN_HIGHBITS
#undef KERN_LOWBITS
+#undef KERN_LOWBITS_IO
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 0d95e1b75..a410cfe80 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1,7 +1,7 @@
-/* $Id: entry.S,v 1.31 1997/06/02 06:33:25 davem Exp $
+/* $Id: entry.S,v 1.50 1997/07/15 16:53:00 davem Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -25,7 +25,6 @@
#define NR_SYSCALLS 256 /* Each OS is different... */
.text
- .align 4
.globl sparc64_dtlb_prot_catch, sparc64_dtlb_refbit_catch
.globl sparc64_itlb_refbit_catch
@@ -38,24 +37,27 @@
* to update the dirty bit) and since we left crap in the sfsr
* it will not get updated properly.
*/
+ .align 32
sparc64_dtlb_prot_catch:
wr %g0, ASI_DMMU, %asi
rdpr %pstate, %g1
wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tl, %g3
ldxa [%g0 + TLB_TAG_ACCESS] %asi, %g5
- ldxa [%g0 + TLB_SFSR] %asi, %g4
- cmp %g3, 1
stxa %g0, [%g0 + TLB_SFSR] %asi
+ membar #Sync
+ cmp %g3, 1
+
bgu,a,pn %icc, winfix_trampoline
rdpr %tpc, %g3
ba,pt %xcc, etrap
rd %pc, %g7
- b,a,pt %xcc, 1f
-
+ b,pt %xcc, 1f
+ mov 1, %o2
sparc64_dtlb_refbit_catch:
srlx %g5, 9, %g4
and %g4, ((_PAGE_PRESENT | _PAGE_READ) >> 9), %g4
+
cmp %g4, ((_PAGE_PRESENT | _PAGE_READ) >> 9)
be,a,pt %xcc, 2f
mov 1, %g4
@@ -64,23 +66,24 @@ sparc64_dtlb_refbit_catch:
wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tl, %g3
ldxa [%g0 + TLB_TAG_ACCESS] %asi, %g5
+
cmp %g3, 1
- clr %g4 ! sfsr not updated for tlb misses
- bgu,a,pn %icc, winfix_trampoline
+ bgu,pn %icc, winfix_trampoline
rdpr %tpc, %g3
- ba,pt %xcc, etrap
+ b,pt %xcc, etrap
rd %pc, %g7
-1:
- mov %l5, %o4 ! raw tag access
- mov %l4, %o5 ! raw sfsr
- srlx %l5, PAGE_SHIFT, %o3
- clr %o1 ! text_fault == 0
- sllx %o3, PAGE_SHIFT, %o3 ! address
- and %l4, 0x4, %o2 ! write == sfsr.W
+ clr %o2
+1: srlx %l5, PAGE_SHIFT, %o1
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
call do_sparc64_fault
- add %sp, STACK_BIAS + REGWIN_SZ, %o0 ! pt_regs ptr
- ba,pt %xcc, rtrap
+ sllx %o1, PAGE_SHIFT, %o1
+ b,pt %xcc, rtrap
clr %l6
+ nop
+ nop
+ nop
+ nop
sparc64_itlb_refbit_catch:
srlx %g5, 9, %g4
@@ -90,47 +93,119 @@ sparc64_itlb_refbit_catch:
mov 1, %g4
rdpr %pstate, %g1
wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
- ba,pt %xcc, etrap
- rd %pc, %g7
+ rdpr %tpc, %g5
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC], %o3
- mov 1, %o1 ! text_fault == 1
- clr %o2 ! write == 0
- clr %o4 ! tag access (N/A)
- clr %o5 ! raw sfsr (N/A)
- call do_sparc64_fault
- add %sp, STACK_BIAS + REGWIN_SZ, %o0 ! pt_regs ptr
- ba,pt %xcc, rtrap
- clr %l6
-
-2:
- sllx %g4, 63, %g4 ! _PAGE_VALID
+ b,pt %xcc, etrap
+ rd %pc, %g7
+ b,pt %xcc, 1b
+ clr %o2
+2: sllx %g4, 63, %g4 ! _PAGE_VALID
or %g5, _PAGE_ACCESSED, %g5
or %g5, %g4, %g5
stxa %g5, [%g3 + %g1] ASI_PHYS_USE_EC ! store new PTE
+
stxa %g5, [%g0] ASI_DTLB_DATA_IN ! TLB load
retry
-
-3:
- sllx %g4, 63, %g4 ! _PAGE_VALID
+3: sllx %g4, 63, %g4 ! _PAGE_VALID
or %g5, _PAGE_ACCESSED, %g5
or %g5, %g4, %g5
stxa %g5, [%g3 + %g1] ASI_PHYS_USE_EC ! store new PTE
stxa %g5, [%g0] ASI_ITLB_DATA_IN ! TLB load
retry
+ /* This is trivial with the new code... */
+ .align 32
+ .globl do_fpdis
+do_fpdis:
+ wr %g0, FPRS_FEF, %fprs
+ ldx [%g6 + AOFF_task_flags], %g2
+ sethi %hi(0x00100000), %g4 ! XXX PF_USEDFPU
+ andcc %g2, %g4, %g0
+
+ bne,a,pt %xcc, fpload_fromkstk
+ sethi %hi((((PAGE_SIZE<<1)-((64*4)+(2*8))) & ~(64 - 1))), %g2
+ fzero %f0
+ fzero %f2
+ faddd %f0, %f2, %f4
+ fmuld %f0, %f2, %f6
+ faddd %f0, %f2, %f8
+ fmuld %f0, %f2, %f10
+
+ faddd %f0, %f2, %f12
+ fmuld %f0, %f2, %f14
+ faddd %f0, %f2, %f16
+ fmuld %f0, %f2, %f18
+ faddd %f0, %f2, %f20
+ fmuld %f0, %f2, %f22
+ faddd %f0, %f2, %f24
+ fmuld %f0, %f2, %f26
+
+ faddd %f0, %f2, %f28
+ fmuld %f0, %f2, %f30
+ faddd %f0, %f2, %f32
+ fmuld %f0, %f2, %f34
+ faddd %f0, %f2, %f36
+ fmuld %f0, %f2, %f38
+ faddd %f0, %f2, %f40
+ fmuld %f0, %f2, %f42
+
+ faddd %f0, %f2, %f44
+ fmuld %f0, %f2, %f46
+ ldx [%g6 + AOFF_task_flags], %g2
+ faddd %f0, %f2, %f48
+ fmuld %f0, %f2, %f50
+ or %g2, %g4, %g2
+ faddd %f0, %f2, %f52
+ fmuld %f0, %f2, %f54
+
+ stx %g2, [%g6 + AOFF_task_flags]
+ faddd %f0, %f2, %f56
+ sethi %hi(empty_zero_page), %g3
+ fmuld %f0, %f2, %f58
+
+ faddd %f0, %f2, %f60
+ ldx [%g3], %fsr ! wheee, empty_zero_page
+ b,pt %xcc, fpdis_exit
+ wr %g0, 0, %gsr
+
+fpload_fromkstk:
+ or %g2, %lo((((PAGE_SIZE<<1)-((64*4)+(2*8))) & ~(64 - 1))), %g2
+ add %g6, %g2, %g2
+ mov SECONDARY_CONTEXT, %g3
+ stxa %g0, [%g3] ASI_DMMU
+ flush %g2
+ wr %g0, ASI_BLK_S, %asi ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #StoreLoad | #LoadLoad
+
+ ldda [%g2 + 0x000] %asi, %f0
+ ldda [%g2 + 0x040] %asi, %f16
+ ldda [%g2 + 0x080] %asi, %f32
+ ldda [%g2 + 0x0c0] %asi, %f48
+ ldx [%g2 + 0x100], %fsr
+ ldx [%g2 + 0x108], %g2
+ membar #Sync
+ wr %g2, 0, %gsr
+fpdis_exit:
+ rdpr %tstate, %g3
+ sethi %hi(TSTATE_PEF), %g4
+ or %g3, %g4, %g3 ! anal...
+ wrpr %g3, %tstate
+ retry
+
+#ifdef __SMP__
/* Note check out head.h, this code isn't even used for UP,
* for SMP things will be different. In particular the data
* registers for cross calls will be:
*
- * DATA 0: Address of function to call
- * DATA 1: Argument 1, place in %g6
- * DATA 2: Argument 2, place in %g7
+ * DATA 0: [low 32-bits] Address of function to call, jmp to this
+ * [high 32-bits] MMU Context Argument 0, place in %g5
+ * DATA 1: Address Argument 1, place in %g6
+ * DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
- * flushing in very quickly.
+ * flushing very quickly.
*/
- .align 4
+ .align 32
.globl do_ivec
do_ivec:
ldxa [%g0] ASI_INTR_RECEIVE, %g1
@@ -139,16 +214,14 @@ do_ivec:
mov 0x40, %g2
/* Load up Interrupt Vector Data 0 register. */
- sethi %uhi(ivector_to_mask), %g4
+ sethi %hi(KERNBASE), %g4
ldxa [%g2] ASI_UDB_INTR_R, %g3
- or %g4, %ulo(ivector_to_mask), %g4
+ cmp %g3, %g4
+ bgeu,pn %xcc, do_ivec_xcall
+ nop
and %g3, 0x7ff, %g3
- sllx %g4, 32, %g4
- sethi %hi(ivector_to_mask), %g5
sllx %g3, 3, %g3
- or %g5, %lo(ivector_to_mask), %g5
- add %g5, %g4, %g4
- ldx [%g4 + %g3], %g2
+ ldx [%g1 + %g3], %g2
brz,pn %g2, do_ivec_spurious
nop
@@ -163,9 +236,17 @@ do_ivec_return:
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
retry
-
+do_ivec_xcall:
+ srlx %g3, 32, %g5
+ add %g2, 0x10, %g2
+ sra %g3, 0, %g3
+ ldxa [%g2] ASI_UDB_INTR_R, %g6
+ add %g2, 0x10, %g2
+ jmpl %g3, %g0
+ ldxa [%g2] ASI_UDB_INTR_R, %g7
do_ivec_spurious:
stxa %g0, [%g0] ASI_INTR_RECEIVE
+ membar #Sync
rdpr %pstate, %g1
wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
ba,pt %xcc, etrap
@@ -174,8 +255,132 @@ do_ivec_spurious:
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,pt %xcc, rtrap
clr %l6
+#endif /* __SMP__ */
+
+ .globl getcc, setcc
+getcc:
+ ldx [%o0 + PT_V9_TSTATE], %o1
+ srlx %o1, 32, %o1
+ and %o1, 0xf, %o1
+ retl
+ stx %o1, [%o0 + PT_V9_G1]
+setcc:
+ ldx [%o0 + PT_V9_TSTATE], %o1
+ ldx [%o0 + PT_V9_G1], %o2
+ or %g0, %ulo(TSTATE_ICC), %o3
+ sllx %o3, 32, %o3
+ andn %o1, %o3, %o1
+ sllx %o2, 32, %o2
+ and %o2, %o3, %o2
+ or %o1, %o2, %o1
+ retl
+ stx %o1, [%o0 + PT_V9_TSTATE]
+
+#ifdef CONFIG_BLK_DEV_FD
+ .globl floppy_hardint
+floppy_hardint:
+ sethi %hi(doing_pdma), %g1
+ ld [%g1 + %lo(doing_pdma)], %g2
+ brz,pn %g2, floppy_dosoftint
+ sethi %hi(fdc_status), %g3
+ ldx [%g3 + %lo(fdc_status)], %g3
+ sethi %hi(pdma_vaddr), %g5
+ ldx [%g5 + %lo(pdma_vaddr)], %g4
+ sethi %hi(pdma_size), %g5
+ ldx [%g5 + %lo(pdma_size)], %g5
+
+next_byte:
+ ldub [%g3], %g7
+ andcc %g7, 0x80, %g0
+ be,pn %icc, floppy_fifo_emptied
+ andcc %g7, 0x20, %g0
+ be,pn %icc, floppy_overrun
+ andcc %g7, 0x40, %g0
+ be,pn %icc, floppy_write
+ sub %g5, 1, %g5
+
+ ldub [%g3 + 1], %g7
+ orcc %g0, %g5, %g0
+ stb %g7, [%g4]
+ bne,pn %xcc, next_byte
+ add %g4, 1, %g4
+
+ b,pt %xcc, floppy_tdone
+ nop
+
+floppy_write:
+ ldub [%g4], %g7
+ orcc %g0, %g5, %g0
+ stb %g7, [%g3 + 1]
+ bne,pn %xcc, next_byte
+ add %g4, 1, %g4
+
+floppy_tdone:
+ sethi %hi(pdma_vaddr), %g1
+ stx %g4, [%g1 + %lo(pdma_vaddr)]
+ sethi %hi(pdma_size), %g1
+ stx %g5, [%g1 + %lo(pdma_size)]
+ sethi %hi(auxio_register), %g1
+ ldx [%g1 + %lo(auxio_register)], %g7
+ ldub [%g7], %g5
+ or %g5, 0xc2, %g5
+ stb %g5, [%g7]
+ andn %g5, 0x02, %g5
+
+ nop; nop; nop; nop; nop; nop;
+ nop; nop; nop; nop; nop; nop;
+
+ stb %g5, [%g7]
+ sethi %hi(doing_pdma), %g1
+ b,pt %xcc, floppy_dosoftint
+ st %g0, [%g1 + %lo(doing_pdma)]
+
+floppy_fifo_emptied:
+ sethi %hi(pdma_vaddr), %g1
+ stx %g4, [%g1 + %lo(pdma_vaddr)]
+ sethi %hi(pdma_size), %g1
+ stx %g5, [%g1 + %lo(pdma_size)]
+ sethi %hi(irq_action), %g1
+ or %g1, %lo(irq_action), %g1
+ ldx [%g1 + (11 << 3)], %g3 ! irqaction[floppy_irq]
+ ldx [%g3 + 0x10], %g4 ! action->mask
+ st %g0, [%g4] ! SYSIO_ICLR_IDLE
+ membar #Sync ! probably not needed...
+ retry
+
+floppy_overrun:
+ sethi %hi(pdma_vaddr), %g1
+ stx %g4, [%g1 + %lo(pdma_vaddr)]
+ sethi %hi(pdma_size), %g1
+ stx %g5, [%g1 + %lo(pdma_size)]
+ sethi %hi(doing_pdma), %g1
+ st %g0, [%g1 + %lo(doing_pdma)]
+
+floppy_dosoftint:
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ b,pt %xcc, etrap_irq
+ rd %pc, %g7
+
+ mov 11, %o0
+ mov 0, %o1
+ call sparc_floppy_irq
+ add %sp, STACK_BIAS + REGWIN_SZ, %o2
- .globl do_mna
+ b,pt %xcc, rtrap
+ clr %l6
+
+#endif /* CONFIG_BLK_DEV_FD */
+
+ /* XXX Here is stuff we still need to write... -DaveM XXX */
+ .globl indirect_syscall, netbsd_syscall, solaris_syscall
+indirect_syscall:
+netbsd_syscall:
+solaris_syscall:
+ retl
+ nop
+
+ .globl do_mna
do_mna:
rdpr %tl, %g3
cmp %g3, 1
@@ -195,187 +400,239 @@ breakpoint_trap:
ba,pt %xcc, rtrap
nop
- .globl sys_pipe, sys_execve, sys_sigpause, sys_nis_syscall
- .globl sys_sigsuspend, sys_sigreturn
- .globl sys32_execve, sys_ptrace
-
-sys_pipe:
- sethi %hi(sparc_pipe), %g1
- add %g1, %g4, %g1
- jmpl %g1 + %lo(sparc_pipe), %g0
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
-sys_nis_syscall:
- sethi %hi(c_sys_nis_syscall), %g1
- add %g1, %g4, %g1
- jmpl %g1 + %lo(c_sys_nis_syscall), %g0
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
-sys_execve:
- sethi %hi(sparc_execve), %g1
- add %g1, %g4, %g1
- jmpl %g1 + %lo(sparc_execve), %g0
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
-sys32_execve:
- sethi %hi(sparc32_execve), %g1
- add %g1, %g4, %g1
- jmpl %g1 + %lo(sparc32_execve), %g0
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
-sys_sigpause:
- /* NOTE: %o0 has a correct value already */
- call do_sigpause
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
-
- ld [%curptr + AOFF_task_flags], %l5
- andcc %l5, 0x20, %g0
- be,pt %icc, rtrap
- clr %l6
- call syscall_trace
+ /* SunOS uses syscall zero as the 'indirect syscall' it looks
+ * like indir_syscall(scall_num, arg0, arg1, arg2...); etc.
+ * This is complete brain damage.
+ */
+ .globl sunos_indir
+sunos_indir:
+ srl %o0, 0, %o0
+ mov %o7, %l4
+ cmp %o0, NR_SYSCALLS
+ blu,a,pt %icc, 1f
+ sll %o0, 0x3, %o0
+ sethi %hi(sunos_nosys), %l6
+ b,pt %xcc, 2f
+ or %l6, %lo(sunos_nosys), %l6
+1: sethi %hi(sunos_sys_table), %l7
+ or %l7, %lo(sunos_sys_table), %l7
+ ldx [%l7 + %o0], %l6
+2: mov %o1, %o0
+ mov %o2, %o1
+ mov %o3, %o2
+ mov %o4, %o3
+ mov %o5, %o4
+ call %l6
+ mov %l4, %o7
+
+ .globl sunos_getpid
+sunos_getpid:
+ call sys_getppid
nop
- ba,pt %xcc, rtrap
- clr %l6
-
-sys_sigsuspend:
- call do_sigsuspend
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
- ld [%curptr + AOFF_task_flags], %l5
- andcc %l5, 0x20, %g0
- be,pt %icc, rtrap
- clr %l6
- call syscall_trace
+ call sys_getpid
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+ b,pt %xcc, ret_sys_call
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
+
+ /* SunOS getuid() returns uid in %o0 and euid in %o1 */
+ .globl sunos_getuid
+sunos_getuid:
+ call sys_geteuid
nop
- ba,pt %xcc, rtrap
- clr %l6
-
-sys_sigreturn:
- call do_sigreturn
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
- ld [%curptr + AOFF_task_flags], %l5
- andcc %l5, 0x20, %g0
- be,pt %icc, rtrap
- clr %l6
- call syscall_trace
+ call sys_getuid
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+ b,pt %xcc, ret_sys_call
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
+
+ /* SunOS getgid() returns gid in %o0 and egid in %o1 */
+ .globl sunos_getgid
+sunos_getgid:
+ call sys_getegid
nop
- ba,pt %xcc, rtrap
- clr %l6
+ call sys_getgid
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+ b,pt %xcc, ret_sys_call
+ stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
-sys_ptrace:
- call do_ptrace
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ /* SunOS's execv() call only specifies the argv argument, the
+ * environment settings are the same as the calling processes.
+ */
+ .globl sunos_execv
+sunos_execv:
+ sethi %hi(sparc32_execve), %g1
+ stx %g0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
+ jmpl %g1 + %lo(sparc32_execve), %g0
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+
+ .globl sys_pipe, sys_execve, sys_sigpause, sys_nis_syscall
+ .globl sys_sigsuspend, sys_sigreturn
+ .globl sys32_execve, sys_ptrace
+ .align 32
+sys_pipe: sethi %hi(sparc_pipe), %g1
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ jmpl %g1 + %lo(sparc_pipe), %g0
+ nop
+sys_nis_syscall:sethi %hi(c_sys_nis_syscall), %g1
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ jmpl %g1 + %lo(c_sys_nis_syscall), %g0
+ nop
+
+sys_execve: sethi %hi(sparc_execve), %g1
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ jmpl %g1 + %lo(sparc_execve), %g0
+ nop
+sys32_execve: sethi %hi(sparc32_execve), %g1
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ jmpl %g1 + %lo(sparc32_execve), %g0
+ nop
+
+ /* NOTE: %o0 has a correct value already */
+sys_sigpause: call do_sigpause
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+ ldx [%curptr + AOFF_task_flags], %l5
+ andcc %l5, 0x20, %g0
+ be,pt %icc, rtrap
+ clr %l6
+ call syscall_trace
+ nop
+
+ ba,pt %xcc, rtrap
+ clr %l6
+linux_sparc_ni_syscall:
+ sethi %hi(sys_ni_syscall), %l7
+ b,pt %xcc,syscall_is_too_hard
+ or %l7, %lo(sys_ni_syscall), %l7
+ nop
+
+ .align 32
+sys_sigsuspend: call do_sigsuspend
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ ldx [%curptr + AOFF_task_flags], %l5
+ andcc %l5, 0x20, %g0
+ be,pt %icc, rtrap
+ clr %l6
+ call syscall_trace
+ nop
+
+ ba,pt %xcc, rtrap
+ clr %l6
- ld [%curptr + AOFF_task_flags], %l5
- andcc %l5, 0x20, %g0
- be,pt %icc, rtrap
- clr %l6
- call syscall_trace
- nop
- ba,pt %xcc, rtrap
- clr %l6
+ .align 32
+sys_sigreturn: call do_sigreturn
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ ldx [%curptr + AOFF_task_flags], %l5
+ andcc %l5, 0x20, %g0
+ be,pt %icc, rtrap
+ clr %l6
+ call syscall_trace
+ nop
+
+ ba,pt %xcc, rtrap
+ clr %l6
+
+ .align 32
+sys_ptrace: call do_ptrace
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ ldx [%curptr + AOFF_task_flags], %l5
+ andcc %l5, 0x20, %g0
+ be,pt %icc, rtrap
+ clr %l6
+ call syscall_trace
+ nop
+
+ ba,pt %xcc, rtrap
+ clr %l6
- /* This is how fork() was meant to be done, 12 instruction entry. -DaveM */
+ /* This is how fork() was meant to be done, 12 instruction entry.
+ *
+ * I questioned the following code briefly, let me clear things
+ * up so you must not reason on it like I did.
+ *
+ * Know the fork_kpsr etc. we use in the sparc32 port? We don't
+ * need it here because the only piece of window state we copy to
+ * the child is the CWP register. Even if the parent sleeps,
+ * we are safe because we stuck it into pt_regs of the parent
+ * so it will not change.
+ *
+ * XXX This raises the question, whether we can do the same on
+ * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The
+ * XXX answer is yes. We stick fork_kpsr in UREG_G0 and
+ * XXX fork_kwim in UREG_G1 (global registers are considered
+ * XXX volatile across a system call in the sparc ABI I think
+ * XXX if it isn't we can use regs->y instead, anyone who depends
+ * XXX upon the Y register being preserved across a fork deserves
+ * XXX to lose).
+ *
+ * In fact we should take advantage of that fact for other things
+ * during system calls...
+ */
.globl sys_fork, sys_vfork, sys_clone
+ .globl ret_from_syscall, ret_from_smpfork
+ .align 32
sys_fork:
-sys_vfork:
- mov SIGCHLD, %o0
- clr %o1
-sys_clone:
- mov %o7, %l5
- save %sp, -REGWIN_SZ, %sp
- flushw
- restore %g0, %g0, %g0
- rdpr %cwp, %o4
- add %sp, STACK_BIAS + REGWIN_SZ, %o2
- movrz %o1, %fp, %o1
-
- /* Don't try this at home. */
- stx %o4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G0]
- call do_fork
- mov %l5, %o7
-
-linux_sparc_ni_syscall:
- sethi %hi(sys_ni_syscall), %l7
- or %l7, %lo(sys_ni_syscall), %l7
- ba,pt %xcc,syscall_is_too_hard
- add %l7, %g4, %l7
-
-linux_fast_syscall:
- andn %l7, 3, %l7
- mov %i0, %o0
- mov %i1, %o1
- mov %i2, %o2
- jmpl %l7 + %g0, %g0
- mov %i3, %o3
+sys_vfork: mov SIGCHLD, %o0
+ clr %o1
+sys_clone: mov %o7, %l5
+/*???*/ save %sp, -REGWIN_SZ, %sp
+ flushw
+/*???*/ restore %g0, %g0, %g0
+ rdpr %cwp, %o4
+ add %sp, STACK_BIAS + REGWIN_SZ, %o2
+
+ movrz %o1, %fp, %o1
+ stx %o4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G0]
+ call do_fork
+ mov %l5, %o7
+#ifdef __SMP__
+ret_from_smpfork:
+ sethi %hi(scheduler_lock), %o4
+ membar #StoreStore | #LoadStore
+ stb %g0, [%o4 + %lo(scheduler_lock)]
+#endif
+ret_from_syscall:
+ b,pt %xcc, ret_sys_call
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
linux_syscall_trace:
- call syscall_trace
- nop
- mov %i0, %o0
- mov %i1, %o1
- mov %i2, %o2
- mov %i3, %o3
- ba,pt %xcc, 2f
- mov %i4, %o4
-
- .globl ret_from_syscall
-ret_from_syscall:
- ba,pt %xcc, ret_sys_call
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %o0
+ call syscall_trace
+ nop
+ mov %i0, %o0
+ mov %i1, %o1
+ mov %i2, %o2
+ mov %i3, %o3
+ b,pt %xcc, 2f
+ mov %i4, %o4
/* Linux native and SunOS system calls enter here... */
- .align 4
- .globl linux_sparc_syscall
+ .align 32
+ .globl linux_sparc_syscall, syscall_is_too_hard, ret_sys_call
linux_sparc_syscall:
/* Direct access to user regs, must faster. */
- cmp %g1, NR_SYSCALLS
- add %l7, %g4, %l7
- bgeu,pn %xcc, linux_sparc_ni_syscall
- sll %g1, 3, %l4
- ldx [%l7 + %l4], %l7
- andcc %l7, 1, %g0
- bne,pn %icc, linux_fast_syscall
- /* Just do the next insn in the delay slot */
-
- .globl syscall_is_too_hard
+ cmp %g1, NR_SYSCALLS ! IEU1 Group
+ bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
+ mov %i0, %o0 ! IEU0
+ sll %g1, 3, %l4 ! IEU0 Group
+ mov %i1, %o1 ! IEU1
+ ldx [%l7 + %l4], %l7 ! Load
syscall_is_too_hard:
-#ifdef SYSCALL_TRACING /* Debugging... */
- mov %g1, %o0 ! o0=scall, o1=ptregs
- call syscall_trace_entry
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
-#endif
- mov %i0, %o0
- mov %i1, %o1
- mov %i2, %o2
-
- ldx [%curptr + AOFF_task_flags], %l5
- mov %i3, %o3
- mov %i4, %o4
- andcc %l5, 0x20, %g0
- bne,pn %icc, linux_syscall_trace
- mov %i0, %l5
-2:
- call %l7
- mov %i5, %o5
-
-#ifdef SYSCALL_TRACING /* Debugging... */
- call syscall_trace_exit ! o0=sysret, o1=ptregs
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
-#endif
+ mov %i2, %o2 ! IEU0 Group
+ ldx [%curptr + AOFF_task_flags], %l5 ! Load
+
+ st %g0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_FPRS]
+ mov %i3, %o3 ! IEU1
+ mov %i4, %o4 ! IEU0 Group
+ andcc %l5, 0x20, %g0 ! IEU1 2 bubbles
+ bne,pn %icc, linux_syscall_trace ! CTI Group
+ mov %i0, %l5 ! IEU0
+2: call %l7 ! CTI Group brk forced
+ mov %i5, %o5 ! IEU0
stx %o0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
- .globl ret_sys_call
ret_sys_call:
ldx [%curptr + AOFF_task_flags], %l6
- ldx [%curptr + AOFF_task_tss + AOFF_thread_flags], %l2
+ sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
- and %l2, SPARC_FLAG_32BIT, %l2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %g3
- brnz,a,pn %l2, 1f
- sra %o0, 0, %o0
-1:
cmp %o0, -ENOIOCTLCMD
sllx %g2, 32, %g2
bgeu,pn %xcc, 1f
@@ -383,13 +640,12 @@ ret_sys_call:
/* System call success, clear Carry condition code. */
andn %g3, %g2, %g3
- clr %l6
stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1 ! pc = npc
add %l1, 0x4, %l2 !npc = npc+4
stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
- ba,pt %xcc, rtrap
+ b,pt %xcc, rtrap_clr_l6
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
1:
/* System call failure, set Carry condition code.
@@ -403,10 +659,10 @@ ret_sys_call:
bne,pn %icc, linux_syscall_trace2
ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1 ! pc = npc
add %l1, 0x4, %l2 !npc = npc+4
+
stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]
- ba,pt %xcc, rtrap
+ b,pt %xcc, rtrap
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
-
linux_syscall_trace2:
call syscall_trace
add %l1, 0x4, %l2 /* npc = npc+4 */
@@ -414,4 +670,15 @@ linux_syscall_trace2:
ba,pt %xcc, rtrap
stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC]
-/* End of entry.S */
+ .align 32
+ .globl __flushw_user
+__flushw_user:
+1: save %sp, -128, %sp
+ rdpr %otherwin, %g1
+ brnz,pt %g1, 1b
+ add %g2, 1, %g2
+1: sub %g2, 1, %g2
+ brnz,pt %g2, 1b
+ restore %g0, %g0, %g0
+2: retl
+ mov %g3, %o7
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index efb1b48fc..4daf30e21 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -1,4 +1,4 @@
-/* $Id: etrap.S,v 1.21 1997/06/02 06:33:28 davem Exp $
+/* $Id: etrap.S,v 1.30 1997/06/30 10:31:37 jj Exp $
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -12,88 +12,121 @@
#include <asm/spitfire.h>
#include <asm/head.h>
- /* We assume that pstate, when entering this, has AG and
- * IE bits set, MG and IG clear.
- *
- * We also guarentee for caller that AG %g4 and %g5 will have
- * their values preserved and left in %l4 and %l5 respectively
- * for him (fault handling needs this).
- */
+#define FPUREG_SZ ((64 * 4) + (2 * 8))
+#define TASK_REGOFF ((((PAGE_SIZE<<1)-FPUREG_SZ)&~(64-1)) - \
+ TRACEREG_SZ-REGWIN_SZ)
- .text
- .align 32
- .globl etrap, etrap_irq, etraptl1
-etrap:
- rdpr %pil, %g2
-etrap_irq:
- rdpr %tstate, %g1
- sllx %g2, 20, %g2
- or %g1, %g2, %g1
- andcc %g1, TSTATE_PRIV, %g0
- bne,a,pn %xcc, 1f
- sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g2
- rd %pic, %g3
+ .text
+ .align 32
+ .globl etrap, etrap_irq, etraptl1
- sethi %hi((PAGE_SIZE<<1)-TRACEREG_SZ-REGWIN_SZ), %g2
- or %g2, %lo((PAGE_SIZE<<1)-TRACEREG_SZ-REGWIN_SZ), %g2
- add %g3, %g2, %g2
-1: stx %g1, [%g2 + REGWIN_SZ + PT_V9_TSTATE]
- rdpr %tpc, %g1
- rdpr %tnpc, %g3
- stx %g1, [%g2 + REGWIN_SZ + PT_V9_TPC]
- rd %y, %g1
+etrap: rdpr %pil, %g2
+etrap_irq: rdpr %tstate, %g1
+ sllx %g2, 20, %g2
+ or %g1, %g2, %g1
+ andcc %g1, TSTATE_PRIV, %g0
+ bne,pn %xcc, etrap_maybe_fpu
+ sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g2
+ sethi %hi(TASK_REGOFF), %g2
- stx %g3, [%g2 + REGWIN_SZ + PT_V9_TNPC]
- stx %g1, [%g2 + REGWIN_SZ + PT_V9_Y]
- save %g2, -STACK_BIAS, %sp ! The ordering of these two instructions
- rdpr %pstate, %g1 ! is critical, see winfixup.S for details
- bne,pn %xcc, 2f
- rdpr %canrestore, %g3
- rdpr %wstate, %g6
- wrpr %g0, 7, %cleanwin
+ or %g2, %lo(TASK_REGOFF), %g2
+ add %g6, %g2, %g2
+etrap_maybe_fpu:rd %fprs, %g3
+ brnz,pn %g3, etrap_save_fpu
+ st %g0, [%g2 + REGWIN_SZ + PT_V9_FPRS]
+etrap_after_fpu:rdpr %tpc, %g3
+ stx %g1, [%g2 + REGWIN_SZ + PT_V9_TSTATE]
+ rdpr %tnpc, %g1
- wrpr %g0, 0, %canrestore
- sll %g6, 3, %g6
- wrpr %g3, 0, %otherwin
- wrpr %g6, %wstate
- sethi %uhi(KERNBASE), %g3
- sllx %g3, 32, %g3
- mov PRIMARY_CONTEXT, %g2
- stxa %g0, [%g2] ASI_DMMU
+ stx %g3, [%g2 + REGWIN_SZ + PT_V9_TPC]
+ rd %y, %g3
+ stx %g1, [%g2 + REGWIN_SZ + PT_V9_TNPC]
+ st %g3, [%g2 + REGWIN_SZ + PT_V9_Y]
+ save %g2, -STACK_BIAS, %sp ! The ordering here is
+ rdpr %pstate, %g1 ! critical, see winfixup
+ bne,pn %xcc, 2f
+ rdpr %canrestore, %g3
- flush %g3
-2: wrpr %g0, 0x0, %tl
- mov %g1, %l1
- mov %g4, %l4
- mov %g5, %l5
- mov %g7, %l2
- wrpr %l1, PSTATE_AG, %pstate
- stx %g1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G1]
+ rdpr %wstate, %g2
+ wrpr %g0, 7, %cleanwin
+ wrpr %g0, 0, %canrestore
+ sll %g2, 3, %g2
+ wrpr %g3, 0, %otherwin
+ wrpr %g2, 0, %wstate
+ wr %g0, ASI_DMMU, %asi
+ ldxa [%g0 + PRIMARY_CONTEXT] %asi, %g2
- stx %g2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G2]
- stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G3]
- stx %g4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G4]
- stx %g5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G5]
- stx %g6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G6]
- stx %g7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G7]
- stx %i0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
- stx %i1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+ stxa %g0, [%g0 + PRIMARY_CONTEXT] %asi
+ stxa %g2, [%g0 + SECONDARY_CONTEXT] %asi
+ flush %g6
+2: wrpr %g0, 0x0, %tl
+ or %g1, 0, %l1
+ add %g4, 0, %l4
+ or %g5, 0, %l5
+ add %g7, 0, %l2
- stx %i2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
- stx %i3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3]
- stx %i4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4]
- stx %i5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5]
- stx %i6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6]
- stx %i7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7]
- wrpr %l1, (PSTATE_IE | PSTATE_AG), %pstate
- sethi %uhi(KERNBASE), %g4
+ or %g6, 0, %l6
+ wrpr %l1, (PSTATE_AG|PSTATE_RMO), %pstate
+ stx %g1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G1]
+ stx %g2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G2]
+ stx %g3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G3]
+ stx %g4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G4]
+ stx %g5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G5]
+ stx %g6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G6]
- rd %pic, %g6
- jmpl %l2 + 0x4, %g0
- sllx %g4, 32, %g4
-etraptl1:
- rdpr %tstate, %g1
- sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g2
- ba,pt %xcc, 1b
- andcc %g1, TSTATE_PRIV, %g0
- nop
+ stx %g7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G7]
+ stx %i0, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0]
+ stx %i1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1]
+ stx %i2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2]
+ stx %i3, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3]
+ stx %i4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4]
+ sethi %uhi(PAGE_OFFSET), %g4
+ stx %i5, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5]
+
+ stx %i6, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6]
+ sllx %g4, 32, %g4
+ stx %i7, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7]
+ wrpr %l1, (PSTATE_IE|PSTATE_AG|PSTATE_RMO), %pstate
+ jmpl %l2 + 0x4, %g0
+ mov %l6, %g6
+etrap_save_fpu: and %g3, FPRS_FEF, %g3
+ brz,pn %g3, 2f
+
+ nop
+ be,a,pt %xcc, 3f
+ add %g2, (TRACEREG_SZ + REGWIN_SZ), %g2
+ wr %g0, ASI_BLK_P, %asi
+ add %g2, ((TRACEREG_SZ+REGWIN_SZ)-FPUREG_SZ), %g2
+ andn %g2, (64 - 1), %g2
+1: st %g3, [%g2 - 0x4 /*REGWIN_SZ + PT_V9_FPRS*/]
+ rd %gsr, %g3
+
+ stx %fsr, [%g2 + 0x100]
+ stx %g3, [%g2 + 0x108]
+ membar #StoreStore | #LoadStore
+ stda %f0, [%g2 + 0x000] %asi
+ stda %f16, [%g2 + 0x040] %asi
+ stda %f32, [%g2 + 0x080] %asi
+ stda %f48, [%g2 + 0x0c0] %asi
+ membar #Sync
+
+ sub %g2, (TRACEREG_SZ + REGWIN_SZ), %g2
+2: b,pt %xcc, etrap_after_fpu
+ wr %g0, 0, %fprs
+3: /* Because Ultra lacks ASI_BLK_NUCLEUS a hack has to take place. */
+ mov SECONDARY_CONTEXT, %g3
+ stxa %g0, [%g3] ASI_DMMU
+ flush %g2
+ wr %g0, ASI_BLK_S, %asi
+ nop
+
+ b,pt %xcc, 1b
+ mov FPRS_FEF, %g3
+ nop
+etraptl1: rdpr %tstate, %g1
+ sub %sp, REGWIN_SZ + TRACEREG_SZ - STACK_BIAS, %g2
+ ba,pt %xcc, etrap_maybe_fpu
+ andcc %g1, TSTATE_PRIV, %g0
+ nop
+#undef TASK_REGOFF
+#undef FPUREG_SZ
diff --git a/arch/sparc64/kernel/hack.S b/arch/sparc64/kernel/hack.S
deleted file mode 100644
index 843221395..000000000
--- a/arch/sparc64/kernel/hack.S
+++ /dev/null
@@ -1,170 +0,0 @@
-/* <hack>
- This is just a huge ugly hack to get things compiled.
- Hopefully will disappear quickly, once we get everything
- to compile... */
- .text
- .align 8
- .globl breakpoint
-breakpoint: retl;nop
- .globl do_cee
-do_cee: retl;nop
- .globl do_cee_tl1
-do_cee_tl1: retl;nop
- .globl do_dae_tl1
-do_dae_tl1: retl;nop
- .globl do_div0_tl1
-do_div0_tl1: retl;nop
- .globl do_fpdis_tl1
-do_fpdis_tl1: retl;nop
- .globl do_fpieee_tl1
-do_fpieee_tl1: retl;nop
- .globl do_fpother_tl1
-do_fpother_tl1: retl;nop
- .globl do_iae_tl1
-do_iae_tl1: retl;nop
- .globl do_ill_tl1
-do_ill_tl1: retl;nop
- .globl do_irq_tl1
-do_irq_tl1: retl;nop
- .globl do_lddfmna
-do_lddfmna: retl;nop
- .globl do_lddfmna_tl1
-do_lddfmna_tl1: retl;nop
- .globl do_paw
-do_paw: retl;nop
- .globl do_paw_tl1
-do_paw_tl1: retl;nop
- .globl do_stdfmna
-do_stdfmna: retl;nop
- .globl do_stdfmna_tl1
-do_stdfmna_tl1: retl;nop
- .globl do_tof_tl1
-do_tof_tl1: retl;nop
- .globl do_vaw
-do_vaw: retl;nop
- .globl do_vaw_tl1
-do_vaw_tl1: retl;nop
- .globl floppy_hardint
-floppy_hardint: retl;nop
- .globl get_cpuid
-get_cpuid: retl;mov 0, %o0
- .globl getcc
-getcc: retl;nop
- .globl halt
-halt: retl;nop
- .globl indirect_syscall
-indirect_syscall: retl;nop
- .globl install_linux_ticker
-install_linux_ticker: retl;nop
- .globl install_obp_ticker
-install_obp_ticker: retl;nop
- .globl linux_dbvec
-linux_dbvec: retl;nop
- .globl linux_num_cpus
-linux_num_cpus: retl;nop
- .globl netbsd_syscall
-netbsd_syscall: retl;nop
- .globl setcc
-setcc: retl;nop
- .globl solaris_syscall
-solaris_syscall: retl;nop
- .globl sunos_mmap
-sunos_mmap: retl;nop
- .globl sunos_syscall
-sunos_syscall: retl;nop
- .globl svr4_getcontext
-svr4_getcontext: retl;nop
- .globl svr4_setcontext
-svr4_setcontext: retl;nop
- .globl sunos_accept
-sunos_accept: retl;nop
- .globl sunos_audit
-sunos_audit: retl;nop
- .globl sunos_brk
-sunos_brk: retl;nop
- .globl sunos_execv
-sunos_execv: retl;nop
- .globl sunos_fpathconf
-sunos_fpathconf: retl;nop
- .globl sunos_getdents
-sunos_getdents: retl;nop
- .globl sunos_getdirentries
-sunos_getdirentries: retl;nop
- .globl sunos_getdomainname
-sunos_getdomainname: retl;nop
- .globl sunos_getdtablesize
-sunos_getdtablesize: retl;nop
- .globl sunos_getgid
-sunos_getgid: retl;nop
- .globl sunos_gethostid
-sunos_gethostid: retl;nop
- .globl sunos_getpid
-sunos_getpid: retl;nop
- .globl sunos_getsockopt
-sunos_getsockopt: retl;nop
- .globl sunos_getuid
-sunos_getuid: retl;nop
- .globl sunos_indir
-sunos_indir: retl;nop
- .globl sunos_ioctl
-sunos_ioctl: retl;nop
- .globl sunos_killpg
-sunos_killpg: retl;nop
- .globl sunos_madvise
-sunos_madvise: retl;nop
- .globl sunos_mctl
-sunos_mctl: retl;nop
- .globl sunos_mincore
-sunos_mincore: retl;nop
- .globl sunos_mount
-sunos_mount: retl;nop
- .globl sunos_nop
-sunos_nop: retl;nop
- .globl sunos_nosys
-sunos_nosys: retl;nop
- .globl sunos_open
-sunos_open: retl;nop
- .globl sunos_pathconf
-sunos_pathconf: retl;nop
- .globl sunos_poll
-sunos_poll: retl;nop
- .globl sunos_read
-sunos_read: retl;nop
- .globl sunos_readv
-sunos_readv: retl;nop
- .globl sunos_recv
-sunos_recv: retl;nop
- .globl sunos_sbrk
-sunos_sbrk: retl;nop
- .globl sunos_select
-sunos_select: retl;nop
- .globl sunos_semsys
-sunos_semsys: retl;nop
- .globl sunos_send
-sunos_send: retl;nop
- .globl sunos_setpgrp
-sunos_setpgrp: retl;nop
- .globl sunos_setsockopt
-sunos_setsockopt: retl;nop
- .globl sunos_shmsys
-sunos_shmsys: retl;nop
- .globl sunos_sigaction
-sunos_sigaction: retl;nop
- .globl sunos_sigblock
-sunos_sigblock: retl;nop
- .globl sunos_sigsetmask
-sunos_sigsetmask: retl;nop
- .globl sunos_sstk
-sunos_sstk: retl;nop
- .globl sunos_sysconf
-sunos_sysconf: retl;nop
- .globl sunos_uname
-sunos_uname: retl;nop
- .globl sunos_vadvise
-sunos_vadvise: retl;nop
- .globl sunos_wait4
-sunos_wait4: retl;nop
- .globl sunos_write
-sunos_write: retl;nop
- .globl sunos_writev
-sunos_writev: retl;nop
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 3844c24c3..0ed975aff 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -1,4 +1,4 @@
-/* $Id: head.S,v 1.31 1997/05/30 22:35:28 davem Exp $
+/* $Id: head.S,v 1.43 1997/07/07 03:05:25 davem Exp $
* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -8,20 +8,25 @@
*/
#include <linux/version.h>
+#include <linux/errno.h>
+#include <asm/asm_offsets.h>
+#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/head.h>
/* This section from from _start to sparc64_boot_end should fit into
- * 0xffff.f800.0000.4000 to 0xffff.f800.0000.8000 and will be sharing space
- * with bootup_user_stack, which is from 0xffff.f800.0000.4000 to
- * 0xffff.f800.0000.6000 and bootup_kernel_stack, which is from
- * 0xffff.f800.0000.6000 to 0xffff.f800.0000.8000.
+ * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
+ * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
+ * 0x0000.0000.0040.6000 and bootup_kernel_stack, which is from
+ * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
*/
.text
@@ -31,7 +36,7 @@ start:
_stext:
stext:
bootup_user_stack:
-! 0xfffff80000004000
+! 0x0000000000404000
b sparc64_boot
flushw /* Flush register file. */
@@ -41,10 +46,11 @@ bootup_user_stack:
*/
.global root_flags, ram_flags, root_dev
.global ramdisk_image, ramdisk_size
+ .globl silo_args
.ascii "HdrS"
.word LINUX_VERSION_CODE
- .half 0x0201 /* HdrS version */
+ .half 0x0202 /* HdrS version */
root_flags:
.half 1
root_dev:
@@ -55,7 +61,8 @@ ramdisk_image:
.word 0
ramdisk_size:
.word 0
- .xword reboot_command
+ .xword reboot_command
+ .xword bootstr_len
/* We must be careful, 32-bit OpenBOOT will get confused if it
* tries to save away a register window to a 64-bit kernel
@@ -80,26 +87,7 @@ sparc64_boot:
* Again, typically PROM has left %pil at 13 or similar, and
* (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
*/
- wrpr %g0, 0xf, %pil /* Interrupts off. */
- wrpr %g0, (PSTATE_PRIV|PSTATE_PEF), %pstate
-
- /* Check if we are mapped where we expect to be in virtual
- * memory. The Solaris /boot elf format bootloader
- * will peek into our elf header and load us where
- * we want to be, otherwise we have to re-map.
- */
-current_pc:
- rd %pc, %g3
- sethi %uhi(KERNBASE), %g4
- sllx %g4, 32, %g4
-
- /* Check the run time program counter. */
-
- set current_pc, %g5
- add %g5, %g4, %g5
- cmp %g3, %g5
- be %xcc, sun4u_init
- nop
+ wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
create_mappings:
/* %g5 holds the tlb data */
@@ -136,15 +124,10 @@ create_mappings:
cmp %g1, %g2
be,a,pn %xcc, got_tlbentry
ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- cmp %l1, (63 << 3)
+ cmp %l0, (63 << 3)
blu,pt %xcc, 1b
add %l0, (1 << 3), %l0
-boot_failed:
- /* Debugging 8-) */
- set 0xdeadbeef, %g1
- t 0x11
-
got_tlbentry:
/* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
nop
@@ -159,33 +142,73 @@ got_tlbentry:
or %g5, %g1, %g5 /* Or it into TAG being built. */
+ clr %l0 /* TLB entry walker. */
+ sethi %hi(KERNBASE), %g3 /* 4M lower limit */
+ sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
+ mov TLB_TAG_ACCESS, %l7
+1:
+ /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+ ldxa [%l0] ASI_ITLB_TAG_READ, %g1
+ nop
+ nop
+ nop
+ andn %g1, %l2, %g1 /* Get vaddr */
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_IMMU
+ stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
+2:
+ cmp %l0, (63 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ nop; nop; nop
+
+ clr %l0 /* TLB entry walker. */
+1:
+ /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
+ ldxa [%l0] ASI_DTLB_TAG_READ, %g1
+ nop
+ nop
+ nop
+ andn %g1, %l2, %g1 /* Get vaddr */
+ cmp %g1, %g3
+ blu,pn %xcc, 2f
+ cmp %g1, %g7
+ bgeu,pn %xcc, 2f
+ nop
+ stxa %g0, [%l7] ASI_DMMU
+ stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
+2:
+ cmp %l0, (63 << 3)
+ blu,pt %xcc, 1b
+ add %l0, (1 << 3), %l0
+
+ nop; nop; nop
+
+
/* PROM never puts any TLB entries into the MMU with the lock bit
- * set. So we gladly use tlb entry 63 for KERNBASE, 62 for
- * boot time locked PROM CIF handler page, we remove the locked
- * bit for the CIF page in paging_init().
+ * set. So we gladly use tlb entry 63 for KERNBASE.
*/
- mov TLB_TAG_ACCESS, %g3
- mov (63 << 3), %g7
- stxa %g4, [%g3] ASI_IMMU /* KERNBASE into TLB TAG */
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
- /* Same for DTLB */
- stxa %g4, [%g3] ASI_DMMU /* KERNBASE into TLB TAG */
+ sethi %hi(KERNBASE), %g3
+ mov (63 << 3), %g7
+ stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g4
+ stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
+ stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
membar #Sync
-
- ba,pt %xcc, go_to_highmem
+ flush %g3
+ membar #Sync
+ ba,pt %xcc, 1f
nop
-
-go_to_highmem:
- /* Now do a non-relative jump so that PC is in high-memory */
+1:
set sun4u_init, %g2
- jmpl %g2 + %g4, %g0
+ jmpl %g2 + %g0, %g0
nop
sun4u_init:
@@ -198,42 +221,16 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- /* The lock bit has to be removed from this page later on,
- * but before firing up init we will use PROM a lot, so we
- * lock it there now...
- */
-
- /* Compute PROM CIF interface page TTE. */
- sethi %hi(__p1275_loc), %g7
- or %g7, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L), %g7
- sethi %uhi(_PAGE_VALID), %g5
- sethi %hi(0x8000), %g3
- sllx %g5, 32, %g5
- mov TLB_TAG_ACCESS, %g6
- or %g5, %g7, %g5
- add %g5, %g1, %g5 /* Add in physbase. */
-
- mov (62 << 3), %g7 /* TLB entry 62 */
- stxa %g3, [%g6] ASI_IMMU /* CIF page into TLB TAG */
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
-
- /* Same for DTLB */
- stxa %g3, [%g6] ASI_DMMU /* CIF page into TLB TAG */
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g3
- membar #Sync
+ sethi %uhi(PAGE_OFFSET), %g4
+ sllx %g4, 32, %g4
/* We are now safely (we hope) in Nucleus context (0), rewrite
* the KERNBASE TTE's so they no longer have the global bit set.
* Don't forget to setup TAG_ACCESS first 8-)
*/
mov TLB_TAG_ACCESS, %g2
- stxa %g4, [%g2] ASI_IMMU
- stxa %g4, [%g2] ASI_DMMU
+ stxa %g3, [%g2] ASI_IMMU
+ stxa %g3, [%g2] ASI_DMMU
mov (63 << 3), %g7
ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
@@ -247,30 +244,22 @@ sun4u_init:
membar #Sync
/* Kill instruction prefetch queues. */
- flush %g4
+ flush %g3
membar #Sync
- /* Compute the number of windows in this machine
- * store this in nwindows and nwindowsm1
- */
- rdpr %ver, %g1 /* Get VERSION register. */
- sethi %hi(nwindows), %g2
- and %g1, VERS_MAXWIN, %g5
- or %g2,%lo(nwindows),%g2
- add %g5, 1, %g6
- add %g2, (nwindows - nwindowsm1), %g3
- stx %g6, [%g2 + %g4]
- stx %g5, [%g3 + %g4]
-
sethi %hi(init_task_union), %g6
or %g6, %lo(init_task_union), %g6
- add %g6, %g4, %g6 ! g6 usage is fixed as well
mov %sp, %l6
mov %o4, %l7
+#if 0 /* We don't do it like this anymore, but for historical hack value
+ * I leave this snippet here to show how crazy we can be sometimes. 8-)
+ */
+
/* Setup "Linux Current Register", thanks Sun 8-) */
wr %g0, 0x1, %pcr
wr %g6, 0x0, %pic
+#endif
mov 1, %g5
sllx %g5, (PAGE_SHIFT + 1), %g5
@@ -291,23 +280,19 @@ sun4u_init:
add %l1, %l2, %l1
andn %l1, %l2, %l1
add %l2, 1, %l2
- add %l0, %g4, %o0
+ add %l0, %g0, %o0
1:
- clr %o1
- sethi %hi(PAGE_SIZE), %o2
- or %o2, %lo(PAGE_SIZE), %o2
- call __memset
+ mov %l2, %o1
+ call __bzero
add %l0, %l2, %l0
cmp %l0, %l1
blu,pt %xcc, 1b
- add %l0, %g4, %o0
+ add %l0, %g0, %o0
/* Now clear empty_zero_page */
- clr %o1
- sethi %hi(PAGE_SIZE), %o2
- or %o2, %lo(PAGE_SIZE), %o2
- call __memset
- mov %g4, %o0
+ mov %l2, %o1
+ call __bzero
+ mov %g3, %o0
mov %l6, %o1 ! OpenPROM stack
call prom_init
@@ -320,36 +305,45 @@ sun4u_init:
.globl setup_tba
setup_tba:
+ save %sp, -160, %sp
+
+ rdpr %tba, %g7
+ sethi %hi(prom_tba), %o1
+ or %o1, %lo(prom_tba), %o1
+ stx %g7, [%o1]
+
+ /* Setup "Linux" globals 8-) */
+ rdpr %pstate, %o1
+ mov %g6, %o2
+ wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
sethi %hi(sparc64_ttable_tl0), %g5
- add %g5, %g4, %g5
wrpr %g5, %tba
+ mov %o2, %g6
/* Set up MMU globals */
- rdpr %pstate, %o1
- wrpr %o1, PSTATE_MG, %pstate
+ wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
/* PGD/PMD offset mask, used by TLB miss handlers. */
sethi %hi(0x1ff8), %g2
or %g2, %lo(0x1ff8), %g2
/* Kernel PGDIR used by TLB miss handlers. */
- mov %o0, %g6
+ mov %i0, %g6
/* To catch bootup bugs, this is user PGDIR for TLB miss handlers. */
clr %g7
/* Setup Interrupt globals */
- wrpr %o1, PSTATE_IG, %pstate
- sethi %uhi(ivector_to_mask), %g4
- or %g4, %ulo(ivector_to_mask), %g4
+ wrpr %o1, (PSTATE_IG|PSTATE_IE), %pstate
sethi %hi(ivector_to_mask), %g5
- or %g5, %lo(ivector_to_mask), %g5
- or %g5, %g4, %g1 /* IVECTOR table */
+ or %g5, %lo(ivector_to_mask), %g1 /* IVECTOR table */
mov 0x40, %g2 /* INTR data 0 register */
- andn %o1, PSTATE_IE, %o1
+ /* Ok, we're done setting up all the state our trap mechanims needs,
+ * now get back into normal globals and let the PROM know what it up.
+ */
wrpr %g0, %g0, %wstate
- wrpr %o1, %g0, %pstate
+ wrpr %o1, PSTATE_IE, %pstate
/* Zap TSB BASE to zero with TSB_size==1. */
mov TSB_REG, %o4
@@ -359,8 +353,16 @@ setup_tba:
membar #Sync
- retl
- nop
+ sethi %hi(sparc64_ttable_tl0), %g5
+ call prom_set_trap_table
+ mov %g5, %o0
+
+ rdpr %pstate, %o1
+ or %o1, PSTATE_IE, %o1
+ wrpr %o1, 0, %pstate
+
+ ret
+ restore
sparc64_boot_end:
.skip 0x2000 + _start - sparc64_boot_end
@@ -369,18 +371,21 @@ bootup_user_stack_end:
bootup_kernel_stack:
.skip 0x2000
-! 0xfffff80000008000
+! 0x0000000000408000
#include "ttable.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
/* This is just anal retentiveness on my part... */
.align 16384
.data
.align 8
- .globl nwindows, nwindowsm1
-nwindows: .xword 0
-nwindowsm1: .xword 0
+ .globl prom_tba
+prom_tba: .xword 0
.section ".fixup",#alloc,#execinstr
.globl __ret_efault
__ret_efault:
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index d3792dec6..b7a0f312d 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -1,4 +1,4 @@
-/* $Id: ioctl32.c,v 1.8 1997/06/04 13:05:15 jj Exp $
+/* $Id: ioctl32.c,v 1.13 1997/07/17 02:20:38 davem Exp $
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -20,9 +20,16 @@
#include <linux/kd.h>
#include <linux/route.h>
#include <linux/netlink.h>
+#include <linux/vt.h>
+#include <linux/fs.h>
+#include <linux/fd.h>
#include <asm/types.h>
#include <asm/uaccess.h>
+#include <asm/fbio.h>
+#include <asm/kbio.h>
+#include <asm/vuid_event.h>
+#include <asm/rtc.h>
/* As gcc will warn about casting u32 to some ptr, we have to cast it to
* unsigned long first, and that's what is A() for.
@@ -370,14 +377,122 @@ static inline int hdio_getgeo(unsigned int fd, u32 arg)
return err;
}
+struct fbcmap32 {
+ int index; /* first element (0 origin) */
+ int count;
+ u32 red;
+ u32 green;
+ u32 blue;
+};
+
+#define FBIOPUTCMAP32 _IOW('F', 3, struct fbcmap32)
+#define FBIOGETCMAP32 _IOW('F', 4, struct fbcmap32)
+
+static inline int fbiogetputcmap(unsigned int fd, unsigned int cmd, u32 arg)
+{
+ struct fbcmap f;
+ int ret;
+ char red[256], green[256], blue[256];
+ u32 r, g, b;
+ unsigned long old_fs = get_fs();
+
+ if (get_user(f.index, &(((struct fbcmap32 *)A(arg))->index)) ||
+ __get_user(f.count, &(((struct fbcmap32 *)A(arg))->count)) ||
+ __get_user(r, &(((struct fbcmap32 *)A(arg))->red)) ||
+ __get_user(g, &(((struct fbcmap32 *)A(arg))->green)) ||
+ __get_user(b, &(((struct fbcmap32 *)A(arg))->blue)))
+ return -EFAULT;
+ if ((f.index < 0) || (f.index > 255)) return -EINVAL;
+ if (f.index + f.count > 256)
+ f.count = 256 - f.index;
+ if (cmd == FBIOPUTCMAP32) {
+ if (copy_from_user (red, (char *)A(r), f.count) ||
+ copy_from_user (green, (char *)A(g), f.count) ||
+ copy_from_user (blue, (char *)A(b), f.count))
+ return -EFAULT;
+ }
+ f.red = red; f.green = green; f.blue = blue;
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, (cmd == FBIOPUTCMAP32) ? FBIOPUTCMAP : FBIOGETCMAP, (long)&f);
+ set_fs (old_fs);
+ if (!ret && cmd == FBIOGETCMAP32) {
+ if (copy_to_user ((char *)A(r), red, f.count) ||
+ copy_to_user ((char *)A(g), green, f.count) ||
+ copy_to_user ((char *)A(b), blue, f.count))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+struct fbcursor32 {
+ short set; /* what to set, choose from the list above */
+ short enable; /* cursor on/off */
+ struct fbcurpos pos; /* cursor position */
+ struct fbcurpos hot; /* cursor hot spot */
+ struct fbcmap32 cmap; /* color map info */
+ struct fbcurpos size; /* cursor bit map size */
+ u32 image; /* cursor image bits */
+ u32 mask; /* cursor mask bits */
+};
+
+#define FBIOSCURSOR32 _IOW('F', 24, struct fbcursor32)
+#define FBIOGCURSOR32 _IOW('F', 25, struct fbcursor32)
+
+static inline int fbiogscursor(unsigned int fd, unsigned int cmd, u32 arg)
+{
+ struct fbcursor f;
+ int ret;
+ char red[2], green[2], blue[2];
+ char image[128], mask[128];
+ u32 r, g, b;
+ u32 m, i;
+ unsigned long old_fs = get_fs();
+
+ if (copy_from_user (&f, (struct fbcursor32 *)A(arg), 2 * sizeof (short) + 2 * sizeof(struct fbcurpos)) ||
+ __get_user(f.size.fbx, &(((struct fbcursor32 *)A(arg))->size.fbx)) ||
+ __get_user(f.size.fby, &(((struct fbcursor32 *)A(arg))->size.fby)) ||
+ __get_user(f.cmap.index, &(((struct fbcursor32 *)A(arg))->cmap.index)) ||
+ __get_user(f.cmap.count, &(((struct fbcursor32 *)A(arg))->cmap.count)) ||
+ __get_user(r, &(((struct fbcursor32 *)A(arg))->cmap.red)) ||
+ __get_user(g, &(((struct fbcursor32 *)A(arg))->cmap.green)) ||
+ __get_user(b, &(((struct fbcursor32 *)A(arg))->cmap.blue)) ||
+ __get_user(m, &(((struct fbcursor32 *)A(arg))->mask)) ||
+ __get_user(i, &(((struct fbcursor32 *)A(arg))->image)))
+ return -EFAULT;
+ if (f.set & FB_CUR_SETCMAP) {
+ if ((uint) f.size.fby > 32)
+ return -EINVAL;
+ if (copy_from_user (mask, (char *)A(m), f.size.fby * 4) ||
+ copy_from_user (image, (char *)A(i), f.size.fby * 4))
+ return -EFAULT;
+ f.image = image; f.mask = mask;
+ }
+ if (f.set & FB_CUR_SETCMAP) {
+ if (copy_from_user (red, (char *)A(r), 2) ||
+ copy_from_user (green, (char *)A(g), 2) ||
+ copy_from_user (blue, (char *)A(b), 2))
+ return -EFAULT;
+ f.cmap.red = red; f.cmap.green = green; f.cmap.blue = blue;
+ }
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, FBIOSCURSOR, (long)&f);
+ set_fs (old_fs);
+ return ret;
+}
+
asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
{
struct file * filp;
int error = -EBADF;
lock_kernel();
- if (fd >= NR_OPEN || !(filp = current->files->fd[fd]))
+ if(fd >= NR_OPEN)
+ goto out;
+
+ filp = current->files->fd[fd];
+ if(!filp)
goto out;
+
if (!filp->f_op || !filp->f_op->ioctl) {
error = sys_ioctl (fd, cmd, (unsigned long)arg);
goto out;
@@ -431,6 +546,15 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
case BLKGETSIZE:
error = w_long(fd, cmd, arg);
goto out;
+
+ case FBIOPUTCMAP32:
+ case FBIOGETCMAP32:
+ error = fbiogetputcmap(fd, cmd, arg);
+ goto out;
+
+ case FBIOSCURSOR32:
+ error = fbiogscursor(fd, cmd, arg);
+ goto out;
/* List here exlicitly which ioctl's are known to have
* compatable types passed or none at all...
@@ -471,6 +595,17 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
case TIOCSPGRP:
case TIOCGPGRP:
case TIOCSCTTY:
+
+ /* Big F */
+ case FBIOGTYPE:
+ case FBIOSATTR:
+ case FBIOGATTR:
+ case FBIOSVIDEO:
+ case FBIOGVIDEO:
+ case FBIOGCURSOR32: /* This is not implemented yet. Later it should be converted... */
+ case FBIOSCURPOS:
+ case FBIOGCURPOS:
+ case FBIOGCURMAX:
/* Little f */
case FIOCLEX:
@@ -479,6 +614,19 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
case FIONBIO:
case FIONREAD: /* This is also TIOCINQ */
+ /* 0x00 */
+ case FIBMAP:
+ case FIGETBSZ:
+
+ /* 0x02 -- Floppy ioctls */
+ case FDSETEMSGTRESH:
+ case FDFLUSH:
+ case FDSETMAXERRS:
+ case FDGETMAXERRS:
+ case FDGETDRVTYP:
+ case FDEJECT:
+ /* XXX The rest need struct floppy_* translations. */
+
/* 0x12 */
case BLKRRPART:
case BLKFLSBUF:
@@ -495,6 +643,59 @@ asmlinkage int sys32_ioctl(unsigned int fd, unsigned int cmd, u32 arg)
case KDSIGACCEPT:
case KDGETKEYCODE:
case KDSETKEYCODE:
+ case KIOCSOUND:
+ case KDMKTONE:
+ case KDGKBTYPE:
+ case KDSETMODE:
+ case KDGETMODE:
+ case KDSKBMODE:
+ case KDGKBMODE:
+ case KDSKBMETA:
+ case KDGKBMETA:
+ case KDGKBENT:
+ case KDSKBENT:
+ case KDGKBSENT:
+ case KDSKBSENT:
+ case KDGKBDIACR:
+ case KDSKBDIACR:
+ case KDGKBLED:
+ case KDSKBLED:
+ case KDGETLED:
+ case KDSETLED:
+
+ /* Little k */
+ case KIOCTYPE:
+ case KIOCLAYOUT:
+ case KIOCGTRANS:
+ case KIOCTRANS:
+ case KIOCCMD:
+ case KIOCSDIRECT:
+ case KIOCSLED:
+ case KIOCGLED:
+ case KIOCSRATE:
+ case KIOCGRATE:
+
+ /* Big V */
+ case VT_SETMODE:
+ case VT_GETMODE:
+ case VT_GETSTATE:
+ case VT_OPENQRY:
+ case VT_ACTIVATE:
+ case VT_WAITACTIVE:
+ case VT_RELDISP:
+ case VT_DISALLOCATE:
+ case VT_RESIZE:
+ case VT_RESIZEX:
+ case VT_LOCKSWITCH:
+ case VT_UNLOCKSWITCH:
+
+ /* Little v */
+ case VUIDSFORMAT:
+ case VUIDGFORMAT:
+
+ /* Little p (/dev/rtc etc.) */
+ case RTCGET:
+ case RTCSET:
/* Socket level stuff */
case FIOSETOWN:
diff --git a/arch/sparc64/kernel/ioport.c b/arch/sparc64/kernel/ioport.c
index 2f94e9102..390c33517 100644
--- a/arch/sparc64/kernel/ioport.c
+++ b/arch/sparc64/kernel/ioport.c
@@ -1,4 +1,4 @@
-/* $Id: ioport.c,v 1.7 1997/04/10 05:13:01 davem Exp $
+/* $Id: ioport.c,v 1.10 1997/06/30 09:24:02 jj Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
@@ -64,13 +64,7 @@ void *sparc_alloc_io (u32 address, void *virtual, int len, char *name,
/* Tell Linux resource manager about the mapping */
request_region ((vaddr | offset), len, name);
} else {
- vaddr = occupy_region(sparc_iobase_vaddr, IOBASE_END,
- (offset + len + PAGE_SIZE-1) & PAGE_MASK, PAGE_SIZE, name);
- if (vaddr == 0) {
- /* Usually we cannot see printks in this case. */
- prom_printf("alloc_io: cannot occupy %d region\n", len);
- prom_halt();
- }
+ return __va(addr);
}
base_address = vaddr;
@@ -88,6 +82,9 @@ void sparc_free_io (void *virtual, int len)
{
unsigned long vaddr = (unsigned long) virtual & PAGE_MASK;
unsigned long plen = (((unsigned long)virtual & ~PAGE_MASK) + len + PAGE_SIZE-1) & PAGE_MASK;
+
+ if (virtual >= PAGE_OFFSET + 0x10000000000UL)
+ return;
release_region(vaddr, plen);
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3c9b1a89e..c4e7f0e74 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1,4 +1,4 @@
-/* $Id: irq.c,v 1.13 1997/05/27 07:54:28 davem Exp $
+/* $Id: irq.c,v 1.16 1997/07/11 03:03:08 davem Exp $
* irq.c: UltraSparc IRQ handling/init/registry.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -47,7 +47,8 @@ unsigned long ivector_to_mask[NUM_IVECS];
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;
-static struct irqaction *irq_action[NR_IRQS+1] = {
+/* XXX Must be exported so that fast IRQ handlers can get at it... -DaveM */
+struct irqaction *irq_action[NR_IRQS+1] = {
NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};
@@ -279,13 +280,13 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
/* If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
- if(irqflags & SA_STATIC_ALLOC)
+ if(irqflags & SA_STATIC_ALLOC) {
if(static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
"using kmalloc\n", irq, name);
-
+ }
if(action == NULL)
action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
GFP_KERNEL);
@@ -464,14 +465,101 @@ void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
}
#endif
-/* XXX This needs to be written for floppy driver, and soon will be necessary
- * XXX for serial driver as well.
+/* The following assumes that the branch lies before the place we
+ * are branching to. This is the case for a trap vector...
+ * You have been warned.
*/
+#define SPARC_BRANCH(dest_addr, inst_addr) \
+ (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
+
+#define SPARC_NOP (0x01000000)
+
+static void install_fast_irq(unsigned int cpu_irq,
+ void (*handler)(int, void *, struct pt_regs *))
+{
+ extern unsigned long sparc64_ttable_tl0;
+ unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
+ unsigned int *insns;
+
+ ttent += 0x820;
+ ttent += (cpu_irq - 1) << 5;
+ insns = (unsigned int *) ttent;
+ insns[0] = SPARC_BRANCH(((unsigned long) handler),
+ ((unsigned long)&insns[0]));
+ insns[1] = SPARC_NOP;
+ __asm__ __volatile__("flush %0" : : "r" (ttent));
+}
+
int request_fast_irq(unsigned int irq,
void (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags, const char *name)
{
- return -1;
+ struct irqaction *action;
+ unsigned long flags;
+ unsigned int cpu_irq, *imap, *iclr;
+
+ /* XXX This really is not the way to do it, the "right way"
+ * XXX is to have drivers set SA_SBUS or something like that
+ * XXX in irqflags and we base our decision here on whether
+ * XXX that flag bit is set or not.
+ *
+ * In this case nobody can have a fast interrupt at the level
+ * where TICK interrupts live.
+ */
+ if(irq == 14)
+ return -EINVAL;
+ cpu_irq = ino_to_pil[irq];
+
+ if(!handler)
+ return -EINVAL;
+ imap = irq_to_imap(irq);
+ action = *(cpu_irq + irq_action);
+ if(action) {
+ if(action->flags & SA_SHIRQ)
+ panic("Trying to register fast irq when already shared.\n");
+ if(irqflags & SA_SHIRQ)
+ panic("Trying to register fast irq as shared.\n");
+ printk("request_fast_irq: Trying to register yet already owned.\n");
+ return -EBUSY;
+ }
+ save_and_cli(flags);
+ if(irqflags & SA_STATIC_ALLOC) {
+ if(static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
+ "using kmalloc\n", irq, name);
+ }
+ if(action == NULL)
+ action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
+ GFP_KERNEL);
+ if(!action) {
+ restore_flags(flags);
+ return -ENOMEM;
+ }
+ install_fast_irq(cpu_irq, handler);
+
+ if(imap) {
+ int ivindex = (*imap & (SYSIO_IMAP_IGN | SYSIO_IMAP_INO));
+
+ ivector_to_mask[ivindex] = (1 << cpu_irq);
+ iclr = imap_to_iclr(imap);
+ action->mask = (unsigned long) iclr;
+ irqflags |= SA_SYSIO_MASKED;
+ } else
+ action->mask = 0;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ action->dev_id = NULL;
+ action->name = name;
+ action->next = NULL;
+
+ *(cpu_irq + irq_action) = action;
+
+ enable_irq(irq);
+ restore_flags(flags);
+ return 0;
}
/* We really don't need these at all on the Sparc. We only have
@@ -496,27 +584,31 @@ static unsigned long tick_offset;
/* XXX This doesn't belong here, just do this cruft in the timer.c handler code. */
static void timer_handler(int irq, void *dev_id, struct pt_regs *regs)
{
- extern void timer_interrupt(int, void *, struct pt_regs *);
- unsigned long compare;
-
if (!(get_softint () & 1)) {
/* Just to be sure... */
clear_softint(1 << 14);
printk("Spurious level14 at %016lx\n", regs->tpc);
return;
- }
+ } else {
+ unsigned long compare, tick;
+
+ do {
+ extern void timer_interrupt(int, void *, struct pt_regs *);
- timer_interrupt(irq, dev_id, regs);
+ timer_interrupt(irq, dev_id, regs);
- /* Acknowledge INT_TIMER */
- clear_softint(1 << 0);
+ /* Acknowledge INT_TIMER */
+ clear_softint(1 << 0);
- /* Set up for next timer tick. */
- __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
- "add %0, %1, %0\n\t"
- "wr %0, 0x0, %%tick_cmpr"
- : "=r" (compare)
- : "r" (tick_offset));
+ /* Set up for next timer tick. */
+ __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
+ "add %0, %2, %0\n\t"
+ "wr %0, 0x0, %%tick_cmpr\n\t"
+ "rd %%tick, %1"
+ : "=&r" (compare), "=r" (tick)
+ : "r" (tick_offset));
+ } while(tick >= compare);
+ }
}
/* This is called from time_init() to get the jiffies timer going. */
@@ -558,6 +650,8 @@ struct sun5_timer {
volatile u32 limit1, _unused3;
} *prom_timers;
+static u32 prom_limit0, prom_limit1;
+
static void map_prom_timers(void)
{
unsigned int addr[3];
@@ -582,7 +676,7 @@ static void map_prom_timers(void)
prom_timers = (struct sun5_timer *) 0;
return;
}
- prom_timers = (struct sun5_timer *) addr[0];
+ prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}
static void kill_prom_timer(void)
@@ -590,24 +684,39 @@ static void kill_prom_timer(void)
if(!prom_timers)
return;
+ /* Save them away for later. */
+ prom_limit0 = prom_timers->limit0;
+ prom_limit1 = prom_timers->limit1;
+
/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.
* We turn both off here just to be paranoid.
*/
prom_timers->limit0 = 0;
prom_timers->limit1 = 0;
+
+ /* Wheee, eat the interrupt packet too... */
+ __asm__ __volatile__("
+ mov 0x40, %%g2
+ ldxa [%%g0] %0, %%g1
+ ldxa [%%g2] %1, %%g1
+ stxa %%g0, [%%g0] %0
+ membar #Sync
+" : /* no outputs */
+ : "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R)
+ : "g1", "g2");
}
-#if 0 /* Unused at this time. -DaveM */
-static void enable_prom_timer(void)
+void enable_prom_timer(void)
{
if(!prom_timers)
return;
- /* Set it to fire off every 10ms. */
- prom_timers->limit1 = 0xa000270f;
+ /* Set it to whatever was there before. */
+ prom_timers->limit1 = prom_limit1;
prom_timers->count1 = 0;
+ prom_timers->limit0 = prom_limit0;
+ prom_timers->count0 = 0;
}
-#endif
__initfunc(void init_IRQ(void))
{
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index cc8183618..89f63f78f 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -1,4 +1,4 @@
-/* $Id: process.c,v 1.17 1997/06/02 06:33:32 davem Exp $
+/* $Id: process.c,v 1.29 1997/07/17 02:20:40 davem Exp $
* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -17,6 +17,8 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
@@ -137,7 +139,7 @@ void machine_restart(char * cmd)
prom_reboot(cmd);
if (*reboot_command)
prom_reboot(reboot_command);
- prom_feval ("reset");
+ prom_reboot("");
panic("Reboot failed!");
}
@@ -146,8 +148,55 @@ void machine_power_off(void)
machine_halt();
}
-void show_regwindow(struct reg_window *rw)
+static void show_regwindow32(struct pt_regs *regs)
{
+ struct reg_window32 *rw;
+ struct reg_window32 r_w;
+ unsigned long old_fs;
+
+ __asm__ __volatile__ ("flushw");
+ rw = (struct reg_window32 *)((long)(unsigned)regs->u_regs[14]);
+ old_fs = get_fs();
+ set_fs (USER_DS);
+ if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+ set_fs (old_fs);
+ return;
+ }
+ rw = &r_w;
+ set_fs (old_fs);
+ printk("l0: %016x l1: %016x l2: %016x l3: %016x\n"
+ "l4: %016x l5: %016x l6: %016x l7: %016x\n",
+ rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
+ rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
+ printk("i0: %016x i1: %016x i2: %016x i3: %016x\n"
+ "i4: %016x i5: %016x i6: %016x i7: %016x\n",
+ rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
+ rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
+}
+
+static void show_regwindow(struct pt_regs *regs)
+{
+ struct reg_window *rw;
+ struct reg_window r_w;
+ unsigned long old_fs;
+
+ if ((regs->tstate & TSTATE_PRIV) || !(current->tss.flags & SPARC_FLAG_32BIT)) {
+ __asm__ __volatile__ ("flushw");
+ rw = (struct reg_window *)(regs->u_regs[14] + STACK_BIAS);
+ if (!(regs->tstate & TSTATE_PRIV)) {
+ old_fs = get_fs();
+ set_fs (USER_DS);
+ if (copy_from_user (&r_w, rw, sizeof(r_w))) {
+ set_fs (old_fs);
+ return;
+ }
+ rw = &r_w;
+ set_fs (old_fs);
+ }
+ } else {
+ show_regwindow32(regs);
+ return;
+ }
printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3]);
printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
@@ -158,18 +207,6 @@ void show_regwindow(struct reg_window *rw)
rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}
-void show_regwindow32(struct reg_window32 *rw)
-{
- printk("l0: %08x l1: %08x l2: %08x l3: %08x\n"
- "l4: %08x l5: %08x l6: %08x l7: %08x\n",
- rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
- rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
- printk("i0: %08x i1: %08x i2: %08x i3: %08x\n"
- "i4: %08x i5: %08x i6: %08x i7: %08x\n",
- rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
- rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
-}
-
void show_stackframe(struct sparc_stackf *sf)
{
unsigned long size;
@@ -228,10 +265,7 @@ void show_stackframe32(struct sparc_stackf32 *sf)
void show_regs(struct pt_regs * regs)
{
-#if __MPP__
- printk("CID: %d\n",mpp_cid());
-#endif
- printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %016lx\n", regs->tstate,
+ printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x\n", regs->tstate,
regs->tpc, regs->tnpc, regs->y);
printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
@@ -245,16 +279,11 @@ void show_regs(struct pt_regs * regs)
printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
-#if 0
- show_regwindow((struct reg_window *)(regs->u_regs[14] + STACK_BIAS));
-#endif
+ show_regwindow(regs);
}
void show_regs32(struct pt_regs32 *regs)
{
-#if __MPP__
- printk("CID: %d\n",mpp_cid());
-#endif
printk("PSR: %08x PC: %08x NPC: %08x Y: %08x\n", regs->psr,
regs->pc, regs->npc, regs->y);
printk("g0: %08x g1: %08x g2: %08x g3: %08x\n",
@@ -269,7 +298,6 @@ void show_regs32(struct pt_regs32 *regs)
printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
- show_regwindow32((struct reg_window32 *)((unsigned long)regs->u_regs[14]));
}
void show_thread(struct thread_struct *tss)
@@ -290,46 +318,22 @@ void show_thread(struct thread_struct *tss)
continue;
printk("reg_window[%d]:\n", i);
printk("stack ptr: 0x%016lx\n", tss->rwbuf_stkptrs[i]);
- show_regwindow(&tss->reg_window[i]);
}
printk("w_saved: 0x%08lx\n", tss->w_saved);
- /* XXX missing: float_regs */
- printk("fsr: 0x%016lx\n", tss->fsr);
-
printk("sstk_info.stack: 0x%016lx\n",
(unsigned long)tss->sstk_info.the_stack);
printk("sstk_info.status: 0x%016lx\n",
(unsigned long)tss->sstk_info.cur_status);
- printk("flags: 0x%016lx\n", tss->flags);
- printk("current_ds: 0x%016x\n", tss->current_ds);
+ printk("flags: 0x%08x\n", tss->flags);
+ printk("current_ds: 0x%016lx\n", tss->current_ds);
/* XXX missing: core_exec */
}
-/*
- * Free current thread data structures etc..
- */
+/* Free current thread data structures etc.. */
void exit_thread(void)
{
-#ifndef __SMP__
- if(last_task_used_math == current) {
-#else
- if(current->flags & PF_USEDFPU) {
-#endif
- fprs_write(FPRS_FEF);
- if(current->tss.flags & SPARC_FLAG_32BIT)
- fpsave32((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- else
- fpsave((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
-#ifndef __SMP__
- last_task_used_math = NULL;
-#else
- current->flags &= ~PF_USEDFPU;
-#endif
- }
}
void flush_thread(void)
@@ -338,28 +342,12 @@ void flush_thread(void)
current->tss.sstk_info.cur_status = 0;
current->tss.sstk_info.the_stack = 0;
- /* No new signal delivery by default */
+ /* No new signal delivery by default. */
current->tss.new_signal = 0;
-#ifndef __SMP__
- if(last_task_used_math == current) {
-#else
- if(current->flags & PF_USEDFPU) {
-#endif
- fprs_write(FPRS_FEF);
- if(current->tss.flags & SPARC_FLAG_32BIT)
- fpsave32((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- else
- fpsave((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
-#ifndef __SMP__
- last_task_used_math = NULL;
-#else
- current->flags &= ~PF_USEDFPU;
-#endif
- }
+ current->flags &= ~PF_USEDFPU;
/* Now, this task is no longer a kernel thread. */
+ current->tss.current_ds = USER_DS;
if(current->tss.flags & SPARC_FLAG_KTHREAD) {
current->tss.flags &= ~SPARC_FLAG_KTHREAD;
@@ -368,78 +356,38 @@ void flush_thread(void)
*/
get_mmu_context(current);
}
- current->tss.current_ds = USER_DS;
- spitfire_set_secondary_context (current->mm->context);
+ current->tss.ctx = current->mm->context & 0x1fff;
+ spitfire_set_secondary_context (current->tss.ctx);
+ __asm__ __volatile__("flush %g6");
}
-static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src)
+/* It's a bit more tricky when 64-bit tasks are involved... */
+static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
- __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
- "ldd\t[%1 + 0x08], %%g4\n\t"
- "ldd\t[%1 + 0x10], %%o4\n\t"
- "std\t%%g2, [%0 + 0x00]\n\t"
- "std\t%%g4, [%0 + 0x08]\n\t"
- "std\t%%o4, [%0 + 0x10]\n\t"
- "ldd\t[%1 + 0x18], %%g2\n\t"
- "ldd\t[%1 + 0x20], %%g4\n\t"
- "ldd\t[%1 + 0x28], %%o4\n\t"
- "std\t%%g2, [%0 + 0x18]\n\t"
- "std\t%%g4, [%0 + 0x20]\n\t"
- "std\t%%o4, [%0 + 0x28]\n\t"
- "ldd\t[%1 + 0x30], %%g2\n\t"
- "ldd\t[%1 + 0x38], %%g4\n\t"
- "ldd\t[%1 + 0x40], %%o4\n\t"
- "std\t%%g2, [%0 + 0x30]\n\t"
- "std\t%%g4, [%0 + 0x38]\n\t"
- "ldd\t[%1 + 0x48], %%g2\n\t"
- "std\t%%o4, [%0 + 0x40]\n\t"
- "std\t%%g2, [%0 + 0x48]\n\t" : :
- "r" (dst), "r" (src) :
- "g2", "g3", "g4", "g5", "o4", "o5");
-}
-
-static __inline__ void copy_regwin(struct reg_window *dst, struct reg_window *src)
-{
- __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
- "ldd\t[%1 + 0x08], %%g4\n\t"
- "ldd\t[%1 + 0x10], %%o4\n\t"
- "std\t%%g2, [%0 + 0x00]\n\t"
- "std\t%%g4, [%0 + 0x08]\n\t"
- "std\t%%o4, [%0 + 0x10]\n\t"
- "ldd\t[%1 + 0x18], %%g2\n\t"
- "ldd\t[%1 + 0x20], %%g4\n\t"
- "ldd\t[%1 + 0x28], %%o4\n\t"
- "std\t%%g2, [%0 + 0x18]\n\t"
- "std\t%%g4, [%0 + 0x20]\n\t"
- "std\t%%o4, [%0 + 0x28]\n\t"
- "ldd\t[%1 + 0x30], %%g2\n\t"
- "ldd\t[%1 + 0x38], %%g4\n\t"
- "std\t%%g2, [%0 + 0x30]\n\t"
- "std\t%%g4, [%0 + 0x38]\n\t" : :
- "r" (dst), "r" (src) :
- "g2", "g3", "g4", "g5", "o4", "o5");
-}
-
-static __inline__ struct sparc_stackf *
-clone_stackframe(struct sparc_stackf *dst, struct sparc_stackf *src)
-{
- struct sparc_stackf *sp;
-
-#if 0
- unsigned long size;
- size = ((unsigned long)src->fp) - ((unsigned long)src);
- sp = (struct sparc_stackf *)(((unsigned long)dst) - size);
-
- if (copy_to_user(sp, src, size))
- return 0;
- if (put_user(dst, &sp->fp))
+ unsigned long fp, distance, rval;
+
+ if(!(current->tss.flags & SPARC_FLAG_32BIT)) {
+ csp += STACK_BIAS;
+ psp += STACK_BIAS;
+ __get_user(fp, &(((struct reg_window *)psp)->ins[6]));
+ } else
+ __get_user(fp, &(((struct reg_window32 *)psp)->ins[6]));
+ distance = fp - psp;
+ rval = (csp - distance);
+ if(copy_in_user(rval, psp, distance))
return 0;
-#endif
- return sp;
+ if(current->tss.flags & SPARC_FLAG_32BIT) {
+ if(put_user(((u32)csp), &(((struct reg_window32 *)rval)->ins[6])))
+ return 0;
+ return rval;
+ } else {
+ if(put_user(((u64)csp - STACK_BIAS),
+ &(((struct reg_window *)rval)->ins[6])))
+ return 0;
+ return rval - STACK_BIAS;
+ }
}
-/* #define DEBUG_WINFIXUPS */
-
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
struct thread_struct *tp)
@@ -461,16 +409,16 @@ void synchronize_user_stack(void)
flush_user_windows();
if((window = tp->w_saved) != 0) {
int winsize = REGWIN_SZ;
+ int bias = 0;
-#ifdef DEBUG_WINFIXUPS
- printk("sus(%d", (int)window);
-#endif
if(tp->flags & SPARC_FLAG_32BIT)
winsize = REGWIN32_SZ;
+ else
+ bias = STACK_BIAS;
window -= 1;
do {
- unsigned long sp = tp->rwbuf_stkptrs[window];
+ unsigned long sp = (tp->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &tp->reg_window[window];
if(!copy_to_user((char *)sp, rwin, winsize)) {
@@ -478,9 +426,6 @@ void synchronize_user_stack(void)
tp->w_saved--;
}
} while(window--);
-#ifdef DEBUG_WINFIXUPS
- printk(")");
-#endif
}
}
@@ -489,18 +434,18 @@ void fault_in_user_windows(struct pt_regs *regs)
struct thread_struct *tp = &current->tss;
unsigned long window;
int winsize = REGWIN_SZ;
+ int bias = 0;
if(tp->flags & SPARC_FLAG_32BIT)
winsize = REGWIN32_SZ;
+ else
+ bias = STACK_BIAS;
flush_user_windows();
window = tp->w_saved;
-#ifdef DEBUG_WINFIXUPS
- printk("fiuw(%d", (int)window);
-#endif
if(window != 0) {
window -= 1;
do {
- unsigned long sp = tp->rwbuf_stkptrs[window];
+ unsigned long sp = (tp->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &tp->reg_window[window];
if(copy_to_user((char *)sp, rwin, winsize))
@@ -508,9 +453,6 @@ void fault_in_user_windows(struct pt_regs *regs)
} while(window--);
}
current->tss.w_saved = 0;
-#ifdef DEBUG_WINFIXUPS
- printk(")");
-#endif
}
/* Copy a Sparc thread. The fork() return value conventions
@@ -530,97 +472,49 @@ extern void ret_from_syscall(void);
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
struct task_struct *p, struct pt_regs *regs)
{
- struct pt_regs *childregs;
- struct reg_window *new_stack, *old_stack;
unsigned long stack_offset;
-
-#ifndef __SMP__
- if(last_task_used_math == current) {
-#else
- if(current->flags & PF_USEDFPU) {
-#endif
- fprs_write(FPRS_FEF);
- fpsave((unsigned long *)&p->tss.float_regs[0], &p->tss.fsr);
-#ifdef __SMP__
- current->flags &= ~PF_USEDFPU;
-#endif
- }
+ char *child_trap_frame;
+ int tframe_size;
/* Calculate offset to stack_frame & pt_regs */
- stack_offset = ((PAGE_SIZE<<1) - TRACEREG_SZ);
-
- if(regs->tstate & TSTATE_PRIV)
- stack_offset -= REGWIN_SZ;
-
- childregs = ((struct pt_regs *) (((unsigned long)p) + stack_offset));
- *childregs = *regs;
- new_stack = (((struct reg_window *) childregs) - 1);
- old_stack = (((struct reg_window *) regs) - 1);
- *new_stack = *old_stack;
-
- p->tss.ksp = ((unsigned long) new_stack) - STACK_BIAS;
+ stack_offset = (((PAGE_SIZE << 1) -
+ ((sizeof(unsigned int)*64) + (2*sizeof(unsigned long)))) &
+ ~(64 - 1)) - (TRACEREG_SZ+REGWIN_SZ);
+ tframe_size = (TRACEREG_SZ + REGWIN_SZ) +
+ (sizeof(unsigned int) * 64) + (2 * sizeof(unsigned long));
+ child_trap_frame = ((char *)p) + stack_offset;
+ memcpy(child_trap_frame, (((struct reg_window *)regs)-1), tframe_size);
+ p->tss.ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
p->tss.kpc = ((unsigned long) ret_from_syscall) - 0x8;
- p->tss.kregs = childregs;
-
- /* Don't look... */
+ p->tss.kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct reg_window));
p->tss.cwp = regs->u_regs[UREG_G0];
-
- /* tss.wstate was copied by do_fork() */
-
if(regs->tstate & TSTATE_PRIV) {
- childregs->u_regs[UREG_FP] = p->tss.ksp;
+ p->tss.kregs->u_regs[UREG_FP] = p->tss.ksp;
p->tss.flags |= SPARC_FLAG_KTHREAD;
p->tss.current_ds = KERNEL_DS;
- childregs->u_regs[UREG_G6] = (unsigned long) p;
+ p->tss.ctx = 0;
+ p->tss.kregs->u_regs[UREG_G6] = (unsigned long) p;
} else {
- childregs->u_regs[UREG_FP] = sp;
+ p->tss.kregs->u_regs[UREG_FP] = sp;
p->tss.flags &= ~SPARC_FLAG_KTHREAD;
p->tss.current_ds = USER_DS;
-
-#if 0
+ p->tss.ctx = (p->mm->context & 0x1fff);
if (sp != regs->u_regs[UREG_FP]) {
- struct sparc_stackf *childstack;
- struct sparc_stackf *parentstack;
-
- /*
- * This is a clone() call with supplied user stack.
- * Set some valid stack frames to give to the child.
- */
- childstack = (struct sparc_stackf *)sp;
- parentstack = (struct sparc_stackf *)regs->u_regs[UREG_FP];
+ unsigned long csp;
-#if 0
- printk("clone: parent stack:\n");
- show_stackframe(parentstack);
-#endif
-
- childstack = clone_stackframe(childstack, parentstack);
- if (!childstack)
+ csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
+ if(!csp)
return -EFAULT;
-
-#if 0
- printk("clone: child stack:\n");
- show_stackframe(childstack);
-#endif
-
- childregs->u_regs[UREG_FP] = (unsigned long)childstack;
+ p->tss.kregs->u_regs[UREG_FP] = csp;
}
-#endif
}
/* Set the return value for the child. */
- childregs->u_regs[UREG_I0] = current->pid;
- childregs->u_regs[UREG_I1] = 1;
+ p->tss.kregs->u_regs[UREG_I0] = current->pid;
+ p->tss.kregs->u_regs[UREG_I1] = 1;
- /* Set the return value for the parent. */
+ /* Set the second return value for the parent. */
regs->u_regs[UREG_I1] = 0;
-#if 0
- printk("CHILD register dump\n");
- show_regs(childregs);
- show_regwindow(new_stack);
- while(1)
- barrier();
-#endif
return 0;
}
@@ -676,11 +570,19 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
if(regs->u_regs[UREG_G1] == 0)
base = 1;
- error = getname((char *) regs->u_regs[base + UREG_I0], &filename);
- if(error)
- return error;
+ lock_kernel();
+ filename = getname((char *)regs->u_regs[base + UREG_I0]);
+ error = PTR_ERR(filename);
+ if(IS_ERR(filename))
+ goto out;
error = do_execve(filename, (char **) regs->u_regs[base + UREG_I1],
(char **) regs->u_regs[base + UREG_I2], regs);
putname(filename);
+ if(!error) {
+ fprs_write(0);
+ regs->fprs = 0;
+ }
+out:
+ unlock_kernel();
return error;
}
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 24fe052cd..ac91df894 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -100,7 +100,16 @@ static inline void put_long(struct task_struct * tsk, struct vm_area_struct * vm
/* this is a hack for non-kernel-mapped video buffers and similar */
flush_cache_page(vma, addr);
if (MAP_NR(page) < max_mapnr) {
- *(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
+ unsigned long pgaddr;
+
+ pgaddr = page + (addr & ~PAGE_MASK);
+ *(unsigned long *) (pgaddr) = data;
+
+ __asm__ __volatile__("
+ membar #StoreStore
+ flush %0
+" : : "r" (pgaddr & ~7) : "memory");
+
flush_page_to_ram(page);
}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
@@ -138,7 +147,16 @@ static inline void put_int(struct task_struct * tsk, struct vm_area_struct * vma
/* this is a hack for non-kernel-mapped video buffers and similar */
flush_cache_page(vma, addr);
if (MAP_NR(page) < max_mapnr) {
- *(unsigned int *) (page + (addr & ~PAGE_MASK)) = data;
+ unsigned long pgaddr;
+
+ pgaddr = page + (addr & ~PAGE_MASK);
+ *(unsigned int *) (pgaddr) = data;
+
+ __asm__ __volatile__("
+ membar #StoreStore
+ flush %0
+" : : "r" (pgaddr & ~7) : "memory");
+
flush_page_to_ram(page);
}
/* we're bypassing pagetables, so we have to set the dirty bit ourselves */
@@ -570,6 +588,21 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
pt_error_return(regs, ESRCH);
goto out;
}
+
+ if(!(child->tss.flags & SPARC_FLAG_32BIT) &&
+ ((request == PTRACE_READDATA64) ||
+ (request == PTRACE_WRITEDATA64) ||
+ (request == PTRACE_READTEXT64) ||
+ (request == PTRACE_WRITETEXT64) ||
+ (request == PTRACE_PEEKTEXT64) ||
+ (request == PTRACE_POKETEXT64) ||
+ (request == PTRACE_PEEKDATA64) ||
+ (request == PTRACE_POKEDATA64))) {
+ addr = regs->u_regs[UREG_G2];
+ addr2 = regs->u_regs[UREG_G3];
+ request -= 30; /* wheee... */
+ }
+
switch(request) {
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
@@ -641,195 +674,207 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
goto out;
}
- case PTRACE_GETREGS:
- if (current->tss.flags & SPARC_FLAG_32BIT) {
- struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
- struct pt_regs *cregs = child->tss.kregs;
- int rval;
-
- if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
- __put_user(cregs->tpc, (&pregs->pc)) ||
- __put_user(cregs->tnpc, (&pregs->npc)) ||
- __put_user(cregs->y, (&pregs->y))) {
+ case PTRACE_GETREGS: {
+ struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ int rval;
+
+ if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
+ __put_user(cregs->tpc, (&pregs->pc)) ||
+ __put_user(cregs->tnpc, (&pregs->npc)) ||
+ __put_user(cregs->y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
+ goto out;
+ }
+ for(rval = 1; rval < 16; rval++)
+ if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
pt_error_return(regs, EFAULT);
goto out;
}
- for(rval = 1; rval < 16; rval++)
- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
+ pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+ printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
+ goto out;
+ }
+
+ case PTRACE_GETREGS64: {
+ struct pt_regs *pregs = (struct pt_regs *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ int rval;
+
+ if (__put_user(cregs->tstate, (&pregs->tstate)) ||
+ __put_user(cregs->tpc, (&pregs->tpc)) ||
+ __put_user(cregs->tnpc, (&pregs->tnpc)) ||
+ __put_user(cregs->y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
goto out;
- } else {
- struct pt_regs *pregs = (struct pt_regs *) addr;
- struct pt_regs *cregs = child->tss.kregs;
- int rval;
-
- if (__put_user(cregs->tstate, (&pregs->tstate)) ||
- __put_user(cregs->tpc, (&pregs->tpc)) ||
- __put_user(cregs->tnpc, (&pregs->tnpc)) ||
- __put_user(cregs->y, (&pregs->y))) {
+ }
+ for(rval = 1; rval < 16; rval++)
+ if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
pt_error_return(regs, EFAULT);
goto out;
}
- for(rval = 1; rval < 16; rval++)
- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
+ pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
+ printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
+ goto out;
+ }
+
+ case PTRACE_SETREGS: {
+ struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ unsigned int psr, pc, npc, y;
+ int i;
+
+ /* Must be careful, tracing process can only set certain
+ * bits in the psr.
+ */
+ if (__get_user(psr, (&pregs->psr)) ||
+ __get_user(pc, (&pregs->pc)) ||
+ __get_user(npc, (&pregs->npc)) ||
+ __get_user(y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
goto out;
}
-
- case PTRACE_SETREGS:
- if (current->tss.flags & SPARC_FLAG_32BIT) {
- struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
- struct pt_regs *cregs = child->tss.kregs;
- unsigned int psr, pc, npc, y;
- int i;
-
- /* Must be careful, tracing process can only set certain
- * bits in the psr.
- */
- if (__get_user(psr, (&pregs->psr)) ||
- __get_user(pc, (&pregs->pc)) ||
- __get_user(npc, (&pregs->npc)) ||
- __get_user(y, (&pregs->y))) {
+ cregs->tstate &= ~(TSTATE_ICC);
+ cregs->tstate |= psr_to_tstate_icc(psr);
+ if(!((pc | npc) & 3)) {
+ cregs->tpc = pc;
+ cregs->tnpc = npc;
+ }
+ cregs->y = y;
+ for(i = 1; i < 16; i++)
+ if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
pt_error_return(regs, EFAULT);
goto out;
}
- cregs->tstate &= ~(TSTATE_ICC);
- cregs->tstate |= psr_to_tstate_icc(psr);
- if(!((pc | npc) & 3)) {
- cregs->tpc = pc;
- cregs->tpc = npc;
- }
- cregs->y = y;
- for(i = 1; i < 16; i++)
- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
- goto out;
- } else {
- struct pt_regs *pregs = (struct pt_regs *) addr;
- struct pt_regs *cregs = child->tss.kregs;
- unsigned long tstate, tpc, tnpc, y;
- int i;
+ pt_succ_return(regs, 0);
+ goto out;
+ }
- /* Must be careful, tracing process can only set certain
- * bits in the psr.
- */
- if (__get_user(tstate, (&pregs->tstate)) ||
- __get_user(tpc, (&pregs->tpc)) ||
- __get_user(tnpc, (&pregs->tnpc)) ||
- __get_user(y, (&pregs->y))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- tstate &= (TSTATE_ICC | TSTATE_XCC);
- cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
- cregs->tstate |= tstate;
- if(!((tpc | tnpc) & 3)) {
- cregs->tpc = tpc;
- cregs->tnpc = tnpc;
- }
- cregs->y = y;
- for(i = 1; i < 16; i++)
- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
+ case PTRACE_SETREGS64: {
+ struct pt_regs *pregs = (struct pt_regs *) addr;
+ struct pt_regs *cregs = child->tss.kregs;
+ unsigned long tstate, tpc, tnpc, y;
+ int i;
+
+ /* Must be careful, tracing process can only set certain
+ * bits in the psr.
+ */
+ if (__get_user(tstate, (&pregs->tstate)) ||
+ __get_user(tpc, (&pregs->tpc)) ||
+ __get_user(tnpc, (&pregs->tnpc)) ||
+ __get_user(y, (&pregs->y))) {
+ pt_error_return(regs, EFAULT);
goto out;
}
-
- case PTRACE_GETFPREGS:
- if (current->tss.flags & SPARC_FLAG_32BIT) {
- struct fps {
- unsigned int regs[32];
- unsigned int fsr;
- unsigned int flags;
- unsigned int extra;
- unsigned int fpqd;
- struct fq {
- unsigned int insnaddr;
- unsigned int insn;
- } fpq[16];
- } *fps = (struct fps *) addr;
-
- if (copy_to_user(&fps->regs[0], &child->tss.float_regs[0], (32 * sizeof(unsigned int))) ||
- __put_user(child->tss.fsr, (&fps->fsr)) ||
- __put_user(0, (&fps->fpqd)) ||
- __put_user(0, (&fps->flags)) ||
- __put_user(0, (&fps->extra)) ||
- clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
+ tstate &= (TSTATE_ICC | TSTATE_XCC);
+ cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+ cregs->tstate |= tstate;
+ if(!((tpc | tnpc) & 3)) {
+ cregs->tpc = tpc;
+ cregs->tnpc = tnpc;
+ }
+ cregs->y = y;
+ for(i = 1; i < 16; i++)
+ if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
pt_error_return(regs, EFAULT);
goto out;
}
- pt_succ_return(regs, 0);
+ pt_succ_return(regs, 0);
+ goto out;
+ }
+
+ case PTRACE_GETFPREGS: {
+ struct fps {
+ unsigned int regs[32];
+ unsigned int fsr;
+ unsigned int flags;
+ unsigned int extra;
+ unsigned int fpqd;
+ struct fq {
+ unsigned int insnaddr;
+ unsigned int insn;
+ } fpq[16];
+ } *fps = (struct fps *) addr;
+ unsigned long *fpregs = (unsigned long *)(child->tss.kregs+1);
+
+ if (copy_to_user(&fps->regs[0], fpregs,
+ (32 * sizeof(unsigned int))) ||
+ __put_user(((unsigned int)fpregs[32]), (&fps->fsr)) ||
+ __put_user(0, (&fps->fpqd)) ||
+ __put_user(0, (&fps->flags)) ||
+ __put_user(0, (&fps->extra)) ||
+ clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
+ pt_error_return(regs, EFAULT);
goto out;
- } else {
- struct fps {
- unsigned int regs[64];
- unsigned long fsr;
- } *fps = (struct fps *) addr;
+ }
+ pt_succ_return(regs, 0);
+ goto out;
+ }
- if (copy_to_user(&fps->regs[0], &child->tss.float_regs[0], (64 * sizeof(unsigned int))) ||
- __put_user(child->tss.fsr, (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
+ case PTRACE_GETFPREGS64: {
+ struct fps {
+ unsigned int regs[64];
+ unsigned long fsr;
+ } *fps = (struct fps *) addr;
+ unsigned long *fpregs = (unsigned long *)(child->tss.kregs+1);
+
+ if (copy_to_user(&fps->regs[0], fpregs,
+ (64 * sizeof(unsigned int))) ||
+ __put_user(fpregs[32], (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
goto out;
}
+ pt_succ_return(regs, 0);
+ goto out;
+ }
- case PTRACE_SETFPREGS:
- if (current->tss.flags & SPARC_FLAG_32BIT) {
- struct fps {
- unsigned int regs[32];
- unsigned int fsr;
- unsigned int flags;
- unsigned int extra;
- unsigned int fpqd;
- struct fq {
- unsigned int insnaddr;
- unsigned int insn;
- } fpq[16];
- } *fps = (struct fps *) addr;
- unsigned fsr;
-
- if (copy_from_user(&child->tss.float_regs[0], &fps->regs[0], (32 * sizeof(unsigned int))) ||
- __get_user(fsr, (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- child->tss.fsr &= 0xffffffff00000000UL;
- child->tss.fsr |= fsr;
- pt_succ_return(regs, 0);
+ case PTRACE_SETFPREGS: {
+ struct fps {
+ unsigned int regs[32];
+ unsigned int fsr;
+ unsigned int flags;
+ unsigned int extra;
+ unsigned int fpqd;
+ struct fq {
+ unsigned int insnaddr;
+ unsigned int insn;
+ } fpq[16];
+ } *fps = (struct fps *) addr;
+ unsigned long *fpregs = (unsigned long *)(child->tss.kregs+1);
+ unsigned fsr;
+
+ if (copy_from_user(fpregs, &fps->regs[0],
+ (32 * sizeof(unsigned int))) ||
+ __get_user(fsr, (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
goto out;
- } else {
- struct fps {
- unsigned int regs[64];
- unsigned long fsr;
- } *fps = (struct fps *) addr;
+ }
+ fpregs[32] &= 0xffffffff00000000UL;
+ fpregs[32] |= fsr;
+ pt_succ_return(regs, 0);
+ goto out;
+ }
- if (copy_from_user(&child->tss.float_regs[0], &fps->regs[0], (64 * sizeof(unsigned int))) ||
- __get_user(child->tss.fsr, (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out;
- }
- pt_succ_return(regs, 0);
+ case PTRACE_SETFPREGS64: {
+ struct fps {
+ unsigned int regs[64];
+ unsigned long fsr;
+ } *fps = (struct fps *) addr;
+ unsigned long *fpregs = (unsigned long *)(child->tss.kregs+1);
+
+ if (copy_from_user(fpregs, &fps->regs[0],
+ (64 * sizeof(unsigned int))) ||
+ __get_user(fpregs[32], (&fps->fsr))) {
+ pt_error_return(regs, EFAULT);
goto out;
}
+ pt_succ_return(regs, 0);
+ goto out;
+ }
case PTRACE_READTEXT:
case PTRACE_READDATA: {
@@ -1022,7 +1067,10 @@ asmlinkage void syscall_trace(void)
current->pid, current->exit_code);
#endif
if (current->exit_code) {
- set_bit(current->exit_code + 31, &current->signal);
+ /* spin_lock_irq(&current->sigmask_lock); */
+ current->signal |= (1 << (current->exit_code - 1));
+ /* spin_unlock_irq(&current->sigmask_lock); */
}
+
current->exit_code = 0;
}
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 165b17ef0..9f087a969 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -1,4 +1,4 @@
-/* $Id: rtrap.S,v 1.21 1997/06/02 07:26:54 davem Exp $
+/* $Id: rtrap.S,v 1.28 1997/06/30 10:31:39 jj Exp $
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -11,121 +11,124 @@
#include <asm/spitfire.h>
#include <asm/head.h>
- /* We assume here that this is entered with AG, MG and IG bits
- * in pstate clear.
- */
+ .text
+ .align 32
+ .globl rtrap_clr_l6, rtrap
+#define PTREGS_OFF (STACK_BIAS + REGWIN_SZ)
+rtrap_clr_l6: ba,pt %xcc, rtrap
+ clr %l6
+rtrap: sethi %hi(bh_active), %l2
+ sethi %hi(bh_mask), %l1
+ ldx [%l2 + %lo(bh_active)], %l4
+ ldx [%l1 + %lo(bh_mask)], %l7
- .text
- .align 32
- .globl rtrap_clr_l6, rtrap
-rtrap_clr_l6:
- ba,pt %xcc, rtrap
- clr %l6
-rtrap: sethi %hi(bh_active), %l2
- or %l2, %lo(bh_active), %l2
- sethi %hi(bh_mask), %l1
- or %l1, %lo(bh_mask), %l1
- ldx [%l2 + %g4], %l3
- ldx [%l1 + %g4], %l4
+ andcc %l4, %l7, %g0
+ be,pt %xcc, 2f
+ nop
+ call do_bottom_half
+ nop
+2: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+ sethi %hi(0xf << 20), %l4
+ andcc %l1, TSTATE_PRIV, %l3
- andcc %l3, %l4, %g0
- be,pt %xcc, 2f
- nop
- call do_bottom_half
- nop
-2: ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TSTATE], %l1
- sethi %hi(0xf << 20), %l4
- andcc %l1, TSTATE_PRIV, %l3
+ and %l1, %l4, %l4
+ rdpr %pstate, %l7
+ andn %l1, %l4, %l1
+ be,pt %icc, to_user
+ andn %l7, PSTATE_IE, %l7
+rt_continue: ld [%sp + PTREGS_OFF + PT_V9_FPRS], %l2
+ ld [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
+ ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
- and %l1, %l4, %l4
- rdpr %pstate, %l7
- andn %l1, %l4, %l1
- be,pt %icc, to_user
- andn %l7, PSTATE_IE, %l7
-3: ldx [%g6 + AOFF_task_tss + AOFF_thread_ctx], %l0
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G1], %g1
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G2], %g2
+ brnz,pn %l2, rt_fpu_restore
+ ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
+rt_after_fpu: ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
+ mov %g6, %l6
+ ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
+ ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
+ ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
+ ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G3], %g3
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G4], %g4
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G5], %g5
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G6], %g6
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G7], %g7
- wrpr %l7, PSTATE_AG, %pstate
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I0], %i0
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I1], %i1
+ wrpr %l7, PSTATE_AG, %pstate
+ ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
+ ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
+ ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
+ ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
+ ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
+ ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
+ ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I2], %i2
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I3], %i3
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I4], %i4
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I5], %i5
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I6], %i6
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_I7], %i7
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_Y], %o3
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC], %l2
+ ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7
+ ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
+ ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2
+ ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
+ wr %o3, %g0, %y
+ srl %l4, 20, %l4
+ wrpr %l4, 0x0, %pil
+ wrpr %g0, 0x1, %tl
- ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %o2
- wr %o3, %g0, %y
- srl %l4, 20, %l4
- wrpr %l4, 0x0, %pil
- wrpr %g0, 0x1, %tl
- wrpr %l1, %g0, %tstate
- wrpr %l2, %g0, %tpc
- mov PRIMARY_CONTEXT, %l7
+ wrpr %l1, %g0, %tstate
+ wrpr %l2, %g0, %tpc
+ mov PRIMARY_CONTEXT, %l7
+ brnz,pn %l3, kern_rtt
+ wrpr %o2, %g0, %tnpc
+ stxa %l0, [%l7] ASI_DMMU
+ flush %l6
+ rdpr %wstate, %l1
- wrpr %o2, %g0, %tnpc
- brnz,a,pn %l3, 1f
- restore
- sethi %uhi(KERNBASE), %l5
- sllx %l5, 32, %l5
- stxa %l0, [%l7] ASI_DMMU
- flush %l5
- rdpr %wstate, %l1
+ rdpr %otherwin, %l2
+ srl %l1, 3, %l1
+ wrpr %l2, %g0, %canrestore
+ wrpr %l1, %g0, %wstate
+ wrpr %g0, %g0, %otherwin
+ restore
+ rdpr %canrestore, %g1
+ wrpr %g1, 0x0, %cleanwin
- rdpr %otherwin, %l2
- srl %l1, 3, %l1
- wrpr %l2, %g0, %canrestore
- wrpr %l1, %g0, %wstate
- wrpr %g0, %g0, %otherwin
- restore
- rdpr %canrestore, %g1
- wrpr %g1, 0x0, %cleanwin
+ retry
+kern_rtt: restore
+ retry
+to_user: sethi %hi(need_resched), %l0
+ ld [%l0 + %lo(need_resched)], %l0
+ wrpr %l7, PSTATE_IE, %pstate
+ brz,pt %l0, check_signal
+ ldx [%g6 + AOFF_task_signal], %l0
-1: retry
-to_user:
- sethi %hi(need_resched), %l0
- or %l0, %lo(need_resched), %l0
- ld [%l0 + %g4], %l0
- wrpr %l7, PSTATE_IE, %pstate
- brz,pt %l0, check_signal
- ldx [%g6 + AOFF_task_signal], %l0
- nop
+ call schedule
+ nop
+ ldx [%g6 + AOFF_task_signal], %l0
+ nop
+check_signal: ldx [%g6 + AOFF_task_blocked], %o0
+ andncc %l0, %o0, %g0
+ be,pt %xcc, check_user_wins
+ ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
- call schedule
- nop
- ba,pt %xcc, check_signal
- ldx [%g6 + AOFF_task_signal], %l0
-check_signal:
- ldx [%g6 + AOFF_task_blocked], %o0
- andncc %l0, %o0, %g0
- be,a,pt %xcc, check_user_wins
- ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
-
- mov %l5, %o2
- mov %l6, %o3
- call do_signal
- add %sp, STACK_BIAS + REGWIN_SZ, %o1
- ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
- clr %l6
+ mov %l5, %o2
+ mov %l6, %o3
+ call do_signal
+ add %sp, STACK_BIAS + REGWIN_SZ, %o1
+ ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %o2
+ clr %l6
check_user_wins:
- brz,pt %o2, 3b
- nop
+ brz,pt %o2, rt_continue
+ nop
+
+ call fault_in_user_windows
+ add %sp, STACK_BIAS + REGWIN_SZ, %o0
+ ba,a,pt %xcc, rt_continue
+rt_fpu_restore: wr %g0, FPRS_FEF, %fprs
+ add %sp, PTREGS_OFF + TRACEREG_SZ, %g4
+ wr %g0, ASI_BLK_P, %asi
+
+ membar #StoreLoad | #LoadLoad
+ ldda [%g4 + 0x000] %asi, %f0
+ ldda [%g4 + 0x040] %asi, %f16
+ ldda [%g4 + 0x080] %asi, %f32
+ ldda [%g4 + 0x0c0] %asi, %f48
+ ldx [%g4 + 0x100], %fsr
+ ldx [%g4 + 0x108], %g3
+ membar #Sync
- call fault_in_user_windows
- add %sp, STACK_BIAS + REGWIN_SZ, %o0
- ba,a,pt %xcc, 3b
- nop
- nop
- nop
- nop
- nop
+ b,pt %xcc, rt_after_fpu
+ wr %g3, 0, %gsr
+#undef PTREGS_OFF
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 832d3b97f..680baf7de 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -1,4 +1,4 @@
-/* $Id: setup.c,v 1.7 1997/05/20 07:58:56 jj Exp $
+/* $Id: setup.c,v 1.10 1997/07/08 11:07:47 jj Exp $
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
@@ -35,6 +35,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
+#include <asm/head.h>
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
@@ -62,7 +63,6 @@ unsigned long bios32_init(unsigned long memory_start, unsigned long memory_end)
*/
extern unsigned long sparc64_ttable_tl0;
-extern void breakpoint(void);
#if CONFIG_SUN_CONSOLE
extern void console_restore_palette(void);
#endif
@@ -108,23 +108,13 @@ static int console_fb = 0;
#endif
static unsigned long memory_size = 0;
+/* XXX Implement this at some point... */
void kernel_enter_debugger(void)
{
-#if 0
- if (boot_flags & BOOTME_KGDB) {
- printk("KGDB: Entered\n");
- breakpoint();
- }
-#endif
}
int obp_system_intr(void)
{
- if (boot_flags & BOOTME_KGDB) {
- printk("KGDB: system interrupted\n");
- breakpoint();
- return 1;
- }
if (boot_flags & BOOTME_DEBUG) {
printk("OBP: system interrupted\n");
prom_halt();
@@ -148,7 +138,7 @@ __initfunc(static void process_switch(char c))
break;
case 'h':
prom_printf("boot_flags_init: Halt!\n");
- halt();
+ prom_halt();
break;
default:
printk("Unknown boot switch (-%c)\n", c);
@@ -266,23 +256,9 @@ __initfunc(void setup_arch(char **cmdline_p,
*cmdline_p = prom_getbootargs();
strcpy(saved_command_line, *cmdline_p);
- prom_printf("BOOT: args[%s] saved[%s]\n", *cmdline_p, saved_command_line);
-
printk("ARCH: SUN4U\n");
boot_flags_init(*cmdline_p);
-#if 0
- if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) &&
- ((*(short *)linux_dbvec) != -1)) {
- printk("Booted under KADB. Syncing trap table.\n");
- (*(linux_dbvec->teach_debugger))();
- }
- if((boot_flags & BOOTME_KGDB)) {
- set_debug_traps();
- prom_printf ("Breakpoint!\n");
- breakpoint();
- }
-#endif
idprom_init();
total = prom_probe_memory();
@@ -313,7 +289,7 @@ __initfunc(void setup_arch(char **cmdline_p,
*memory_start_p = PAGE_ALIGN(((unsigned long) &end));
*memory_end_p = (end_of_phys_memory + PAGE_OFFSET);
-#ifndef NO_DAVEM_DEBUGGING
+#ifdef DAVEM_DEBUGGING
prom_printf("phys_base[%016lx] memory_start[%016lx] memory_end[%016lx]\n",
phys_base, *memory_start_p, *memory_end_p);
#endif
@@ -328,8 +304,11 @@ __initfunc(void setup_arch(char **cmdline_p,
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (ramdisk_image) {
- initrd_start = ramdisk_image;
- if (initrd_start < PAGE_OFFSET) initrd_start += PAGE_OFFSET;
+ unsigned long start = 0;
+
+ if (ramdisk_image >= (unsigned long)&end - 2 * PAGE_SIZE)
+ ramdisk_image -= KERNBASE;
+ initrd_start = ramdisk_image + phys_base + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
if (initrd_end > *memory_end_p) {
printk(KERN_CRIT "initrd extends beyond end of memory "
@@ -337,9 +316,11 @@ __initfunc(void setup_arch(char **cmdline_p,
initrd_end,*memory_end_p);
initrd_start = 0;
}
- if (initrd_start >= *memory_start_p && initrd_start < *memory_start_p + 2 * PAGE_SIZE) {
+ if (initrd_start)
+ start = ramdisk_image + KERNBASE;
+ if (start >= *memory_start_p && start < *memory_start_p + 2 * PAGE_SIZE) {
initrd_below_start_ok = 1;
- *memory_start_p = PAGE_ALIGN (initrd_end);
+ *memory_start_p = PAGE_ALIGN (start + ramdisk_size);
}
}
#endif
@@ -424,7 +405,11 @@ extern char *mmu_info(void);
int get_cpuinfo(char *buffer)
{
- int cpuid=get_cpuid();
+#ifndef __SMP__
+ int cpuid=0;
+#else
+#error SMP not supported on sparc64 yet
+#endif
return sprintf(buffer, "cpu\t\t: %s\n"
"fpu\t\t: %s\n"
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index fe4615a6b..cfc55fc2e 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.6 1997/05/29 12:44:48 jj Exp $
+/* $Id: signal.c,v 1.20 1997/07/14 03:10:28 davem Exp $
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -8,6 +8,7 @@
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
@@ -23,6 +24,7 @@
#include <asm/svr4.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
+#include <asm/uctx.h>
#include <asm/smp_lock.h>
#define _S(nr) (1<<((nr)-1))
@@ -38,6 +40,124 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
/* This turned off for production... */
/* #define DEBUG_SIGNALS 1 */
+/* {set, get}context() needed for 64-bit SparcLinux userland. */
+asmlinkage void sparc64_set_context(struct pt_regs *regs)
+{
+ struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
+ struct thread_struct *tp = &current->tss;
+ mc_gregset_t *grp;
+ unsigned long pc, npc, tstate;
+ unsigned long fp, i7;
+ unsigned char fenab;
+
+ __asm__ __volatile__("flushw");
+ if(tp->w_saved ||
+ (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
+ (!__access_ok((unsigned long)ucp, sizeof(*ucp))))
+ do_exit(SIGSEGV);
+ grp = &ucp->uc_mcontext.mc_gregs;
+ __get_user(pc, &((*grp)[MC_PC]));
+ __get_user(npc, &((*grp)[MC_NPC]));
+ if((pc | npc) & 3)
+ do_exit(SIGSEGV);
+ if(regs->u_regs[UREG_I1]) {
+ __get_user(current->blocked, &ucp->uc_sigmask);
+ current->blocked &= _BLOCKABLE;
+ }
+ regs->tpc = pc;
+ regs->tnpc = npc;
+ __get_user(regs->y, &((*grp)[MC_Y]));
+ __get_user(tstate, &((*grp)[MC_TSTATE]));
+ regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+ regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_XCC));
+ __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
+ __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
+ __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
+ __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
+ __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
+ __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
+ __get_user(regs->u_regs[UREG_G7], (&(*grp)[MC_G7]));
+ __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
+ __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
+ __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
+ __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
+ __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
+ __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
+ __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
+ __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
+
+ __get_user(fp, &(ucp->uc_mcontext.mc_fp));
+ __get_user(i7, &(ucp->uc_mcontext.mc_i7));
+ __put_user(fp, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+ __put_user(i7, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+
+ __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
+ if(fenab) {
+ unsigned long *fpregs = (unsigned long *)(regs+1);
+ copy_from_user(fpregs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
+ (sizeof(unsigned long) * 32));
+ __get_user(fpregs[32], &(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
+ __get_user(fpregs[33], &(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
+ regs->fprs = FPRS_FEF;
+ }
+}
+
+asmlinkage void sparc64_get_context(struct pt_regs *regs)
+{
+ struct ucontext *ucp = (struct ucontext *) regs->u_regs[UREG_I0];
+ struct thread_struct *tp = &current->tss;
+ mc_gregset_t *grp;
+ mcontext_t *mcp;
+ unsigned long fp, i7;
+ unsigned char fenab = (current->flags & PF_USEDFPU);
+
+ synchronize_user_stack();
+ if(tp->w_saved || clear_user(ucp, sizeof(*ucp)))
+ do_exit(SIGSEGV);
+ mcp = &ucp->uc_mcontext;
+ grp = &mcp->mc_gregs;
+
+ /* Skip over the trap instruction, first. */
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+
+ __put_user(current->blocked, &ucp->uc_sigmask);
+ __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
+ __put_user(regs->tpc, &((*grp)[MC_PC]));
+ __put_user(regs->tnpc, &((*grp)[MC_NPC]));
+ __put_user(regs->y, &((*grp)[MC_Y]));
+ __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
+ __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
+ __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
+ __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
+ __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
+ __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
+ __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
+ __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
+ __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
+ __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
+ __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
+ __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
+ __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
+ __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
+ __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
+
+ __get_user(fp, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
+ __get_user(i7, (&(((struct reg_window *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
+ __put_user(fp, &(mcp->mc_fp));
+ __put_user(i7, &(mcp->mc_i7));
+
+ __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
+ if(fenab) {
+ unsigned long *fpregs = (unsigned long *)(regs+1);
+ copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
+ (sizeof(unsigned long) * 32));
+ __put_user(fpregs[32], &(mcp->mc_fpregs.mcfpu_fsr));
+ __put_user(fpregs[33], &(mcp->mc_fpregs.mcfpu_gsr));
+ __put_user(FPRS_FEF, &(mcp->mc_fpregs.mcfpu_fprs));
+ }
+}
+
/*
* The new signal frame, intended to be used for Linux applications only
* (we have enough in there to work with clone).
@@ -65,7 +185,8 @@ asmlinkage void _sigpause_common(unsigned int set, struct pt_regs *regs)
#ifdef CONFIG_SPARC32_COMPAT
if (current->tss.flags & SPARC_FLAG_32BIT) {
- extern asmlinkage void _sigpause32_common(unsigned int, struct pt_regs *);
+ extern asmlinkage void _sigpause32_common(unsigned int,
+ struct pt_regs *);
_sigpause32_common(set, regs);
return;
}
@@ -111,22 +232,12 @@ asmlinkage void do_sigsuspend(struct pt_regs *regs)
static inline void
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
-#ifdef __SMP__
- if (current->flags & PF_USEDFPU)
- regs->tstate &= ~(TSTATE_PEF);
-#else
- if (current == last_task_used_math) {
- last_task_used_math = 0;
- regs->tstate &= ~(TSTATE_PEF);
- }
-#endif
- current->used_math = 1;
- current->flags &= ~PF_USEDFPU;
-
- copy_from_user(&current->tss.float_regs[0],
- &fpu->si_float_regs[0],
+ unsigned long *fpregs = (unsigned long *)(regs+1);
+ copy_from_user(fpregs, &fpu->si_float_regs[0],
(sizeof(unsigned int) * 64));
- __get_user(current->tss.fsr, &fpu->si_fsr);
+ __get_user(fpregs[32], &fpu->si_fsr);
+ __get_user(fpregs[33], &fpu->si_gsr);
+ regs->fprs = FPRS_FEF;
}
void do_sigreturn(struct pt_regs *regs)
@@ -139,24 +250,25 @@ void do_sigreturn(struct pt_regs *regs)
#ifdef CONFIG_SPARC32_COMPAT
if (current->tss.flags & SPARC_FLAG_32BIT) {
extern asmlinkage void do_sigreturn32(struct pt_regs *);
- do_sigreturn32(regs);
- return;
+ return do_sigreturn32(regs);
}
#endif
synchronize_user_stack ();
- sf = (struct new_signal_frame *) regs->u_regs [UREG_FP];
+ sf = (struct new_signal_frame *)
+ (regs->u_regs [UREG_FP] + STACK_BIAS);
+
/* 1. Make sure we are not getting garbage from the user */
- if (verify_area (VERIFY_READ, sf, sizeof (*sf))){
+ if (verify_area (VERIFY_READ, sf, sizeof (*sf)))
goto segv;
- }
- if (((unsigned long) sf) & 3){
+
+ if (((unsigned long) sf) & 3)
goto segv;
- }
+
get_user(tpc, &sf->info.si_regs.tpc);
__get_user(tnpc, &sf->info.si_regs.tnpc);
- if ((tpc | tnpc) & 3){
+ if ((tpc | tnpc) & 3)
goto segv;
- }
+
regs->tpc = tpc;
regs->tnpc = tnpc;
@@ -165,9 +277,9 @@ void do_sigreturn(struct pt_regs *regs)
__get_user(tstate, &sf->info.si_regs.tstate);
copy_from_user(regs->u_regs, sf->info.si_regs.u_regs, sizeof(regs->u_regs));
- /* User can only change condition codes and FPU enabling in %tstate. */
- regs->tstate &= ~(TSTATE_ICC | TSTATE_PEF);
- regs->tstate |= (tstate & (TSTATE_ICC | TSTATE_PEF));
+ /* User can only change condition codes in %tstate. */
+ regs->tstate &= ~(TSTATE_ICC);
+ regs->tstate |= (tstate & TSTATE_ICC);
__get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
@@ -191,27 +303,12 @@ static int invalid_frame_pointer(void *fp, int fplen)
static inline void
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
-#ifdef __SMP__
- if (current->flags & PF_USEDFPU) {
- fprs_write(FPRS_FEF);
- fpsave((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- regs->tstate &= ~(TSTATE_PEF);
- current->flags &= ~(PF_USEDFPU);
- }
-#else
- if (current == last_task_used_math) {
- fprs_write(FPRS_FEF);
- fpsave((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- last_task_used_math = 0;
- regs->tstate &= ~(TSTATE_PEF);
- }
-#endif
- copy_to_user(&fpu->si_float_regs[0], &current->tss.float_regs[0],
+ unsigned long *fpregs = (unsigned long *)(regs+1);
+ copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 64));
- __put_user(current->tss.fsr, &fpu->si_fsr);
- current->used_math = 0;
+ __put_user(fpregs[32], &fpu->si_fsr);
+ __put_user(fpregs[33], &fpu->si_gsr);
+ regs->fprs = 0;
}
static inline void
@@ -220,33 +317,29 @@ new_setup_frame(struct sigaction *sa, struct pt_regs *regs,
{
struct new_signal_frame *sf;
int sigframe_size;
- unsigned long tmp;
- int i;
/* 1. Make sure everything is clean */
synchronize_user_stack();
sigframe_size = NF_ALIGNEDSZ;
- if (!current->used_math)
+ if (!(current->flags & PF_USEDFPU))
sigframe_size -= sizeof(__siginfo_fpu_t);
- sf = (struct new_signal_frame *)(regs->u_regs[UREG_FP] - sigframe_size);
+ sf = (struct new_signal_frame *)
+ (regs->u_regs[UREG_FP] + STACK_BIAS - sigframe_size);
- if (invalid_frame_pointer (sf, sigframe_size)){
- lock_kernel ();
- do_exit(SIGILL);
- }
+ if (invalid_frame_pointer (sf, sigframe_size))
+ goto sigill;
- if (current->tss.w_saved != 0){
+ if (current->tss.w_saved != 0) {
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
- lock_kernel ();
- do_exit (SIGILL);
+ goto sigill;
}
/* 2. Save the current process state */
copy_to_user(&sf->info.si_regs, regs, sizeof (*regs));
- if (current->used_math) {
+ if (current->flags & PF_USEDFPU) {
save_fpu_state(regs, &sf->fpu_state);
__put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
@@ -254,17 +347,17 @@ new_setup_frame(struct sigaction *sa, struct pt_regs *regs,
}
__put_user(oldmask, &sf->info.si_mask);
- for (i = 0; i < sizeof(struct reg_window)/8; i++) {
- __get_user(tmp, (((u64 *)regs->u_regs[UREG_FP])+i));
- __put_user(tmp, (((u64 *)sf)+i));
- }
+
+ copy_in_user((u64 *)sf,
+ (u64 *)(regs->u_regs[UREG_FP]+STACK_BIAS),
+ sizeof(struct reg_window));
/* 3. return to kernel instructions */
__put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
__put_user(0x91d02011, &sf->insns[1]); /* t 0x11 */
/* 4. signal handler back-trampoline and parameters */
- regs->u_regs[UREG_FP] = (unsigned long) sf;
+ regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
regs->u_regs[UREG_I0] = signo;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
@@ -274,15 +367,27 @@ new_setup_frame(struct sigaction *sa, struct pt_regs *regs,
regs->tnpc = (regs->tpc + 4);
/* Flush instruction space. */
- __asm__ __volatile__("
- membar #StoreStore
- stxa %%g0, [%0] %2
- stxa %%g0, [%1] %2
- flush %%g4
- " : /* no outputs */
- : "r" (((unsigned long)&(sf->insns[0])) & ~(PAGE_MASK)),
- "r" ((((unsigned long)&(sf->insns[0])) & ~(PAGE_MASK)) + PAGE_SIZE),
- "i" (ASI_IC_TAG));
+ {
+ unsigned long address = ((unsigned long)&(sf->insns[0]));
+ pgd_t *pgdp = pgd_offset(current->mm, address);
+ pmd_t *pmdp = pmd_offset(pgdp, address);
+ pte_t *ptep = pte_offset(pmdp, address);
+
+ if(pte_present(*ptep)) {
+ unsigned long page = pte_page(*ptep);
+
+ __asm__ __volatile__("
+ membar #StoreStore
+ flush %0 + %1"
+ : : "r" (page), "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+ }
+ }
+ return;
+
+sigill:
+ lock_kernel();
+ do_exit(SIGILL);
}
static inline void handle_signal(unsigned long signr, struct sigaction *sa,
@@ -291,8 +396,11 @@ static inline void handle_signal(unsigned long signr, struct sigaction *sa,
new_setup_frame(sa, regs, signr, oldmask);
if(sa->sa_flags & SA_ONESHOT)
sa->sa_handler = NULL;
- if(!(sa->sa_flags & SA_NOMASK))
+ if(!(sa->sa_flags & SA_NOMASK)) {
+ spin_lock_irq(&current->sigmask_lock);
current->blocked |= (sa->sa_mask | _S(signr)) & _BLOCKABLE;
+ spin_unlock_irq(&current->sigmask_lock);
+ }
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -334,7 +442,11 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
#endif
while ((signr = current->signal & mask) != 0) {
signr = ffz(~signr);
- clear_bit(signr + 32, &current->signal);
+
+ spin_lock_irq(&current->sigmask_lock);
+ current->signal &= ~(1 << signr);
+ spin_unlock_irq(&current->sigmask_lock);
+
sa = current->sig->action + signr;
signr++;
if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
@@ -348,7 +460,9 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
if (signr == SIGSTOP)
continue;
if (_S(signr) & current->blocked) {
+ spin_lock_irq(&current->sigmask_lock);
current->signal |= _S(signr);
+ spin_unlock_irq(&current->sigmask_lock);
continue;
}
sa = current->sig->action + signr - 1;
@@ -391,8 +505,10 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
if(current->binfmt && current->binfmt->core_dump) {
+ lock_kernel();
if(current->binfmt->core_dump(signr, regs))
signr |= 0x80;
+ unlock_kernel();
}
#ifdef DEBUG_SIGNALS
/* Very useful to debug dynamic linker problems */
@@ -401,9 +517,15 @@ asmlinkage int do_signal(unsigned long oldmask, struct pt_regs * regs,
#endif
/* fall through */
default:
+ spin_lock_irq(&current->sigmask_lock);
current->signal |= _S(signr & 0x7f);
+ spin_unlock_irq(&current->sigmask_lock);
+
current->flags |= PF_SIGNALED;
+
+ lock_kernel();
do_exit(signr);
+ unlock_kernel();
}
}
if(restart_syscall)
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index c0454658b..5135c2ae5 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -1,4 +1,4 @@
-/* $Id: signal32.c,v 1.13 1997/06/01 05:46:09 davem Exp $
+/* $Id: signal32.c,v 1.26 1997/07/14 03:10:31 davem Exp $
* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
@@ -67,13 +67,12 @@ struct signal_sframe32 {
* (we have enough in there to work with clone).
* All the interesting bits are in the info field.
*/
-
struct new_signal_frame32 {
struct sparc_stackf32 ss;
__siginfo32_t info;
/* __siginfo_fpu32_t * */ u32 fpu_save;
unsigned int insns [2];
- __siginfo_fpu32_t fpu_state;
+ __siginfo_fpu_t fpu_state;
};
/* Align macros */
@@ -115,25 +114,12 @@ asmlinkage void _sigpause32_common(unsigned int set, struct pt_regs *regs)
}
}
-static inline void
-restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu32_t *fpu)
+static inline void restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
-#ifdef __SMP__
- if (current->flags & PF_USEDFPU)
- regs->tstate &= ~(TSTATE_PEF);
-#else
- if (current == last_task_used_math) {
- last_task_used_math = 0;
- regs->tstate &= ~(TSTATE_PEF);
- }
-#endif
- current->used_math = 1;
- current->flags &= ~PF_USEDFPU;
-
- copy_from_user(&current->tss.float_regs[0],
- &fpu->si_float_regs[0],
- (sizeof(unsigned int) * 32));
- __get_user(current->tss.fsr, &fpu->si_fsr);
+ unsigned long *fpregs = (unsigned long *)(regs + 1);
+ copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 64));
+ __get_user(fpregs[32], &fpu->si_fsr);
+ __get_user(fpregs[33], &fpu->si_gsr);
}
void do_new_sigreturn32(struct pt_regs *regs)
@@ -142,6 +128,7 @@ void do_new_sigreturn32(struct pt_regs *regs)
unsigned int psr;
unsigned pc, npc, fpu_save, mask;
+ regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct new_signal_frame32 *) regs->u_regs [UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
@@ -178,12 +165,12 @@ void do_new_sigreturn32(struct pt_regs *regs)
__get_user(regs->u_regs[UREG_I6], &sf->info.si_regs.u_regs[UREG_I6]);
__get_user(regs->u_regs[UREG_I7], &sf->info.si_regs.u_regs[UREG_I7]);
- /* User can only change condition codes and FPU enabling in %tstate. */
- regs->tstate &= ~(TSTATE_ICC | TSTATE_PEF);
+ /* User can only change condition codes in %tstate. */
+ regs->tstate &= ~(TSTATE_ICC);
regs->tstate |= psr_to_tstate_icc(psr);
if (psr & PSR_EF)
- regs->tstate |= TSTATE_PEF;
+ regs->fprs = FPRS_FEF;
__get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
@@ -206,7 +193,8 @@ asmlinkage void do_sigreturn32(struct pt_regs *regs)
if (current->tss.new_signal)
return do_new_sigreturn32(regs);
- scptr = (struct sigcontext32 *) regs->u_regs[UREG_I0];
+ scptr = (struct sigcontext32 *)
+ (regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
/* Check sanity of the user arg. */
if(verify_area(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
(((unsigned long) scptr) & 3))
@@ -257,9 +245,9 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
#endif
int old_status = current->tss.sstk_info.cur_status;
unsigned psr;
- int i;
synchronize_user_stack();
+ regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sframep = (struct signal_sframe32 *) regs->u_regs[UREG_FP];
sframep = (struct signal_sframe32 *) (((unsigned long) sframep)-SF_ALIGNEDSZ);
if (invalid_frame_pointer (sframep, sizeof(*sframep))){
@@ -285,6 +273,8 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
__put_user(pc, &sc->sigc_pc);
__put_user(npc, &sc->sigc_npc);
psr = tstate_to_psr (regs->tstate);
+ if(current->flags & PF_USEDFPU)
+ psr |= PSR_EF;
__put_user(psr, &sc->sigc_psr);
__put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
__put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
@@ -301,13 +291,9 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
}
else
#endif
- /* XXX Perhaps we need a copy_in_user()? -DaveM */
- for (i = 0; i < 16; i++) {
- u32 temp;
-
- get_user (temp, (((u32 *)(regs->u_regs[UREG_FP]))+i));
- put_user (temp, (((u32 *)sframep)+i));
- }
+ copy_in_user((u32 *)sframep,
+ (u32 *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
current->tss.w_saved = 0; /* So process is allowed to execute. */
__put_user(signr, &sframep->sig_num);
@@ -329,35 +315,17 @@ setup_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
}
-static inline void
-save_fpu_state32(struct pt_regs *regs, __siginfo_fpu32_t *fpu)
+static inline void save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t *fpu)
{
-#ifdef __SMP__
- if (current->flags & PF_USEDFPU) {
- fprs_write(FPRS_FEF);
- fpsave32((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- regs->tstate &= ~(TSTATE_PEF);
- current->flags &= ~(PF_USEDFPU);
- }
-#else
- if (current == last_task_used_math) {
- fprs_write(FPRS_FEF);
- fpsave32((unsigned long *)&current->tss.float_regs[0],
- &current->tss.fsr);
- last_task_used_math = 0;
- regs->tstate &= ~(TSTATE_PEF);
- }
-#endif
- copy_to_user(&fpu->si_float_regs[0], &current->tss.float_regs[0],
- (sizeof(unsigned int) * 32));
- __put_user(current->tss.fsr, &fpu->si_fsr);
- current->used_math = 0;
+ unsigned long *fpregs = (unsigned long *)(regs+1);
+ copy_to_user(&fpu->si_float_regs[0], fpregs, (sizeof(unsigned int) * 64));
+ __put_user(fpregs[32], &fpu->si_fsr);
+ __put_user(fpregs[33], &fpu->si_gsr);
+ regs->fprs = 0;
}
-static inline void
-new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
- int signo, unsigned long oldmask)
+static inline void new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
+ int signo, unsigned long oldmask)
{
struct new_signal_frame32 *sf;
int sigframe_size;
@@ -367,21 +335,26 @@ new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
/* 1. Make sure everything is clean */
synchronize_user_stack();
sigframe_size = NF_ALIGNEDSZ;
- if (!current->used_math)
- sigframe_size -= sizeof(__siginfo_fpu32_t);
+ if (!(current->flags & PF_USEDFPU))
+ sigframe_size -= sizeof(__siginfo_fpu_t);
+ regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct new_signal_frame32 *)(regs->u_regs[UREG_FP] - sigframe_size);
if (invalid_frame_pointer (sf, sigframe_size)) {
- lock_kernel ();
- do_exit(SIGILL);
+#ifdef DEBUG_SIGNALS
+ printk("new_setup_frame32(%s:%d): invalid_frame_pointer(%p, %d)\n",
+ current->comm, current->pid, sf, sigframe_size);
+#endif
+ goto sigill;
}
if (current->tss.w_saved != 0) {
+#ifdef DEBUG_SIGNALS
printk ("%s[%d]: Invalid user stack frame for "
"signal delivery.\n", current->comm, current->pid);
- lock_kernel ();
- do_exit (SIGILL);
+#endif
+ goto sigill;
}
/* 2. Save the current process state */
@@ -389,11 +362,13 @@ new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
__put_user(regs->tnpc, &sf->info.si_regs.npc);
__put_user(regs->y, &sf->info.si_regs.y);
psr = tstate_to_psr (regs->tstate);
+ if(current->flags & PF_USEDFPU)
+ psr |= PSR_EF;
__put_user(psr, &sf->info.si_regs.psr);
for (i = 0; i < 16; i++)
__put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
- if (current->used_math) {
+ if (psr & PSR_EF) {
save_fpu_state32(regs, &sf->fpu_state);
__put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
@@ -402,13 +377,9 @@ new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
__put_user(oldmask, &sf->info.si_mask);
- /* XXX Perhaps we need a copy_in_user()? -DaveM */
- for (i = 0; i < sizeof(struct reg_window32)/4; i++) {
- u32 tmp;
-
- __get_user(tmp, (((u32 *)regs->u_regs[UREG_FP])+i));
- __put_user(tmp, (((u32 *)sf)+i));
- }
+ copy_in_user((u32 *)sf,
+ (u32 *)(regs->u_regs[UREG_FP]),
+ sizeof(struct reg_window32));
/* 3. return to kernel instructions */
__put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
@@ -425,15 +396,27 @@ new_setup_frame32(struct sigaction *sa, struct pt_regs *regs,
regs->tnpc = (regs->tpc + 4);
/* Flush instruction space. */
- __asm__ __volatile__("
- membar #StoreStore
- stxa %%g0, [%0] %2
- stxa %%g0, [%1] %2
- flush %%g4
- " : /* no outputs */
- : "r" (((unsigned long)&(sf->insns[0])) & ~(PAGE_MASK)),
- "r" ((((unsigned long)&(sf->insns[0])) & ~(PAGE_MASK)) + PAGE_SIZE),
- "i" (ASI_IC_TAG));
+ {
+ unsigned long address = ((unsigned long)&(sf->insns[0]));
+ pgd_t *pgdp = pgd_offset(current->mm, address);
+ pmd_t *pmdp = pmd_offset(pgdp, address);
+ pte_t *ptep = pte_offset(pmdp, address);
+
+ if(pte_present(*ptep)) {
+ unsigned long page = pte_page(*ptep);
+
+ __asm__ __volatile__("
+ membar #StoreStore
+ flush %0 + %1"
+ : : "r" (page), "r" (address & (PAGE_SIZE - 1))
+ : "memory");
+ }
+ }
+ return;
+
+sigill:
+ lock_kernel();
+ do_exit(SIGILL);
}
/* Setup a Solaris stack frame */
@@ -454,6 +437,7 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
int i;
synchronize_user_stack();
+ regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sfp = (svr4_signal_frame_t *) regs->u_regs[UREG_FP] - REGWIN_SZ;
sfp = (svr4_signal_frame_t *) (((unsigned long) sfp)-SVR4_SF_ALIGNED);
@@ -485,6 +469,8 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
__put_user(regs->tpc, &((*gr) [SVR4_PC]));
__put_user(regs->tnpc, &((*gr) [SVR4_NPC]));
psr = tstate_to_psr (regs->tstate);
+ if(current->flags & PF_USEDFPU)
+ psr |= PSR_EF;
__put_user(psr, &((*gr) [SVR4_PSR]));
__put_user(regs->y, &((*gr) [SVR4_Y]));
@@ -546,7 +532,8 @@ setup_svr4_frame32(struct sigaction *sa, unsigned long pc, unsigned long npc,
#endif
/* Arguments passed to signal handler */
if (regs->u_regs [14]){
- struct reg_window32 *rw = (struct reg_window32 *) regs->u_regs [14];
+ struct reg_window32 *rw = (struct reg_window32 *)
+ (regs->u_regs [14] & 0x00000000ffffffffUL);
__put_user(signr, &rw->ins [0]);
__put_user((u64)si, &rw->ins [1]);
@@ -583,7 +570,9 @@ svr4_getcontext32(svr4_ucontext_t *uc, struct pt_regs *regs)
/* Store registers */
__put_user(regs->tpc, &uc->mcontext.greg [SVR4_PC]);
__put_user(regs->tnpc, &uc->mcontext.greg [SVR4_NPC]);
- __put_user(tstate_to_psr(regs->tstate), &uc->mcontext.greg [SVR4_PSR]);
+ __put_user((tstate_to_psr(regs->tstate) |
+ ((current->flags & PF_USEDFPU) ? PSR_EF : 0)),
+ &uc->mcontext.greg [SVR4_PSR]);
__put_user(regs->y, &uc->mcontext.greg [SVR4_Y]);
/* Copy g [1..7] and o [0..7] registers */
@@ -619,16 +608,16 @@ asmlinkage int svr4_setcontext32(svr4_ucontext_t *c, struct pt_regs *regs)
if (tp->w_saved){
printk ("Uh oh, w_saved is: 0x%lx\n", tp->w_saved);
- do_exit(SIGSEGV);
+ goto sigsegv;
}
if (((unsigned long) c) & 3){
printk ("Unaligned structure passed\n");
- do_exit (SIGSEGV);
+ goto sigsegv;
}
if(!__access_ok((unsigned long)c, sizeof(*c))) {
/* Miguel, add nice debugging msg _here_. ;-) */
- do_exit(SIGSEGV);
+ goto sigsegv;
}
/* Check for valid PC and nPC */
@@ -637,7 +626,7 @@ asmlinkage int svr4_setcontext32(svr4_ucontext_t *c, struct pt_regs *regs)
__get_user(npc, &((*gr)[SVR4_NPC]));
if((pc | npc) & 3) {
printk ("setcontext, PC or nPC were bogus\n");
- do_exit (SIGSEGV);
+ goto sigsegv;
}
/* Retrieve information from passed ucontext */
/* note that nPC is ored a 1, this is used to inform entry.S */
@@ -650,14 +639,19 @@ asmlinkage int svr4_setcontext32(svr4_ucontext_t *c, struct pt_regs *regs)
__get_user(psr, &((*gr) [SVR4_PSR]));
regs->tstate &= ~(TSTATE_ICC);
regs->tstate |= psr_to_tstate_icc(psr);
+ if(psr & PSR_EF)
+ regs->fprs = FPRS_FEF;
/* Restore g[1..7] and o[0..7] registers */
for (i = 0; i < 7; i++)
- __put_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
+ __get_user(regs->u_regs[UREG_G1+i], (&(*gr)[SVR4_G1])+i);
for (i = 0; i < 8; i++)
- __put_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
+ __get_user(regs->u_regs[UREG_I0+i], (&(*gr)[SVR4_O0])+i);
return -EINTR;
+sigsegv:
+ lock_kernel();
+ do_exit(SIGSEGV);
}
static inline void handle_signal32(unsigned long signr, struct sigaction *sa,
@@ -674,8 +668,11 @@ static inline void handle_signal32(unsigned long signr, struct sigaction *sa,
}
if(sa->sa_flags & SA_ONESHOT)
sa->sa_handler = NULL;
- if(!(sa->sa_flags & SA_NOMASK))
+ if(!(sa->sa_flags & SA_NOMASK)) {
+ spin_lock_irq(&current->sigmask_lock);
current->blocked |= (sa->sa_mask | _S(signr)) & _BLOCKABLE;
+ spin_unlock_irq(&current->sigmask_lock);
+ }
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -711,7 +708,11 @@ asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
while ((signr = current->signal & mask) != 0) {
signr = ffz(~signr);
- clear_bit(signr + 32, &current->signal);
+
+ spin_lock_irq(&current->sigmask_lock);
+ current->signal &= ~(1 << signr);
+ spin_unlock_irq(&current->sigmask_lock);
+
sa = current->sig->action + signr;
signr++;
if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
@@ -725,7 +726,9 @@ asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
if (signr == SIGSTOP)
continue;
if (_S(signr) & current->blocked) {
+ spin_lock_irq(&current->sigmask_lock);
current->signal |= _S(signr);
+ spin_unlock_irq(&current->sigmask_lock);
continue;
}
sa = current->sig->action + signr - 1;
@@ -768,8 +771,10 @@ asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV: case SIGBUS:
if(current->binfmt && current->binfmt->core_dump) {
+ lock_kernel();
if(current->binfmt->core_dump(signr, regs))
signr |= 0x80;
+ unlock_kernel();
}
#ifdef DEBUG_SIGNALS
/* Very useful to debug dynamic linker problems */
@@ -778,9 +783,15 @@ asmlinkage int do_signal32(unsigned long oldmask, struct pt_regs * regs,
#endif
/* fall through */
default:
+ spin_lock_irq(&current->sigmask_lock);
current->signal |= _S(signr & 0x7f);
+ spin_unlock_irq(&current->sigmask_lock);
+
current->flags |= PF_SIGNALED;
+
+ lock_kernel();
do_exit(signr);
+ unlock_kernel();
}
}
if(restart_syscall)
@@ -805,9 +816,10 @@ struct sigstack32 {
int cur_status;
};
-asmlinkage int
-sys32_sigstack(struct sigstack32 *ssptr, struct sigstack32 *ossptr)
+asmlinkage int sys32_sigstack(u32 u_ssptr, u32 u_ossptr)
{
+ struct sigstack32 *ssptr = (struct sigstack32 *)((unsigned long)(u_ssptr));
+ struct sigstack32 *ossptr = (struct sigstack32 *)((unsigned long)(u_ossptr));
int ret = -EFAULT;
lock_kernel();
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
new file mode 100644
index 000000000..88d7a8ecf
--- /dev/null
+++ b/arch/sparc64/kernel/smp.c
@@ -0,0 +1,347 @@
+/* smp.c: Sparc64 SMP support.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+
+extern int linux_num_cpus;
+extern void calibrate_delay(void);
+
+volatile int smp_processors_ready = 0;
+unsigned long cpu_present_map = 0;
+int smp_num_cpus = 1;
+int smp_threads_ready = 0;
+
+struct cpuinfo_sparc64 cpu_data[NR_CPUS];
+static unsigned char boot_cpu_id = 0;
+static int smp_activated = 0;
+
+volatile int cpu_number_map[NR_CPUS];
+volatile int cpu_logical_map[NR_CPUS];
+
+struct klock_info klock_info = { KLOCK_CLEAR, 0 };
+
+static volatile int smp_commenced = 0;
+
+void smp_setup(char *str, int *ints)
+{
+ /* XXX implement me XXX */
+}
+
+static char smp_buf[512];
+
+char *smp_info(void)
+{
+ /* XXX not SMP safe and need to support up to 64 penguins */
+ sprintf(smp_buf,
+" CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
+"State: %s\t\t%s\t\t%s\t\t%s\n",
+(cpu_present_map & 1) ? ((klock_info.akp == 0) ? "akp" : "online") : "offline",
+(cpu_present_map & 2) ? ((klock_info.akp == 1) ? "akp" : "online") : "offline",
+(cpu_present_map & 4) ? ((klock_info.akp == 2) ? "akp" : "online") : "offline",
+(cpu_present_map & 8) ? ((klock_info.akp == 3) ? "akp" : "online") : "offline");
+ return smp_buf;
+}
+
+void smp_store_cpu_info(int id)
+{
+ cpu_data[id].udelay_val = loops_per_sec;
+}
+
+void smp_commence(void)
+{
+ local_flush_cache_all();
+ local_flush_tlb_all();
+ smp_commenced = 1;
+ local_flush_cache_all();
+ local_flush_tlb_all();
+}
+
+static void smp_setup_percpu_timer(void);
+
+static volatile unsigned long callin_flag = 0;
+
+void smp_callin(void)
+{
+ int cpuid = hard_smp_processor_id();
+
+ local_flush_cache_all();
+ local_flush_tlb_all();
+
+ smp_setup_percpu_timer();
+
+ calibrate_delay();
+ smp_store_cpu_info(cpuid);
+ callin_flag = 1;
+ __asm__ __volatile__("membar #Sync\n\t"
+ "flush %g6" : : : "memory");
+
+ while(!task[cpuid])
+ barrier();
+ current = task[cpuid];
+
+ while(!smp_commenced)
+ barrier();
+
+ __sti();
+}
+
+extern int cpu_idle(void *unused);
+extern void init_IRQ(void);
+
+void initialize_secondary(void)
+{
+}
+
+int start_secondary(void *unused)
+{
+ trap_init();
+ init_IRQ();
+ smp_callin();
+ return cpu_idle(NULL);
+}
+
+extern struct prom_cpuinfo linux_cpus[NR_CPUS];
+
+void smp_boot_cpus(void)
+{
+ int cpucount = 0, i, first, prev;
+
+ printk("Entering UltraSMPenguin Mode...\n");
+ __sti();
+ cpu_present_map = 0;
+ for(i = 0; i < linux_num_cpus; i++)
+ cpu_present_map |= (1 << i);
+ for(i = 0; i < NR_CPUS; i++) {
+ cpu_number_map[i] = -1;
+ cpu_logical_map[i] = -1;
+ }
+ cpu_number_map[boot_cpu_id] = 0;
+ cpu_logical_map[0] = boot_cpu_id;
+ klock_info.akp = boot_cpu_id;
+ current->processor = boot_cpu_id;
+ smp_store_cpu_info(boot_cpu_id);
+ smp_setup_percpu_timer();
+
+ if(linux_num_cpus == 1)
+ return;
+
+ for(i = 0; i < NR_CPUS; i++) {
+ if(i == boot_cpu_id)
+ continue;
+
+ if(cpu_present_map & (1 << i)) {
+ extern unsigned long sparc64_cpu_startup;
+			unsigned long entry = (unsigned long)&sparc64_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+
+			callin_flag = 0;
+			kernel_thread(start_secondary, NULL, CLONE_PID);
+			p = task[++cpucount];
+			p->processor = i;
+			prom_startcpu(linux_cpus[i].prom_node, entry, i);
+			for(timeout = 0; timeout < 5000000; timeout++) {
+				if(callin_flag)
+					break;
+				udelay(100);
+			}
+			if(callin_flag) {
+				cpu_number_map[i] = i;	/* XXX fix this */
+				cpu_logical_map[i] = i;
+			} else {
+				cpucount--;
+				printk("Processor %d is stuck.\n", i);
+			}
+		}
+		if(!callin_flag) {
+			cpu_present_map &= ~(1 << i);
+			cpu_number_map[i] = -1;
+		}
+ }
+ if(cpucount == 0) {
+ printk("Error: only one processor found.\n");
+ cpu_present_map = (1 << smp_processor_id());
+ } else {
+ unsigned long bogosum = 0;
+
+ for(i = 0; i < NR_CPUS; i++) {
+ if(cpu_present_map & (1 << i))
+ bogosum += cpu_data[i].udelay_val;
+ }
+ printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ cpucount + 1,
+ (bogosum + 2500)/500000,
+ ((bogosum + 2500)/5000)%100);
+ smp_activated = 1;
+ smp_num_cpus = cpucount + 1;
+ }
+ smp_processors_ready = 1;
+}
+
+/* XXX deprecated interface... */
+void smp_message_pass(int target, int msg, unsigned long data, int wait)
+{
+ printk("smp_message_pass() called, this is bad, spinning.\n");
+ __sti();
+ while(1)
+ barrier();
+}
+
+/* XXX Make it fast later. */
+void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
+{
+ if(smp_processors_ready) {
+ unsigned long mask;
+ u64 data0 = (((unsigned long)ctx)<<32 |
+ (((unsigned long)func) & 0xffffffff));
+ u64 pstate;
+ int i, ncpus = smp_num_cpus;
+
+ __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+ mask = (cpu_present_map & ~(1 << smp_processor_id()));
+ for(i = 0; i < ncpus; i++) {
+ if(mask & (1 << i)) {
+				u64 target = ((u64)linux_cpus[i].mid << 14) | 0x70; /* XXX verify mid source */
+ u64 result;
+
+ __asm__ __volatile__("
+ wrpr %0, %1, %%pstate
+ wrpr %%g0, %2, %%asi
+ stxa %3, [0x40] %%asi
+ stxa %4, [0x50] %%asi
+ stxa %5, [0x60] %%asi
+ stxa %%g0, [%6] %7
+ membar #Sync"
+ : /* No outputs */
+ : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_UDB_INTR_W),
+ "r" (data0), "r" (data1), "r" (data2),
+ "r" (target), "i" (ASI_UDB_INTR_W));
+
+ /* NOTE: PSTATE_IE is still clear. */
+ do {
+					__asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=r" (result)
+ : "i" (ASI_INTR_DISPATCH_STAT));
+ } while(result & 0x1);
+ __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+ : : "r" (pstate));
+ if(result & 0x2)
+ panic("Penguin NACK's master!");
+ }
+ }
+
+ /* NOTE: Caller runs local copy on master. */
+ }
+}
+
+extern unsigned long xcall_flush_tlb_page;
+extern unsigned long xcall_flush_tlb_mm;
+extern unsigned long xcall_flush_tlb_range;
+extern unsigned long xcall_flush_tlb_all;
+extern unsigned long xcall_flush_cache_all;
+
+void smp_flush_cache_all(void)
+{
+ smp_cross_call(&xcall_flush_cache_all, 0, 0, 0);
+}
+
+void smp_flush_tlb_all(void)
+{
+ smp_cross_call(&xcall_flush_tlb_all, 0, 0, 0);
+}
+
+void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+ u32 ctx = mm->context & 0x1fff;
+ if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+ smp_cross_call(&xcall_flush_tlb_mm, ctx, 0, 0);
+ __flush_tlb_mm(ctx);
+}
+
+void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ u32 ctx = mm->context & 0x1fff;
+ if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+ smp_cross_call(&xcall_flush_tlb_range, ctx, start, end);
+ __flush_tlb_range(ctx, start, end);
+}
+
+void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ u32 ctx = mm->context & 0x1fff;
+
+ if(mm->cpu_vm_mask != (1 << smp_processor_id()))
+ smp_cross_call(&xcall_flush_tlb_page, ctx, page, 0);
+ __flush_tlb_page(ctx, page);
+}
+
+static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
+
+static inline void sparc64_do_profile(unsigned long pc)
+{
+ if(prof_buffer && current->pid) {
+ extern int _stext;
+
+ pc -= (unsigned long) &_stext;
+ pc >>= prof_shift;
+
+ spin_lock(&ticker_lock);
+ if(pc < prof_len)
+ prof_buffer[pc]++;
+ else
+ prof_buffer[prof_len - 1]++;
+ spin_unlock(&ticker_lock);
+ }
+}
+
+unsigned int prof_multiplier[NR_CPUS];
+unsigned int prof_counter[NR_CPUS];
+
+extern void update_one_process(struct task_struct *p, unsigned long ticks,
+ unsigned long user, unsigned long system);
+
+void smp_percpu_timer_interrupt(struct pt_regs *regs)
+{
+ int cpu = smp_processor_id();
+
+ clear_profile_irq(cpu);
+ if(!user_mode(regs))
+		sparc64_do_profile(regs->pc);
+ if(!--prof_counter[cpu]) {
+ int user = user_mode(regs);
+ if(current->pid) {
+ update_one_process(current, 1, user, !user);
+ if(--current->counter < 0) {
+ current->counter = 0;
+ need_resched = 1;
+ }
+
+ spin_lock(&ticker_lock);
+ if(user) {
+ if(current->priority < DEF_PRIORITY)
+ kstat.cpu_nice++;
+ else
+ kstat.cpu_user++;
+ } else {
+ kstat.cpu_system++;
+ }
+ spin_unlock(&ticker_lock);
+ }
+ prof_counter[cpu] = prof_multiplier[cpu];
+ }
+}
+
+static void smp_setup_percpu_timer(void)
+{
+ /* XXX implement me */
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;	/* XXX implement me */
+}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 91426c814..990202bac 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -1,5 +1,5 @@
-/* $Id: sparc64_ksyms.c,v 1.4 1997/04/14 17:04:43 jj Exp $
- * arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
+/* $Id: sparc64_ksyms.c,v 1.11 1997/07/14 23:58:20 davem Exp $
+ * arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
@@ -18,6 +18,8 @@
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/softirq.h>
+#include <asm/hardirq.h>
#include <asm/idprom.h>
#include <asm/svr4.h>
#include <asm/head.h>
@@ -38,19 +40,17 @@ struct poll {
short revents;
};
-extern int svr4_getcontext (svr4_ucontext_t *, struct pt_regs *);
-extern int svr4_setcontext (svr4_ucontext_t *, struct pt_regs *);
extern unsigned long sunos_mmap(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long);
void _sigpause_common (unsigned int set, struct pt_regs *);
-extern void __copy_1page(void *, const void *);
-extern void *bzero_1page(void *);
+extern void *__bzero_1page(void *);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
extern int __memcmp(const void *, const void *, __kernel_size_t);
extern int __strncmp(const char *, const char *, __kernel_size_t);
extern unsigned int __csum_partial_copy_sparc_generic (const char *, char *);
+extern char saved_command_line[];
extern void bcopy (const char *, char *, int);
extern int __ashrdi3(int, int);
@@ -75,32 +75,24 @@ EXPORT_SYMBOL(klock_info);
EXPORT_SYMBOL_PRIVATE(_lock_kernel);
EXPORT_SYMBOL_PRIVATE(_unlock_kernel);
+EXPORT_SYMBOL_PRIVATE(flushw_user);
+
EXPORT_SYMBOL(mstk48t02_regs);
EXPORT_SYMBOL(request_fast_irq);
EXPORT_SYMBOL(sparc_alloc_io);
EXPORT_SYMBOL(sparc_free_io);
-#if 0
-EXPORT_SYMBOL(io_remap_page_range);
-EXPORT_SYMBOL(mmu_unlockarea);
-EXPORT_SYMBOL(mmu_lockarea);
+EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(__sparc64_bh_counter);
+EXPORT_SYMBOL(sparc_ultra_unmapioaddr);
EXPORT_SYMBOL(mmu_get_scsi_sgl);
EXPORT_SYMBOL(mmu_get_scsi_one);
-EXPORT_SYMBOL(mmu_release_scsi_sgl);
-EXPORT_SYMBOL(mmu_release_scsi_one);
-#endif
EXPORT_SYMBOL(sparc_dvma_malloc);
-#if 0
-EXPORT_SYMBOL(sun4c_unmapioaddr);
-EXPORT_SYMBOL(srmmu_unmapioaddr);
-#endif
#if CONFIG_SBUS
EXPORT_SYMBOL(SBus_chain);
EXPORT_SYMBOL(dma_chain);
#endif
/* Solaris/SunOS binary compatibility */
-EXPORT_SYMBOL(svr4_setcontext);
-EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(_sigpause_common);
EXPORT_SYMBOL(sunos_mmap);
@@ -119,7 +111,7 @@ EXPORT_SYMBOL(prom_getproplen);
EXPORT_SYMBOL(prom_getproperty);
EXPORT_SYMBOL(prom_node_has_property);
EXPORT_SYMBOL(prom_setprop);
-EXPORT_SYMBOL(prom_getbootargs);
+EXPORT_SYMBOL(saved_command_line);
EXPORT_SYMBOL(prom_getname);
EXPORT_SYMBOL(prom_feval);
EXPORT_SYMBOL(prom_getstring);
@@ -148,10 +140,9 @@ EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strspn);
/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(bzero_1page);
+EXPORT_SYMBOL(__bzero_1page);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
diff --git a/arch/sparc64/kernel/sunos_ioctl32.c b/arch/sparc64/kernel/sunos_ioctl32.c
new file mode 100644
index 000000000..311110d3c
--- /dev/null
+++ b/arch/sparc64/kernel/sunos_ioctl32.c
@@ -0,0 +1,281 @@
+/* $Id: sunos_ioctl32.c,v 1.1 1997/07/18 06:26:42 ralf Exp $
+ * sunos_ioctl32.c: SunOS ioctl compatability on sparc64.
+ *
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/termios.h>
+#include <linux/ioctl.h>
+#include <linux/route.h>
+#include <linux/sockios.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <asm/kbio.h>
+
+#define A(x) ((unsigned long)x)
+
+#define SUNOS_NR_OPEN 256
+
+struct rtentry32 {
+ u32 rt_pad1;
+ struct sockaddr rt_dst; /* target address */
+ struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
+ struct sockaddr rt_genmask; /* target network mask (IP) */
+ unsigned short rt_flags;
+ short rt_pad2;
+ u32 rt_pad3;
+ unsigned char rt_tos;
+ unsigned char rt_class;
+ short rt_pad4;
+ short rt_metric; /* +1 for binary compatibility! */
+ /* char * */ u32 rt_dev; /* forcing the device at add */
+ u32 rt_mtu; /* per route MTU/Window */
+ u32 rt_window; /* Window clamping */
+ unsigned short rt_irtt; /* Initial RTT */
+
+};
+
+struct ifmap32 {
+ u32 mem_start;
+ u32 mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+};
+
+struct ifreq32 {
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_ivalue;
+ int ifru_mtu;
+ struct ifmap32 ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ __kernel_caddr_t32 ifru_data;
+ } ifr_ifru;
+};
+
+struct ifconf32 {
+ int ifc_len; /* size of buffer */
+ __kernel_caddr_t32 ifcbuf;
+};
+
+extern asmlinkage int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
+
+extern asmlinkage int sys32_ioctl(unsigned int, unsigned int, u32);
+extern asmlinkage int sys_setsid(void);
+
+asmlinkage int sunos_ioctl (int fd, u32 cmd, u32 arg)
+{
+ struct file *filp;
+ int ret = -EBADF;
+
+ lock_kernel();
+	if((unsigned int)fd >= SUNOS_NR_OPEN)
+ goto out;
+
+ filp = current->files->fd[fd];
+ if(!filp)
+ goto out;
+
+ if(cmd == TIOCSETD) {
+ unsigned long old_fs = get_fs();
+ int *p, ntty = N_TTY;
+ int tmp;
+
+ p = (int *)A(arg);
+ ret = -EFAULT;
+ if(get_user(tmp, p))
+ goto out;
+ if(tmp == 2) {
+ set_fs(KERNEL_DS);
+ ret = sys_ioctl(fd, cmd, (unsigned long) &ntty);
+ set_fs(old_fs);
+ ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+ goto out;
+ }
+ }
+ if(cmd == TIOCNOTTY) {
+ ret = sys_setsid();
+ goto out;
+ }
+ switch(cmd) {
+ case _IOW('r', 10, struct rtentry32):
+ ret = sys32_ioctl(fd, SIOCADDRT, arg);
+ goto out;
+ case _IOW('r', 11, struct rtentry32):
+ ret = sys32_ioctl(fd, SIOCDELRT, arg);
+ goto out;
+
+ case _IOW('i', 12, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFADDR, arg);
+ goto out;
+ case _IOWR('i', 13, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFADDR, arg);
+ goto out;
+ case _IOW('i', 14, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFDSTADDR, arg);
+ goto out;
+ case _IOWR('i', 15, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFDSTADDR, arg);
+ goto out;
+ case _IOW('i', 16, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFFLAGS, arg);
+ goto out;
+ case _IOWR('i', 17, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFFLAGS, arg);
+ goto out;
+ case _IOW('i', 18, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFMEM, arg);
+ goto out;
+ case _IOWR('i', 19, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFMEM, arg);
+ goto out;
+
+ case _IOWR('i', 20, struct ifconf32):
+ ret = sys32_ioctl(fd, SIOCGIFCONF, arg);
+ goto out;
+
+ case _IOW('i', 21, struct ifreq): /* SIOCSIFMTU */
+ ret = sys_ioctl(fd, SIOCSIFMTU, arg);
+ goto out;
+ case _IOWR('i', 22, struct ifreq): /* SIOCGIFMTU */
+ ret = sys_ioctl(fd, SIOCGIFMTU, arg);
+ goto out;
+
+ case _IOWR('i', 23, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFBRDADDR, arg);
+ goto out;
+ case _IOW('i', 24, struct ifreq32):
+		ret = sys32_ioctl(fd, SIOCSIFBRDADDR, arg);
+ goto out;
+ case _IOWR('i', 25, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFNETMASK, arg);
+ goto out;
+ case _IOW('i', 26, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFNETMASK, arg);
+ goto out;
+ case _IOWR('i', 27, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCGIFMETRIC, arg);
+ goto out;
+ case _IOW('i', 28, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCSIFMETRIC, arg);
+ goto out;
+
+ case _IOW('i', 30, struct arpreq):
+ ret = sys32_ioctl(fd, SIOCSARP, arg);
+ goto out;
+ case _IOWR('i', 31, struct arpreq):
+ ret = sys32_ioctl(fd, SIOCGARP, arg);
+ goto out;
+ case _IOW('i', 32, struct arpreq):
+ ret = sys32_ioctl(fd, SIOCDARP, arg);
+ goto out;
+
+ case _IOW('i', 40, struct ifreq32): /* SIOCUPPER */
+ case _IOW('i', 41, struct ifreq32): /* SIOCLOWER */
+ case _IOW('i', 44, struct ifreq32): /* SIOCSETSYNC */
+ case _IOW('i', 45, struct ifreq32): /* SIOCGETSYNC */
+ case _IOW('i', 46, struct ifreq32): /* SIOCSSDSTATS */
+ case _IOW('i', 47, struct ifreq32): /* SIOCSSESTATS */
+ case _IOW('i', 48, struct ifreq32): /* SIOCSPROMISC */
+ ret = -EOPNOTSUPP;
+ goto out;
+
+ case _IOW('i', 49, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCADDMULTI, arg);
+ goto out;
+ case _IOW('i', 50, struct ifreq32):
+ ret = sys32_ioctl(fd, SIOCDELMULTI, arg);
+ goto out;
+
+ /* FDDI interface ioctls, unsupported. */
+
+ case _IOW('i', 51, struct ifreq32): /* SIOCFDRESET */
+ case _IOW('i', 52, struct ifreq32): /* SIOCFDSLEEP */
+ case _IOW('i', 53, struct ifreq32): /* SIOCSTRTFMWAR */
+ case _IOW('i', 54, struct ifreq32): /* SIOCLDNSTRTFW */
+ case _IOW('i', 55, struct ifreq32): /* SIOCGETFDSTAT */
+ case _IOW('i', 56, struct ifreq32): /* SIOCFDNMIINT */
+ case _IOW('i', 57, struct ifreq32): /* SIOCFDEXUSER */
+ case _IOW('i', 58, struct ifreq32): /* SIOCFDGNETMAP */
+ case _IOW('i', 59, struct ifreq32): /* SIOCFDGIOCTL */
+ printk("FDDI ioctl, returning EOPNOTSUPP\n");
+ ret = -EOPNOTSUPP;
+ goto out;
+
+ case _IOW('t', 125, int):
+ /* More stupid tty sunos ioctls, just
+ * say it worked.
+ */
+ ret = 0;
+ goto out;
+
+ /* Non posix grp */
+ case _IOW('t', 118, int): {
+ int oldval, newval, *ptr;
+
+ cmd = TIOCSPGRP;
+ ptr = (int *) A(arg);
+ ret = -EFAULT;
+ if(get_user(oldval, ptr))
+ goto out;
+ ret = sys32_ioctl(fd, cmd, arg);
+ __get_user(newval, ptr);
+ if(newval == -1) {
+ __put_user(oldval, ptr);
+ ret = -EIO;
+ }
+ if(ret == -ENOTTY)
+ ret = -EIO;
+ goto out;
+ }
+
+ case _IOR('t', 119, int): {
+ int oldval, newval, *ptr;
+
+ cmd = TIOCGPGRP;
+ ptr = (int *) A(arg);
+ ret = -EFAULT;
+ if(get_user(oldval, ptr))
+ goto out;
+ ret = sys32_ioctl(fd, cmd, arg);
+ __get_user(newval, ptr);
+ if(newval == -1) {
+ __put_user(oldval, ptr);
+ ret = -EIO;
+ }
+ if(ret == -ENOTTY)
+ ret = -EIO;
+ goto out;
+ }
+ };
+
+ ret = sys32_ioctl(fd, cmd, arg);
+ /* so stupid... */
+ ret = (ret == -EINVAL ? -EOPNOTSUPP : ret);
+out:
+ unlock_kernel();
+ return ret;
+}
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
new file mode 100644
index 000000000..ddd726069
--- /dev/null
+++ b/arch/sparc64/kernel/sys32.S
@@ -0,0 +1,427 @@
+/* $Id: sys32.S,v 1.1 1997/07/18 06:26:42 ralf Exp $
+ * sys32.S: I-cache tricks for 32-bit compatability layer simple
+ * conversions.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+ .text
+
+ .align 32
+ .globl sys32_mmap, sys32_mprotect, sys32_munmap, sys32_msync
+ .globl sys32_mlock, sys32_munlock, sys32_mremap, sparc32_brk
+sys32_mmap:
+ srl %o0, 0, %o0 ! IEU0 Group
+ sethi %hi(0xffffffff), %g2 ! IEU1
+ srl %o1, 0, %o1 ! IEU0 Group
+ or %g2, %lo(0xffffffff), %g2 ! IEU1
+ srl %o2, 0, %o2 ! IEU0 Group
+ mov %o7, %g1 ! IEU1
+ and %o3, %g2, %o3 ! IEU0 Group
+ and %o4, %g2, %o4 ! IEU1
+ and %o5, %g2, %o5 ! IEU0 Group
+ call sys_mmap ! CTI Group brk forced
+ mov %g1, %o7 ! IEU0 Group (regdep)
+sys32_mprotect:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ srl %o2, 0, %o2
+ call sys_mprotect
+ mov %g1, %o7
+sys32_munmap:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_munmap
+ mov %g1, %o7
+sparc32_brk:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_brk
+ mov %g1, %o7
+sys32_msync:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_msync
+ mov %g1, %o7
+sys32_mlock:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_mlock
+ mov %g1, %o7
+sys32_munlock:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_munlock
+ mov %g1, %o7
+sys32_mremap:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ srl %o2, 0, %o2
+ srl %o3, 0, %o3
+ call sys_mremap
+ mov %g1, %o7
+
+ .align 32
+ .globl sys32_read, sys32_write, sys32_open, sys32_access
+ .globl sys32_chdir, sys32_lseek, sys32_llseek, sys32_poll
+ .globl sys32_readlink, sys32_unlink, sys32_rmdir, sys32_symlink
+ .globl sys32_link, sys32_rename, sys32_truncate, sys32_ftruncate
+ .globl sys32_chroot, sys32_chmod, sys32_chown, sys32_creat
+ .globl sys32_mkdir, sys32_mknod, sys32_utimes, sys32_ustat
+sys32_read:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_read
+ mov %g1, %o7
+sys32_write:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_write
+ mov %g1, %o7
+sys32_open:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_open
+ mov %g1, %o7
+sys32_access:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_access
+ mov %g1, %o7
+sys32_chdir:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_chdir
+ mov %g1, %o7
+sys32_lseek:
+ sra %o1, 0, %o1
+ mov %o7, %g1
+ call sys_lseek
+ mov %g1, %o7
+sys32_llseek:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ srl %o3, 0, %o3
+ call sys_llseek
+ mov %g1, %o7
+sys32_poll:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_poll
+ mov %g1, %o7
+sys32_readlink:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_readlink
+ mov %g1, %o7
+sys32_unlink:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_unlink
+ mov %g1, %o7
+sys32_rmdir:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_rmdir
+ mov %g1, %o7
+sys32_symlink:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_symlink
+ mov %g1, %o7
+sys32_link:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_link
+ mov %g1, %o7
+sys32_rename:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_rename
+ mov %g1, %o7
+ nop
+sys32_truncate:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_truncate
+ mov %g1, %o7
+sys32_ftruncate:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_ftruncate
+ mov %g1, %o7
+sys32_chroot:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_chroot
+ mov %g1, %o7
+sys32_chmod:
+ sll %o1, 16, %o1
+ mov %o7, %g1
+ srl %o0, 0, %o0
+ srl %o1, 16, %o1
+ call sys_chmod
+ mov %g1, %o7
+sys32_chown:
+ sll %o1, 16, %o1
+ mov %o7, %g1
+ sll %o2, 16, %o2
+ srl %o0, 0, %o0
+ srl %o1, 16, %o1
+ srl %o2, 16, %o2
+ call sys_chown
+ mov %g1, %o7
+sys32_creat:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_creat
+ mov %g1, %o7
+sys32_mkdir:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_mkdir
+ mov %g1, %o7
+sys32_mknod:
+ sll %o2, 16, %o2
+ mov %o7, %g1
+ srl %o0, 0, %o0
+ srl %o2, 16, %o2
+ call sys_mknod
+ mov %g1, %o7
+sys32_utimes:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_utimes
+ mov %g1, %o7
+sys32_ustat:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_ustat
+ mov %g1, %o7
+
+ .align 32
+ .globl sys32_bind, sys32_accept, sys32_connect, sys32_getsockname
+ .globl sys32_getpeername, sys32_send, sys32_sendto, sys32_recv
+ .globl sys32_recvfrom, sys32_setsockopt, sys32_getsockopt
+sys32_bind:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_bind
+ mov %g1, %o7
+sys32_accept:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_accept
+ mov %g1, %o7
+sys32_connect:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_connect
+ mov %g1, %o7
+sys32_getsockname:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_getsockname
+ mov %g1, %o7
+sys32_getpeername:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_getpeername
+ mov %g1, %o7
+sys32_send:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_send
+ mov %g1, %o7
+sys32_sendto:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ srl %o4, 0, %o4
+ call sys_sendto
+ mov %g1, %o7
+sys32_recv:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_recv
+ mov %g1, %o7
+sys32_recvfrom:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ srl %o4, 0, %o4
+ srl %o5, 0, %o5
+ call sys_recvfrom
+ mov %g1, %o7
+sys32_setsockopt:
+ srl %o3, 0, %o3
+ mov %o7, %g1
+ call sys_setsockopt
+ mov %g1, %o7
+sys32_getsockopt:
+ srl %o3, 0, %o3
+ mov %o7, %g1
+ srl %o4, 0, %o4
+	call sys_getsockopt
+ mov %g1, %o7
+
+ .align 32
+ .globl sys32_gettimeofday, sys32_settimeofday
+sys32_gettimeofday:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_gettimeofday
+ mov %g1, %o7
+sys32_settimeofday:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ srl %o1, 0, %o1
+ call sys_settimeofday
+ mov %g1, %o7
+
+ .globl sys32_bdflush, sys32_uselib, sys32_umount, sys32_syslog
+ .globl sys32_personality, sys32_waitpid, sys32_getitimer
+ .globl sys32_setitimer, sys32_sched_setscheduler
+ .globl sys32_sched_setparam, sys32_sched_getparam, sys32_signal
+ .globl sys32_reboot, sys32_acct, sys32_newuname, sys32_olduname
+ .globl sys32_sethostname, sys32_gethostname, sys32_setdomainname
+ .globl sys32_time, sys32_swapoff, sys32_swapon, sys32_nfsservctl
+ .globl sys32_create_module, sys32_init_module, sys32_delete_module
+sys32_bdflush:
+ sra %o1, 0, %o1
+ mov %o7, %g1
+ call sys_bdflush
+ mov %g1, %o7
+sys32_uselib:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_uselib
+ mov %g1, %o7
+sys32_umount:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_umount
+ mov %g1, %o7
+sys32_syslog:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_syslog
+ mov %g1, %o7
+sys32_personality:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_personality
+ mov %g1, %o7
+sys32_waitpid:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_waitpid
+ mov %g1, %o7
+sys32_getitimer:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_getitimer
+ mov %g1, %o7
+sys32_setitimer:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_setitimer
+ mov %g1, %o7
+sys32_sched_setscheduler:
+ srl %o2, 0, %o2
+ mov %o7, %g1
+ call sys_sched_setscheduler
+ mov %g1, %o7
+sys32_sched_setparam:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_sched_setparam
+ mov %g1, %o7
+sys32_sched_getparam:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_sched_getparam
+ mov %g1, %o7
+sys32_signal:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ call sys_signal
+ mov %g1, %o7
+sys32_reboot:
+ srl %o3, 0, %o3
+ mov %o7, %g1
+ call sys_reboot
+ mov %g1, %o7
+sys32_acct:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_acct
+ mov %g1, %o7
+sys32_newuname:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_newuname
+ mov %g1, %o7
+sys32_olduname:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_olduname
+ mov %g1, %o7
+sys32_sethostname:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_sethostname
+ mov %g1, %o7
+sys32_gethostname:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_gethostname
+ mov %g1, %o7
+sys32_setdomainname:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_setdomainname
+ mov %g1, %o7
+sys32_time:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_time
+ mov %g1, %o7
+sys32_swapoff:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_swapoff
+ mov %g1, %o7
+sys32_swapon:
+ srl %o0, 0, %o0
+ mov %o7, %g1
+ call sys_swapon
+ mov %g1, %o7
+sys32_nfsservctl:
+ srl %o1, 0, %o1
+ mov %o7, %g1
+ srl %o2, 0, %o2
+ call sys_nfsservctl
+ mov %g1, %o7
diff --git a/arch/sparc64/kernel/sys_sparc.c b/arch/sparc64/kernel/sys_sparc.c
index 851d1550c..c827df7a1 100644
--- a/arch/sparc64/kernel/sys_sparc.c
+++ b/arch/sparc64/kernel/sys_sparc.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc.c,v 1.1 1997/04/09 08:25:18 jj Exp $
+/* $Id: sys_sparc.c,v 1.2 1997/07/05 09:52:34 davem Exp $
* linux/arch/sparc64/kernel/sys_sparc.c
*
* This file contains various random system calls that
@@ -9,7 +9,6 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sem.h>
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c
index 59815b7a8..1f607da98 100644
--- a/arch/sparc64/kernel/sys_sparc32.c
+++ b/arch/sparc64/kernel/sys_sparc32.c
@@ -1,4 +1,4 @@
-/* $Id: sys_sparc32.c,v 1.26 1997/06/04 13:05:21 jj Exp $
+/* $Id: sys_sparc32.c,v 1.43 1997/07/17 02:20:45 davem Exp $
* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -8,6 +8,7 @@
* environment.
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/signal.h>
@@ -30,11 +31,13 @@
#include <linux/ncp_fs.h>
#include <linux/quota.h>
#include <linux/file.h>
+#include <linux/module.h>
#include <asm/types.h>
#include <asm/poll.h>
#include <asm/ipc.h>
#include <asm/uaccess.h>
+#include <asm/fpumacro.h>
/* As gcc will warn about casting u32 to some ptr, we have to cast it to
* unsigned long first, and that's what is A() for.
@@ -372,11 +375,12 @@ asmlinkage int sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u
switch (version) {
case 0: default: {
unsigned long raddr;
+ u32 *uptr = (u32 *) A(((u32)third));
err = sys_shmat (first, (char *)A(ptr), second, &raddr);
if (err)
goto out;
err = -EFAULT;
- if(put_user (raddr, ((u32 *)A(third))))
+ if(put_user (raddr, uptr))
goto out;
err = 0;
goto out;
@@ -469,32 +473,6 @@ out:
return err;
}
-extern asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long off);
-
-asmlinkage unsigned long sys32_mmap(u32 addr, u32 len, u32 prot,
- u32 flags, u32 fd, u32 off)
-{
- return sys_mmap((unsigned long)addr, (unsigned long)len,
- (unsigned long)prot, (unsigned long)flags,
- (unsigned long)fd, (unsigned long)off);
-}
-
-extern asmlinkage int sys_bdflush(int func, long data);
-
-asmlinkage int sys32_bdflush(int func, s32 data)
-{
- return sys_bdflush(func, (long)data);
-}
-
-extern asmlinkage int sys_uselib(const char * library);
-
-asmlinkage int sys32_uselib(u32 library)
-{
- return sys_uselib((const char *)A(library));
-}
-
static inline int get_flock(struct flock *kfl, struct flock32 *ufl)
{
if(get_user(kfl->l_type, &ufl->l_type) ||
@@ -544,55 +522,6 @@ asmlinkage long sys32_fcntl(unsigned int fd, unsigned int cmd, u32 arg)
}
}
-extern asmlinkage int sys_mknod(const char * filename, int mode, dev_t dev);
-
-asmlinkage int sys32_mknod(u32 filename, int mode, __kernel_dev_t32 dev)
-{
- return sys_mknod((const char *)A(filename), mode, dev);
-}
-
-extern asmlinkage int sys_mkdir(const char * pathname, int mode);
-
-asmlinkage int sys32_mkdir(u32 pathname, int mode)
-{
- return sys_mkdir((const char *)A(pathname), mode);
-}
-
-extern asmlinkage int sys_rmdir(const char * pathname);
-
-asmlinkage int sys32_rmdir(u32 pathname)
-{
- return sys_rmdir((const char *)A(pathname));
-}
-
-extern asmlinkage int sys_unlink(const char * pathname);
-
-asmlinkage int sys32_unlink(u32 pathname)
-{
- return sys_unlink((const char *)A(pathname));
-}
-
-extern asmlinkage int sys_symlink(const char * oldname, const char * newname);
-
-asmlinkage int sys32_symlink(u32 oldname, u32 newname)
-{
- return sys_symlink((const char *)A(oldname), (const char *)A(newname));
-}
-
-extern asmlinkage int sys_link(const char * oldname, const char * newname);
-
-asmlinkage int sys32_link(u32 oldname, u32 newname)
-{
- return sys_link((const char *)A(oldname), (const char *)A(newname));
-}
-
-extern asmlinkage int sys_rename(const char * oldname, const char * newname);
-
-asmlinkage int sys32_rename(u32 oldname, u32 newname)
-{
- return sys_rename((const char *)A(oldname), (const char *)A(newname));
-}
-
struct dqblk32 {
__u32 dqb_bhardlimit;
__u32 dqb_bsoftlimit;
@@ -701,20 +630,6 @@ asmlinkage int sys32_fstatfs(unsigned int fd, u32 buf)
return ret;
}
-extern asmlinkage int sys_truncate(const char * path, unsigned long length);
-
-asmlinkage int sys32_truncate(u32 path, u32 length)
-{
- return sys_truncate((const char *)A(path), (unsigned long)length);
-}
-
-extern asmlinkage int sys_ftruncate(unsigned int fd, unsigned long length);
-
-asmlinkage int sys32_ftruncate(unsigned int fd, u32 length)
-{
- return sys_ftruncate(fd, (unsigned long)length);
-}
-
extern asmlinkage int sys_utime(char * filename, struct utimbuf * times);
asmlinkage int sys32_utime(u32 filename, u32 times)
@@ -741,96 +656,6 @@ asmlinkage int sys32_utime(u32 filename, u32 times)
return ret;
}
-extern asmlinkage int sys_utimes(char * filename, struct timeval * utimes);
-
-asmlinkage int sys32_utimes(u32 filename, u32 utimes)
-{
- /* struct timeval is the same :)) */
- return sys_utimes((char *)A(filename), (struct timeval *)A(utimes));
-}
-
-extern asmlinkage int sys_access(const char * filename, int mode);
-
-asmlinkage int sys32_access(u32 filename, int mode)
-{
- return sys_access((const char *)A(filename), mode);
-}
-
-extern asmlinkage int sys_chdir(const char * filename);
-
-asmlinkage int sys32_chdir(u32 filename)
-{
- return sys_chdir((const char *)A(filename));
-}
-
-extern asmlinkage int sys_chroot(const char * filename);
-
-asmlinkage int sys32_chroot(u32 filename)
-{
- return sys_chroot((const char *)A(filename));
-}
-
-extern asmlinkage int sys_chmod(const char * filename, mode_t mode);
-
-asmlinkage int sys32_chmod(u32 filename, __kernel_mode_t32 mode)
-{
- return sys_chmod((const char *)A(filename), mode);
-}
-
-extern asmlinkage int sys_chown(const char * filename, uid_t user, gid_t group);
-
-asmlinkage int sys32_chown(u32 filename, __kernel_uid_t32 user, __kernel_gid_t32 group)
-{
- return sys_chown((const char *)A(filename), user, group);
-}
-
-extern asmlinkage int sys_open(const char * filename,int flags,int mode);
-
-asmlinkage int sys32_open(u32 filename, int flags, int mode)
-{
- return sys_open((const char *)A(filename), flags, mode);
-}
-
-extern asmlinkage int sys_creat(const char * pathname, int mode);
-
-asmlinkage int sys32_creat(u32 pathname, int mode)
-{
- return sys_creat((const char *)A(pathname), mode);
-}
-
-extern asmlinkage long sys_lseek(unsigned int fd, off_t offset, unsigned int origin);
-
-asmlinkage long sys32_lseek(unsigned int fd, s32 offset, unsigned int origin)
-{
- return sys_lseek(fd, (off_t)offset, origin);
-}
-
-extern asmlinkage int sys_llseek(unsigned int fd, unsigned long offset_high,
- unsigned long offset_low,
- loff_t *result, unsigned int origin);
-
-asmlinkage int sys32_llseek(unsigned int fd, u32 offset_high,
- u32 offset_low, u32 result, unsigned int origin)
-{
- /* loff_t is the same :)) */
- return sys_llseek(fd, (unsigned long)offset_high, (unsigned long)offset_low,
- (loff_t *)A(result), origin);
-}
-
-extern asmlinkage long sys_read(unsigned int fd, char * buf, unsigned long count);
-
-asmlinkage long sys32_read(unsigned int fd, u32 buf, u32 count)
-{
- return sys_read(fd, (char *)A(buf), (unsigned long)count);
-}
-
-extern asmlinkage long sys_write(unsigned int fd, const char * buf, unsigned long count);
-
-asmlinkage long sys32_write(unsigned int fd, u32 buf, u32 count)
-{
- return sys_write(fd, (const char *)A(buf), (unsigned long)count);
-}
-
struct iovec32 { u32 iov_base; __kernel_size_t32 iov_len; };
typedef long (*IO_fn_t)(struct inode *, struct file *, char *, unsigned long);
@@ -934,14 +759,29 @@ static long do_readv_writev32(int type, struct inode *inode, struct file *file,
asmlinkage long sys32_readv(int fd, u32 vector, u32 count)
{
struct file *file;
+ struct dentry *dentry;
struct inode *inode;
long err = -EBADF;
lock_kernel();
- if (fd >= NR_OPEN || !(file = current->files->fd[fd]) || !(inode=file->f_inode))
+ if(fd >= NR_OPEN)
+ goto out;
+
+ file = current->files->fd[fd];
+ if(!file)
+ goto out;
+
+ if(!(file->f_mode & 1))
+ goto out;
+
+ dentry = file->f_dentry;
+ if(!dentry)
goto out;
- if (!(file->f_mode & 1))
+
+ inode = dentry->d_inode;
+ if(!inode)
goto out;
+
err = do_readv_writev32(VERIFY_WRITE, inode, file,
(struct iovec32 *)A(vector), count);
out:
@@ -953,13 +793,28 @@ asmlinkage long sys32_writev(int fd, u32 vector, u32 count)
{
int error = -EBADF;
struct file *file;
+ struct dentry *dentry;
struct inode *inode;
lock_kernel();
- if (fd >= NR_OPEN || !(file = current->files->fd[fd]) || !(inode=file->f_inode))
+ if(fd >= NR_OPEN)
+ goto out;
+
+ file = current->files->fd[fd];
+ if(!file)
+ goto out;
+
+ if(!(file->f_mode & 2))
+ goto out;
+
+ dentry = file->f_dentry;
+ if(!dentry)
goto out;
- if (!(file->f_mode & 2))
+
+ inode = dentry->d_inode;
+ if(!inode)
goto out;
+
down(&inode->i_sem);
error = do_readv_writev32(VERIFY_READ, inode, file,
(struct iovec32 *)A(vector), count);
@@ -1008,21 +863,34 @@ asmlinkage int old32_readdir(unsigned int fd, u32 dirent, unsigned int count)
{
int error = -EBADF;
struct file * file;
+ struct dentry * dentry;
+ struct inode * inode;
struct readdir_callback32 buf;
lock_kernel();
- if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+ if(fd >= NR_OPEN)
goto out;
- error = -ENOTDIR;
- if (!file->f_op || !file->f_op->readdir)
+
+ file = current->files->fd[fd];
+ if(!file)
goto out;
- error = verify_area(VERIFY_WRITE, (void *)A(dirent),
- sizeof(struct old_linux_dirent32));
- if (error)
+
+ dentry = file->f_dentry;
+ if(!dentry)
+ goto out;
+
+ inode = dentry->d_inode;
+ if(!inode)
goto out;
+
buf.count = 0;
buf.dirent = (struct old_linux_dirent32 *)A(dirent);
- error = file->f_op->readdir(file->f_inode, file, &buf, fillonedir);
+
+ error = -ENOTDIR;
+ if (!file->f_op || !file->f_op->readdir)
+ goto out;
+
+ error = file->f_op->readdir(inode, file, &buf, fillonedir);
if (error < 0)
goto out;
error = buf.count;
@@ -1072,30 +940,43 @@ static int filldir(void * __buf, const char * name, int namlen, off_t offset, in
asmlinkage int sys32_getdents(unsigned int fd, u32 dirent, unsigned int count)
{
struct file * file;
+ struct dentry * dentry;
+ struct inode *inode;
struct linux_dirent32 * lastdirent;
struct getdents_callback32 buf;
int error = -EBADF;
lock_kernel();
- if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
+ if(fd >= NR_OPEN)
goto out;
- error = -ENOTDIR;
- if (!file->f_op || !file->f_op->readdir)
+
+ file = current->files->fd[fd];
+ if(!file)
+ goto out;
+
+ dentry = file->f_dentry;
+ if(!dentry)
goto out;
- error = verify_area(VERIFY_WRITE, (void *)A(dirent), count);
- if (error)
+
+ inode = dentry->d_inode;
+ if(!inode)
goto out;
+
buf.current_dir = (struct linux_dirent32 *) A(dirent);
buf.previous = NULL;
buf.count = count;
buf.error = 0;
- error = file->f_op->readdir(file->f_inode, file, &buf, filldir);
+
+ error = -ENOTDIR;
+ if (!file->f_op || !file->f_op->readdir)
+ goto out;
+
+ error = file->f_op->readdir(inode, file, &buf, filldir);
if (error < 0)
goto out;
lastdirent = buf.previous;
- if (!lastdirent) {
- error = buf.error;
- } else {
+ error = buf.error;
+ if(lastdirent) {
put_user(file->f_pos, &lastdirent->d_off);
error = count - buf.count;
}
@@ -1196,13 +1077,6 @@ out:
return ret;
}
-extern asmlinkage int sys_poll(struct pollfd * ufds, unsigned int nfds, int timeout);
-
-asmlinkage int sys32_poll(u32 ufds, unsigned int nfds, int timeout)
-{
- return sys_poll((struct pollfd *)A(ufds), nfds, timeout);
-}
-
static inline int putstat(struct stat32 *ubuf, struct stat *kbuf)
{
if (put_user (kbuf->st_dev, &ubuf->st_dev) ||
@@ -1280,13 +1154,6 @@ asmlinkage int sys32_newfstat(unsigned int fd, u32 statbuf)
return ret;
}
-extern asmlinkage int sys_readlink(const char * path, char * buf, int bufsiz);
-
-asmlinkage int sys32_readlink(u32 path, u32 buf, int bufsiz)
-{
- return sys_readlink((const char *)A(path), (char *)A(buf), bufsiz);
-}
-
extern asmlinkage int sys_sysfs(int option, ...);
asmlinkage int sys32_sysfs(int option, ...)
@@ -1312,42 +1179,162 @@ asmlinkage int sys32_sysfs(int option, ...)
return ret;
}
-extern asmlinkage int sys_ustat(dev_t dev, struct ustat * ubuf);
+struct ncp_mount_data32 {
+ int version;
+ unsigned int ncp_fd;
+ __kernel_uid_t32 mounted_uid;
+ __kernel_pid_t32 wdog_pid;
+ unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
+ unsigned int time_out;
+ unsigned int retry_count;
+ unsigned int flags;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
-asmlinkage int sys32_ustat(dev_t dev, u32 ubuf)
+static void *do_ncp_super_data_conv(void *raw_data)
{
- /* ustat is the same :)) */
- return sys_ustat(dev, (struct ustat *)A(ubuf));
+ struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
+ struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
+
+ n->dir_mode = n32->dir_mode;
+ n->file_mode = n32->file_mode;
+ n->gid = n32->gid;
+ n->uid = n32->uid;
+ memmove (n->mounted_vol, n32->mounted_vol, (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
+ n->wdog_pid = n32->wdog_pid;
+ n->mounted_uid = n32->mounted_uid;
+ return raw_data;
}
-extern asmlinkage int sys_umount(char * name);
+struct smb_mount_data32 {
+ int version;
+ unsigned int fd;
+ __kernel_uid_t32 mounted_uid;
+ struct sockaddr_in addr;
+ char server_name[17];
+ char client_name[17];
+ char service[64];
+ char root_path[64];
+ char username[64];
+ char password[64];
+ char domain[64];
+ unsigned short max_xmit;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_mode_t32 file_mode;
+ __kernel_mode_t32 dir_mode;
+};
-asmlinkage int sys32_umount(u32 name)
+static void *do_smb_super_data_conv(void *raw_data)
{
- return sys_umount((char *)A(name));
-}
+ struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
+ struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
-extern asmlinkage int sys_mount(char * dev_name, char * dir_name, char * type,
- unsigned long new_flags, void *data);
+ s->dir_mode = s32->dir_mode;
+ s->file_mode = s32->file_mode;
+ s->gid = s32->gid;
+ s->uid = s32->uid;
+ memmove (&s->addr, &s32->addr, (((long)&s->uid) - ((long)&s->addr)));
+ s->mounted_uid = s32->mounted_uid;
+ return raw_data;
+}
-asmlinkage int sys32_mount(u32 dev_name, u32 dir_name, u32 type, u32 new_flags, u32 data)
+static int copy_mount_stuff_to_kernel(const void *user, unsigned long *kernel)
{
- return sys_mount((char *)A(dev_name), (char *)A(dir_name), (char *)A(type),
- (unsigned long)new_flags, (void *)A(data));
+ int i;
+ unsigned long page;
+ struct vm_area_struct *vma;
+
+ *kernel = 0;
+ if(!user)
+ return 0;
+ vma = find_vma(current->mm, (unsigned long)user);
+ if(!vma || (unsigned long)user < vma->vm_start)
+ return -EFAULT;
+ if(!(vma->vm_flags & VM_READ))
+ return -EFAULT;
+ i = vma->vm_end - (unsigned long) user;
+ if(PAGE_SIZE <= (unsigned long) i)
+ i = PAGE_SIZE - 1;
+ if(!(page = __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+ if(copy_from_user((void *) page, user, i)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ *kernel = page;
+ return 0;
}
-extern asmlinkage int sys_syslog(int type, char * bug, int count);
+extern asmlinkage int sys_mount(char * dev_name, char * dir_name, char * type,
+ unsigned long new_flags, void *data);
+
+#define SMBFS_NAME "smbfs"
+#define NCPFS_NAME "ncpfs"
-asmlinkage int sys32_syslog(int type, u32 bug, int count)
+asmlinkage int sys32_mount(u32 dev_name, u32 dir_name, u32 type, u32 new_flags, u32 data)
{
- return sys_syslog(type, (char *)A(bug), count);
-}
+ unsigned long type_page;
+ int err, is_smb, is_ncp;
-extern asmlinkage int sys_personality(unsigned long personality);
+ if(!suser())
+ return -EPERM;
+ is_smb = is_ncp = 0;
+ err = copy_mount_stuff_to_kernel((const void *)A(type), &type_page);
+ if(err)
+ return err;
+ if(type_page) {
+ is_smb = !strcmp((char *)type_page, SMBFS_NAME);
+ is_ncp = !strcmp((char *)type_page, NCPFS_NAME);
+ }
+ if(!is_smb && !is_ncp) {
+ if(type_page)
+ free_page(type_page);
+ return sys_mount((char *)A(dev_name), (char *)A(dir_name),
+ (char *)A(type), (unsigned long)new_flags,
+ (void *)A(data));
+ } else {
+ unsigned long dev_page, dir_page, data_page;
+ int old_fs;
-asmlinkage int sys32_personality(u32 personality)
-{
- return sys_personality((unsigned long)personality);
+ err = copy_mount_stuff_to_kernel((const void *)A(dev_name), &dev_page);
+ if(err)
+ goto out;
+ err = copy_mount_stuff_to_kernel((const void *)A(dir_name), &dir_page);
+ if(err)
+ goto dev_out;
+ err = copy_mount_stuff_to_kernel((const void *)A(data), &data_page);
+ if(err)
+ goto dir_out;
+ if(is_ncp)
+ do_ncp_super_data_conv((void *)data_page);
+ else if(is_smb)
+ do_smb_super_data_conv((void *)data_page);
+ else
+ panic("Tell DaveM he fucked up...");
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ err = sys_mount((char *)dev_page, (char *)dir_page,
+ (char *)type_page, (unsigned long)new_flags,
+ (void *)data_page);
+ set_fs(old_fs);
+
+ if(data_page)
+ free_page(data_page);
+ dir_out:
+ if(dir_page)
+ free_page(dir_page);
+ dev_out:
+ if(dev_page)
+ free_page(dev_page);
+ out:
+ if(type_page)
+ free_page(type_page);
+ return err;
+ }
}
struct rusage32 {
@@ -1416,13 +1403,6 @@ asmlinkage int sys32_wait4(__kernel_pid_t32 pid, u32 stat_addr, int options, u32
}
}
-extern asmlinkage int sys_waitpid(pid_t pid,unsigned int * stat_addr, int options);
-
-asmlinkage int sys32_waitpid(__kernel_pid_t32 pid, u32 stat_addr, int options)
-{
- return sys_waitpid(pid, (unsigned int *)A(stat_addr), options);
-}
-
struct sysinfo32 {
s32 uptime;
u32 loads[3];
@@ -1462,46 +1442,6 @@ asmlinkage int sys32_sysinfo(u32 info)
return ret;
}
-extern asmlinkage int sys_getitimer(int which, struct itimerval *value);
-
-asmlinkage int sys32_getitimer(int which, u32 value)
-{
- /* itimerval is the same :)) */
- return sys_getitimer(which, (struct itimerval *)A(value));
-}
-
-extern asmlinkage int sys_setitimer(int which, struct itimerval *value,
- struct itimerval *ovalue);
-
-asmlinkage int sys32_setitimer(int which, u32 value, u32 ovalue)
-{
- return sys_setitimer(which, (struct itimerval *)A(value),
- (struct itimerval *)A(ovalue));
-}
-
-extern asmlinkage int sys_sched_setscheduler(pid_t pid, int policy,
- struct sched_param *param);
-
-asmlinkage int sys32_sched_setscheduler(__kernel_pid_t32 pid, int policy, u32 param)
-{
- /* sched_param is the same :)) */
- return sys_sched_setscheduler(pid, policy, (struct sched_param *)A(param));
-}
-
-extern asmlinkage int sys_sched_setparam(pid_t pid, struct sched_param *param);
-
-asmlinkage int sys32_sched_setparam(__kernel_pid_t32 pid, u32 param)
-{
- return sys_sched_setparam(pid, (struct sched_param *)A(param));
-}
-
-extern asmlinkage int sys_sched_getparam(pid_t pid, struct sched_param *param);
-
-asmlinkage int sys32_sched_getparam(__kernel_pid_t32 pid, u32 param)
-{
- return sys_sched_getparam(pid, (struct sched_param *)A(param));
-}
-
struct timespec32 {
s32 tv_sec;
s32 tv_nsec;
@@ -1577,25 +1517,29 @@ asmlinkage int sys32_sigpending(u32 set)
return ret;
}
-extern asmlinkage unsigned long sys_signal(int signum, __sighandler_t handler);
+extern asmlinkage int sys_setreuid(uid_t ruid, uid_t euid);
-asmlinkage unsigned long sys32_signal(int signum, u32 handler)
+asmlinkage int sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
{
- return sys_signal(signum, (__sighandler_t)A(handler));
-}
-
-extern asmlinkage int sys_reboot(int magic1, int magic2, int cmd, void * arg);
+ uid_t sruid, seuid;
-asmlinkage int sys32_reboot(int magic1, int magic2, int cmd, u32 arg)
-{
- return sys_reboot(magic1, magic2, cmd, (void *)A(arg));
+ sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
+ seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
+ return sys_setreuid(sruid, seuid);
}
-extern asmlinkage int sys_acct(const char *name);
+extern asmlinkage int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
-asmlinkage int sys32_acct(u32 name)
+asmlinkage int sys32_setresuid(__kernel_uid_t32 ruid,
+ __kernel_uid_t32 euid,
+ __kernel_uid_t32 suid)
{
- return sys_acct((const char *)A(name));
+ uid_t sruid, seuid, ssuid;
+
+ sruid = (ruid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)ruid);
+ seuid = (euid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)euid);
+ ssuid = (suid == (__kernel_uid_t32)-1) ? ((uid_t)-1) : ((uid_t)suid);
+ return sys_setresuid(sruid, seuid, ssuid);
}
extern asmlinkage int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
@@ -1654,7 +1598,7 @@ asmlinkage int sys32_getgroups(int gidsetsize, u32 grouplist)
set_fs (KERNEL_DS);
ret = sys_getgroups(gidsetsize, gl);
set_fs (old_fs);
- if (ret > 0 && ret <= NGROUPS)
+ if (gidsetsize && ret > 0 && ret <= NGROUPS)
for (i = 0; i < ret; i++, grouplist += sizeof(__kernel_gid_t32))
if (__put_user (gl[i], (__kernel_gid_t32 *)A(grouplist)))
return -EFAULT;
@@ -1680,41 +1624,8 @@ asmlinkage int sys32_setgroups(int gidsetsize, u32 grouplist)
return ret;
}
-extern asmlinkage int sys_newuname(struct new_utsname * name);
-
-asmlinkage int sys32_newuname(u32 name)
-{
- /* utsname is the same :)) */
- return sys_newuname((struct new_utsname *)A(name));
-}
-
-extern asmlinkage int sys_olduname(struct oldold_utsname * name);
-
-asmlinkage int sys32_olduname(u32 name)
-{
- return sys_olduname((struct oldold_utsname *)A(name));
-}
-
-extern asmlinkage int sys_sethostname(char *name, int len);
-
-asmlinkage int sys32_sethostname(u32 name, int len)
-{
- return sys_sethostname((char *)A(name), len);
-}
-
-extern asmlinkage int sys_gethostname(char *name, int len);
-
-asmlinkage int sys32_gethostname(u32 name, int len)
-{
- return sys_gethostname((char *)A(name), len);
-}
-
-extern asmlinkage int sys_setdomainname(char *name, int len);
-
-asmlinkage int sys32_setdomainname(u32 name, int len)
-{
- return sys_setdomainname((char *)A(name), len);
-}
+#define RLIM_INFINITY32 0x7fffffff
+#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
s32 rlim_cur;
@@ -1733,8 +1644,8 @@ asmlinkage int sys32_getrlimit(unsigned int resource, u32 rlim)
ret = sys_getrlimit(resource, &r);
set_fs (old_fs);
if (!ret && (
- put_user (r.rlim_cur, &(((struct rlimit32 *)A(rlim))->rlim_cur)) ||
- __put_user (r.rlim_max, &(((struct rlimit32 *)A(rlim))->rlim_max))))
+ put_user (RESOURCE32(r.rlim_cur), &(((struct rlimit32 *)A(rlim))->rlim_cur)) ||
+ __put_user (RESOURCE32(r.rlim_max), &(((struct rlimit32 *)A(rlim))->rlim_max))))
return -EFAULT;
return ret;
}
@@ -1751,6 +1662,10 @@ asmlinkage int sys32_setrlimit(unsigned int resource, u32 rlim)
if (get_user (r.rlim_cur, &(((struct rlimit32 *)A(rlim))->rlim_cur)) ||
__get_user (r.rlim_max, &(((struct rlimit32 *)A(rlim))->rlim_max)))
return -EFAULT;
+ if (r.rlim_cur == RLIM_INFINITY32)
+ r.rlim_cur = RLIM_INFINITY;
+ if (r.rlim_max == RLIM_INFINITY32)
+ r.rlim_max = RLIM_INFINITY;
set_fs (KERNEL_DS);
ret = sys_setrlimit(resource, &r);
set_fs (old_fs);
@@ -1772,28 +1687,6 @@ asmlinkage int sys32_getrusage(int who, u32 ru)
return ret;
}
-extern asmlinkage int sys_time(int * tloc);
-
-asmlinkage int sys32_time(u32 tloc)
-{
- return sys_time((int *)A(tloc));
-}
-
-extern asmlinkage int sys_gettimeofday(struct timeval *tv, struct timezone *tz);
-
-asmlinkage int sys32_gettimeofday(u32 tv, u32 tz)
-{
- /* both timeval and timezone are ok :)) */
- return sys_gettimeofday((struct timeval *)A(tv), (struct timezone *)A(tz));
-}
-
-extern asmlinkage int sys_settimeofday(struct timeval *tv, struct timezone *tz);
-
-asmlinkage int sys32_settimeofday(u32 tv, u32 tz)
-{
- return sys_settimeofday((struct timeval *)A(tv), (struct timezone *)A(tz));
-}
-
struct timex32 {
unsigned int modes;
s32 offset;
@@ -1865,170 +1758,6 @@ asmlinkage int sys32_adjtimex(u32 txc_p)
return ret;
}
-extern asmlinkage int sys_msync(unsigned long start, size_t len, int flags);
-
-asmlinkage int sys32_msync(u32 start, __kernel_size_t32 len, int flags)
-{
- return sys_msync((unsigned long)start, (size_t)len, flags);
-}
-
-extern asmlinkage int sys_mlock(unsigned long start, size_t len);
-
-asmlinkage int sys32_mlock(u32 start, __kernel_size_t32 len)
-{
- return sys_mlock((unsigned long)start, (size_t)len);
-}
-
-extern asmlinkage int sys_munlock(unsigned long start, size_t len);
-
-asmlinkage int sys32_munlock(u32 start, __kernel_size_t32 len)
-{
- return sys_munlock((unsigned long)start, (size_t)len);
-}
-
-extern asmlinkage unsigned long sys_brk(unsigned long brk);
-
-asmlinkage unsigned long sparc32_brk(u32 brk)
-{
- return sys_brk((unsigned long)brk);
-}
-
-extern asmlinkage int sys_munmap(unsigned long addr, size_t len);
-
-asmlinkage int sys32_munmap(u32 addr, __kernel_size_t32 len)
-{
- return sys_munmap((unsigned long)addr, (size_t)len);
-}
-
-extern asmlinkage int sys_mprotect(unsigned long start, size_t len, unsigned long prot);
-
-asmlinkage int sys32_mprotect(u32 start, __kernel_size_t32 len, u32 prot)
-{
- return sys_mprotect((unsigned long)start, (size_t)len, (unsigned long)prot);
-}
-
-extern asmlinkage unsigned long sys_mremap(unsigned long addr, unsigned long old_len,
- unsigned long new_len, unsigned long flags);
-
-asmlinkage unsigned long sys32_mremap(u32 addr, u32 old_len, u32 new_len, u32 flags)
-{
- return sys_mremap((unsigned long)addr, (unsigned long)old_len,
- (unsigned long)new_len, (unsigned long)flags);
-}
-
-extern asmlinkage int sys_swapoff(const char * specialfile);
-
-asmlinkage int sys32_swapoff(u32 specialfile)
-{
- return sys_swapoff((const char *)A(specialfile));
-}
-
-extern asmlinkage int sys_swapon(const char * specialfile, int swap_flags);
-
-asmlinkage int sys32_swapon(u32 specialfile, int swap_flags)
-{
- return sys_swapon((const char *)A(specialfile), swap_flags);
-}
-
-extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
-
-asmlinkage inline int sys32_bind(int fd, u32 umyaddr, int addrlen)
-{
- /* sockaddr is the same :)) */
- return sys_bind(fd, (struct sockaddr *)A(umyaddr), addrlen);
-}
-
-extern asmlinkage int sys_accept(int fd, struct sockaddr *upeer_sockaddr,
- int *upeer_addrlen);
-
-asmlinkage inline int sys32_accept(int fd, u32 upeer_sockaddr, u32 upeer_addrlen)
-{
- return sys_accept(fd, (struct sockaddr *)A(upeer_sockaddr),
- (int *)A(upeer_addrlen));
-}
-
-extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen);
-
-asmlinkage inline int sys32_connect(int fd, u32 uservaddr, int addrlen)
-{
- return sys_connect(fd, (struct sockaddr *)A(uservaddr), addrlen);
-}
-
-extern asmlinkage int sys_getsockname(int fd, struct sockaddr *usockaddr,
- int *usockaddr_len);
-
-asmlinkage int sys32_getsockname(int fd, u32 usockaddr, u32 usockaddr_len)
-{
- return sys_getsockname(fd, (struct sockaddr *)A(usockaddr),
- (int *)A(usockaddr_len));
-}
-
-extern asmlinkage int sys_getpeername(int fd, struct sockaddr *usockaddr,
- int *usockaddr_len);
-
-asmlinkage int sys32_getpeername(int fd, u32 usockaddr, u32 usockaddr_len)
-{
- return sys_getpeername(fd, (struct sockaddr *)A(usockaddr),
- (int *)A(usockaddr_len));
-}
-
-extern asmlinkage int sys_send(int fd, void * buff, size_t len, unsigned flags);
-
-asmlinkage inline int sys32_send(int fd, u32 buff,
- __kernel_size_t32 len, unsigned flags)
-{
- return sys_send(fd, (void *)A(buff), (size_t)len, flags);
-}
-
-extern asmlinkage int sys_sendto(int fd, void * buff, size_t len, unsigned flags,
- struct sockaddr *addr, int addr_len);
-
-asmlinkage inline int sys32_sendto(int fd, u32 buff, __kernel_size_t32 len,
- unsigned flags, u32 addr, int addr_len)
-{
- return sys_sendto(fd, (void *)A(buff), (size_t)len, flags,
- (struct sockaddr *)A(addr), addr_len);
-}
-
-extern asmlinkage int sys_recv(int fd, void * ubuf, size_t size, unsigned flags);
-
-asmlinkage inline int sys32_recv(int fd, u32 ubuf,
- __kernel_size_t32 size, unsigned flags)
-{
- return sys_recv(fd, (void *)A(ubuf), (size_t)size, flags);
-}
-
-extern asmlinkage int sys_recvfrom(int fd, void * ubuf, size_t size, unsigned flags,
- struct sockaddr *addr, int *addr_len);
-
-asmlinkage inline int sys32_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
- unsigned flags, u32 addr, u32 addr_len)
-{
- return sys_recvfrom(fd, (void *)A(ubuf), (size_t)size, flags,
- (struct sockaddr *)A(addr), (int *)A(addr_len));
-}
-
-extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
- char *optval, int optlen);
-
-asmlinkage inline int sys32_setsockopt(int fd, int level, int optname,
- u32 optval, int optlen)
-{
- /* XXX handle ip_fw32->ip_fw conversion for IP firewalling and accounting.
- Do it using some macro in ip_sockglue.c
- Other optval arguments are mostly just ints or 32<->64bit transparent */
- return sys_setsockopt(fd, level, optname, (char *)A(optval), optlen);
-}
-
-extern asmlinkage int sys_getsockopt(int fd, int level, int optname,
- char *optval, int *optlen);
-
-asmlinkage inline int sys32_getsockopt(int fd, int level, int optname,
- u32 optval, u32 optlen)
-{
- return sys_getsockopt(fd, level, optname, (char *)A(optval), (int *)A(optlen));
-}
-
/* XXX This really belongs in some header file... -DaveM */
#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
16 for IP, 16 for IPX,
@@ -2052,11 +1781,11 @@ extern __inline__ struct socket *sockfd_lookup(int fd, int *err)
return NULL;
}
- inode = file->f_inode;
+ inode = file->f_dentry->d_inode;
if (!inode || !inode->i_sock || !socki_lookup(inode))
{
*err = -ENOTSOCK;
- fput(file,inode);
+ fput(file);
return NULL;
}
@@ -2065,7 +1794,7 @@ extern __inline__ struct socket *sockfd_lookup(int fd, int *err)
extern __inline__ void sockfd_put(struct socket *sock)
{
- fput(sock->file,sock->inode);
+ fput(sock->file);
}
struct msghdr32 {
@@ -2293,6 +2022,24 @@ static unsigned char nargs[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
#undef AL
+extern asmlinkage int sys32_bind(int fd, u32 umyaddr, int addrlen);
+extern asmlinkage int sys32_connect(int fd, u32 uservaddr, int addrlen);
+extern asmlinkage int sys32_accept(int fd, u32 upeer_sockaddr, u32 upeer_addrlen);
+extern asmlinkage int sys32_getsockname(int fd, u32 usockaddr, u32 usockaddr_len);
+extern asmlinkage int sys32_getpeername(int fd, u32 usockaddr, u32 usockaddr_len);
+extern asmlinkage int sys32_send(int fd, u32 buff, __kernel_size_t32 len,
+ unsigned flags);
+extern asmlinkage int sys32_sendto(int fd, u32 buff, __kernel_size_t32 len,
+ unsigned flags, u32 addr, int addr_len);
+extern asmlinkage int sys32_recv(int fd, u32 ubuf, __kernel_size_t32 size,
+ unsigned flags);
+extern asmlinkage int sys32_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
+ unsigned flags, u32 addr, u32 addr_len);
+extern asmlinkage int sys32_setsockopt(int fd, int level, int optname,
+ u32 optval, int optlen);
+extern asmlinkage int sys32_getsockopt(int fd, int level, int optname,
+ u32 optval, u32 optlen);
+
extern asmlinkage int sys_socket(int family, int type, int protocol);
extern asmlinkage int sys_socketpair(int family, int type, int protocol,
int usockvec[2]);
@@ -2389,7 +2136,7 @@ asmlinkage int sparc32_sigaction (int signum, u32 action, u32 oldaction)
old_sa.sa_mask = (sigset_t32)(p->sa_mask);
old_sa.sa_flags = (unsigned)(p->sa_flags);
old_sa.sa_restorer = (unsigned)(u64)(p->sa_restorer);
- if (copy_to_user(A(oldaction), p, sizeof(struct sigaction32)))
+ if (copy_to_user(A(oldaction), &old_sa, sizeof(struct sigaction32)))
goto out;
}
@@ -2407,14 +2154,6 @@ out:
return err;
}
-extern asmlinkage int sys_nfsservctl(int cmd, void *argp, void *resp);
-
-asmlinkage int sys32_nfsservctl(int cmd, u32 argp, u32 resp)
-{
- /* XXX handle argp and resp args */
- return sys_nfsservctl(cmd, (void *)A(argp), (void *)A(resp));
-}
-
/*
* count32() counts the number of arguments/envelopes
*/
@@ -2485,25 +2224,33 @@ static inline int
do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
{
struct linux_binprm bprm;
+ struct dentry * dentry;
int retval;
int i;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
bprm.page[i] = 0;
- retval = open_namei(filename, 0, 0, &bprm.inode, NULL);
- if (retval)
+
+ dentry = open_namei(filename, 0, 0);
+ retval = PTR_ERR(dentry);
+ if (IS_ERR(dentry))
return retval;
+
+ bprm.dentry = dentry;
bprm.filename = filename;
bprm.sh_bang = 0;
bprm.java = 0;
bprm.loader = 0;
bprm.exec = 0;
- bprm.dont_iput = 0;
- if ((bprm.argc = count32(argv)) < 0)
+ if ((bprm.argc = count32(argv)) < 0) {
+ dput(dentry);
return bprm.argc;
- if ((bprm.envc = count32(envp)) < 0)
+ }
+ if ((bprm.envc = count32(envp)) < 0) {
+ dput(dentry);
return bprm.envc;
+ }
retval = prepare_binprm(&bprm);
@@ -2523,8 +2270,9 @@ do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs * regs)
return retval;
/* Something went wrong, return the inode and free the argument pages*/
- if(!bprm.dont_iput)
- iput(bprm.inode);
+ if(bprm.dentry)
+ dput(bprm.dentry);
+
for (i=0 ; i<MAX_ARG_PAGES ; i++)
free_page(bprm.page[i]);
return(retval);
@@ -2543,81 +2291,231 @@ asmlinkage int sparc32_execve(struct pt_regs *regs)
if((u32)regs->u_regs[UREG_G1] == 0)
base = 1;
- error = getname((char *)(unsigned long)(u32)regs->u_regs[base + UREG_I0], &filename);
- if(error)
- return error;
+ lock_kernel();
+ filename = getname((char *)(unsigned long)(u32)regs->u_regs[base + UREG_I0]);
+ error = PTR_ERR(filename);
+ if(IS_ERR(filename))
+ goto out;
error = do_execve32(filename,
(u32 *)A((u32)regs->u_regs[base + UREG_I1]),
(u32 *)A((u32)regs->u_regs[base + UREG_I2]), regs);
putname(filename);
+
+ if(!error) {
+ fprs_write(0);
+ regs->fprs = 0;
+ }
+out:
+ unlock_kernel();
return error;
}
-/* Modules will be supported with 64bit modutils only */
-asmlinkage int sys32_no_modules(void)
+#ifdef CONFIG_MODULES
+
+extern asmlinkage unsigned long sys_create_module(const char *name_user, size_t size);
+
+asmlinkage unsigned long sys32_create_module(u32 name_user, __kernel_size_t32 size)
{
- return -ENOSYS;
+ return sys_create_module((const char *)A(name_user), (size_t)size);
}
-struct ncp_mount_data32 {
- int version;
- unsigned int ncp_fd;
- __kernel_uid_t32 mounted_uid;
- __kernel_pid_t32 wdog_pid;
- unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
- unsigned int time_out;
- unsigned int retry_count;
- unsigned int flags;
- __kernel_uid_t32 uid;
- __kernel_gid_t32 gid;
- __kernel_mode_t32 file_mode;
- __kernel_mode_t32 dir_mode;
-};
+extern asmlinkage int sys_init_module(const char *name_user, struct module *mod_user);
-void *do_ncp_super_data_conv(void *raw_data)
+/* When a module is initialized from 32-bit modutils, take the time to hand
+ * the kernel a proper 64-bit module structure -- no need to pollute it. :))
+ */
+asmlinkage int sys32_init_module(u32 nameuser, u32 mod_user)
{
- struct ncp_mount_data *n = (struct ncp_mount_data *)raw_data;
- struct ncp_mount_data32 *n32 = (struct ncp_mount_data32 *)raw_data;
+ return sys_init_module((const char *)A(nameuser), (struct module *)A(mod_user));
+}
- n->dir_mode = n32->dir_mode;
- n->file_mode = n32->file_mode;
- n->gid = n32->gid;
- n->uid = n32->uid;
- memmove (n->mounted_vol, n32->mounted_vol, (sizeof (n32->mounted_vol) + 3 * sizeof (unsigned int)));
- n->wdog_pid = n32->wdog_pid;
- n->mounted_uid = n32->mounted_uid;
- return raw_data;
+extern asmlinkage int sys_delete_module(const char *name_user);
+
+asmlinkage int sys32_delete_module(u32 name_user)
+{
+ return sys_delete_module((const char *)A(name_user));
}
-struct smb_mount_data32 {
- int version;
- unsigned int fd;
- __kernel_uid_t32 mounted_uid;
- struct sockaddr_in addr;
- char server_name[17];
- char client_name[17];
- char service[64];
- char root_path[64];
- char username[64];
- char password[64];
- char domain[64];
- unsigned short max_xmit;
- __kernel_uid_t32 uid;
- __kernel_gid_t32 gid;
- __kernel_mode_t32 file_mode;
- __kernel_mode_t32 dir_mode;
+struct module_info32 {
+ u32 addr;
+ u32 size;
+ u32 flags;
+ s32 usecount;
};
-void *do_smb_super_data_conv(void *raw_data)
+extern asmlinkage int sys_query_module(const char *name_user, int which, char *buf, size_t bufsize, size_t *ret);
+
+asmlinkage int sys32_query_module(u32 name_user, int which, u32 buf, __kernel_size_t32 bufsize, u32 retv)
{
- struct smb_mount_data *s = (struct smb_mount_data *)raw_data;
- struct smb_mount_data32 *s32 = (struct smb_mount_data32 *)raw_data;
+ char *buff;
+ unsigned long old_fs = get_fs();
+ size_t val;
+ int ret, i, j;
+ unsigned long *p;
+ char *usernam = NULL;
+ int bufsiz = bufsize;
+ struct module_info mi;
+
+ switch (which) {
+ case 0: return sys_query_module ((const char *)A(name_user), which, (char *)A(buf), (size_t)bufsize, (size_t *)A(retv));
+ case QM_SYMBOLS:
+ bufsiz <<= 1;
+ case QM_MODULES:
+ case QM_REFS:
+ case QM_DEPS:
+ if (name_user && (ret = getname32 (name_user, &usernam)))
+ return ret;
+ buff = kmalloc (bufsiz, GFP_KERNEL);
+ if (!buff) {
+ if (name_user) putname32 (usernam);
+ return -ENOMEM;
+ }
+qmsym_toshort:
+ set_fs (KERNEL_DS);
+ ret = sys_query_module (usernam, which, buff, bufsiz, &val);
+ set_fs (old_fs);
+ if (which != QM_SYMBOLS) {
+ if (ret == -ENOSPC || !ret) {
+ if (put_user (val, (__kernel_size_t32 *)A(retv)))
+ ret = -EFAULT;
+ }
+ if (!ret) {
+ if (copy_to_user ((char *)A(buf), buff, bufsize))
+ ret = -EFAULT;
+ }
+ } else {
+ if (ret == -ENOSPC) {
+ if (put_user (2 * val, (__kernel_size_t32 *)A(retv)))
+ ret = -EFAULT;
+ }
+ p = (unsigned long *)buff;
+ if (!ret) {
+ if (put_user (val, (__kernel_size_t32 *)A(retv)))
+ ret = -EFAULT;
+ }
+ if (!ret) {
+ j = val * 8;
+ for (i = 0; i < val; i++, p += 2) {
+ if (bufsize < (2 * sizeof (u32))) {
+ bufsiz = 0;
+ goto qmsym_toshort;
+ }
+ if (put_user (p[0], (u32 *)A(buf)) ||
+ __put_user (p[1] - j, (((u32 *)A(buf))+1))) {
+ ret = -EFAULT;
+ break;
+ }
+ bufsize -= (2 * sizeof (u32));
+ buf += (2 * sizeof (u32));
+ }
+ }
+ if (!ret && val) {
+ char *strings = buff + ((unsigned long *)buff)[1];
+ j = *(p - 1) - ((unsigned long *)buff)[1];
+ j = j + strlen (buff + j) + 1;
+ if (bufsize < j) {
+ bufsiz = 0;
+ goto qmsym_toshort;
+ }
+ if (copy_to_user ((char *)A(buf), strings, j))
+ ret = -EFAULT;
+ }
+ }
+ kfree (buff);
+ if (name_user) putname32 (usernam);
+ return ret;
+ case QM_INFO:
+ if (name_user && (ret = getname32 (name_user, &usernam)))
+ return ret;
+ set_fs (KERNEL_DS);
+ ret = sys_query_module (usernam, which, (char *)&mi, sizeof (mi), &val);
+ set_fs (old_fs);
+ if (!ret) {
+ if (put_user (sizeof (struct module_info32), (__kernel_size_t32 *)A(retv)))
+ ret = -EFAULT;
+ else if (bufsize < sizeof (struct module_info32))
+ ret = -ENOSPC;
+ }
+ if (!ret) {
+ if (put_user (mi.addr, &(((struct module_info32 *)A(buf))->addr)) ||
+ __put_user (mi.size, &(((struct module_info32 *)A(buf))->size)) ||
+ __put_user (mi.flags, &(((struct module_info32 *)A(buf))->flags)) ||
+ __put_user (mi.usecount, &(((struct module_info32 *)A(buf))->usecount)))
+ ret = -EFAULT;
+ }
+ if (name_user) putname32 (usernam);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
- s->dir_mode = s32->dir_mode;
- s->file_mode = s32->file_mode;
- s->gid = s32->gid;
- s->uid = s32->uid;
- memmove (&s->addr, &s32->addr, (((long)&s->uid) - ((long)&s->addr)));
- s->mounted_uid = s32->mounted_uid;
- return raw_data;
+struct kernel_sym32 {
+ u32 value;
+ char name[60];
+};
+
+extern asmlinkage int sys_get_kernel_syms(struct kernel_sym *table);
+
+asmlinkage int sys32_get_kernel_syms(u32 table)
+{
+ int len, i;
+ struct kernel_sym *tbl;
+ unsigned long old_fs;
+
+ len = sys_get_kernel_syms(NULL);
+ if (!table) return len;
+ tbl = kmalloc (len * sizeof (struct kernel_sym), GFP_KERNEL);
+ if (!tbl) return -ENOMEM;
+ old_fs = get_fs();
+ set_fs (KERNEL_DS);
+ sys_get_kernel_syms(tbl);
+ set_fs (old_fs);
+ for (i = 0; i < len; i++, table += sizeof (struct kernel_sym32)) {
+ if (put_user (tbl[i].value, &(((struct kernel_sym32 *)A(table))->value)) ||
+ copy_to_user (((struct kernel_sym32 *)A(table))->name, tbl[i].name, 60))
+ break;
+ }
+ kfree (tbl);
+ return i;
+}
+
+#else /* CONFIG_MODULES */
+
+asmlinkage unsigned long
+sys_create_module(const char *name_user, size_t size)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int
+sys_init_module(const char *name_user, struct module *mod_user)
+{
+ return -ENOSYS;
}
+
+asmlinkage int
+sys_delete_module(const char *name_user)
+{
+ return -ENOSYS;
+}
+
+asmlinkage int
+sys_query_module(const char *name_user, int which, char *buf, size_t bufsize,
+ size_t *ret)
+{
+ /* Let the program know about the new interface. Not that
+ it'll do them much good. */
+ if (which == 0)
+ return 0;
+
+ return -ENOSYS;
+}
+
+asmlinkage int
+sys_get_kernel_syms(struct kernel_sym *table)
+{
+ return -ENOSYS;
+}
+
+#endif /* CONFIG_MODULES */
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
new file mode 100644
index 000000000..3dfdad5a7
--- /dev/null
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -0,0 +1,1511 @@
+/* $Id: sys_sunos32.c,v 1.1 1997/07/18 06:26:43 ralf Exp $
+ * sys_sunos32.c: SunOS binary compatibility layer on sparc64.
+ *
+ * Copyright (C) 1995, 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ *
+ * Based upon preliminary work which is:
+ *
+ * Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/fs.h>
+#include <linux/resource.h>
+#include <linux/ipc.h>
+#include <linux/shm.h>
+#include <linux/msg.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/uio.h>
+#include <linux/utsname.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/malloc.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pconf.h>
+#include <asm/idprom.h> /* for gethostid() */
+#include <asm/unistd.h>
+#include <asm/system.h>
+
+/* For the nfs mount emulation */
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/nfs.h>
+#include <linux/nfs_mount.h>
+
+/* for sunos_select */
+#include <linux/time.h>
+#include <linux/personality.h>
+
+#define A(x) ((unsigned long)x)
+
+#define SUNOS_NR_OPEN 256
+
+extern unsigned long get_unmapped_area(unsigned long addr, unsigned long len);
+
+asmlinkage u32 sunos_mmap(u32 addr, u32 len, u32 prot, u32 flags, u32 fd, u32 off)
+{
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
+ lock_kernel();
+ current->personality |= PER_BSD;
+ if(flags & MAP_NORESERVE) {
+ printk("%s: unimplemented SunOS MAP_NORESERVE mmap() flag\n",
+ current->comm);
+ flags &= ~MAP_NORESERVE;
+ }
+ retval = -EBADF;
+ if(!(flags & MAP_ANONYMOUS))
+ if(fd >= SUNOS_NR_OPEN || !(file = current->files->fd[fd]))
+ goto out;
+ retval = -ENOMEM;
+ if(!(flags & MAP_FIXED) && !addr) {
+ unsigned long attempt = get_unmapped_area(addr, len);
+ if(!attempt || (attempt >= 0xf0000000UL))
+ goto out;
+ addr = (u32) attempt;
+ }
+ if(file->f_dentry && file->f_dentry->d_inode) {
+ if(MAJOR(file->f_dentry->d_inode->i_rdev) == MEM_MAJOR &&
+ MINOR(file->f_dentry->d_inode->i_rdev) == 5) {
+ flags |= MAP_ANONYMOUS;
+ file = 0;
+ }
+ }
+ if(!(flags & MAP_FIXED))
+ addr = 0;
+ ret_type = flags & _MAP_NEW;
+ flags &= ~_MAP_NEW;
+
+ retval = do_mmap(file,
+ (unsigned long) addr, (unsigned long) len,
+ (unsigned long) prot, (unsigned long) flags,
+ (unsigned long) off);
+ if(!ret_type)
+ retval = ((retval < 0xf0000000) ? 0 : retval);
+out:
+ unlock_kernel();
+ return (u32) retval;
+}
+
+asmlinkage int sunos_mctl(u32 addr, u32 len, int function, u32 arg)
+{
+ return 0;
+}
+
+asmlinkage int sunos_brk(u32 baddr)
+{
+ int freepages, retval = -ENOMEM;
+ unsigned long rlim;
+ unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;
+
+ lock_kernel();
+ if (brk < current->mm->end_code)
+ goto out;
+ newbrk = PAGE_ALIGN(brk);
+ oldbrk = PAGE_ALIGN(current->mm->brk);
+ retval = 0;
+ if (oldbrk == newbrk) {
+ current->mm->brk = brk;
+ goto out;
+ }
+ /* Always allow shrinking brk. */
+ if (brk <= current->mm->brk) {
+ current->mm->brk = brk;
+ do_munmap(newbrk, oldbrk-newbrk);
+ goto out;
+ }
+ /* Check against rlimit and stack.. */
+ retval = -ENOMEM;
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
+ if (brk - current->mm->end_code > rlim)
+ goto out;
+ /* Check against existing mmap mappings. */
+ if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
+ goto out;
+ /* stupid algorithm to decide if we have enough memory: while
+ * simple, it hopefully works in most obvious cases.. Easy to
+ * fool it, but this should catch most mistakes.
+ */
+ freepages = buffermem >> PAGE_SHIFT;
+ freepages += page_cache_size;
+ freepages >>= 1;
+ freepages += nr_free_pages;
+ freepages += nr_swap_pages;
+ freepages -= num_physpages >> 4;
+ freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
+ if (freepages < 0)
+ goto out;
+ /* Ok, we have probably got enough memory - let it rip. */
+ current->mm->brk = brk;
+ do_mmap(NULL, oldbrk, newbrk-oldbrk,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_FIXED|MAP_PRIVATE, 0);
+ retval = 0;
+out:
+ unlock_kernel();
+ return retval;
+}
+
+asmlinkage u32 sunos_sbrk(int increment)
+{
+ int error, oldbrk;
+
+ /* This should do it hopefully... */
+ lock_kernel();
+ oldbrk = (int)current->mm->brk;
+ error = sunos_brk(((int) current->mm->brk) + increment);
+ if(!error)
+ error = oldbrk;
+ unlock_kernel();
+ return error;
+}
+
+asmlinkage u32 sunos_sstk(int increment)
+{
+ lock_kernel();
+ printk("%s: Call to sunos_sstk(increment<%d>) is unsupported\n",
+ current->comm, increment);
+ unlock_kernel();
+ return (u32)-1;
+}
+
+/* Give hints to the kernel as to what paging strategy to use...
+ * Completely bogus, don't remind me.
+ */
+#define VA_NORMAL 0 /* Normal vm usage expected */
+#define VA_ABNORMAL 1 /* Abnormal/random vm usage probable */
+#define VA_SEQUENTIAL 2 /* Accesses will be of a sequential nature */
+#define VA_INVALIDATE 3 /* Page table entries should be flushed ??? */
+static char *vstrings[] = {
+ "VA_NORMAL",
+ "VA_ABNORMAL",
+ "VA_SEQUENTIAL",
+ "VA_INVALIDATE",
+};
+
+asmlinkage void sunos_vadvise(u32 strategy)
+{
+ /* I wanna see who uses this... */
+ lock_kernel();
+ printk("%s: Advises us to use %s paging strategy\n",
+ current->comm,
+ strategy <= 3 ? vstrings[strategy] : "BOGUS");
+ unlock_kernel();
+}
+
+/* Same as vadvise, and just as bogus, but for a range of virtual
+ * process address space.
+ */
+#define MADV_NORMAL 0 /* Nothing special... */
+#define MADV_RANDOM 1 /* I am emacs... */
+#define MADV_SEQUENTIAL 2 /* I am researcher code... */
+#define MADV_WILLNEED 3 /* Pages in this range will be needed */
+#define MADV_DONTNEED 4 /* Pages in this range won't be needed */
+
+static char *mstrings[] = {
+ "MADV_NORMAL",
+ "MADV_RANDOM",
+ "MADV_SEQUENTIAL",
+ "MADV_WILLNEED",
+ "MADV_DONTNEED",
+};
+
+asmlinkage void sunos_madvise(u32 address, u32 len, u32 strategy)
+{
+ /* I wanna see who uses this... */
+ lock_kernel();
+ printk("%s: Advises us to use %s paging strategy for addr<%08x> len<%08x>\n",
+ current->comm, strategy <= 4 ? mstrings[strategy] : "BOGUS",
+ address, len);
+ unlock_kernel();
+}
+
+/* Places into character array, the status of all the pages in the passed
+ * range from 'addr' to 'addr + len'. -1 on failure, 0 on success...
+ * The encoding in each character is:
+ * low-bit is zero == Page is not in physical ram right now
+ * low-bit is one == Page is currently residing in core
+ * All other bits are undefined within the character so there...
+ * Also, if you try to get stats on an area outside of the user vm area
+ * *or* the passed base address is not aligned on a page boundary you
+ * get an error.
+ */
+asmlinkage int sunos_mincore(u32 addr, u32 len, u32 u_array)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ unsigned long limit;
+ int num_pages, pnum, retval = -EINVAL;
+ char *array = (char *)A(u_array);
+
+ lock_kernel();
+ if(addr & ~(4096))
+ goto out;
+ num_pages = (len / 4096);
+ retval = -EFAULT;
+ if(verify_area(VERIFY_WRITE, array, num_pages))
+ goto out;
+ retval = -ENOMEM;
+ if((addr >= 0xf0000000) || ((addr + len) > 0xf0000000))
+ goto out; /* I'm sure you're curious about kernel mappings.. */
+ /* Wheee, go through pte's */
+ pnum = 0;
+ for(limit = addr + len; addr < limit; addr += 4096, pnum++) {
+ pgdp = pgd_offset(current->mm, addr);
+ if(pgd_none(*pgdp))
+ goto out; /* As per SunOS manpage */
+ pmdp = pmd_offset(pgdp, addr);
+ if(pmd_none(*pmdp))
+ goto out; /* As per SunOS manpage */
+ ptep = pte_offset(pmdp, addr);
+ if(pte_none(*ptep))
+ goto out; /* As per SunOS manpage */
+ /* Page in core or Swapped page? */
+ __put_user((pte_present(*ptep) ? 1 : 0), &array[pnum]);
+ }
+ retval = 0; /* Success... I think... */
+out:
+ unlock_kernel();
+ return retval;
+}
+
+/* This just wants the soft limit (ie. rlim_cur element) of the RLIMIT_NOFILE
+ * resource limit and is for backwards compatibility with older sunos
+ * revs.
+ */
+asmlinkage int sunos_getdtablesize(void)
+{
+ return SUNOS_NR_OPEN;
+}
+
+#define _S(nr) (1<<((nr)-1))
+
+#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
+
+asmlinkage u32 sunos_sigblock(u32 blk_mask)
+{
+ unsigned long flags;
+ u32 old;
+
+ lock_kernel();
+ save_and_cli(flags);
+ old = (u32) current->blocked;
+ current->blocked |= (blk_mask & _BLOCKABLE);
+ restore_flags(flags);
+ unlock_kernel();
+ return old;
+}
+
+asmlinkage u32 sunos_sigsetmask(u32 newmask)
+{
+ unsigned long flags;
+ u32 retval;
+
+ lock_kernel();
+ save_and_cli(flags);
+ retval = (u32) current->blocked;
+ current->blocked = (newmask & _BLOCKABLE);
+ restore_flags(flags);
+ unlock_kernel();
+ return retval;
+}
+
+/* SunOS getdents is very similar to the newer Linux (iBCS2 compliant) */
+/* getdents system call, the format of the structure just has a different */
+/* layout (d_off+d_ino instead of d_ino+d_off) */
+struct sunos_dirent {
+ s32 d_off;
+ u32 d_ino;
+ u16 d_reclen;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct sunos_dirent_callback {
+ struct sunos_dirent *curr;
+ struct sunos_dirent *previous;
+ int count;
+ int error;
+};
+
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+#define ROUND_UP(x) (((x)+sizeof(s32)-1) & ~(sizeof(s32)-1))
+
+static int sunos_filldir(void * __buf, const char * name, int namlen,
+ off_t offset, ino_t ino)
+{
+ struct sunos_dirent * dirent;
+ struct sunos_dirent_callback * buf = (struct sunos_dirent_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ ((char *) dirent) += reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int sunos_getdents(unsigned int fd, u32 u_dirent, int cnt)
+{
+ struct file * file;
+ struct dentry * dentry;
+ struct inode * inode;
+ struct sunos_dirent * lastdirent;
+ struct sunos_dirent_callback buf;
+ int error = -EBADF;
+ void *dirent = (void *)A(u_dirent);
+
+ lock_kernel();
+ if(fd >= SUNOS_NR_OPEN)
+ goto out;
+
+ file = current->files->fd[fd];
+ if(!file)
+ goto out;
+
+ dentry = file->f_dentry;
+ if(!dentry)
+ goto out;
+
+ inode = dentry->d_inode;
+ if(!inode)
+ goto out;
+
+ error = -ENOTDIR;
+ if (!file->f_op || !file->f_op->readdir)
+ goto out;
+
+ error = -EINVAL;
+ if(cnt < (sizeof(struct sunos_dirent) + 255))
+ goto out;
+
+ buf.curr = (struct sunos_dirent *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+
+ error = file->f_op->readdir(inode, file, &buf, sunos_filldir);
+ if (error < 0)
+ goto out;
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = cnt - buf.count;
+ }
+out:
+ unlock_kernel();
+ return error;
+}
+
+/* Old sunos getdirentries, severely broken compatibility stuff here. */
+struct sunos_direntry {
+ u32 d_ino;
+ u16 d_reclen;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct sunos_direntry_callback {
+ struct sunos_direntry *curr;
+ struct sunos_direntry *previous;
+ int count;
+ int error;
+};
+
+static int sunos_filldirentry(void * __buf, const char * name, int namlen,
+ off_t offset, ino_t ino)
+{
+ struct sunos_direntry * dirent;
+ struct sunos_direntry_callback * buf = (struct sunos_direntry_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ dirent = buf->curr;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ ((char *) dirent) += reclen;
+ buf->curr = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int sunos_getdirentries(unsigned int fd, u32 u_dirent,
+ int cnt, u32 u_basep)
+{
+ struct file * file;
+ struct dentry * dentry;
+ struct inode * inode;
+ struct sunos_direntry * lastdirent;
+ struct sunos_direntry_callback buf;
+ int error = -EBADF;
+ void *dirent = (void *) A(u_dirent);
+ unsigned int *basep = (unsigned int *)A(u_basep);
+
+ lock_kernel();
+ if(fd >= SUNOS_NR_OPEN)
+ goto out;
+
+ file = current->files->fd[fd];
+ if(!file)
+ goto out;
+
+ dentry = file->f_dentry;
+ if(!dentry)
+ goto out;
+
+ inode = dentry->d_inode;
+ if(!inode)
+ goto out;
+
+ error = -ENOTDIR;
+ if (!file->f_op || !file->f_op->readdir)
+ goto out;
+
+ error = -EINVAL;
+ if(cnt < (sizeof(struct sunos_direntry) + 255))
+ goto out;
+
+ buf.curr = (struct sunos_direntry *) dirent;
+ buf.previous = NULL;
+ buf.count = cnt;
+ buf.error = 0;
+
+ error = file->f_op->readdir(inode, file, &buf, sunos_filldirentry);
+ if (error < 0)
+ goto out;
+ lastdirent = buf.previous;
+ error = buf.error;
+ if (lastdirent) {
+ put_user(file->f_pos, basep);
+ error = cnt - buf.count;
+ }
+out:
+ unlock_kernel();
+ return error;
+}
+
+asmlinkage int sunos_getdomainname(u32 u_name, int len)
+{
+ int nlen = strlen(system_utsname.domainname);
+ int ret = -EFAULT;
+ char *name = (char *)A(u_name);
+
+ lock_kernel();
+ if (nlen < len)
+ len = nlen;
+
+ if(len > __NEW_UTS_LEN)
+ goto out;
+ if(copy_to_user(name, system_utsname.domainname, len))
+ goto out;
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+struct sunos_utsname {
+ char sname[9];
+ char nname[9];
+ char nnext[56];
+ char rel[9];
+ char ver[9];
+ char mach[9];
+};
+
+asmlinkage int sunos_uname(u32 u_name)
+{
+ struct sunos_utsname *name = (struct sunos_utsname *)A(u_name);
+ int ret = -EFAULT;
+
+ lock_kernel();
+ if(!name)
+ goto out;
+ if(copy_to_user(&name->sname[0],
+ &system_utsname.sysname[0],
+ sizeof(name->sname) - 1))
+ goto out;
+ copy_to_user(&name->nname[0],
+ &system_utsname.nodename[0],
+ sizeof(name->nname) - 1);
+ put_user('\0', &name->nname[8]);
+ copy_to_user(&name->rel[0], &system_utsname.release[0], sizeof(name->rel) - 1);
+ copy_to_user(&name->ver[0], &system_utsname.version[0], sizeof(name->ver) - 1);
+ copy_to_user(&name->mach[0], &system_utsname.machine[0], sizeof(name->mach) - 1);
+ ret = 0;
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage int sunos_nosys(void)
+{
+ struct pt_regs *regs;
+
+ lock_kernel();
+ regs = current->tss.kregs;
+ current->tss.sig_address = regs->tpc;
+ current->tss.sig_desc = regs->u_regs[UREG_G1];
+ send_sig(SIGSYS, current, 1);
+ printk("Process makes ni_syscall number %d, register dump:\n",
+ (int) regs->u_regs[UREG_G1]);
+ show_regs(regs);
+ unlock_kernel();
+ return -ENOSYS;
+}
+
+/* This is not a real and complete implementation yet, just to keep
+ * the easy SunOS binaries happy.
+ */
+asmlinkage int sunos_fpathconf(int fd, int name)
+{
+ int ret;
+
+ lock_kernel();
+ switch(name) {
+ case _PCONF_LINK:
+ ret = LINK_MAX;
+ break;
+ case _PCONF_CANON:
+ ret = MAX_CANON;
+ break;
+ case _PCONF_INPUT:
+ ret = MAX_INPUT;
+ break;
+ case _PCONF_NAME:
+ ret = NAME_MAX;
+ break;
+ case _PCONF_PATH:
+ ret = PATH_MAX;
+ break;
+ case _PCONF_PIPE:
+ ret = PIPE_BUF;
+ break;
+ case _PCONF_CHRESTRICT: /* XXX Investigate XXX */
+ ret = 1;
+ break;
+ case _PCONF_NOTRUNC: /* XXX Investigate XXX */
+ case _PCONF_VDISABLE:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage int sunos_pathconf(u32 u_path, int name)
+{
+ int ret;
+
+ lock_kernel();
+ ret = sunos_fpathconf(0, name); /* XXX cheese XXX */
+ unlock_kernel();
+ return ret;
+}
+
+/* SunOS mount system call emulation */
+extern asmlinkage int
+sys32_select(int n, u32 inp, u32 outp, u32 exp, u32 tvp);
+
+asmlinkage int sunos_select(int width, u32 inp, u32 outp, u32 exp, u32 tvp)
+{
+ int ret;
+
+ /* SunOS binaries expect that select won't change the tvp contents */
+ lock_kernel();
+ current->personality |= STICKY_TIMEOUTS;
+ ret = sys32_select (width, inp, outp, exp, tvp);
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void sunos_nop(void)
+{
+ return;
+}
+
+/* XXXXXXXXXX SunOS mount/umount. XXXXXXXXXXX */
+#define SMNT_RDONLY 1
+#define SMNT_NOSUID 2
+#define SMNT_NEWTYPE 4
+#define SMNT_GRPID 8
+#define SMNT_REMOUNT 16
+#define SMNT_NOSUB 32
+#define SMNT_MULTI 64
+#define SMNT_SYS5 128
+
+struct sunos_fh_t {
+ char fh_data [NFS_FHSIZE];
+};
+
+struct sunos_nfs_mount_args {
+ struct sockaddr_in *addr; /* file server address */
+ struct nfs_fh *fh; /* File handle to be mounted */
+ int flags; /* flags */
+ int wsize; /* write size in bytes */
+ int rsize; /* read size in bytes */
+ int timeo; /* initial timeout in .1 secs */
+ int retrans; /* times to retry send */
+ char *hostname; /* server's hostname */
+ int acregmin; /* attr cache file min secs */
+ int acregmax; /* attr cache file max secs */
+ int acdirmin; /* attr cache dir min secs */
+ int acdirmax; /* attr cache dir max secs */
+ char *netname; /* server's netname */
+};
+
+extern int do_mount(kdev_t, const char *, const char *, char *, int, void *);
+extern dev_t get_unnamed_dev(void);
+extern void put_unnamed_dev(dev_t);
+extern asmlinkage int sys_mount(char *, char *, char *, unsigned long, void *);
+extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr, int addrlen);
+extern asmlinkage int sys_socket(int family, int type, int protocol);
+extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
+
+
+/* Bind the socket on a local reserved port and connect it to the
+ * remote server. This on Linux/i386 is done by the mount program,
+ * not by the kernel.
+ */
+/* XXXXXXXXXXXXXXXXXXXX */
+static int
+sunos_nfs_get_server_fd (int fd, struct sockaddr_in *addr)
+{
+ struct sockaddr_in local;
+ struct sockaddr_in server;
+ int try_port;
+ int ret;
+ struct socket *socket;
+ struct dentry *dentry;
+ struct inode *inode;
+ struct file *file;
+
+ file = current->files->fd [fd];
+ if(!file)
+ return 0;
+
+ dentry = file->f_dentry;
+ if(!dentry)
+ return 0;
+
+ inode = dentry->d_inode;
+ if(!inode)
+ return 0;
+
+ socket = &inode->u.socket_i;
+ local.sin_family = AF_INET;
+ local.sin_addr.s_addr = INADDR_ANY;
+
+ /* IPPORT_RESERVED = 1024, can't find the definition in the kernel */
+ try_port = 1024;
+ do {
+ local.sin_port = htons (--try_port);
+ ret = socket->ops->bind(socket, (struct sockaddr*)&local,
+ sizeof(local));
+ } while (ret && try_port > (1024 / 2));
+
+ if (ret)
+ return 0;
+
+ server.sin_family = AF_INET;
+ server.sin_addr = addr->sin_addr;
+ server.sin_port = NFS_PORT;
+
+ /* Call sys_connect */
+ ret = socket->ops->connect (socket, (struct sockaddr *) &server,
+ sizeof (server), file->f_flags);
+ if (ret < 0)
+ return 0;
+ return 1;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+static int get_default (int value, int def_value)
+{
+ if (value)
+ return value;
+ else
+ return def_value;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+asmlinkage int sunos_nfs_mount(char *dir_name, int linux_flags, void *data)
+{
+ int ret = -ENODEV;
+ int server_fd;
+ char *the_name;
+ struct nfs_mount_data linux_nfs_mount;
+ struct sunos_nfs_mount_args *sunos_mount = data;
+ dev_t dev;
+
+ /* Ok, here comes the fun part: Linux's nfs mount needs a
+ * socket connection to the server, but SunOS mount does not
+ * require this, so we use the information on the destination
+ * address to create a socket and bind it to a reserved
+ * port on this system
+ */
+ server_fd = sys_socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (server_fd < 0)
+ return -ENXIO;
+
+ if (!sunos_nfs_get_server_fd (server_fd, sunos_mount->addr)){
+ sys_close (server_fd);
+ return -ENXIO;
+ }
+
+ /* Now, bind it to a locally reserved port */
+ linux_nfs_mount.version = NFS_MOUNT_VERSION;
+ linux_nfs_mount.flags = sunos_mount->flags;
+ linux_nfs_mount.addr = *sunos_mount->addr;
+ linux_nfs_mount.root = *sunos_mount->fh;
+ linux_nfs_mount.fd = server_fd;
+
+ linux_nfs_mount.rsize = get_default (sunos_mount->rsize, 8192);
+ linux_nfs_mount.wsize = get_default (sunos_mount->wsize, 8192);
+ linux_nfs_mount.timeo = get_default (sunos_mount->timeo, 10);
+ linux_nfs_mount.retrans = sunos_mount->retrans;
+
+ linux_nfs_mount.acregmin = sunos_mount->acregmin;
+ linux_nfs_mount.acregmax = sunos_mount->acregmax;
+ linux_nfs_mount.acdirmin = sunos_mount->acdirmin;
+ linux_nfs_mount.acdirmax = sunos_mount->acdirmax;
+
+ the_name = getname(sunos_mount->hostname);
+ if(IS_ERR(the_name))
+ return -EFAULT;
+
+ strncpy (linux_nfs_mount.hostname, the_name, 254);
+ linux_nfs_mount.hostname [255] = 0;
+ putname (the_name);
+
+ dev = get_unnamed_dev ();
+
+ ret = do_mount (dev, "", dir_name, "nfs", linux_flags, &linux_nfs_mount);
+ if (ret)
+ put_unnamed_dev(dev);
+
+ return ret;
+}
+
+/* XXXXXXXXXXXXXXXXXXXX */
+asmlinkage int
+sunos_mount(char *type, char *dir, int flags, void *data)
+{
+ int linux_flags = MS_MGC_MSK; /* new semantics */
+ int ret = -EINVAL;
+ char *dev_fname = 0;
+
+ lock_kernel();
+ /* We don't handle the integer fs type */
+ if ((flags & SMNT_NEWTYPE) == 0)
+ goto out;
+
+ /* Do not allow for those flags we don't support */
+ if (flags & (SMNT_GRPID|SMNT_NOSUB|SMNT_MULTI|SMNT_SYS5))
+ goto out;
+
+ if(flags & SMNT_REMOUNT)
+ linux_flags |= MS_REMOUNT;
+ if(flags & SMNT_RDONLY)
+ linux_flags |= MS_RDONLY;
+ if(flags & SMNT_NOSUID)
+ linux_flags |= MS_NOSUID;
+ if(strcmp(type, "ext2") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "iso9660") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "minix") == 0) {
+ dev_fname = (char *) data;
+ } else if(strcmp(type, "nfs") == 0) {
+ ret = sunos_nfs_mount (dir, flags, data);
+ goto out;
+ } else if(strcmp(type, "ufs") == 0) {
+ printk("Warning: UFS filesystem mounts unsupported.\n");
+ ret = -ENODEV;
+ goto out;
+ } else if(strcmp(type, "proc")) {
+ ret = -ENODEV;
+ goto out;
+ }
+ ret = sys_mount(dev_fname, dir, type, linux_flags, NULL);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+extern asmlinkage int sys_setsid(void);
+extern asmlinkage int sys_setpgid(pid_t, pid_t);
+
+asmlinkage int sunos_setpgrp(pid_t pid, pid_t pgid)
+{
+ int ret;
+
+ /* So stupid... */
+ lock_kernel();
+ if((!pid || pid == current->pid) &&
+ !pgid) {
+ sys_setsid();
+ ret = 0;
+ } else {
+ ret = sys_setpgid(pid, pgid);
+ }
+ unlock_kernel();
+ return ret;
+}
+
+/* So stupid... */
+extern asmlinkage int sys32_wait4(__kernel_pid_t32 pid,
+ u32 stat_addr, int options, u32 ru);
+
+asmlinkage int sunos_wait4(__kernel_pid_t32 pid, u32 stat_addr, int options, u32 ru)
+{
+ int ret;
+
+ lock_kernel();
+ ret = sys32_wait4((pid ? pid : ((__kernel_pid_t32)-1)),
+ stat_addr, options, ru);
+ unlock_kernel();
+ return ret;
+}
+
+extern int kill_pg(int, int, int);
+asmlinkage int sunos_killpg(int pgrp, int sig)
+{
+ int ret;
+
+ lock_kernel();
+ ret = kill_pg(pgrp, sig, 0);
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage int sunos_audit(void)
+{
+ lock_kernel();
+ printk ("sys_audit\n");
+ unlock_kernel();
+ return -1;
+}
+
+extern asmlinkage u32 sunos_gethostid(void)
+{
+ u32 ret;
+
+ lock_kernel();
+ ret = (((u32)idprom->id_machtype << 24) | ((u32)idprom->id_sernum));
+ unlock_kernel();
+ return ret;
+}
+
+/* sysconf options, for SunOS compatibility */
+#define _SC_ARG_MAX 1
+#define _SC_CHILD_MAX 2
+#define _SC_CLK_TCK 3
+#define _SC_NGROUPS_MAX 4
+#define _SC_OPEN_MAX 5
+#define _SC_JOB_CONTROL 6
+#define _SC_SAVED_IDS 7
+#define _SC_VERSION 8
+
+/* SunOS sysconf(2): answer the handful of _SC_* queries SunOS
+ * binaries make from the corresponding Linux limits; unknown names
+ * return -1, as sysconf() specifies for unsupported options.
+ */
+extern asmlinkage s32 sunos_sysconf (int name)
+{
+ s32 ret;
+
+ lock_kernel();
+ switch (name){
+ case _SC_ARG_MAX:
+ ret = ARG_MAX;
+ break;
+ case _SC_CHILD_MAX:
+ ret = CHILD_MAX;
+ break;
+ case _SC_CLK_TCK:
+ ret = HZ;
+ break;
+ case _SC_NGROUPS_MAX:
+ ret = NGROUPS_MAX;
+ break;
+ case _SC_OPEN_MAX:
+ ret = OPEN_MAX;
+ break;
+ case _SC_JOB_CONTROL:
+ ret = 1; /* yes, we do support job control */
+ break;
+ case _SC_SAVED_IDS:
+ ret = 1; /* yes, we do support saved uids */
+ break;
+ case _SC_VERSION:
+ /* mhm, POSIX_VERSION is in /usr/include/unistd.h
+ * should it go on /usr/include/linux?
+ */
+ ret = 199009;
+ break;
+ default:
+ ret = -1;
+ break;
+ };
+ unlock_kernel();
+ return ret;
+}
+
+extern asmlinkage int sys_semctl (int semid, int semnum, int cmd, union semun arg);
+extern asmlinkage int sys_semget (key_t key, int nsems, int semflg);
+extern asmlinkage int sys_semop (int semid, struct sembuf *tsops, unsigned nsops);
+
+/* SunOS semsys(2) multiplexor: op 0 = semctl, 1 = semget, 2 = semop.
+ * For semctl the SunOS command numbers differ from Linux's, so they
+ * are remapped before the call; ptr carries the semun value/pointer.
+ */
+asmlinkage int sunos_semsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 ptr)
+{
+ union semun arg4;
+ int ret;
+
+ lock_kernel();
+ switch (op) {
+ case 0:
+ /* Most arguments match on a 1:1 basis but cmd doesn't */
+ switch(arg3) {
+ case 4:
+ arg3=GETPID; break;
+ case 5:
+ arg3=GETVAL; break;
+ case 6:
+ arg3=GETALL; break;
+ case 3:
+ arg3=GETNCNT; break;
+ case 7:
+ arg3=GETZCNT; break;
+ case 8:
+ arg3=SETVAL; break;
+ case 9:
+ arg3=SETALL; break;
+ }
+ /* sys_semctl(): */
+ arg4.__pad=(void *)A(ptr); /* value to modify semaphore to */
+ ret = sys_semctl((int)arg1, (int)arg2, (int)arg3, arg4);
+ break;
+ case 1:
+ /* sys_semget(): */
+ ret = sys_semget((key_t)arg1, (int)arg2, (int)arg3);
+ break;
+ case 2:
+ /* sys_semop(): */
+ ret = sys_semop((int)arg1, (struct sembuf *)A(arg2), (unsigned)arg3);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ };
+ unlock_kernel();
+ return ret;
+}
+
+/* 32-bit (SunOS/SPARC V8 ABI) mirror of struct msgbuf; mtext is a
+ * variable-length trailer, [1] is the old-style flexible array.
+ */
+struct msgbuf32 {
+ s32 mtype;
+ char mtext[1];
+};
+
+/* 32-bit mirror of struct ipc_perm as laid out for SunOS binaries. */
+struct ipc_perm32
+{
+ key_t key;
+ __kernel_uid_t32 uid;
+ __kernel_gid_t32 gid;
+ __kernel_uid_t32 cuid;
+ __kernel_gid_t32 cgid;
+ __kernel_mode_t32 mode;
+ unsigned short seq;
+};
+
+/* 32-bit mirror of struct msqid_ds; the pointer members (msg_first,
+ * msg_last, wwait, rwait) are opaque 32-bit slots never dereferenced
+ * here.
+ */
+struct msqid_ds32
+{
+ struct ipc_perm32 msg_perm;
+ u32 msg_first;
+ u32 msg_last;
+ __kernel_time_t32 msg_stime;
+ __kernel_time_t32 msg_rtime;
+ __kernel_time_t32 msg_ctime;
+ u32 wwait;
+ u32 rwait;
+ unsigned short msg_cbytes;
+ unsigned short msg_qnum;
+ unsigned short msg_qbytes;
+ __kernel_ipc_pid_t32 msg_lspid;
+ __kernel_ipc_pid_t32 msg_lrpid;
+};
+
+/* Copy a user-space 32-bit msqid_ds into the kernel's native form.
+ * Returns 0 on success, -EFAULT on any fault.  Each field is read
+ * into its matching kernel member; the previous version stuffed the
+ * last five fields into msg_ctime by copy-and-paste accident, which
+ * both corrupted msg_ctime and left those members uninitialized.
+ */
+static inline int sunos_msqid_get(struct msqid_ds32 *user,
+ struct msqid_ds *kern)
+{
+ if(get_user(kern->msg_perm.key, &user->msg_perm.key) ||
+ __get_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
+ __get_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
+ __get_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
+ __get_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
+ __get_user(kern->msg_stime, &user->msg_stime) ||
+ __get_user(kern->msg_rtime, &user->msg_rtime) ||
+ __get_user(kern->msg_ctime, &user->msg_ctime) ||
+ __get_user(kern->msg_cbytes, &user->msg_cbytes) ||
+ __get_user(kern->msg_qnum, &user->msg_qnum) ||
+ __get_user(kern->msg_qbytes, &user->msg_qbytes) ||
+ __get_user(kern->msg_lspid, &user->msg_lspid) ||
+ __get_user(kern->msg_lrpid, &user->msg_lrpid))
+ return -EFAULT;
+ return 0;
+}
+
+/* Copy the kernel's native msqid_ds back out to the user's 32-bit
+ * layout.  Returns 0 on success, -EFAULT on any fault.  Each kernel
+ * member is written to its matching user field; the previous version
+ * copied msg_ctime into the last five user fields by copy-and-paste
+ * accident, so userspace saw garbage counts and pids after msgctl.
+ */
+static inline int sunos_msqid_put(struct msqid_ds32 *user,
+ struct msqid_ds *kern)
+{
+ if(put_user(kern->msg_perm.key, &user->msg_perm.key) ||
+ __put_user(kern->msg_perm.uid, &user->msg_perm.uid) ||
+ __put_user(kern->msg_perm.gid, &user->msg_perm.gid) ||
+ __put_user(kern->msg_perm.cuid, &user->msg_perm.cuid) ||
+ __put_user(kern->msg_perm.cgid, &user->msg_perm.cgid) ||
+ __put_user(kern->msg_stime, &user->msg_stime) ||
+ __put_user(kern->msg_rtime, &user->msg_rtime) ||
+ __put_user(kern->msg_ctime, &user->msg_ctime) ||
+ __put_user(kern->msg_cbytes, &user->msg_cbytes) ||
+ __put_user(kern->msg_qnum, &user->msg_qnum) ||
+ __put_user(kern->msg_qbytes, &user->msg_qbytes) ||
+ __put_user(kern->msg_lspid, &user->msg_lspid) ||
+ __put_user(kern->msg_lrpid, &user->msg_lrpid))
+ return -EFAULT;
+ return 0;
+}
+
+/* Copy a 32-bit user msgbuf (type + len bytes of text) into the
+ * kernel bounce buffer.  NOTE(review): &user->mtext and user->mtext
+ * are the same address since mtext is an array, so this is correct
+ * though inconsistent with sunos_msgbuf_put below.
+ */
+static inline int sunos_msgbuf_get(struct msgbuf32 *user, struct msgbuf *kern, int len)
+{
+ if(get_user(kern->mtype, &user->mtype) ||
+ __copy_from_user(kern->mtext, &user->mtext, len))
+ return -EFAULT;
+ return 0;
+}
+
+/* Copy a kernel msgbuf (type + len bytes of text) back out to the
+ * user's 32-bit layout.  Returns 0 on success, -EFAULT on fault.
+ */
+static inline int sunos_msgbuf_put(struct msgbuf32 *user, struct msgbuf *kern, int len)
+{
+ if(put_user(kern->mtype, &user->mtype) ||
+ __copy_to_user(user->mtext, kern->mtext, len))
+ return -EFAULT;
+ return 0;
+}
+
+extern asmlinkage int sys_msgget (key_t key, int msgflg);
+extern asmlinkage int sys_msgrcv (int msqid, struct msgbuf *msgp,
+ size_t msgsz, long msgtyp, int msgflg);
+extern asmlinkage int sys_msgsnd (int msqid, struct msgbuf *msgp,
+ size_t msgsz, int msgflg);
+extern asmlinkage int sys_msgctl (int msqid, int cmd, struct msqid_ds *buf);
+
+/* SunOS msgsys(2) multiplexor: op 0 = msgget, 1 = msgctl, 2 = msgrcv,
+ * 3 = msgsnd.  32-bit user structures are bounced through kernel
+ * copies; the temporary message buffer is freed on every exit path
+ * (the old code leaked it when get_user() or sunos_msgbuf_get()
+ * failed after a successful kmalloc()).
+ */
+asmlinkage int sunos_msgsys(int op, u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ struct sparc_stackf32 *sp;
+ struct msqid_ds kds;
+ struct msgbuf *kmbuf;
+ unsigned long old_fs = get_fs();
+ u32 arg5;
+ int rval;
+
+ lock_kernel();
+ switch(op) {
+ case 0:
+ rval = sys_msgget((key_t)arg1, (int)arg2);
+ break;
+ case 1:
+ if(!sunos_msqid_get((struct msqid_ds32 *)A(arg3), &kds)) {
+ set_fs(KERNEL_DS);
+ rval = sys_msgctl((int)arg1, (int)arg2,
+ (struct msqid_ds *)A(arg3));
+ set_fs(old_fs);
+ if(!rval)
+ rval = sunos_msqid_put((struct msqid_ds32 *)A(arg3),
+ &kds);
+ } else
+ rval = -EFAULT;
+ break;
+ case 2:
+ rval = -EFAULT;
+ kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
+ GFP_KERNEL);
+ if(!kmbuf)
+ break;
+ /* The fifth syscall argument lives in the caller's
+ * 32-bit register window save area; fish it out.
+ */
+ sp = (struct sparc_stackf32 *)
+ (current->tss.kregs->u_regs[UREG_FP] & 0xffffffffUL);
+ if(get_user(arg5, &sp->xxargs[0])) {
+ kfree(kmbuf); /* don't leak the bounce buffer */
+ rval = -EFAULT;
+ break;
+ }
+ set_fs(KERNEL_DS);
+ rval = sys_msgrcv((int)arg1, kmbuf, (size_t)arg3,
+ (long)arg4, (int)arg5);
+ set_fs(old_fs);
+ if(!rval)
+ rval = sunos_msgbuf_put((struct msgbuf32 *)A(arg2),
+ kmbuf, arg3);
+ kfree(kmbuf);
+ break;
+ case 3:
+ rval = -EFAULT;
+ kmbuf = (struct msgbuf *)kmalloc(sizeof(struct msgbuf) + arg3,
+ GFP_KERNEL);
+ if(!kmbuf)
+ break;
+ if(sunos_msgbuf_get((struct msgbuf32 *)A(arg2),
+ kmbuf, arg3)) {
+ kfree(kmbuf); /* don't leak on copy-in failure */
+ break;
+ }
+ set_fs(KERNEL_DS);
+ rval = sys_msgsnd((int)arg1, kmbuf, (size_t)arg3, (int)arg4);
+ set_fs(old_fs);
+ kfree(kmbuf);
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ }
+ unlock_kernel();
+ return rval;
+}
+
+/* 32-bit mirror of struct shmid_ds; shm_pages and attaches are
+ * opaque 32-bit pointer slots that are never dereferenced here.
+ */
+struct shmid_ds32 {
+ struct ipc_perm32 shm_perm;
+ int shm_segsz;
+ __kernel_time_t32 shm_atime;
+ __kernel_time_t32 shm_dtime;
+ __kernel_time_t32 shm_ctime;
+ __kernel_ipc_pid_t32 shm_cpid;
+ __kernel_ipc_pid_t32 shm_lpid;
+ unsigned short shm_nattch;
+ unsigned short shm_npages;
+ u32 shm_pages;
+ u32 attaches;
+};
+
+/* Copy a user-space 32-bit shmid_ds into the kernel's native form,
+ * field by field.  Returns 0 on success, -EFAULT on any fault.
+ */
+static inline int sunos_shmid_get(struct shmid_ds32 *user,
+ struct shmid_ds *kern)
+{
+ if(get_user(kern->shm_perm.key, &user->shm_perm.key) ||
+ __get_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
+ __get_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
+ __get_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
+ __get_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
+ __get_user(kern->shm_segsz, &user->shm_segsz) ||
+ __get_user(kern->shm_atime, &user->shm_atime) ||
+ __get_user(kern->shm_dtime, &user->shm_dtime) ||
+ __get_user(kern->shm_ctime, &user->shm_ctime) ||
+ __get_user(kern->shm_cpid, &user->shm_cpid) ||
+ __get_user(kern->shm_lpid, &user->shm_lpid) ||
+ __get_user(kern->shm_nattch, &user->shm_nattch) ||
+ __get_user(kern->shm_npages, &user->shm_npages))
+ return -EFAULT;
+ return 0;
+}
+
+/* Copy the kernel's native shmid_ds back out to the user's 32-bit
+ * layout, field by field.  Returns 0 on success, -EFAULT on fault.
+ */
+static inline int sunos_shmid_put(struct shmid_ds32 *user,
+ struct shmid_ds *kern)
+{
+ if(put_user(kern->shm_perm.key, &user->shm_perm.key) ||
+ __put_user(kern->shm_perm.uid, &user->shm_perm.uid) ||
+ __put_user(kern->shm_perm.gid, &user->shm_perm.gid) ||
+ __put_user(kern->shm_perm.cuid, &user->shm_perm.cuid) ||
+ __put_user(kern->shm_perm.cgid, &user->shm_perm.cgid) ||
+ __put_user(kern->shm_segsz, &user->shm_segsz) ||
+ __put_user(kern->shm_atime, &user->shm_atime) ||
+ __put_user(kern->shm_dtime, &user->shm_dtime) ||
+ __put_user(kern->shm_ctime, &user->shm_ctime) ||
+ __put_user(kern->shm_cpid, &user->shm_cpid) ||
+ __put_user(kern->shm_lpid, &user->shm_lpid) ||
+ __put_user(kern->shm_nattch, &user->shm_nattch) ||
+ __put_user(kern->shm_npages, &user->shm_npages))
+ return -EFAULT;
+ return 0;
+}
+
+extern asmlinkage int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
+extern asmlinkage int sys_shmctl (int shmid, int cmd, struct shmid_ds *buf);
+extern asmlinkage int sys_shmdt (char *shmaddr);
+extern asmlinkage int sys_shmget (key_t key, int size, int shmflg);
+
+/* SunOS shmsys(2) multiplexor: op 0 = shmat, 1 = shmctl, 2 = shmdt,
+ * 3 = shmget.  shmat returns the attach address on success (SunOS
+ * convention); shmctl bounces the 32-bit shmid_ds through a kernel
+ * copy with the address limit widened for the duration of the call.
+ */
+asmlinkage int sunos_shmsys(int op, u32 arg1, u32 arg2, u32 arg3)
+{
+ struct shmid_ds ksds;
+ unsigned long raddr, old_fs = get_fs();
+ int rval;
+
+ lock_kernel();
+ switch(op) {
+ case 0:
+ /* sys_shmat(): attach a shared memory area */
+ rval = sys_shmat((int)arg1,(char *)A(arg2),(int)arg3,&raddr);
+ if(!rval)
+ rval = (int) raddr;
+ break;
+ case 1:
+ /* sys_shmctl(): modify shared memory area attr. */
+ if(!sunos_shmid_get((struct shmid_ds32 *)A(arg3), &ksds)) {
+ set_fs(KERNEL_DS);
+ rval = sys_shmctl((int)arg1,(int)arg2, &ksds);
+ set_fs(old_fs);
+ if(!rval)
+ rval = sunos_shmid_put((struct shmid_ds32 *)A(arg3),
+ &ksds);
+ } else
+ rval = -EFAULT;
+ break;
+ case 2:
+ /* sys_shmdt(): detach a shared memory area */
+ rval = sys_shmdt((char *)A(arg1));
+ break;
+ case 3:
+ /* sys_shmget(): get a shared memory area */
+ rval = sys_shmget((key_t)arg1,(int)arg2,(int)arg3);
+ break;
+ default:
+ rval = -EINVAL;
+ break;
+ };
+ unlock_kernel();
+ return rval;
+}
+
+/* SunOS open(2): mark the task's personality as BSD (affects other
+ * compat paths, e.g. EWOULDBLOCK mapping) and do a normal open.
+ */
+asmlinkage int sunos_open(u32 filename, int flags, int mode)
+{
+ int ret;
+
+ lock_kernel();
+ current->personality |= PER_BSD;
+ ret = sys_open ((char *)A(filename), flags, mode);
+ unlock_kernel();
+ return ret;
+}
+
+#define SUNOS_EWOULDBLOCK 35
+
+/* see the sunos man page read(2v) for an explanation
+ of this garbage. We use O_NDELAY to mark
+ file descriptors that have been set non-blocking
+ using 4.2BSD style calls. (tridge) */
+
+/* Map Linux -EAGAIN to SunOS's EWOULDBLOCK (35) when fd was made
+ * non-blocking via the 4.2BSD-style interface (marked by O_NDELAY);
+ * otherwise pass the return value through unchanged.
+ */
+static inline int check_nonblock(int ret, int fd)
+{
+ if (ret == -EAGAIN && (current->files->fd[fd]->f_flags & O_NDELAY))
+ return -SUNOS_EWOULDBLOCK;
+ return ret;
+}
+
+extern asmlinkage int sys32_read(unsigned int fd, u32 buf, int count);
+extern asmlinkage int sys32_write(unsigned int fd, u32 buf,int count);
+extern asmlinkage int sys32_recv(int fd, u32 ubuf, int size, unsigned flags);
+extern asmlinkage int sys32_send(int fd, u32 buff, int len, unsigned flags);
+extern asmlinkage int sys32_accept(int fd, u32 sa, u32 addrlen);
+extern asmlinkage int sys32_readv(u32 fd, u32 vector, s32 count);
+extern asmlinkage int sys32_writev(u32 fd, u32 vector, s32 count);
+
+/* read(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_read(unsigned int fd, u32 buf, int count)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_read(fd, buf, count), fd);
+ unlock_kernel();
+ return ret;
+}
+
+/* readv(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_readv(u32 fd, u32 vector, s32 count)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_readv(fd, vector, count), fd);
+ /* Was a second lock_kernel() here by typo: self-deadlock on UP
+ * preemption-free kernels and a permanently held BKL otherwise.
+ */
+ unlock_kernel();
+ return ret;
+}
+
+/* write(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_write(unsigned int fd, u32 buf, int count)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_write(fd, buf, count), fd);
+ unlock_kernel();
+ return ret;
+}
+
+/* writev(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_writev(u32 fd, u32 vector, s32 count)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_writev(fd, vector, count), fd);
+ unlock_kernel();
+ return ret;
+}
+
+/* recv(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_recv(int fd, u32 ubuf, int size, unsigned flags)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_recv(fd, ubuf, size, flags), fd);
+ unlock_kernel();
+ return ret;
+}
+
+/* send(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_send(int fd, u32 buff, int len, unsigned flags)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_send(fd, buff, len, flags), fd);
+ unlock_kernel();
+ return ret;
+}
+
+/* accept(2) with SunOS EWOULDBLOCK semantics applied to the result. */
+asmlinkage int sunos_accept(int fd, u32 sa, u32 addrlen)
+{
+ int ret;
+
+ lock_kernel();
+ ret = check_nonblock(sys32_accept(fd, sa, addrlen), fd);
+ unlock_kernel();
+ return ret;
+}
+
+#define SUNOS_SV_INTERRUPT 2
+
+extern void check_pending(int signum);
+
+/* SunOS sigaction(2) (the sigvec-style interface).  Copies the new
+ * 32-bit sigaction in (minus the trailing restorer word, hence the
+ * sizeof adjustment), returns the old one if requested, and installs
+ * the new handler under the siglock.  SunOS's SV_INTERRUPT flag has
+ * the opposite sense of Linux's SA_RESTART, so the flag bit is
+ * toggled in both directions.
+ */
+asmlinkage int sunos_sigaction(int signum, u32 action, u32 oldaction)
+{
+ struct sigaction32 new_sa, old_sa;
+ struct sigaction *p;
+ const int sigaction_size = sizeof (struct sigaction32) - sizeof (u32);
+
+ current->personality |= PER_BSD;
+ if(signum < 1 || signum > 32)
+ return -EINVAL;
+
+ p = signum - 1 + current->sig->action;
+
+ if(action) {
+ if (signum==SIGKILL || signum==SIGSTOP)
+ return -EINVAL;
+ memset(&new_sa, 0, sizeof(struct sigaction32));
+ if(copy_from_user(&new_sa, (struct sigaction32 *)A(action),
+ sigaction_size))
+ return -EFAULT;
+ /* A non-default, non-ignore handler must at least be a
+ * readable user address.
+ */
+ if (((__sighandler_t)A(new_sa.sa_handler) != SIG_DFL) &&
+ (__sighandler_t)A(new_sa.sa_handler) != SIG_IGN) {
+ if(verify_area(VERIFY_READ,
+ (__sighandler_t)A(new_sa.sa_handler), 1))
+ return -EFAULT;
+ }
+ new_sa.sa_flags ^= SUNOS_SV_INTERRUPT;
+ }
+
+ if (oldaction) {
+ /* In the clone() case we could copy half consistant
+ * state to the user, however this could sleep and
+ * deadlock us if we held the signal lock on SMP. So for
+ * now I take the easy way out and do no locking.
+ * But then again we don't support SunOS lwp's anyways ;-)
+ */
+ old_sa.sa_handler = (unsigned)(u64)(p->sa_handler);
+ old_sa.sa_mask = (sigset_t32)(p->sa_mask);
+ old_sa.sa_flags = (unsigned)(p->sa_flags);
+
+ if (old_sa.sa_flags & SA_RESTART)
+ old_sa.sa_flags &= ~SA_RESTART;
+ else
+ old_sa.sa_flags |= SUNOS_SV_INTERRUPT;
+ if (copy_to_user((struct sigaction32 *)A(oldaction),
+ &old_sa, sigaction_size))
+ return -EFAULT;
+ }
+
+ if (action) {
+ spin_lock_irq(&current->sig->siglock);
+ p->sa_handler = (__sighandler_t)A(new_sa.sa_handler);
+ p->sa_mask = (sigset_t)(new_sa.sa_mask);
+ p->sa_flags = new_sa.sa_flags;
+ p->sa_restorer = (void (*)(void))0;
+ check_pending(signum);
+ spin_unlock_irq(&current->sig->siglock);
+ }
+ return 0;
+}
+
+
+extern asmlinkage int sys32_setsockopt(int fd, int level, int optname,
+ u32 optval, int optlen);
+extern asmlinkage int sys32_getsockopt(int fd, int level, int optname,
+ u32 optval, u32 optlen);
+
+/* SunOS setsockopt(2): SunOS numbers the IP multicast options 2..6,
+ * Linux uses 32..36, so shift those by 30 before passing through.
+ */
+asmlinkage int sunos_setsockopt(int fd, int level, int optname, u32 optval,
+ int optlen)
+{
+ int tr_opt = optname;
+ int ret;
+
+ lock_kernel();
+ if (level == SOL_IP) {
+ /* Multicast socketopts (ttl, membership) */
+ if (tr_opt >=2 && tr_opt <= 6)
+ tr_opt += 30;
+ }
+ ret = sys32_setsockopt(fd, level, tr_opt, optval, optlen);
+ unlock_kernel();
+ return ret;
+}
+
+/* SunOS getsockopt(2): same IP multicast option renumbering as
+ * sunos_setsockopt() (SunOS 2..6 -> Linux 32..36).
+ */
+asmlinkage int sunos_getsockopt(int fd, int level, int optname,
+ u32 optval, u32 optlen)
+{
+ int tr_opt = optname;
+ int ret;
+
+ lock_kernel();
+ if (level == SOL_IP) {
+ /* Multicast socketopts (ttl, membership) */
+ if (tr_opt >=2 && tr_opt <= 6)
+ tr_opt += 30;
+ }
+ ret = sys32_getsockopt(fd, level, tr_opt, optval, optlen);
+ unlock_kernel();
+ return ret;
+}
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index a74d0ffbd..eda0ff326 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -1,4 +1,4 @@
-/* $Id: systbls.S,v 1.13 1997/06/04 13:05:29 jj Exp $
+/* $Id: systbls.S,v 1.21 1997/07/05 07:09:17 davem Exp $
* systbls.S: System call entry point tables for OS compatibility.
* The native Linux system call table lives here also.
*
@@ -30,7 +30,7 @@ sys_call_table32:
/*50*/ .xword sys_getegid, sys32_acct, sys_nis_syscall, sys_nis_syscall, sys32_ioctl
.xword sys32_reboot, sys_nis_syscall, sys32_symlink, sys32_readlink, sys32_execve
/*60*/ .xword sys_umask, sys32_chroot, sys32_newfstat, sys_nis_syscall, sys_getpagesize
- .xword sys_nis_syscall, sys_vfork, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+ .xword sys32_msync, sys_vfork, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*70*/ .xword sys_nis_syscall, sys32_mmap, sys_nis_syscall, sys32_munmap, sys32_mprotect
.xword sys_nis_syscall, sys_vhangup, sys_nis_syscall, sys_nis_syscall, sys32_getgroups
/*80*/ .xword sys32_setgroups, sys_getpgrp, sys_nis_syscall, sys32_setitimer, sys_nis_syscall
@@ -42,7 +42,7 @@ sys_call_table32:
/*110*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.xword sys_nis_syscall, sys32_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_nis_syscall
/*120*/ .xword sys32_readv, sys32_writev, sys32_settimeofday, sys_fchown, sys_fchmod
- .xword sys_nis_syscall, sys_setreuid, sys_setregid, sys32_rename, sys32_truncate
+ .xword sys_nis_syscall, sys32_setreuid, sys_setregid, sys32_rename, sys32_truncate
/*130*/ .xword sys32_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.xword sys_nis_syscall, sys32_mkdir, sys32_rmdir, sys_nis_syscall, sys_nis_syscall
/*140*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getrlimit
@@ -53,15 +53,15 @@ sys_call_table32:
.xword sys32_quotactl, sys_nis_syscall, sys32_mount, sys32_ustat, sys_nis_syscall
/*170*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_getdents
.xword sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_sigpending, sys32_no_modules
+/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_sigpending, sys32_query_module
.xword sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys32_newuname
-/*190*/ .xword sys32_no_modules, sys32_personality, sys_prof, sys_break, sys_lock
+/*190*/ .xword sys32_init_module, sys32_personality, sys_prof, sys_break, sys_lock
.xword sys_mpx, sys_ulimit, sys_getppid, sparc32_sigaction, sys_sgetmask
/*200*/ .xword sys_ssetmask, sys_sigsuspend, sys32_newlstat, sys32_uselib, old32_readdir
.xword sys_nis_syscall, sys32_socketcall, sys32_syslog, sys32_olduname, sys_nis_syscall
/*210*/ .xword sys_idle, sys_nis_syscall, sys32_waitpid, sys32_swapoff, sys32_sysinfo
.xword sys32_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys32_adjtimex
-/*220*/ .xword sys32_sigprocmask, sys32_no_modules, sys32_no_modules, sys32_no_modules, sys_getpgid
+/*220*/ .xword sys32_sigprocmask, sys32_create_module, sys32_delete_module, sys32_get_kernel_syms, sys_getpgid
.xword sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
/*230*/ .xword sys32_llseek, sys32_time, sys_nis_syscall, sys_stime, sys_nis_syscall
.xword sys_nis_syscall, sys32_llseek, sys32_mlock, sys32_munlock, sys_mlockall
@@ -94,29 +94,29 @@ sys_call_table:
/*80*/ .xword sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
.xword sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
/*90*/ .xword sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
- .xword sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*100*/ .xword sys_getpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
- .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*110*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
- .xword sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_nis_syscall
+ .xword sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
+/*100*/ .xword sys_getpriority, sys_send, sys_recv, sys_nis_syscall, sys_bind
+ .xword sys_setsockopt, sys_listen, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*110*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_recvmsg, sys_sendmsg
+ .xword sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_nis_syscall
/*120*/ .xword sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
- .xword sys_nis_syscall, sys_setreuid, sys_setregid, sys_rename, sys_truncate
-/*130*/ .xword sys_ftruncate, sys_flock, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
- .xword sys_nis_syscall, sys_mkdir, sys_rmdir, sys_nis_syscall, sys_nis_syscall
-/*140*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getrlimit
+ .xword sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
+/*130*/ .xword sys_ftruncate, sys_flock, sys_nis_syscall, sys_sendto, sys_shutdown
+ .xword sys_socketpair, sys_mkdir, sys_rmdir, sys_nis_syscall, sys_nis_syscall
+/*140*/ .xword sys_nis_syscall, sys_getpeername, sys_nis_syscall, sys_nis_syscall, sys_getrlimit
.xword sys_setrlimit, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*150*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
+/*150*/ .xword sys_getsockname, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.xword sys_nis_syscall, sys_nis_syscall, sys_statfs, sys_fstatfs, sys_umount
/*160*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_setdomainname, sys_nis_syscall
.xword sys_quotactl, sys_nis_syscall, sys_mount, sys_ustat, sys_nis_syscall
/*170*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_getdents
.xword sys_setsid, sys_fchdir, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
-/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_nis_syscall
+/*180*/ .xword sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_sigpending, sys_query_module
.xword sys_setpgid, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_newuname
/*190*/ .xword sys_init_module, sys_personality, sys_prof, sys_break, sys_lock
.xword sys_mpx, sys_ulimit, sys_getppid, sparc_sigaction, sys_sgetmask
/*200*/ .xword sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_nis_syscall
- .xword sys_nis_syscall, sys_socketcall, sys_syslog, sys_nis_syscall, sys_nis_syscall
+ .xword sys_nis_syscall, sys_nis_syscall, sys_syslog, sys_nis_syscall, sys_nis_syscall
/*210*/ .xword sys_idle, sys_nis_syscall, sys_waitpid, sys_swapoff, sys_sysinfo
.xword sys_ipc, sys_sigreturn, sys_clone, sys_nis_syscall, sys_adjtimex
/*220*/ .xword sys_sigprocmask, sys_create_module, sys_delete_module, sys_get_kernel_syms, sys_getpgid
@@ -130,72 +130,72 @@ sys_call_table:
/* Now the 32-bit SunOS syscall table. */
- .align 4
+ .align 8
.globl sunos_sys_table
sunos_sys_table:
/*0*/ .xword sunos_indir, sys_exit, sys_fork
.xword sunos_read, sunos_write, sunos_open
- .xword sys_close, sunos_wait4, sys_creat
- .xword sys_link, sys_unlink, sunos_execv
- .xword sys_chdir, sunos_nosys, sys_mknod
- .xword sys_chmod, sys_chown, sunos_brk
- .xword sunos_nosys, sys_lseek, sunos_getpid
+ .xword sys_close, sunos_wait4, sys32_creat
+ .xword sys32_link, sys32_unlink, sunos_execv
+ .xword sys32_chdir, sunos_nosys, sys32_mknod
+ .xword sys32_chmod, sys32_chown, sunos_brk
+ .xword sunos_nosys, sys32_lseek, sunos_getpid
.xword sunos_nosys, sunos_nosys, sunos_nosys
.xword sunos_getuid, sunos_nosys, sys_ptrace
.xword sunos_nosys, sunos_nosys, sunos_nosys
.xword sunos_nosys, sunos_nosys, sunos_nosys
- .xword sys_access, sunos_nosys, sunos_nosys
- .xword sys_sync, sys_kill, sys_newstat
- .xword sunos_nosys, sys_newlstat, sys_dup
+ .xword sys32_access, sunos_nosys, sunos_nosys
+ .xword sys_sync, sys_kill, sys32_newstat
+ .xword sunos_nosys, sys32_newlstat, sys_dup
.xword sys_pipe, sunos_nosys, sys_profil
.xword sunos_nosys, sunos_nosys, sunos_getgid
.xword sunos_nosys, sunos_nosys
-/*50*/ .xword sunos_nosys, sys_acct, sunos_nosys
- .xword sunos_mctl, sunos_ioctl, sys_reboot
- .xword sunos_nosys, sys_symlink, sys_readlink
- .xword sys32_execve, sys_umask, sys_chroot
- .xword sys_newfstat, sunos_nosys, sys_getpagesize
- .xword sys_msync, sys_vfork, sunos_nosys
+/*50*/ .xword sunos_nosys, sys32_acct, sunos_nosys
+ .xword sunos_mctl, sunos_ioctl, sys32_reboot
+ .xword sunos_nosys, sys32_symlink, sys32_readlink
+ .xword sys32_execve, sys_umask, sys32_chroot
+ .xword sys32_newfstat, sunos_nosys, sys_getpagesize
+ .xword sys32_msync, sys_vfork, sunos_nosys
.xword sunos_nosys, sunos_sbrk, sunos_sstk
- .xword sunos_mmap, sunos_vadvise, sys_munmap
- .xword sys_mprotect, sunos_madvise, sys_vhangup
- .xword sunos_nosys, sunos_mincore, sys_getgroups
- .xword sys_setgroups, sys_getpgrp, sunos_setpgrp
- .xword sys_setitimer, sunos_nosys, sys_swapon
- .xword sys_getitimer, sys_gethostname, sys_sethostname
+ .xword sunos_mmap, sunos_vadvise, sys32_munmap
+ .xword sys32_mprotect, sunos_madvise, sys_vhangup
+ .xword sunos_nosys, sunos_mincore, sys32_getgroups
+ .xword sys32_setgroups, sys_getpgrp, sunos_setpgrp
+ .xword sys32_setitimer, sunos_nosys, sys32_swapon
+ .xword sys32_getitimer, sys32_gethostname, sys32_sethostname
.xword sunos_getdtablesize, sys_dup2, sunos_nop
- .xword sys_fcntl, sunos_select, sunos_nop
+ .xword sys32_fcntl, sunos_select, sunos_nop
.xword sys_fsync, sys_setpriority, sys_socket
- .xword sys_connect, sunos_accept
+ .xword sys32_connect, sunos_accept
/*100*/ .xword sys_getpriority, sunos_send, sunos_recv
- .xword sunos_nosys, sys_bind, sunos_setsockopt
+ .xword sunos_nosys, sys32_bind, sunos_setsockopt
.xword sys_listen, sunos_nosys, sunos_sigaction
.xword sunos_sigblock, sunos_sigsetmask, sys_sigpause
- .xword sys_sigstack, sys_recvmsg, sys_sendmsg
- .xword sunos_nosys, sys_gettimeofday, sys_getrusage
+ .xword sys32_sigstack, sys32_recvmsg, sys32_sendmsg
+ .xword sunos_nosys, sys_gettimeofday, sys32_getrusage
.xword sunos_getsockopt, sunos_nosys, sunos_readv
.xword sunos_writev, sys_settimeofday, sys_fchown
- .xword sys_fchmod, sys_recvfrom, sys_setreuid
- .xword sys_setregid, sys_rename, sys_truncate
- .xword sys_ftruncate, sys_flock, sunos_nosys
- .xword sys_sendto, sys_shutdown, sys_socketpair
- .xword sys_mkdir, sys_rmdir, sys_utimes
- .xword sys_sigreturn, sunos_nosys, sys_getpeername
- .xword sunos_gethostid, sunos_nosys, sys_getrlimit
- .xword sys_setrlimit, sunos_killpg, sunos_nosys
+ .xword sys_fchmod, sys32_recvfrom, sys32_setreuid
+ .xword sys_setregid, sys32_rename, sys32_truncate
+ .xword sys32_ftruncate, sys_flock, sunos_nosys
+ .xword sys32_sendto, sys_shutdown, sys_socketpair
+ .xword sys32_mkdir, sys32_rmdir, sys32_utimes
+ .xword sys_sigreturn, sunos_nosys, sys32_getpeername
+ .xword sunos_gethostid, sunos_nosys, sys32_getrlimit
+ .xword sys32_setrlimit, sunos_killpg, sunos_nosys
.xword sunos_nosys, sunos_nosys
-/*150*/ .xword sys_getsockname, sunos_nosys, sunos_nosys
- .xword sunos_poll, sunos_nosys, sunos_nosys
- .xword sunos_getdirentries, sys_statfs, sys_fstatfs
- .xword sys_umount, sunos_nosys, sunos_nosys
- .xword sunos_getdomainname, sys_setdomainname
- .xword sunos_nosys, sys_quotactl, sunos_nosys
- .xword sunos_mount, sys_ustat, sunos_semsys
+/*150*/ .xword sys32_getsockname, sunos_nosys, sunos_nosys
+ .xword sys32_poll, sunos_nosys, sunos_nosys
+ .xword sunos_getdirentries, sys32_statfs, sys32_fstatfs
+ .xword sys32_umount, sunos_nosys, sunos_nosys
+ .xword sunos_getdomainname, sys32_setdomainname
+ .xword sunos_nosys, sys32_quotactl, sunos_nosys
+ .xword sunos_mount, sys32_ustat, sunos_semsys
.xword sunos_nosys, sunos_shmsys, sunos_audit
.xword sunos_nosys, sunos_getdents, sys_setsid
.xword sys_fchdir, sunos_nosys, sunos_nosys
.xword sunos_nosys, sunos_nosys, sunos_nosys
- .xword sunos_nosys, sys_sigpending, sunos_nosys
+ .xword sunos_nosys, sys32_sigpending, sunos_nosys
.xword sys_setpgid, sunos_pathconf, sunos_fpathconf
.xword sunos_sysconf, sunos_uname, sunos_nosys
.xword sunos_nosys, sunos_nosys, sunos_nosys
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 3f15fcb54..ad40a5fb5 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1,4 +1,4 @@
-/* $Id: time.c,v 1.2 1997/04/10 03:02:35 davem Exp $
+/* $Id: time.c,v 1.3 1997/06/17 13:25:29 jj Exp $
* time.c: UltraSparc timer and TOD clock support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -146,9 +146,6 @@ static int has_low_battery(void)
return (data1 == data2); /* Was the write blocked? */
}
-/* XXX HACK HACK HACK, delete me soon */
-static struct linux_prom_ranges XXX_sbus_ranges[PROMREG_MAX];
-static int XXX_sbus_nranges;
/* Probe for the real time clock chip. */
__initfunc(static void clock_probe(void))
@@ -157,6 +154,10 @@ __initfunc(static void clock_probe(void))
char model[128];
int node, sbusnd, err;
+ /* XXX HACK HACK HACK, delete me soon */
+ struct linux_prom_ranges XXX_sbus_ranges[PROMREG_MAX];
+ int XXX_sbus_nranges;
+
node = prom_getchild(prom_root_node);
sbusnd = prom_searchsiblings(node, "sbus");
node = prom_getchild(sbusnd);
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 824a3ddb4..ac3e79958 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1,14 +1,15 @@
-/* $Id: traps.c,v 1.19 1997/06/05 06:22:49 davem Exp $
- * arch/sparc/kernel/traps.c
+/* $Id: traps.c,v 1.29 1997/07/05 09:52:38 davem Exp $
+ * arch/sparc64/kernel/traps.c
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
/*
- * I hate traps on the sparc, grrr...
+ * I like traps on v9, :))))
*/
+#include <linux/config.h>
#include <linux/sched.h> /* for jiffies */
#include <linux/kernel.h>
#include <linux/signal.h>
@@ -123,6 +124,8 @@ void syscall_trace_entry(unsigned long g1, struct pt_regs *regs)
int i;
#endif
+ if(strcmp(current->comm, "bash.sunos"))
+ return;
printk("SYS[%s:%d]: PC(%016lx) <%3d> ",
current->comm, current->pid, regs->tpc, (int)g1);
#ifdef VERBOSE_SYSCALL_TRACING
@@ -153,53 +156,12 @@ void syscall_trace_entry(unsigned long g1, struct pt_regs *regs)
unsigned long syscall_trace_exit(unsigned long retval, struct pt_regs *regs)
{
- printk("ret[%016lx]\n", retval);
+ if(!strcmp(current->comm, "bash.sunos"))
+ printk("ret[%016lx]\n", retval);
return retval;
}
#endif /* SYSCALL_TRACING */
-#if 0
-void user_rtrap_report(struct pt_regs *regs)
-{
- static int hits = 0;
-
- /* Bwahhhhrggg... */
- if(regs->tpc == 0x1f294UL && ++hits == 2) {
- register unsigned long ctx asm("o4");
- register unsigned long paddr asm("o5");
- unsigned long cwp, wstate;
-
- printk("RT[%016lx:%016lx] ", regs->tpc, regs->u_regs[UREG_I6]);
- __asm__ __volatile__("rdpr %%cwp, %0" : "=r" (cwp));
- __asm__ __volatile__("rdpr %%wstate, %0" : "=r" (wstate));
- printk("CWP[%d] WSTATE[%016lx]\n"
- "TSS( ksp[%016lx] kpc[%016lx] wstate[%016lx] w_saved[%d] flgs[%x]"
- " cur_ds[%d] )\n", cwp, wstate,
- current->tss.ksp, current->tss.kpc, current->tss.wstate,
- (int) current->tss.w_saved, current->tss.flags,
- current->tss.current_ds);
- __asm__ __volatile__("
- rdpr %%pstate, %%o3
- wrpr %%o3, %2, %%pstate
- mov %%g7, %%o5
- mov 0x10, %%o4
- ldxa [%%o4] %3, %%o4
- wrpr %%o3, 0x0, %%pstate
- " : "=r" (ctx), "=r" (paddr)
- : "i" (PSTATE_MG|PSTATE_IE), "i" (ASI_DMMU));
-
- printk("MMU[ppgd(%016lx)sctx(%d)] ", paddr, ctx);
- printk("mm->context(%016lx) mm->pgd(%p)\n",
- current->mm->context, current->mm->pgd);
- printk("TASK: signal[%016lx] blocked[%016lx]\n",
- current->signal, current->blocked);
- show_regs(regs);
- while(1)
- barrier();
- }
-}
-#endif
-
void bad_trap (struct pt_regs *regs, long lvl)
{
lock_kernel ();
@@ -221,168 +183,44 @@ void bad_trap_tl1 (struct pt_regs *regs, long lvl)
{
char buffer[24];
- lock_kernel ();
+ lock_kernel();
sprintf (buffer, "Bad trap %lx at tl>0", lvl);
die_if_kernel (buffer, regs);
+ unlock_kernel();
}
void data_access_exception (struct pt_regs *regs)
{
- lock_kernel ();
- printk ("Unhandled data access exception ");
- printk("sfsr %016lx sfar %016lx\n", spitfire_get_dsfsr(), spitfire_get_sfar());
- die_if_kernel("Data access exception", regs);
+ send_sig(SIGSEGV, current, 1);
}
void do_dae(struct pt_regs *regs)
{
- printk("DAE: at %016lx\n", regs->tpc);
- while(1)
- barrier();
+ send_sig(SIGSEGV, current, 1);
}
void instruction_access_exception (struct pt_regs *regs)
{
- lock_kernel ();
- printk ("Unhandled instruction access exception ");
- printk("sfsr %016lx\n", spitfire_get_isfsr());
- die_if_kernel("Instruction access exception", regs);
+ send_sig(SIGSEGV, current, 1);
}
void do_iae(struct pt_regs *regs)
{
- printk("IAE at %016lx\n", regs->tpc);
- while(1)
- barrier();
-}
-
-static unsigned long init_fsr = 0x0UL;
-static unsigned int init_fregs[64] __attribute__ ((aligned (64))) =
- { ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U,
- ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U };
-
-void do_fpdis(struct pt_regs *regs)
-{
- lock_kernel();
-
- regs->tstate |= TSTATE_PEF;
- fprs_write(FPRS_FEF);
-
- /* This is allowed now because the V9 ABI varargs passes floating
- * point args in floating point registers, so vsprintf() and sprintf()
- * cause problems. Luckily we never actually pass floating point values
- * to those routines in the kernel and the code generated just does
- * stores of them to the stack. Therefore, for the moment this fix
- * is sufficient. -DaveM
- */
- if(regs->tstate & TSTATE_PRIV)
- goto out;
-
-#ifndef __SMP__
- if(last_task_used_math == current)
- goto out;
- if(last_task_used_math) {
- struct task_struct *fptask = last_task_used_math;
-
- if(fptask->tss.flags & SPARC_FLAG_32BIT)
- fpsave32((unsigned long *)&fptask->tss.float_regs[0],
- &fptask->tss.fsr);
- else
- fpsave((unsigned long *)&fptask->tss.float_regs[0],
- &fptask->tss.fsr);
- }
- last_task_used_math = current;
- if(current->used_math) {
- if(current->tss.flags & SPARC_FLAG_32BIT)
- fpload32(&current->tss.float_regs[0],
- &current->tss.fsr);
- else
- fpload(&current->tss.float_regs[0],
- &current->tss.fsr);
- } else {
- /* Set inital sane state. */
- fpload(&init_fregs[0], &init_fsr);
- current->used_math = 1;
- }
-#else
- if(!current->used_math) {
- fpload(&init_fregs[0], &init_fsr);
- current->used_math = 1;
- } else {
- if(current->tss.flags & SPARC_FLAG_32BIT)
- fpload32(&current->tss.float_regs[0],
- &current->tss.fsr);
- else
- fpload(&current->tss.float_regs[0],
- &current->tss.fsr);
- }
- current->flags |= PF_USEDFPU;
-#endif
-#ifndef __SMP__
-out:
-#endif
- unlock_kernel();
+ send_sig(SIGSEGV, current, 1);
}
-static unsigned long fake_regs[32] __attribute__ ((aligned (8)));
-static unsigned long fake_fsr;
-
void do_fpe_common(struct pt_regs *regs)
{
- static int calls = 0;
-#ifndef __SMP__
- struct task_struct *fpt = last_task_used_math;
-#else
- struct task_struct *fpt = current;
-#endif
-
- lock_kernel();
- fprs_write(FPRS_FEF);
-
-#ifndef __SMP__
- if(!fpt) {
-#else
- if(!(fpt->flags & PF_USEDFPU)) {
-#endif
- fpsave(&fake_regs[0], &fake_fsr);
- regs->tstate &= ~(TSTATE_PEF);
- goto out;
- }
- if(fpt->tss.flags & SPARC_FLAG_32BIT)
- fpsave32((unsigned long *)&fpt->tss.float_regs[0], &fpt->tss.fsr);
- else
- fpsave((unsigned long *)&fpt->tss.float_regs[0], &fpt->tss.fsr);
- fpt->tss.sig_address = regs->tpc;
- fpt->tss.sig_desc = SUBSIG_FPERROR;
-#ifdef __SMP__
- fpt->flags &= ~PF_USEDFPU;
-#endif
if(regs->tstate & TSTATE_PRIV) {
- printk("WARNING: FPU exception from kernel mode. at pc=%016lx\n",
- regs->tpc);
regs->tpc = regs->tnpc;
regs->tnpc += 4;
- calls++;
- if(calls > 2)
- die_if_kernel("Too many Penguin-FPU traps from kernel mode",
- regs);
- goto out;
+ } else {
+ lock_kernel();
+ current->tss.sig_address = regs->tpc;
+ current->tss.sig_desc = SUBSIG_FPERROR;
+ send_sig(SIGFPE, current, 1);
+ unlock_kernel();
}
- send_sig(SIGFPE, fpt, 1);
-#ifndef __SMP__
- last_task_used_math = NULL;
-#endif
- regs->tstate &= ~TSTATE_PEF;
- if(calls > 0)
- calls = 0;
-out:
- unlock_kernel();
}
void do_fpieee(struct pt_regs *regs)
@@ -397,16 +235,16 @@ void do_fpother(struct pt_regs *regs)
void do_tof(struct pt_regs *regs)
{
- printk("TOF: at %016lx\n", regs->tpc);
- while(1)
- barrier();
+ if(regs->tstate & TSTATE_PRIV)
+ die_if_kernel("Penguin overflow trap from kernel mode", regs);
+ current->tss.sig_address = regs->tpc;
+ current->tss.sig_desc = SUBSIG_TAG; /* as good as any */
+ send_sig(SIGEMT, current, 1);
}
void do_div0(struct pt_regs *regs)
{
- printk("DIV0: at %016lx\n", regs->tpc);
- while(1)
- barrier();
+ send_sig(SIGILL, current, 1);
}
void instruction_dump (unsigned int *pc)
@@ -426,7 +264,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
/* Amuse the user. */
printk(
" \\|/ ____ \\|/\n"
-" \"@'/ .` \\`@\"\n"
+" \"@'/ .. \\`@\"\n"
" /_| \\__/ |_\\\n"
" \\__U_/\n");
@@ -437,17 +275,15 @@ void die_if_kernel(char *str, struct pt_regs *regs)
struct reg_window *rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
- if(rw) {
+ /* Stop the back trace when we hit userland or we
+ * find some badly aligned kernel stack.
+ */
+ while(rw &&
+ (((unsigned long) rw) >= PAGE_OFFSET) &&
+ !(((unsigned long) rw) & 0x7)) {
printk("Caller[%016lx]\n", rw->ins[7]);
rw = (struct reg_window *)
(rw->ins[6] + STACK_BIAS);
- if(rw) {
- printk("Caller[%016lx]\n", rw->ins[7]);
- rw = (struct reg_window *)
- (rw->ins[6] + STACK_BIAS);
- if(rw)
- printk("Caller[%016lx]\n", rw->ins[7]);
- }
}
}
printk("Instruction DUMP:");
@@ -465,16 +301,6 @@ void do_illegal_instruction(struct pt_regs *regs)
lock_kernel();
if(tstate & TSTATE_PRIV)
die_if_kernel("Kernel illegal instruction", regs);
-#if 1
- {
- unsigned int insn;
-
- printk("Ill instr. at pc=%016lx ", pc);
- get_user(insn, ((unsigned int *)pc));
- printk("insn=[%08x]\n", insn);
- show_regs(regs);
- }
-#endif
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_ILLINST;
send_sig(SIGILL, current, 1);
@@ -483,13 +309,11 @@ void do_illegal_instruction(struct pt_regs *regs)
void mem_address_unaligned(struct pt_regs *regs)
{
- printk("AIEEE: do_mna at %016lx\n", regs->tpc);
- show_regs(regs);
if(regs->tstate & TSTATE_PRIV) {
- printk("MNA from kernel, spinning\n");
- sti();
- while(1)
- barrier();
+ extern void kernel_unaligned_trap(struct pt_regs *regs,
+ unsigned int insn);
+
+ return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
} else {
current->tss.sig_address = regs->tpc;
current->tss.sig_desc = SUBSIG_PRIVINST;
@@ -499,16 +323,17 @@ void mem_address_unaligned(struct pt_regs *regs)
void do_privop(struct pt_regs *regs)
{
- printk("PRIVOP: at %016lx\n", regs->tpc);
- while(1)
- barrier();
+ current->tss.sig_address = regs->tpc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGILL, current, 1);
}
void do_privact(struct pt_regs *regs)
{
- printk("PRIVACT: at %016lx\n", regs->tpc);
- while(1)
- barrier();
+ current->tss.sig_address = regs->tpc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGILL, current, 1);
+ unlock_kernel();
}
void do_priv_instruction(struct pt_regs *regs, unsigned long pc, unsigned long npc,
@@ -537,11 +362,6 @@ void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, unsigned lon
}
current->tss.sig_address = pc;
current->tss.sig_desc = SUBSIG_PRIVINST;
-#if 0
- show_regs (regs);
- instruction_dump ((unsigned long *) regs->tpc);
- printk ("do_MNA!\n");
-#endif
send_sig(SIGBUS, current, 1);
unlock_kernel();
}
@@ -554,6 +374,134 @@ void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc
unlock_kernel();
}
+/* Trap level 1 stuff or other traps we should never see... */
+void do_cee(struct pt_regs *regs)
+{
+ die_if_kernel("TL0: Cache Error Exception", regs);
+}
+
+void do_cee_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Cache Error Exception", regs);
+}
+
+void do_dae_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Data Access Exception", regs);
+}
+
+void do_iae_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Instruction Access Exception", regs);
+}
+
+void do_div0_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: DIV0 Exception", regs);
+}
+
+void do_fpdis_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: FPU Disabled", regs);
+}
+
+void do_fpieee_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: FPU IEEE Exception", regs);
+}
+
+void do_fpother_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: FPU Other Exception", regs);
+}
+
+void do_ill_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Illegal Instruction Exception", regs);
+}
+
+void do_irq_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: IRQ Exception", regs);
+}
+
+void do_lddfmna(struct pt_regs *regs)
+{
+ die_if_kernel("TL0: LDDF Exception", regs);
+}
+
+void do_lddfmna_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: LDDF Exception", regs);
+}
+
+void do_stdfmna(struct pt_regs *regs)
+{
+ die_if_kernel("TL0: STDF Exception", regs);
+}
+
+void do_stdfmna_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: STDF Exception", regs);
+}
+
+void do_paw(struct pt_regs *regs)
+{
+ die_if_kernel("TL0: Phys Watchpoint Exception", regs);
+}
+
+void do_paw_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Phys Watchpoint Exception", regs);
+}
+
+void do_vaw(struct pt_regs *regs)
+{
+ die_if_kernel("TL0: Virt Watchpoint Exception", regs);
+}
+
+void do_vaw_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Virt Watchpoint Exception", regs);
+}
+
+void do_tof_tl1(struct pt_regs *regs)
+{
+ die_if_kernel("TL1: Tag Overflow Exception", regs);
+}
+
+#ifdef CONFIG_EC_FLUSH_TRAP
+void cache_flush_trap(struct pt_regs *regs)
+{
+#ifndef __SMP__
+ unsigned node = linux_cpus[get_cpuid()].prom_node;
+#else
+#error SMP not supported on sparc64 yet
+#endif
+ int size = prom_getintdefault(node, "ecache-size", 512*1024);
+ int i, j;
+ unsigned long addr, page_nr;
+
+ regs->tpc = regs->tnpc;
+ regs->tnpc = regs->tnpc + 4;
+ if (!suser()) return;
+ size >>= PAGE_SHIFT;
+ addr = PAGE_OFFSET - PAGE_SIZE;
+ for (i = 0; i < size; i++) {
+ do {
+ addr += PAGE_SIZE;
+ page_nr = MAP_NR(addr);
+ if (page_nr >= max_mapnr) {
+ return;
+ }
+ } while (!PageReserved (mem_map + page_nr));
+ /* E-Cache line size is 64B. Let us pollute it :)) */
+ for (j = 0; j < PAGE_SIZE; j += 64)
+ __asm__ __volatile__ ("ldx [%0 + %1], %%g1" : : "r" (j), "r" (addr) : "g1");
+ }
+}
+#endif
+
void trap_init(void)
{
}
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 8db708f07..73bda96d9 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -1,9 +1,11 @@
-/* $Id: ttable.S,v 1.13 1997/06/02 06:33:34 davem Exp $
+/* $Id: ttable.S,v 1.18 1997/07/05 09:52:41 davem Exp $
* ttable.S: Sparc V9 Trap Table(s) with SpitFire extensions.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
+
.globl sparc64_ttable_tl0, sparc64_ttable_tl1
sparc64_ttable_tl0:
@@ -18,7 +20,7 @@ tl0_privop: TRAP(do_privop)
tl0_resv012: BTRAP(0x12) BTRAP(0x13) BTRAP(0x14) BTRAP(0x15) BTRAP(0x16) BTRAP(0x17)
tl0_resv018: BTRAP(0x18) BTRAP(0x19) BTRAP(0x1a) BTRAP(0x1b) BTRAP(0x1c) BTRAP(0x1d)
tl0_resv01e: BTRAP(0x1e) BTRAP(0x1f)
-tl0_fpdis: TRAP(do_fpdis)
+tl0_fpdis: TRAP_NOSAVE(do_fpdis)
tl0_fpieee: TRAP(do_fpieee)
tl0_fpother: TRAP(do_fpother)
tl0_tof: TRAP(do_tof)
@@ -124,7 +126,13 @@ tl0_resv15a: BTRAP(0x15a) BTRAP(0x15b) BTRAP(0x15c) BTRAP(0x15d) BTRAP(0x15e)
tl0_resv15f: BTRAP(0x15f) BTRAP(0x160) BTRAP(0x161) BTRAP(0x162) BTRAP(0x163)
tl0_resv164: BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
tl0_resv169: BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c) BTRAP(0x16d)
-tl0_resv16e: BTRAP(0x16e) BTRAP(0x16f) BTRAP(0x170) BTRAP(0x171) BTRAP(0x172)
+tl0_gsctx: TRAP(sparc64_get_context) TRAP(sparc64_set_context)
+tl0_resv170: BTRAP(0x170) BTRAP(0x171)
+#ifdef CONFIG_EC_FLUSH_TRAP
+ TRAP(cache_flush_trap)
+#else
+ BTRAP(0x172)
+#endif
tl0_resv173: BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
tl0_resv178: BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
tl0_resv17d: BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
@@ -151,7 +159,7 @@ tl1_resv012: BTRAPTL1(0x12) BTRAPTL1(0x13) BTRAPTL1(0x14) BTRAPTL1(0x15)
tl1_resv016: BTRAPTL1(0x16) BTRAPTL1(0x17) BTRAPTL1(0x18) BTRAPTL1(0x19)
tl1_resv01a: BTRAPTL1(0x1a) BTRAPTL1(0x1b) BTRAPTL1(0x1c) BTRAPTL1(0x1d)
tl1_resv01e: BTRAPTL1(0x1e) BTRAPTL1(0x1f)
-tl1_fpdis: TRAPTL1(do_fpdis_tl1)
+tl1_fpdis: TRAP_NOSAVE(do_fpdis)
tl1_fpieee: TRAPTL1(do_fpieee_tl1)
tl1_fpother: TRAPTL1(do_fpother_tl1)
tl1_tof: TRAPTL1(do_tof_tl1)
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
new file mode 100644
index 000000000..f66889195
--- /dev/null
+++ b/arch/sparc64/kernel/unaligned.c
@@ -0,0 +1,517 @@
+/* $Id: unaligned.c,v 1.1 1997/07/18 06:26:45 ralf Exp $
+ * unaligned.c: Unaligned load/store trap handling with special
+ * cases for the kernel to do them more quickly.
+ *
+ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/asi.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+
+/* #define DEBUG_MNA */
+
+enum direction {
+ load, /* ld, ldd, ldh, ldsh */
+ store, /* st, std, sth, stsh */
+ both, /* Swap, ldstub, cas, ... */
+ fpload,
+ fpstore,
+ invalid,
+};
+
+#ifdef DEBUG_MNA
+static char *dirstrings[] = {
+ "load", "store", "both", "fpload", "fpstore", "invalid"
+};
+#endif
+
+static inline enum direction decode_direction(unsigned int insn)
+{
+ unsigned long tmp = (insn >> 21) & 1;
+
+ if(!tmp)
+ return load;
+ else {
+ switch ((insn>>19)&0xf) {
+ case 15: /* swap* */
+ return both;
+ default:
+ return store;
+ }
+ }
+}
+
+/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
+static inline int decode_access_size(unsigned int insn)
+{
+ unsigned int tmp;
+
+ if (((insn >> 19) & 0xf) == 14)
+ return 8; /* stx* */
+ tmp = (insn >> 19) & 3;
+ if(!tmp)
+ return 4;
+ else if(tmp == 3)
+ return 16; /* ldd/std - Although it is actually 8 */
+ else if(tmp == 2)
+ return 2;
+ else {
+ printk("Impossible unaligned trap. insn=%08x\n", insn);
+ die_if_kernel("Byte sized unaligned access?!?!", current->tss.kregs);
+ }
+}
+
+static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
+{
+ if (insn & 0x800000) {
+ if (insn & 0x2000)
+ return (unsigned char)(regs->tstate >> 24); /* %asi */
+ else
+ return (unsigned char)(insn >> 5); /* imm_asi */
+ } else
+ return ASI_P;
+}
+
+/* 0x400000 = signed, 0 = unsigned */
+static inline int decode_signedness(unsigned int insn)
+{
+ return (insn & 0x400000);
+}
+
+static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
+ unsigned int rd)
+{
+ if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
+ flushw_user();
+ }
+}
+
+static inline long sign_extend_imm13(long imm)
+{
+ return imm << 51 >> 51;
+}
+
+static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
+{
+ struct reg_window *win;
+
+ if(reg < 16)
+ return (!reg ? 0 : regs->u_regs[reg]);
+
+ /* Ho hum, the slightly complicated case. */
+ win = (struct reg_window *) regs->u_regs[UREG_FP];
+ return win->locals[reg - 16]; /* yes, I know what this does... */
+}
+
+static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
+{
+ struct reg_window *win;
+
+ if(reg < 16)
+ return &regs->u_regs[reg];
+ win = (struct reg_window *) regs->u_regs[UREG_FP];
+ return &win->locals[reg - 16];
+}
+
+static inline unsigned long compute_effective_address(struct pt_regs *regs,
+ unsigned int insn)
+{
+ unsigned int rs1 = (insn >> 14) & 0x1f;
+ unsigned int rs2 = insn & 0x1f;
+ unsigned int rd = (insn >> 25) & 0x1f;
+
+ if(insn & 0x2000) {
+ maybe_flush_windows(rs1, 0, rd);
+ return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
+ } else {
+ maybe_flush_windows(rs1, rs2, rd);
+ return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
+ }
+}
+
+/* This is just to make gcc think panic does return... */
+static void unaligned_panic(char *str)
+{
+ panic(str);
+}
+
+#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
+__asm__ __volatile__ ( \
+ "wr %4, 0, %%asi\n\t" \
+ "cmp %1, 8\n\t" \
+ "bge,pn %%icc, 9f\n\t" \
+ " cmp %1, 4\n\t" \
+ "be,pt %%icc, 6f\n" \
+"4:\t" " lduba [%2] %%asi, %%l1\n" \
+"5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
+ "sll %%l1, 8, %%l1\n\t" \
+ "brz,pt %3, 3f\n\t" \
+ " add %%l1, %%l2, %%l1\n\t" \
+ "sllx %%l1, 48, %%l1\n\t" \
+ "srax %%l1, 48, %%l1\n" \
+"3:\t" "ba,pt %%xcc, 0f\n\t" \
+ " stx %%l1, [%0]\n" \
+"6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
+ "sll %%l1, 24, %%l1\n" \
+"7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
+ "sll %%l2, 16, %%l2\n" \
+"8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
+ "sll %%g7, 8, %%g7\n\t" \
+ "or %%l1, %%l2, %%l1\n\t" \
+ "or %%g7, %%g1, %%g7\n\t" \
+ "or %%l1, %%g7, %%l1\n\t" \
+ "brnz,a,pt %3, 3f\n\t" \
+ " sra %%l1, 0, %%l1\n" \
+"3:\t" "ba,pt %%xcc, 0f\n\t" \
+ " stx %%l1, [%0]\n" \
+"9:\t" "lduba [%2] %%asi, %%l1\n" \
+"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
+ "sllx %%l1, 56, %%l1\n" \
+"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
+ "sllx %%l2, 48, %%l2\n" \
+"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
+ "sllx %%g7, 40, %%g7\n\t" \
+ "sllx %%g1, 32, %%g1\n\t" \
+ "or %%l1, %%l2, %%l1\n\t" \
+ "or %%g7, %%g1, %%g7\n" \
+"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
+ "or %%l1, %%g7, %%g7\n" \
+"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
+ "sllx %%l2, 24, %%l2\n" \
+"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
+ "sllx %%g1, 16, %%g1\n\t" \
+ "or %%g7, %%l2, %%g7\n" \
+"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
+ "sllx %%l1, 8, %%l1\n\t" \
+ "or %%g7, %%g1, %%g7\n\t" \
+ "or %%l1, %%l2, %%l1\n\t" \
+ "or %%g7, %%l1, %%g7\n\t" \
+ "cmp %1, 8\n\t" \
+ "be,a,pt %%icc, 0f\n\t" \
+ " stx %%g7, [%0]\n\t" \
+ "srlx %%g7, 32, %%l1\n\t" \
+ "sra %%g7, 0, %%g7\n\t" \
+ "stx %%l1, [%0]\n\t" \
+ "stx %%g7, [%0 + 8]\n" \
+"0:\n\n\t" \
+ ".section __ex_table\n\t" \
+ ".xword 4b, " #errh "\n\t" \
+ ".xword 5b, " #errh "\n\t" \
+ ".xword 6b, " #errh "\n\t" \
+ ".xword 7b, " #errh "\n\t" \
+ ".xword 8b, " #errh "\n\t" \
+ ".xword 9b, " #errh "\n\t" \
+ ".xword 10b, " #errh "\n\t" \
+ ".xword 11b, " #errh "\n\t" \
+ ".xword 12b, " #errh "\n\t" \
+ ".xword 13b, " #errh "\n\t" \
+ ".xword 14b, " #errh "\n\t" \
+ ".xword 15b, " #errh "\n\t" \
+ ".xword 16b, " #errh "\n\n\t" \
+ ".previous\n\t" \
+ : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), "r" (asi) \
+ : "l1", "l2", "g7", "g1", "cc"); \
+})
+
+#define store_common(dst_addr, size, src_val, asi, errh) ({ \
+__asm__ __volatile__ ( \
+ "wr %3, 0, %%asi\n\t" \
+ "ldx [%2], %%l1\n" \
+ "cmp %1, 2\n\t" \
+ "be,pn %%icc, 2f\n\t" \
+ " cmp %1, 4\n\t" \
+ "be,pt %%icc, 1f\n\t" \
+ " srlx %%l1, 24, %%l2\n\t" \
+ "srlx %%l1, 56, %%g1\n\t" \
+ "srlx %%l1, 48, %%g7\n" \
+"4:\t" "stba %%g1, [%0] %%asi\n\t" \
+ "srlx %%l1, 40, %%g1\n" \
+"5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \
+ "srlx %%l1, 32, %%g7\n" \
+"6:\t" "stba %%g1, [%0 + 2] %%asi\n" \
+"7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \
+ "srlx %%l1, 16, %%g1\n" \
+"8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \
+ "srlx %%l1, 8, %%g7\n" \
+"9:\t" "stba %%g1, [%0 + 5] %%asi\n" \
+"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
+ "ba,pt %%xcc, 0f\n" \
+"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
+"1:\t" "srl %%l1, 16, %%g7\n" \
+"12:\t" "stba %%l2, [%0] %%asi\n\t" \
+ "srl %%l1, 8, %%l2\n" \
+"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
+"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
+ "ba,pt %%xcc, 0f\n" \
+"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
+"2:\t" "srl %%l1, 8, %%l2\n" \
+"16:\t" "stba %%l2, [%0] %%asi\n" \
+"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
+"0:\n\n\t" \
+ ".section __ex_table\n\t" \
+ ".xword 4b, " #errh "\n\t" \
+ ".xword 5b, " #errh "\n\t" \
+ ".xword 6b, " #errh "\n\t" \
+ ".xword 7b, " #errh "\n\t" \
+ ".xword 8b, " #errh "\n\t" \
+ ".xword 9b, " #errh "\n\t" \
+ ".xword 10b, " #errh "\n\t" \
+ ".xword 11b, " #errh "\n\t" \
+ ".xword 12b, " #errh "\n\t" \
+ ".xword 13b, " #errh "\n\t" \
+ ".xword 14b, " #errh "\n\t" \
+ ".xword 15b, " #errh "\n\t" \
+ ".xword 16b, " #errh "\n\t" \
+ ".xword 17b, " #errh "\n\n\t" \
+ ".previous\n\t" \
+ : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi) \
+ : "l1", "l2", "g7", "g1", "cc"); \
+})
+
+#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
+ unsigned long zero = 0; \
+ unsigned long *src_val = &zero; \
+ \
+ if (size == 16) { \
+ size = 8; \
+ zero = (((long)(reg_num ? \
+ (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
+ (unsigned)fetch_reg(reg_num + 1, regs); \
+ } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
+ store_common(dst_addr, size, src_val, asi, errh); \
+})
+
+/* XXX Need to capture/release other CPUs for SMP around this. */
+#define do_atomic(srcdest_reg, mem, errh) ({ \
+ unsigned long flags, tmp; \
+ \
+ save_and_cli(flags); \
+ tmp = *srcdest_reg; \
+ do_integer_load(srcdest_reg, 4, mem, 0, errh); \
+ store_common(mem, 4, &tmp, errh); \
+ restore_flags(flags); \
+})
+
+static inline void advance(struct pt_regs *regs)
+{
+ regs->tpc = regs->tnpc;
+ regs->tnpc += 4;
+}
+
+static inline int floating_point_load_or_store_p(unsigned int insn)
+{
+ return (insn >> 24) & 1;
+}
+
+static inline int ok_for_kernel(unsigned int insn)
+{
+ return !floating_point_load_or_store_p(insn);
+}
+
+void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");
+
+void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+{
+ unsigned long g2 = regs->u_regs [UREG_G2];
+ unsigned long fixup = search_exception_table (regs->tpc, &g2);
+
+ if (!fixup) {
+ unsigned long address = compute_effective_address(regs, insn);
+ if(address < PAGE_SIZE) {
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
+ } else
+ printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
+ printk(KERN_ALERT " at virtual address %016lx\n",address);
+ printk(KERN_ALERT "current->mm->context = %016lx\n",
+ (unsigned long) current->mm->context);
+ printk(KERN_ALERT "current->mm->pgd = %016lx\n",
+ (unsigned long) current->mm->pgd);
+ die_if_kernel("Oops", regs);
+ /* Not reached */
+ }
+ regs->tpc = fixup;
+ regs->tnpc = regs->tpc + 4;
+ regs->u_regs [UREG_G2] = g2;
+}
+
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
+{
+ enum direction dir = decode_direction(insn);
+ int size = decode_access_size(insn);
+
+ lock_kernel();
+ if(!ok_for_kernel(insn) || dir == both) {
+ printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
+ regs->tpc);
+ unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
+
+ __asm__ __volatile__ ("\n"
+"kernel_unaligned_trap_fault:\n\t"
+ "mov %0, %%o0\n\t"
+ "call kernel_mna_trap_fault\n\t"
+ " mov %1, %%o1\n\t"
+ :
+ : "r" (regs), "r" (insn)
+ : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
+ "g1", "g2", "g3", "g4", "g5", "g7", "cc");
+ } else {
+ unsigned long addr = compute_effective_address(regs, insn);
+
+#ifdef DEBUG_MNA
+ printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
+ regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
+#endif
+ switch(dir) {
+ case load:
+ do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn), decode_asi(insn, regs),
+ kernel_unaligned_trap_fault);
+ break;
+
+ case store:
+ do_integer_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ decode_asi(insn, regs),
+ kernel_unaligned_trap_fault);
+ break;
+#if 0 /* unsupported */
+ case both:
+ do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ (unsigned long *) addr,
+ kernel_unaligned_trap_fault);
+ break;
+#endif
+ default:
+ panic("Impossible kernel unaligned trap.");
+ /* Not reached... */
+ }
+ advance(regs);
+ }
+ unlock_kernel();
+}
+
+#if 0 /* XXX: Implement user mna some day */
+static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
+ enum direction dir)
+{
+ unsigned int reg;
+ int retval, check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
+ int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
+
+ if((regs->pc | regs->npc) & 3)
+ return 0;
+
+ /* Must verify_area() in all the necessary places. */
+#define WINREG_ADDR(regnum) ((void *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
+ retval = 0;
+ reg = (insn >> 25) & 0x1f;
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ reg = (insn >> 14) & 0x1f;
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ if(!(insn & 0x2000)) {
+ reg = (insn & 0x1f);
+ if(reg >= 16) {
+ retval = verify_area(check, WINREG_ADDR(reg - 16), size);
+ if(retval)
+ return retval;
+ }
+ }
+ return retval;
+#undef WINREG_ADDR
+}
+
+void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("user_mna_trap_fault");
+
+void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+{
+ current->tss.sig_address = regs->pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGBUS, current, 1);
+}
+
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
+{
+ enum direction dir;
+
+ lock_kernel();
+ if(!(current->tss.flags & SPARC_FLAG_UNALIGNED) ||
+ (((insn >> 30) & 3) != 3))
+ goto kill_user;
+ dir = decode_direction(insn);
+ if(!ok_for_user(regs, insn, dir)) {
+ goto kill_user;
+ } else {
+ int size = decode_access_size(insn);
+ unsigned long addr;
+
+ if(floating_point_load_or_store_p(insn)) {
+ printk("User FPU load/store unaligned unsupported.\n");
+ goto kill_user;
+ }
+
+ addr = compute_effective_address(regs, insn);
+ switch(dir) {
+ case load:
+ do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ size, (unsigned long *) addr,
+ decode_signedness(insn),
+ user_unaligned_trap_fault);
+ break;
+
+ case store:
+ do_integer_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ user_unaligned_trap_fault);
+ break;
+
+ case both:
+ do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
+ (unsigned long *) addr,
+ user_unaligned_trap_fault);
+ break;
+
+ default:
+ unaligned_panic("Impossible user unaligned trap.");
+
+ __asm__ __volatile__ ("\n"
+"user_unaligned_trap_fault:\n\t"
+ "mov %0, %%o0\n\t"
+ "call user_mna_trap_fault\n\t"
+ " mov %1, %%o1\n\t"
+ :
+ : "r" (regs), "r" (insn)
+ : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
+ "g1", "g2", "g3", "g4", "g5", "g7", "cc");
+ goto out;
+ }
+ advance(regs);
+ goto out;
+ }
+
+kill_user:
+ current->tss.sig_address = regs->pc;
+ current->tss.sig_desc = SUBSIG_PRIVINST;
+ send_sig(SIGBUS, current, 1);
+out:
+ unlock_kernel();
+}
+#endif
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 2ac19a440..f2c714eae 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -1,4 +1,4 @@
-/* $Id: winfixup.S,v 1.8 1997/06/02 06:33:35 davem Exp $
+/* $Id: winfixup.S,v 1.16 1997/07/13 20:02:42 davem Exp $
*
* winfixup.S: Handle cases where user stack pointer is found to be bogus.
*
@@ -31,6 +31,7 @@
fill_fixup:
rdpr %tstate, %g1
andcc %g1, TSTATE_PRIV, %g0
+ clr %g4
be,pt %xcc, window_scheisse_from_user_common
and %g1, TSTATE_CWP, %g1
@@ -53,25 +54,26 @@ fill_fixup:
rdpr %wstate, %g2 ! Grab user mode wstate.
wrpr %g1, %cwp ! Get into the right window.
sll %g2, 3, %g2 ! NORMAL-->OTHER
- wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
+ wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
+ wr %g0, 0x0, %fprs ! zap FPU just in case...
wrpr %g2, 0x0, %wstate ! This must be consistant.
wrpr %g0, 0x0, %otherwin ! We know this.
- sethi %uhi(KERNBASE), %g2 ! Set this up
- sllx %g2, 32, %g2 ! for the iflush
mov PRIMARY_CONTEXT, %g1 ! Change contexts...
stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus.
- flush %g2 ! Flush instruction buffers
+ flush %g6 ! Flush instruction buffers
rdpr %pstate, %l1 ! Prepare to change globals.
- mov %g4, %o5 ! Setup args for
- mov %g5, %o4 ! final call to do_sparc64_fault.
+ mov %g6, %o7 ! Get current.
+ mov %g5, %l5 ! Fault address
+ clr %l4 ! It was a load, not a store
wrpr %g0, 0x0, %tl ! Out of trap levels.
- wrpr %l1, (PSTATE_IE | PSTATE_AG), %pstate
- sethi %uhi(KERNBASE), %g4 ! Restore med-any global reg.
- rd %pic, %g6 ! Get current as well.
+ wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+ sethi %uhi(PAGE_OFFSET), %g4 ! Prepare page_offset global reg
+ mov %o7, %g6
b,pt %xcc, window_scheisse_merge ! And merge.
- sllx %g4, 32, %g4 ! Finish med-any reg setup.
+
+ sllx %g4, 32, %g4 ! and finish it...
/* Be very careful about usage of the alternate globals here.
* You cannot touch %g4/%g5 as that has the fault information
@@ -82,17 +84,16 @@ fill_fixup:
* do not touch %g7 or %g2 so we handle the two cases fine.
*/
spill_fixup:
- rd %pic, %g1
- ldx [%g1 + AOFF_task_tss + AOFF_thread_flags], %g6
- andcc %g6, SPARC_FLAG_32BIT, %g0
- ldx [%g1 + AOFF_task_tss + AOFF_thread_w_saved], %g6
- sll %g6, 3, %g3
- add %g1, %g3, %g3
+ ld [%g6 + AOFF_task_tss + AOFF_thread_flags], %g1
+ andcc %g1, SPARC_FLAG_32BIT, %g0
+ ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %g1
+ sll %g1, 3, %g3
+ add %g6, %g3, %g3
stx %sp, [%g3 + AOFF_task_tss + AOFF_thread_rwbuf_stkptrs]
- sll %g6, 7, %g3
+ sll %g1, 7, %g3
bne,pt %xcc, 1f
- add %g1, %g3, %g3
+ add %g6, %g3, %g3
stx %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
stx %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
stx %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
@@ -110,43 +111,45 @@ spill_fixup:
stx %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x68]
stx %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x70]
- stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
b,pt %xcc, 2f
- add %g6, 1, %g6
-1: std %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
- std %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
- std %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
- std %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
-
- std %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
- std %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
- std %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
- std %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- add %g6, 1, %g6
-2: stx %g6, [%g1 + AOFF_task_tss + AOFF_thread_w_saved]
+ stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
+1: stw %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
+ stw %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x04]
+ stw %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
+ stw %l3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x0c]
+ stw %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
+
+ stw %l5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x14]
+ stw %l6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x18]
+ stw %l7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x1c]
+ stw %i0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x20]
+ stw %i1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x24]
+ stw %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
+ stw %i3, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x2c]
+ stw %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
+
+ stw %i5, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x34]
+ stw %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
+ stw %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x3c]
+2: add %g1, 1, %g1
+ stx %g1, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
rdpr %tstate, %g1
- nop
-
andcc %g1, TSTATE_PRIV, %g0
saved
+
and %g1, TSTATE_CWP, %g1
be,a,pn %xcc, window_scheisse_from_user_common
or %g4, 0x4, %g4 ! we know it was a write
retry
window_scheisse_from_user_common:
- nop
wrpr %g1, %cwp
-
ba,pt %xcc, etrap
rd %pc, %g7
- mov %l5, %o4
- mov %l4, %o5
window_scheisse_merge:
- srlx %o4, PAGE_SHIFT, %o3
- clr %o1
- sllx %o3, PAGE_SHIFT, %o3
- and %o5, 0x4, %o2
+ srlx %l5, PAGE_SHIFT, %o1
+ and %l4, 0x4, %o2
+ sllx %o1, PAGE_SHIFT, %o1
call do_sparc64_fault
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,pt %xcc, rtrap
@@ -154,6 +157,7 @@ window_scheisse_merge:
winfix_trampoline:
andn %g3, 0x7f, %g3
add %g3, 0x7c, %g3
+
wrpr %g3, %tnpc
done
@@ -174,32 +178,31 @@ fill_fixup_mna:
wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
wrpr %g2, 0x0, %wstate ! This must be consistant.
wrpr %g0, 0x0, %otherwin ! We know this.
- sethi %uhi(KERNBASE), %g2 ! Set this up
- sllx %g2, 32, %g2 ! for the iflush
mov PRIMARY_CONTEXT, %g1 ! Change contexts...
stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus.
- flush %g2 ! Flush instruction buffers
+ flush %g6 ! Flush instruction buffers
rdpr %pstate, %l1 ! Prepare to change globals.
mov %g4, %o5 ! Setup args for
mov %g5, %o4 ! final call to do_sparc64_fault.
+ mov %g6, %o7 ! Stash away current.
wrpr %g0, 0x0, %tl ! Out of trap levels.
- wrpr %l1, (PSTATE_IE | PSTATE_AG), %pstate
- sethi %uhi(KERNBASE), %g4 ! Restore med-any global reg.
- rd %pic, %g6 ! Get current as well.
+ wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate
+ sethi %uhi(PAGE_OFFSET), %g4 ! Set page_offset global reg.
+ mov %o7, %g6 ! Get current back.
b,pt %xcc, window_mna_merge ! And merge.
- sllx %g4, 32, %g4 ! Finish med-any reg setup.
+ sllx %g4, 32, %g4 ! Finish it.
+
spill_fixup_mna:
- rd %pic, %g1
- ldx [%g1 + AOFF_task_tss + AOFF_thread_flags], %g6
- andcc %g6, SPARC_FLAG_32BIT, %g0
- ldx [%g1 + AOFF_task_tss + AOFF_thread_w_saved], %g6
- sll %g6, 3, %g3
- add %g1, %g3, %g3
+ ld [%g6 + AOFF_task_tss + AOFF_thread_flags], %g1
+ andcc %g1, SPARC_FLAG_32BIT, %g0
+ ldx [%g6 + AOFF_task_tss + AOFF_thread_w_saved], %g1
+ sll %g1, 3, %g3
+ add %g6, %g3, %g3
stx %sp, [%g3 + AOFF_task_tss + AOFF_thread_rwbuf_stkptrs]
- sll %g6, 7, %g3
+ sll %g1, 7, %g3
bne,pt %xcc, 1f
- add %g1, %g3, %g3
+ add %g6, %g3, %g3
stx %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
stx %l1, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
stx %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
@@ -219,7 +222,7 @@ spill_fixup_mna:
stx %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x70]
stx %i7, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x78]
b,pt %xcc, 2f
- add %g6, 1, %g6
+ add %g1, 1, %g1
1: std %l0, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x00]
std %l2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x08]
std %l4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x10]
@@ -229,8 +232,8 @@ spill_fixup_mna:
std %i2, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x28]
std %i4, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x30]
std %i6, [%g3 + AOFF_task_tss + AOFF_thread_reg_window + 0x38]
- add %g6, 1, %g6
-2: stx %g6, [%g1 + AOFF_task_tss + AOFF_thread_w_saved]
+ add %g1, 1, %g1
+2: stx %g1, [%g6 + AOFF_task_tss + AOFF_thread_w_saved]
rdpr %tstate, %g1
nop
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 56c506507..3da21c606 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -1,55 +1,25 @@
-# $Id: Makefile,v 1.7 1997/04/07 18:57:05 jj Exp $
+# $Id: Makefile,v 1.13 1997/07/16 10:12:03 jj Exp $
# Makefile for Sparc library files..
#
CFLAGS := $(CFLAGS) -ansi
-OBJS = memset.o blockops.o locks.o memcpy.o strlen.o strncmp.o \
+OBJS = blockops.o locks.o strlen.o strncmp.o \
memscan.o strncpy_from_user.o strlen_user.o memcmp.o checksum.o \
- copy_to_user.o copy_from_user.o
+ VIScopy.o VISbzero.o VISmemset.o VIScsum.o
lib.a: $(OBJS)
$(AR) rcs lib.a $(OBJS)
sync
-blockops.o: blockops.S
- $(CC) -ansi -c -o blockops.o blockops.S
+VIScopy.o: VIScopy.S VIS.h
+VISbzero.o: VISbzero.S VIS.h
-memset.o: memset.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o memset.o memset.S
+.S.s:
+ $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
-copy_to_user.o: copy_to_user.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o copy_to_user.o copy_to_user.S
-
-copy_from_user.o: copy_from_user.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o copy_from_user.o copy_from_user.S
-
-memcpy.o: memcpy.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o memcpy.o memcpy.S
-
-strlen.o: strlen.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o strlen.o strlen.S
-
-strncmp.o: strncmp.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o strncmp.o strncmp.S
-
-memcmp.o: memcmp.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o memcmp.o memcmp.S
-
-locks.o: locks.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o locks.o locks.S
-
-checksum.o: checksum.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o checksum.o checksum.S
-
-memscan.o: memscan.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o memscan.o memscan.S
-
-strncpy_from_user.o: strncpy_from_user.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o strncpy_from_user.o strncpy_from_user.S
-
-strlen_user.o: strlen_user.S
- $(CC) -D__ASSEMBLY__ -ansi -c -o strlen_user.o strlen_user.S
+.S.o:
+ $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
dep:
diff --git a/arch/sparc64/lib/VIS.h b/arch/sparc64/lib/VIS.h
new file mode 100644
index 000000000..45bc870a4
--- /dev/null
+++ b/arch/sparc64/lib/VIS.h
@@ -0,0 +1,113 @@
+/* $Id: VIS.h,v 1.1 1997/07/18 06:26:48 ralf Exp $
+ * VIS.h: High speed copy/clear operations utilizing the UltraSparc
+ * Visual Instruction Set.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+ /* VIS code can be used for numerous copy/set operation variants.
+ * It can be made to work in the kernel, one single instance,
+ * for all of memcpy, copy_to_user, and copy_from_user by setting
+ * the ASI src/dest globals correctly. Furthermore it can
+ * be used for kernel-->kernel page copies as well, a hook label
+ * is put in here just for this purpose.
+ *
+ * For userland, compiling this without __KERNEL__ defined makes
+ * it work just fine as a generic libc bcopy and memcpy.
+ * If for userland it is compiled with a 32bit gcc (but you need
+ * -Wa,-Av9a), the code will just rely on lower 32bits of
+ * IEU registers, if you compile it with 64bit gcc (ie. define
+ * __sparc_v9__), the code will use full 64bit.
+ */
+
+#ifndef __VIS_H
+#define __VIS_H
+
+#ifdef __KERNEL__
+#include <asm/head.h>
+#include <asm/asi.h>
+#else
+#define ASI_P 0x80 /* Primary, implicit */
+#define ASI_S 0x81 /* Secondary, implicit */
+#define ASI_BLK_COMMIT_P 0xe0 /* Primary, blk store commit */
+#define ASI_BLK_COMMIT_S 0xe1 /* Secondary, blk store commit */
+#define ASI_BLK_P 0xf0 /* Primary, blk ld/st */
+#define ASI_BLK_S 0xf1 /* Secondary, blk ld/st */
+#define FPRS_FEF 0x04
+#endif
+
+ /* I'm telling you, they really did this chip right.
+ * Perhaps the SunSoft folks should visit some of the
+ * people in Sun Microelectronics and start some brain
+ * cell exchange program...
+ */
+#define ASI_BLK_XOR (ASI_P ^ ASI_BLK_P)
+
+#define asi_src %o3
+#define asi_dest %o4
+
+#ifdef __KERNEL__
+#define ASI_SETSRC_BLK wr asi_src, 0, %asi;
+#define ASI_SETSRC_NOBLK wr asi_src, ASI_BLK_XOR, %asi;
+#define ASI_SETDST_BLK wr asi_dest, 0, %asi;
+#define ASI_SETDST_NOBLK wr asi_dest, ASI_BLK_XOR, %asi;
+#define ASIBLK %asi
+#define ASINORMAL %asi
+#define LDUB lduba
+#define LDUH lduha
+#define LDUW lduwa
+#define LDX ldxa
+#define LDD ldda
+#define LDDF ldda
+#define LDBLK ldda
+#define STB stba
+#define STH stha
+#define STW stwa
+#define STD stda
+#define STX stxa
+#define STDF stda
+#define STBLK stda
+#else
+#define ASI_SETSRC_BLK
+#define ASI_SETSRC_NOBLK
+#define ASI_SETDST_BLK
+#define ASI_SETDST_NOBLK
+#define ASI_SETDST_SPECIAL
+#define ASIBLK %asi
+#define ASINORMAL
+#define LDUB ldub
+#define LDUH lduh
+#define LDUW lduw
+#define LDD ldd
+#define LDX ldx
+#define LDDF ldd
+#define LDBLK ldda
+#define STB stb
+#define STH sth
+#define STW stw
+#define STD std
+#define STX stx
+#define STDF std
+#define STBLK stda
+#endif
+
+#ifdef __KERNEL__
+
+#define REGS_64BIT
+
+#else
+
+#ifndef REGS_64BIT
+#ifdef __sparc_v9__
+#define REGS_64BIT
+#endif
+#endif
+
+#endif
+
+#ifndef REGS_64BIT
+#define xcc icc
+#endif
+
+#endif
diff --git a/arch/sparc64/lib/VISbzero.S b/arch/sparc64/lib/VISbzero.S
new file mode 100644
index 000000000..3c86861fd
--- /dev/null
+++ b/arch/sparc64/lib/VISbzero.S
@@ -0,0 +1,246 @@
+/* $Id: VISbzero.S,v 1.1 1997/07/18 06:26:48 ralf Exp $
+ * VISbzero.S: High speed clear operations utilizing the UltraSparc
+ * Visual Instruction Set.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include "VIS.h"
+
+#ifdef __KERNEL__
+#define EXN(x,y,a,b,z) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: ba VISbzerofixup_ret##z; \
+ a, b, %o0; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXC(x,y,a,b,c...) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: c; \
+ ba VISbzerofixup_ret0; \
+ a, b, %o0; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXO1(x,y) \
+98: x,y; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, VISbzerofixup_reto1; \
+ .text; \
+ .align 4;
+#define EX(x,y,a,b) EXN(x,y,a,b,0)
+#define EX1(x,y,a,b) EXN(x,y,a,b,1)
+#define EX2(x,y,a,b) EXN(x,y,a,b,2)
+#define EXT(start,end,handler) \
+ .section __ex_table; \
+ .align 8; \
+ .xword start, 0, end, handler; \
+ .text; \
+ .align 4
+#else
+#define EX(x,y,a,b) x,y
+#define EX1(x,y,a,b) x,y
+#define EX2(x,y,a,b) x,y
+#define EXC(x,y,a,b,c...) x,y
+#define EXO1(x,y) x,y
+#define EXT(a,b,c)
+#endif
+
+#define ZERO_BLOCKS(base, offset, source) \
+ STX source, [base - offset - 0x38] ASINORMAL; \
+ STX source, [base - offset - 0x30] ASINORMAL; \
+ STX source, [base - offset - 0x28] ASINORMAL; \
+ STX source, [base - offset - 0x20] ASINORMAL; \
+ STX source, [base - offset - 0x18] ASINORMAL; \
+ STX source, [base - offset - 0x10] ASINORMAL; \
+ STX source, [base - offset - 0x08] ASINORMAL; \
+ STX source, [base - offset - 0x00] ASINORMAL;
+
+#ifdef __KERNEL__
+#define RETL clr %o0
+#else
+#define RETL mov %g3, %o0
+#endif
+
+ /* Well, bzero is a lot easier to get right than bcopy... */
+#ifdef __KERNEL__
+ .section __ex_table,#alloc
+ .section .fixup,#alloc,#execinstr
+#endif
+ .text
+ .align 32
+#ifdef __KERNEL__
+ .globl __bzero, __bzero_noasi
+__bzero:
+ wr %g0, ASI_P, %asi ! LSU Group
+__bzero_noasi:
+#else
+ .globl bzero
+bzero_private:
+bzero:
+#ifndef REGS_64BIT
+ srl %o1, 0, %o1
+#endif
+ mov %o0, %g3
+#endif
+ cmp %o1, 7
+ bleu,pn %xcc, 17f
+ andcc %o0, 3, %o2
+ be,a,pt %xcc, 4f
+ andcc %o0, 4, %g0
+ cmp %o2, 3
+ be,pn %xcc, 2f
+ EXO1(STB %g0, [%o0 + 0x00] ASINORMAL)
+ cmp %o2, 2
+ be,pt %xcc, 2f
+ EX(STB %g0, [%o0 + 0x01] ASINORMAL, sub %o1, 1)
+ EX(STB %g0, [%o0 + 0x02] ASINORMAL, sub %o1, 2)
+2: sub %o2, 4, %o2
+ sub %o0, %o2, %o0
+ add %o1, %o2, %o1
+ andcc %o0, 4, %g0
+4: be,pt %xcc, 2f
+ cmp %o1, 128
+ EXO1(STW %g0, [%o0] ASINORMAL)
+ sub %o1, 4, %o1
+ add %o0, 4, %o0
+2: blu,pn %xcc, 9f
+ andcc %o0, 0x38, %o2
+ be,pn %icc, 6f
+ mov 64, %o5
+ andcc %o0, 8, %g0
+ be,pn %icc, 1f
+ sub %o5, %o2, %o5
+ EX(STX %g0, [%o0] ASINORMAL, sub %o1, 0)
+ add %o0, 8, %o0
+1: andcc %o5, 16, %g0
+ be,pn %icc, 1f
+ sub %o1, %o5, %o1
+ EX1(STX %g0, [%o0] ASINORMAL, add %g0, 0)
+ EX1(STX %g0, [%o0 + 8] ASINORMAL, sub %g0, 8)
+ add %o0, 16, %o0
+1: andcc %o5, 32, %g0
+ be,pn %icc, 7f
+ andncc %o1, 0x3f, %o3
+ EX(STX %g0, [%o0] ASINORMAL, add %o1, 32)
+ EX(STX %g0, [%o0 + 8] ASINORMAL, add %o1, 24)
+ EX(STX %g0, [%o0 + 16] ASINORMAL, add %o1, 16)
+ EX(STX %g0, [%o0 + 24] ASINORMAL, add %o1, 8)
+ add %o0, 32, %o0
+6: andncc %o1, 0x3f, %o3
+7: be,pn %xcc, 9f
+#ifdef __KERNEL__
+ rd %asi, %g7
+ wr %g0, FPRS_FEF, %fprs
+ wr %g7, ASI_BLK_XOR, %asi
+#else
+ wr %g0, ASI_BLK_P, %asi
+#endif
+ membar #StoreStore | #LoadStore
+ fzero %f0
+ andcc %o3, 0xc0, %o2
+ and %o1, 0x3f, %o1
+ fzero %f2
+ andn %o3, 0xff, %o3
+ faddd %f0, %f2, %f4
+ fmuld %f0, %f2, %f6
+ cmp %o2, 64
+ faddd %f0, %f2, %f8
+ fmuld %f0, %f2, %f10
+ faddd %f0, %f2, %f12
+ brz,pn %o2, 10f
+ fmuld %f0, %f2, %f14
+ be,pn %icc, 2f
+ EXC(STBLK %f0, [%o0 + 0x00] ASIBLK, add %o3, %o2, add %o2, %o1, %o2)
+ cmp %o2, 128
+ be,pn %icc, 2f
+ EXC(STBLK %f0, [%o0 + 0x40] ASIBLK, add %o3, %o2, add %o2, %o1, %o2; sub %o2, 64, %o2)
+ EXC(STBLK %f0, [%o0 + 0x80] ASIBLK, add %o3, %o2, add %o2, %o1, %o2; sub %o2, 128, %o2)
+2: brz,pn %o3, 12f
+ add %o0, %o2, %o0
+10: EX(STBLK %f0, [%o0 + 0x00] ASIBLK, add %o3, %o1)
+ EXC(STBLK %f0, [%o0 + 0x40] ASIBLK, add %o3, %o1, sub %o1, 64, %o1)
+ EXC(STBLK %f0, [%o0 + 0x80] ASIBLK, add %o3, %o1, sub %o1, 128, %o1)
+ EXC(STBLK %f0, [%o0 + 0xc0] ASIBLK, add %o3, %o1, sub %o1, 192, %o1)
+11: subcc %o3, 256, %o3
+ bne,pt %xcc, 10b
+ add %o0, 256, %o0
+12:
+#ifdef __KERNEL__
+ wr %g0, 0, %fprs
+ wr %g7, 0x0, %asi
+#endif
+ membar #Sync
+9: andcc %o1, 0xf8, %o2
+ be,pn %xcc, 13f
+ andcc %o1, 7, %o1
+14: rd %pc, %o4
+ srl %o2, 1, %o3
+ sub %o4, %o3, %o4
+ jmpl %o4 + (13f - 14b), %g0
+ add %o0, %o2, %o0
+12: ZERO_BLOCKS(%o0, 0xc8, %g0)
+ ZERO_BLOCKS(%o0, 0x88, %g0)
+ ZERO_BLOCKS(%o0, 0x48, %g0)
+ ZERO_BLOCKS(%o0, 0x08, %g0)
+ EXT(12b,13f,VISbzerofixup_zb)
+13: be,pn %xcc, 8f
+ andcc %o1, 4, %g0
+ be,pn %xcc, 1f
+ andcc %o1, 2, %g0
+ EX(STW %g0, [%o0] ASINORMAL, and %o1, 7)
+ add %o0, 4, %o0
+1: be,pn %xcc, 1f
+ andcc %o1, 1, %g0
+ EX(STH %g0, [%o0] ASINORMAL, and %o1, 3)
+ add %o0, 2, %o0
+1: bne,a,pn %xcc, 8f
+ EX(STB %g0, [%o0] ASINORMAL, add %g0, 1)
+8: retl
+ RETL
+17: be,pn %xcc, 13b
+ orcc %o1, 0, %g0
+ be,pn %xcc, 0f
+8: add %o0, 1, %o0
+ subcc %o1, 1, %o1
+ bne,pt %xcc, 8b
+ EX(STB %g0, [%o0 - 1] ASINORMAL, add %o1, 1)
+0: retl
+ RETL
+
+#ifdef __KERNEL__
+ .section .fixup
+ .align 4
+VISbzerofixup_reto1:
+ mov %o1, %o0
+VISbzerofixup_ret0:
+ retl
+ wr %g0, 0, %fprs
+VISbzerofixup_ret1:
+ and %o5, 0x30, %o5
+ add %o5, %o1, %o5
+ ba,pt %xcc, VISbzerofixup_ret0
+ add %o0, %o5, %o0
+VISbzerofixup_ret2:
+ and %o5, 0x20, %o5
+ add %o5, %o1, %o5
+ ba,pt %xcc, VISbzerofixup_ret0
+ add %o0, %o5, %o0
+VISbzerofixup_zb:
+ andcc %o1, 7, %o1
+ sll %g2, 3, %g2
+ add %o1, 256, %o1
+ ba,pt %xcc, VISbzerofixup_ret0
+ sub %o1, %g2, %o0
+#endif
diff --git a/arch/sparc64/lib/VIScopy.S b/arch/sparc64/lib/VIScopy.S
new file mode 100644
index 000000000..1429f1658
--- /dev/null
+++ b/arch/sparc64/lib/VIScopy.S
@@ -0,0 +1,1060 @@
+/* $Id: VIScopy.S,v 1.1 1997/07/18 06:26:48 ralf Exp $
+ * VIScopy.S: High speed copy operations utilizing the UltraSparc
+ * Visual Instruction Set.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include "VIS.h"
+
+ /* VIS code can be used for numerous copy/set operation variants.
+ * It can be made to work in the kernel, one single instance,
+ * for all of memcpy, copy_to_user, and copy_from_user by setting
+ * the ASI src/dest globals correctly. Furthermore it can
+ * be used for kernel-->kernel page copies as well, a hook label
+ * is put in here just for this purpose.
+ *
+ * For userland, compiling this without __KERNEL__ defined makes
+ * it work just fine as a generic libc bcopy and memcpy.
+ * If for userland it is compiled with a 32bit gcc (but you need
+ * -Wa,-Av9a for as), the code will just rely on lower 32bits of
+ * IEU registers, if you compile it with 64bit gcc (ie. define
+ * __sparc_v9__), the code will use full 64bit.
+ */
+
+#ifdef __KERNEL__
+#define FPU_CLEAN_RETL \
+ wr %g0, 0, %fprs; \
+ retl; \
+ clr %o0;
+#define FPU_RETL \
+ wr %g0, 0, %fprs; \
+ retl; \
+ clr %o0;
+#define NORMAL_RETL \
+ retl; \
+ clr %o0;
+#define EX(x,y,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: ba VIScopyfixup_ret; \
+ a, b, %o0; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, 99b; \
+ .text; \
+ .align 4;
+#define EX2(x,y,c,d,e,a,b) \
+98: x,y; \
+ .section .fixup; \
+ .align 4; \
+99: c, d, e; \
+ ba VIScopyfixup_ret; \
+ a, b, %o0; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, 99b; \
+ .text; \
+ .align 4;
+#define EXO2(x,y) \
+98: x,y; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, VIScopyfixup_reto2; \
+ .text; \
+ .align 4;
+#define EXVISN(x,y,n) \
+98: x,y; \
+ .section __ex_table; \
+ .align 8; \
+ .xword 98b, VIScopyfixup_vis##n; \
+ .text; \
+ .align 4;
+#define EXT(start,end,handler) \
+ .section __ex_table; \
+ .align 8; \
+ .xword start, 0, end, handler; \
+ .text; \
+ .align 4;
+#else
+#define FPU_CLEAN_RETL \
+ retl; \
+ mov %g6, %o0;
+#define FPU_RETL \
+ retl; \
+ mov %g6, %o0;
+#define NORMAL_RETL \
+ retl; \
+ mov %g6, %o0;
+#define EX(x,y,a,b) x,y
+#define EX2(x,y,c,d,e,a,b) x,y
+#define EXO2(x,y) x,y
+#define EXVISN(x,y,n) x,y
+#define EXT(a,b,c)
+#endif
+#define EXVIS(x,y) EXVISN(x,y,0)
+#define EXVIS1(x,y) EXVISN(x,y,1)
+#define EXVIS2(x,y) EXVISN(x,y,2)
+#define EXVIS3(x,y) EXVISN(x,y,3)
+#define EXVIS4(x,y) EXVISN(x,y,4)
+#define EXVIS5(x,y) EXVISN(x,y,5)
+
+#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ faligndata %f1, %f2, %f48; \
+ faligndata %f2, %f3, %f50; \
+ faligndata %f3, %f4, %f52; \
+ faligndata %f4, %f5, %f54; \
+ faligndata %f5, %f6, %f56; \
+ faligndata %f6, %f7, %f58; \
+ faligndata %f7, %f8, %f60; \
+ faligndata %f8, %f9, %f62;
+
+#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
+ EXVIS(LDBLK [%src] ASIBLK, %fdest); \
+ add %src, 0x40, %src; \
+ ASI_SETDST_BLK \
+ add %dest, 0x40, %dest; \
+ subcc %len, 0x40, %len; \
+ be,pn %xcc, jmptgt; \
+ EXVIS2(STBLK %fsrc, [%dest - 0x40] ASIBLK); \
+ ASI_SETSRC_BLK
+
+#define LOOP_CHUNK1(src, dest, len, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
+#define LOOP_CHUNK2(src, dest, len, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
+#define LOOP_CHUNK3(src, dest, len, branch_dest) \
+ MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
+
+#define STORE_SYNC(dest, fsrc) \
+ EXVIS(STBLK %fsrc, [%dest] ASIBLK); \
+ add %dest, 0x40, %dest;
+
+#define STORE_JUMP(dest, fsrc, target) \
+ EXVIS3(STBLK %fsrc, [%dest] ASIBLK); \
+ add %dest, 0x40, %dest; \
+ ba,pt %xcc, target;
+
+#ifndef __KERNEL__
+#define VISLOOP_PAD nop; nop; nop; nop; \
+ nop; nop; nop; nop; \
+ nop; nop; nop; nop; \
+ nop; nop; nop;
+#else
+#define VISLOOP_PAD nop; nop; nop; nop; \
+ nop; nop; nop; nop; \
+ nop;
+#endif
+
+#define FINISH_VISCHUNK(dest, f0, f1, left) \
+ ASI_SETDST_NOBLK \
+ subcc %left, 8, %left; \
+ bl,pn %xcc, vis_out; \
+ faligndata %f0, %f1, %f48; \
+ EXVIS4(STDF %f48, [%dest] ASINORMAL); \
+ add %dest, 8, %dest;
+
+#define UNEVEN_VISCHUNK(dest, f0, f1, left) \
+ subcc %left, 8, %left; \
+ bl,pn %xcc, vis_out; \
+ fsrc1 %f0, %f1; \
+ ba,a,pt %xcc, vis_slk;
+
+ /* Macros for non-VIS memcpy code. */
+#ifdef REGS_64BIT
+
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ASI_SETSRC_NOBLK \
+ LDX [%src + offset + 0x00] ASINORMAL, %t0; \
+ LDX [%src + offset + 0x08] ASINORMAL, %t1; \
+ LDX [%src + offset + 0x10] ASINORMAL, %t2; \
+ LDX [%src + offset + 0x18] ASINORMAL, %t3; \
+ ASI_SETDST_NOBLK \
+ STW %t0, [%dst + offset + 0x04] ASINORMAL; \
+ srlx %t0, 32, %t0; \
+ STW %t0, [%dst + offset + 0x00] ASINORMAL; \
+ STW %t1, [%dst + offset + 0x0c] ASINORMAL; \
+ srlx %t1, 32, %t1; \
+ STW %t1, [%dst + offset + 0x08] ASINORMAL; \
+ STW %t2, [%dst + offset + 0x14] ASINORMAL; \
+ srlx %t2, 32, %t2; \
+ STW %t2, [%dst + offset + 0x10] ASINORMAL; \
+ STW %t3, [%dst + offset + 0x1c] ASINORMAL; \
+ srlx %t3, 32, %t3; \
+ STW %t3, [%dst + offset + 0x18] ASINORMAL;
+
+#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ASI_SETSRC_NOBLK \
+ LDX [%src + offset + 0x00] ASINORMAL, %t0; \
+ LDX [%src + offset + 0x08] ASINORMAL, %t1; \
+ LDX [%src + offset + 0x10] ASINORMAL, %t2; \
+ LDX [%src + offset + 0x18] ASINORMAL, %t3; \
+ ASI_SETDST_NOBLK \
+ STX %t0, [%dst + offset + 0x00] ASINORMAL; \
+ STX %t1, [%dst + offset + 0x08] ASINORMAL; \
+ STX %t2, [%dst + offset + 0x10] ASINORMAL; \
+ STX %t3, [%dst + offset + 0x18] ASINORMAL; \
+ ASI_SETSRC_NOBLK \
+ LDX [%src + offset + 0x20] ASINORMAL, %t0; \
+ LDX [%src + offset + 0x28] ASINORMAL, %t1; \
+ LDX [%src + offset + 0x30] ASINORMAL, %t2; \
+ LDX [%src + offset + 0x38] ASINORMAL, %t3; \
+ ASI_SETDST_NOBLK \
+ STX %t0, [%dst + offset + 0x20] ASINORMAL; \
+ STX %t1, [%dst + offset + 0x28] ASINORMAL; \
+ STX %t2, [%dst + offset + 0x30] ASINORMAL; \
+ STX %t3, [%dst + offset + 0x38] ASINORMAL;
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ ASI_SETSRC_NOBLK \
+ LDX [%src - offset - 0x10] ASINORMAL, %t0; \
+ LDX [%src - offset - 0x08] ASINORMAL, %t1; \
+ ASI_SETDST_NOBLK \
+ STW %t0, [%dst - offset - 0x0c] ASINORMAL; \
+ srlx %t0, 32, %t2; \
+ STW %t2, [%dst - offset - 0x10] ASINORMAL; \
+ STW %t1, [%dst - offset - 0x04] ASINORMAL; \
+ srlx %t1, 32, %t3; \
+ STW %t3, [%dst - offset - 0x08] ASINORMAL;
+
+#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
+ ASI_SETSRC_NOBLK \
+ LDX [%src - offset - 0x10] ASINORMAL, %t0; \
+ LDX [%src - offset - 0x08] ASINORMAL, %t1; \
+ ASI_SETDST_NOBLK \
+ STX %t0, [%dst - offset - 0x10] ASINORMAL; \
+ STX %t1, [%dst - offset - 0x08] ASINORMAL;
+
+#else /* !REGS_64BIT */
+
+#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lduw [%src + offset + 0x00], %t0; \
+ lduw [%src + offset + 0x04], %t1; \
+ lduw [%src + offset + 0x08], %t2; \
+ lduw [%src + offset + 0x0c], %t3; \
+ stw %t0, [%dst + offset + 0x00]; \
+ stw %t1, [%dst + offset + 0x04]; \
+ stw %t2, [%dst + offset + 0x08]; \
+ stw %t3, [%dst + offset + 0x0c]; \
+ lduw [%src + offset + 0x10], %t0; \
+ lduw [%src + offset + 0x14], %t1; \
+ lduw [%src + offset + 0x18], %t2; \
+ lduw [%src + offset + 0x1c], %t3; \
+ stw %t0, [%dst + offset + 0x10]; \
+ stw %t1, [%dst + offset + 0x14]; \
+ stw %t2, [%dst + offset + 0x18]; \
+ stw %t3, [%dst + offset + 0x1c];
+
+#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ lduw [%src - offset - 0x10], %t0; \
+ lduw [%src - offset - 0x0c], %t1; \
+ lduw [%src - offset - 0x08], %t2; \
+ lduw [%src - offset - 0x04], %t3; \
+ stw %t0, [%dst - offset - 0x10]; \
+ stw %t1, [%dst - offset - 0x0c]; \
+ stw %t2, [%dst - offset - 0x08]; \
+ stw %t3, [%dst - offset - 0x04];
+
+#endif /* !REGS_64BIT */
+
+#ifdef __KERNEL__
+ .section __ex_table,#alloc
+ .section .fixup,#alloc,#execinstr
+#endif
+
+ .text
+ .align 32
+ .globl memcpy
+ .type memcpy,@function
+
+ .globl bcopy
+ .type bcopy,@function
+
+#ifdef __KERNEL__
+ .globl __memcpy
+ .type __memcpy,@function
+
+ .globl __memcpy_384plus
+ .type __memcpy_384plus,@function
+
+ .globl __memcpy_16plus
+ .type __memcpy_16plus,@function
+
+ .globl __memcpy_short
+ .type __memcpy_short,@function
+
+ .globl __memcpy_entry
+ .type __memcpy_entry,@function
+
+ .globl copy_page
+ .type copy_page,@function
+
+memcpy_private:
+__memcpy:
+memcpy: mov ASI_BLK_P, asi_src ! IEU0 Group
+ brnz,pt %o2, __memcpy_entry ! CTI
+ mov ASI_BLK_P, asi_dest ! IEU1
+ retl
+ clr %o0
+
+copy_page: wr %g0, FPRS_FEF, %fprs ! FPU Group
+ sethi %hi(8192), %o2 ! IEU0 Group
+ mov ASI_BLK_P, asi_src ! IEU1
+ b,pt %xcc, dest_is_64byte_aligned ! CTI
+ mov ASI_BLK_COMMIT_P, asi_dest ! IEU0 Group
+
+ .align 32
+ .globl __copy_from_user
+ .type __copy_from_user,@function
+__copy_from_user:mov ASI_BLK_S, asi_src ! IEU0 Group
+ brnz,pt %o2, __memcpy_entry ! CTI
+ mov ASI_BLK_P, asi_dest ! IEU1
+
+ .globl __copy_to_user
+ .type __copy_to_user,@function
+__copy_to_user: mov ASI_BLK_P, asi_src ! IEU0 Group
+ brnz,pt %o2, __memcpy_entry ! CTI
+ mov ASI_BLK_S, asi_dest ! IEU1
+ retl ! CTI Group
+ clr %o0 ! IEU0 Group
+
+ .globl __copy_in_user
+ .type __copy_in_user,@function
+__copy_in_user: mov ASI_BLK_S, asi_src ! IEU0 Group
+ brnz,pt %o2, __memcpy_entry ! CTI
+ mov ASI_BLK_S, asi_dest ! IEU1
+ retl ! CTI Group
+ clr %o0 ! IEU0 Group
+#endif
+
+bcopy: or %o0, 0, %g3 ! IEU0 Group
+ addcc %o1, 0, %o0 ! IEU1
+ brgez,pt %o2, memcpy_private ! CTI
+ or %g3, 0, %o1 ! IEU0 Group
+ retl ! CTI Group brk forced
+ clr %o0 ! IEU0
+
+
+ .align 32
+#ifdef __KERNEL__
+__memcpy_384plus:
+ andcc %o0, 7, %g2 ! IEU1 Group
+#endif
+VIS_enter:
+ be,pt %xcc, dest_is_8byte_aligned ! CTI
+ andcc %o0, 0x38, %g5 ! IEU1 Group
+do_dest_8byte_align:
+ mov 8, %g1 ! IEU0
+ sub %g1, %g2, %g2 ! IEU0 Group
+ andcc %o0, 1, %g0 ! IEU1
+ be,pt %icc, 2f ! CTI
+ sub %o2, %g2, %o2 ! IEU0 Group
+1: ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUB [%o1] ASINORMAL, %o5,
+ add %o2, %g2) ! Load Group
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %g2, 1, %g2 ! IEU1 Group
+ be,pn %xcc, 3f ! CTI
+ EX2(STB %o5, [%o0 - 1] ASINORMAL,
+ add %g2, 1, %g2,
+ add %o2, %g2) ! Store
+2: ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUB [%o1] ASINORMAL, %o5,
+ add %o2, %g2) ! Load Group
+ add %o0, 2, %o0 ! IEU0
+ EX(LDUB [%o1 + 1] ASINORMAL, %g3,
+ add %o2, %g2) ! Load Group
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %g2, 2, %g2 ! IEU1 Group
+ EX2(STB %o5, [%o0 - 2] ASINORMAL,
+ add %g2, 2, %g2,
+ add %o2, %g2) ! Store
+ add %o1, 2, %o1 ! IEU0
+ bne,pt %xcc, 2b ! CTI Group
+ EX2(STB %g3, [%o0 - 1] ASINORMAL,
+ add %g2, 1, %g2,
+ add %o2, %g2) ! Store
+3: andcc %o0, 0x38, %g5 ! IEU1 Group
+dest_is_8byte_aligned:
+ be,pt %icc, dest_is_64byte_aligned ! CTI
+#ifdef __KERNEL__
+ wr %g0, FPRS_FEF, %fprs ! FPU Group
+do_dest_64byte_align:
+ mov 64, %g1 ! IEU0 Group
+#else
+ mov 64, %g1 ! IEU0 Group
+do_dest_64byte_align:
+#endif
+ fmovd %f0, %f2 ! FPU
+ sub %g1, %g5, %g5 ! IEU0 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ alignaddr %o1, %g0, %g1 ! GRU Group
+ EXO2(LDDF [%g1] ASINORMAL, %f4) ! Load Group
+ sub %o2, %g5, %o2 ! IEU0
+1: EX(LDDF [%g1 + 0x8] ASINORMAL, %f6,
+ add %o2, %g5) ! Load Group
+ add %g1, 0x8, %g1 ! IEU0 Group
+ subcc %g5, 8, %g5 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f4, %f6, %f0 ! GRU Group
+ EX2(STDF %f0, [%o0] ASINORMAL,
+ add %g5, 8, %g5,
+ add %o2, %g5) ! Store
+ add %o1, 8, %o1 ! IEU0 Group
+ be,pn %xcc, dest_is_64byte_aligned ! CTI
+ add %o0, 8, %o0 ! IEU1
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDDF [%g1 + 0x8] ASINORMAL, %f4,
+ add %o2, %g5) ! Load Group
+ add %g1, 8, %g1 ! IEU0
+ subcc %g5, 8, %g5 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f6, %f4, %f0 ! GRU Group
+ EX2(STDF %f0, [%o0] ASINORMAL,
+ add %g5, 8, %g5,
+ add %o2, %g5) ! Store
+ add %o1, 8, %o1 ! IEU0
+ ASI_SETSRC_NOBLK ! LSU Group
+ bne,pt %xcc, 1b ! CTI Group
+ add %o0, 8, %o0 ! IEU0
+dest_is_64byte_aligned:
+ membar #LoadStore | #StoreStore | #StoreLoad ! LSU Group
+#ifndef __KERNEL__
+ wr %g0, ASI_BLK_P, %asi ! LSU Group
+#endif
+ subcc %o2, 0x40, %g7 ! IEU1 Group
+ mov %o1, %g1 ! IEU0
+ andncc %g7, (0x40 - 1), %g7 ! IEU1 Group
+ srl %g1, 3, %g2 ! IEU0
+ sub %o2, %g7, %g3 ! IEU0 Group
+ andn %o1, (0x40 - 1), %o1 ! IEU1
+ and %g2, 7, %g2 ! IEU0 Group
+ andncc %g3, 0x7, %g3 ! IEU1
+ fmovd %f0, %f2 ! FPU
+ sub %g3, 0x10, %g3 ! IEU0 Group
+ sub %o2, %g7, %o2 ! IEU1
+ alignaddr %g1, %g0, %g0 ! GRU Group
+ add %g1, %g7, %g1 ! IEU0 Group
+ subcc %o2, %g3, %o2 ! IEU1
+ ASI_SETSRC_BLK ! LSU Group
+ EXVIS1(LDBLK [%o1 + 0x00] ASIBLK, %f0) ! LSU Group
+ add %g1, %g3, %g1 ! IEU0
+ EXVIS1(LDBLK [%o1 + 0x40] ASIBLK, %f16) ! LSU Group
+ sub %g7, 0x80, %g7 ! IEU0
+ EXVIS(LDBLK [%o1 + 0x80] ASIBLK, %f32) ! LSU Group
+ ! Clk1 Group 8-(
+ ! Clk2 Group 8-(
+ ! Clk3 Group 8-(
+ ! Clk4 Group 8-(
+vispc: rd %pc, %g5 ! PDU Group 8-(
+ addcc %g5, %lo(vis00 - vispc), %g5 ! IEU1 Group
+ sll %g2, 9, %g2 ! IEU0
+ jmpl %g5 + %g2, %g0 ! CTI Group brk forced
+ addcc %o1, 0xc0, %o1 ! IEU1 Group
+ .align 512 /* OK, here comes the fun part... */
+vis00:FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) LOOP_CHUNK1(o1, o0, g7, vis01)
+ FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) LOOP_CHUNK2(o1, o0, g7, vis02)
+ FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) LOOP_CHUNK3(o1, o0, g7, vis03)
+ b,pt %xcc, vis00+4; faligndata %f0, %f2, %f48
+vis01:FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_JUMP(o0, f48, finish_f0) membar #Sync
+vis02:FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_JUMP(o0, f48, check_finish_f16) add %o2, %g3, %g7
+vis03:FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32) STORE_JUMP(o0, f48, finish_f32) membar #Sync
+ VISLOOP_PAD
+vis10:FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) LOOP_CHUNK1(o1, o0, g7, vis11)
+ FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) LOOP_CHUNK2(o1, o0, g7, vis12)
+ FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) LOOP_CHUNK3(o1, o0, g7, vis13)
+ b,pt %xcc, vis10+4; faligndata %f2, %f4, %f48
+vis11:FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_JUMP(o0, f48, finish_f2) membar #Sync
+vis12:FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_JUMP(o0, f48, finish_f18) membar #Sync
+vis13:FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34) STORE_JUMP(o0, f48, finish_f34) membar #Sync
+ VISLOOP_PAD
+vis20:FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) LOOP_CHUNK1(o1, o0, g7, vis21)
+ FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) LOOP_CHUNK2(o1, o0, g7, vis22)
+ FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) LOOP_CHUNK3(o1, o0, g7, vis23)
+ b,pt %xcc, vis20+4; faligndata %f4, %f6, %f48
+vis21:FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_JUMP(o0, f48, finish_f4) membar #Sync
+vis22:FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_JUMP(o0, f48, finish_f20) membar #Sync
+vis23:FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36) STORE_JUMP(o0, f48, finish_f36) membar #Sync
+ VISLOOP_PAD
+vis30:FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) LOOP_CHUNK1(o1, o0, g7, vis31)
+ FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) LOOP_CHUNK2(o1, o0, g7, vis32)
+ FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) LOOP_CHUNK3(o1, o0, g7, vis33)
+ b,pt %xcc, vis30+4; faligndata %f6, %f8, %f48
+vis31:FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_JUMP(o0, f48, finish_f6) membar #Sync
+vis32:FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_JUMP(o0, f48, finish_f22) membar #Sync
+vis33:FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38) STORE_JUMP(o0, f48, finish_f38) membar #Sync
+ VISLOOP_PAD
+vis40:FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) LOOP_CHUNK1(o1, o0, g7, vis41)
+ FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) LOOP_CHUNK2(o1, o0, g7, vis42)
+ FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) LOOP_CHUNK3(o1, o0, g7, vis43)
+ b,pt %xcc, vis40+4; faligndata %f8, %f10, %f48
+vis41:FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_JUMP(o0, f48, finish_f8) membar #Sync
+vis42:FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_JUMP(o0, f48, finish_f24) membar #Sync
+vis43:FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40) STORE_JUMP(o0, f48, finish_f40) membar #Sync
+ VISLOOP_PAD
+vis50:FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) LOOP_CHUNK1(o1, o0, g7, vis51)
+ FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) LOOP_CHUNK2(o1, o0, g7, vis52)
+ FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) LOOP_CHUNK3(o1, o0, g7, vis53)
+ b,pt %xcc, vis50+4; faligndata %f10, %f12, %f48
+vis51:FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_JUMP(o0, f48, finish_f10) membar #Sync
+vis52:FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_JUMP(o0, f48, finish_f26) membar #Sync
+vis53:FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42) STORE_JUMP(o0, f48, finish_f42) membar #Sync
+ VISLOOP_PAD
+vis60:FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) LOOP_CHUNK1(o1, o0, g7, vis61)
+ FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) LOOP_CHUNK2(o1, o0, g7, vis62)
+ FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) LOOP_CHUNK3(o1, o0, g7, vis63)
+ b,pt %xcc, vis60+4; faligndata %f12, %f14, %f48
+vis61:FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_JUMP(o0, f48, finish_f12) membar #Sync
+vis62:FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_JUMP(o0, f48, finish_f28) membar #Sync
+vis63:FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44) STORE_JUMP(o0, f48, finish_f44) membar #Sync
+ VISLOOP_PAD
+vis70:FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) LOOP_CHUNK1(o1, o0, g7, vis71)
+ FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) LOOP_CHUNK2(o1, o0, g7, vis72)
+ FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) LOOP_CHUNK3(o1, o0, g7, vis73)
+ b,pt %xcc, vis70+4; faligndata %f14, %f16, %f48
+vis71:FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_JUMP(o0, f48, finish_f14) membar #Sync
+vis72:FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_JUMP(o0, f48, finish_f30) membar #Sync
+vis73:FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30) STORE_SYNC(o0, f48) membar #Sync
+ FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46) STORE_JUMP(o0, f48, finish_f46) membar #Sync
+ VISLOOP_PAD
+finish_f0: FINISH_VISCHUNK(o0, f0, f2, g3)
+finish_f2: FINISH_VISCHUNK(o0, f2, f4, g3)
+finish_f4: FINISH_VISCHUNK(o0, f4, f6, g3)
+finish_f6: FINISH_VISCHUNK(o0, f6, f8, g3)
+finish_f8: FINISH_VISCHUNK(o0, f8, f10, g3)
+finish_f10: FINISH_VISCHUNK(o0, f10, f12, g3)
+finish_f12: FINISH_VISCHUNK(o0, f12, f14, g3)
+finish_f14: UNEVEN_VISCHUNK(o0, f14, f0, g3)
+/* This is a special hack to speed up 8K page copy */
+check_finish_f16:
+ andcc %g1, 7, %g0
+ bne,pn %icc, finish_f16
+ cmp %g7, 0x40
+ bne,pn %icc, finish_f16
+ FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
+ membar #Sync
+ EXVIS1(STBLK %f48, [%o0] ASIBLK)
+ b,pt %xcc, vis_ret
+finish_f16: membar #Sync
+ FINISH_VISCHUNK(o0, f16, f18, g3)
+finish_f18: FINISH_VISCHUNK(o0, f18, f20, g3)
+finish_f20: FINISH_VISCHUNK(o0, f20, f22, g3)
+finish_f22: FINISH_VISCHUNK(o0, f22, f24, g3)
+finish_f24: FINISH_VISCHUNK(o0, f24, f26, g3)
+finish_f26: FINISH_VISCHUNK(o0, f26, f28, g3)
+finish_f28: FINISH_VISCHUNK(o0, f28, f30, g3)
+finish_f30: UNEVEN_VISCHUNK(o0, f30, f0, g3)
+finish_f32: FINISH_VISCHUNK(o0, f32, f34, g3)
+finish_f34: FINISH_VISCHUNK(o0, f34, f36, g3)
+finish_f36: FINISH_VISCHUNK(o0, f36, f38, g3)
+finish_f38: FINISH_VISCHUNK(o0, f38, f40, g3)
+finish_f40: FINISH_VISCHUNK(o0, f40, f42, g3)
+finish_f42: FINISH_VISCHUNK(o0, f42, f44, g3)
+finish_f44: FINISH_VISCHUNK(o0, f44, f46, g3)
+finish_f46: UNEVEN_VISCHUNK(o0, f46, f0, g3)
+vis_slk:ASI_SETSRC_NOBLK ! LSU Group
+ EXVIS4(LDDF [%o1] ASINORMAL, %f2) ! Load Group
+ add %o1, 8, %o1 ! IEU0
+ subcc %g3, 8, %g3 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f0, %f2, %f8 ! GRU Group
+ EXVIS5(STDF %f8, [%o0] ASINORMAL) ! Store
+ bl,pn %xcc, vis_out ! CTI
+ add %o0, 8, %o0 ! IEU0 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EXVIS4(LDDF [%o1] ASINORMAL, %f0) ! Load Group
+ add %o1, 8, %o1 ! IEU0
+ subcc %g3, 8, %g3 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f2, %f0, %f8 ! GRU Group
+ EXVIS5(STDF %f8, [%o0] ASINORMAL) ! Store
+ bge,pt %xcc, vis_slk ! CTI
+ add %o0, 8, %o0 ! IEU0 Group
+vis_out:brz,pt %o2, vis_ret ! CTI Group
+ mov %g1, %o1 ! IEU0
+vis_slp:ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %o2, 1, %o2 ! IEU1
+ bne,pt %xcc, vis_slp ! CTI
+ EX(STB %g5, [%o0 - 1] ASINORMAL,
+ add %o2, 1) ! Store Group
+vis_ret:membar #StoreLoad | #StoreStore ! LSU Group
+ FPU_CLEAN_RETL
+
+
+/*
+ * __memcpy_short: tail path for copies of 1..15 bytes
+ * (%o1 = src, %o0 = dst, %o2 = byte count).  Peels one byte when the
+ * count is odd, then moves two bytes per iteration.  The EX()/EXO2()
+ * wrappers appear to register fault-fixup entries (see the .fixup
+ * section below) so this path can also back the user-copy variants.
+ * NOTE(review): assumes %o2 != 0 on entry -- confirm callers never
+ * dispatch a zero-length copy here.
+ */
+__memcpy_short:
+ andcc %o2, 1, %g0 ! IEU1 Group
+ be,pt %icc, 2f ! CTI
+1: ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD Group
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %o2, 1, %o2 ! IEU1 Group
+ be,pn %xcc, short_ret ! CTI
+ EX(STB %g5, [%o0 - 1] ASINORMAL,
+ add %o2, 1) ! Store
+2: ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD Group
+ add %o0, 2, %o0 ! IEU0
+ EXO2(LDUB [%o1 + 1] ASINORMAL, %o5) ! LOAD Group
+ add %o1, 2, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %o2, 2, %o2 ! IEU1 Group
+ EX(STB %g5, [%o0 - 2] ASINORMAL,
+ add %o2, 2) ! Store
+ bne,pt %xcc, 2b ! CTI
+ EX(STB %o5, [%o0 - 1] ASINORMAL,
+ add %o2, 1) ! Store
+short_ret:
+ NORMAL_RETL
+
+/*
+ * memcpy entry and dispatch.  Copies of <= 15 bytes go to
+ * __memcpy_short; copies of >= 384 bytes (64 * 6) go to the VIS
+ * block-copy code (VIS_enter, earlier in this file).  Otherwise,
+ * if src and dst are mutually 4-byte aligned, fall through to the
+ * word/doubleword copy below after byte/halfword/word-aligning the
+ * source; a relatively misaligned pair goes to
+ * memcpy_noVIS_misaligned.
+ */
+#ifndef __KERNEL__
+memcpy_private:
+memcpy:
+#ifndef REGS_64BIT
+ srl %o2, 0, %o2 ! IEU1 Group
+#endif
+ brz,pn %o2, short_ret ! CTI Group
+ mov %o0, %g6 ! IEU0
+#endif
+__memcpy_entry:
+ cmp %o2, 15 ! IEU1 Group
+ bleu,pn %xcc, __memcpy_short ! CTI
+ cmp %o2, (64 * 6) ! IEU1 Group
+ bgeu,pn %xcc, VIS_enter ! CTI
+#ifdef __KERNEL__
+__memcpy_16plus:
+#endif
+ andcc %o0, 7, %g2 ! IEU1 Group
+ sub %o0, %o1, %g5 ! IEU0
+ andcc %g5, 3, %o5 ! IEU1 Group
+ bne,pn %xcc, memcpy_noVIS_misaligned ! CTI
+ andcc %o1, 3, %g0 ! IEU1 Group
+#ifdef REGS_64BIT
+ be,a,pt %xcc, 3f ! CTI
+ andcc %o1, 4, %g0 ! IEU1 Group
+ andcc %o1, 1, %g0 ! IEU1 Group
+#else /* !REGS_64BIT */
+ be,pt %xcc, 5f ! CTI
+ andcc %o1, 1, %g0 ! IEU1 Group
+#endif /* !REGS_64BIT */
+ be,pn %xcc, 4f ! CTI
+ andcc %o1, 2, %g0 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUB [%o1] ASINORMAL, %g2) ! Load Group
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ sub %o2, 1, %o2 ! IEU0 Group
+ ASI_SETDST_NOBLK ! LSU Group
+ bne,pn %xcc, 5f ! CTI Group
+ EX(STB %g2, [%o0 - 1] ASINORMAL,
+ add %o2, 1) ! Store
+4: ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUH [%o1] ASINORMAL, %g2) ! Load Group
+ add %o1, 2, %o1 ! IEU0
+ add %o0, 2, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ sub %o2, 2, %o2 ! IEU0
+ EX(STH %g2, [%o0 - 2] ASINORMAL,
+ add %o2, 2) ! Store Group + bubble
+#ifdef REGS_64BIT
+5: andcc %o1, 4, %g0 ! IEU1
+3: be,a,pn %xcc, 2f ! CTI
+ andcc %o2, -128, %g7 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EXO2(LDUW [%o1] ASINORMAL, %g5) ! Load Group
+ add %o1, 4, %o1 ! IEU0
+ add %o0, 4, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ sub %o2, 4, %o2 ! IEU0 Group
+ EX(STW %g5, [%o0 - 4] ASINORMAL,
+ add %o2, 4) ! Store
+ andcc %o2, -128, %g7 ! IEU1 Group
+2: be,pn %xcc, 3f ! CTI
+ andcc %o0, 4, %g0 ! IEU1 Group
+ be,pn %xcc, 82f + 4 ! CTI Group
+#else /* !REGS_64BIT */
+5: andcc %o2, -128, %g7 ! IEU1
+ be,a,pn %xcc, 41f ! CTI
+ andcc %o2, 0x70, %g7 ! IEU1 Group
+#endif /* !REGS_64BIT */
+/*
+ * Mutually word-aligned bulk copy: 128 bytes per iteration via four
+ * MOVE_BIGCHUNK expansions (%g7 holds the 128-byte-rounded count).
+ * The 79: sequence then computes a jump into the MOVE_LASTCHUNK
+ * ladder (rd %pc + jmpl, offset scaled by the per-16-byte expansion
+ * size -- hence the different scaling under __KERNEL__, where the
+ * EX-wrapped chunks are presumably larger) for the remaining
+ * (len & 0x70) bytes, and the 80:/81:/1: tail mops up 8/4/2/1 bytes.
+ * EXT() records fixup ranges for the .fixup handlers below.
+ */
+5: MOVE_BIGCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+ MOVE_BIGCHUNK(o1, o0, 0x20, g1, g3, g5, o5)
+ MOVE_BIGCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+ MOVE_BIGCHUNK(o1, o0, 0x60, g1, g3, g5, o5)
+ EXT(5b,35f,VIScopyfixup1)
+35: subcc %g7, 128, %g7 ! IEU1 Group
+ add %o1, 128, %o1 ! IEU0
+ bne,pt %xcc, 5b ! CTI
+ add %o0, 128, %o0 ! IEU0 Group
+3: andcc %o2, 0x70, %g7 ! IEU1 Group
+41: be,pn %xcc, 80f ! CTI
+ andcc %o2, 8, %g0 ! IEU1 Group
+ ! Clk1 8-(
+ ! Clk2 8-(
+ ! Clk3 8-(
+ ! Clk4 8-(
+79: rd %pc, %o5 ! PDU Group
+#ifdef __KERNEL__
+ sll %g7, 1, %g5 ! IEU0 Group
+ add %o1, %g7, %o1 ! IEU1
+ srl %g7, 1, %g2 ! IEU0 Group
+ sub %o5, %g5, %o5 ! IEU1
+ sub %o5, %g2, %o5 ! IEU0 Group
+ jmpl %o5 + %lo(80f - 79b), %g0 ! CTI Group brk forced
+ add %o0, %g7, %o0 ! IEU0 Group
+#else
+ sll %g7, 1, %g5 ! IEU0 Group
+ add %o1, %g7, %o1 ! IEU1
+ sub %o5, %g5, %o5 ! IEU0 Group
+ jmpl %o5 + %lo(80f - 79b), %g0 ! CTI Group brk forced
+ add %o0, %g7, %o0 ! IEU0 Group
+#endif
+36: MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g5, o5)
+ MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g5, o5)
+ EXT(36b,80f,VIScopyfixup2)
+80: be,pt %xcc, 81f ! CTI
+ andcc %o2, 4, %g0 ! IEU1
+#ifdef REGS_64BIT
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDX [%o1] ASINORMAL, %g2,
+ and %o2, 0xf) ! Load Group
+ add %o0, 8, %o0 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ EX(STW %g2, [%o0 - 0x4] ASINORMAL,
+ and %o2, 0xf) ! Store Group
+ add %o1, 8, %o1 ! IEU1
+ srlx %g2, 32, %g2 ! IEU0 Group
+ EX2(STW %g2, [%o0 - 0x8] ASINORMAL,
+ and %o2, 0xf, %o2,
+ sub %o2, 4) ! Store
+#else /* !REGS_64BIT */
+ lduw [%o1], %g2 ! Load Group
+ add %o0, 8, %o0 ! IEU0
+ lduw [%o1 + 0x4], %g3 ! Load Group
+ add %o1, 8, %o1 ! IEU0
+ stw %g2, [%o0 - 0x8] ! Store Group
+ stw %g3, [%o0 - 0x4] ! Store Group
+#endif /* !REGS_64BIT */
+81: be,pt %xcc, 1f ! CTI
+ andcc %o2, 2, %g0 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUW [%o1] ASINORMAL, %g2,
+ and %o2, 0x7) ! Load Group
+ add %o1, 4, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ EX(STW %g2, [%o0] ASINORMAL,
+ and %o2, 0x7) ! Store Group
+ add %o0, 4, %o0 ! IEU0
+1: be,pt %xcc, 1f ! CTI
+ andcc %o2, 1, %g0 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUH [%o1] ASINORMAL, %g2,
+ and %o2, 0x3) ! Load Group
+ add %o1, 2, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ EX(STH %g2, [%o0] ASINORMAL,
+ and %o2, 0x3) ! Store Group
+ add %o0, 2, %o0 ! IEU0
+1: be,pt %xcc, normal_retl ! CTI
+ nop ! IEU1
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUB [%o1] ASINORMAL, %g2,
+ add %g0, 1) ! Load Group
+ ASI_SETDST_NOBLK ! LSU Group
+ EX(STB %g2, [%o0] ASINORMAL,
+ add %g0, 1) ! Store Group + bubble
+normal_retl:
+ NORMAL_RETL
+
+/*
+ * REGS_64BIT variant for 8-byte mutually aligned copies: two
+ * MOVE_BIGALIGNCHUNK expansions move 128 bytes per iteration, then
+ * (as in the word-aligned path above) a computed rd %pc + jmpl
+ * enters the MOVE_LASTALIGNCHUNK ladder for the (len & 0x70)
+ * remainder, followed by an 8/4/2/1-byte tail.  The kernel/userland
+ * offset arithmetic differs because the EX-wrapped chunk expansions
+ * presumably differ in size -- TODO confirm against the macros in
+ * VIS.h.
+ */
+#ifdef REGS_64BIT
+82: MOVE_BIGALIGNCHUNK(o1, o0, 0x00, g1, g3, g5, o5)
+ MOVE_BIGALIGNCHUNK(o1, o0, 0x40, g1, g3, g5, o5)
+ EXT(82b,37f,VIScopyfixup3)
+37: subcc %g7, 128, %g7 ! IEU1 Group
+ add %o1, 128, %o1 ! IEU0
+ bne,pt %xcc, 82b ! CTI
+ add %o0, 128, %o0 ! IEU0 Group
+ andcc %o2, 0x70, %g7 ! IEU1
+ be,pn %xcc, 84f ! CTI
+ andcc %o2, 8, %g0 ! IEU1 Group
+ ! Clk1 8-(
+ ! Clk2 8-(
+ ! Clk3 8-(
+ ! Clk4 8-(
+83: rd %pc, %o5 ! PDU Group
+#ifdef __KERNEL__
+ srl %g7, 1, %g5 ! IEU0 Group
+ add %g7, %g5, %g5 ! IEU0 Group
+ add %o1, %g7, %o1 ! IEU1
+ sub %o5, %g5, %o5 ! IEU0 Group
+ jmpl %o5 + %lo(84f - 83b), %g0 ! CTI Group brk forced
+ add %o0, %g7, %o0 ! IEU0 Group
+#else
+ add %o1, %g7, %o1 ! IEU0 Group
+ sub %o5, %g7, %o5 ! IEU1
+ jmpl %o5 + %lo(84f - 83b), %g0 ! CTI Group brk forced
+ add %o0, %g7, %o0 ! IEU0 Group
+#endif
+38: MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
+ MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
+ EXT(38b,84f,VIScopyfixup4)
+84: be,pt %xcc, 85f ! CTI Group
+ andcc %o2, 4, %g0 ! IEU1
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDX [%o1] ASINORMAL, %g2,
+ and %o2, 0xf) ! Load Group
+ add %o1, 8, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ add %o0, 8, %o0 ! IEU0 Group
+ EX(STX %g2, [%o0 - 0x8] ASINORMAL,
+ and %o2, 0xf) ! Store
+85: be,pt %xcc, 1f ! CTI
+ andcc %o2, 2, %g0 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUW [%o1] ASINORMAL, %g2,
+ and %o2, 0x7) ! Load Group
+ add %o1, 4, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ add %o0, 4, %o0 ! IEU0 Group
+ EX(STW %g2, [%o0 - 0x4] ASINORMAL,
+ and %o2, 0x7) ! Store
+1: be,pt %xcc, 1f ! CTI
+ andcc %o2, 1, %g0 ! IEU1 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUH [%o1] ASINORMAL, %g2,
+ and %o2, 0x3) ! Load Group
+ add %o1, 2, %o1 ! IEU0
+ ASI_SETDST_NOBLK ! LSU Group
+ add %o0, 2, %o0 ! IEU0 Group
+ EX(STH %g2, [%o0 - 0x2] ASINORMAL,
+ and %o2, 0x3) ! Store
+1: be,pt %xcc, 1f ! CTI
+ nop ! IEU0 Group
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUB [%o1] ASINORMAL, %g2,
+ add %g0, 1) ! Load Group
+ ASI_SETDST_NOBLK ! LSU Group
+ EX(STB %g2, [%o0] ASINORMAL,
+ add %g0, 1) ! Store Group + bubble
+1: NORMAL_RETL
+#endif /* REGS_64BIT */
+
+/*
+ * Relatively misaligned copy without VIS block loads: first copy
+ * single bytes until the destination is 8-byte aligned, then use
+ * alignaddr/faligndata to shift doublewords out of the misaligned
+ * source, 8 bytes per store (two unrolled per loop).  Under
+ * __KERNEL__ the FPU is explicitly enabled via FPRS_FEF first.
+ * The final 0..7 byte tail is finished byte-by-byte by jumping to
+ * vis_slp above.
+ */
+memcpy_noVIS_misaligned:
+ brz,pt %g2, 2f ! CTI Group
+ mov 8, %g1 ! IEU0
+ sub %g1, %g2, %g2 ! IEU0 Group
+ sub %o2, %g2, %o2 ! IEU0 Group
+1: ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDUB [%o1] ASINORMAL, %g5,
+ add %o2, %g2) ! Load Group
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %g2, 1, %g2 ! IEU1 Group
+ bne,pt %xcc, 1b ! CTI
+ EX2(STB %g5, [%o0 - 1] ASINORMAL,
+ add %o2, %g2, %o2,
+ add %o2, 1) ! Store
+2:
+#ifdef __KERNEL__
+ wr %g0, FPRS_FEF, %fprs ! FPU Group
+#endif
+ andn %o2, 7, %g5 ! IEU0 Group
+ and %o2, 7, %o2 ! IEU1
+ fmovd %f0, %f2 ! FPU
+ ASI_SETSRC_NOBLK ! LSU Group
+ alignaddr %o1, %g0, %g1 ! GRU Group
+ EXO2(LDDF [%g1] ASINORMAL, %f4) ! Load Group
+1: EX(LDDF [%g1 + 0x8] ASINORMAL, %f6,
+ add %o2, %g5) ! Load Group
+ add %g1, 0x8, %g1 ! IEU0 Group
+ subcc %g5, 8, %g5 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f4, %f6, %f0 ! GRU Group
+ EX2(STDF %f0, [%o0] ASINORMAL,
+ add %o2, %g5, %o2,
+ add %o2, 8) ! Store
+ add %o1, 8, %o1 ! IEU0 Group
+ be,pn %xcc, end_cruft ! CTI
+ add %o0, 8, %o0 ! IEU1
+ ASI_SETSRC_NOBLK ! LSU Group
+ EX(LDDF [%g1 + 0x8] ASINORMAL, %f4,
+ add %o2, %g5) ! Load Group
+ add %g1, 8, %g1 ! IEU0
+ subcc %g5, 8, %g5 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ faligndata %f6, %f4, %f0 ! GRU Group
+ EX2(STDF %f0, [%o0] ASINORMAL,
+ add %o2, %g5, %o2,
+ add %o2, 8) ! Store
+ add %o1, 8, %o1 ! IEU0
+ ASI_SETSRC_NOBLK ! LSU Group
+ bne,pn %xcc, 1b ! CTI Group
+ add %o0, 8, %o0 ! IEU0
+end_cruft:
+ brz,pn %o2, fpu_retl ! CTI Group
+#ifndef __KERNEL__
+ nop ! IEU0
+#else
+ ASI_SETSRC_NOBLK ! LSU Group
+#endif
+ EXO2(LDUB [%o1] ASINORMAL, %g5) ! LOAD
+ add %o1, 1, %o1 ! IEU0
+ add %o0, 1, %o0 ! IEU1
+ ASI_SETDST_NOBLK ! LSU Group
+ subcc %o2, 1, %o2 ! IEU1
+ bne,pt %xcc, vis_slp ! CTI
+ EX(STB %g5, [%o0 - 1] ASINORMAL,
+ add %o2, 1) ! Store Group
+fpu_retl:
+ FPU_RETL
+
+/*
+ * Exception fixup handlers for the user-copy variants.  On a fault
+ * inside an unrolled chunk, each VIScopyfixup* entry apparently
+ * reconstructs the residual (not-copied) byte count into %o0: the
+ * rd %pc / ldub sequences index the small .byte tables that map
+ * "instructions into the chunk" to "bytes already stored".  All
+ * exits funnel through VIScopyfixup_ret, which releases the FPU
+ * (wr %g0, 0, %fprs) before returning.
+ */
+#ifdef __KERNEL__
+ .section .fixup
+ .align 4
+VIScopyfixup_reto2:
+ mov %o2, %o0
+VIScopyfixup_ret:
+ retl
+ wr %g0, 0, %fprs
+VIScopyfixup1: subcc %g2, 18, %g2
+ bgeu,a,pt %icc, VIScopyfixup1
+ sub %g7, 32, %g7
+ rd %pc, %g5
+ add %g2, 18, %g2
+ add %g2, 20, %g2
+ ldub [%g5 + %g2], %g2
+ ba,a,pt %xcc, 2f
+.byte 0, 0, 0, 0, 0, 0, 0, 4, 4, 8, 12, 12, 16, 20, 20, 24, 28, 28
+ .align 4
+VIScopyfixup2: mov (7 * 16), %g7
+1: subcc %g2, 10, %g2
+ bgeu,a,pt %icc, 1b
+ sub %g7, 16, %g7
+ rd %pc, %g5
+ add %g2, 10, %g2
+ add %g2, 20, %g2
+ ldub [%g5 + %g2], %g2
+ ba,a,pt %xcc, 4f
+.byte 0, 0, 0, 0, 0, 4, 4, 8, 12, 12
+ .align 4
+VIScopyfixup3: subcc %g2, 10, %g2
+ bgeu,a,pt %icc, VIScopyfixup3
+ sub %g7, 32, %g7
+ rd %pc, %g5
+ add %g2, 10, %g2
+ add %g2, 20, %g2
+ ldub [%g5 + %g2], %g2
+ ba,a,pt %xcc, 2f
+.byte 0, 0, 0, 0, 0, 0, 0, 8, 16, 24
+ .align 4
+2: and %g1, 0x7f, %g1
+ sub %g7, %g2, %g7
+ ba,pt %xcc, VIScopyfixup_ret
+ add %g7, %g1, %o0
+VIScopyfixup4: mov (7 * 16), %g7
+3: subcc %g2, 6, %g2
+ bgeu,a,pt %icc, 3b
+ sub %g7, 16, %g7
+ rd %pc, %g5
+ add %g2, 6, %g2
+ add %g2, 20, %g2
+ ldub [%g5 + %g2], %g2
+ ba,a,pt %xcc, 4f
+.byte 0, 0, 0, 0, 0, 8
+ .align 4
+4: and %g1, 7, %g1
+ ba,pt %xcc, VIScopyfixup_ret
+ add %g7, %g1, %o0
+VIScopyfixup_vis3:
+ sub %o2, 0x80, %o2
+VIScopyfixup_vis2:
+ add %o2, 0x40, %o2
+VIScopyfixup_vis0:
+ add %o2, 0x80, %o2
+VIScopyfixup_vis1:
+ add %g7, %g3, %g7
+ ba,pt %xcc, VIScopyfixup_ret
+ add %o2, %g7, %o0
+VIScopyfixup_vis5:
+ add %g3, 8, %g3
+VIScopyfixup_vis4:
+ add %g3, 8, %g3
+ ba,pt %xcc, VIScopyfixup_ret
+ add %o2, %g3, %o0
+#endif
+
+/*
+ * memmove: overlap-safe copy of %o2 bytes from %o1 to %o0.
+ * If dst < src, or [src, src+len) lies entirely below dst, the
+ * regions cannot clobber each other in a forward copy, so delegate
+ * to memcpy_private.  A genuine overlap falls through to a simple
+ * backward byte-at-a-time loop from the top of both buffers.
+ * NOTE(review): the backward path returns 0 (clr %o0), not the
+ * destination pointer as ANSI memmove would -- confirm in-kernel
+ * callers ignore the return value.
+ */
+#ifdef __KERNEL__
+ .text
+ .align 32
+
+ .globl __memmove
+ .type __memmove,@function
+
+ .globl memmove
+ .type memmove,@function
+
+memmove:
+__memmove: cmp %o0, %o1
+ blu,pt %xcc, memcpy_private
+ sub %o0, %o1, %g5
+ add %o1, %o2, %g3
+ cmp %g3, %o0
+ bleu,pt %xcc, memcpy_private
+ add %o1, %o2, %g5
+ add %o0, %o2, %o5
+
+ sub %g5, 1, %o1
+ sub %o5, 1, %o0
+1: ldub [%o1], %g5
+ subcc %o2, 1, %o2
+ sub %o1, 1, %o1
+ stb %g5, [%o0]
+ bne,pt %icc, 1b
+ sub %o0, 1, %o0
+
+ retl
+ clr %o0
+#endif
diff --git a/arch/sparc64/lib/VIScsum.S b/arch/sparc64/lib/VIScsum.S
new file mode 100644
index 000000000..1ccb98759
--- /dev/null
+++ b/arch/sparc64/lib/VIScsum.S
@@ -0,0 +1,436 @@
+/* $Id: VIScsum.S,v 1.1 1997/07/18 06:26:49 ralf Exp $
+ * VIScsum.S: High bandwidth IP checksumming utilizing the UltraSparc
+ * Visual Instruction Set.
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *
+ * Based on older sparc32/sparc64 checksum.S, which is:
+ *
+ * Copyright(C) 1995 Linus Torvalds
+ * Copyright(C) 1995 Miguel de Icaza
+ * Copyright(C) 1996,1997 David S. Miller
+ * derived from:
+ * Linux/Alpha checksum c-code
+ * Linux/ix86 inline checksum assembly
+ * RFC1071 Computing the Internet Checksum (esp. Jacobsons m68k code)
+ * David Mosberger-Tang for optimized reference c-code
+ * BSD4.4 portable checksum routine
+ */
+
+/* Offset of the scratch doubleword used by END_THE_TRICK to move a
+ * value from the FPU to the integer side.  On V9 this is presumably
+ * the 2047-byte stack bias plus the 128-byte register save area
+ * (= 2175) -- TODO confirm against the V9 ABI; 32-bit needs 64. */
+#ifdef __sparc_v9__
+#define STACKOFF 2175
+#else
+#define STACKOFF 64
+#endif
+
+#ifdef __KERNEL__
+#include <asm/head.h>
+#include <asm/asi.h>
+#else
+/* Userland builds lack the kernel headers, so define the constants
+ * used here directly.  FPRS_FEF (the FPRS "enable FPU" bit) was
+ * previously misspelled "FRPS_FEF", leaving the fallback dead. */
+#define ASI_BLK_P 0xf0
+#define FPRS_FEF 0x04
+#endif
+
+/* Dobrou noc, SunSoft engineers. Spete sladce.
+ * This has a couple of tricks in and those
+ * tricks are UltraLinux trade secrets :))
+ */
+
+/*
+ * START_THE_TRICK: prime the carry-folding pass over the first six
+ * data doubles.  %fz holds zero (callers fzero it first); each
+ * fcmpgt32 yields a 2-bit mask (one bit per 32-bit half), which the
+ * inc + srl-by-1 sequence converts to a popcount (0->0, 1->1, 2->1,
+ * 3->2) accumulated into %o2.  The last two counts (%g5, %g7, %o3)
+ * are deliberately left unfinished for DO_THE_TRICK to consume.
+ */
+#define START_THE_TRICK(fz,f0,f2,f4,f6,f8,f10) \
+ fcmpgt32 %fz, %f0, %g1 /* FPM Group */; \
+ fcmpgt32 %fz, %f2, %g2 /* FPM Group */; \
+ fcmpgt32 %fz, %f4, %g3 /* FPM Group */; \
+ fcmpgt32 %fz, %f6, %g5 /* FPM Group */; \
+ inc %g1 /* IEU0 */; \
+ fcmpgt32 %fz, %f8, %g7 /* FPM Group */; \
+ srl %g1, 1, %g1 /* IEU0 */; \
+ inc %g2 /* IEU1 */; \
+ fcmpgt32 %fz, %f10, %o3 /* FPM Group */; \
+ srl %g2, 1, %g2 /* IEU0 */; \
+ add %o2, %g1, %o2 /* IEU1 */; \
+ add %g3, 1, %g3 /* IEU0 Group */; \
+ srl %g3, 1, %g3 /* IEU0 Group */; \
+ add %o2, %g2, %o2 /* IEU1 */; \
+ inc %g5 /* IEU0 Group */; \
+ add %o2, %g3, %o2 /* IEU1 */;
+
+/*
+ * DO_THE_TRICK: one 64-byte pipeline stage.  Adds the eight data
+ * doubles f0..f14 into the running accumulators F0..F14 with
+ * fpadd32 while (a) finishing the deferred %O12/%O14 compares of
+ * the previous stage and (b) detecting each 32-bit wraparound via
+ * fcmpgt32 of addend vs. sum, folding the resulting counts into
+ * %o2 with the same inc/srl-1 popcount trick as above.  Like
+ * START_THE_TRICK it leaves its last few counts pending for the
+ * next DO/END stage.
+ */
+#define DO_THE_TRICK(O12,O14,f0,f2,f4,f6,f8,f10,f12,f14,F0,F2,F4,F6,F8,F10,F12,F14) \
+ fcmpgt32 %O12, %f12, %o4 /* FPM Group */; \
+ srl %g5, 1, %g5 /* IEU0 */; \
+ inc %g7 /* IEU1 */; \
+ fpadd32 %F0, %f0, %F0 /* FPA */; \
+ fcmpgt32 %O14, %f14, %o5 /* FPM Group */; \
+ srl %g7, 1, %g7 /* IEU0 */; \
+ add %o2, %g5, %o2 /* IEU1 */; \
+ fpadd32 %F2, %f2, %F2 /* FPA */; \
+ inc %o3 /* IEU0 Group */; \
+ add %o2, %g7, %o2 /* IEU1 */; \
+ fcmpgt32 %f0, %F0, %g1 /* FPM Group */; \
+ srl %o3, 1, %o3 /* IEU0 */; \
+ inc %o4 /* IEU1 */; \
+ fpadd32 %F4, %f4, %F4 /* FPA */; \
+ fcmpgt32 %f2, %F2, %g2 /* FPM Group */; \
+ srl %o4, 1, %o4 /* IEU0 */; \
+ add %o2, %o3, %o2 /* IEU1 */; \
+ fpadd32 %F6, %f6, %F6 /* FPA */; \
+ inc %o5 /* IEU0 Group */; \
+ add %o2, %o4, %o2 /* IEU1 */; \
+ fcmpgt32 %f4, %F4, %g3 /* FPM Group */; \
+ srl %o5, 1, %o5 /* IEU0 */; \
+ inc %g1 /* IEU1 */; \
+ fpadd32 %F8, %f8, %F8 /* FPA */; \
+ fcmpgt32 %f6, %F6, %g5 /* FPM Group */; \
+ srl %g1, 1, %g1 /* IEU0 */; \
+ add %o2, %o5, %o2 /* IEU1 */; \
+ fpadd32 %F10, %f10, %F10 /* FPA */; \
+ inc %g2 /* IEU0 Group */; \
+ add %o2, %g1, %o2 /* IEU1 */; \
+ fcmpgt32 %f8, %F8, %g7 /* FPM Group */; \
+ srl %g2, 1, %g2 /* IEU0 */; \
+ inc %g3 /* IEU1 */; \
+ fpadd32 %F12, %f12, %F12 /* FPA */; \
+ fcmpgt32 %f10, %F10, %o3 /* FPM Group */; \
+ srl %g3, 1, %g3 /* IEU0 */; \
+ add %o2, %g2, %o2 /* IEU1 */; \
+ fpadd32 %F14, %f14, %F14 /* FPA */; \
+ inc %g5 /* IEU0 Group */; \
+ add %o2, %g3, %o2 /* IEU1 */;
+
+/*
+ * END_THE_TRICK: final reduction of the checksum pipeline.  The
+ * eight accumulator doubles are folded pairwise with fpadd32 down
+ * to a single double %U0, with the same fcmpgt32-based wraparound
+ * counts added into %o2 -- except that the %fz (sign-bit) compares
+ * are *subtracted*, compensating for the signedness of fcmpgt32.
+ * %U0 is moved to the integer side through the stack scratch slot
+ * (std ... [%sp + STACKOFF]; ldx) and added with a final
+ * end-around carry (the bcs,a / add-1 at label 33).
+ */
+#define END_THE_TRICK(O12,O14,f0,f2,f4,f6,f8,f10,f12,f14,S0,S1,S2,S3,T0,T1,U0,fz) \
+ fcmpgt32 %O12, %f12, %o4 /* FPM Group */; \
+ srl %g5, 1, %g5 /* IEU0 */; \
+ inc %g7 /* IEU1 */; \
+ fpadd32 %f2, %f0, %S0 /* FPA */; \
+ fcmpgt32 %O14, %f14, %o5 /* FPM Group */; \
+ srl %g7, 1, %g7 /* IEU0 */; \
+ add %o2, %g5, %o2 /* IEU1 */; \
+ fpadd32 %f6, %f4, %S1 /* FPA */; \
+ inc %o3 /* IEU0 Group */; \
+ add %o2, %g7, %o2 /* IEU1 */; \
+ fcmpgt32 %f0, %S0, %g1 /* FPM Group */; \
+ srl %o3, 1, %o3 /* IEU0 */; \
+ inc %o4 /* IEU1 */; \
+ fpadd32 %f10, %f8, %S2 /* FPA */; \
+ fcmpgt32 %f4, %S1, %g2 /* FPM Group */; \
+ srl %o4, 1, %o4 /* IEU0 */; \
+ add %o2, %o3, %o2 /* IEU1 */; \
+ fpadd32 %f14, %f12, %S3 /* FPA */; \
+ inc %o5 /* IEU0 Group */; \
+ add %o2, %o4, %o2 /* IEU1 */; \
+ fzero %fz /* FPA */; \
+ fcmpgt32 %f8, %S2, %g3 /* FPM Group */; \
+ srl %o5, 1, %o5 /* IEU0 */; \
+ inc %g1 /* IEU1 */; \
+ fpadd32 %S0, %S1, %T0 /* FPA */; \
+ fcmpgt32 %f12, %S3, %g5 /* FPM Group */; \
+ srl %g1, 1, %g1 /* IEU0 */; \
+ add %o2, %o5, %o2 /* IEU1 */; \
+ fpadd32 %S2, %S3, %T1 /* FPA */; \
+ inc %g2 /* IEU0 Group */; \
+ add %o2, %g1, %o2 /* IEU1 */; \
+ fcmpgt32 %S0, %T0, %g7 /* FPM Group */; \
+ srl %g2, 1, %g2 /* IEU0 */; \
+ inc %g3 /* IEU1 */; \
+ fcmpgt32 %S2, %T1, %o3 /* FPM Group */; \
+ srl %g3, 1, %g3 /* IEU0 */; \
+ add %o2, %g2, %o2 /* IEU1 */; \
+ inc %g5 /* IEU0 Group */; \
+ add %o2, %g3, %o2 /* IEU1 */; \
+ fcmpgt32 %fz, %f2, %o4 /* FPM Group */; \
+ srl %g5, 1, %g5 /* IEU0 */; \
+ inc %g7 /* IEU1 */; \
+ fpadd32 %T0, %T1, %U0 /* FPA */; \
+ fcmpgt32 %fz, %f6, %o5 /* FPM Group */; \
+ srl %g7, 1, %g7 /* IEU0 */; \
+ add %o2, %g5, %o2 /* IEU1 */; \
+ inc %o3 /* IEU0 Group */; \
+ add %o2, %g7, %o2 /* IEU1 */; \
+ fcmpgt32 %fz, %f10, %g1 /* FPM Group */; \
+ srl %o3, 1, %o3 /* IEU0 */; \
+ inc %o4 /* IEU1 */; \
+ fcmpgt32 %fz, %f14, %g2 /* FPM Group */; \
+ srl %o4, 1, %o4 /* IEU0 */; \
+ add %o2, %o3, %o2 /* IEU1 */; \
+ std %U0, [%sp + STACKOFF] /* Store Group */; \
+ inc %o5 /* IEU0 */; \
+ sub %o2, %o4, %o2 /* IEU1 */; \
+ fcmpgt32 %fz, %S1, %g3 /* FPM Group */; \
+ srl %o5, 1, %o5 /* IEU0 */; \
+ inc %g1 /* IEU1 */; \
+ fcmpgt32 %fz, %S3, %g5 /* FPM Group */; \
+ srl %g1, 1, %g1 /* IEU0 */; \
+ sub %o2, %o5, %o2 /* IEU1 */; \
+ ldx [%sp + STACKOFF], %o5 /* Load Group */; \
+ inc %g2 /* IEU0 */; \
+ sub %o2, %g1, %o2 /* IEU1 */; \
+ fcmpgt32 %fz, %T1, %g7 /* FPM Group */; \
+ srl %g2, 1, %g2 /* IEU0 */; \
+ inc %g3 /* IEU1 */; \
+ fcmpgt32 %T0, %U0, %o3 /* FPM Group */; \
+ srl %g3, 1, %g3 /* IEU0 */; \
+ sub %o2, %g2, %o2 /* IEU1 */; \
+ inc %g5 /* IEU0 Group */; \
+ sub %o2, %g3, %o2 /* IEU1 */; \
+ fcmpgt32 %fz, %U0, %o4 /* FPM Group */; \
+ srl %g5, 1, %g5 /* IEU0 */; \
+ inc %g7 /* IEU1 */; \
+ srl %g7, 1, %g7 /* IEU0 Group */; \
+ sub %o2, %g5, %o2 /* IEU1 */; \
+ inc %o3 /* IEU0 Group */; \
+ sub %o2, %g7, %o2 /* IEU1 */; \
+ srl %o3, 1, %o3 /* IEU0 Group */; \
+ inc %o4 /* IEU1 */; \
+ srl %o4, 1, %o4 /* IEU0 Group */; \
+ add %o2, %o3, %o2 /* IEU1 */; \
+ sub %o2, %o4, %o2 /* IEU0 Group */; \
+ addcc %o2, %o5, %o2 /* IEU1 Group */; \
+ bcs,a,pn %xcc, 33f /* CTI */; \
+ add %o2, 1, %o2 /* IEU0 */; \
+33: /* That's it */;
+
+/* Fold one 16-byte chunk, ending "offset" bytes below %o0, into the
+ * checksum accumulator %o2 as two 64-bit adds with end-around carry
+ * (carry folded back in via the bcs,a / add-1 pairs). */
+#define CSUM_LASTCHUNK(offset) \
+ ldx [%o0 - offset - 0x10], %g2; \
+ ldx [%o0 - offset - 0x08], %g3; \
+ addcc %g2, %o2, %o2; \
+ bcs,a,pn %xcc, 31f; \
+ add %o2, 1, %o2; \
+31: addcc %g3, %o2, %o2; \
+ bcs,a,pn %xcc, 32f; \
+ add %o2, 1, %o2; \
+32:
+
+/*
+ * csum_partial(buf=%o0, len=%o1, sum=%o2): fold the buffer into the
+ * 32-bit one's-complement partial checksum; result returned in %o0.
+ * This first section word-aligns %o0 (2- then 4-byte steps, with
+ * end-around carries -- note the sll %g5,16 trick to add the carry
+ * of a halfword in the right bit position), then 8/16/32-byte steps
+ * up to a 64-byte boundary, and for len >= 0xc0 switches on the
+ * block-load ASI and (in the kernel) enables the FPU for the VIS
+ * loop that follows.
+ */
+ .text
+ .globl csum_partial
+ .align 32
+csum_partial:
+ andcc %o0, 7, %g0 /* IEU1 Group */
+ be,pt %icc, 4f /* CTI */
+ andcc %o0, 0x38, %g3 /* IEU1 */
+ mov 1, %g5 /* IEU0 Group */
+ cmp %o1, 6 /* IEU1 */
+ bl,pn %icc, 21f /* CTI */
+ andcc %o0, 2, %g0 /* IEU1 Group */
+ be,pt %icc, 1f /* CTI */
+ and %o0, 4, %g7 /* IEU0 */
+ lduh [%o0], %g2 /* Load */
+ sub %o1, 2, %o1 /* IEU0 Group */
+ add %o0, 2, %o0 /* IEU1 */
+ andcc %o0, 4, %g7 /* IEU1 Group */
+ sll %g5, 16, %g5 /* IEU0 */
+ sll %g2, 16, %g2 /* IEU0 Group */
+ addcc %g2, %o2, %o2 /* IEU1 Group (regdep) */
+ bcs,a,pn %icc, 1f /* CTI */
+ add %o2, %g5, %o2 /* IEU0 */
+1: ld [%o0], %g2 /* Load */
+ brz,a,pn %g7, 4f /* CTI+IEU1 Group */
+ and %o0, 0x38, %g3 /* IEU0 */
+ add %o0, 4, %o0 /* IEU0 Group */
+ sub %o1, 4, %o1 /* IEU1 */
+ addcc %g2, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %icc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: and %o0, 0x38, %g3 /* IEU1 Group */
+4: srl %o2, 0, %o2 /* IEU0 Group */
+ mov 0x40, %g1 /* IEU1 */
+ brz,pn %g3, 3f /* CTI+IEU1 Group */
+ sub %g1, %g3, %g1 /* IEU0 */
+ cmp %o1, 56 /* IEU1 Group */
+ blu,pn %icc, 20f /* CTI */
+ andcc %o0, 8, %g0 /* IEU1 Group */
+ be,pn %icc, 1f /* CTI */
+ ldx [%o0], %g2 /* Load */
+ add %o0, 8, %o0 /* IEU0 Group */
+ sub %o1, 8, %o1 /* IEU1 */
+ addcc %g2, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: andcc %g1, 0x10, %g0 /* IEU1 Group */
+ be,pn %icc, 2f /* CTI */
+ and %g1, 0x20, %g1 /* IEU0 */
+ ldx [%o0], %g2 /* Load */
+ ldx [%o0+8], %g3 /* Load Group */
+ add %o0, 16, %o0 /* IEU0 */
+ sub %o1, 16, %o1 /* IEU1 */
+ addcc %g2, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: addcc %g3, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 2f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+2: brz,pn %g1, 3f /* CTI+IEU1 Group */
+ ldx [%o0], %g2 /* Load */
+ ldx [%o0+8], %g3 /* Load Group */
+ ldx [%o0+16], %g5 /* Load Group */
+ ldx [%o0+24], %g7 /* Load Group */
+ add %o0, 32, %o0 /* IEU0 */
+ sub %o1, 32, %o1 /* IEU1 */
+ addcc %g2, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: addcc %g3, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: addcc %g5, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: addcc %g7, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 3f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+3: cmp %o1, 0xc0 /* IEU1 Group */
+ blu,pn %icc, 20f /* CTI */
+ sllx %o2, 32, %g1 /* IEU0 */
+ addcc %o2, %g1, %o2 /* IEU1 Group */
+ sub %o1, 0xc0, %o1 /* IEU0 */
+ wr %g0, ASI_BLK_P, %asi /* LSU Group */
+#ifdef __KERNEL__
+ wr %g0, FPRS_FEF, %fprs /* LSU Group */
+#endif
+/*
+ * VIS block loop: 256 bytes per iteration through four interleaved
+ * DO_THE_TRICK stages fed by 64-byte ldda block loads into
+ * %f0/%f16/%f32/%f48.  Entry phase (labels 6:/7:/8: vs. fallthrough)
+ * is chosen by len & 0xc0 so that the drain after the loop always
+ * ends on the same stage; the loop is finished with one more
+ * DO_THE_TRICK plus END_THE_TRICK after a membar #Sync, and the FPU
+ * is released again in the kernel case.
+ */
+ membar #StoreLoad /* LSU Group */
+ srlx %o2, 32, %o2 /* IEU0 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU1 */
+1: andcc %o1, 0x80, %g0 /* IEU1 Group */
+ bne,pn %icc, 7f /* CTI */
+ andcc %o1, 0x40, %g0 /* IEU1 Group */
+ be,pn %icc, 6f /* CTI */
+ fzero %f12 /* FPA */
+ fzero %f14 /* FPA Group */
+ ldda [%o0 + 0x000] %asi, %f16
+ ldda [%o0 + 0x040] %asi, %f32
+ ldda [%o0 + 0x080] %asi, %f48
+ START_THE_TRICK(f12,f16,f18,f20,f22,f24,f26)
+ ba,a,pt %xcc, 3f
+6: sub %o0, 0x40, %o0 /* IEU0 Group */
+ fzero %f28 /* FPA */
+ fzero %f30 /* FPA Group */
+ ldda [%o0 + 0x040] %asi, %f32
+ ldda [%o0 + 0x080] %asi, %f48
+ ldda [%o0 + 0x0c0] %asi, %f0
+ START_THE_TRICK(f28,f32,f34,f36,f38,f40,f42)
+ ba,a,pt %xcc, 4f
+7: bne,pt %icc, 8f /* CTI */
+ fzero %f44 /* FPA */
+ add %o0, 0x40, %o0 /* IEU0 Group */
+ fzero %f60 /* FPA */
+ fzero %f62 /* FPA Group */
+ ldda [%o0 - 0x040] %asi, %f0
+ ldda [%o0 + 0x000] %asi, %f16
+ ldda [%o0 + 0x040] %asi, %f32
+ START_THE_TRICK(f60,f0,f2,f4,f6,f8,f10)
+ ba,a,pt %xcc, 2f
+8: add %o0, 0x80, %o0 /* IEU0 Group */
+ fzero %f46 /* FPA */
+ ldda [%o0 - 0x080] %asi, %f48
+ ldda [%o0 - 0x040] %asi, %f0
+ ldda [%o0 + 0x000] %asi, %f16
+ START_THE_TRICK(f44,f48,f50,f52,f54,f56,f58)
+1: DO_THE_TRICK(f44,f46,f48,f50,f52,f54,f56,f58,f60,f62,f0,f2,f4,f6,f8,f10,f12,f14)
+ ldda [%o0 + 0x040] %asi, %f32
+2: DO_THE_TRICK(f60,f62,f0,f2,f4,f6,f8,f10,f12,f14,f16,f18,f20,f22,f24,f26,f28,f30)
+ ldda [%o0 + 0x080] %asi, %f48
+3: DO_THE_TRICK(f12,f14,f16,f18,f20,f22,f24,f26,f28,f30,f32,f34,f36,f38,f40,f42,f44,f46)
+ ldda [%o0 + 0x0c0] %asi, %f0
+4: DO_THE_TRICK(f28,f30,f32,f34,f36,f38,f40,f42,f44,f46,f48,f50,f52,f54,f56,f58,f60,f62)
+ add %o0, 0x100, %o0 /* IEU0 Group */
+ subcc %o1, 0x100, %o1 /* IEU1 */
+ bgeu,a,pt %icc, 1b /* CTI */
+ ldda [%o0 + 0x000] %asi, %f16
+ membar #Sync /* LSU Group */
+ DO_THE_TRICK(f44,f46,f48,f50,f52,f54,f56,f58,f60,f62,f0,f2,f4,f6,f8,f10,f12,f14)
+ END_THE_TRICK(f60,f62,f0,f2,f4,f6,f8,f10,f12,f14,f16,f18,f20,f22,f24,f26,f28,f30)
+ and %o1, 0x3f, %o1 /* IEU0 Group */
+#ifdef __KERNEL__
+ wr %g0, 0, %fprs /* LSU Group */
+#endif
+/*
+ * Tail handling.  20: dispatches the remaining 16-byte groups via a
+ * computed rd %pc + jmpl into the CSUM_LASTCHUNK ladder; 26: folds
+ * the final 0..15 bytes by assembling them into a single 64-bit
+ * value with byte-position-preserving shifts before adding; 25:
+ * folds the 64-bit accumulator to 32 bits (sllx/srlx + end-around
+ * carry) and returns it in %o0.  21: is the entry for very short
+ * buffers (len < 6) reached from the alignment code above, which
+ * assembles up to 5 leading bytes the same positional way.
+ */
+20: andcc %o1, 0xf0, %g1 /* IEU1 Group */
+ be,pn %icc, 23f /* CTI */
+ and %o1, 0xf, %o3 /* IEU0 */
+22: rd %pc, %g7 /* LSU Group */
+ sll %g1, 1, %o4 /* IEU0 Group */
+ sub %g7, %o4, %g7 /* IEU0 Group (regdep) */
+ jmpl %g7 + (23f - 22b), %g0 /* CTI Group brk forced */
+ add %o0, %g1, %o0 /* IEU0 */
+ CSUM_LASTCHUNK(0xe0)
+ CSUM_LASTCHUNK(0xd0)
+ CSUM_LASTCHUNK(0xc0)
+ CSUM_LASTCHUNK(0xb0)
+ CSUM_LASTCHUNK(0xa0)
+ CSUM_LASTCHUNK(0x90)
+ CSUM_LASTCHUNK(0x80)
+ CSUM_LASTCHUNK(0x70)
+ CSUM_LASTCHUNK(0x60)
+ CSUM_LASTCHUNK(0x50)
+ CSUM_LASTCHUNK(0x40)
+ CSUM_LASTCHUNK(0x30)
+ CSUM_LASTCHUNK(0x20)
+ CSUM_LASTCHUNK(0x10)
+ CSUM_LASTCHUNK(0x00)
+23: brnz,pn %o3, 26f /* CTI+IEU1 Group */
+24: sllx %o2, 32, %g1 /* IEU0 */
+25: addcc %o2, %g1, %o0 /* IEU1 Group */
+ srlx %o0, 32, %o0 /* IEU0 Group (regdep) */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o0, 1, %o0 /* IEU1 */
+1: retl /* CTI Group brk forced */
+ srl %o0, 0, %o0 /* IEU0 */
+26: andcc %o1, 8, %g0 /* IEU1 Group */
+ be,pn %icc, 1f /* CTI */
+ ldx [%o0], %g3 /* Load */
+ add %o0, 8, %o0 /* IEU0 Group */
+ addcc %g3, %o2, %o2 /* IEU1 Group */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: andcc %o1, 4, %g0 /* IEU1 Group */
+ be,a,pn %icc, 1f /* CTI */
+ clr %g2 /* IEU0 */
+ ld [%o0], %g2 /* Load */
+ add %o0, 4, %o0 /* IEU0 Group */
+ sllx %g2, 32, %g2 /* IEU0 Group */
+1: andcc %o1, 2, %g0 /* IEU1 */
+ be,a,pn %icc, 1f /* CTI */
+ clr %o4 /* IEU0 Group */
+ lduh [%o0], %o4 /* Load */
+ add %o0, 2, %o0 /* IEU1 */
+ sll %o4, 16, %o4 /* IEU0 Group */
+1: andcc %o1, 1, %g0 /* IEU1 */
+ be,a,pn %icc, 1f /* CTI */
+ clr %o5 /* IEU0 Group */
+ ldub [%o0], %o5 /* Load */
+ sll %o5, 8, %o5 /* IEU0 Group */
+1: or %g2, %o4, %o4 /* IEU1 */
+ or %o5, %o4, %o4 /* IEU0 Group (regdep) */
+ addcc %o4, %o2, %o2 /* IEU1 Group (regdep) */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: ba,pt %xcc, 25b /* CTI Group */
+ sllx %o2, 32, %g1 /* IEU0 */
+21: srl %o2, 0, %o2 /* IEU0 Group */
+ cmp %o1, 0 /* IEU1 */
+ be,pn %icc, 24b /* CTI */
+ andcc %o1, 4, %g0 /* IEU1 Group */
+ be,a,pn %icc, 1f /* CTI */
+ clr %g2 /* IEU0 */
+ lduh [%o0], %g3 /* Load */
+ lduh [%o0+2], %g2 /* Load Group */
+ add %o0, 4, %o0 /* IEU0 Group */
+ sllx %g3, 48, %g3 /* IEU0 Group */
+ sllx %g2, 32, %g2 /* IEU0 Group */
+ or %g3, %g2, %g2 /* IEU0 Group */
+1: andcc %o1, 2, %g0 /* IEU1 */
+ be,a,pn %icc, 1f /* CTI */
+ clr %o4 /* IEU0 Group */
+ lduh [%o0], %o4 /* Load */
+ add %o0, 2, %o0 /* IEU1 */
+ sll %o4, 16, %o4 /* IEU0 Group */
+1: andcc %o1, 1, %g0 /* IEU1 */
+ be,a,pn %icc, 1f /* CTI */
+ clr %o5 /* IEU0 Group */
+ ldub [%o0], %o5 /* Load */
+ sll %o5, 8, %o5 /* IEU0 Group */
+1: or %g2, %o4, %o4 /* IEU1 */
+ or %o5, %o4, %o4 /* IEU0 Group (regdep) */
+ addcc %o4, %o2, %o2 /* IEU1 Group (regdep) */
+ bcs,a,pn %xcc, 1f /* CTI */
+ add %o2, 1, %o2 /* IEU0 */
+1: ba,pt %xcc, 25b /* CTI Group */
+ sllx %o2, 32, %g1 /* IEU0 */
diff --git a/arch/sparc64/lib/VISmemset.S b/arch/sparc64/lib/VISmemset.S
new file mode 100644
index 000000000..d674f2a6e
--- /dev/null
+++ b/arch/sparc64/lib/VISmemset.S
@@ -0,0 +1,228 @@
+/* $Id: VISmemset.S,v 1.1 1997/07/18 06:26:49 ralf Exp $
+ * VISmemset.S: High speed memset operations utilizing the UltraSparc
+ * Visual Instruction Set.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include "VIS.h"
+
+#ifdef REGS_64BIT
+#define SET_BLOCKS(base, offset, source) \
+ stx source, [base - offset - 0x18]; \
+ stx source, [base - offset - 0x10]; \
+ stx source, [base - offset - 0x08]; \
+ stx source, [base - offset - 0x00];
+#else
+#define SET_BLOCKS(base, offset, source) \
+ stw source, [base - offset - 0x18]; \
+ stw source, [base - offset - 0x14]; \
+ stw source, [base - offset - 0x10]; \
+ stw source, [base - offset - 0x0c]; \
+ stw source, [base - offset - 0x08]; \
+ stw source, [base - offset - 0x04]; \
+ stw source, [base - offset - 0x00]; \
+ stw source, [base - offset + 0x04];
+#endif
+
+#ifndef __KERNEL__
+/* So that the brz,a,pt in memset doesn't have to get through PLT, here we go... */
+#include "VISbzero.S"
+#endif
+
+#ifdef __KERNEL__
+#define RETL clr %o0
+#else
+#define RETL mov %g3, %o0
+#endif
+
+ /* Well, memset is a lot easier to get right than bcopy... */
+ .text
+ .align 32
+#ifdef __KERNEL__
+ .globl __memset
+__memset:
+#endif
+ .globl memset
+memset:
+#ifndef __KERNEL__
+ brz,a,pt %o1, bzero_private
+ mov %o2, %o1
+#ifndef REGS_64BIT
+ srl %o2, 0, %o2
+#endif
+ mov %o0, %g3
+#endif
+ cmp %o2, 7
+ bleu,pn %xcc, 17f
+ andcc %o0, 3, %g5
+ be,pt %xcc, 4f
+ and %o1, 0xff, %o1
+ cmp %g5, 3
+ be,pn %xcc, 2f
+ stb %o1, [%o0 + 0x00]
+ cmp %g5, 2
+ be,pt %xcc, 2f
+ stb %o1, [%o0 + 0x01]
+ stb %o1, [%o0 + 0x02]
+2: sub %g5, 4, %g5
+ sub %o0, %g5, %o0
+ add %o2, %g5, %o2
+4: sllx %o1, 8, %g1
+ andcc %o0, 4, %g0
+ or %o1, %g1, %o1
+ sllx %o1, 16, %g1
+ or %o1, %g1, %o1
+ be,pt %xcc, 2f
+#ifdef REGS_64BIT
+ sllx %o1, 32, %g1
+#else
+ cmp %o2, 128
+#endif
+ stw %o1, [%o0]
+ sub %o2, 4, %o2
+ add %o0, 4, %o0
+2:
+#ifdef REGS_64BIT
+ cmp %o2, 128
+ or %o1, %g1, %o1
+#endif
+ blu,pn %xcc, 9f
+ andcc %o0, 0x38, %g5
+ be,pn %icc, 6f
+ mov 64, %o5
+ andcc %o0, 8, %g0
+ be,pn %icc, 1f
+ sub %o5, %g5, %o5
+#ifdef REGS_64BIT
+ stx %o1, [%o0]
+#else
+ stw %o1, [%o0]
+ stw %o1, [%o0 + 4]
+#endif
+ add %o0, 8, %o0
+1: andcc %o5, 16, %g0
+ be,pn %icc, 1f
+ sub %o2, %o5, %o2
+#ifdef REGS_64BIT
+ stx %o1, [%o0]
+ stx %o1, [%o0 + 8]
+#else
+ stw %o1, [%o0]
+ stw %o1, [%o0 + 4]
+ stw %o1, [%o0 + 8]
+ stw %o1, [%o0 + 12]
+#endif
+ add %o0, 16, %o0
+1: andcc %o5, 32, %g0
+ be,pn %icc, 7f
+ andncc %o2, 0x3f, %o3
+#ifdef REGS_64BIT
+ stx %o1, [%o0]
+ stx %o1, [%o0 + 8]
+ stx %o1, [%o0 + 16]
+ stx %o1, [%o0 + 24]
+#else
+ stw %o1, [%o0]
+ stw %o1, [%o0 + 4]
+ stw %o1, [%o0 + 8]
+ stw %o1, [%o0 + 12]
+ stw %o1, [%o0 + 16]
+ stw %o1, [%o0 + 20]
+ stw %o1, [%o0 + 24]
+ stw %o1, [%o0 + 28]
+#endif
+ add %o0, 32, %o0
+7: be,pn %xcc, 9f
+#ifdef __KERNEL__
+ wr %g0, FPRS_FEF, %fprs
+#endif
+ ldd [%o0 - 8], %f0
+18: wr %g0, ASI_BLK_P, %asi
+ membar #StoreStore | #LoadStore
+ andcc %o3, 0xc0, %g5
+ and %o2, 0x3f, %o2
+ fmovd %f0, %f2
+ fmovd %f0, %f4
+ andn %o3, 0xff, %o3
+ fmovd %f0, %f6
+ cmp %g5, 64
+ fmovd %f0, %f8
+ fmovd %f0, %f10
+ fmovd %f0, %f12
+ brz,pn %g5, 10f
+ fmovd %f0, %f14
+ be,pn %icc, 2f
+ stda %f0, [%o0 + 0x00] %asi
+ cmp %g5, 128
+ be,pn %icc, 2f
+ stda %f0, [%o0 + 0x40] %asi
+ stda %f0, [%o0 + 0x80] %asi
+2: brz,pn %o3, 12f
+ add %o0, %g5, %o0
+10: stda %f0, [%o0 + 0x00] %asi
+ stda %f0, [%o0 + 0x40] %asi
+ stda %f0, [%o0 + 0x80] %asi
+ stda %f0, [%o0 + 0xc0] %asi
+11: subcc %o3, 256, %o3
+ bne,pt %xcc, 10b
+ add %o0, 256, %o0
+12:
+#ifdef __KERNEL__
+ wr %g0, 0, %fprs
+#endif
+ membar #Sync
+9: andcc %o2, 0x78, %g5
+ be,pn %xcc, 13f
+ andcc %o2, 7, %o2
+14: rd %pc, %o4
+#ifdef REGS_64BIT
+ srl %g5, 1, %o3
+ sub %o4, %o3, %o4
+#else
+ sub %o4, %g5, %o4
+#endif
+ jmpl %o4 + (13f - 14b), %g0
+ add %o0, %g5, %o0
+12: SET_BLOCKS(%o0, 0x68, %o1)
+ SET_BLOCKS(%o0, 0x48, %o1)
+ SET_BLOCKS(%o0, 0x28, %o1)
+ SET_BLOCKS(%o0, 0x08, %o1)
+13: be,pn %xcc, 8f
+ andcc %o2, 4, %g0
+ be,pn %xcc, 1f
+ andcc %o2, 2, %g0
+ stw %o1, [%o0]
+ add %o0, 4, %o0
+1: be,pn %xcc, 1f
+ andcc %o2, 1, %g0
+ sth %o1, [%o0]
+ add %o0, 2, %o0
+1: bne,a,pn %xcc, 8f
+ stb %o1, [%o0]
+8: retl
+ RETL
+17: brz,pn %o2, 0f
+8: add %o0, 1, %o0
+ subcc %o2, 1, %o2
+ bne,pt %xcc, 8b
+ stb %o1, [%o0 - 1]
+0: retl
+ RETL
+6:
+#ifdef REGS_64BIT
+ stx %o1, [%o0]
+#else
+ stw %o1, [%o0]
+ stw %o1, [%o0 + 4]
+#endif
+ andncc %o2, 0x3f, %o3
+ be,pn %xcc, 9b
+#ifdef __KERNEL__
+ wr %g0, FPRS_FEF, %fprs
+#else
+ nop
+#endif
+ ba,pt %xcc, 18b
+ ldd [%o0], %f0
diff --git a/arch/sparc64/lib/blockops.S b/arch/sparc64/lib/blockops.S
index d0f023d1b..59083aa02 100644
--- a/arch/sparc64/lib/blockops.S
+++ b/arch/sparc64/lib/blockops.S
@@ -1,138 +1,70 @@
-/* $Id: blockops.S,v 1.6 1997/05/18 04:16:49 davem Exp $
+/* $Id: blockops.S,v 1.10 1997/06/24 17:29:10 jj Exp $
* arch/sparc64/lib/blockops.S: UltraSparc block zero optimized routines.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <asm/asi.h>
-
- /* Zero out 256 bytes of memory at (buf + offset). */
-#define BLAST_BLOCK(buf, offset) \
- stda %f48, [buf + offset + 0x00] %asi; \
- stda %f48, [buf + offset + 0x40] %asi; \
- stda %f48, [buf + offset + 0x80] %asi; \
- stda %f48, [buf + offset + 0xc0] %asi;
-
- /* Copy 256 bytes of memory at (src + offset) to
- * (dst + offset).
- */
-#define MIRROR_BLOCK(dst, src, offset, sync) \
- ldda [src + offset + 0x000] %asi, %f0; \
- ldda [src + offset + 0x040] %asi, %f16; \
- ldda [src + offset + 0x080] %asi, %f32; \
- ldda [src + offset + 0x0c0] %asi, %f48; \
- membar sync; \
- stda %f0, [dst + offset + 0x000] %asi; \
- stda %f16, [dst + offset + 0x040] %asi; \
- stda %f32, [dst + offset + 0x080] %asi; \
- stda %f48, [dst + offset + 0x0c0] %asi;
+#include "VIS.h"
.text
- .align 4
-
-#if 0
- .globl bzero_1page
-bzero_1page:
- /* %o0 = buf */
- mov %o0, %o1
- wr %g0, ASI_BLK_P, %asi
- mov 0x08, %g2
- membar #Sync|#StoreLoad
- fzero %f48
- fzero %f50
- fzero %f52
- fzero %f54
- fzero %f56
- fzero %f58
- fzero %f60
- fzero %f62
-1:
- BLAST_BLOCK(%o0, 0x000)
- BLAST_BLOCK(%o0, 0x100)
- BLAST_BLOCK(%o0, 0x200)
- BLAST_BLOCK(%o0, 0x300)
- subcc %g2, 1, %g2
- bne,pt %icc, 1b
- add %o0, 0x400, %o0
-
- membar #Sync|#LoadStore|#StoreStore
-
- retl
- mov %o1, %o0
-#endif
+ .align 32
.globl __bfill64
-__bfill64:
-#if 1
- /* %o0 = buf, %o1 = 64-bit pattern */
-#define FILL_BLOCK(buf, offset) \
- stx %o1, [buf + offset + 0x38]; \
- stx %o1, [buf + offset + 0x30]; \
- stx %o1, [buf + offset + 0x28]; \
- stx %o1, [buf + offset + 0x20]; \
- stx %o1, [buf + offset + 0x18]; \
- stx %o1, [buf + offset + 0x10]; \
- stx %o1, [buf + offset + 0x08]; \
- stx %o1, [buf + offset + 0x00];
+__bfill64: /* %o0 = buf, %o1= ptr to pattern */
+ wr %g0, FPRS_FEF, %fprs ! FPU Group
+ ldd [%o1], %f48 ! Load Group
+ wr %g0, ASI_BLK_P, %asi ! LSU Group
+ membar #StoreStore | #LoadStore ! LSU Group
+ mov 32, %g2 ! IEU0 Group
+
+	/* Cannot perform real arithmetic on the pattern, that can
+ * lead to fp_exception_other ;-)
+ */
+ fmovd %f48, %f50 ! FPA Group
+ fmovd %f48, %f52 ! FPA Group
+ fmovd %f48, %f54 ! FPA Group
+ fmovd %f48, %f56 ! FPA Group
+ fmovd %f48, %f58 ! FPA Group
+ fmovd %f48, %f60 ! FPA Group
+ fmovd %f48, %f62 ! FPA Group
- mov 0x20, %g2
-1:
- FILL_BLOCK(%o0, 0x00)
- FILL_BLOCK(%o0, 0x40)
- FILL_BLOCK(%o0, 0x80)
- FILL_BLOCK(%o0, 0xc0)
- subcc %g2, 1, %g2
- bne,pt %icc, 1b
- add %o0, 0x100, %o0
- retl
- nop
-#undef FILL_BLOCK
+1: stda %f48, [%o0 + 0x00] %asi ! Store Group
+ stda %f48, [%o0 + 0x40] %asi ! Store Group
+ stda %f48, [%o0 + 0x80] %asi ! Store Group
+ stda %f48, [%o0 + 0xc0] %asi ! Store Group
+ subcc %g2, 1, %g2 ! IEU1 Group
+ bne,pt %icc, 1b ! CTI
+ add %o0, 0x100, %o0 ! IEU0
+ membar #Sync ! LSU Group
-#else
- /* %o0 = buf */
- stx %o1, [%sp + 0x7ff + 128]
- wr %g0, ASI_BLK_P, %asi
- mov 0x08, %g2
- ldd [%sp + 0x7ff + 128], %f48
- membar #Sync|#StoreLoad
- fmovd %f48, %f50
- fmovd %f48, %f52
- fmovd %f48, %f54
- fmovd %f48, %f56
- fmovd %f48, %f58
- fmovd %f48, %f60
- fmovd %f48, %f62
-1:
- BLAST_BLOCK(%o0, 0x000)
- BLAST_BLOCK(%o0, 0x100)
- BLAST_BLOCK(%o0, 0x200)
- BLAST_BLOCK(%o0, 0x300)
- subcc %g2, 1, %g2
- bne,pt %icc, 1b
- add %o0, 0x400, %o0
+ jmpl %o7 + 0x8, %g0 ! CTI Group brk forced
+ wr %g0, 0, %fprs ! FPU Group
- retl
- membar #Sync|#LoadStore|#StoreStore
-#endif
+ .align 32
+ .globl __bzero_1page
+__bzero_1page:
+ wr %g0, FPRS_FEF, %fprs ! FPU Group
+ fzero %f0 ! FPA Group
+ mov 32, %g1 ! IEU0
+ fzero %f2 ! FPA Group
+ faddd %f0, %f2, %f4 ! FPA Group
+ fmuld %f0, %f2, %f6 ! FPM
+ faddd %f0, %f2, %f8 ! FPA Group
+ fmuld %f0, %f2, %f10 ! FPM
-#if 0
- .globl __copy_1page
-__copy_1page:
- /* %o0 = dst, %o1 = src */
- or %g0, 0x08, %g1
- wr %g0, ASI_BLK_P, %asi
- membar #Sync|#StoreLoad
-1:
- MIRROR_BLOCK(%o0, %o1, 0x000, #Sync)
- MIRROR_BLOCK(%o0, %o1, 0x100, #Sync)
- MIRROR_BLOCK(%o0, %o1, 0x200, #Sync)
- MIRROR_BLOCK(%o0, %o1, 0x300, #Sync)
- subcc %g1, 1, %g1
- add %o0, 0x400, %o0
- bne,pt %icc, 1b
- add %o1, 0x400, %o1
+ faddd %f0, %f2, %f12 ! FPA Group
+ fmuld %f0, %f2, %f14 ! FPM
+ wr %g0, ASI_BLK_P, %asi ! LSU Group
+ membar #StoreStore | #LoadStore ! LSU Group
+1: stda %f0, [%o0 + 0x00] %asi ! Store Group
+ stda %f0, [%o0 + 0x40] %asi ! Store Group
+ stda %f0, [%o0 + 0x80] %asi ! Store Group
+ stda %f0, [%o0 + 0xc0] %asi ! Store Group
- retl
- membar #Sync|#LoadStore|#StoreStore
-#endif
+ subcc %g1, 1, %g1 ! IEU1
+ bne,pt %icc, 1b ! CTI
+ add %o0, 0x100, %o0 ! IEU0 Group
+ membar #Sync ! LSU Group
+ jmpl %o7 + 0x8, %g0 ! CTI Group brk forced
+ wr %g0, 0, %fprs ! FPU Group
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
index 10eebb8df..703370fc6 100644
--- a/arch/sparc64/lib/checksum.S
+++ b/arch/sparc64/lib/checksum.S
@@ -17,383 +17,398 @@
#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/asi.h>
+#include <asm/page.h>
-#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
- ldd [buf + offset + 0x00], t0; \
- ldd [buf + offset + 0x08], t2; \
- addccc t0, sum, sum; \
- addccc t1, sum, sum; \
- ldd [buf + offset + 0x10], t4; \
- addccc t2, sum, sum; \
- addccc t3, sum, sum; \
- ldd [buf + offset + 0x18], t0; \
- addccc t4, sum, sum; \
- addccc t5, sum, sum; \
- addccc t0, sum, sum; \
- addccc t1, sum, sum;
-
-#define CSUM_LASTCHUNK(buf, offset, sum, t0, t1, t2, t3) \
- ldd [buf - offset - 0x08], t0; \
- ldd [buf - offset - 0x00], t2; \
- addccc t0, sum, sum; \
- addccc t1, sum, sum; \
- addccc t2, sum, sum; \
- addccc t3, sum, sum;
-
- /* Do end cruft out of band to get better cache patterns. */
-csum_partial_end_cruft:
- andcc %o1, 8, %g0 ! check how much
- be,pn %icc, 1f ! caller asks %o1 & 0x8
- and %o1, 4, %g5 ! nope, check for word remaining
- ldd [%o0], %g2 ! load two
- addcc %g2, %o2, %o2 ! add first word to sum
- addccc %g3, %o2, %o2 ! add second word as well
- add %o0, 8, %o0 ! advance buf ptr
- addc %g0, %o2, %o2 ! add in final carry
-1: brz,pn %g5, 1f ! nope, skip this code
- andcc %o1, 3, %o1 ! check for trailing bytes
- ld [%o0], %g2 ! load it
- addcc %g2, %o2, %o2 ! add to sum
- add %o0, 4, %o0 ! advance buf ptr
- addc %g0, %o2, %o2 ! add in final carry
-1: brz,pn %o1, 1f ! no trailing bytes, return
- addcc %o1, -1, %g0 ! only one byte remains?
- bne,pn %icc, 2f ! at least two bytes more
- subcc %o1, 2, %o1 ! only two bytes more?
- ba,pt %xcc, 4f ! only one byte remains
- clr %o4 ! clear fake hword value
-2: lduh [%o0], %o4 ! get hword
- be,pn %icc, 6f ! jmp if only hword remains
- add %o0, 2, %o0 ! advance buf ptr either way
- sll %o4, 16, %o4 ! create upper hword
-4: ldub [%o0], %o5 ! get final byte
- sll %o5, 8, %o5 ! put into place
- or %o5, %o4, %o4 ! coalese with hword (if any)
-6: addcc %o4, %o2, %o2 ! add to sum
-1: sllx %g4, 32, %g4 ! give gfp back
- addc %g0, %o2, %o0 ! add final carry into retval
- retl ! get outta here
- srl %o0, 0, %o0
-
- /* Also do alignment out of band to get better cache patterns. */
-csum_partial_fix_alignment:
-
- /* The common case is to get called with a nicely aligned
- * buffer of size 0x20. Follow the code path for that case.
+ /* The problem with the "add with carry" instructions on Ultra
+	 * is twofold. First, they cannot pair with other instructions,
+ * and also they only add in the 32-bit carry condition bit
+ * into the accumulated sum. The following is much better.
+ *
+ * This should run at max bandwidth for ecache hits, a better
+ * technique is to use VIS and fpu operations. This is already
+ * done for csum_partial, needs to be written for the copy stuff
+ * still.
*/
- .globl csum_partial
-csum_partial: /* %o0=buf, %o1=len, %o2=sum */
- srl %o1, 0, %o1 ! doof scheiss
- andcc %o0, 0x7, %g0 ! alignment problems?
- srl %o2, 0, %o2
- be,pt %icc, csum_partial_fix_aligned ! yep, handle it
- andn %o1, 0x7f, %o3 ! num loop iterations
- cmp %o1, 6
- bl,pn %icc, cpte - 0x4
- andcc %o0, 0x2, %g0
- be,pn %icc, 1f
- and %o0, 0x4, %g7
- lduh [%o0 + 0x00], %g2
- sub %o1, 2, %o1
- add %o0, 2, %o0
- sll %g2, 16, %g2
- addcc %g2, %o2, %o2
- srl %o2, 16, %g3
- addc %g0, %g3, %g2
- sll %o2, 16, %o2
- and %o0, 0x4, %g7
- sll %g2, 16, %g3
- srl %o2, 16, %o2
- or %g3, %o2, %o2
-1: brz,pn %g7, csum_partial_fix_aligned
- andn %o1, 0x7f, %o3
- ld [%o0 + 0x00], %g2
- sub %o1, 4, %o1
- addcc %g2, %o2, %o2
- add %o0, 4, %o0
- andn %o1, 0x7f, %o3
- addc %g0, %o2, %o2
-csum_partial_fix_aligned:
- brz,pt %o3, 3f ! none to do
- andcc %o1, 0x70, %g1 ! clears carry flag too
-5: CSUM_BIGCHUNK(%o0, 0x00, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
- CSUM_BIGCHUNK(%o0, 0x20, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
- CSUM_BIGCHUNK(%o0, 0x40, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
- CSUM_BIGCHUNK(%o0, 0x60, %o2, %o4, %o5, %g2, %g3, %g4, %g5)
- addc %g0, %o2, %o2 ! sink in final carry
- subcc %o3, 128, %o3 ! detract from loop iters
- bne,pt %icc, 5b ! more to do
- add %o0, 128, %o0 ! advance buf ptr
-3: brz,pn %g1, cpte ! nope
- andcc %o1, 0xf, %o3 ! anything left at all?
-10: rd %pc, %g7 ! get pc
- srl %g1, 1, %o4 ! compute offset
- sub %g7, %g1, %g7 ! adjust jmp ptr
- sub %g7, %o4, %g7 ! final jmp ptr adjust
- jmp %g7 + (11f-10b) ! enter the table
- add %o0, %g1, %o0 ! advance buf ptr
-cptbl: CSUM_LASTCHUNK(%o0, 0x68, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x58, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x48, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x38, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x28, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x18, %o2, %g2, %g3, %g4, %g5)
- CSUM_LASTCHUNK(%o0, 0x08, %o2, %g2, %g3, %g4, %g5)
-11: addc %g0, %o2, %o2 ! fetch final carry
- andcc %o1, 0xf, %o3 ! anything left at all?
-cpte: brnz,pn %o3, csum_partial_end_cruft ! yep, handle it
- sethi %uhi(KERNBASE), %g4
- mov %o2, %o0 ! return computed csum
- retl ! get outta here
- sllx %g4, 32, %g4 ! give gfp back
+ .text
.globl __csum_partial_copy_start, __csum_partial_copy_end
__csum_partial_copy_start:
-#define EX(x,y,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: ba,pt %xcc, 30f; \
- a, b, %o3; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
+	/* The vendor-supplied routines leave a lot of performance on
+	 * the table; the hand-scheduled macros below do much better.
+	 */
+#define CSUMCOPY_ECACHE_LOAD(src, off, t0, t1, t2, t3, t4, t5, t6, t7) \
+ ldxa [src + off + 0x00] %asi, t0; \
+ ldxa [src + off + 0x08] %asi, t1; \
+ ldxa [src + off + 0x10] %asi, t2; \
+ ldxa [src + off + 0x18] %asi, t3; \
+ ldxa [src + off + 0x20] %asi, t4; \
+ ldxa [src + off + 0x28] %asi, t5; \
+ ldxa [src + off + 0x30] %asi, t6; \
+ ldxa [src + off + 0x38] %asi, t7; \
+ nop; nop; /* DO NOT TOUCH THIS!!!!! */
-#define EX2(x,y,z) \
-98: x,y; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 30f; \
- .text; \
- .align 4
+#define CSUMCOPY_EC_STALIGNED_LDNXT(src, dest, off, sum, t0, t1, t2, t3, t4, t5, t6, t7)\
+ stx t0, [dest + off - 0x40]; \
+ addcc sum, t0, sum; \
+ bcc,pt %xcc, 11f; \
+ ldxa [src + off + 0x00] %asi, t0; \
+ add sum, 1, sum; \
+11: stx t1, [dest + off - 0x38]; \
+ addcc sum, t1, sum; \
+ bcc,pt %xcc, 12f; \
+ ldxa [src + off + 0x08] %asi, t1; \
+ add sum, 1, sum; \
+12: stx t2, [dest + off - 0x30]; \
+ addcc sum, t2, sum; \
+ bcc,pt %xcc, 13f; \
+ ldxa [src + off + 0x10] %asi, t2; \
+ add sum, 1, sum; \
+13: stx t3, [dest + off - 0x28]; \
+ addcc sum, t3, sum; \
+ bcc,pt %xcc, 14f; \
+ ldxa [src + off + 0x18] %asi, t3; \
+ add sum, 1, sum; \
+14: stx t4, [dest + off - 0x20]; \
+ addcc sum, t4, sum; \
+ bcc,pt %xcc, 15f; \
+ ldxa [src + off + 0x20] %asi, t4; \
+ add sum, 1, sum; \
+15: stx t5, [dest + off - 0x18]; \
+ addcc sum, t5, sum; \
+ bcc,pt %xcc, 16f; \
+ ldxa [src + off + 0x28] %asi, t5; \
+ add sum, 1, sum; \
+16: stx t6, [dest + off - 0x10]; \
+ addcc sum, t6, sum; \
+ bcc,pt %xcc, 17f; \
+ ldxa [src + off + 0x30] %asi, t6; \
+ add sum, 1, sum; \
+17: stx t7, [dest + off - 0x08]; \
+ addcc sum, t7, sum; \
+ bcc,pt %xcc, 18f; \
+ ldxa [src + off + 0x38] %asi, t7; \
+ add sum, 1, sum; \
+18:
-#define EX3(x,y,z) \
-98: x,y; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 96f; \
- .text; \
- .align 4
+#define CSUMCOPY_EC_STUNALIGN_LDNXT(src, dest, off, sum, t0, t1, t2, t3, t4, t5, t6, t7)\
+ stw t0, [dest + off - 0x3c]; \
+ addcc sum, t0, sum; \
+ srlx t0, 32, t0; \
+ stw t0, [dest + off - 0x40]; \
+ bcc,pt %xcc, 21f; \
+ ldxa [src + off + 0x00] %asi, t0; \
+ add sum, 1, sum; \
+21: stw t1, [dest + off - 0x34]; \
+ addcc sum, t1, sum; \
+ srlx t1, 32, t1; \
+ stw t1, [dest + off - 0x38]; \
+ bcc,pt %xcc, 22f; \
+ ldxa [src + off + 0x08] %asi, t1; \
+ add sum, 1, sum; \
+22: stw t2, [dest + off - 0x2c]; \
+ addcc sum, t2, sum; \
+ srlx t2, 32, t2; \
+ stw t2, [dest + off - 0x30]; \
+ bcc,pt %xcc, 23f; \
+ ldxa [src + off + 0x10] %asi, t2; \
+ add sum, 1, sum; \
+23: stw t3, [dest + off - 0x24]; \
+ addcc sum, t3, sum; \
+ srlx t3, 32, t3; \
+ stw t3, [dest + off - 0x28]; \
+ bcc,pt %xcc, 24f; \
+ ldxa [src + off + 0x18] %asi, t3; \
+ add sum, 1, sum; \
+24: stw t4, [dest + off - 0x1c]; \
+ addcc sum, t4, sum; \
+ srlx t4, 32, t4; \
+ stw t4, [dest + off - 0x20]; \
+ bcc,pt %xcc, 25f; \
+ ldxa [src + off + 0x20] %asi, t4; \
+ add sum, 1, sum; \
+25: stw t5, [dest + off - 0x14]; \
+ addcc sum, t5, sum; \
+ srlx t5, 32, t5; \
+ stw t5, [dest + off - 0x18]; \
+ bcc,pt %xcc, 26f; \
+ ldxa [src + off + 0x28] %asi, t5; \
+ add sum, 1, sum; \
+26: stw t6, [dest + off - 0x0c]; \
+ addcc sum, t6, sum; \
+ srlx t6, 32, t6; \
+ stw t6, [dest + off - 0x10]; \
+ bcc,pt %xcc, 27f; \
+ ldxa [src + off + 0x30] %asi, t6; \
+ add sum, 1, sum; \
+27: stw t7, [dest + off - 0x04]; \
+ addcc sum, t7, sum; \
+ srlx t7, 32, t7; \
+ stw t7, [dest + off - 0x08]; \
+ bcc,pt %xcc, 28f; \
+ ldxa [src + off + 0x38] %asi, t7; \
+ add sum, 1, sum; \
+28:
-#define EXT(start,end,handler,z) \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword start, 0, end, handler; \
- .text; \
- .align 4
+#define CSUMCOPY_EC_STALIGNED(dest, off, sum, t0, t1, t2, t3, t4, t5, t6, t7) \
+ addcc sum, t0, sum; \
+ bcc,pt %xcc, 31f; \
+ stx t0, [dest + off + 0x00]; \
+ add sum, 1, sum; \
+31: addcc sum, t1, sum; \
+ bcc,pt %xcc, 32f; \
+ stx t1, [dest + off + 0x08]; \
+ add sum, 1, sum; \
+32: addcc sum, t2, sum; \
+ bcc,pt %xcc, 33f; \
+ stx t2, [dest + off + 0x10]; \
+ add sum, 1, sum; \
+33: addcc sum, t3, sum; \
+ bcc,pt %xcc, 34f; \
+ stx t3, [dest + off + 0x18]; \
+ add sum, 1, sum; \
+34: addcc sum, t4, sum; \
+ bcc,pt %xcc, 35f; \
+ stx t4, [dest + off + 0x20]; \
+ add sum, 1, sum; \
+35: addcc sum, t5, sum; \
+ bcc,pt %xcc, 36f; \
+ stx t5, [dest + off + 0x28]; \
+ add sum, 1, sum; \
+36: addcc sum, t6, sum; \
+ bcc,pt %xcc, 37f; \
+ stx t6, [dest + off + 0x30]; \
+ add sum, 1, sum; \
+37: addcc sum, t7, sum; \
+ bcc,pt %xcc, 38f; \
+ stx t7, [dest + off + 0x38]; \
+ add sum, 1, sum; \
+38:
- /* This aligned version executes typically in 8.5 superscalar cycles, this
- * is the best I can do. I say 8.5 because the final add will pair with
- * the next ldd in the main unrolled loop. Thus the pipe is always full.
- * If you change these macros (including order of instructions),
- * please check the fixup code below as well.
- */
-#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldda [src + off + 0x00] %asi, t0; \
- ldda [src + off + 0x08] %asi, t2; \
- addccc t0, sum, sum; \
- ldda [src + off + 0x10] %asi, t4; \
- addccc t1, sum, sum; \
- ldda [src + off + 0x18] %asi, t6; \
- addccc t2, sum, sum; \
- std t0, [dst + off + 0x00]; \
- addccc t3, sum, sum; \
- std t2, [dst + off + 0x08]; \
- addccc t4, sum, sum; \
- std t4, [dst + off + 0x10]; \
- addccc t5, sum, sum; \
- std t6, [dst + off + 0x18]; \
- addccc t6, sum, sum; \
- addccc t7, sum, sum;
+#define CSUMCOPY_EC_STUNALIGN(dest, off, sum, t0, t1, t2, t3, t4, t5, t6, t7) \
+ stw t0, [dest + off + 0x04]; \
+ addcc sum, t0, sum; \
+ srlx t0, 32, t0; \
+ bcc,pt %xcc, 41f; \
+ stw t0, [dest + off + 0x00]; \
+ add sum, 1, sum; \
+41: stw t1, [dest + off + 0x0c]; \
+ addcc sum, t1, sum; \
+ srlx t1, 32, t1; \
+ bcc,pt %xcc, 42f; \
+ stw t1, [dest + off + 0x08]; \
+ add sum, 1, sum; \
+42: stw t2, [dest + off + 0x14]; \
+ addcc sum, t2, sum; \
+ srlx t2, 32, t2; \
+ bcc,pt %xcc, 43f; \
+ stw t2, [dest + off + 0x10]; \
+ add sum, 1, sum; \
+43: stw t3, [dest + off + 0x1c]; \
+ addcc sum, t3, sum; \
+ srlx t3, 32, t3; \
+ bcc,pt %xcc, 44f; \
+ stw t3, [dest + off + 0x18]; \
+ add sum, 1, sum; \
+44: stw t4, [dest + off + 0x24]; \
+ addcc sum, t4, sum; \
+ srlx t4, 32, t4; \
+ bcc,pt %xcc, 45f; \
+ stw t4, [dest + off + 0x20]; \
+ add sum, 1, sum; \
+45: stw t5, [dest + off + 0x2c]; \
+ addcc sum, t5, sum; \
+ srlx t5, 32, t5; \
+ bcc,pt %xcc, 46f; \
+ stw t5, [dest + off + 0x28]; \
+ add sum, 1, sum; \
+46: stw t6, [dest + off + 0x34]; \
+ addcc sum, t6, sum; \
+ srlx t6, 32, t6; \
+ bcc,pt %xcc, 47f; \
+ stw t6, [dest + off + 0x30]; \
+ add sum, 1, sum; \
+47: stw t7, [dest + off + 0x3c]; \
+ addcc sum, t7, sum; \
+ srlx t7, 32, t7; \
+ bcc,pt %xcc, 48f; \
+ stw t7, [dest + off + 0x38]; \
+ add sum, 1, sum; \
+48:
- /* 12 superscalar cycles seems to be the limit for this case,
- * because of this we thus do all the ldd's together to get
- * Viking MXCC into streaming mode. Ho hum...
- */
-#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldda [src + off + 0x00] %asi, t0; \
- ldda [src + off + 0x08] %asi, t2; \
- ldda [src + off + 0x10] %asi, t4; \
- ldda [src + off + 0x18] %asi, t6; \
- st t0, [dst + off + 0x00]; \
- addccc t0, sum, sum; \
- st t1, [dst + off + 0x04]; \
- addccc t1, sum, sum; \
- st t2, [dst + off + 0x08]; \
- addccc t2, sum, sum; \
- st t3, [dst + off + 0x0c]; \
- addccc t3, sum, sum; \
- st t4, [dst + off + 0x10]; \
- addccc t4, sum, sum; \
- st t5, [dst + off + 0x14]; \
- addccc t5, sum, sum; \
- st t6, [dst + off + 0x18]; \
- addccc t6, sum, sum; \
- st t7, [dst + off + 0x1c]; \
- addccc t7, sum, sum;
+#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1) \
+ ldxa [src - off - 0x08] %asi, t0; \
+ ldxa [src - off - 0x00] %asi, t1; \
+ nop; nop; \
+ addcc t0, sum, sum; \
+ stw t0, [dst - off - 0x04]; \
+ srlx t0, 32, t0; \
+ bcc,pt %xcc, 51f; \
+ stw t0, [dst - off - 0x08]; \
+ add sum, 1, sum; \
+51: addcc t1, sum, sum; \
+ stw t1, [dst - off + 0x04]; \
+ srlx t1, 32, t1; \
+ bcc,pt %xcc, 52f; \
+ stw t1, [dst - off - 0x00]; \
+ add sum, 1, sum; \
+52:
- /* Yuck, 6 superscalar cycles... */
-#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
- ldda [src - off - 0x08] %asi, t0; \
- ldda [src - off - 0x00] %asi, t2; \
- addccc t0, sum, sum; \
- st t0, [dst - off - 0x08]; \
- addccc t1, sum, sum; \
- st t1, [dst - off - 0x04]; \
- addccc t2, sum, sum; \
- st t2, [dst - off - 0x00]; \
- addccc t3, sum, sum; \
- st t3, [dst - off + 0x04];
-
- /* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
- andcc %o3, 8, %g0 ! begin checks for that code
- be,pn %icc, 1f
- and %o3, 4, %g5
- EX(ldda [%o0 + 0x00] %asi, %g2, and %o3, 0xf,#)
- add %o1, 8, %o1
- addcc %g2, %g7, %g7
- add %o0, 8, %o0
- addccc %g3, %g7, %g7
- EX2(st %g2, [%o1 - 0x08],#)
- addc %g0, %g7, %g7
- EX2(st %g3, [%o1 - 0x04],#)
-1: brz,pt %g5, 1f
- andcc %o3, 3, %o3
- EX(lda [%o0 + 0x00] %asi, %g2, add %o3, 4,#)
- add %o1, 4, %o1
- addcc %g2, %g7, %g7
- EX2(st %g2, [%o1 - 0x04],#)
- addc %g0, %g7, %g7
- add %o0, 4, %o0
-1: brz,pn %o3, 1f
- addcc %o3, -1, %g0
- bne,pn %icc, 2f
- subcc %o3, 2, %o3
- ba,pt %xcc, 4f
- clr %o4
-2: EX(lduha [%o0 + 0x00] %asi, %o4, add %o3, 2,#)
- add %o0, 2, %o0
- EX2(sth %o4, [%o1 + 0x00],#)
- be,pn %icc, 6f
- add %o1, 2, %o1
- sll %o4, 16, %o4
-4: EX(lduba [%o0 + 0x00] %asi, %o5, add %g0, 1,#)
- EX2(stb %o5, [%o1 + 0x00],#)
- sll %o5, 8, %o5
- or %o5, %o4, %o4
-6: addcc %o4, %g7, %g7
-1: sllx %g4, 32, %g4
- addc %g0, %g7, %o0
- retl
- srl %o0, 0, %o0
+ andcc %o3, 8, %g0 ! IEU1 Group
+ be,pn %icc, 1f ! CTI
+ and %o3, 4, %g5 ! IEU0
+ ldxa [%o0 + 0x00] %asi, %g2 ! Load Group
+ add %o1, 8, %o1 ! IEU0
+ add %o0, 8, %o0 ! IEU1
+ addcc %g2, %g7, %g7 ! IEU1 Group + 2 bubbles
+ stw %g2, [%o1 - 0x04] ! Store
+ srlx %g2, 32, %g2 ! IEU0
+ bcc,pt %xcc, 1f ! CTI Group
+ stw %g2, [%o1 - 0x08] ! Store
+ add %g7, 1, %g7 ! IEU0
+1: brz,pt %g5, 1f ! CTI Group
+ clr %g2 ! IEU0
+ lduwa [%o0 + 0x00] %asi, %g2 ! Load
+ add %o1, 4, %o1 ! IEU0 Group
+ add %o0, 4, %o0 ! IEU1
+ stw %g2, [%o1 - 0x04] ! Store Group + 2 bubbles
+ sllx %g2, 32, %g2 ! IEU0
+1: andcc %o3, 2, %g0 ! IEU1
+ be,pn %icc, 1f ! CTI Group
+ clr %o4 ! IEU1
+ lduha [%o0 + 0x00] %asi, %o4 ! Load
+ add %o0, 2, %o0 ! IEU0 Group
+ add %o1, 2, %o1 ! IEU1
+ sth %o4, [%o1 - 0x2] ! Store Group + 2 bubbles
+ sll %o4, 16, %o4 ! IEU0
+1: andcc %o3, 1, %g0 ! IEU1
+ be,pn %icc, 1f ! CTI Group
+ clr %o5 ! IEU0
+ lduba [%o0 + 0x00] %asi, %o5 ! Load
+ stb %o5, [%o1 + 0x00] ! Store Group + 2 bubbles
+ sll %o5, 8, %o5 ! IEU0
+1: or %g2, %o4, %o4 ! IEU1
+ or %o5, %o4, %o4 ! IEU0 Group
+ addcc %o4, %g7, %g7 ! IEU1
+ bcc,pt %xcc, ccfold ! CTI
+ sethi %uhi(PAGE_OFFSET), %g4 ! IEU0 Group
+ b,pt %xcc, ccfold ! CTI
+ add %g7, 1, %g7 ! IEU1
- /* Sun, you just can't beat me, you just can't. Stop trying,
- * give up. I'm serious, I am going to kick the living shit
- * out of you, game over, lights out.
- */
- .align 8
- .globl __csum_partial_copy_sparc_generic
-__csum_partial_copy_sparc_generic:
- /* %o0=src, %o1=dest, %g1=len, %g7=sum */
- srl %g7, 0, %g7 ! you neve know...
- xor %o0, %o1, %o4 ! get changing bits
- srl %g1, 0, %g1 ! doof scheiss
- andcc %o4, 3, %g0 ! check for mismatched alignment
- bne,pn %icc, ccslow ! better this than unaligned/fixups
- andcc %o0, 7, %g0 ! need to align things?
- be,pt %icc, cc_dword_aligned ! yes, we check for short lengths there
- andn %g1, 0x7f, %g2 ! can we use unrolled loop?
- cmp %g1, 6
- bl,a,pn %icc, ccte
- andcc %g1, 0xf, %o3
- andcc %o0, 0x1, %g0
- bne,pn %icc, ccslow
- andcc %o0, 0x2, %g0
- be,pn %icc, 1f
- andcc %o0, 0x4, %g0
- EX(lduha [%o0 + 0x00] %asi, %g4, add %g1, 0,#)
- sub %g1, 2, %g1
- EX2(sth %g4, [%o1 + 0x00],#)
- add %o0, 2, %o0
- sll %g4, 16, %g4
- addcc %g4, %g7, %g7
- add %o1, 2, %o1
- srl %g7, 16, %g3
- addc %g0, %g3, %g4
- sll %g7, 16, %g7
- sll %g4, 16, %g3
- srl %g7, 16, %g7
- andcc %o0, 0x4, %g0
- or %g3, %g7, %g7
-1: be,pt %icc, 3f
- andn %g1, 0x7f, %g2
- EX(lda [%o0 + 0x00] %asi, %g4, add %g1, 0,#)
- sub %g1, 4, %g1
- EX2(st %g4, [%o1 + 0x00],#)
- add %o0, 4, %o0
- addcc %g4, %g7, %g7
- add %o1, 4, %o1
- andn %g1, 0x7f, %g2
- addc %g0, %g7, %g7
+cc_fixit:
+ bl,a,pn %icc, ccte ! CTI
+ andcc %g1, 0xf, %o3 ! IEU1 Group
+ andcc %o0, 1, %g0 ! IEU1 Group
+ bne,pn %icc, ccslow ! CTI
+ andcc %o0, 2, %g0 ! IEU1 Group
+ be,pn %icc, 1f ! CTI
+ andcc %o0, 0x4, %g0 ! IEU1 Group
+ lduha [%o0 + 0x00] %asi, %g4 ! Load
+ sub %g1, 2, %g1 ! IEU0
+ add %o0, 2, %o0 ! IEU0 Group
+ add %o1, 2, %o1 ! IEU1
+ sll %g4, 16, %g3 ! IEU0 Group + 1 bubble
+ addcc %g3, %g7, %g7 ! IEU1
+ bcc,pt %xcc, 0f ! CTI
+ srl %g7, 16, %g3 ! IEU0 Group
+ add %g3, 1, %g3 ! IEU0 4 clocks (mispredict)
+0: andcc %o0, 0x4, %g0 ! IEU1 Group
+ sth %g4, [%o1 - 0x2] ! Store
+ sll %g7, 16, %g7 ! IEU0
+ sll %g3, 16, %g3 ! IEU0 Group
+ srl %g7, 16, %g7 ! IEU0 Group
+ or %g3, %g7, %g7 ! IEU0 Group (regdep)
+1: be,pt %icc, cc_dword_aligned ! CTI
+ andn %g1, 0xff, %g2 ! IEU1
+ lduwa [%o0 + 0x00] %asi, %g4 ! Load Group
+ sub %g1, 4, %g1 ! IEU0
+ add %o0, 4, %o0 ! IEU1
+ add %o1, 4, %o1 ! IEU0 Group
+ addcc %g4, %g7, %g7 ! IEU1 Group + 1 bubble
+ stw %g4, [%o1 - 0x4] ! Store
+ bcc,pt %xcc, cc_dword_aligned ! CTI
+ andn %g1, 0xff, %g2 ! IEU0 Group
+ b,pt %xcc, cc_dword_aligned ! CTI 4 clocks (mispredict)
+ add %g7, 1, %g7 ! IEU0
+
+ .align 32
+ .globl __csum_partial_copy_sparc_generic, csum_partial_copy
+csum_partial_copy:
+__csum_partial_copy_sparc_generic: /* %o0=src, %o1=dest, %g1=len, %g7=sum */
+ xorcc %o0, %o1, %o4 ! IEU1 Group
+ srl %g7, 0, %g7 ! IEU0
+ andcc %o4, 3, %g0 ! IEU1 Group
+ srl %g1, 0, %g1 ! IEU0
+ bne,pn %icc, ccslow ! CTI
+ andcc %o0, 7, %g0 ! IEU1 Group
+ be,pt %icc, cc_dword_aligned ! CTI
+ andn %g1, 0xff, %g2 ! IEU0
+ b,pt %xcc, cc_fixit ! CTI Group
+ cmp %g1, 6 ! IEU1
cc_dword_aligned:
-3: brz,pn %g2, 3f ! nope, less than one loop remains
- andcc %o1, 4, %g0 ! dest aligned on 4 or 8 byte boundry?
- be,pn %icc, ccdbl + 4 ! 8 byte aligned, kick ass
-5: CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b, 20f,#) ! note for exception handling
- sub %g1, 128, %g1 ! detract from length
- addc %g0, %g7, %g7 ! add in last carry bit
- andncc %g1, 0x7f, %g0 ! more to csum?
- add %o0, 128, %o0 ! advance src ptr
- bne,pt %icc, 5b ! we did not go negative, continue looping
- add %o1, 128, %o1 ! advance dest ptr
-3: andcc %g1, 0x70, %o2 ! can use table?
-ccmerge:be,pn %icc, ccte ! nope, go and check for end cruft
- andcc %g1, 0xf, %o3 ! get low bits of length (clears carry btw)
- srl %o2, 1, %o4 ! begin negative offset computation
-13: rd %pc, %o5 ! set up table ptr end
- add %o0, %o2, %o0 ! advance src ptr
- sub %o5, %o4, %o5 ! continue table calculation
- sll %o2, 1, %g2 ! constant multiplies are fun...
- sub %o5, %g2, %o5 ! some more adjustments
- jmpl %o5 + (12f-13b), %g0 ! jump into it, duff style, wheee...
- add %o1, %o2, %o1 ! advance dest ptr (carry is clear btw)
-cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
- CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b, 22f,#) ! note for exception table handling
- addc %g0, %g7, %g7
- andcc %g1, 0xf, %o3 ! check for low bits set
-ccte: bne,pn %icc, cc_end_cruft ! something left, handle it out of band
- sethi %uhi(KERNBASE), %g4 ! restore gfp
- mov %g7, %o0 ! give em the computed checksum
- sllx %g4, 32, %g4 ! finish gfp restoration
- retl ! return
- srl %o0, 0, %o0
-ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
- CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b, 21f,#) ! note for exception table handling
- sub %g1, 128, %g1 ! detract from length
- addc %g0, %g7, %g7 ! add in last carry bit
- andncc %g1, 0x7f, %g0 ! more to csum?
- add %o0, 128, %o0 ! advance src ptr
- bne,pt %icc, ccdbl ! we did not go negative, continue looping
- add %o1, 128, %o1 ! advance dest ptr
- ba,pt %xcc, ccmerge ! finish it off, above
- andcc %g1, 0x70, %o2 ! can use table? (clears carry btw)
+ brz,pn %g2, 3f ! CTI Group
+ andcc %o1, 4, %g0 ! IEU1 Group (brz uses IEU1)
+ be,pn %icc, ccdbl + 4 ! CTI
+5: CSUMCOPY_ECACHE_LOAD( %o0, 0x00, %o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STUNALIGN_LDNXT(%o0,%o1,0x40,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STUNALIGN_LDNXT(%o0,%o1,0x80,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STUNALIGN_LDNXT(%o0,%o1,0xc0,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STUNALIGN( %o1,0xc0,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+10:
+ sub %g1, 256, %g1 ! IEU0 Group
+ add %o0, 256, %o0 ! IEU1
+ andncc %g1, 0xff, %g0 ! IEU1 Group
+ bne,pt %icc, 5b ! CTI
+ add %o1, 256, %o1 ! IEU0
+3: andcc %g1, 0xf0, %o2 ! IEU1 Group
+ccmerge:be,pn %icc, ccte ! CTI
+ andcc %g1, 0xf, %o3 ! IEU1 Group
+ sll %o2, 2, %o4 ! IEU0
+13: rd %pc, %o5 ! LSU Group + 4 clocks
+ add %o0, %o2, %o0 ! IEU0 Group
+ sub %o5, %o4, %o5 ! IEU1 Group
+ jmpl %o5 + (12f - 13b), %g0 ! CTI Group brk forced
+ add %o1, %o2, %o1 ! IEU0 Group
+cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0xe8,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0xd8,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0xc8,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0xb8,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0xa8,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x98,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x88,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x78,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x58,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x48,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x38,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3)
+ CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3)
+12:
+ andcc %g1, 0xf, %o3 ! IEU1 Group
+ccte: bne,pn %icc, cc_end_cruft ! CTI
+ sethi %uhi(PAGE_OFFSET), %g4 ! IEU0
+ccfold: sllx %g7, 32, %o0 ! IEU0 Group
+ addcc %g7, %o0, %o0 ! IEU1 Group (regdep)
+ srlx %o0, 32, %o0 ! IEU0 Group (regdep)
+ bcs,a,pn %xcc, 1f ! CTI
+ add %o0, 1, %o0 ! IEU1 4 clocks (mispredict)
+1: retl ! CTI Group brk forced
+ sllx %g4, 32,%g4 ! IEU0 Group
+ccdbl: CSUMCOPY_ECACHE_LOAD( %o0, 0x00, %o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STALIGNED_LDNXT(%o0,%o1,0x40,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STALIGNED_LDNXT(%o0,%o1,0x80,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STALIGNED_LDNXT(%o0,%o1,0xc0,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+ CSUMCOPY_EC_STALIGNED( %o1,0xc0,%g7,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
+11:
+ sub %g1, 256, %g1 ! IEU0 Group
+ add %o0, 256, %o0 ! IEU1
+ andncc %g1, 0xff, %g0 ! IEU1 Group
+ bne,pt %icc, ccdbl ! CTI
+ add %o1, 256, %o1 ! IEU0
+ b,pt %xcc, ccmerge ! CTI Group
+ andcc %g1, 0xf0, %o2 ! IEU1
ccslow: mov 0, %g5
brlez,pn %g1, 4f
@@ -401,9 +416,9 @@ ccslow: mov 0, %g5
be,a,pt %icc, 1f
srl %g1, 1, %o3
sub %g1, 1, %g1
- EX(lduba [%o0] %asi, %g5, add %g1, 1,#)
+ lduba [%o0] %asi, %g5
add %o0, 1, %o0
- EX2(stb %g5, [%o1],#)
+ stb %g5, [%o1]
srl %g1, 1, %o3
add %o1, 1, %o1
1: brz,a,pn %o3, 3f
@@ -411,33 +426,33 @@ ccslow: mov 0, %g5
andcc %o0, 2, %g0
be,a,pt %icc, 1f
srl %o3, 1, %o3
- EX(lduha [%o0] %asi, %o4, add %g1, 0,#)
+ lduha [%o0] %asi, %o4
sub %g1, 2, %g1
srl %o4, 8, %g2
sub %o3, 1, %o3
- EX2(stb %g2, [%o1],#)
+ stb %g2, [%o1]
add %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 1],#)
+ stb %o4, [%o1 + 1]
add %o0, 2, %o0
srl %o3, 1, %o3
add %o1, 2, %o1
1: brz,a,pn %o3, 2f
andcc %g1, 2, %g0
- EX3(lda [%o0] %asi, %o4,#)
+ lda [%o0] %asi, %o4
5: srl %o4, 24, %g2
srl %o4, 16, %g3
- EX2(stb %g2, [%o1],#)
+ stb %g2, [%o1]
srl %o4, 8, %g2
- EX2(stb %g3, [%o1 + 1],#)
+ stb %g3, [%o1 + 1]
add %o0, 4, %o0
- EX2(stb %g2, [%o1 + 2],#)
+ stb %g2, [%o1 + 2]
addcc %o4, %g5, %g5
- EX2(stb %o4, [%o1 + 3],#)
+ stb %o4, [%o1 + 3]
addc %g5, %g0, %g5 ! I am now to lazy to optimize this (question is if it
add %o1, 4, %o1 ! is worthy). Maybe some day - with the sll/srl
subcc %o3, 1, %o3 ! tricks
bne,a,pt %icc, 5b
- EX3(lda [%o0] %asi, %o4,#)
+ lda [%o0] %asi, %o4
sll %g5, 16, %g2
srl %g5, 16, %g5
srl %g2, 16, %g2
@@ -445,19 +460,19 @@ ccslow: mov 0, %g5
add %g2, %g5, %g5
2: be,a,pt %icc, 3f
andcc %g1, 1, %g0
- EX(lduha [%o0] %asi, %o4, and %g1, 3,#)
+ lduha [%o0] %asi, %o4
andcc %g1, 1, %g0
srl %o4, 8, %g2
add %o0, 2, %o0
- EX2(stb %g2, [%o1],#)
+ stb %g2, [%o1]
add %g5, %o4, %g5
- EX2(stb %o4, [%o1 + 1],#)
+ stb %o4, [%o1 + 1]
add %o1, 2, %o1
3: be,a,pt %icc, 1f
sll %g5, 16, %o4
- EX(lduba [%o0] %asi, %g2, add %g0, 1,#)
+ lduba [%o0] %asi, %g2
sll %g2, 8, %o4
- EX2(stb %g2, [%o1],#)
+ stb %g2, [%o1]
add %g5, %o4, %g5
sll %g5, 16, %o4
1: addcc %o4, %g5, %g5
@@ -474,103 +489,3 @@ ccslow: mov 0, %g5
retl
srl %o0, 0, %o0
__csum_partial_copy_end:
-
- .section .fixup,#alloc,#execinstr
- .align 4
-/* We do these strange calculations for the csum_*_from_user case only, ie.
- * we only bother with faults on loads... */
-
-/* o2 = ((g2%20)&3)*8
- * o3 = g1 - (g2/20)*32 - o2 */
-20:
- cmp %g2, 20
- blu,a,pn %icc, 1f
- and %g2, 3, %o2
- sub %g1, 32, %g1
- ba,pt %xcc, 20b
- sub %g2, 20, %g2
-1:
- sll %o2, 3, %o2
- ba,pt %xcc, 31f
- sub %g1, %o2, %o3
-
-/* o2 = (!(g2 & 15) ? 0 : (((g2 & 15) + 1) & ~1)*8)
- * o3 = g1 - (g2/16)*32 - o2 */
-21:
- andcc %g2, 15, %o3
- srl %g2, 4, %g2
- be,a,pn %icc, 1f
- clr %o2
- add %o3, 1, %o3
- and %o3, 14, %o3
- sll %o3, 3, %o2
-1:
- sll %g2, 5, %g2
- sub %g1, %g2, %o3
- ba,pt %xcc, 31f
- sub %o3, %o2, %o3
-
-/* o0 += (g2/10)*16 - 0x70
- * 01 += (g2/10)*16 - 0x70
- * o2 = (g2 % 10) ? 8 : 0
- * o3 += 0x70 - (g2/10)*16 - o2 */
-22:
- cmp %g2, 10
- blu,a,pt %xcc, 1f
- sub %o0, 0x70, %o0
- add %o0, 16, %o0
- add %o1, 16, %o1
- sub %o3, 16, %o3
- ba,pt %xcc, 22b
- sub %g2, 10, %g2
-1:
- sub %o1, 0x70, %o1
- add %o3, 0x70, %o3
- clr %o2
- movrnz %g2, 8, %o2
- ba,pt %xcc, 31f
- sub %o3, %o2, %o3
-96:
- and %g1, 3, %g1
- sll %o3, 2, %o3
- add %g1, %o3, %o3
-30:
-/* %o1 is dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occured */
- clr %o2
-31:
-/* %o0 is src
- * %o1 is dst
- * %o2 is # of bytes to copy from src to dst
- * %o3 is # bytes to zero out
- * %o4 is faulting address
- * %o5 is %pc where fault occured */
- save %sp, -136, %sp
- mov %i5, %o0
- mov %i7, %o1
- mov %i4, %o2
- call lookup_fault
- mov %g7, %i4
- cmp %o0, 2
- bne,pn %icc, 1f
- add %g0, -EFAULT, %i5
- brz,pn %i2, 2f
- mov %i0, %o1
- mov %i1, %o0
- call __copy_from_user
- mov %i2, %o2
- brnz,a,pn %o0, 2f
- add %i3, %i2, %i3
- add %i1, %i2, %i1
-2:
- mov %i1, %o0
- wr %g0, ASI_S, %asi
- call __bzero_noasi
- mov %i3, %o1
-1:
- ldx [%sp + STACK_BIAS + 264], %o2 ! struct_ptr of parent
- st %i5, [%o2]
- ret
- restore
diff --git a/arch/sparc64/lib/copy_from_user.S b/arch/sparc64/lib/copy_from_user.S
deleted file mode 100644
index 196435aed..000000000
--- a/arch/sparc64/lib/copy_from_user.S
+++ /dev/null
@@ -1,469 +0,0 @@
-/* copy_user.S: Sparc optimized copy_from_user code.
- *
- * Copyright(C) 1995 Linus Torvalds
- * Copyright(C) 1996 David S. Miller
- * Copyright(C) 1996 Eddie C. Dost
- * Copyright(C) 1996,1997 Jakub Jelinek
- *
- * derived from:
- * e-mail between David and Eddie.
- *
- * Returns 0 if successful, otherwise count of bytes not copied yet
- *
- * FIXME: This code should be optimized for sparc64... -jj
- */
-
-#include <asm/ptrace.h>
-#include <asm/asi.h>
-#include <asm/head.h>
-
-#define PRE_RETL sethi %uhi(KERNBASE), %g4; sllx %g4, 32, %g4;
-
-#define EX(x,y,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: PRE_RETL \
- retl; \
- a, b, %o0; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
-
-#define EX2(x,y,c,d,e,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: c, d, e; \
- PRE_RETL \
- retl; \
- a, b, %o0; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
-
-#define EXO2(x,y,z) \
-98: x,##y; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 97f; \
- .text; \
- .align 4
-
-#define EXT(start,end,handler,z) \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword start, 0, end, handler; \
- .text; \
- .align 4
-
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
-
-/* Both these macros have to start with exactly the same insn */
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldda [%src + offset + 0x00] %asi, %t0; \
- ldda [%src + offset + 0x08] %asi, %t2; \
- ldda [%src + offset + 0x10] %asi, %t4; \
- ldda [%src + offset + 0x18] %asi, %t6; \
- st %t0, [%dst + offset + 0x00]; \
- st %t1, [%dst + offset + 0x04]; \
- st %t2, [%dst + offset + 0x08]; \
- st %t3, [%dst + offset + 0x0c]; \
- st %t4, [%dst + offset + 0x10]; \
- st %t5, [%dst + offset + 0x14]; \
- st %t6, [%dst + offset + 0x18]; \
- st %t7, [%dst + offset + 0x1c];
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldda [%src + offset + 0x00] %asi, %t0; \
- ldda [%src + offset + 0x08] %asi, %t2; \
- ldda [%src + offset + 0x10] %asi, %t4; \
- ldda [%src + offset + 0x18] %asi, %t6; \
- std %t0, [%dst + offset + 0x00]; \
- std %t2, [%dst + offset + 0x08]; \
- std %t4, [%dst + offset + 0x10]; \
- std %t6, [%dst + offset + 0x18];
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldda [%src - offset - 0x10] %asi, %t0; \
- ldda [%src - offset - 0x08] %asi, %t2; \
- st %t0, [%dst - offset - 0x10]; \
- st %t1, [%dst - offset - 0x0c]; \
- st %t2, [%dst - offset - 0x08]; \
- st %t3, [%dst - offset - 0x04];
-
-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduha [%src + offset + 0x00] %asi, %t0; \
- lduha [%src + offset + 0x02] %asi, %t1; \
- lduha [%src + offset + 0x04] %asi, %t2; \
- lduha [%src + offset + 0x06] %asi, %t3; \
- sth %t0, [%dst + offset + 0x00]; \
- sth %t1, [%dst + offset + 0x02]; \
- sth %t2, [%dst + offset + 0x04]; \
- sth %t3, [%dst + offset + 0x06];
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- lduba [%src - offset - 0x02] %asi, %t0; \
- lduba [%src - offset - 0x01] %asi, %t1; \
- stb %t0, [%dst - offset - 0x02]; \
- stb %t1, [%dst - offset - 0x01];
-
- .text
- .align 4
-
- .globl __copy_from_user
-dword_align:
- andcc %o1, 1, %g0
- be 4f
- andcc %o1, 2, %g0
-
- EXO2(lduba [%o1] %asi, %g2,#)
- add %o1, 1, %o1
- stb %g2, [%o0]
- sub %o2, 1, %o2
- bne 3f
- add %o0, 1, %o0
-
- EXO2(lduha [%o1] %asi, %g2,#)
- add %o1, 2, %o1
- sth %g2, [%o0]
- sub %o2, 2, %o2
- ba,pt %xcc, 3f
- add %o0, 2, %o0
-4:
- EXO2(lduha [%o1] %asi, %g2,#)
- add %o1, 2, %o1
- sth %g2, [%o0]
- sub %o2, 2, %o2
- ba,pt %xcc, 3f
- add %o0, 2, %o0
-
-__copy_from_user: /* %o0=dst %o1=src %o2=len */
- wr %g0, ASI_S, %asi
- xor %o0, %o1, %o4
-1:
- andcc %o4, 3, %o5
-2:
- bne,pn %icc, cannot_optimize
- cmp %o2, 15
-
- bleu,pn %xcc, short_aligned_end
- andcc %o1, 3, %g0
-
- bne,pn %icc, dword_align
-3:
- andcc %o1, 4, %g0
-
- be,pt %icc, 2f
- mov %o2, %g1
-
- EXO2(lda [%o1] %asi, %o4,#)
- sub %g1, 4, %g1
- st %o4, [%o0]
- add %o1, 4, %o1
- add %o0, 4, %o0
-2:
- andcc %g1, 0xffffffffffffff80, %g7
- be,pn %xcc, 3f
- andcc %o0, 4, %g0
-
- be,pn %icc, ldd_std + 4
-5:
- MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
- EXT(5b, 80b, 50f,#)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, 5b
- add %o0, 128, %o0
-3:
- andcc %g1, 0x70, %g7
- be,pn %icc, copy_user_table_end
- andcc %g1, 8, %g0
-100:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + (copy_user_table_end - 100b), %g0
- add %o0, %g7, %o0
-
-copy_user_table:
- MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
-copy_user_table_end:
- EXT(copy_user_table, copy_user_table_end, 51f,#)
- be,pt %icc, copy_user_last7
- andcc %g1, 4, %g0
-
- EX(ldda [%o1] %asi, %g2, and %g1, 0xf,#)
- add %o0, 8, %o0
- add %o1, 8, %o1
- st %g2, [%o0 - 0x08]
- st %g3, [%o0 - 0x04]
-copy_user_last7:
- be,pn %icc, 1f
- andcc %g1, 2, %g0
-
- EX(lda [%o1] %asi, %g2, and %g1, 7,#)
- add %o1, 4, %o1
- st %g2, [%o0]
- add %o0, 4, %o0
-1:
- be,pn %icc, 1f
- andcc %g1, 1, %g0
-
- EX(lduha [%o1] %asi, %g2, and %g1, 3,#)
- add %o1, 2, %o1
- sth %g2, [%o0]
- add %o0, 2, %o0
-1:
- be,pn %icc, 1f
- nop
-
- EX(lduba [%o1] %asi, %g2, add %g0, 1,#)
- stb %g2, [%o0]
-1:
- PRE_RETL
- retl
- clr %o0
-
-ldd_std:
- MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
- EXT(ldd_std, 81b, 52f,#)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, ldd_std
- add %o0, 128, %o0
-
- andcc %g1, 0x70, %g7
- be,pn %icc, copy_user_table_end
- andcc %g1, 8, %g0
-101:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + (copy_user_table_end - 101b), %g0
- add %o0, %g7, %o0
-
-cannot_optimize:
- bleu short_end
- cmp %o5, 2
-
- bne byte_chunk
- and %o2, 0xfffffffffffffff0, %o3
-
- andcc %o1, 1, %g0
- be 10f
- nop
-
- EXO2(lduba [%o1] %asi, %g2,#)
- add %o1, 1, %o1
- stb %g2, [%o0]
- sub %o2, 1, %o2
- andcc %o2, 0xfffffffffffffff0, %o3
- be short_end
- add %o0, 1, %o0
-10:
- MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
- MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
- EXT(10b, 82b, 53f,#)
- subcc %o3, 0x10, %o3
- add %o1, 0x10, %o1
- bne 10b
- add %o0, 0x10, %o0
- ba,pt %xcc, 2f
- and %o2, 0xe, %o3
-
-byte_chunk:
- MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
- EXT(byte_chunk, 83b, 54f,#)
- subcc %o3, 0x10, %o3
- add %o1, 0x10, %o1
- bne,pt %xcc, byte_chunk
- add %o0, 0x10, %o0
-
-short_end:
- and %o2, 0xe, %o3
-2:
- rd %pc, %o5
- sll %o3, 3, %o4
- add %o0, %o3, %o0
- sub %o5, %o4, %o5
- add %o1, %o3, %o1
- jmpl %o5 + (short_table_end - 2b), %g0
- andcc %o2, 1, %g0
-84:
- MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
-short_table_end:
- EXT(84b, short_table_end, 55f,#)
- be 1f
- nop
- EX(lduba [%o1] %asi, %g2, add %g0, 1,#)
- stb %g2, [%o0]
-1:
- PRE_RETL
- retl
- clr %o0
-
-short_aligned_end:
- bne short_end
- andcc %o2, 8, %g0
-
- be 1f
- andcc %o2, 4, %g0
-
- EXO2(lda [%o1 + 0x00] %asi, %g2,#)
- EX(lda [%o1 + 0x04] %asi, %g3, sub %o2, 4,#)
- add %o1, 8, %o1
- st %g2, [%o0 + 0x00]
- st %g3, [%o0 + 0x04]
- add %o0, 8, %o0
-1:
- ba,pt %xcc, copy_user_last7
- mov %o2, %g1
-
- .section .fixup,#alloc,#execinstr
- .align 4
-97:
- PRE_RETL
- retl
- mov %o2, %o0
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * o0 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? x * 8 : (x - 4) * 4)
- */
- cmp %g2, 12
- bcs 1f
- cmp %g2, 24
- bcs 2f
- cmp %g2, 36
- bcs 3f
- nop
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-3:
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-2:
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-1:
- cmp %g2, 4
- bcs,a 1f
- sll %g2, 3, %g2
- sub %g2, 4, %g2
- sll %g2, 2, %g2
-1:
- and %g1, 0x7f, %o0
- add %o0, %g7, %o0
- PRE_RETL
- retl
- sub %o0, %g2, %o0
-51:
-/* i = 41 - g2; j = i % 6;
- * o0 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : (j - 3) * 8;
- */
- neg %g2
- and %g1, 0xf, %g1
- add %g2, 41, %g2
-1:
- cmp %g2, 6
- bcs,a 2f
- cmp %g2, 4
- add %g1, 16, %g1
- b 1b
- sub %g2, 6, %g2
-2:
- bcs,a 3f
- inc %g2
- sub %g2, 3, %g2
- b 2f
- sll %g2, 3, %g2
-3:
- sll %g2, 2, %g2
-2:
- PRE_RETL
- retl
- add %g1, %g2, %o0
-52:
-/* o0 = g1 + g7 - (g2 / 8) * 32 + (x & 3) * 8 */
- and %g2, 0xfffffffffffffff8, %g4
- and %g2, 3, %g2
- sll %g4, 2, %g4
- sll %g2, 3, %g2
- add %g2, %g4, %g2
- b,a 1b
-53:
-/* o0 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 3) * 2 */
- and %g2, 3, %g4
- and %g2, 0xfffffffffffffff8, %g2
- sll %g4, 1, %g4
- add %g2, %g4, %g2
- and %o2, 0xf, %o0
- add %o0, %o3, %o0
- PRE_RETL
- retl
- sub %o0, %g2, %o0
-54:
-/* o0 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 1) */
- srl %g2, 2, %o4
- and %g2, 1, %o1
- sll %o4, 1, %o4
- and %o2, 0xf, %o2
- sub %o3, %o1, %o3
- sub %o2, %o4, %o2
- PRE_RETL
- retl
- add %o2, %o3, %o0
-55:
-/* o0 = (o2 & 1) + (27 - g2)/4 * 2 + ((27 - g2) & 1) */
- neg %g2
- and %o2, 1, %o2
- add %g2, 27, %g2
- srl %g2, 2, %o1
- and %g2, 1, %g2
- sll %o1, 1, %o1
- add %o2, %g2, %o0
- PRE_RETL
- retl
- add %o0, %o1, %o0
diff --git a/arch/sparc64/lib/copy_to_user.S b/arch/sparc64/lib/copy_to_user.S
deleted file mode 100644
index cc6db141f..000000000
--- a/arch/sparc64/lib/copy_to_user.S
+++ /dev/null
@@ -1,469 +0,0 @@
-/* copy_user.S: Sparc optimized copy_to_user code.
- *
- * Copyright(C) 1995 Linus Torvalds
- * Copyright(C) 1996 David S. Miller
- * Copyright(C) 1996 Eddie C. Dost
- * Copyright(C) 1996,1997 Jakub Jelinek
- *
- * derived from:
- * e-mail between David and Eddie.
- *
- * Returns 0 if successful, otherwise count of bytes not copied yet
- *
- * FIXME: This code should be optimized for sparc64... -jj
- */
-
-#include <asm/ptrace.h>
-#include <asm/head.h>
-#include <asm/asi.h>
-
-#define PRE_RETL sethi %uhi(KERNBASE), %g4; sllx %g4, 32, %g4;
-
-#define EX(x,y,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: PRE_RETL \
- retl; \
- a, b, %o0; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
-
-#define EX2(x,y,c,d,e,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: c, d, e; \
- PRE_RETL \
- retl; \
- a, b, %o0; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
-
-#define EXO2(x,y,z) \
-98: x,##y; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 97f; \
- .text; \
- .align 4
-
-#define EXT(start,end,handler,z) \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword start, 0, end, handler; \
- .text; \
- .align 4
-
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
-
-/* Both these macros have to start with exactly the same insn */
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- sta %t0, [%dst + offset + 0x00] %asi; \
- sta %t1, [%dst + offset + 0x04] %asi; \
- sta %t2, [%dst + offset + 0x08] %asi; \
- sta %t3, [%dst + offset + 0x0c] %asi; \
- sta %t4, [%dst + offset + 0x10] %asi; \
- sta %t5, [%dst + offset + 0x14] %asi; \
- sta %t6, [%dst + offset + 0x18] %asi; \
- sta %t7, [%dst + offset + 0x1c] %asi;
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- stda %t0, [%dst + offset + 0x00] %asi; \
- stda %t2, [%dst + offset + 0x08] %asi; \
- stda %t4, [%dst + offset + 0x10] %asi; \
- stda %t6, [%dst + offset + 0x18] %asi;
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - offset - 0x10], %t0; \
- ldd [%src - offset - 0x08], %t2; \
- sta %t0, [%dst - offset - 0x10] %asi; \
- sta %t1, [%dst - offset - 0x0c] %asi; \
- sta %t2, [%dst - offset - 0x08] %asi; \
- sta %t3, [%dst - offset - 0x04] %asi;
-
-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + offset + 0x00], %t0; \
- lduh [%src + offset + 0x02], %t1; \
- lduh [%src + offset + 0x04], %t2; \
- lduh [%src + offset + 0x06], %t3; \
- stha %t0, [%dst + offset + 0x00] %asi; \
- stha %t1, [%dst + offset + 0x02] %asi; \
- stha %t2, [%dst + offset + 0x04] %asi; \
- stha %t3, [%dst + offset + 0x06] %asi;
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - offset - 0x02], %t0; \
- ldub [%src - offset - 0x01], %t1; \
- stba %t0, [%dst - offset - 0x02] %asi; \
- stba %t1, [%dst - offset - 0x01] %asi;
-
- .text
- .align 4
-
- .globl __copy_to_user
-dword_align:
- andcc %o1, 1, %g0
- be 4f
- andcc %o1, 2, %g0
-
- ldub [%o1], %g2
- add %o1, 1, %o1
- EXO2(stba %g2, [%o0] %asi,#)
- sub %o2, 1, %o2
- bne 3f
- add %o0, 1, %o0
-
- lduh [%o1], %g2
- add %o1, 2, %o1
- EXO2(stha %g2, [%o0] %asi,#)
- sub %o2, 2, %o2
- ba,pt %xcc, 3f
- add %o0, 2, %o0
-4:
- lduh [%o1], %g2
- add %o1, 2, %o1
- EXO2(stha %g2, [%o0] %asi,#)
- sub %o2, 2, %o2
- ba,pt %xcc, 3f
- add %o0, 2, %o0
-
-__copy_to_user: /* %o0=dst %o1=src %o2=len */
- wr %g0, ASI_S, %asi
- xor %o0, %o1, %o4
-1:
- andcc %o4, 3, %o5
-2:
- bne,pn %icc, cannot_optimize
- cmp %o2, 15
-
- bleu,pn %xcc, short_aligned_end
- andcc %o1, 3, %g0
-
- bne,pn %icc, dword_align
-3:
- andcc %o1, 4, %g0
-
- be,pt %icc, 2f
- mov %o2, %g1
-
- ld [%o1], %o4
- sub %g1, 4, %g1
- EXO2(sta %o4, [%o0] %asi,#)
- add %o1, 4, %o1
- add %o0, 4, %o0
-2:
- andcc %g1, 0xffffffffffffff80, %g7
- be,pn %xcc, 3f
- andcc %o0, 4, %g0
-
- be,pn %icc, ldd_std + 4
-5:
- MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
- EXT(5b, 80b, 50f,#)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, 5b
- add %o0, 128, %o0
-3:
- andcc %g1, 0x70, %g7
- be,pn %icc, copy_user_table_end
- andcc %g1, 8, %g0
-100:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + (copy_user_table_end - 100b), %g0
- add %o0, %g7, %o0
-
-copy_user_table:
- MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
-copy_user_table_end:
- EXT(copy_user_table, copy_user_table_end, 51f,#)
- be,pt %icc, copy_user_last7
- andcc %g1, 4, %g0
-
- ldd [%o1], %g2
- add %o0, 8, %o0
- add %o1, 8, %o1
- EX(sta %g2, [%o0 - 0x08] %asi, and %g1, 0xf,#)
- EX2(sta %g3, [%o0 - 0x04] %asi, and %g1, 0xf, %g1, sub %g1, 4,#)
-copy_user_last7:
- be,pn %icc, 1f
- andcc %g1, 2, %g0
-
- ld [%o1], %g2
- add %o1, 4, %o1
- EX(sta %g2, [%o0] %asi, and %g1, 7,#)
- add %o0, 4, %o0
-1:
- be,pn %icc, 1f
- andcc %g1, 1, %g0
-
- lduh [%o1], %g2
- add %o1, 2, %o1
- EX(stha %g2, [%o0] %asi, and %g1, 3,#)
- add %o0, 2, %o0
-1:
- be,pn %icc, 1f
- nop
-
- ldub [%o1], %g2
- EX(stba %g2, [%o0] %asi, add %g0, 1,#)
-1:
- PRE_RETL
- retl
- clr %o0
-
-ldd_std:
- MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
- EXT(ldd_std, 81b, 52f,#)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, ldd_std
- add %o0, 128, %o0
-
- andcc %g1, 0x70, %g7
- be,pn %icc, copy_user_table_end
- andcc %g1, 8, %g0
-101:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + (copy_user_table_end - 101b), %g0
- add %o0, %g7, %o0
-
-cannot_optimize:
- bleu short_end
- cmp %o5, 2
-
- bne byte_chunk
- and %o2, 0xfffffffffffffff0, %o3
-
- andcc %o1, 1, %g0
- be 10f
- nop
-
- ldub [%o1], %g2
- add %o1, 1, %o1
- EXO2(stba %g2, [%o0] %asi,#)
- sub %o2, 1, %o2
- andcc %o2, 0xfffffffffffffff0, %o3
- be short_end
- add %o0, 1, %o0
-10:
- MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
- MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
- EXT(10b, 82b, 53f,#)
- subcc %o3, 0x10, %o3
- add %o1, 0x10, %o1
- bne 10b
- add %o0, 0x10, %o0
- ba,pt %xcc, 2f
- and %o2, 0xe, %o3
-
-byte_chunk:
- MOVE_SHORTCHUNK(o1, o0, -0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
- EXT(byte_chunk, 83b, 54f,#)
- subcc %o3, 0x10, %o3
- add %o1, 0x10, %o1
- bne,pt %xcc, byte_chunk
- add %o0, 0x10, %o0
-
-short_end:
- and %o2, 0xe, %o3
-2:
- rd %pc, %o5
- sll %o3, 3, %o4
- add %o0, %o3, %o0
- sub %o5, %o4, %o5
- add %o1, %o3, %o1
- jmpl %o5 + (short_table_end - 2b), %g0
- andcc %o2, 1, %g0
-84:
- MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
-short_table_end:
- EXT(84b, short_table_end, 55f,#)
- be 1f
- nop
- ldub [%o1], %g2
- EX(stba %g2, [%o0] %asi, add %g0, 1,#)
-1:
- PRE_RETL
- retl
- clr %o0
-
-short_aligned_end:
- bne short_end
- andcc %o2, 8, %g0
-
- be 1f
- andcc %o2, 4, %g0
-
- ld [%o1 + 0x00], %g2
- ld [%o1 + 0x04], %g3
- add %o1, 8, %o1
- EXO2(sta %g2, [%o0 + 0x00] %asi,#)
- EX(sta %g3, [%o0 + 0x04] %asi, sub %o2, 4,#)
- add %o0, 8, %o0
-1:
- ba,pt %xcc, copy_user_last7
- mov %o2, %g1
-
- .section .fixup,#alloc,#execinstr
- .align 4
-97:
- PRE_RETL
- retl
- mov %o2, %o0
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * o0 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? x * 8 : (x - 4) * 4)
- */
- cmp %g2, 12
- bcs 1f
- cmp %g2, 24
- bcs 2f
- cmp %g2, 36
- bcs 3f
- nop
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-3:
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-2:
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-1:
- cmp %g2, 4
- bcs,a 1f
- sll %g2, 3, %g2
- sub %g2, 4, %g2
- sll %g2, 2, %g2
-1:
- and %g1, 0x7f, %o0
- add %o0, %g7, %o0
- PRE_RETL
- retl
- sub %o0, %g2, %o0
-51:
-/* i = 41 - g2; j = i % 6;
- * o0 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : (j - 3) * 8;
- */
- neg %g2
- and %g1, 0xf, %g1
- add %g2, 41, %g2
-1:
- cmp %g2, 6
- bcs,a 2f
- cmp %g2, 4
- add %g1, 16, %g1
- b 1b
- sub %g2, 6, %g2
-2:
- bcs,a 3f
- inc %g2
- sub %g2, 3, %g2
- b 2f
- sll %g2, 3, %g2
-3:
- sll %g2, 2, %g2
-2:
- PRE_RETL
- retl
- add %g1, %g2, %o0
-52:
-/* o0 = g1 + g7 - (g2 / 8) * 32 + (x & 3) * 8 */
- and %g2, 0xfffffffffffffff8, %g4
- and %g2, 3, %g2
- sll %g4, 2, %g4
- sll %g2, 3, %g2
- add %g2, %g4, %g2
- b,a 1b
-53:
-/* o0 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 3) * 2 */
- and %g2, 3, %g4
- and %g2, 0xfffffffffffffff8, %g2
- sll %g4, 1, %g4
- add %g2, %g4, %g2
- and %o2, 0xf, %o0
- add %o0, %o3, %o0
- PRE_RETL
- retl
- sub %o0, %g2, %o0
-54:
-/* o0 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 1) */
- srl %g2, 2, %o4
- and %g2, 1, %o1
- sll %o4, 1, %o4
- and %o2, 0xf, %o2
- sub %o3, %o1, %o3
- sub %o2, %o4, %o2
- PRE_RETL
- retl
- add %o2, %o3, %o0
-55:
-/* o0 = (o2 & 1) + (27 - g2)/4 * 2 + ((27 - g2) & 1) */
- neg %g2
- and %o2, 1, %o2
- add %g2, 27, %g2
- srl %g2, 2, %o1
- and %g2, 1, %g2
- sll %o1, 1, %o1
- add %o2, %g2, %o0
- PRE_RETL
- retl
- add %o0, %o1, %o0
diff --git a/arch/sparc64/lib/memcpy.S b/arch/sparc64/lib/memcpy.S
deleted file mode 100644
index e9462345a..000000000
--- a/arch/sparc64/lib/memcpy.S
+++ /dev/null
@@ -1,526 +0,0 @@
-/* memcpy.S: Sparc optimized memcpy, bcopy and memmove code
- * Hand optimized from GNU libc's memcpy, bcopy and memmove
- * for UltraSparc
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <asm/asi.h>
-#include <asm/head.h>
-
-#ifdef __KERNEL__
-
-#define FUNC(x) \
- .globl x; \
- .type x,@function; \
- .align 4; \
-x:
-
-#define FASTER_ALIGNED
-
-/* In kernel these functions don't return a value.
- * One should use macros in asm/string.h for that purpose.
- * We return 0, so that bugs are more apparent.
- */
-#define SETUP_RETL
-#define PRE_RETL sethi %uhi(KERNBASE), %g4; clr %o0
-#define RETL_INSN sllx %g4, 32, %g4
-
-#else
-
-/* libc */
-
-#define FASTER_ALIGNED
-
-#ifdef DEBUG
-#define FUNC(x) \
- .globl jj##x##1; \
- .type jj##x##1,@function; \
- .align 4; \
-jj##x##1:
-#else
-#include "DEFS.h"
-#endif
-
-#define SETUP_RETL mov %o0, %g6
-#define PRE_RETL
-#define RETL_INSN mov %g6, %o0
-
-#endif
-
-#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + offset + 0x00], %t0; \
- ldd [%src + offset + 0x08], %t2; \
- ldd [%src + offset + 0x10], %t4; \
- ldd [%src + offset + 0x18], %t6; \
- stw %t0, [%dst + offset + 0x00]; \
- stw %t1, [%dst + offset + 0x04]; \
- stw %t2, [%dst + offset + 0x08]; \
- stw %t3, [%dst + offset + 0x0c]; \
- stw %t4, [%dst + offset + 0x10]; \
- stw %t5, [%dst + offset + 0x14]; \
- stw %t6, [%dst + offset + 0x18]; \
- stw %t7, [%dst + offset + 0x1c];
-
-#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldx [%src + offset + 0x00], %t0; \
- ldx [%src + offset + 0x08], %t1; \
- ldx [%src + offset + 0x10], %t2; \
- ldx [%src + offset + 0x18], %t3; \
- ldx [%src + offset + 0x20], %t4; \
- ldx [%src + offset + 0x28], %t5; \
- ldx [%src + offset + 0x30], %t6; \
- ldx [%src + offset + 0x38], %t7; \
- stx %t0, [%dst + offset + 0x00]; \
- stx %t1, [%dst + offset + 0x08]; \
- stx %t2, [%dst + offset + 0x10]; \
- stx %t3, [%dst + offset + 0x18]; \
- stx %t4, [%dst + offset + 0x20]; \
- stx %t5, [%dst + offset + 0x28]; \
- stx %t6, [%dst + offset + 0x30]; \
- stx %t7, [%dst + offset + 0x38];
-
-#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - offset - 0x10], %t0; \
- ldd [%src - offset - 0x08], %t2; \
- stw %t0, [%dst - offset - 0x10]; \
- stw %t1, [%dst - offset - 0x0c]; \
- stw %t2, [%dst - offset - 0x08]; \
- stw %t3, [%dst - offset - 0x04];
-
-#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1) \
- ldx [%src - offset - 0x10], %t0; \
- ldx [%src - offset - 0x08], %t1; \
- stx %t0, [%dst - offset - 0x10]; \
- stx %t1, [%dst - offset - 0x08];
-
-#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - offset - 0x02], %t0; \
- ldub [%src - offset - 0x01], %t1; \
- stb %t0, [%dst - offset - 0x02]; \
- stb %t1, [%dst - offset - 0x01];
-
- .text
- .align 4
-
-FUNC(bcopy)
-
- mov %o0, %o3
- mov %o1, %o0
- mov %o3, %o1
- brgez,a,pt %o2, 1f
- cmp %o0, %o1
-
- retl
- nop ! Only bcopy returns here and it retuns void...
-
-#ifdef __KERNEL__
-FUNC(amemmove)
-FUNC(__memmove)
-#endif
-FUNC(memmove)
-
- cmp %o0, %o1
-1:
- SETUP_RETL
- bleu,pt %xcc, 9f
- sub %o0, %o1, %o4
-
- add %o1, %o2, %o3
- cmp %o3, %o0
- bleu,pt %xcc, 0f
- andcc %o4, 3, %o5
-
- add %o1, %o2, %o1
- add %o0, %o2, %o0
- sub %o1, 1, %o1
- sub %o0, 1, %o0
-
-1:
- ldub [%o1], %o4
- subcc %o2, 1, %o2
- sub %o1, 1, %o1
- stb %o4, [%o0]
- bne,pt %icc, 1b
- sub %o0, 1, %o0
-
- PRE_RETL
- retl
- RETL_INSN
-
-#ifdef __KERNEL__
-FUNC(__memcpy)
-#endif
-FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
-
- sub %o0, %o1, %o4
- SETUP_RETL
-9:
- andcc %o4, 3, %o5
-0:
- bne,pn %icc, 86f
- cmp %o2, 15
-
- bleu,pn %xcc, 90f
- andcc %o1, 3, %g0
-
- be,a,pt %icc, 3f ! check if we need to align
- andcc %o1, 4, %g0
-
- andcc %o1, 1, %g0
- be,pn %icc, 4f
- andcc %o1, 2, %g0
-
- ldub [%o1], %g2
- add %o1, 1, %o1
- sub %o2, 1, %o2
- stb %g2, [%o0]
- bne,pn %icc, 5f
- add %o0, 1, %o0
-4:
- lduh [%o1], %g2
- add %o1, 2, %o1
- sub %o2, 2, %o2
- sth %g2, [%o0]
- add %o0, 2, %o0
-5:
- andcc %o1, 4, %g0
-3:
- be,pn %icc, 2f
- mov %o2, %g1
-
- lduw [%o1], %o4
- sub %g1, 4, %g1
- stw %o4, [%o0]
- add %o1, 4, %o1
- add %o0, 4, %o0
-2:
- andcc %g1, -128, %g7
- be,pn %xcc, 3f
- andcc %o0, 4, %g0
-
- be,a,pn %icc, 82f + 4
- ldx [%o1], %o2
-5:
- MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, 5b
- add %o0, 128, %o0
-3:
- andcc %g1, 0x70, %g7
- be,pn %icc, 80f
- andcc %g1, 8, %g0
-79:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + %lo(80f-79b), %g0
- add %o0, %g7, %o0
-
- MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
- MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
-
-80: /* memcpy_table_end */
- be,pt %icc, 81f
- andcc %g1, 4, %g0
-
- ldd [%o1], %g2
- add %o0, 8, %o0
- stw %g2, [%o0 - 0x08]
- add %o1, 8, %o1
- stw %g3, [%o0 - 0x04]
-
-81: /* memcpy_last7 */
-
- be,pt %icc, 1f
- andcc %g1, 2, %g0
-
- lduw [%o1], %g2
- add %o1, 4, %o1
- stw %g2, [%o0]
- add %o0, 4, %o0
-1:
- be,pt %icc, 1f
- andcc %g1, 1, %g0
-
- lduh [%o1], %g2
- add %o1, 2, %o1
- sth %g2, [%o0]
- add %o0, 2, %o0
-1:
- be,pt %icc, 1f
- nop
-
- ldub [%o1], %g2
- stb %g2, [%o0]
-1:
- PRE_RETL
- retl
- RETL_INSN
-
-82: /* ldx_stx */
- MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
- MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
- subcc %g7, 128, %g7
- add %o1, 128, %o1
- bne,pt %xcc, 82b
- add %o0, 128, %o0
-
-#ifndef FASTER_ALIGNED
-
- andcc %g1, 0x70, %g7
- be,pn %icc, 80b
- andcc %g1, 8, %g0
-83:
- rd %pc, %o5
- srl %g7, 1, %o4
- add %g7, %o4, %o4
- add %o1, %g7, %o1
- sub %o5, %o4, %o5
- jmpl %o5 + %lo(80b - 83b), %g0
- add %o0, %g7, %o0
-
-#else /* FASTER_ALIGNED */
-
- andcc %g1, 0x70, %g7
- be,pn %icc, 84f
- andcc %g1, 8, %g0
-83:
- rd %pc, %o5
- add %o1, %g7, %o1
- sub %o5, %g7, %o5
- jmpl %o5 + %lo(84f - 83b), %g0
- add %o0, %g7, %o0
-
- MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3)
- MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3)
-
-84: /* amemcpy_table_end */
- be,pt %icc, 85f
- andcc %g1, 4, %g0
-
- ldx [%o1], %g2
- add %o1, 8, %o1
- stx %g2, [%o0]
- add %o0, 8, %o0
-85: /* amemcpy_last7 */
- be,pt %icc, 1f
- andcc %g1, 2, %g0
-
- lduw [%o1], %g2
- add %o1, 4, %o1
- stw %g2, [%o0]
- add %o0, 4, %o0
-1:
- be,pt %icc, 1f
- andcc %g1, 1, %g0
-
- lduh [%o1], %g2
- add %o1, 2, %o1
- sth %g2, [%o0]
- add %o0, 2, %o0
-1:
- be,pt %icc, 1f
- nop
-
- ldub [%o1], %g2
- stb %g2, [%o0]
-1:
- PRE_RETL
- retl
- RETL_INSN
-
-#endif /* FASTER_ALIGNED */
-
-86: /* non_aligned */
- cmp %o2, 15
- bleu,pn %xcc, 88f
-
- andcc %o0, 3, %g0
- be,pn %icc, 61f
- andcc %o0, 1, %g0
- be,pn %icc, 60f
- andcc %o0, 2, %g0
-
- ldub [%o1], %g5
- add %o1, 1, %o1
- stb %g5, [%o0]
- sub %o2, 1, %o2
- bne,pn %icc, 61f
- add %o0, 1, %o0
-60:
- ldub [%o1], %g3
- add %o1, 2, %o1
- stb %g3, [%o0]
- sub %o2, 2, %o2
- ldub [%o1 - 1], %g3
- add %o0, 2, %o0
- stb %g3, [%o0 - 1]
-61:
- and %o1, 3, %g2
- and %o2, 0xc, %g3
- and %o1, -4, %o1
- cmp %g3, 4
- sll %g2, 3, %g4
- mov 32, %g2
- be,pn %icc, 4f
- sub %g2, %g4, %g7
-
- blu,pn %icc, 3f
- cmp %g3, 0x8
-
- be,pn %icc, 2f
- srl %o2, 2, %g3
-
- lduw [%o1], %o3
- add %o0, -8, %o0
- lduw [%o1 + 4], %o4
- ba,pt %xcc, 8f
- add %g3, 1, %g3
-2:
- lduw [%o1], %o4
- add %o0, -12, %o0
- lduw [%o1 + 4], %o5
- add %g3, 2, %g3
- ba,pt %xcc, 9f
- add %o1, -4, %o1
-3:
- lduw [%o1], %g1
- add %o0, -4, %o0
- lduw [%o1 + 4], %o3
- srl %o2, 2, %g3
- ba,pt %xcc, 7f
- add %o1, 4, %o1
-4:
- lduw [%o1], %o5
- cmp %o2, 7
- lduw [%o1 + 4], %g1
- srl %o2, 2, %g3
- bleu,pn %xcc, 10f
- add %o1, 8, %o1
-
- lduw [%o1], %o3
- add %g3, -1, %g3
-5:
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
- or %g2, %g5, %g2
- stw %g2, [%o0]
-7:
- lduw [%o1 + 4], %o4
- sll %g1, %g4, %g2
- srl %o3, %g7, %g5
- or %g2, %g5, %g2
- stw %g2, [%o0 + 4]
-8:
- lduw [%o1 + 8], %o5
- sll %o3, %g4, %g2
- srl %o4, %g7, %g5
- or %g2, %g5, %g2
- stw %g2, [%o0 + 8]
-9:
- lduw [%o1 + 12], %g1
- sll %o4, %g4, %g2
- srl %o5, %g7, %g5
- addcc %g3, -4, %g3
- or %g2, %g5, %g2
- add %o1, 16, %o1
- stw %g2, [%o0 + 12]
- add %o0, 16, %o0
- bne,a,pt %xcc, 5b
- lduw [%o1], %o3
-10:
- sll %o5, %g4, %g2
- srl %g1, %g7, %g5
- srl %g7, 3, %g3
- or %g2, %g5, %g2
- sub %o1, %g3, %o1
- andcc %o2, 2, %g0
- stw %g2, [%o0]
- be,pt %icc, 1f
- andcc %o2, 1, %g0
-
- ldub [%o1], %g2
- add %o1, 2, %o1
- stb %g2, [%o0 + 4]
- add %o0, 2, %o0
- ldub [%o1 - 1], %g2
- stb %g2, [%o0 + 3]
-1:
- be,pt %icc, 1f
- nop
-
- ldub [%o1], %g2
- stb %g2, [%o0 + 4]
-1:
- PRE_RETL
- retl
- RETL_INSN
-
-88: /* short_end */
-
- and %o2, 0xe, %o3
-20:
- rd %pc, %o5
- sll %o3, 3, %o4
- add %o0, %o3, %o0
- sub %o5, %o4, %o5
- add %o1, %o3, %o1
- jmpl %o5 + %lo(89f - 20b), %g0
- andcc %o2, 1, %g0
-
- MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
-
-89: /* short_table_end */
-
- be,pt %icc, 1f
- nop
-
- ldub [%o1], %g2
- stb %g2, [%o0]
-1:
- PRE_RETL
- retl
- RETL_INSN
-
-90: /* short_aligned_end */
- bne,pn %xcc, 88b
- andcc %o2, 8, %g0
-
- be,pt %icc, 1f
- andcc %o2, 4, %g0
-
- lduw [%o1 + 0x00], %g2
- lduw [%o1 + 0x04], %g3
- add %o1, 8, %o1
- stw %g2, [%o0 + 0x00]
- stw %g3, [%o0 + 0x04]
- add %o0, 8, %o0
-1:
- ba,pt %xcc, 81b
- mov %o2, %g1
diff --git a/arch/sparc64/lib/memset.S b/arch/sparc64/lib/memset.S
deleted file mode 100644
index 713c78ca8..000000000
--- a/arch/sparc64/lib/memset.S
+++ /dev/null
@@ -1,196 +0,0 @@
-/* linux/arch/sparc64/lib/memset.S: Sparc optimized memset, bzero and clear_user code
- * Copyright (C) 1991,1996 Free Software Foundation
- * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- *
- * Returns 0, if ok, and number of bytes not yet set if exception
- * occurs and we were called as clear_user.
- */
-
-#include <asm/asi.h>
-#include <asm/ptrace.h>
-
-#define EX(x,y,a,b,z) \
-98: x,y; \
- .section .fixup,z##alloc,z##execinstr; \
- .align 4; \
-99: ba,pt %xcc, 30f; \
- a, b, %o0; \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword 98b, 99b; \
- .text; \
- .align 4
-
-#define EXT(start,end,handler,z) \
- .section __ex_table,z##alloc; \
- .align 8; \
- .xword start, 0, end, handler; \
- .text; \
- .align 4
-
-/* Please don't change these macros, unless you change the logic
- * in the .fixup section below as well.
- * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
-#define ZERO_BIG_BLOCK(base, offset, source) \
- stxa source, [base + offset + 0x00] %asi; \
- stxa source, [base + offset + 0x08] %asi; \
- stxa source, [base + offset + 0x10] %asi; \
- stxa source, [base + offset + 0x18] %asi; \
- stxa source, [base + offset + 0x20] %asi; \
- stxa source, [base + offset + 0x28] %asi; \
- stxa source, [base + offset + 0x30] %asi; \
- stxa source, [base + offset + 0x38] %asi;
-
-#define ZERO_LAST_BLOCKS(base, offset, source) \
- stxa source, [base - offset - 0x38] %asi; \
- stxa source, [base - offset - 0x30] %asi; \
- stxa source, [base - offset - 0x28] %asi; \
- stxa source, [base - offset - 0x20] %asi; \
- stxa source, [base - offset - 0x18] %asi; \
- stxa source, [base - offset - 0x10] %asi; \
- stxa source, [base - offset - 0x08] %asi; \
- stxa source, [base - offset - 0x00] %asi;
-
- .text
- .align 4
-
- .globl __bzero, __memset, __bzero_noasi
- .globl memset, __memset_start, __memset_end
-__memset_start:
-__memset:
-memset:
- and %o1, 0xff, %g3
- sll %g3, 8, %g2
- or %g3, %g2, %g3
- sll %g3, 16, %g2
- or %g3, %g2, %g3
- mov %o2, %o1
- wr %g0, ASI_P, %asi
- sllx %g3, 32, %g2
- ba,pt %xcc, 1f
- or %g3, %g2, %g3
-__bzero:
- wr %g0, ASI_P, %asi
-__bzero_noasi:
- mov %g0, %g3
-1:
- cmp %o1, 7
- bleu,pn %xcc, 7f
- andcc %o0, 3, %o2
-
- be,a,pt %icc, 4f
- andcc %o0, 4, %g0
-
- cmp %o2, 3
- be,pn %icc, 2f
- EX(stba %g3, [%o0] %asi, sub %o1, 0,#)
-
- cmp %o2, 2
- be,pt %icc, 2f
- EX(stba %g3, [%o0 + 0x01] %asi, sub %o1, 1,#)
-
- EX(stba %g3, [%o0 + 0x02] %asi, sub %o1, 2,#)
-2:
- sub %o2, 4, %o2
- sub %o0, %o2, %o0
- add %o1, %o2, %o1
- andcc %o0, 4, %g0
-4:
- be,a,pt %icc, 2f
- andncc %o1, 0x7f, %o3
-
- EX(sta %g3, [%o0] %asi, sub %o1, 0,#)
- sub %o1, 4, %o1
- add %o0, 4, %o0
- andncc %o1, 0x7f, %o3 ! Now everything is 8 aligned and o1 is len to run
-2:
- be,pn %xcc, 9f
- andcc %o1, 0x78, %o2
-10:
- ZERO_BIG_BLOCK(%o0, 0x00, %g3)
- subcc %o3, 128, %o3
- ZERO_BIG_BLOCK(%o0, 0x40, %g3)
-11:
- EXT(10b, 11b, 20f,#)
- bne,pt %xcc, 10b
- add %o0, 128, %o0
-
- tst %o2
-9:
- be,pn %xcc, 13f
- andcc %o1, 7, %o1
-14:
- rd %pc, %o4
- srl %o2, 1, %o3
- sub %o4, %o3, %o4
- jmpl %o4 + (13f - 14b), %g0
- add %o0, %o2, %o0
-12:
- ZERO_LAST_BLOCKS(%o0, 0x48, %g3)
- ZERO_LAST_BLOCKS(%o0, 0x08, %g3)
-13:
- be,pn %icc, 8f
- andcc %o1, 4, %g0
-
- be,pn %icc, 1f
- andcc %o1, 2, %g0
-
- EX(sta %g3, [%o0] %asi, and %o1, 7,#)
- add %o0, 4, %o0
-1:
- be,pn %icc, 1f
- andcc %o1, 1, %g0
-
- EX(stha %g3, [%o0] %asi, and %o1, 3,#)
- add %o0, 2, %o0
-1:
- bne,a,pn %icc, 8f
- EX(stba %g3, [%o0] %asi, and %o1, 1,#)
-8:
- retl
- clr %o0
-7:
- be,pn %icc, 13b
- orcc %o1, 0, %g0
-
- be,pn %icc, 0f
-8:
- add %o0, 1, %o0
- subcc %o1, 1, %o1
- bne,a,pt %icc, 8b
- EX(stba %g3, [%o0 - 1] %asi, add %o1, 1,#)
-0:
- retl
- clr %o0
-__memset_end:
-
- .section .fixup,#alloc,#execinstr
- .align 4
-20:
- cmp %g2, 8
- bleu,pn %xcc, 1f
- and %o1, 0x7f, %o1
- sub %g2, 9, %g2
- add %o3, 64, %o3
-1:
- sll %g2, 3, %g2
- add %o3, %o1, %o0
- ba,pt %xcc, 30f
- sub %o0, %g2, %o0
-21:
- mov 8, %o0
- and %o1, 7, %o1
- sub %o0, %g2, %o0
- sll %o0, 3, %o0
- ba,pt %xcc, 30f
- add %o0, %o1, %o0
-30:
-/* %o4 is faulting address, %o5 is %pc where fault occured */
- save %sp, -160, %sp
- mov %i5, %o0
- mov %i7, %o1
- call lookup_fault
- mov %i4, %o2
- ret
- restore
diff --git a/arch/sparc64/lib/strlen_user.S b/arch/sparc64/lib/strlen_user.S
index 4d57aed64..ef6cee5a6 100644
--- a/arch/sparc64/lib/strlen_user.S
+++ b/arch/sparc64/lib/strlen_user.S
@@ -20,36 +20,27 @@ __strlen_user:
andcc %o0, 3, %g0
be,pt %icc, 9f
sethi %hi(HI_MAGIC), %o4
-10:
- lduba [%o0] ASI_S, %o5
+10: lduba [%o0] ASI_S, %o5
brz,pn %o5, 21f
add %o0, 1, %o0
andcc %o0, 3, %g0
be,pn %icc, 4f
or %o4, %lo(HI_MAGIC), %o3
-11:
- lduba [%o0] ASI_S, %o5
+11: lduba [%o0] ASI_S, %o5
brz,pn %o5, 22f
add %o0, 1, %o0
andcc %o0, 3, %g0
- be,pt %icc, 5f
- sethi %hi(LO_MAGIC), %o4
-12:
- lduba [%o0] ASI_S, %o5
+ be,pt %icc, 13f
+ srl %o3, 7, %o2
+12: lduba [%o0] ASI_S, %o5
brz,pn %o5, 23f
add %o0, 1, %o0
- ba,pt %icc, 13f
- or %o4, %lo(LO_MAGIC), %o2
-9:
- or %o4, %lo(HI_MAGIC), %o3
-4:
- sethi %hi(LO_MAGIC), %o4
-5:
- or %o4, %lo(LO_MAGIC), %o2
-13:
- lda [%o0] ASI_S, %o5
-2:
- sub %o5, %o2, %o4
+ ba,pt %icc, 2f
+15: lda [%o0] ASI_S, %o5
+9: or %o4, %lo(HI_MAGIC), %o3
+4: srl %o3, 7, %o2
+13: lda [%o0] ASI_S, %o5
+2: sub %o5, %o2, %o4
andcc %o4, %o3, %g0
be,pt %icc, 13b
add %o0, 4, %o0
@@ -69,20 +60,15 @@ __strlen_user:
add %o4, 1, %o4
andcc %o5, 0xff, %g0
bne,a,pt %icc, 2b
-14:
- lda [%o0] ASI_S, %o5
+14: lda [%o0] ASI_S, %o5
add %o4, 1, %o4
-1:
- retl
+1: retl
sub %o4, %o1, %o0
-21:
- retl
+21: retl
mov 1, %o0
-22:
- retl
+22: retl
mov 2, %o0
-23:
- retl
+23: retl
mov 3, %o0
.section .fixup,#alloc,#execinstr
@@ -97,5 +83,6 @@ __strlen_user:
.xword 10b, 30b
.xword 11b, 30b
.xword 12b, 30b
+ .xword 15b, 30b
.xword 13b, 30b
.xword 14b, 30b
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index c41c7a938..dddf3153b 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.1 1996/12/26 10:24:22 davem Exp $
+# $Id: Makefile,v 1.3 1997/06/27 14:53:38 jj Exp $
# Makefile for the linux Sparc64-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -7,7 +7,13 @@
#
# Note 2! The CFLAGS definition is now in the main makefile...
+.S.s:
+ $(CPP) -D__ASSEMBLY__ -ansi $< -o $*.s
+
+.S.o:
+ $(CC) -D__ASSEMBLY__ -ansi -c $< -o $*.o
+
O_TARGET := mm.o
-O_OBJS := fault.o init.o generic.o asyncd.o extable.o
+O_OBJS := ultra.o fault.o init.o generic.o asyncd.o extable.o modutil.o
include $(TOPDIR)/Rules.make
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6df923a4b..5e16e1218 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.11 1997/06/01 05:46:15 davem Exp $
+/* $Id: fault.c,v 1.18 1997/07/17 02:20:56 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -134,44 +134,14 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
return 0;
}
-/* #define FAULT_TRACER */
-/* #define FAULT_TRACER_VERBOSE */
+/* #define DEBUG_EXCEPTIONS */
-asmlinkage void do_sparc64_fault(struct pt_regs *regs, int text_fault, int write,
- unsigned long address, unsigned long tag,
- unsigned long sfsr)
+asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, int write)
{
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- unsigned long fixup;
- unsigned long g2;
- int from_user = !(regs->tstate & TSTATE_PRIV);
-#ifdef FAULT_TRACER
- static unsigned long last_addr = 0;
- static int rcnt = 0;
-
-#ifdef FAULT_TRACER_VERBOSE
- printk("FAULT(PC[%016lx],t[%d],w[%d],addr[%016lx])...",
- regs->tpc, text_fault, write, address);
-#else
- printk("F[%016lx:%016lx:w(%d)", regs->tpc, address, write);
-#endif
- if(address == last_addr) {
- if(rcnt++ > 15) {
- printk("Wheee lotsa bogus faults, something wrong, spinning\n");
- __asm__ __volatile__("flushw");
- printk("o7[%016lx] i7[%016lx]\n",
- regs->u_regs[UREG_I7],
- ((struct reg_window *)(regs->u_regs[UREG_FP]+STACK_BIAS))->ins[7]);
- sti();
- while(1)
- barrier();
- }
- } else rcnt = 0;
- last_addr = address;
-#endif
- lock_kernel ();
+
+ lock_kernel();
down(&mm->mmap_sem);
vma = find_vma(mm, address);
if(!vma)
@@ -204,40 +174,99 @@ good_area:
*/
bad_area:
up(&mm->mmap_sem);
- /* Is this in ex_table? */
+
+ {
+ unsigned long g2 = regs->u_regs[UREG_G2];
+
+ /* Is this in ex_table? */
+ if (regs->tstate & TSTATE_PRIV) {
+ unsigned char asi = ASI_P;
+ unsigned int insn;
+ unsigned long fixup;
+
+ insn = *(unsigned int *)regs->tpc;
+ if ((insn & 0xc0800000) == 0xc0800000) {
+ if (insn & 0x2000)
+ asi = (regs->tstate >> 24);
+ else
+ asi = (insn >> 5);
+ }
- g2 = regs->u_regs[UREG_G2];
- if (!from_user && (fixup = search_exception_table (regs->tpc, &g2))) {
- printk("Exception: PC<%016lx> faddr<%016lx>\n", regs->tpc, address);
- printk("EX_TABLE: insn<%016lx> fixup<%016lx> g2<%016lx>\n",
- regs->tpc, fixup, g2);
- regs->tpc = fixup;
- regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
- goto out;
- }
- if(from_user) {
-#if 1
- unsigned long cpc;
- __asm__ __volatile__("mov %%i7, %0" : "=r" (cpc));
- printk("[%s:%d] SIGSEGV pc[%016lx] addr[%016lx] w[%d] sfsr[%016lx] "
- "caller[%016lx]\n", current->comm, current->pid, regs->tpc,
- address, write, sfsr, cpc);
+ /* Look in asi.h: All _S asis have LS bit set */
+ if ((asi & 0x1) &&
+ (fixup = search_exception_table (regs->tpc, &g2))) {
+#ifdef DEBUG_EXCEPTIONS
+ printk("Exception: PC<%016lx> faddr<%016lx>\n",
+ regs->tpc, address);
+ printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
+ "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
- tsk->tss.sig_address = address;
- tsk->tss.sig_desc = SUBSIG_NOMAPPING;
- send_sig(SIGSEGV, tsk, 1);
- goto out;
+ regs->tpc = fixup;
+ regs->tnpc = regs->tpc + 4;
+ regs->u_regs[UREG_G2] = g2;
+ goto out;
+ }
+ } else {
+ current->tss.sig_address = address;
+ current->tss.sig_desc = SUBSIG_NOMAPPING;
+ send_sig(SIGSEGV, current, 1);
+ goto out;
+ }
+ unhandled_fault (address, current, regs);
}
- unhandled_fault (address, tsk, regs);
out:
unlock_kernel();
-#ifdef FAULT_TRACER
-#ifdef FAULT_TRACER_VERBOSE
- printk(" done\n");
-#else
- printk("]");
-#endif
-#endif
}
+void fixup_dcache_alias(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ struct vm_area_struct *vmaring;
+ struct inode *inode;
+ unsigned long vaddr, offset, start;
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ int alias_found = 0;
+
+ inode = vma->vm_dentry->d_inode;
+ if(!inode)
+ return;
+
+ offset = (address & PAGE_MASK) - vma->vm_start;
+ vmaring = inode->i_mmap;
+ do {
+ vaddr = vmaring->vm_start + offset;
+
+ /* This conditional is misleading... */
+ if((vaddr ^ address) & PAGE_SIZE) {
+ alias_found++;
+ start = vmaring->vm_start;
+ while(start < vmaring->vm_end) {
+ pgdp = pgd_offset(vmaring->vm_mm, start);
+ if(!pgdp) goto next;
+ pmdp = pmd_offset(pgdp, start);
+ if(!pmdp) goto next;
+ ptep = pte_offset(pmdp, start);
+ if(!ptep) goto next;
+
+ if(pte_val(*ptep) & _PAGE_PRESENT) {
+ flush_cache_page(vmaring, start);
+ *ptep = __pte(pte_val(*ptep) &
+ ~(_PAGE_CV));
+ flush_tlb_page(vmaring, start);
+ }
+ next:
+ start += PAGE_SIZE;
+ }
+ }
+ } while((vmaring = vmaring->vm_next_share) != NULL);
+
+ if(alias_found && (pte_val(pte) & _PAGE_CV)) {
+ pgdp = pgd_offset(vma->vm_mm, address);
+ pmdp = pmd_offset(pgdp, address);
+ ptep = pte_offset(pmdp, address);
+ flush_cache_page(vma, address);
+ *ptep = __pte(pte_val(*ptep) & ~(_PAGE_CV));
+ flush_tlb_page(vma, address);
+ }
+}
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 289ddd411..730e8cb32 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -1,4 +1,4 @@
-/* $Id: generic.c,v 1.1 1996/12/26 10:24:23 davem Exp $
+/* $Id: generic.c,v 1.2 1997/07/01 09:11:42 jj Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
@@ -66,13 +66,35 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
if (end > PMD_SIZE)
end = PMD_SIZE;
do {
- pte_t oldpage = *pte;
- pte_clear(pte);
- set_pte(pte, mk_pte_io(offset, prot, space));
- forget_pte(oldpage);
- address += PAGE_SIZE;
+ pte_t oldpage;
+ pte_t entry;
+ unsigned long curend = address + PAGE_SIZE;
+
+ entry = mk_pte_io(offset, prot, space);
offset += PAGE_SIZE;
- pte++;
+ if (!(address & 0xffff)) {
+ if (!(address & 0x3fffff) && !(offset & 0x3fffff) && end >= address + 0x400000) {
+ entry = mk_pte_io(offset, __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), space);
+ curend = address + 0x400000;
+ offset += 0x400000 - PAGE_SIZE;
+ } else if (!(address & 0x7ffff) && !(offset & 0x7ffff) && end >= address + 0x80000) {
+ entry = mk_pte_io(offset, __pgprot(pgprot_val (prot) | _PAGE_SZ512K), space);
+ curend = address + 0x80000;
+ offset += 0x80000 - PAGE_SIZE;
+ } else if (!(offset & 0xffff) && end >= address + 0x10000) {
+ entry = mk_pte_io(offset, __pgprot(pgprot_val (prot) | _PAGE_SZ64K), space);
+ curend = address + 0x10000;
+ offset += 0x10000 - PAGE_SIZE;
+ }
+ }
+ do {
+ oldpage = *pte;
+ pte_clear(pte);
+ set_pte(pte, entry);
+ forget_pte(oldpage);
+ address += PAGE_SIZE;
+ pte++;
+ } while (address < curend);
} while (address < end);
}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 960b3cbbd..a8d903b25 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1,15 +1,17 @@
-/* $Id: init.c,v 1.29 1997/05/27 06:28:13 davem Exp $
+/* $Id: init.c,v 1.39 1997/07/07 02:50:57 davem Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/config.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/blk.h>
#include <linux/swap.h>
+#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -33,7 +35,7 @@ extern unsigned long empty_null_pte_table;
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
/* References to section boundaries */
-extern char __init_begin, __init_end, etext, __p1275_loc, __bss_start;
+extern char __init_begin, __init_end, etext, __bss_start;
/*
* BAD_PAGE is the page that is used for page faults when linux
@@ -59,13 +61,15 @@ pmd_t *__bad_pmd(void)
pte_t *__bad_pte(void)
{
memset((void *) &empty_bad_pte_table, 0, PAGE_SIZE);
- return (pte_t *) (((unsigned long)&empty_bad_pte_table) + phys_base);
+ return (pte_t *) (((unsigned long)&empty_bad_pte_table)
+ - ((unsigned long)&empty_zero_page) + phys_base + PAGE_OFFSET);
}
pte_t __bad_page(void)
{
memset((void *) &empty_bad_page, 0, PAGE_SIZE);
- return pte_mkdirty(mk_pte((((unsigned long) &empty_bad_page)+phys_base),
+ return pte_mkdirty(mk_pte((((unsigned long) &empty_bad_page)
+ - ((unsigned long)&empty_zero_page) + phys_base + PAGE_OFFSET),
PAGE_SHARED));
}
@@ -288,7 +292,7 @@ struct linux_prom_translation {
};
#define MAX_TRANSLATIONS 64
-static void inherit_prom_mappings(void)
+static inline void inherit_prom_mappings(void)
{
struct linux_prom_translation transl[MAX_TRANSLATIONS];
pgd_t *pgdp;
@@ -332,7 +336,11 @@ static void inherit_prom_mappings(void)
}
}
-static void inherit_locked_prom_mappings(void)
+int prom_itlb_ent, prom_dtlb_ent;
+unsigned long prom_itlb_tag, prom_itlb_data;
+unsigned long prom_dtlb_tag, prom_dtlb_data;
+
+static inline void inherit_locked_prom_mappings(void)
{
int i;
int dtlb_seen = 0;
@@ -359,6 +367,9 @@ static void inherit_locked_prom_mappings(void)
data = spitfire_get_dtlb_data(i);
if(!dtlb_seen && (data & _PAGE_L)) {
unsigned long tag = spitfire_get_dtlb_tag(i);
+ prom_dtlb_ent = i;
+ prom_dtlb_tag = tag;
+ prom_dtlb_data = data;
__asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
membar("#Sync");
@@ -379,6 +390,9 @@ static void inherit_locked_prom_mappings(void)
data = spitfire_get_itlb_data(i);
if(!itlb_seen && (data & _PAGE_L)) {
unsigned long tag = spitfire_get_itlb_tag(i);
+ prom_itlb_ent = i;
+ prom_itlb_tag = tag;
+ prom_itlb_data = data;
__asm__ __volatile__("stxa %%g0, [%0] %1"
: : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
membar("#Sync");
@@ -399,6 +413,64 @@ static void inherit_locked_prom_mappings(void)
}
}
+/* Give PROM back his world, done during reboots... */
+void prom_reload_locked(void)
+{
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_dtlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(prom_dtlb_ent, prom_dtlb_data);
+ membar("#Sync");
+
+ __asm__ __volatile__("stxa %0, [%1] %2"
+ : : "r" (prom_itlb_tag), "r" (TLB_TAG_ACCESS),
+ "i" (ASI_IMMU));
+ membar("#Sync");
+ spitfire_put_itlb_data(prom_itlb_ent, prom_itlb_data);
+ membar("#Sync");
+}
+
+/* If not locked, zap it. */
+void flush_tlb_all(void)
+{
+ unsigned long flags;
+ int i;
+
+ save_flags(flags); cli();
+ for(i = 0; i < 64; i++) {
+ if(!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+ membar("#Sync");
+ spitfire_put_dtlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
+ if(!(spitfire_get_itlb_data(i) & _PAGE_L)) {
+ __asm__ __volatile__("stxa %%g0, [%0] %1"
+ : /* no outputs */
+ : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+ membar("#Sync");
+ spitfire_put_itlb_data(i, 0x0UL);
+ membar("#Sync");
+ }
+ }
+ restore_flags(flags);
+}
+
+void get_new_mmu_context(struct mm_struct *mm, unsigned long ctx)
+{
+ if((ctx & ~(CTX_VERSION_MASK)) == 0) {
+ flush_tlb_all();
+ ctx = (ctx & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if(ctx == 1)
+ ctx = CTX_FIRST_VERSION;
+ }
+ tlb_context_cache = ctx + 1;
+ mm->context = ctx;
+}
+
__initfunc(static void
allocate_ptable_skeleton(unsigned long start, unsigned long end))
{
@@ -440,9 +512,9 @@ void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
physaddr &= PAGE_MASK;
if(rdonly)
- pte = mk_pte_phys(physaddr, __pgprot(pg_iobits));
+ pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __PRIV_BITS));
else
- pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __DIRTY_BITS));
+ pte = mk_pte_phys(physaddr, __pgprot(pg_iobits | __DIRTY_BITS | __PRIV_BITS));
set_pte(ptep, pte);
}
@@ -500,51 +572,42 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
{
extern unsigned long phys_base;
extern void setup_tba(unsigned long kpgdir);
- extern void __bfill64(void *, unsigned long);
- pgd_t *pgdp;
+ extern void __bfill64(void *, unsigned long *);
pmd_t *pmdp;
- pte_t *ptep, pte;
int i;
-
- /* Must create 2nd locked DTLB entry if physical ram starts at
- * 4MB absolute or higher, kernel image has been placed in the
- * right place at PAGE_OFFSET but references to start_mem and pages
- * will be to the perfect alias mapping, so set it up now.
+ unsigned long alias_base = phys_base + PAGE_OFFSET;
+ unsigned long pt;
+ unsigned long flags;
+ unsigned long shift = alias_base - ((unsigned long)&empty_zero_page);
+
+ /* We assume physical memory starts at some 4mb multiple,
+ * if this were not true we wouldn't boot up to this point
+ * anyways.
*/
- if(phys_base >= (4 * 1024 * 1024)) {
- unsigned long alias_base = phys_base + PAGE_OFFSET;
- unsigned long pte;
- unsigned long flags;
-
- /* We assume physical memory starts at some 4mb multiple,
- * if this were not true we wouldn't boot up to this point
- * anyways.
- */
- pte = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
- pte |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
- save_flags(flags); cli();
- __asm__ __volatile__("
- stxa %1, [%0] %3
- stxa %2, [%5] %4
- membar #Sync
- flush %%g4
- nop
- nop
- nop"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pte),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
- : "memory");
- restore_flags(flags);
-
- /* Now set kernel pgd to upper alias so physical page computations
- * work.
- */
- init_mm.pgd += (phys_base / (sizeof(pgd_t *)));
- }
+ pt = phys_base | _PAGE_VALID | _PAGE_SZ4MB;
+ pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
+ save_flags(flags); cli();
+ __asm__ __volatile__("
+ stxa %1, [%0] %3
+ stxa %2, [%5] %4
+ membar #Sync
+ flush %%g6
+ nop
+ nop
+ nop"
+ : /* No outputs */
+ : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
+ "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
+ : "memory");
+ restore_flags(flags);
+
+ /* Now set kernel pgd to upper alias so physical page computations
+ * work.
+ */
+ init_mm.pgd += ((shift) / (sizeof(pgd_t *)));
- null_pmd_table = __pa(((unsigned long)&empty_null_pmd_table) + phys_base);
- null_pte_table = __pa(((unsigned long)&empty_null_pte_table) + phys_base);
+ null_pmd_table = __pa(((unsigned long)&empty_null_pmd_table) + shift);
+ null_pte_table = __pa(((unsigned long)&empty_null_pte_table) + shift);
pmdp = (pmd_t *) &empty_null_pmd_table;
for(i = 0; i < 1024; i++)
@@ -553,13 +616,13 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
memset((void *) &empty_null_pte_table, 0, PAGE_SIZE);
/* Now can init the kernel/bad page tables. */
- __bfill64((void *)swapper_pg_dir, null_pmd_table);
- __bfill64((void *)&empty_bad_pmd_table, null_pte_table);
+ __bfill64((void *)swapper_pg_dir, &null_pmd_table);
+ __bfill64((void *)&empty_bad_pmd_table, &null_pte_table);
/* We use mempool to create page tables, therefore adjust it up
* such that __pa() macros etc. work.
*/
- mempool = PAGE_ALIGN(start_mem) + phys_base;
+ mempool = PAGE_ALIGN(start_mem) + shift;
/* FIXME: This should be done much nicer.
* Just now we allocate 64M for each.
@@ -567,48 +630,29 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
allocate_ptable_skeleton(IOBASE_VADDR, IOBASE_VADDR + 0x4000000);
allocate_ptable_skeleton(DVMA_VADDR, DVMA_VADDR + 0x4000000);
inherit_prom_mappings();
- allocate_ptable_skeleton(0, 0x8000 + PAGE_SIZE);
-
- /* Map prom interface page. */
- pgdp = pgd_offset(init_task.mm, 0x8000);
- pmdp = pmd_offset(pgdp, 0x8000);
- ptep = pte_offset(pmdp, 0x8000);
- pte = mk_pte(((unsigned long)&__p1275_loc)+phys_base, PAGE_KERNEL);
- set_pte(ptep, pte);
-
+
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba((unsigned long)init_mm.pgd);
- /* Kill locked PROM interface page mapping, the mapping will
- * re-enter on the next PROM interface call via our TLB miss
- * handlers.
- */
- spitfire_flush_dtlb_primary_page(0x8000);
- membar("#Sync");
- spitfire_flush_itlb_primary_page(0x8000);
- membar("#Sync");
-
/* Really paranoid. */
- flushi(PAGE_OFFSET);
+ flushi((long)&empty_zero_page);
membar("#Sync");
/* Cleanup the extra locked TLB entry we created since we have the
* nice TLB miss handlers of ours installed now.
*/
- if(phys_base >= (4 * 1024 * 1024)) {
- /* We only created DTLB mapping of this stuff. */
- spitfire_flush_dtlb_nucleus_page(phys_base + PAGE_OFFSET);
- membar("#Sync");
-
- /* Paranoid */
- flushi(PAGE_OFFSET);
- membar("#Sync");
- }
+ /* We only created DTLB mapping of this stuff. */
+ spitfire_flush_dtlb_nucleus_page(alias_base);
+ membar("#Sync");
- inherit_locked_prom_mappings();
+ /* Paranoid */
+ flushi((long)&empty_zero_page);
+ membar("#Sync");
+ inherit_locked_prom_mappings();
+
flush_tlb_all();
-
+
start_mem = free_area_init(PAGE_ALIGN(mempool), end_mem);
return device_scan (PAGE_ALIGN (start_mem));
@@ -642,9 +686,8 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
int codepages = 0;
int datapages = 0;
int initpages = 0;
- int prompages = 0;
unsigned long tmp2, addr;
- unsigned long data_end;
+ unsigned long alias_base = phys_base + PAGE_OFFSET - (long)(&empty_zero_page);
end_mem &= PAGE_MASK;
max_mapnr = MAP_NR(end_mem);
@@ -665,16 +708,14 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
}
taint_real_pages(start_mem, end_mem);
- data_end = start_mem - phys_base;
for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
if(PageReserved(mem_map + MAP_NR(addr))) {
- if ((addr < (unsigned long) &etext) && (addr >= PAGE_OFFSET))
+ if ((addr < ((unsigned long) &etext) + alias_base) && (addr >= alias_base))
codepages++;
- else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
+ else if((addr >= ((unsigned long)&__init_begin) + alias_base)
+ && (addr < ((unsigned long)&__init_end) + alias_base))
initpages++;
- else if((addr >= (unsigned long)&__p1275_loc && addr < (unsigned long)&__bss_start))
- prompages++;
- else if((addr < data_end) && (addr >= PAGE_OFFSET))
+ else if((addr < start_mem) && (addr >= alias_base))
datapages++;
continue;
}
@@ -689,12 +730,11 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
tmp2 = nr_free_pages << PAGE_SHIFT;
- printk("Memory: %luk available (%dk kernel code, %dk data, %dk init, %dk prom) [%016lx,%016lx]\n",
+ printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%016lx,%016lx]\n",
tmp2 >> 10,
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
- prompages << (PAGE_SHIFT-10),
PAGE_OFFSET, end_mem);
min_free_pages = nr_free_pages >> 7;
@@ -702,11 +742,6 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
min_free_pages = 16;
free_pages_low = min_free_pages + (min_free_pages >> 1);
free_pages_high = min_free_pages + min_free_pages;
-
-#if 0
- printk("Testing fault handling...\n");
- *(char *)0x00000deadbef0000UL = 0;
-#endif
}
void free_initmem (void)
@@ -715,10 +750,8 @@ void free_initmem (void)
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- unsigned long page = addr;
-
- if(page < ((unsigned long)__va(phys_base)))
- page += phys_base;
+ unsigned long page = addr + (long)__va(phys_base)
+ - (long)(&empty_zero_page);
mem_map[MAP_NR(page)].flags &= ~(1 << PG_reserved);
atomic_set(&mem_map[MAP_NR(page)].count, 1);
diff --git a/arch/sparc64/mm/modutil.c b/arch/sparc64/mm/modutil.c
new file mode 100644
index 000000000..a0eba8019
--- /dev/null
+++ b/arch/sparc64/mm/modutil.c
@@ -0,0 +1,66 @@
+/* $Id: modutil.c,v 1.1 1997/07/18 06:26:54 ralf Exp $
+ * arch/sparc64/mm/modutil.c
+ *
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Based upon code written by Linus Torvalds and others.
+ */
+
+#include <linux/malloc.h>
+#include <linux/vmalloc.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/vaddrs.h>
+
+static struct vm_struct * modvmlist = NULL;
+
+void module_unmap (void * addr)
+{
+ struct vm_struct **p, *tmp;
+
+ if (!addr)
+ return;
+ if ((PAGE_SIZE-1) & (unsigned long) addr) {
+ printk("Trying to vfree() bad address (%p)\n", addr);
+ return;
+ }
+ for (p = &modvmlist ; (tmp = *p) ; p = &tmp->next) {
+ if (tmp->addr == addr) {
+ *p = tmp->next;
+ vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
+ kfree(tmp);
+ return;
+ }
+ }
+ printk("Trying to unmap nonexistent module vm area (%p)\n", addr);
+}
+
+void * module_map (unsigned long size)
+{
+ void * addr;
+ struct vm_struct **p, *tmp, *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size || size > MODULES_LEN) return NULL;
+
+ addr = (void *) MODULES_VADDR;
+ for (p = &modvmlist; (tmp = *p) ; p = &tmp->next) {
+ if (size + (unsigned long) addr < (unsigned long) tmp->addr)
+ break;
+ addr = (void *) (tmp->size + (unsigned long) tmp->addr);
+ }
+ if ((unsigned long) addr + size >= MODULES_END) return NULL;
+
+ area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
+ if (!area) return NULL;
+ area->size = size + PAGE_SIZE;
+ area->addr = addr;
+ area->next = *p;
+ *p = area;
+
+ if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
+ vfree(addr);
+ return NULL;
+ }
+ return addr;
+}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
new file mode 100644
index 000000000..b11903a25
--- /dev/null
+++ b/arch/sparc64/mm/ultra.S
@@ -0,0 +1,226 @@
+/* $Id: ultra.S,v 1.1 1997/07/18 06:26:55 ralf Exp $
+ * ultra.S: Don't expand these all over the place...
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/asi.h>
+#include <asm/spitfire.h>
+
+ /* All callers check mm->context != NO_CONTEXT for us. */
+ .text
+ .align 32
+ .globl __flush_tlb_mm, __flush_tlb_range, __flush_tlb_page
+__flush_tlb_mm: /* %o0 == (mm->context & 0x1fff) */
+ rdpr %otherwin, %g1
+ brz,pt %g1, 1f
+ mov %o7, %g3
+ call __flushw_user
+ clr %g2
+1: rdpr %pil, %g1
+9: mov SECONDARY_CONTEXT, %g7
+ wrpr %g0, 15, %pil
+
+ ldxa [%g7] ASI_DMMU, %g2
+ cmp %g2, %o0
+ be,pt %icc, 1f
+ mov 0x50, %g3
+ stxa %o0, [%g7] ASI_DMMU
+1: stxa %g0, [%g3] ASI_DMMU_DEMAP
+ be,pt %icc, 1f
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+
+ stxa %g2, [%g7] ASI_DMMU
+1: wrpr %g1, 0x0, %pil
+ retl
+ flush %g6
+__flush_tlb_range: /* %o0 == (mm->context & 0x1fff), %o1 == start, %o2 == end */
+ sethi %hi(8192 - 1), %g5
+ or %g5, %lo(8192 - 1), %g5
+ andn %o1, %g5, %o1
+ andn %o2, %g5, %o2
+
+ sub %o2, %o1, %o3
+ add %g5, 1, %g5
+ orcc %o1, 0x50, %o1
+ srlx %o3, 13, %o4
+ rdpr %otherwin, %g1
+ brz,pt %g1, 1f
+ mov %o7, %g3
+ call __flushw_user
+
+ clr %g2
+1: cmp %o4, 96
+ bgu,pn %icc, 9b
+ rdpr %pil, %g1
+ mov SECONDARY_CONTEXT, %g7
+ wrpr %g0, 15, %pil
+ ldxa [%g7] ASI_DMMU, %g2
+ cmp %g2, %o0
+
+ be,pt %icc, 1f
+ sub %o3, %g5, %o3
+ stxa %o0, [%g7] ASI_DMMU
+1: stxa %g0, [%o1 + %o3] ASI_DMMU_DEMAP
+ stxa %g0, [%o1 + %o3] ASI_IMMU_DEMAP
+ brnz,pt %o3, 1b
+ sub %o3, %g5, %o3
+ nop
+
+ be,pt %icc, 1f
+ wrpr %g1, 0x0, %pil
+ stxa %g2, [%g7] ASI_DMMU
+1: retl
+ flush %g6
+
+ .align 32
+__flush_tlb_page: /* %o0 == (mm->context & 0x1fff), %o1 == page & PAGE_MASK */
+ rdpr %otherwin, %g1
+ brz,pt %g1, 1f
+ mov %o7, %g3
+ call __flushw_user
+ clr %g2
+1: rdpr %pil, %g1
+ mov SECONDARY_CONTEXT, %g7
+ wrpr %g0, 15, %pil
+
+ ldxa [%g7] ASI_DMMU, %g2
+ cmp %g2, %o0
+ be,pt %icc, 1f
+ or %o1, 0x10, %g3
+ stxa %o0, [%g7] ASI_DMMU
+1: stxa %g0, [%g3] ASI_DMMU_DEMAP
+ be,pt %icc, 1f
+ stxa %g0, [%g3] ASI_IMMU_DEMAP
+ stxa %g2, [%g7] ASI_DMMU
+1: wrpr %g1, 0x0, %pil
+ retl
+ flush %g6
+
+#ifdef __SMP__
+ /* These are all called by the slaves of a cross call, at
+ * trap level 1, with interrupts fully disabled.
+ *
+ * Register usage:
+ * %g5 mm->context (all tlb flushes)
+ * %g6 address arg 1 (tlb page and range flushes)
+ * %g7 address arg 2 (tlb range flush only)
+ *
+ * %g1 ivector table, don't touch
+ * %g2 scratch 1
+ * %g3 scratch 2
+ * %g4 scratch 3
+ *
+ * NOTE: We do not acknowledge the UPA until we are done
+ * with the service. This is what tells the master
+ * that he can consider the effects of the flush
+ * "complete" on this cpu.
+ */
+ .align 32
+ .globl xcall_flush_tlb_page
+xcall_flush_tlb_page:
+ mov SECONDARY_CONTEXT, %g2
+ nop
+ ldxa [%g2] ASI_DMMU, %g3
+ cmp %g3, %g5
+ be,pt %icc, 1f
+ or %g6, 0x10, %g4
+ stxa %g5, [%g2] ASI_DMMU
+1: stxa %g0, [%g4] ASI_DMMU_DEMAP
+
+ be,pt %icc, 1f
+ stxa %g0, [%g4] ASI_IMMU_DEMAP
+ stxa %g3, [%g2] ASI_DMMU
+1: b,pt %xcc, do_ivec_return
+ flush %g1
+
+ .align 32
+ .globl xcall_flush_tlb_mm
+xcall_flush_tlb_mm:
+ mov SECONDARY_CONTEXT, %g2
+ nop
+ ldxa [%g2] ASI_DMMU, %g3
+ cmp %g3, %g5
+ be,pt %icc, 1f
+ mov 0x50, %g4
+ stxa %g5, [%g2] ASI_DMMU
+1: stxa %g0, [%g4] ASI_DMMU_DEMAP
+
+ be,pt %icc, 1f
+ stxa %g0, [%g4] ASI_IMMU_DEMAP
+ stxa %g3, [%g2] ASI_DMMU
+1: b,pt %xcc, do_ivec_return
+ flush %g1
+
+ .align 32
+ .globl xcall_flush_tlb_range
+xcall_flush_tlb_range:
+ sethi %hi(8192 - 1), %g2
+ or %g2, %lo(8192 - 1), %g2
+ andn %g6, %g2, %g6
+ andn %g7, %g2, %g7
+ sub %g7, %g6, %g3
+ add %g2, 1, %g2
+ orcc %g6, 0x50, %g6
+ srlx %g3, 13, %g4
+
+ cmp %g4, 96
+ bgu,pn %icc, xcall_flush_tlb_mm
+ mov SECONDARY_CONTEXT, %g4
+ ldxa [%g4] ASI_DMMU, %g7
+ cmp %g7, %g5
+ be,pt %icc, 1f
+ sub %g3, %g2, %g3
+ stxa %g5, [%g4] ASI_DMMU
+
+1: stxa %g0, [%g6 + %g3] ASI_DMMU_DEMAP
+ stxa %g0, [%g6 + %g3] ASI_IMMU_DEMAP
+ brnz,pt %g3, 1b
+ sub %g3, %g2, %g3
+ bne,a,pn %icc, 1f
+ stxa %g7, [%g4] ASI_DMMU
+1: b,pt %xcc, do_ivec_return
+ flush %g1
+
+ /* These two are not performance critical... */
+ .globl xcall_flush_tlb_all
+xcall_flush_tlb_all:
+ clr %g2
+ clr %g3
+1: ldxa [%g3] ASI_DTLB_DATA_ACCESS, %g4
+ and %g4, _PAGE_L, %g5
+ brnz,pn %g5, 2f
+ mov TLB_TAG_ACCESS, %g7
+ stxa %g0, [%g7] ASI_DMMU
+ membar #Sync
+
+ stxa %g0, [%g3] ASI_DTLB_DATA_ACCESS
+ membar #Sync
+2: ldxa [%g3] ASI_ITLB_DATA_ACCESS, %g4
+ and %g4, _PAGE_L, %g5
+ brnz,pn %g5, 2f
+ mov TLB_TAG_ACCESS, %g7
+ stxa %g0, [%g7] ASI_IMMU
+ membar #Sync
+
+ stxa %g0, [%g3] ASI_ITLB_DATA_ACCESS
+2: add %g2, 1, %g2
+ cmp %g2, 63
+ ble,pt %icc, 1b
+ sll %g2, 3, %g3
+ b,pt %xcc, do_ivec_return
+ flush %g1
+
+ .globl xcall_flush_cache_all
+xcall_flush_cache_all:
+ sethi %hi(16383), %g2
+ or %g2, %lo(16383), %g2
+ clr %g3
+1: stxa %g0, [%g3] ASI_IC_TAG
+ add %g3, 32, %g3
+ cmp %g3, %g2
+ bleu,pt %xcc, 1b
+ nop
+ b,pt %xcc, do_ivec_return
+ flush %g1
+#endif /* __SMP__ */
diff --git a/arch/sparc64/prom/bootstr.c b/arch/sparc64/prom/bootstr.c
index e226c6e95..7ef17159d 100644
--- a/arch/sparc64/prom/bootstr.c
+++ b/arch/sparc64/prom/bootstr.c
@@ -1,4 +1,4 @@
-/* $Id: bootstr.c,v 1.3 1997/03/04 16:27:06 jj Exp $
+/* $Id: bootstr.c,v 1.4 1997/06/17 13:25:35 jj Exp $
* bootstr.c: Boot string/argument acquisition from the PROM.
*
* Copyright(C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -6,18 +6,20 @@
*/
#include <linux/string.h>
+#include <linux/init.h>
#include <asm/oplib.h>
#define BARG_LEN 256
-static char barg_buf[BARG_LEN];
-static char fetched = 0;
+int bootstr_len __initdata = BARG_LEN;
+static int bootstr_valid __initdata = 0;
+static char bootstr_buf[BARG_LEN] __initdata = { 0 };
-char *
-prom_getbootargs(void)
+__initfunc(char *
+prom_getbootargs(void))
{
/* This check saves us from a panic when bootfd patches args. */
- if (fetched) return barg_buf;
- prom_getstring(prom_chosen_node, "bootargs", barg_buf, BARG_LEN);
- fetched = 1;
- return barg_buf;
+ if (bootstr_valid) return bootstr_buf;
+ prom_getstring(prom_chosen_node, "bootargs", bootstr_buf, BARG_LEN);
+ bootstr_valid = 1;
+ return bootstr_buf;
}
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index fe9bf9c6b..8b738fd41 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -1,4 +1,4 @@
-/* $Id: misc.c,v 1.6 1997/04/10 05:13:05 davem Exp $
+/* $Id: misc.c,v 1.8 1997/07/14 23:45:28 davem Exp $
* misc.c: Miscellaneous prom functions that don't belong
* anywhere else.
*
@@ -6,6 +6,7 @@
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -45,8 +46,8 @@ void
prom_cmdline(void)
{
extern void kernel_enter_debugger(void);
- extern void install_obp_ticker(void);
- extern void install_linux_ticker(void);
+ /* extern void install_obp_ticker(void); */
+ /* extern void install_linux_ticker(void); */
unsigned long flags;
/* kernel_enter_debugger(); */
@@ -132,3 +133,10 @@ void prom_set_trap_table(unsigned long tba)
{
p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
}
+
+#ifdef __SMP__
+void prom_start_cpu(int cpunode, unsigned long pc, unsigned long o0) /* ask the PROM to start the cpu at 'cpunode' executing at pc; o0 presumably lands in its %o0 -- confirm against SUNW,start-cpu spec */
+{
+	p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, o0);	/* 3 in-args, no results */
+}
+#endif
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index 3eb0311df..18de40deb 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -1,4 +1,4 @@
-/* $Id: p1275.c,v 1.8 1997/04/03 09:29:21 davem Exp $
+/* $Id: p1275.c,v 1.10 1997/06/27 04:18:30 davem Exp $
* p1275.c: Sun IEEE 1275 PROM low level interface routines
*
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -14,50 +14,42 @@
#include <asm/spitfire.h>
#include <asm/pstate.h>
-/* If you change layout of this structure, please change the prom_doit
- function below as well. */
-typedef struct {
- unsigned prom_doit_code [24]; /* 0x8000 */
- long prom_sync_routine; /* 0x8060 */
- void (*prom_cif_handler)(long *); /* 0x8068 */
- unsigned long prom_cif_stack; /* 0x8070 */
- unsigned long prom_args [23]; /* 0x8078 */
- char prom_buffer [7888];
-} at0x8000;
+struct {
+ long prom_sync_routine; /* 0x00 */
+ void (*prom_cif_handler)(long *); /* 0x08 */
+ unsigned long prom_cif_stack; /* 0x10 */
+ unsigned long prom_args [23]; /* 0x18 */
+ char prom_buffer [3000];
+} p1275buf;
-static void (*prom_do_it)(void);
-
-void prom_cif_interface (void) __attribute__ ((__section__ (".p1275")));
-
-/* At most 14 insns */
void prom_cif_interface (void)
{
__asm__ __volatile__ ("
- sethi %%hi(0x8000), %%o0
- ldx [%%o0 + 0x070], %%o1 ! prom_cif_stack
+ mov %0, %%o0
+ ldx [%%o0 + 0x010], %%o1 ! prom_cif_stack
save %%o1, -0xc0, %%sp
- ldx [%%i0 + 0x068], %%l2 ! prom_cif_handler
+ ldx [%%i0 + 0x008], %%l2 ! prom_cif_handler
rdpr %%pstate, %%l4
mov %%g4, %%l0
mov %%g6, %%l1
- wrpr %%l4, %0, %%pstate ! turn on address masking
+ wrpr %%l4, %1, %%pstate ! turn on address masking
call %%l2
- or %%i0, 0x078, %%o0 ! prom_args
+ add %%i0, 0x018, %%o0 ! prom_args
wrpr %%l4, 0, %%pstate ! put pstate back
mov %%l0, %%g4
ret
restore %%l1, 0, %%g6
save %%sp, -0xc0, %%sp ! If you change the offset of the save
rdpr %%pstate, %%l4 ! here, please change the 0x8038
- andn %%l4, %0, %%l3 ! constant below as well
+ andn %%l4, %1, %%l3 ! constant below as well
wrpr %%l3, %%pstate
- ldx [%%o0 + 0x060], %%l2
+ ldx [%%o0 + 0x000], %%l2
call %%l2
nop
wrpr %%l4, 0, %%pstate
ret
restore
- " : : "i" (PSTATE_AM));
+ " : : "r" (&p1275buf), "i" (0 /* PSTATE_AM */));
}
long p1275_cmd (char *service, long fmt, ...)
@@ -68,61 +60,60 @@ long p1275_cmd (char *service, long fmt, ...)
va_list list;
long attrs, x;
long ctx = 0;
- at0x8000 *low = (at0x8000 *)(0x8000);
- p = low->prom_buffer;
+ p = p1275buf.prom_buffer;
save_and_cli(flags);
ctx = spitfire_get_primary_context ();
if (ctx) {
flushw_user ();
spitfire_set_primary_context (0);
}
- low->prom_args[0] = (unsigned long)p; /* service */
+ p1275buf.prom_args[0] = (unsigned long)p; /* service */
strcpy (p, service);
p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
- low->prom_args[1] = nargs = (fmt & 0x0f); /* nargs */
- low->prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */
+ p1275buf.prom_args[1] = nargs = (fmt & 0x0f); /* nargs */
+ p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */
attrs = fmt >> 8;
va_start(list, fmt);
for (i = 0; i < nargs; i++, attrs >>= 3) {
switch (attrs & 0x7) {
case P1275_ARG_NUMBER:
- low->prom_args[i + 3] = (unsigned)va_arg(list, long); break;
+ p1275buf.prom_args[i + 3] = (unsigned)va_arg(list, long); break;
case P1275_ARG_IN_STRING:
strcpy (p, va_arg(list, char *));
- low->prom_args[i + 3] = (unsigned long)p;
+ p1275buf.prom_args[i + 3] = (unsigned long)p;
p = (char *)(((long)(strchr (p, 0) + 8)) & ~7);
break;
case P1275_ARG_OUT_BUF:
(void) va_arg(list, char *);
- low->prom_args[i + 3] = (unsigned long)p;
+ p1275buf.prom_args[i + 3] = (unsigned long)p;
x = va_arg(list, long);
i++; attrs >>= 3;
p = (char *)(((long)(p + (int)x + 7)) & ~7);
- low->prom_args[i + 3] = x;
+ p1275buf.prom_args[i + 3] = x;
break;
case P1275_ARG_IN_BUF:
q = va_arg(list, char *);
- low->prom_args[i + 3] = (unsigned long)p;
+ p1275buf.prom_args[i + 3] = (unsigned long)p;
x = va_arg(list, long);
i++; attrs >>= 3;
memcpy (p, q, (int)x);
p = (char *)(((long)(p + (int)x + 7)) & ~7);
- low->prom_args[i + 3] = x;
+ p1275buf.prom_args[i + 3] = x;
break;
case P1275_ARG_OUT_32B:
(void) va_arg(list, char *);
- low->prom_args[i + 3] = (unsigned long)p;
+ p1275buf.prom_args[i + 3] = (unsigned long)p;
p += 32;
break;
case P1275_ARG_IN_FUNCTION:
- low->prom_args[i + 3] = 0x8038;
- low->prom_sync_routine = va_arg(list, long); break;
+ p1275buf.prom_args[i + 3] = (unsigned long)prom_cif_interface + 0x38;
+ p1275buf.prom_sync_routine = va_arg(list, long); break;
}
}
va_end(list);
-
- (*prom_do_it)();
+
+ prom_cif_interface();
attrs = fmt >> 8;
va_start(list, fmt);
@@ -142,17 +133,17 @@ long p1275_cmd (char *service, long fmt, ...)
case P1275_ARG_OUT_BUF:
p = va_arg(list, char *);
x = va_arg(list, long);
- memcpy (p, (char *)(low->prom_args[i + 3]), (int)x);
+ memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x);
i++; attrs >>= 3;
break;
case P1275_ARG_OUT_32B:
p = va_arg(list, char *);
- memcpy (p, (char *)(low->prom_args[i + 3]), 32);
+ memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32);
break;
}
}
va_end(list);
- x = low->prom_args [nargs + 3];
+ x = p1275buf.prom_args [nargs + 3];
if (ctx)
spitfire_set_primary_context (ctx);
@@ -162,9 +153,6 @@ long p1275_cmd (char *service, long fmt, ...)
void prom_cif_init(void *cif_handler, void *cif_stack)
{
- at0x8000 *low = (at0x8000 *)(0x8000);
-
- low->prom_cif_handler = (void (*)(long *))cif_handler;
- low->prom_cif_stack = (unsigned long)cif_stack;
- prom_do_it = (void (*)(void))(0x8000);
+ p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
+ p1275buf.prom_cif_stack = (unsigned long)cif_stack;
}
diff --git a/arch/sparc64/vmlinux.lds b/arch/sparc64/vmlinux.lds
index d2d0cac34..eac8314ca 100644
--- a/arch/sparc64/vmlinux.lds
+++ b/arch/sparc64/vmlinux.lds
@@ -5,10 +5,10 @@ ENTRY(_start)
SECTIONS
{
- empty_zero_page = 0xfffff80000000000;
- swapper_pg_dir = 0xfffff80000002000;
+ empty_zero_page = 0x0000000000400000;
+ swapper_pg_dir = 0x0000000000402000;
. = 0x4000;
- .text 0xfffff80000004000 :
+ .text 0x0000000000404000 :
{
*(.text)
*(.gnu.warning)
@@ -40,12 +40,6 @@ SECTIONS
.data.init : { *(.data.init) }
. = ALIGN(8192);
__init_end = .;
- __p1275_loc = .;
- .p1275 :
- {
- *(.p1275)
- . = ALIGN(8192);
- }
__bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss :