summaryrefslogtreecommitdiffstats
path: root/include/asm-mips64
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1999-08-21 22:19:10 +0000
committerRalf Baechle <ralf@linux-mips.org>1999-08-21 22:19:10 +0000
commitb5710aa33141544bf7cb9c3e509d587ff457a094 (patch)
tree40214b7ea9f82c8a48a9eaeb428c25c0565aee32 /include/asm-mips64
parent892bf98f0c04e9297979936d973c85e62a3f0b96 (diff)
Look ma - a tank has hit the MIPS sources ...
Diffstat (limited to 'include/asm-mips64')
-rw-r--r--include/asm-mips64/arc/types.h40
-rw-r--r--include/asm-mips64/branch.h28
-rw-r--r--include/asm-mips64/cacheops.h48
-rw-r--r--include/asm-mips64/checksum.h94
-rw-r--r--include/asm-mips64/floppy.h4
-rw-r--r--include/asm-mips64/fpregdef.h50
-rw-r--r--include/asm-mips64/hdreg.h18
-rw-r--r--include/asm-mips64/ide.h132
-rw-r--r--include/asm-mips64/processor.h3
-rw-r--r--include/asm-mips64/r4kcache.h494
-rw-r--r--include/asm-mips64/sgiarcs.h6
-rw-r--r--include/asm-mips64/stackframe.h308
-rw-r--r--include/asm-mips64/string.h11
-rw-r--r--include/asm-mips64/unistd.h97
14 files changed, 1133 insertions, 200 deletions
diff --git a/include/asm-mips64/arc/types.h b/include/asm-mips64/arc/types.h
new file mode 100644
index 000000000..b1d72e10e
--- /dev/null
+++ b/include/asm-mips64/arc/types.h
@@ -0,0 +1,40 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright 1999 Ralf Baechle (ralf@gnu.org)
+ * Copyright 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_ARC_TYPES_H
+#define _ASM_ARC_TYPES_H
+
+#include <linux/config.h>
+
+#ifdef CONFIG_ARC32
+
+typedef char CHAR;
+typedef short SHORT;
+typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__SI__)));
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__)));
+typedef void VOID;
+
+#endif /* CONFIG_ARC32 */
+
+#ifdef CONFIG_ARC64
+
+typedef char CHAR;
+typedef short SHORT;
+typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__DI__)));
+typedef unsigned char UCHAR;
+typedef unsigned short USHORT;
+typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__)));
+typedef void VOID;
+
+#endif /* CONFIG_ARC64 */
+
+#endif /* _ASM_ARC_TYPES_H */
diff --git a/include/asm-mips64/branch.h b/include/asm-mips64/branch.h
new file mode 100644
index 000000000..1ac45c9dc
--- /dev/null
+++ b/include/asm-mips64/branch.h
@@ -0,0 +1,28 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Branch and jump emulation.
+ *
+ * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
+ */
+#include <asm/ptrace.h>
+
+extern inline int delay_slot(struct pt_regs *regs)
+{
+ return regs->cp0_cause & CAUSEF_BD;
+}
+
+extern int __compute_return_epc(struct pt_regs *regs);
+
+extern inline int compute_return_epc(struct pt_regs *regs)
+{
+ if (!delay_slot(regs)) {
+ regs->cp0_epc += 4;
+ return 0;
+ }
+
+ return __compute_return_epc(regs);
+}
diff --git a/include/asm-mips64/cacheops.h b/include/asm-mips64/cacheops.h
new file mode 100644
index 000000000..438aa1b94
--- /dev/null
+++ b/include/asm-mips64/cacheops.h
@@ -0,0 +1,48 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Cache operations for the cache instruction.
+ *
+ * (C) Copyright 1996, 1997, 1999 by Ralf Baechle
+ */
+#ifndef _ASM_CACHEOPS_H
+#define _ASM_CACHEOPS_H
+
+/*
+ * Cache Operations
+ */
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
+#define Index_Invalidate_SI 0x02
+#define Index_Writeback_Inv_SD 0x03
+#define Index_Load_Tag_I 0x04
+#define Index_Load_Tag_D 0x05
+#define Index_Load_Tag_SI 0x06
+#define Index_Load_Tag_SD 0x07
+#define Index_Store_Tag_I 0x08
+#define Index_Store_Tag_D 0x09
+#define Index_Store_Tag_SI 0x0A
+#define Index_Store_Tag_SD 0x0B
+#define Create_Dirty_Excl_D 0x0d
+#define Create_Dirty_Excl_SD 0x0f
+#define Hit_Invalidate_I 0x10
+#define Hit_Invalidate_D 0x11
+#define Hit_Invalidate_SI 0x12
+#define Hit_Invalidate_SD 0x13
+#define Fill 0x14
+#define Hit_Writeback_Inv_D 0x15
+ /* 0x16 is unused */
+#define Hit_Writeback_Inv_SD 0x17
+#define Hit_Writeback_I 0x18
+#define Hit_Writeback_D 0x19
+ /* 0x1a is unused */
+#define Hit_Writeback_SD 0x1b
+ /* 0x1c is unused */
+ /* 0x1d is unused */
+#define Hit_Set_Virtual_SI 0x1e
+#define Hit_Set_Virtual_SD 0x1f
+
+#endif /* _ASM_CACHEOPS_H */
diff --git a/include/asm-mips64/checksum.h b/include/asm-mips64/checksum.h
index d8f8bee72..9db6a989d 100644
--- a/include/asm-mips64/checksum.h
+++ b/include/asm-mips64/checksum.h
@@ -53,7 +53,8 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, unsigned int
/*
* Fold a partial checksum without adding pseudo headers
*/
-static inline unsigned short int csum_fold(unsigned int sum)
+static inline unsigned short int
+csum_fold(unsigned int sum)
{
__asm__("
.set noat
@@ -78,8 +79,8 @@ static inline unsigned short int csum_fold(unsigned int sum)
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
-static inline unsigned short ip_fast_csum(unsigned char * iph,
- unsigned int ihl)
+static inline unsigned short
+ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
unsigned int sum;
unsigned long dummy;
@@ -91,31 +92,31 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
__asm__ __volatile__("
.set noreorder
.set noat
- lw %0,(%1)
- subu %2,4
- #blez %2,2f
- sll %2,2 # delay slot
-
- lw %3,4(%1)
- addu %2,%1 # delay slot
- addu %0,%3
- sltu $1,%0,%3
- lw %3,8(%1)
- addu %0,$1
- addu %0,%3
- sltu $1,%0,%3
- lw %3,12(%1)
- addu %0,$1
- addu %0,%3
- sltu $1,%0,%3
- addu %0,$1
-
-1: lw %3,16(%1)
- addiu %1,4
- addu %0,%3
- sltu $1,%0,%3
- bne %2,%1,1b
- addu %0,$1 # delay slot
+ lw %0, (%1)
+ subu %2, 4
+ #blez %2, 2f
+ sll %2, 2 # delay slot
+
+ lw %3, 4(%1)
+ daddu %2, %1 # delay slot
+ addu %0, %3
+ sltu $1, %0, %3
+ lw %3, 8(%1)
+ addu %0, $1
+ addu %0, %3
+ sltu $1, %0, %3
+ lw %3, 12(%1)
+ addu %0, $1
+ addu %0, %3
+ sltu $1, %0, %3
+ addu %0, $1
+
+1: lw %3, 16(%1)
+ daddiu %1, 4
+ addu %0, %3
+ sltu $1, %0, %3
+ bne %2, %1, 1b
+ addu %0, $1 # delay slot
2: .set at
.set reorder"
@@ -130,11 +131,9 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum)
+static inline unsigned long
+csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
+ unsigned short len, unsigned short proto, unsigned int sum)
{
__asm__("
.set noat
@@ -167,11 +166,9 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
- unsigned long daddr,
- unsigned short len,
- unsigned short proto,
- unsigned int sum)
+static inline unsigned short int
+csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
+ unsigned short proto, unsigned int sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -180,17 +177,16 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline unsigned short
+ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
- struct in6_addr *daddr,
- __u16 len,
- unsigned short proto,
- unsigned int sum)
+static inline unsigned short int
+csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, __u16 len,
+ unsigned short proto, unsigned int sum)
{
__asm__("
.set noreorder
@@ -242,13 +238,9 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
sltu $1,%0,$1
.set noat
.set noreorder"
- : "=r" (sum),
- "=r" (proto)
- : "r" (saddr),
- "r" (daddr),
- "0" (htonl((__u32) (len))),
- "1" (htonl(proto)),
- "r"(sum)
+ : "=r" (sum), "=r" (proto)
+ : "r" (saddr), "r" (daddr),
+ "0" (htonl((__u32) (len))), "1" (htonl(proto)), "r"(sum)
: "$1");
return csum_fold(sum);
diff --git a/include/asm-mips64/floppy.h b/include/asm-mips64/floppy.h
index d8f9b5005..3e1439758 100644
--- a/include/asm-mips64/floppy.h
+++ b/include/asm-mips64/floppy.h
@@ -12,8 +12,6 @@
#define _ASM_FLOPPY_H
#include <asm/bootinfo.h>
-#include <asm/jazz.h>
-#include <asm/jazzdma.h>
struct fd_ops {
unsigned char (*fd_inb)(unsigned int port);
@@ -79,7 +77,7 @@ extern struct fd_ops *fd_ops;
#define FLOPPY1_TYPE fd_drive_type(1)
#define FDC1 fd_ops->fd_getfdaddr1();
-static int FDC2=-1;
+static int __attribute__((__unused__)) FDC2=-1;
#define N_FDC 1 /* do you *really* want a second controller? */
#define N_DRIVE 8
diff --git a/include/asm-mips64/fpregdef.h b/include/asm-mips64/fpregdef.h
new file mode 100644
index 000000000..5437cf109
--- /dev/null
+++ b/include/asm-mips64/fpregdef.h
@@ -0,0 +1,50 @@
+/*
+ * Definitions for the FPU register names
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1999 Ralf Baechle
+ * Copyright (C) 1985 MIPS Computer Systems, Inc.
+ * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_FPREGDEF_H
+#define _ASM_FPREGDEF_H
+
+#define fv0 $f0 /* return value */
+#define fv1 $f2
+#define fa0 $f12 /* argument registers */
+#define fa1 $f13
+#define fa2 $f14
+#define fa3 $f15
+#define fa4 $f16
+#define fa5 $f17
+#define fa6 $f18
+#define fa7 $f19
+#define ft0 $f4 /* caller saved */
+#define ft1 $f5
+#define ft2 $f6
+#define ft3 $f7
+#define ft4 $f8
+#define ft5 $f9
+#define ft6 $f10
+#define ft7 $f11
+#define ft8 $f20
+#define ft9 $f21
+#define ft10 $f22
+#define ft11 $f23
+#define ft12 $f1
+#define ft13 $f3
+#define fs0 $f24 /* callee saved */
+#define fs1 $f25
+#define fs2 $f26
+#define fs3 $f27
+#define fs4 $f28
+#define fs5 $f29
+#define fs6 $f30
+#define fs7 $f31
+
+#define fcr31 $31
+
+#endif /* _ASM_FPREGDEF_H */
diff --git a/include/asm-mips64/hdreg.h b/include/asm-mips64/hdreg.h
new file mode 100644
index 000000000..757769670
--- /dev/null
+++ b/include/asm-mips64/hdreg.h
@@ -0,0 +1,18 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains the MIPS architecture specific IDE code.
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+#ifndef _ASM_HDREG_H
+#define _ASM_HDREG_H
+
+typedef unsigned short ide_ioreg_t;
+
+#endif /* _ASM_HDREG_H */
+
diff --git a/include/asm-mips64/ide.h b/include/asm-mips64/ide.h
new file mode 100644
index 000000000..879f6c8a9
--- /dev/null
+++ b/include/asm-mips64/ide.h
@@ -0,0 +1,132 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file contains the MIPS architecture specific IDE code.
+ *
+ * Copyright (C) 1994-1996 Linus Torvalds & authors
+ */
+
+#ifndef _ASM_IDE_H
+#define _ASM_IDE_H
+
+#ifdef __KERNEL__
+
+#ifndef MAX_HWIFS
+#define MAX_HWIFS 6
+#endif
+
+#define ide__sti() __sti()
+
+struct ide_ops {
+ int (*ide_default_irq)(ide_ioreg_t base);
+ ide_ioreg_t (*ide_default_io_base)(int index);
+ void (*ide_init_hwif_ports)(hw_regs_t *hw, ide_ioreg_t data_port,
+ ide_ioreg_t ctrl_port, int *irq);
+ int (*ide_request_irq)(unsigned int irq, void (*handler)(int, void *,
+ struct pt_regs *), unsigned long flags,
+ const char *device, void *dev_id);
+ void (*ide_free_irq)(unsigned int irq, void *dev_id);
+ int (*ide_check_region) (ide_ioreg_t from, unsigned int extent);
+ void (*ide_request_region)(ide_ioreg_t from, unsigned int extent,
+ const char *name);
+ void (*ide_release_region)(ide_ioreg_t from, unsigned int extent);
+};
+
+extern struct ide_ops *ide_ops;
+
+static inline int
+ide_default_irq(ide_ioreg_t base)
+{
+ return ide_ops->ide_default_irq(base);
+}
+
+static inline ide_ioreg_t
+ide_default_io_base(int index)
+{
+ return ide_ops->ide_default_io_base(index);
+}
+
+static inline void
+ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
+ ide_ioreg_t ctrl_port, int *irq)
+{
+ ide_ops->ide_init_hwif_ports(hw, data_port, ctrl_port, &hw->irq);
+
+ hw->irq = ide_ops->ide_default_irq(data_port);
+}
+
+/*
+ * This registers the standard ports for this architecture with the IDE
+ * driver.
+ */
+static inline void ide_init_default_hwifs(void)
+{
+#ifdef __DO_I_NEED_THIS
+ hw_regs_t hw;
+ int index;
+
+ for (index = 0; index < MAX_HWIFS; index++) {
+ ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, 0);
+ hw.irq = ide_default_irq(ide_default_io_base(index));
+ ide_register_hw(&hw, NULL);
+ }
+#endif /* __DO_I_NEED_THIS */
+}
+
+typedef union {
+ unsigned all : 8; /* all of the bits together */
+ struct {
+ unsigned head : 4; /* always zeros here */
+ unsigned unit : 1; /* drive select number, 0 or 1 */
+ unsigned bit5 : 1; /* always 1 */
+ unsigned lba : 1; /* using LBA instead of CHS */
+ unsigned bit7 : 1; /* always 1 */
+ } b;
+ } select_t;
+
+static inline int
+ide_request_irq(unsigned int irq,
+ void (*handler)(int,void *, struct pt_regs *),
+ unsigned long flags, const char *device, void *dev_id)
+{
+ return ide_ops->ide_request_irq(irq, handler, flags, device, dev_id);
+}
+
+static inline void
+ide_free_irq(unsigned int irq, void *dev_id)
+{
+ ide_ops->ide_free_irq(irq, dev_id);
+}
+
+static inline int
+ide_check_region(ide_ioreg_t from, unsigned int extent)
+{
+ return ide_ops->ide_check_region(from, extent);
+}
+
+static inline void
+ide_request_region(ide_ioreg_t from, unsigned int extent, const char *name)
+{
+ ide_ops->ide_request_region(from, extent, name);
+}
+
+static inline void
+ide_release_region(ide_ioreg_t from, unsigned int extent)
+{
+ ide_ops->ide_release_region(from, extent);
+}
+
+/*
+ * The following are not needed for the non-m68k ports
+ */
+#define ide_ack_intr(hwif) (1)
+#define ide_fix_driveid(id) do {} while (0)
+#define ide_release_lock(lock) do {} while (0)
+#define ide_get_lock(lock, hdlr, data) do {} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IDE_H */
diff --git a/include/asm-mips64/processor.h b/include/asm-mips64/processor.h
index 2ea66d4d0..989f37de6 100644
--- a/include/asm-mips64/processor.h
+++ b/include/asm-mips64/processor.h
@@ -1,4 +1,4 @@
-/* $Id$
+/* $Id: processor.h,v 1.1 1999/08/18 23:37:51 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -25,6 +25,7 @@
#include <asm/system.h>
struct mips_cpuinfo {
+ unsigned long udelay_val;
unsigned long *pgd_quick;
unsigned long *pte_quick;
unsigned long pgtable_cache_sz;
diff --git a/include/asm-mips64/r4kcache.h b/include/asm-mips64/r4kcache.h
new file mode 100644
index 000000000..6801259dd
--- /dev/null
+++ b/include/asm-mips64/r4kcache.h
@@ -0,0 +1,494 @@
+/* $Id$
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Inline assembly cache operations.
+ *
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ *
+ * FIXME: Handle split L2 caches.
+ */
+#ifndef _ASM_R4KCACHE_H
+#define _ASM_R4KCACHE_H
+
+#include <asm/asm.h>
+#include <asm/cacheops.h>
+
+extern inline void flush_icache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Index_Invalidate_I));
+}
+
+extern inline void flush_dcache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Index_Writeback_Inv_D));
+}
+
+extern inline void flush_scache_line_indexed(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Index_Writeback_Inv_SD));
+}
+
+extern inline void flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Hit_Invalidate_I));
+}
+
+extern inline void flush_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Hit_Writeback_Inv_D));
+}
+
+extern inline void invalidate_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Hit_Invalidate_D));
+}
+
+extern inline void invalidate_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Hit_Invalidate_SD));
+}
+
+extern inline void flush_scache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "cache %1, (%0)\n\t"
+ ".set reorder"
+ :
+ : "r" (addr), "i" (Hit_Writeback_Inv_SD));
+}
+
+/*
+ * The next two are for badland addresses like signal trampolines.
+ */
+extern inline void protected_flush_icache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ ".dword\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr), "i" (Hit_Invalidate_I));
+}
+
+extern inline void protected_writeback_dcache_line(unsigned long addr)
+{
+ __asm__ __volatile__(
+ ".set noreorder\n\t"
+ "1:\tcache %1,(%0)\n"
+ "2:\t.set reorder\n\t"
+ ".section\t__ex_table,\"a\"\n\t"
+ ".dword\t1b,2b\n\t"
+ ".previous"
+ :
+ : "r" (addr), "i" (Hit_Writeback_D));
+}
+
+#define cache16_unroll32(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ cache %1, 0x000(%0); cache %1, 0x010(%0); \
+ cache %1, 0x020(%0); cache %1, 0x030(%0); \
+ cache %1, 0x040(%0); cache %1, 0x050(%0); \
+ cache %1, 0x060(%0); cache %1, 0x070(%0); \
+ cache %1, 0x080(%0); cache %1, 0x090(%0); \
+ cache %1, 0x0a0(%0); cache %1, 0x0b0(%0); \
+ cache %1, 0x0c0(%0); cache %1, 0x0d0(%0); \
+ cache %1, 0x0e0(%0); cache %1, 0x0f0(%0); \
+ cache %1, 0x100(%0); cache %1, 0x110(%0); \
+ cache %1, 0x120(%0); cache %1, 0x130(%0); \
+ cache %1, 0x140(%0); cache %1, 0x150(%0); \
+ cache %1, 0x160(%0); cache %1, 0x170(%0); \
+ cache %1, 0x180(%0); cache %1, 0x190(%0); \
+ cache %1, 0x1a0(%0); cache %1, 0x1b0(%0); \
+ cache %1, 0x1c0(%0); cache %1, 0x1d0(%0); \
+ cache %1, 0x1e0(%0); cache %1, 0x1f0(%0); \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+extern inline void blast_dcache16(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + dcache_size);
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Writeback_Inv_D);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_dcache16_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache16_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_dcache16_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Writeback_Inv_D);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_icache16(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + icache_size);
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Invalidate_I);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_icache16_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache16_unroll32(start,Hit_Invalidate_I);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_icache16_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Invalidate_I);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_scache16(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_scache16_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache16_unroll32(start,Hit_Writeback_Inv_SD);
+ start += 0x200;
+ }
+}
+
+extern inline void blast_scache16_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache16_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x200;
+ }
+}
+
+#define cache32_unroll32(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ cache %1, 0x000(%0); cache %1, 0x020(%0); \
+ cache %1, 0x040(%0); cache %1, 0x060(%0); \
+ cache %1, 0x080(%0); cache %1, 0x0a0(%0); \
+ cache %1, 0x0c0(%0); cache %1, 0x0e0(%0); \
+ cache %1, 0x100(%0); cache %1, 0x120(%0); \
+ cache %1, 0x140(%0); cache %1, 0x160(%0); \
+ cache %1, 0x180(%0); cache %1, 0x1a0(%0); \
+ cache %1, 0x1c0(%0); cache %1, 0x1e0(%0); \
+ cache %1, 0x200(%0); cache %1, 0x220(%0); \
+ cache %1, 0x240(%0); cache %1, 0x260(%0); \
+ cache %1, 0x280(%0); cache %1, 0x2a0(%0); \
+ cache %1, 0x2c0(%0); cache %1, 0x2e0(%0); \
+ cache %1, 0x300(%0); cache %1, 0x320(%0); \
+ cache %1, 0x340(%0); cache %1, 0x360(%0); \
+ cache %1, 0x380(%0); cache %1, 0x3a0(%0); \
+ cache %1, 0x3c0(%0); cache %1, 0x3e0(%0); \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+extern inline void blast_dcache32(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + dcache_size);
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Writeback_Inv_D);
+ start += 0x400;
+ }
+}
+
+/*
+ * Call this function only with interrupts disabled or R4600 V2.0 may blow
+ * up on you.
+ *
+ * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
+ * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Excl_D will only
+ * operate correctly if the internal data cache refill buffer is empty. These
+ * CACHE instructions should be separated from any potential data cache miss
+ * by a load instruction to an uncached address to empty the response buffer."
+ * (Revision 2.0 device errata from IDT available on http://www.idt.com/
+ * in .pdf format.)
+ */
+extern inline void blast_dcache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ /*
+ * Sigh ... workaround for R4600 v1.7 bug. Explanation see above.
+ */
+ *(volatile unsigned long *)KSEG1;
+
+ __asm__ __volatile__("nop;nop;nop;nop");
+ while(start < end) {
+ cache32_unroll32(start,Hit_Writeback_Inv_D);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_dcache32_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Writeback_Inv_D);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_icache32(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = (start + icache_size);
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Invalidate_I);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_icache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache32_unroll32(start,Hit_Invalidate_I);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_icache32_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = (start + PAGE_SIZE);
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Invalidate_I);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_scache32(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_scache32_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache32_unroll32(start,Hit_Writeback_Inv_SD);
+ start += 0x400;
+ }
+}
+
+extern inline void blast_scache32_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache32_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x400;
+ }
+}
+
+#define cache64_unroll32(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ cache %1, 0x000(%0); cache %1, 0x040(%0); \
+ cache %1, 0x080(%0); cache %1, 0x0c0(%0); \
+ cache %1, 0x100(%0); cache %1, 0x140(%0); \
+ cache %1, 0x180(%0); cache %1, 0x1c0(%0); \
+ cache %1, 0x200(%0); cache %1, 0x240(%0); \
+ cache %1, 0x280(%0); cache %1, 0x2c0(%0); \
+ cache %1, 0x300(%0); cache %1, 0x340(%0); \
+ cache %1, 0x380(%0); cache %1, 0x3c0(%0); \
+ cache %1, 0x400(%0); cache %1, 0x440(%0); \
+ cache %1, 0x480(%0); cache %1, 0x4c0(%0); \
+ cache %1, 0x500(%0); cache %1, 0x540(%0); \
+ cache %1, 0x580(%0); cache %1, 0x5c0(%0); \
+ cache %1, 0x600(%0); cache %1, 0x640(%0); \
+ cache %1, 0x680(%0); cache %1, 0x6c0(%0); \
+ cache %1, 0x700(%0); cache %1, 0x740(%0); \
+ cache %1, 0x780(%0); cache %1, 0x7c0(%0); \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+extern inline void blast_scache64(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache64_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x800;
+ }
+}
+
+extern inline void blast_scache64_page(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache64_unroll32(start,Hit_Writeback_Inv_SD);
+ start += 0x800;
+ }
+}
+
+extern inline void blast_scache64_page_indexed(unsigned long page)
+{
+ unsigned long start = page;
+ unsigned long end = page + PAGE_SIZE;
+
+ while(start < end) {
+ cache64_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x800;
+ }
+}
+
+#define cache128_unroll32(base,op) \
+ __asm__ __volatile__(" \
+ .set noreorder; \
+ cache %1, 0x000(%0); cache %1, 0x080(%0); \
+ cache %1, 0x100(%0); cache %1, 0x180(%0); \
+ cache %1, 0x200(%0); cache %1, 0x280(%0); \
+ cache %1, 0x300(%0); cache %1, 0x380(%0); \
+ cache %1, 0x400(%0); cache %1, 0x480(%0); \
+ cache %1, 0x500(%0); cache %1, 0x580(%0); \
+ cache %1, 0x600(%0); cache %1, 0x680(%0); \
+ cache %1, 0x700(%0); cache %1, 0x780(%0); \
+ cache %1, 0x800(%0); cache %1, 0x880(%0); \
+ cache %1, 0x900(%0); cache %1, 0x980(%0); \
+ cache %1, 0xa00(%0); cache %1, 0xa80(%0); \
+ cache %1, 0xb00(%0); cache %1, 0xb80(%0); \
+ cache %1, 0xc00(%0); cache %1, 0xc80(%0); \
+ cache %1, 0xd00(%0); cache %1, 0xd80(%0); \
+ cache %1, 0xe00(%0); cache %1, 0xe80(%0); \
+ cache %1, 0xf00(%0); cache %1, 0xf80(%0); \
+ .set reorder" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+extern inline void blast_scache128(void)
+{
+ unsigned long start = KSEG0;
+ unsigned long end = KSEG0 + scache_size;
+
+ while(start < end) {
+ cache128_unroll32(start,Index_Writeback_Inv_SD);
+ start += 0x1000;
+ }
+}
+
+extern inline void blast_scache128_page(unsigned long page)
+{
+ cache128_unroll32(page,Hit_Writeback_Inv_SD);
+}
+
+extern inline void blast_scache128_page_indexed(unsigned long page)
+{
+ cache128_unroll32(page,Index_Writeback_Inv_SD);
+}
+
+#endif /* _ASM_R4KCACHE_H */
diff --git a/include/asm-mips64/sgiarcs.h b/include/asm-mips64/sgiarcs.h
index eb736d2bb..8aed20dce 100644
--- a/include/asm-mips64/sgiarcs.h
+++ b/include/asm-mips64/sgiarcs.h
@@ -1,4 +1,4 @@
-/* $Id: sgiarcs.h,v 1.1 1999/08/18 23:37:52 ralf Exp $
+/* $Id: sgiarcs.h,v 1.2 1999/08/20 21:59:08 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -371,6 +371,10 @@ struct linux_smonblock {
int smax; /* Max # of symbols. */
};
+/*
+ * Macros for calling a 32-bit ARC implementation from 64-bit code
+ */
+
#define __arc_clobbers \
"$2","$3","$4","$5","$6","$7","$8","$9","$10","$11", \
"$12","$13","$14","$15","$16","$24","$25","$31"
diff --git a/include/asm-mips64/stackframe.h b/include/asm-mips64/stackframe.h
index 35605e3f4..a9e62c232 100644
--- a/include/asm-mips64/stackframe.h
+++ b/include/asm-mips64/stackframe.h
@@ -1,9 +1,12 @@
-/*
+/* $Id$
+ *
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1994, 1995, 1996 by Ralf Baechle, Paul M. Antoine.
+ * Copyright (C) 1994, 1995, 1996, 1999 Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
+ * Copyright (C) 1999 Silicon Graphics, Inc.
*/
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H
@@ -11,34 +14,7 @@
#include <asm/asm.h>
#include <asm/offset.h>
-#define SAVE_AT \
- sw $1, PT_R1(sp)
-
-#define SAVE_TEMP \
- mfhi v1; \
- sw $8, PT_R8(sp); \
- sw $9, PT_R9(sp); \
- sw v1, PT_HI(sp); \
- mflo v1; \
- sw $10,PT_R10(sp); \
- sw $11, PT_R11(sp); \
- sw v1, PT_LO(sp); \
- sw $12, PT_R12(sp); \
- sw $13, PT_R13(sp); \
- sw $14, PT_R14(sp); \
- sw $15, PT_R15(sp); \
- sw $24, PT_R24(sp)
-
-#define SAVE_STATIC \
- sw $16, PT_R16(sp); \
- sw $17, PT_R17(sp); \
- sw $18, PT_R18(sp); \
- sw $19, PT_R19(sp); \
- sw $20, PT_R20(sp); \
- sw $21, PT_R21(sp); \
- sw $22, PT_R22(sp); \
- sw $23, PT_R23(sp); \
- sw $30, PT_R30(sp)
+#ifdef _LANGUAGE_C
#define __str2(x) #x
#define __str(x) __str2(x)
@@ -57,145 +33,193 @@
: /* No outputs */ \
: "r" (frame))
-#define SAVE_SOME \
- .set push; \
- .set reorder; \
- mfc0 k0, CP0_STATUS; \
- sll k0, 3; /* extract cu0 bit */ \
- .set noreorder; \
- bltz k0, 8f; \
- move k1, sp; \
- .set reorder; \
- /* Called from user mode, new stack. */ \
- lui k1, %hi(kernelsp); \
- lw k1, %lo(kernelsp)(k1); \
-8: \
- move k0, sp; \
- subu sp, k1, PT_SIZE; \
- sw k0, PT_R29(sp); \
- sw $3, PT_R3(sp); \
- sw $0, PT_R0(sp); \
- mfc0 v1, CP0_STATUS; \
- sw $2, PT_R2(sp); \
- sw v1, PT_STATUS(sp); \
- sw $4, PT_R4(sp); \
- mfc0 v1, CP0_CAUSE; \
- sw $5, PT_R5(sp); \
- sw v1, PT_CAUSE(sp); \
- sw $6, PT_R6(sp); \
- mfc0 v1, CP0_EPC; \
- sw $7, PT_R7(sp); \
- sw v1, PT_EPC(sp); \
- sw $25, PT_R25(sp); \
- sw $28, PT_R28(sp); \
- sw $31, PT_R31(sp); \
- ori $28, sp, 0x1fff; \
- xori $28, 0x1fff; \
+#endif /* _LANGUAGE_C */
+
+#ifdef _LANGUAGE_ASSEMBLY
+
+ .macro SAVE_AT
+ sw $1, PT_R1(sp)
+ .endm
+
+ .macro SAVE_TEMP
+ mfhi v1
+ sw $8, PT_R8(sp)
+ sw $9, PT_R9(sp)
+ sw v1, PT_HI(sp)
+ mflo v1
+ sw $10,PT_R10(sp)
+ sw $11, PT_R11(sp)
+ sw v1, PT_LO(sp)
+ sw $12, PT_R12(sp)
+ sw $13, PT_R13(sp)
+ sw $14, PT_R14(sp)
+ sw $15, PT_R15(sp)
+ sw $24, PT_R24(sp)
+ .endm
+
+ .macro SAVE_STATIC
+ sw $16, PT_R16(sp)
+ sw $17, PT_R17(sp)
+ sw $18, PT_R18(sp)
+ sw $19, PT_R19(sp)
+ sw $20, PT_R20(sp)
+ sw $21, PT_R21(sp)
+ sw $22, PT_R22(sp)
+ sw $23, PT_R23(sp)
+ sw $30, PT_R30(sp)
+ .endm
+
+ .macro SAVE_SOME
+ .set push
+ .set reorder
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ .set noreorder
+ bltz k0, 8f
+ move k1, sp
+ .set reorder
+ /* Called from user mode, new stack. */
+ lui k1, %hi(kernelsp)
+ lw k1, %lo(kernelsp)(k1)
+8: move k0, sp
+ subu sp, k1, PT_SIZE
+ sw k0, PT_R29(sp)
+ sw $3, PT_R3(sp)
+ sw $0, PT_R0(sp)
+ mfc0 v1, CP0_STATUS
+ sw $2, PT_R2(sp)
+ sw v1, PT_STATUS(sp)
+ sw $4, PT_R4(sp)
+ mfc0 v1, CP0_CAUSE
+ sw $5, PT_R5(sp)
+ sw v1, PT_CAUSE(sp)
+ sw $6, PT_R6(sp)
+ mfc0 v1, CP0_EPC
+ sw $7, PT_R7(sp)
+ sw v1, PT_EPC(sp)
+ sw $25, PT_R25(sp)
+ sw $28, PT_R28(sp)
+ sw $31, PT_R31(sp)
+ ori $28, sp, 0x1fff
+ xori $28, 0x1fff
.set pop
+ .endm
-#define SAVE_ALL \
- SAVE_SOME; \
- SAVE_AT; \
- SAVE_TEMP; \
+ .macro SAVE_ALL
+ SAVE_SOME
+ SAVE_AT
+ SAVE_TEMP
SAVE_STATIC
+ .endm
-#define RESTORE_AT \
- lw $1, PT_R1(sp); \
+ .macro RESTORE_AT
+ lw $1, PT_R1(sp)
+ .endm
-#define RESTORE_SP \
+ .macro RESTORE_SP
lw sp, PT_R29(sp)
-
-#define RESTORE_TEMP \
- lw $24, PT_LO(sp); \
- lw $8, PT_R8(sp); \
- lw $9, PT_R9(sp); \
- mtlo $24; \
- lw $24, PT_HI(sp); \
- lw $10,PT_R10(sp); \
- lw $11, PT_R11(sp); \
- mthi $24; \
- lw $12, PT_R12(sp); \
- lw $13, PT_R13(sp); \
- lw $14, PT_R14(sp); \
- lw $15, PT_R15(sp); \
+ .endm
+
+ .macro RESTORE_TEMP
+ lw $24, PT_LO(sp)
+ lw $8, PT_R8(sp)
+ lw $9, PT_R9(sp)
+ mtlo $24
+ lw $24, PT_HI(sp)
+ lw $10,PT_R10(sp)
+ lw $11, PT_R11(sp)
+ mthi $24
+ lw $12, PT_R12(sp)
+ lw $13, PT_R13(sp)
+ lw $14, PT_R14(sp)
+ lw $15, PT_R15(sp)
lw $24, PT_R24(sp)
-
-#define RESTORE_STATIC \
- lw $16, PT_R16(sp); \
- lw $17, PT_R17(sp); \
- lw $18, PT_R18(sp); \
- lw $19, PT_R19(sp); \
- lw $20, PT_R20(sp); \
- lw $21, PT_R21(sp); \
- lw $22, PT_R22(sp); \
- lw $23, PT_R23(sp); \
+ .endm
+
+ .macro RESTORE_STATIC
+ lw $16, PT_R16(sp)
+ lw $17, PT_R17(sp)
+ lw $18, PT_R18(sp)
+ lw $19, PT_R19(sp)
+ lw $20, PT_R20(sp)
+ lw $21, PT_R21(sp)
+ lw $22, PT_R22(sp)
+ lw $23, PT_R23(sp)
lw $30, PT_R30(sp)
+ .endm
-#define RESTORE_SOME \
- .set push; \
- .set reorder; \
- mfc0 t0, CP0_STATUS; \
- .set pop; \
- ori t0, 0x1f; \
- xori t0, 0x1f; \
- mtc0 t0, CP0_STATUS; \
- li v1, 0xff00; \
- and t0, v1; \
- lw v0, PT_STATUS(sp); \
- nor v1, $0, v1; \
- and v0, v1; \
- or v0, t0; \
- mtc0 v0, CP0_STATUS; \
- lw v1, PT_EPC(sp); \
- mtc0 v1, CP0_EPC; \
- lw $31, PT_R31(sp); \
- lw $28, PT_R28(sp); \
- lw $25, PT_R25(sp); \
- lw $7, PT_R7(sp); \
- lw $6, PT_R6(sp); \
- lw $5, PT_R5(sp); \
- lw $4, PT_R4(sp); \
- lw $3, PT_R3(sp); \
+ .macro RESTORE_SOME
+ .set push
+ .set reorder
+ mfc0 t0, CP0_STATUS
+ .set pop
+ ori t0, 0x1f
+ xori t0, 0x1f
+ mtc0 t0, CP0_STATUS
+ li v1, 0xff00
+ and t0, v1
+ lw v0, PT_STATUS(sp)
+ nor v1, $0, v1
+ and v0, v1
+ or v0, t0
+ mtc0 v0, CP0_STATUS
+ lw v1, PT_EPC(sp)
+ mtc0 v1, CP0_EPC
+ lw $31, PT_R31(sp)
+ lw $28, PT_R28(sp)
+ lw $25, PT_R25(sp)
+ lw $7, PT_R7(sp)
+ lw $6, PT_R6(sp)
+ lw $5, PT_R5(sp)
+ lw $4, PT_R4(sp)
+ lw $3, PT_R3(sp)
lw $2, PT_R2(sp)
+ .endm
-#define RESTORE_ALL \
- RESTORE_SOME; \
- RESTORE_AT; \
- RESTORE_TEMP; \
- RESTORE_STATIC; \
+ .macro RESTORE_ALL
+ RESTORE_SOME
+ RESTORE_AT
+ RESTORE_TEMP
+ RESTORE_STATIC
RESTORE_SP
+ .endm
/*
* Move to kernel mode and disable interrupts.
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
-#define CLI \
- mfc0 t0,CP0_STATUS; \
- li t1,ST0_CU0|0x1f; \
- or t0,t1; \
- xori t0,0x1f; \
- mtc0 t0,CP0_STATUS
+ .macro CLI
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU0|0x1f
+ or t0, t1
+ xori t0, 0x1f
+ mtc0 t0, CP0_STATUS
+ .endm
/*
* Move to kernel mode and enable interrupts.
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
-#define STI \
- mfc0 t0,CP0_STATUS; \
- li t1,ST0_CU0|0x1f; \
- or t0,t1; \
- xori t0,0x1e; \
+ .macro STI
+ mfc0 t0,CP0_STATUS
+ li t1,ST0_CU0|0x1f
+ or t0,t1
+ xori t0,0x1e
mtc0 t0,CP0_STATUS
+ .endm
/*
* Just move to kernel mode and leave interrupts as they are.
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
-#define KMODE \
- mfc0 t0,CP0_STATUS; \
- li t1,ST0_CU0|0x1e; \
- or t0,t1; \
- xori t0,0x1e; \
- mtc0 t0,CP0_STATUS
+ .macro KMODE
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU0|0x1e
+ or t0, t1
+ xori t0, 0x1e
+ mtc0 t0, CP0_STATUS
+ .endm
+
+#endif /* _LANGUAGE_ASSEMBLY */
#endif /* _ASM_STACKFRAME_H */
diff --git a/include/asm-mips64/string.h b/include/asm-mips64/string.h
index 4b2a82686..56205991b 100644
--- a/include/asm-mips64/string.h
+++ b/include/asm-mips64/string.h
@@ -1,4 +1,4 @@
-/* $Id$
+/* $Id: string.h,v 1.1 1999/08/18 23:37:52 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -12,6 +12,15 @@
#ifndef _ASM_STRING_H
#define _ASM_STRING_H
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *__s, int __c, size_t __count);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+
/* Don't build bcopy at all ... */
#define __HAVE_ARCH_BCOPY
diff --git a/include/asm-mips64/unistd.h b/include/asm-mips64/unistd.h
index bbaca5107..7e906b1ba 100644
--- a/include/asm-mips64/unistd.h
+++ b/include/asm-mips64/unistd.h
@@ -1,10 +1,11 @@
-/* $Id$
+/* $Id: unistd.h,v 1.1 1999/08/18 23:37:53 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998, 1999 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
*
* Changed system calls macros _syscall5 - _syscall7 to push args 5 to 7 onto
* the stack. Robin Farine for ACN S.A, Copyright (C) 1996 by ACN S.A
@@ -1308,6 +1309,97 @@ errno = __res; \
return -1; \
}
+#if (_MIPS_SIM == _ABIN32) || (_MIPS_SIM == _ABI64)
+
+#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
+type name (atype a,btype b,ctype c,dtype d,etype e) \
+{ \
+register long __res __asm__ ("$2"); \
+register long __err __asm__ ("$7"); \
+__asm__ volatile ("move\t$4,%3\n\t" \
+                  "move\t$5,%4\n\t" \
+                  "move\t$6,%5\n\t" \
+                  "move\t$7,%6\n\t" \
+                  "move\t$8,%7\n\t" \
+                  "li\t$2,%2\n\t" \
+                  "syscall" \
+ "li\t$2,%2\n\t" \
+ "syscall" \
+ : "=r" (__res), "=r" (__err) \
+ : "i" (__NR_##name),"r" ((long)(a)), \
+ "r" ((long)(b)), \
+ "r" ((long)(c)), \
+ "r" ((long)(d)), \
+ "r" ((long)(e)) \
+ : "$2","$4","$5","$6","$7","$8","$9","$10","$11","$12", \
+ "$13","$14","$15","$24"); \
+if (__err == 0) \
+ return (type) __res; \
+errno = __res; \
+return -1; \
+}
+
+#define _syscall6(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f) \
+type name (atype a,btype b,ctype c,dtype d,etype e,ftype f) \
+{ \
+register long __res __asm__ ("$2"); \
+register long __err __asm__ ("$7"); \
+__asm__ volatile ("move\t$4,%3\n\t" \
+ "move\t$5,%4\n\t" \
+ "move\t$6,%5\n\t" \
+ "move\t$7,%6\n\t" \
+ "move\t$8,%7\n\t" \
+ "move\t$9,%8\n\t" \
+ "li\t$2,%2\n\t" \
+ "syscall" \
+ : "=r" (__res), "=r" (__err) \
+ : "i" (__NR_##name),"r" ((long)(a)), \
+ "r" ((long)(b)), \
+ "r" ((long)(c)), \
+ "r" ((long)(d)), \
+ "m" ((long)(e)), \
+ "m" ((long)(f)) \
+ : "$2","$3","$4","$5","$6","$7","$8","$9","$10","$11", \
+ "$12","$13","$14","$15","$24"); \
+if (__err == 0) \
+ return (type) __res; \
+errno = __res; \
+return -1; \
+}
+
+#define _syscall7(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e,ftype,f,gtype,g) \
+type name (atype a,btype b,ctype c,dtype d,etype e,ftype f,gtype g) \
+{ \
+register long __res __asm__ ("$2"); \
+register long __err __asm__ ("$7"); \
+__asm__ volatile ("move\t$4,%3\n\t" \
+ "move\t$5,%4\n\t" \
+ "move\t$6,%5\n\t" \
+ "move\t$7,%6\n\t" \
+ "move\t$8,%7\n\t" \
+ "move\t$9,%8\n\t" \
+ "move\t$10,%9\n\t" \
+ "li\t$2,%2\n\t" \
+ "syscall" \
+ : "=r" (__res), "=r" (__err) \
+ : "i" (__NR_##name),"r" ((long)(a)), \
+ "r" ((long)(b)), \
+ "r" ((long)(c)), \
+ "r" ((long)(d)), \
+ "r" ((long)(e)), \
+ "r" ((long)(f)), \
+ "r" ((long)(g)) \
+ : "$2","$3","$4","$5","$6","$7","$8","$9","$10","$11", \
+ "$12","$13","$14","$15","$24"); \
+if (__err == 0) \
+ return (type) __res; \
+errno = __res; \
+return -1; \
+}
+
+#else /* not N32 or 64 ABI */
+
+/* These are here for the sake of userspace code that insists on issuing
+   raw system calls itself instead of using the C library wrappers. */
+
#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
type name (atype a,btype b,ctype c,dtype d,etype e) \
{ \
@@ -1404,6 +1497,8 @@ errno = __res; \
return -1; \
}
+#endif
+
#ifdef __KERNEL_SYSCALLS__
/*