author    Ralf Baechle <ralf@linux-mips.org>    1994-11-28 11:59:19 +0000
committer <ralf@linux-mips.org>                 1994-11-28 11:59:19 +0000
commit    1513ff9b7899ab588401c89db0e99903dbf5f886 (patch)
tree      f69cc81a940a502ea23d664c3ffb2d215a479667 /include/asm-mips
Import of Linus's Linux 1.1.68
Diffstat (limited to 'include/asm-mips')
-rw-r--r--  include/asm-mips/bitops.h     20
-rw-r--r--  include/asm-mips/delay.h      33
-rw-r--r--  include/asm-mips/mipsregs.h   83
-rw-r--r--  include/asm-mips/segment.h   217
-rw-r--r--  include/asm-mips/string.h    209
-rw-r--r--  include/asm-mips/system.h     70
-rw-r--r--  include/asm-mips/unistd.h    134
7 files changed, 766 insertions, 0 deletions
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
new file mode 100644
index 000000000..9665c7f01
--- /dev/null
+++ b/include/asm-mips/bitops.h
@@ -0,0 +1,20 @@
+/*
+ * include/asm-mips/bitops.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994 by Ralf Baechle
+ */
+
+#ifndef _ASM_MIPS_BITOPS_H_
+#define _ASM_MIPS_BITOPS_H_
+
+/*
+ * On MIPS, inline assembler bit functions are no more efficient than the
+ * standard C counterparts, so the generic version is used (sketch below).
+ */
+#include <asm-generic/bitops.h>
+
+#endif /* _ASM_MIPS_BITOPS_H_ */
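The generic fallback keeps the bit operations in plain C. A minimal sketch of what such helpers look like, assuming the classic interface that takes a bit number and a word array (illustration only, not the kernel's actual asm-generic/bitops.h):

/* Illustrative plain-C bit operations; assumes 32-bit longs. */
static inline int sketch_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 31);	/* bit within its word */
	unsigned long *p = addr + (nr >> 5);	/* word holding the bit */
	int old = (*p & mask) != 0;		/* previous bit value */

	*p |= mask;
	return old;
}

static inline int sketch_test_bit(int nr, const unsigned long *addr)
{
	return (addr[nr >> 5] >> (nr & 31)) & 1;
}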
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
new file mode 100644
index 000000000..09fd0c470
--- /dev/null
+++ b/include/asm-mips/delay.h
@@ -0,0 +1,33 @@
+#ifndef _MIPS_DELAY_H
+#define _MIPS_DELAY_H
+
+extern __inline__ void __delay(int loops)
+{
+ __asm__(".align 3\n"
+ "1:\tbeq\t$0,%0,1b\n\t"
+ "addiu\t%0,%0,-1\n\t"
+ :
+ :"d" (loops));
+}
+
+/*
+ * Division by multiplication: we don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays (< 1 msec).  A lookup table would
+ * really be better, as the multiplications take much too long for
+ * short delays.  This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant).  A plain-C sketch of the computation follows this diff.
+ */
+extern __inline__ void udelay(unsigned long usecs)
+{
+ usecs *= 0x000010c6; /* 2**32 / 1000000 */
+ __asm__("mul\t%0,%0,%1"
+ :"=d" (usecs)
+ :"0" (usecs),"d" (loops_per_sec)
+ :"ax");
+ __delay(usecs);
+}
+
+#endif /* defined(_MIPS_DELAY_H) */
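For reference, the scaling that udelay() aims for can be written out in plain C: scale the microsecond count by 2^32/10^6, multiply by loops_per_sec, and keep the upper 32 bits of the product, so no division is needed. A hedged sketch of that intent (the 64-bit types, the variable name and the busy loop are for illustration only):

#include <stdint.h>

/* Sketch of the udelay() fixed-point trick; loops_per_sec_sketch is an
 * assumed calibration value, not the kernel's variable. */
static unsigned long loops_per_sec_sketch = 1000000;

static void udelay_sketch(unsigned long usecs)
{
	/* 0x10c6 ~ 2^32 / 1000000: microseconds as a 32-bit fraction
	 * of a second. */
	uint32_t frac = (uint32_t)(usecs * 0x10c6);

	/* loops = usecs / 10^6 * loops_per_sec, computed without a
	 * division by keeping the high half of a 64-bit product. */
	volatile uint32_t loops =
		((uint64_t)frac * loops_per_sec_sketch) >> 32;

	while (loops--)
		;	/* spin, like __delay() */
}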
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
new file mode 100644
index 000000000..2fd47473e
--- /dev/null
+++ b/include/asm-mips/mipsregs.h
@@ -0,0 +1,83 @@
+/*
+ * include/asm-mips/mipsregs.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Ralf Baechle
+ */
+
+#ifndef _ASM_MIPS_MIPSREGS_H_
+#define _ASM_MIPS_MIPSREGS_H_
+
+/*
+ * The following macros are especially useful for __asm__
+ * inline assembler.
+ */
+
+#ifndef __STR
+#define __STR(x) #x
+#endif
+#ifndef STR
+#define STR(x) __STR(x)
+#endif
+
+/*
+ * Coprocessor 0 register names
+ */
+#define CP0_INDEX $0
+#define CP0_RANDOM $1
+#define CP0_ENTRYLO0 $2
+#define CP0_ENTRYLO1 $3
+#define CP0_CONTEXT $4
+#define CP0_PAGEMASK $5
+#define CP0_WIRED $6
+#define CP0_BADVADDR $8
+#define CP0_COUNT $9
+#define CP0_ENTRYHI $10
+#define CP0_COMPARE $11
+#define CP0_STATUS $12
+#define CP0_CAUSE $13
+#define CP0_EPC $14
+#define CP0_PRID $15
+#define CP0_CONFIG $16
+#define CP0_LLADDR $17
+#define CP0_WATCHLO $18
+#define CP0_WATCHHI $19
+#define CP0_XCONTEXT $20
+#define CP0_ECC $26
+#define CP0_CACHEERR $27
+#define CP0_TAGLO $28
+#define CP0_TAGHI $29
+#define CP0_ERROREPC $30
+
+/*
+ * Values for pagemask register
+ */
+#define PM_4K 0x00000000
+#define PM_16K 0x00006000
+#define PM_64K 0x0001e000
+#define PM_256K 0x0007e000
+#define PM_1M 0x001fe000
+#define PM_4M 0x007fe000
+#define PM_16M 0x01ffe000
+
+/*
+ * Values used for computation of new tlb entries
+ */
+#define PL_4K 12
+#define PL_16K 14
+#define PL_64K 16
+#define PL_256K 18
+#define PL_1M 20
+#define PL_4M 22
+#define PL_16M 24
+
+/*
+ * Compute a vpn/pfn entry for EntryHi register
+ */
+#define VPN(addr,pagesizeshift) ((addr) & ~((1 << (pagesizeshift))-1))
+#define PFN(addr,pagesizeshift) (((addr) & ((1 << (pagesizeshift))-1)) << 6)
+
+#endif /* _ASM_MIPS_MIPSREGS_H_ */
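The __STR()/STR() pair exists so that the CP0 register names above can be pasted into inline assembler strings. A hedged sketch of how a Status register read might be written with them (the macro name is invented for illustration and is not part of this patch):

/* Illustration only: read CP0 Status via the STR() helper. */
#define read_cp0_status_sketch()				\
({								\
	unsigned int __val;					\
	__asm__ __volatile__("mfc0\t%0," STR(CP0_STATUS)	\
			     : "=r" (__val));			\
	__val;							\
})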
diff --git a/include/asm-mips/segment.h b/include/asm-mips/segment.h
new file mode 100644
index 000000000..8b29ad0b2
--- /dev/null
+++ b/include/asm-mips/segment.h
@@ -0,0 +1,217 @@
+/*
+ * include/asm-mips/segment.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Ralf Baechle
+ *
+ */
+
+#ifndef _ASM_MIPS_SEGMENT_H_
+#define _ASM_MIPS_SEGMENT_H_
+
+static inline unsigned char get_user_byte(const char * addr)
+{
+ register unsigned char _v;
+
+ __asm__ ("lbu\t%0,%1":"=r" (_v):"r" (*addr));
+
+ return _v;
+}
+
+#define get_fs_byte(addr) get_user_byte((char *)(addr))
+
+static inline unsigned short get_user_word(const short *addr)
+{
+ unsigned short _v;
+
+ __asm__ ("lhu\t%0,%1":"=r" (_v):"r" (*addr));
+
+ return _v;
+}
+
+#define get_fs_word(addr) get_user_word((short *)(addr))
+
+static inline unsigned long get_user_long(const int *addr)
+{
+ unsigned long _v;
+
+ __asm__ ("lwu\t%0,%1":"=r" (_v):"r" (*addr)); \
+ return _v;
+}
+
+#define get_fs_long(addr) get_user_long((int *)(addr))
+
+static inline unsigned long get_user_dlong(const int *addr)
+{
+ unsigned long _v;
+
+ __asm__ ("ld\t%0,%1":"=r" (_v):"r" (*addr)); \
+ return _v;
+}
+
+#define get_fs_dlong(addr) get_user_dlong((int *)(addr))
+
+static inline void put_user_byte(char val,char *addr)
+{
+__asm__ ("sb\t%0,%1": /* no outputs */ :"r" (val),"r" (*addr));
+}
+
+#define put_fs_byte(x,addr) put_user_byte((x),(char *)(addr))
+
+static inline void put_user_word(short val,short * addr)
+{
+__asm__ ("sh\t%0,%1": /* no outputs */ :"r" (val),"r" (*addr));
+}
+
+#define put_fs_word(x,addr) put_user_word((x),(short *)(addr))
+
+static inline void put_user_long(unsigned long val,int * addr)
+{
+__asm__ ("sw\t%0,%1": /* no outputs */ :"r" (val),"r" (*addr));
+}
+
+#define put_fs_long(x,addr) put_user_long((x),(int *)(addr))
+
+static inline void put_user_dlong(unsigned long val,int * addr)
+{
+__asm__ ("sd\t%0,%1": /* no outputs */ :"r" (val),"r" (*addr));
+}
+
+#define put_fs_dlong(x,addr) put_user_dlong((x),(int *)(addr))
+
+/*
+ * The following variable is defined in mips/head.S.
+ */
+extern unsigned long segment_fs;
+
+static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __asm__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%2)\n\t"
+ "addiu\t%2,%2,1\n\t"
+ "sb\t$1,(%1)\n\t"
+ "addiu\t%0,%0,-1\n\t"
+ "bne\t$0,%0,1b\n\t"
+ "addiu\t%1,%1,1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ :"d" (n),"d" (((long) to)| segment_fs),"d" ((long) from)
+ :"$1");
+}
+
+static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ /*
+ * Use put_user_byte to avoid trouble with alignment.
+ */
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ put_user_byte(*(const char *) from, (char *) to);
+ return;
+ case 2:
+ put_user_byte(*(const char *) from, (char *) to);
+ put_user_byte(*(1+(const char *) from), 1+(char *) to);
+ return;
+ case 3:
+ put_user_byte(*((const char *) from), (char *) to);
+ put_user_byte(*(1+(const char *) from), 1+(char *) to);
+ put_user_byte(*(2+(const char *) from), 2+(char *) to);
+ return;
+ case 4:
+ put_user_byte(*((const char *) from), (char *) to);
+ put_user_byte(*(1+(const char *) from), 1+(char *) to);
+ put_user_byte(*(2+(const char *) from), 2+(char *) to);
+ put_user_byte(*(3+(const char *) from), 3+(char *) to);
+ return;
+ }
+
+ __generic_memcpy_tofs(to, from, n);
+
+ return;
+}
+
+static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __asm__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%2)\n\t"
+ "addiu\t%2,%2,1\n\t"
+ "sb\t$1,(%1)\n\t"
+ "addiu\t%0,%0,-1\n\t"
+ "bne\t$0,%0,1b\n\t"
+ "addiu\t%1,%1,1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : /* no outputs */
+ :"d" (n),"d" ((long) to),"d" (((long) from | segment_fs))
+ :"$1","memory");
+}
+
+static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ /*
+ * Use get_user_byte to avoid trouble with alignment.
+ */
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ *(char *)to = get_user_byte((const char *) from);
+ return;
+ case 2:
+ *(char *) to = get_user_byte((const char *) from);
+ *(1+(char *) to) = get_user_byte(1+(const char *) from);
+ return;
+ case 3:
+ *(char *) to = get_user_byte((const char *) from);
+ *(1+(char *) to) = get_user_byte(1+(const char *) from);
+ *(2+(char *) to) = get_user_byte(2+(const char *) from);
+ return;
+ case 4:
+ *(char *) to = get_user_byte((const char *) from);
+ *(1+(char *) to) = get_user_byte(1+(const char *) from);
+ *(2+(char *) to) = get_user_byte(2+(const char *) from);
+ *(3+(char *) to) = get_user_byte(3+(const char *) from);
+ return;
+ }
+
+
+ __generic_memcpy_fromfs(to, from, n);
+ return;
+}
+
+#define memcpy_fromfs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_fromfs((to),(from),(n)) : \
+ __generic_memcpy_fromfs((to),(from),(n)))
+
+#define memcpy_tofs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_tofs((to),(from),(n)) : \
+ __generic_memcpy_tofs((to),(from),(n)))
+
+static inline unsigned long get_fs(void)
+{
+ return segment_fs;
+}
+
+static inline unsigned long get_ds(void)
+{
+ return KERNEL_DS;
+}
+
+static inline void set_fs(unsigned long val)
+{
+ segment_fs = val;
+}
+
+#endif /* _ASM_MIPS_SEGMENT_H_ */
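get_fs()/set_fs() and memcpy_tofs() are used the same way as on the other ports of this era: a driver copies kernel data to a user buffer through whichever segment is currently selected. A minimal, hypothetical fragment (the function and buffer names are invented for illustration):

/* Hypothetical read() helper built on the primitives above. */
static char sample_msg[] = "hello from kernel space\n";

static int sample_read(char *user_buf, int count)
{
	int n = sizeof(sample_msg) - 1;

	if (count < n)
		n = count;
	/* The stores go through the segment chosen by set_fs(), so
	 * user_buf may be a user-space pointer. */
	memcpy_tofs(user_buf, sample_msg, n);
	return n;
}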
diff --git a/include/asm-mips/string.h b/include/asm-mips/string.h
new file mode 100644
index 000000000..06d4f2ce5
--- /dev/null
+++ b/include/asm-mips/string.h
@@ -0,0 +1,209 @@
+/*
+ * include/asm-mips/string.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1994 by Ralf Baechle
+ */
+
+#ifndef _ASM_MIPS_STRING_H_
+#define _ASM_MIPS_STRING_H_
+
+#define __USE_PORTABLE_STRINGS_H_
+
+extern inline char * strcpy(char * dest,const char *src)
+{
+ char *xdest = dest;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%1)\n\t"
+ "addiu\t%1,%1,1\n\t"
+ "sb\t$1,(%0)\n\t"
+ "bne\t$0,$1,1b\n\t"
+ "addiu\t%0,%0,1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "=d" (dest), "=d" (src)
+ : "0" (dest), "1" (src)
+ : "$1","memory");
+
+ return xdest;
+}
+
+extern inline char * strncpy(char *dest, const char *src, size_t n)
+{
+ char *xdest = dest;
+
+ if (n == 0)
+ return xdest;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%1)\n\t"
+ "addiu\t%2,%2,-1\n\t"
+ "sb\t$1,(%0)\n\t"
+ "beq\t$0,$1,2f\n\t"
+ "addiu\t%0,%0,1\n\t"
+ "bne\t$0,%2,1b\n\t"
+ "addiu\t%1,%1,1\n"
+ "2:\n\t"
+ ".set\tat\n\t"
+ ".set\treorder\n\t"
+ : "=d" (dest), "=d" (src), "=d" (n)
+ : "0" (dest), "1" (src), "2" (n)
+ : "$1","memory");
+
+ return xdest;
+}
+
+#define __USE_PORTABLE_strcat
+#define __USE_PORTABLE_strncat
+
+extern inline int strcmp(const char * cs,const char * ct)
+{
+ int __res;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n\t"
+ "lbu\t%2,(%0)\n"
+ "1:\tlbu\t$1,(%1)\n\t"
+ "addiu\t%0,%0,1\n\t"
+ "bne\t$1,%2,2f\n\t"
+ "addiu\t%1,%1,1\n\t"
+ "bne\t$0,%2,1b\n\t"
+ "lbu\t%2,(%0)\n"
+ "move\t%2,$1\n"
+ "2:\tsub\t%2,%2,$1\n"
+ "3:\t.set\tat\n\t"
+ ".set\treorder\n\t"
+ : "=d" (cs), "=d" (ct), "=d" (__res)
+ : "0" (cs), "1" (ct)
+ : "$1");
+
+ return __res;
+}
+
+extern inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+ char __res;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t%3,(%0)\n\t"
+ "beq\t$0,%2,2f\n\t"
+ "lbu\t$1,(%1)\n\t"
+ "addiu\t%2,%2,-1\n\t"
+ "bne\t$1,%3,3f\n\t"
+ "addiu\t%0,%0,1\n\t"
+ "bne\t$0,%3,1b\n\t"
+ "addiu\t%1,%1,1\n"
+ "2:\tmove\t%3,$1\n"
+ "3:\tsub\t%3,%3,$1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "=d" (cs), "=d" (ct), "=d" (count), "=d" (__res)
+ : "0" (cs), "1" (ct), "2" (count)
+ : "$1");
+
+ return __res;
+}
+
+#define __USE_PORTABLE_strchr
+#define __USE_PORTABLE_strlen
+#define __USE_PORTABLE_strspn
+#define __USE_PORTABLE_strpbrk
+#define __USE_PORTABLE_strtok
+
+extern inline void * memset(void * s,char c,size_t count)
+{
+ void *xs = s;
+
+ if (!count)
+ return xs;
+ __asm__ __volatile__(
+ ".set\tnoreorder\n"
+ "1:\tsb\t%3,(%0)\n\t"
+ "addiu\t%1,%1,-1\n\t"
+ "bne\t$0,%1,1b\n\t"
+ "addiu\t%3,%3,1\n\t"
+ ".set\treorder"
+ : "=d" (s), "=d" (count)
+ : "0" (s), "d" (c), "1" (count)
+ : "memory");
+
+ return xs;
+}
+
+extern inline void * memcpy(void * to, const void * from, size_t n)
+{
+ void *xto = to;
+
+ if (!n)
+ return xto;
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%1)\n\t"
+ "addiu\t%1,%1,1\n\t"
+ "sb\t$1,(%0)\n\t"
+ "addiu\t%2,%2,-1\n\t"
+ "bne\t$0,%2,1b\n\t"
+ "addiu\t%0,%0,1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "=d" (to), "=d" (from), "=d" (n)
+ : "0" (to), "1" (from), "2" (n)
+ : "$1","memory" );
+ return xto;
+}
+
+extern inline void * memmove(void * dest,const void * src, size_t n)
+{
+ void *xdest = dest;
+
+ if (!n)
+ return xdest;
+
+ if (dest < src)
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,(%1)\n\t"
+ "addiu\t%1,%1,1\n\t"
+ "sb\t$1,(%0)\n\t"
+ "addiu\t%2,%2,-1\n\t"
+ "bne\t$0,%2,1b\n\t"
+ "addiu\t%0,%0,1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "=d" (dest), "=d" (src), "=d" (n)
+ : "0" (dest), "1" (src), "2" (n)
+ : "$1","memory" );
+ else
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ ".set\tnoat\n"
+ "1:\tlbu\t$1,-1(%1)\n\t"
+ "addiu\t%1,%1,-1\n\t"
+ "sb\t$1,-1(%0)\n\t"
+ "addiu\t%2,%2,-1\n\t"
+ "bne\t$0,%2,1b\n\t"
+ "addiu\t%0,%0,-1\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "=d" (dest), "=d" (src), "=d" (n)
+ : "0" (dest+n), "1" (src+n), "2" (n)
+ : "$1","memory" );
+ return xdest;
+}
+
+#define __USE_PORTABLE_memcmp
+
+#endif /* _ASM_MIPS_STRING_H_ */
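The memmove() above chooses a copy direction from the pointer order so that overlapping regions are handled correctly. In plain C, the same byte-at-a-time logic of its two assembler loops reads as follows (a sketch, not a replacement):

#include <stddef.h>

/* Plain-C rendering of the forward/backward loops in memmove(). */
static void *memmove_sketch(void *dest, const void *src, size_t n)
{
	char *d = dest;
	const char *s = src;

	if (d < s) {
		/* Forward copy is safe when dest lies below src. */
		while (n--)
			*d++ = *s++;
	} else {
		/* Copy backwards so an overlapping tail is read
		 * before it is overwritten. */
		d += n;
		s += n;
		while (n--)
			*--d = *--s;
	}
	return dest;
}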
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
new file mode 100644
index 000000000..5e0dbfe3c
--- /dev/null
+++ b/include/asm-mips/system.h
@@ -0,0 +1,70 @@
+/*
+ * include/asm-mips/system.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 by Ralf Baechle
+ */
+
+#ifndef _ASM_MIPS_SYSTEM_H_
+#define _ASM_MIPS_SYSTEM_H_
+
+#include <asm/segment.h>
+#include <asm/mipsregs.h>
+
+/*
+ * move_to_user_mode() doesn't switch to user mode on the MIPS, since
+ * that would run us into problems: the kernel is located at virtual
+ * address 0x80000000.  If we switched over to user mode now, we would
+ * immediately get an address error exception.
+ * Anyway - we don't have a problem with a task running in kernel mode,
+ * as long as its code is foolproof.
+ */
+#define move_to_user_mode()
+
+#define sti() \
+__asm__ __volatile__( \
+ "mfc0\t$1,"STR(CP0_STATUS)"\n\t" \
+ "ori\t$1,$1,1\n\t" \
+ "mtc0\t$1,"STR(CP0_STATUS)"\n\t" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ : "$1","memory")
+
+#define cli() \
+__asm__ __volatile__( \
+ "mfc0\t$1,"STR(CP0_STATUS)"\n\t" \
+ "srl\t$1,$1,1\n\t" \
+ "sll\t$1,$1,1\n\t" \
+ "mtc0\t$1,"STR(CP0_STATUS)"\n\t" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ : "$1","memory")
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define save_flags(x) \
+__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ ".set\tnoat\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "=r" (x) \
+ : /* no inputs */ \
+ : "memory")
+
+#define restore_flags(x) \
+__asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ ".set\tnoat\n\t" \
+ "mtc0\t%0,$12\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : /* no output */ \
+ : "r" (x) \
+ : "memory")
+
+#endif /* _ASM_MIPS_SYSTEM_H_ */
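cli() clears the interrupt-enable bit in the Status register and sti() sets it, so save_flags()/restore_flags() bracket short critical sections in the usual way. A brief usage sketch (the counter is a made-up example):

/* Hypothetical critical section using the macros above. */
static unsigned long pending_events;

static void note_event(void)
{
	unsigned long flags;

	save_flags(flags);	/* remember the current Status register */
	cli();			/* mask interrupts */
	pending_events++;	/* work that must not be interrupted */
	restore_flags(flags);	/* restore the saved Status register */
}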
diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h
new file mode 100644
index 000000000..46ce46ff0
--- /dev/null
+++ b/include/asm-mips/unistd.h
@@ -0,0 +1,134 @@
+#ifndef _ASM_MIPS_UNISTD_H_
+#define _ASM_MIPS_UNISTD_H_
+
+/* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */
+#define _syscall0(type,name) \
+type name(void) \
+{ \
+register long __res; \
+__asm__ volatile (".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall\n\t" \
+ : "=d" (__res) \
+ : "i" (__NR_##name) \
+ : "$1"); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall1(type,name,atype,a) \
+type name(atype a) \
+{ \
+register long __res; \
+__asm__ volatile ("move\t$2,%2\n\t" \
+ ".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall" \
+ : "=d" (__res) \
+ : "i" (__NR_##name),"d" ((long)(a)) \
+ : "$1","$2"); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall2(type,name,atype,a,btype,b) \
+type name(atype a,btype b) \
+{ \
+register long __res; \
+__asm__ volatile ("move\t$2,%2\n\t" \
+ "move\t$3,%3\n\t" \
+ ".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall" \
+ : "=d" (__res) \
+ : "i" (__NR_##name),"d" ((long)(a)), \
+ "d" ((long)(b))); \
+ : "$1","$2","$3"); \
+if (__res >= 0) \
+ return (type) __res; \
+errno = -__res; \
+return -1; \
+}
+
+#define _syscall3(type,name,atype,a,btype,b,ctype,c) \
+type name (atype a, btype b, ctype c) \
+{ \
+register long __res; \
+__asm__ volatile ("move\t$2,%2\n\t" \
+ "move\t$3,%3\n\t" \
+ "move\t$4,%4\n\t" \
+ ".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall" \
+ : "=d" (__res) \
+ : "i" (__NR_##name),"d" ((long)(a)), \
+ "d" ((long)(b)), \
+ "d" ((long)(c)) \
+ : "$1","$2","$3","$4"); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall4(type,name,atype,a,btype,b,ctype,c,dtype,d) \
+type name (atype a, btype b, ctype c, dtype d) \
+{ \
+register long __res; \
+__asm__ volatile (".set\tnoat\n\t" \
+ "move\t$2,%2\n\t" \
+ "move\t$3,%3\n\t" \
+ "move\t$4,%4\n\t" \
+ "move\t$5,%5\n\t" \
+ ".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall" \
+ : "=d" (__res) \
+ : "i" (__NR_##name),"d" ((long)(a)), \
+ "d" ((long)(b)), \
+ "d" ((long)(c)), \
+ "d" ((long)(d)) \
+ : "$1","$2","$3","$4","$5"); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#define _syscall5(type,name,atype,a,btype,b,ctype,c,dtype,d,etype,e) \
+type name (atype a,btype b,ctype c,dtype d,etype e) \
+{ \
+register long __res; \
+__asm__ volatile (".set\tnoat\n\t" \
+ "move\t$2,%2\n\t" \
+ "move\t$3,%3\n\t" \
+ "move\t$4,%4\n\t" \
+ "move\t$5,%5\n\t" \
+ "move\t$6,%6\n\t" \
+ ".set\tnoat\n\t" \
+ "li\t$1,%1\n\t" \
+ ".set\tat\n\t" \
+ "syscall" \
+ : "=d" (__res) \
+ : "i" (__NR_##name),"d" ((long)(a)), \
+ "d" ((long)(b)), \
+ "d" ((long)(c)), \
+ "d" ((long)(d)), \
+ "d" ((long)(e)) \
+ : "$1","$2","$3","$4","$5","$6"); \
+if (__res>=0) \
+ return (type) __res; \
+errno=-__res; \
+return -1; \
+}
+
+#endif /* _ASM_MIPS_UNISTD_H_ */
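Each _syscallN macro expands to a complete stub function; callers instantiate one per system call. A hedged example, assuming a syscall number for write has been defined elsewhere (the value below is a placeholder) and that errno is visible:

/* Illustrative expansion site for the macros above. */
#define __NR_write 4	/* placeholder number, for illustration only */

extern int errno;

_syscall3(int, write, int, fd, const char *, buf, unsigned long, count)

/* After expansion, write(fd, buf, count) behaves like a normal
 * function: it returns the syscall result, or -1 with errno set. */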