author    Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-10-05 01:18:40 +0000
commit    012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree      87efc733f9b164e8c85c0336f92c8fb7eff6d183 /include/asm-mips64
parent    625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation; I had a number
of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I'm committing this anyway, since I found that the last CVS versions had the same problem.
Diffstat (limited to 'include/asm-mips64')
-rw-r--r--  include/asm-mips64/atomic.h            |  52
-rw-r--r--  include/asm-mips64/bitops.h            |  82
-rw-r--r--  include/asm-mips64/fcntl.h             |  10
-rw-r--r--  include/asm-mips64/resource.h          |   4
-rw-r--r--  include/asm-mips64/semaphore-helper.h  |  15
-rw-r--r--  include/asm-mips64/semaphore.h         |  23
-rw-r--r--  include/asm-mips64/spinlock.h          |  34
-rw-r--r--  include/asm-mips64/system.h            |  17
-rw-r--r--  include/asm-mips64/uaccess.h           |  22
9 files changed, 132 insertions(+), 127 deletions(-)
diff --git a/include/asm-mips64/atomic.h b/include/asm-mips64/atomic.h
index 5d57f8a8c..0ed12d406 100644
--- a/include/asm-mips64/atomic.h
+++ b/include/asm-mips64/atomic.h
@@ -1,5 +1,4 @@
-/* $Id$
- *
+/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
@@ -10,20 +9,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997, 1999 by Ralf Baechle
+ * Copyright (C) 1996, 1997, 1999, 2000 by Ralf Baechle
*/
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
-#include <linux/config.h>
-
#include <asm/sgidefs.h>
-#ifdef CONFIG_SMP
typedef struct { volatile int counter; } atomic_t;
-#else
-typedef struct { int counter; } atomic_t;
-#endif
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
@@ -31,26 +24,17 @@ typedef struct { int counter; } atomic_t;
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-
extern __inline__ void atomic_add(int i, volatile atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
+ "1:\tll\t%0,%1\t\t\t# atomic_add\n\t"
"addu\t%0,%2\n\t"
"sc\t%0,%1\n\t"
"beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
@@ -58,14 +42,12 @@ extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
+ "1:\tll\t%0,%1\t\t\t# atomic_sub\n\t"
"subu\t%0,%2\n\t"
"sc\t%0,%1\n\t"
"beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
/*
@@ -76,18 +58,15 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
+ ".set\tnoreorder\t\t\t# atomic_add_return\n"
"1:\tll\t%1,%2\n\t"
"addu\t%0,%1,%3\n\t"
"sc\t%0,%2\n\t"
"beqz\t%0,1b\n\t"
"addu\t%0,%1,%3\n\t"
".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
return result;
}
@@ -97,18 +76,15 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
+ ".set\tnoreorder\t\t\t# atomic_sub_return\n"
"1:\tll\t%1,%2\n\t"
"subu\t%0,%1,%3\n\t"
"sc\t%0,%2\n\t"
"beqz\t%0,1b\n\t"
"subu\t%0,%1,%3\n\t"
".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
return result;
}
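
The hunks above replace the __atomic_fool_gcc aliasing hack with a direct "m" constraint on v->counter, giving gcc the exact address without the fake 100-int struct. A minimal usage sketch of the primitives defined above (the refcount and function names are illustrative, not part of this patch):

/* Hedged sketch: a reference count built on the atomic_t API above. */
static atomic_t refs = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_add(1, &refs);		/* ll/sc loop retries until sc succeeds */
}

static int put_ref(void)
{
	/* atomic_sub_return() yields the post-decrement value atomically,
	 * so exactly one caller can observe it reaching zero. */
	return atomic_sub_return(1, &refs) == 0;
}
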
diff --git a/include/asm-mips64/bitops.h b/include/asm-mips64/bitops.h
index a6ff41cc0..179eff6f9 100644
--- a/include/asm-mips64/bitops.h
+++ b/include/asm-mips64/bitops.h
@@ -21,6 +21,12 @@
#include <asm/mipsregs.h>
/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/*
* These functions for MIPS ISA > 1 are interrupt and SMP proof and
* interrupt friendly
*/
@@ -36,8 +42,17 @@ set_bit(unsigned long nr, volatile void *addr)
"or\t%0, %2\n\t"
"scd\t%0, %1\n\t"
"beqz\t%0, 1b"
- :"=&r" (temp), "=m" (*m)
- :"ir" (1UL << (nr & 0x3f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & 0x3f)), "m" (*m)
+ : "memory");
+}
+
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
+
+ *m |= 1UL << (nr & 0x3f);
}
extern __inline__ void
@@ -51,10 +66,11 @@ clear_bit(unsigned long nr, volatile void *addr)
"and\t%0, %2\n\t"
"scd\t%0, %1\n\t"
"beqz\t%0, 1b\n\t"
- :"=&r" (temp), "=m" (*m)
- :"ir" (~(1UL << (nr & 0x3f))), "m" (*m));
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (~(1UL << (nr & 0x3f))), "m" (*m));
}
+
extern __inline__ void
change_bit(unsigned long nr, volatile void *addr)
{
@@ -84,12 +100,26 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x3f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x3f)), "m" (*m)
+ : "memory");
return res != 0;
}
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile long *a = addr;
+
+ a += nr >> 6;
+ mask = 1 << (nr & 0x3f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
extern __inline__ unsigned long
test_and_clear_bit(unsigned long nr, volatile void *addr)
{
@@ -105,12 +135,26 @@ test_and_clear_bit(unsigned long nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x3f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x3f)), "m" (*m)
+ : "memory");
return res != 0;
}
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile long *a = addr;
+
+ a += nr >> 6;
+ mask = 1 << (nr & 0x3f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+
+ return retval;
+}
+
extern __inline__ unsigned long
test_and_change_bit(unsigned long nr, volatile void *addr)
{
@@ -125,8 +169,9 @@ test_and_change_bit(unsigned long nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x3f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x3f)), "m" (*m)
+ : "memory");
return res != 0;
}
@@ -175,13 +220,9 @@ find_first_zero_bit (void *addr, unsigned size)
".set\tat\n\t"
".set\treorder\n"
"2:"
- : "=r" (res),
- "=r" (dummy),
- "=r" (addr)
- : "0" ((signed int) 0),
- "1" ((unsigned int) 0xffffffff),
- "2" (addr),
- "r" (size)
+ : "=r" (res), "=r" (dummy), "=r" (addr)
+ : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
+ "2" (addr), "r" (size)
: "$1");
return res;
@@ -208,11 +249,8 @@ find_next_zero_bit (void * addr, int size, int offset)
".set\tat\n\t"
".set\treorder\n"
"1:"
- : "=r" (set),
- "=r" (dummy)
- : "0" (0),
- "1" (1 << bit),
- "r" (*p)
+ : "=r" (set), "=r" (dummy)
+ : "0" (0), "1" (1 << bit), "r" (*p)
: "$1");
if (set < (32 - bit))
return set + offset;
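
bitops.h gains non-atomic __set_bit()/__test_and_set_bit()/__test_and_clear_bit() variants plus "memory" clobbers on the atomic forms. A sketch of the intended division of labour (the flags word and calling contexts are hypothetical):

static unsigned long flags_word;	/* hypothetical 64-bit flag bitmap */

static void mark_initialised(void)
{
	/* Object not yet visible to other CPUs: the cheap, reorderable
	 * non-atomic form is sufficient. */
	__set_bit(0, &flags_word);
}

static void clear_pending(void)
{
	/* Concurrently visible state: use the ll/sc version, bracketed by
	 * the new barrier macros, since clear_bit() itself provides no
	 * compiler barrier. */
	smp_mb__before_clear_bit();
	clear_bit(1, &flags_word);
	smp_mb__after_clear_bit();
}
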
diff --git a/include/asm-mips64/fcntl.h b/include/asm-mips64/fcntl.h
index 95d7dd1c9..3e714eff2 100644
--- a/include/asm-mips64/fcntl.h
+++ b/include/asm-mips64/fcntl.h
@@ -62,6 +62,9 @@
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
+/* for leases */
+#define F_INPROGRESS 16
+
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
@@ -69,6 +72,11 @@
blocking */
#define LOCK_UN 8 /* remove lock */
+#define LOCK_MAND 32 /* This is a mandatory flock */
+#define LOCK_READ 64 /* ... Which allows concurrent read operations */
+#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
+#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
+
typedef struct flock {
short l_type;
short l_whence;
@@ -83,4 +91,6 @@ typedef struct flock {
#define flock64 flock
#endif
+#define F_LINUX_SPECIFIC_BASE 1024
+
#endif /* _ASM_FCNTL_H */
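
The fcntl.h additions track the new generic lease and mandatory-flock support. A userspace sketch of how the LOCK_MAND flags compose (fd is a hypothetical open descriptor, and kernel-side LOCK_MAND handling is assumed):

#include <sys/file.h>

int lock_shared_mandatory(int fd)
{
	/* LOCK_MAND (32) marks the lock mandatory; OR-ing in LOCK_READ (64)
	 * still permits concurrent readers.  LOCK_RW is LOCK_READ|LOCK_WRITE
	 * == 192, matching the values defined above. */
	return flock(fd, LOCK_MAND | LOCK_READ);
}
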
diff --git a/include/asm-mips64/resource.h b/include/asm-mips64/resource.h
index b4af33144..448a8bb3a 100644
--- a/include/asm-mips64/resource.h
+++ b/include/asm-mips64/resource.h
@@ -23,8 +23,9 @@
#define RLIMIT_RSS 7 /* max resident set size */
#define RLIMIT_NPROC 8 /* max number of processes */
#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
-#define RLIM_NLIMITS 10 /* Number of limit flavors. */
+#define RLIM_NLIMITS 11 /* Number of limit flavors. */
/*
* SuS says limits have to be unsigned.
@@ -46,6 +47,7 @@
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
}
#endif /* __KERNEL__ */
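
resource.h adds RLIMIT_LOCKS and bumps RLIM_NLIMITS to match, with an unlimited default in the init table. A userspace sketch of querying the new limit, assuming a libc that already defines RLIMIT_LOCKS:

#include <stdio.h>
#include <sys/resource.h>

void show_lock_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_LOCKS, &rl) == 0)	/* RLIMIT_LOCKS == 10 here */
		printf("file locks: cur=%lu max=%lu\n",
		       (unsigned long) rl.rlim_cur,
		       (unsigned long) rl.rlim_max);
}
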
diff --git a/include/asm-mips64/semaphore-helper.h b/include/asm-mips64/semaphore-helper.h
index fa2f70645..91799d486 100644
--- a/include/asm-mips64/semaphore-helper.h
+++ b/include/asm-mips64/semaphore-helper.h
@@ -1,5 +1,4 @@
-/* $Id: semaphore-helper.h,v 1.2 1999/10/20 18:10:32 ralf Exp $
- *
+/*
* SMP- and interrupt-safe semaphores helper functions.
*
* (C) Copyright 1996 Linus Torvalds
@@ -31,8 +30,8 @@ waking_non_zero(struct semaphore *sem)
"beqz\t%0, 1b\n\t"
"2:"
".text"
- : "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
- : "0"(0));
+ : "=r" (ret), "=r" (tmp), "=m" (sem->waking)
+ : "0" (0));
return ret;
}
@@ -88,8 +87,8 @@ waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
scd %1, %2
beqz %1, 0b
.set pop"
- : "=&r"(ret), "=&r"(tmp), "=m"(*sem)
- : "r"(signal_pending(tsk)), "i"(-EINTR));
+ : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
+ : "r" (signal_pending(tsk)), "i" (-EINTR));
#elif defined(__MIPSEL__)
@@ -121,8 +120,8 @@ waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
scd %1, %2
beqz %1, 0b
.set pop"
- : "=&r"(ret), "=&r"(tmp), "=m"(*sem)
- : "r"(signal_pending(tsk)), "i"(-EINTR));
+ : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
+ : "r" (signal_pending(tsk)), "i" (-EINTR));
#else
#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
diff --git a/include/asm-mips64/semaphore.h b/include/asm-mips64/semaphore.h
index 0ca9689e6..2e164ab33 100644
--- a/include/asm-mips64/semaphore.h
+++ b/include/asm-mips64/semaphore.h
@@ -1,5 +1,4 @@
-/* $Id: semaphore.h,v 1.5 2000/02/18 00:24:49 ralf Exp $
- *
+/*
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
@@ -55,7 +54,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
atomic_set(&sem->waking, 0);
@@ -80,7 +79,7 @@ asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -89,7 +88,7 @@ extern inline void down(struct semaphore * sem)
__down(sem);
}
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
@@ -122,7 +121,7 @@ extern inline int down_interruptible(struct semaphore * sem)
* }
* }
*/
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
long ret, tmp, tmp2, sub;
@@ -161,7 +160,7 @@ extern inline int down_trylock(struct semaphore * sem)
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -228,7 +227,7 @@ struct rw_semaphore {
#define DECLARE_RWSEM_WRITE_LOCKED(name) \
__DECLARE_RWSEM_GENERIC(name, 0)
-extern inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_rwsem(struct rw_semaphore *sem)
{
atomic_set(&sem->count, RW_LOCK_BIAS);
sem->granted = 0;
@@ -246,7 +245,7 @@ extern void __down_read(struct rw_semaphore *sem, int count);
extern void __down_write(struct rw_semaphore *sem, int count);
extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
-extern inline void down_read(struct rw_semaphore *sem)
+static inline void down_read(struct rw_semaphore *sem)
{
int count;
@@ -269,7 +268,7 @@ extern inline void down_read(struct rw_semaphore *sem)
#endif
}
-extern inline void down_write(struct rw_semaphore *sem)
+static inline void down_write(struct rw_semaphore *sem)
{
int count;
@@ -298,7 +297,7 @@ extern inline void down_write(struct rw_semaphore *sem)
there was a writer waiting, and we've bumped the count to 0: we must
wake the writer up. */
-extern inline void up_read(struct rw_semaphore *sem)
+static inline void up_read(struct rw_semaphore *sem)
{
int count;
@@ -321,7 +320,7 @@ extern inline void up_read(struct rw_semaphore *sem)
/*
* Releasing the writer is easy -- just release it and wake up any sleepers.
*/
-extern inline void up_write(struct rw_semaphore *sem)
+static inline void up_write(struct rw_semaphore *sem)
{
int count;
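
The extern inline to static inline conversion is more than style: if gcc ever declines to inline (e.g. at -O0), extern inline emits no out-of-line body and leaves an undefined symbol, whereas static inline falls back to a private copy per translation unit. Use of the API is unchanged; a sketch (the mutex and function are hypothetical):

static DECLARE_MUTEX(conf_sem);		/* hypothetical config mutex */

static int update_config(void)
{
	if (down_interruptible(&conf_sem))
		return -EINTR;		/* caught a signal while sleeping */
	/* ... modify configuration under the semaphore ... */
	up(&conf_sem);
	return 0;
}
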
diff --git a/include/asm-mips64/spinlock.h b/include/asm-mips64/spinlock.h
index 9a1ccf198..7e560d4c0 100644
--- a/include/asm-mips64/spinlock.h
+++ b/include/asm-mips64/spinlock.h
@@ -1,5 +1,4 @@
-/* $Id: spinlock.h,v 1.4 2000/01/25 00:41:46 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -32,9 +31,6 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-typedef struct { unsigned long a[100]; } __dummy_lock_t;
-#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-
static inline void spin_lock(spinlock_t *lock)
{
unsigned int tmp;
@@ -48,8 +44,8 @@ static inline void spin_lock(spinlock_t *lock)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock)), "=&r" (tmp)
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock), "=&r" (tmp)
+ : "o" (lock->lock)
: "memory");
}
@@ -60,8 +56,8 @@ static inline void spin_unlock(spinlock_t *lock)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock))
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock)
+ : "o" (lock->lock)
: "memory");
}
@@ -77,8 +73,8 @@ static inline unsigned int spin_trylock(spinlock_t *lock)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*lock), "=&r" (res)
- :"r" (1), "m" (*lock)
+ : "=&r" (temp), "=m" (lock->lock), "=&r" (res)
+ : "r" (1), "m" (lock->lock)
: "memory");
return res == 0;
@@ -112,8 +108,8 @@ static inline void read_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -132,8 +128,8 @@ static inline void read_unlock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
"sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -150,8 +146,8 @@ static inline void write_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -162,8 +158,8 @@ static inline void write_unlock(rwlock_t *rw)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw))
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock)
+ : "o" (rw->lock)
: "memory");
}
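
spinlock.h drops the __dummy_lock casting trick in favour of direct "o" constraints on lock->lock. Usage is unaffected; a sketch with a hypothetical counter:

static spinlock_t counter_lock = SPIN_LOCK_UNLOCKED;	/* 2.4-era initializer */
static unsigned long counter;

static void bump_counter(void)
{
	spin_lock(&counter_lock);	/* ll/sc acquire loop, then sync */
	counter++;
	spin_unlock(&counter_lock);	/* sync, then sw $0 releases the lock */
}
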
diff --git a/include/asm-mips64/system.h b/include/asm-mips64/system.h
index 4b9d9f551..ca81b8bf5 100644
--- a/include/asm-mips64/system.h
+++ b/include/asm-mips64/system.h
@@ -65,9 +65,7 @@ __asm__ __volatile__( \
".set\tnoreorder\n\t" \
"mfc0\t%0,$12\n\t" \
".set\treorder" \
- : "=r" (x) \
- : /* no inputs */ \
- : "memory")
+ : "=r" (x))
#define __save_and_cli(x) \
__asm__ __volatile__( \
@@ -142,8 +140,7 @@ extern void __global_restore_flags(unsigned long);
__asm__ __volatile__( \
"# prevent instructions being moved around\n\t" \
".set\tnoreorder\n\t" \
- "# 8 nops to fool the R4400 pipeline\n\t" \
- "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
+ "sync\n\t" \
".set\treorder" \
: /* no output */ \
: /* no input */ \
@@ -151,6 +148,16 @@ __asm__ __volatile__( \
#define rmb() mb()
#define wmb() mb()
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
#define set_mb(var, value) \
do { var = value; mb(); } while (0)
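
The new smp_mb()/smp_rmb()/smp_wmb() macros compile to real barriers only under CONFIG_SMP and collapse to a compiler barrier() otherwise. The classic message-passing pattern they serve, as a sketch (data and ready are hypothetical shared variables):

static int data;
static volatile int ready;

static void producer(void)
{
	data = 42;
	smp_wmb();		/* store to data ordered before the flag */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;		/* spin until the producer's flag is visible */
	smp_rmb();		/* flag read ordered before the data read */
	return data;
}
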
diff --git a/include/asm-mips64/uaccess.h b/include/asm-mips64/uaccess.h
index 2a9f2ee16..1727b8de2 100644
--- a/include/asm-mips64/uaccess.h
+++ b/include/asm-mips64/uaccess.h
@@ -84,18 +84,6 @@ extern inline int verify_area(int type, const void * addr, unsigned long size)
#define __get_user(x,ptr) \
__get_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-/*
- * The "xxx_ret" versions return constant specified in third argument, if
- * something bad happens. These macros can be optimized for the
- * case of just returning from the function xxx_ret is used.
- */
-
-#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })
-#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })
-
-#define __put_user_ret(x,ptr,ret) ({ if (__put_user(x,ptr)) return ret; })
-#define __get_user_ret(x,ptr,ret) ({ if (__get_user(x,ptr)) return ret; })
-
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
@@ -213,16 +201,6 @@ extern void __put_user_unknown(void);
"jal\t" #destination "\n\t"
#endif
-#define copy_to_user_ret(to,from,n,retval) ({ \
-if (copy_to_user(to,from,n)) \
- return retval; \
-})
-
-#define copy_from_user_ret(to,from,n,retval) ({ \
-if (copy_from_user(to,from,n)) \
- return retval; \
-})
-
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
#define __copy_to_user(to,from,n) ({ \
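
Finally, uaccess.h loses the *_ret convenience macros, which hid a return statement inside a macro expansion. Callers now spell the early exit out; a sketch of the mechanical conversion (the function and its arguments are hypothetical):

/* Before: put_user_ret(val, uptr, -EFAULT);
 * After:  the explicit test below. */
static int copy_result_to_user(int val, int *uptr)
{
	if (put_user(val, uptr))
		return -EFAULT;		/* fault while writing user memory */
	return 0;
}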