author	Ralf Baechle <ralf@linux-mips.org>	2000-10-05 01:18:40 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	2000-10-05 01:18:40 +0000
commit	012bb3e61e5eced6c610f9e036372bf0c8def2d1 (patch)
tree	87efc733f9b164e8c85c0336f92c8fb7eff6d183 /include/asm-mips
parent	625a1589d3d6464b5d90b8a0918789e3afffd220 (diff)
Merge with Linux 2.4.0-test9. Please check DECstation, I had a number
of rejects to fix up while integrating Linus' patches. I also found that this kernel will only boot SMP on Origin; the UP kernel freezes soon after bootup with SCSI timeout messages. I'm committing this anyway since the last CVS versions had the same problem.
Diffstat (limited to 'include/asm-mips')
-rw-r--r--	include/asm-mips/atomic.h	71
-rw-r--r--	include/asm-mips/bitops.h	150
-rw-r--r--	include/asm-mips/fcntl.h	19
-rw-r--r--	include/asm-mips/resource.h	4
-rw-r--r--	include/asm-mips/semaphore.h	22
-rw-r--r--	include/asm-mips/spinlock.h	30
-rw-r--r--	include/asm-mips/system.h	67
-rw-r--r--	include/asm-mips/uaccess.h	28
8 files changed, 211 insertions, 180 deletions
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index fbd2daee7..e4084d577 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -9,20 +9,14 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 1997 by Ralf Baechle
- *
- * $Id: atomic.h,v 1.6 1999/07/26 19:42:42 harald Exp $
+ * Copyright (C) 1996, 1997, 2000 by Ralf Baechle
*/
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H
#include <linux/config.h>
-#ifdef CONFIG_SMP
typedef struct { volatile int counter; } atomic_t;
-#else
-typedef struct { int counter; } atomic_t;
-#endif
#ifdef __KERNEL__
#define ATOMIC_INIT(i) { (i) }
@@ -38,7 +32,7 @@ typedef struct { int counter; } atomic_t;
* The MIPS I implementation is only atomic with respect to
* interrupts. R3000 based multiprocessor machines are rare anyway ...
*/
-extern __inline__ void atomic_add(int i, volatile atomic_t * v)
+extern __inline__ void atomic_add(int i, atomic_t * v)
{
int flags;
@@ -48,7 +42,7 @@ extern __inline__ void atomic_add(int i, volatile atomic_t * v)
restore_flags(flags);
}
-extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
+extern __inline__ void atomic_sub(int i, atomic_t * v)
{
int flags;
@@ -108,41 +102,30 @@ extern __inline__ void atomic_clear_mask(unsigned long mask, unsigned long * v)
* implementation is SMP safe ...
*/
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-#define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-
-extern __inline__ void atomic_add(int i, volatile atomic_t * v)
+extern __inline__ void atomic_add(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
- "addu\t%0,%2\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ "1:\tll\t%0, %1\t\t\t# atomic_add\n\t"
+ "addu\t%0, %2\n\t"
+ "sc\t%0, %1\n\t"
+ "beqz\t%0, 1b"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
-extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
+extern __inline__ void atomic_sub(int i, atomic_t * v)
{
unsigned long temp;
__asm__ __volatile__(
- "1:\tll\t%0,%1\n\t"
- "subu\t%0,%2\n\t"
- "sc\t%0,%1\n\t"
- "beqz\t%0,1b"
- :"=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ "1:\tll\t%0, %1\t\t\t# atomic_sub\n\t"
+ "subu\t%0, %2\n\t"
+ "sc\t%0, %1\n\t"
+ "beqz\t%0, 1b"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
}
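The ll (load-linked) / sc (store-conditional) pair above retries until the store succeeds without an intervening write to the word. A minimal C sketch of the same retry semantics, using GCC's __sync_val_compare_and_swap builtin as a stand-in for ll/sc (illustration only, not the kernel's implementation):

	/* Sketch: models the ll/sc retry loop with a compare-and-swap. */
	static void sketch_atomic_add(int i, atomic_t *v)
	{
		int old;

		do {
			old = v->counter;	/* "ll": sample the counter */
		} while (__sync_val_compare_and_swap(&v->counter,
						     old, old + i) != old);
						/* "sc" failed: retry */
	}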
/*
@@ -153,18 +136,15 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
+ ".set\tnoreorder\t\t\t# atomic_add_return\n"
"1:\tll\t%1,%2\n\t"
"addu\t%0,%1,%3\n\t"
"sc\t%0,%2\n\t"
"beqz\t%0,1b\n\t"
"addu\t%0,%1,%3\n\t"
".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
return result;
}
@@ -174,18 +154,15 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
unsigned long temp, result;
__asm__ __volatile__(
- ".set\tnoreorder\n"
+ ".set\tnoreorder\t\t\t# atomic_sub_return\n"
"1:\tll\t%1,%2\n\t"
"subu\t%0,%1,%3\n\t"
"sc\t%0,%2\n\t"
"beqz\t%0,1b\n\t"
"subu\t%0,%1,%3\n\t"
".set\treorder"
- :"=&r" (result),
- "=&r" (temp),
- "=m" (__atomic_fool_gcc(v))
- :"Ir" (i),
- "m" (__atomic_fool_gcc(v)));
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
return result;
}
@@ -201,4 +178,4 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec(v) atomic_sub(1,(v))
#endif /* defined(__KERNEL__) */
-#endif /* __ASM_MIPS_ATOMIC_H */
+#endif /* __ASM_ATOMIC_H */
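For reference, a typical caller-side pattern for these primitives, e.g. a hypothetical reference count (not part of this patch):

	static atomic_t refcnt = ATOMIC_INIT(1);

	static void get_ref(void)
	{
		atomic_inc(&refcnt);
	}

	static void put_ref(void)
	{
		/* atomic_sub_return() hands back the new value atomically */
		if (atomic_sub_return(1, &refcnt) == 0)
			release_object();	/* hypothetical cleanup */
	}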
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index 9f209e847..da1f33cc7 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -19,6 +19,12 @@
#include <linux/config.h>
/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+/*
* Only disable interrupt for kernel mode stuff to keep usermode stuff
* that dares to use kernel include files alive.
*/
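The new smp_mb__before_clear_bit()/smp_mb__after_clear_bit() hooks exist because clear_bit() does not order surrounding accesses by itself. A typical unlock-style usage, with hypothetical names:

	/* Publish the result, then drop the busy flag. */
	dev->status = result;
	smp_mb__before_clear_bit();	/* order the store before the clear */
	clear_bit(DEV_BUSY, &dev->flags);	/* DEV_BUSY is hypothetical */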
@@ -35,25 +41,6 @@
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */
-/*
- * Note that the bit operations are defined on arrays of 32 bit sized
- * elements. With respect to a future 64 bit implementation it is
- * wrong to use long *. Use u32 * or int *.
- */
-extern __inline__ void set_bit(int nr, volatile void *addr);
-extern __inline__ void clear_bit(int nr, volatile void *addr);
-extern __inline__ void change_bit(int nr, volatile void *addr);
-extern __inline__ int test_and_set_bit(int nr, volatile void *addr);
-extern __inline__ int test_and_clear_bit(int nr, volatile void *addr);
-extern __inline__ int test_and_change_bit(int nr, volatile void *addr);
-
-extern __inline__ int test_bit(int nr, volatile void *addr);
-#ifndef __MIPSEB__
-extern __inline__ int find_first_zero_bit (void *addr, unsigned size);
-#endif
-extern __inline__ int find_next_zero_bit (void * addr, int size, int offset);
-extern __inline__ unsigned long ffz(unsigned long word);
-
#if defined(CONFIG_CPU_HAS_LLSC)
#include <asm/mipsregs.h>
@@ -74,8 +61,16 @@ set_bit(int nr, volatile void *addr)
"or\t%0, %2\n\t"
"sc\t%0, %1\n\t"
"beqz\t%0, 1b"
- :"=&r" (temp), "=m" (*m)
- :"ir" (1UL << (nr & 0x1f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & 0x1f)), "m" (*m));
+}
+
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ unsigned long * m = ((unsigned long *) addr) + (nr >> 5);
+
+ *m |= 1UL << (nr & 31);
}
extern __inline__ void
@@ -89,8 +84,8 @@ clear_bit(int nr, volatile void *addr)
"and\t%0, %2\n\t"
"sc\t%0, %1\n\t"
"beqz\t%0, 1b\n\t"
- :"=&r" (temp), "=m" (*m)
- :"ir" (~(1UL << (nr & 0x1f))), "m" (*m));
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
extern __inline__ void
@@ -104,10 +99,15 @@ change_bit(int nr, volatile void *addr)
"xor\t%0, %2\n\t"
"sc\t%0, %1\n\t"
"beqz\t%0, 1b"
- :"=&r" (temp), "=m" (*m)
- :"ir" (1UL << (nr & 0x1f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m)
+ : "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
+/*
+ * It will also imply a memory barrier, thus it must clobber memory
+ * to make sure to reload anything that was cached into registers
+ * outside _this_ critical section.
+ */
extern __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
@@ -122,12 +122,27 @@ test_and_set_bit(int nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x1f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
return res != 0;
}
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
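The double-underscore variants trade atomicity for speed and are only safe when exclusion is guaranteed by other means. A short sketch of the distinction, with hypothetical names:

	/* Paths that can race must use the atomic form: */
	if (test_and_set_bit(TX_PENDING, &dev->flags))
		return;			/* another CPU already set it */

	/* During single-threaded init nobody else can see 'flags' yet: */
	__set_bit(TX_PENDING, &dev->flags);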
extern __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
@@ -143,12 +158,27 @@ test_and_clear_bit(int nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x1f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
return res != 0;
}
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+
+ return retval;
+}
+
extern __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
@@ -163,8 +193,9 @@ test_and_change_bit(int nr, volatile void *addr)
"beqz\t%2, 1b\n\t"
" and\t%2, %0, %3\n\t"
".set\treorder"
- :"=&r" (temp), "=m" (*m), "=&r" (res)
- :"r" (1UL << (nr & 0x1f)), "m" (*m));
+ : "=&r" (temp), "=m" (*m), "=&r" (res)
+ : "r" (1UL << (nr & 0x1f)), "m" (*m)
+ : "memory");
return res != 0;
}
@@ -184,6 +215,18 @@ extern __inline__ void set_bit(int nr, volatile void * addr)
__bi_restore_flags(flags);
}
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ void __set_bit(int nr, volatile void * addr)
+{
+ int mask;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+/* WARNING: non atomic and it can be reordered! */
extern __inline__ void clear_bit(int nr, volatile void * addr)
{
int mask;
@@ -226,6 +269,20 @@ extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
return retval;
}
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ int __test_and_set_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+
+ return retval;
+}
+
extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -242,6 +299,20 @@ extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
return retval;
}
+/* WARNING: non atomic and it can be reordered! */
+extern __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
+{
+ int mask, retval;
+ volatile int *a = addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+
+ return retval;
+}
+
extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
@@ -310,13 +381,9 @@ extern __inline__ int find_first_zero_bit (void *addr, unsigned size)
".set\tat\n\t"
".set\treorder\n"
"2:"
- : "=r" (res),
- "=r" (dummy),
- "=r" (addr)
- : "0" ((signed int) 0),
- "1" ((unsigned int) 0xffffffff),
- "2" (addr),
- "r" (size)
+ : "=r" (res), "=r" (dummy), "=r" (addr)
+ : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
+ "2" (addr), "r" (size)
: "$1");
return res;
@@ -345,11 +412,8 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
".set\tat\n\t"
".set\treorder\n"
"1:"
- : "=r" (set),
- "=r" (dummy)
- : "0" (0),
- "1" (1 << bit),
- "r" (*p)
+ : "=r" (set), "=r" (dummy)
+ : "0" (0), "1" (1 << bit), "r" (*p)
: "$1");
if (set < (32 - bit))
return set + offset;
diff --git a/include/asm-mips/fcntl.h b/include/asm-mips/fcntl.h
index 8417da12e..f7a6ada7f 100644
--- a/include/asm-mips/fcntl.h
+++ b/include/asm-mips/fcntl.h
@@ -1,13 +1,12 @@
-/* $Id: fcntl.h,v 1.4 1998/09/19 19:19:36 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
*/
-#ifndef __ASM_MIPS_FCNTL_H
-#define __ASM_MIPS_FCNTL_H
+#ifndef __ASM_FCNTL_H
+#define __ASM_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
@@ -60,6 +59,9 @@
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
+/* for leases */
+#define F_INPROGRESS 16
+
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
@@ -67,6 +69,11 @@
blocking */
#define LOCK_UN 8 /* remove lock */
+#define LOCK_MAND 32 /* This is a mandatory flock */
+#define LOCK_READ 64 /* ... Which allows concurrent read operations */
+#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
+#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
+
typedef struct flock {
short l_type;
short l_whence;
@@ -85,4 +92,6 @@ typedef struct flock64 {
pid_t l_pid;
} flock64_t;
-#endif /* __ASM_MIPS_FCNTL_H */
+#define F_LINUX_SPECIFIC_BASE 1024
+
+#endif /* __ASM_FCNTL_H */
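The LOCK_MAND bits extend flock() with mandatory-share semantics. A user-space sketch, assuming the running kernel accepts LOCK_MAND:

	#include <sys/file.h>

	/* fd is assumed to be an open file descriptor. */
	if (flock(fd, LOCK_MAND | LOCK_READ) < 0)
		perror("flock");	/* lock denied or unsupported */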
diff --git a/include/asm-mips/resource.h b/include/asm-mips/resource.h
index af41618ba..718e983e6 100644
--- a/include/asm-mips/resource.h
+++ b/include/asm-mips/resource.h
@@ -22,8 +22,9 @@
#define RLIMIT_RSS 7 /* max resident set size */
#define RLIMIT_NPROC 8 /* max number of processes */
#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
-#define RLIM_NLIMITS 10 /* Number of limit flavors. */
+#define RLIM_NLIMITS 11 /* Number of limit flavors. */
/*
* SuS says limits have to be unsigned.
@@ -45,6 +46,7 @@
{ RLIM_INFINITY, RLIM_INFINITY }, \
{ 0, 0 }, \
{ RLIM_INFINITY, RLIM_INFINITY }, \
+ { RLIM_INFINITY, RLIM_INFINITY }, \
}
#endif /* __KERNEL__ */
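RLIMIT_LOCKS caps the number of file locks and leases a process may hold. It is queried and set like any other limit; a user-space sketch:

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_LOCKS, &rl) == 0)
			printf("locks: cur=%lu max=%lu\n",
			       (unsigned long) rl.rlim_cur,
			       (unsigned long) rl.rlim_max);
		return 0;
	}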
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index f8774022f..9be99be4b 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -59,7 +59,7 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
atomic_set(&sem->count, val);
atomic_set(&sem->waking, 0);
@@ -84,7 +84,7 @@ asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -93,7 +93,7 @@ extern inline void down(struct semaphore * sem)
__down(sem);
}
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
@@ -107,7 +107,7 @@ extern inline int down_interruptible(struct semaphore * sem)
#if !defined(CONFIG_CPU_HAS_LLSC)
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
@@ -139,7 +139,7 @@ extern inline int down_trylock(struct semaphore * sem)
* }
* }
*/
-extern inline int down_trylock(struct semaphore * sem)
+static inline int down_trylock(struct semaphore * sem)
{
long ret, tmp, tmp2, sub;
@@ -180,7 +180,7 @@ extern inline int down_trylock(struct semaphore * sem)
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -247,7 +247,7 @@ struct rw_semaphore {
#define DECLARE_RWSEM_WRITE_LOCKED(name) \
__DECLARE_RWSEM_GENERIC(name, 0)
-extern inline void init_rwsem(struct rw_semaphore *sem)
+static inline void init_rwsem(struct rw_semaphore *sem)
{
atomic_set(&sem->count, RW_LOCK_BIAS);
sem->granted = 0;
@@ -265,7 +265,7 @@ extern void __down_read(struct rw_semaphore *sem, int count);
extern void __down_write(struct rw_semaphore *sem, int count);
extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
-extern inline void down_read(struct rw_semaphore *sem)
+static inline void down_read(struct rw_semaphore *sem)
{
int count;
@@ -288,7 +288,7 @@ extern inline void down_read(struct rw_semaphore *sem)
#endif
}
-extern inline void down_write(struct rw_semaphore *sem)
+static inline void down_write(struct rw_semaphore *sem)
{
int count;
@@ -317,7 +317,7 @@ extern inline void down_write(struct rw_semaphore *sem)
there was a writer waiting, and we've bumped the count to 0: we must
wake the writer up. */
-extern inline void up_read(struct rw_semaphore *sem)
+static inline void up_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
@@ -336,7 +336,7 @@ extern inline void up_read(struct rw_semaphore *sem)
/*
* Releasing the writer is easy -- just release it and wake up any sleepers.
*/
-extern inline void up_write(struct rw_semaphore *sem)
+static inline void up_write(struct rw_semaphore *sem)
{
int count;
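The switch from extern inline to static inline gives each translation unit its own out-of-line copy when one is needed, without changing the calling pattern. For reference, the usual sleep-then-check idiom (hypothetical driver code):

	static DECLARE_MUTEX(dev_sem);

	static int dev_op(void)
	{
		if (down_interruptible(&dev_sem))
			return -ERESTARTSYS;	/* woken by a signal */
		/* ... critical section ... */
		up(&dev_sem);
		return 0;
	}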
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 724d10520..4ac282bbc 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -1,5 +1,4 @@
-/* $Id: spinlock.h,v 1.8 2000/01/23 21:15:52 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -29,9 +28,6 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
-typedef struct { unsigned long a[100]; } __dummy_lock_t;
-#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-
static inline void spin_lock(spinlock_t *lock)
{
unsigned int tmp;
@@ -45,8 +41,8 @@ static inline void spin_lock(spinlock_t *lock)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock)), "=&r" (tmp)
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock), "=&r" (tmp)
+ : "o" (lock->lock)
: "memory");
}
@@ -57,8 +53,8 @@ static inline void spin_unlock(spinlock_t *lock)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(lock))
- : "o" (__dummy_lock(lock))
+ : "=o" (lock->lock)
+ : "o" (lock->lock)
: "memory");
}
@@ -92,8 +88,8 @@ static inline void read_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -111,8 +107,8 @@ static inline void read_unlock(rwlock_t *rw)
"sc\t%1, %0\n\t"
"beqz\t%1, 1b\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -129,8 +125,8 @@ static inline void write_lock(rwlock_t *rw)
"beqz\t%1, 1b\n\t"
" sync\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw)), "=&r" (tmp)
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock), "=&r" (tmp)
+ : "o" (rw->lock)
: "memory");
}
@@ -141,8 +137,8 @@ static inline void write_unlock(rwlock_t *rw)
"sync\n\t"
"sw\t$0, %0\n\t"
".set\treorder"
- : "=o" (__dummy_lock(rw))
- : "o" (__dummy_lock(rw))
+ : "=o" (rw->lock)
+ : "o" (rw->lock)
: "memory");
}
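Dropping __dummy_lock means the lock word itself is now the "o" memory operand, so gcc tracks exactly what the asm touches. Caller usage is unchanged; a sketch:

	static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;

	spin_lock(&list_lock);
	/* ... manipulate the shared structure ... */
	spin_unlock(&list_lock);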
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index c057c0925..35800cd5b 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -1,5 +1,4 @@
-/* $Id: system.h,v 1.20 1999/12/06 23:13:21 ralf Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -61,32 +60,30 @@ __cli(void)
: "$1", "memory");
}
-#define __save_flags(x) \
-__asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- "mfc0\t%0,$12\n\t" \
- ".set\tpop\n\t" \
- : "=r" (x) \
- : /* no inputs */ \
- : "memory")
+#define __save_flags(x) \
+__asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\treorder\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ ".set\tpop\n\t" \
+ : "=r" (x))
-#define __save_and_cli(x) \
-__asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- ".set\tnoat\n\t" \
- "mfc0\t%0,$12\n\t" \
- "ori\t$1,%0,1\n\t" \
- "xori\t$1,1\n\t" \
- ".set\tnoreorder\n\t" \
- "mtc0\t$1,$12\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- "nop\n\t" \
- ".set\tpop\n\t" \
- : "=r" (x) \
- : /* no inputs */ \
+#define __save_and_cli(x) \
+__asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\treorder\n\t" \
+ ".set\tnoat\n\t" \
+ "mfc0\t%0,$12\n\t" \
+ "ori\t$1,%0,1\n\t" \
+ "xori\t$1,1\n\t" \
+ ".set\tnoreorder\n\t" \
+ "mtc0\t$1,$12\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ ".set\tpop\n\t" \
+ : "=r" (x) \
+ : /* no inputs */ \
: "$1", "memory")
extern void __inline__
@@ -131,11 +128,14 @@ __restore_flags(int flags)
* These are probably defined overly paranoid ...
*/
#ifdef CONFIG_CPU_HAS_WB
+
#include <asm/wbflush.h>
#define rmb()
#define wmb() wbflush()
#define mb() wbflush()
-#else
+
+#else /* CONFIG_CPU_HAS_WB */
+
#define mb() \
__asm__ __volatile__( \
"# prevent instructions being moved around\n\t" \
@@ -148,6 +148,17 @@ __asm__ __volatile__( \
: "memory")
#define rmb() mb()
#define wmb() mb()
+
+#endif /* CONFIG_CPU_HAS_WB */
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
#endif
#define set_mb(var, value) \
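The smp_* barriers compile down to barrier() on UP kernels, where ordering against other CPUs cannot be observed. A classic producer/consumer sketch of their intent, with hypothetical variables:

	static int shared_data;
	static volatile int data_ready;

	/* Producer: publish the data, then the flag. */
	shared_data = compute();	/* compute() is hypothetical */
	smp_wmb();			/* data visible before the flag */
	data_ready = 1;

	/* Consumer: see the flag, then read the data. */
	while (!data_ready)
		;
	smp_rmb();			/* flag read before the data read */
	use(shared_data);		/* use() is hypothetical */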
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 2d3cc959d..c94006594 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -84,24 +84,6 @@ extern inline int verify_area(int type, const void * addr, unsigned long size)
#define __get_user(x,ptr) \
__get_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
-/*
- * The "xxx_ret" versions return constant specified in third argument, if
- * something bad happens. These macros can be optimized for the
- * case of just returning from the function xxx_ret is used.
- */
-
-#define put_user_ret(x,ptr,ret) ({ \
-if (put_user(x,ptr)) return ret; })
-
-#define get_user_ret(x,ptr,ret) ({ \
-if (get_user(x,ptr)) return ret; })
-
-#define __put_user_ret(x,ptr,ret) ({ \
-if (__put_user(x,ptr)) return ret; })
-
-#define __get_user_ret(x,ptr,ret) ({ \
-if (__get_user(x,ptr)) return ret; })
-
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
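The removed *_ret macros hid a return statement inside a macro expansion; callers now open-code the failure path, which is exactly what the old expansion did:

	/* Instead of: get_user_ret(val, ptr, -EFAULT); */
	if (get_user(val, ptr))
		return -EFAULT;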
@@ -281,16 +263,6 @@ extern void __put_user_unknown(void);
"jal\t" #destination "\n\t"
#endif
-#define copy_to_user_ret(to,from,n,retval) ({ \
-if (copy_to_user(to,from,n)) \
- return retval; \
-})
-
-#define copy_from_user_ret(to,from,n,retval) ({ \
-if (copy_from_user(to,from,n)) \
- return retval; \
-})
-
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
#define __copy_to_user(to,from,n) ({ \
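Likewise for the bulk-copy helpers; the explicit form replaces copy_*_user_ret:

	/* Instead of: copy_from_user_ret(to, from, n, -EFAULT); */
	if (copy_from_user(to, from, n))
		return -EFAULT;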