author | Harald Koerfgen <hkoerfg@web.de> | 1999-07-26 19:42:38 +0000
---|---|---
committer | Harald Koerfgen <hkoerfg@web.de> | 1999-07-26 19:42:38 +0000
commit | 14ab59aa8aba8687dc957c2186e115ac0b8ab542 (patch) |
tree | c2eb55db21b6b46ddd983e2d40196fa61f19b64c /include |
parent | 552f7f2f262b8ea12edc36f9a260b068bd10f423 (diff) |
The remaining R3000 changes. From now on the CVS will be R3000 aware. R3000 Indigo anyone? :-)
Diffstat (limited to 'include')
-rw-r--r-- | include/asm-mips/atomic.h | 30 |
-rw-r--r-- | include/asm-mips/bitops.h | 4 |
-rw-r--r-- | include/asm-mips/current.h | 4 |
-rw-r--r-- | include/asm-mips/fp.h | 10 |
-rw-r--r-- | include/asm-mips/mipsregs.h | 10 |
-rw-r--r-- | include/asm-mips/pgtable.h | 166 |
-rw-r--r-- | include/asm-mips/ptrace.h | 5 |
-rw-r--r-- | include/asm-mips/semaphore-helper.h | 62 |
-rw-r--r-- | include/asm-mips/semaphore.h | 16 |
-rw-r--r-- | include/asm-mips/softirq.h | 11 |
-rw-r--r-- | include/asm-mips/stackframe.h | 55 |
-rw-r--r-- | include/asm-mips/system.h | 39 |
12 files changed, 288 insertions, 124 deletions
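Editor's note: the bulk of the patch below adds MIPS I (R3000) fallbacks next to the existing MIPS II/III code paths. Because the R3000 has no ll/sc instructions, atomic updates fall back to masking interrupts around a plain read-modify-write, using the kernel's save_flags()/cli()/restore_flags() helpers. As an illustration only (this block is not part of the patch, though it mirrors the atomic_clear_mask() the patch adds), the pattern looks like this:

```c
/*
 * Illustrative sketch, not part of the patch: the MIPS I fallback used
 * throughout this diff.  With no ll/sc available, atomicity on a
 * uniprocessor R3000 is obtained by disabling interrupts around the
 * read-modify-write sequence.  save_flags(), cli() and restore_flags()
 * are the 2.3-era kernel helpers this tree already provides.
 */
extern __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	unsigned long temp;
	int flags;

	save_flags(flags);	/* remember the current interrupt state  */
	cli();			/* mask interrupts: the critical section */
	temp = *v;
	temp &= ~mask;		/* clear the requested bits              */
	*v = temp;
	restore_flags(flags);	/* put the interrupt state back          */
}
```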
```diff
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 5cebea94d..58e095a53 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -11,7 +11,7 @@
  *
  * Copyright (C) 1996, 1997 by Ralf Baechle
  *
- * $Id: atomic.h,v 1.3 1997/12/15 10:38:29 ralf Exp $
+ * $Id: atomic.h,v 1.5 1998/03/04 09:51:21 ralf Exp $
  */
 #ifndef __ASM_MIPS_ATOMIC_H
 #define __ASM_MIPS_ATOMIC_H
@@ -44,7 +44,7 @@ extern __inline__ void atomic_add(int i, volatile atomic_t * v)
 
 	save_flags(flags);
 	cli();
-	*v += i;
+	v->counter += i;
 	restore_flags(flags);
 }
 
@@ -54,7 +54,7 @@ extern __inline__ void atomic_sub(int i, volatile atomic_t * v)
 
 	save_flags(flags);
 	cli();
-	*v -= i;
+	v->counter -= i;
 	restore_flags(flags);
 }
 
@@ -64,9 +64,9 @@ extern __inline__ int atomic_add_return(int i, atomic_t * v)
 
 	save_flags(flags);
 	cli();
-	temp = *v;
+	temp = v->counter;
 	temp += i;
-	*v = temp;
+	v->counter = temp;
 	restore_flags(flags);
 
 	return temp;
@@ -78,13 +78,29 @@ extern __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 	save_flags(flags);
 	cli();
-	temp = *v;
+	temp = v->counter;
 	temp -= i;
-	*v = temp;
+	v->counter = temp;
 	restore_flags(flags);
 
 	return temp;
 }
+
+extern __inline__ void atomic_clear_mask(unsigned long mask, unsigned long * v)
+{
+	unsigned long temp;
+	int flags;
+
+	save_flags(flags);
+	cli();
+	temp = *v;
+	temp &= ~mask;
+	*v = temp;
+	restore_flags(flags);
+
+	return;
+}
+
 #endif
 
 #if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index d1b1d7152..55e9940ec 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -11,7 +11,7 @@
 #define __ASM_MIPS_BITOPS_H
 
 #include <linux/types.h>
-#include <linux/byteorder/swab.h>	/* sigh ... */
+#include <asm/byteorder.h>		/* sigh ... */
 
 #ifdef __KERNEL__
 
@@ -144,6 +144,8 @@ extern __inline__ int test_and_change_bit(int nr, void *addr)
 
 #else /* MIPS I */
 
+#include <asm/mipsregs.h>
+
 extern __inline__ void set_bit(int nr, void * addr)
 {
 	int mask;
diff --git a/include/asm-mips/current.h b/include/asm-mips/current.h
index bcaf22b5c..0311d92e1 100644
--- a/include/asm-mips/current.h
+++ b/include/asm-mips/current.h
@@ -1,4 +1,4 @@
-/* $Id: current.h,v 1.4 1998/07/20 17:52:19 ralf Exp $
+/* $Id: current.h,v 1.4 1998/08/25 09:21:55 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -24,7 +24,7 @@ register struct task_struct *current asm("$28");
 #define _GET_CURRENT(reg) \
 	lui	reg, %hi(kernelsp); \
 	.set	push; \
-	.set	noreorder; \
+	.set	reorder; \
 	lw	reg, %lo(kernelsp)(reg); \
 	.set	pop; \
 	ori	reg, 8191; \
diff --git a/include/asm-mips/fp.h b/include/asm-mips/fp.h
index 463478147..30c17dc83 100644
--- a/include/asm-mips/fp.h
+++ b/include/asm-mips/fp.h
@@ -1,4 +1,4 @@
-/* $Id: fp.h,v 1.1 1998/07/16 17:01:54 ralf Exp $
+/* $Id: fp.h,v 1.1 1998/07/16 19:10:04 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -12,19 +12,23 @@
  */
 #define enable_cp1() \
 	__asm__ __volatile__( \
+		".set\tpush\n\t" \
 		".set\tnoat\n\t" \
+		".set\treorder\n\t" \
 		"mfc0\t$1,$12\n\t" \
 		"or\t$1,%0\n\t" \
 		"mtc0\t$1,$12\n\t" \
-		".set\tat" \
+		".set\tpop" \
 		: : "r" (ST0_CU1));
 
 #define disable_cp1() \
 	__asm__ __volatile__( \
+		".set\tpush\n\t" \
 		".set\tnoat\n\t" \
+		".set\treorder\n\t" \
 		"mfc0\t$1,$12\n\t" \
 		"or\t$1,%0\n\t" \
 		"xor\t$1,%0\n\t" \
 		"mtc0\t$1,$12\n\t" \
-		".set\tat" \
+		".set\tpop" \
 		: : "r" (ST0_CU1));
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index ab1b4228c..72a657573 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1,4 +1,4 @@
-/* $Id: mipsregs.h,v 1.4 1998/08/25 09:21:57 ralf Exp $
+/* $Id: mipsregs.h,v 1.5 1999/04/11 17:13:57 harald Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -103,7 +103,10 @@
 #define read_32bit_cp0_register(source) \
 ({ int __res; \
 	__asm__ __volatile__( \
-	"mfc0\t%0,"STR(source) \
+	".set\tpush\n\t" \
+	".set\treorder\n\t" \
+	"mfc0\t%0,"STR(source)"\n\t" \
+	".set\tpop" \
 	: "=r" (__res)); \
 	__res;})
 
@@ -121,7 +124,8 @@
 
 #define write_32bit_cp0_register(register,value) \
 	__asm__ __volatile__( \
-	"mtc0\t%0,"STR(register) \
+	"mtc0\t%0,"STR(register)"\n\t" \
+	"nop" \
 	: : "r" (value));
 
 #define write_64bit_cp0_register(register,value) \
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index 2a85a1617..d74a241c9 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.19 1999/06/13 16:35:53 ralf Exp $
+/* $Id: pgtable.h,v 1.20 1999/07/22 01:58:28 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -110,6 +110,20 @@ extern void (*add_wired_entry)(unsigned long entrylo0, unsigned long entrylo1,
 #define _PAGE_WRITE                 (1<<2)  /* implemented in software */
 #define _PAGE_ACCESSED              (1<<3)  /* implemented in software */
 #define _PAGE_MODIFIED              (1<<4)  /* implemented in software */
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
+
+#define _PAGE_GLOBAL                (1<<8)
+#define _PAGE_VALID                 (1<<9)
+#define _PAGE_SILENT_READ           (1<<9)  /* synonym                 */
+#define _PAGE_DIRTY                 (1<<10) /* The MIPS dirty bit      */
+#define _PAGE_SILENT_WRITE          (1<<10)
+#define _CACHE_UNCACHED             (1<<11) /* R4[0246]00              */
+#define _CACHE_MASK                 (1<<11)
+#define _CACHE_CACHABLE_NONCOHERENT 0
+
+#else
+
 #define _PAGE_R4KBUG                (1<<5)  /* workaround for r4k bug  */
 #define _PAGE_GLOBAL                (1<<6)
 #define _PAGE_VALID                 (1<<7)
@@ -126,6 +140,8 @@ extern void (*add_wired_entry)(unsigned long entrylo0, unsigned long entrylo1,
 #define _CACHE_CACHABLE_ACCELERATED (7<<9)  /* R10000 only             */
 #define _CACHE_MASK                 (7<<9)
 
+#endif
+
 #define __READABLE	(_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
 #define __WRITEABLE	(_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
@@ -356,7 +372,7 @@ extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot)
 extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
-	return __pte(physpage | pgprot_val(pgprot));
+	return __pte(((physpage & PAGE_MASK) - PAGE_OFFSET) | pgprot_val(pgprot));
 }
 
 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -595,33 +611,37 @@ extern void (*update_mmu_cache)(struct vm_area_struct *vma,
 extern inline void tlb_probe(void)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"tlbp\n\t"
-		".set reorder");
+		".set pop");
 }
 
 extern inline void tlb_read(void)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"tlbr\n\t"
-		".set reorder");
+		".set pop");
 }
 
 extern inline void tlb_write_indexed(void)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"tlbwi\n\t"
-		".set reorder");
+		".set pop");
 }
 
 extern inline void tlb_write_random(void)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"tlbwr\n\t"
-		".set reorder");
+		".set pop");
 }
 
 /* Dealing with various CP0 mmu/cache related registers.
  */
@@ -632,11 +652,10 @@ extern inline unsigned long get_pagemask(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $5\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -644,11 +663,10 @@ extern inline unsigned long get_pagemask(void)
 extern inline void set_pagemask(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $5\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -658,11 +676,10 @@ extern inline unsigned long get_entrylo0(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $2\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -670,11 +687,10 @@ extern inline unsigned long get_entrylo0(void)
 extern inline void set_entrylo0(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $2\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -683,11 +699,10 @@ extern inline unsigned long get_entrylo1(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $3\n\t"
-		".set mips0\n\t"
-		".set reorder" : "=r" (val));
+		".set pop" : "=r" (val));
 	return val;
 }
 
@@ -695,11 +710,10 @@ extern inline unsigned long get_entrylo1(void)
 extern inline void set_entrylo1(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $3\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -709,11 +723,10 @@ extern inline unsigned long get_entryhi(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $10\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 
 	return val;
@@ -722,11 +735,10 @@ extern inline unsigned long get_entryhi(void)
 extern inline void set_entryhi(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $10\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -736,11 +748,10 @@ extern inline unsigned long get_index(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $0\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -748,11 +759,10 @@ extern inline unsigned long get_index(void)
 extern inline void set_index(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
-		"mtc0 %0, $0\n\t"
-		".set mips0\n\t"
+		".set push\n\t"
 		".set reorder\n\t"
+		"mtc0 %0, $0\n\t"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -762,11 +772,10 @@ extern inline unsigned long get_wired(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
-		"mfc0 %0, $6\n\t"
-		".set mips0\n\t"
+		".set push\n\t"
 		".set reorder\n\t"
+		"mfc0 %0, $6\n\t"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -774,11 +783,10 @@ extern inline unsigned long get_wired(void)
 extern inline void set_wired(unsigned long val)
 {
 	__asm__ __volatile__(
-		"\n\t.set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $6\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -788,11 +796,10 @@ extern inline unsigned long get_taglo(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $28\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -800,11 +807,10 @@ extern inline unsigned long get_taglo(void)
 extern inline void set_taglo(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $28\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -813,11 +819,10 @@ extern inline unsigned long get_taghi(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $29\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 	return val;
 }
@@ -825,11 +830,10 @@ extern inline unsigned long get_taghi(void)
 extern inline void set_taghi(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $29\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
 
@@ -839,11 +843,10 @@ extern inline unsigned long get_context(void)
 	unsigned long val;
 
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mfc0 %0, $4\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: "=r" (val));
 
 	return val;
@@ -852,11 +855,10 @@ extern inline unsigned long get_context(void)
 extern inline void set_context(unsigned long val)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n\t"
+		".set push\n\t"
+		".set reorder\n\t"
 		"mtc0 %0, $4\n\t"
-		".set mips0\n\t"
-		".set reorder"
+		".set pop"
 		: : "r" (val));
 }
diff --git a/include/asm-mips/ptrace.h b/include/asm-mips/ptrace.h
index 4a52e9440..e7db7a022 100644
--- a/include/asm-mips/ptrace.h
+++ b/include/asm-mips/ptrace.h
@@ -1,4 +1,4 @@
-/* $Id: ptrace.h,v 1.3 1998/07/10 01:14:55 ralf Exp $
+/* $Id: ptrace.h,v 1.4 1999/01/04 16:09:25 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -12,6 +12,7 @@
 #ifndef __ASM_MIPS_PTRACE_H
 #define __ASM_MIPS_PTRACE_H
 
+#include <asm/isadep.h>
 #include <linux/types.h>
 
 /* 0 - 31 are integer registers, 32 - 63 are fp registers. */
@@ -59,7 +60,7 @@ struct pt_regs {
 /*
  * Does the process account for user or for system time?
  */
-#define user_mode(regs) ((regs)->cp0_status & 0x10)
+#define user_mode(regs) ((regs)->cp0_status & KU_USER)
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
diff --git a/include/asm-mips/semaphore-helper.h b/include/asm-mips/semaphore-helper.h
index 95210135f..f80be5b9d 100644
--- a/include/asm-mips/semaphore-helper.h
+++ b/include/asm-mips/semaphore-helper.h
@@ -17,6 +17,66 @@ static inline void wake_one_more(struct semaphore * sem)
 	atomic_inc(&sem->waking);
 }
 
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
+
+/*
+ * It doesn't make sense, IMHO, to endlessly turn interrupts off and on again.
+ * Do it once and that's it. ll/sc *has* it's advantages. HK
+ */
+#define read(a) ((a)->counter)
+#define inc(a) (((a)->counter)++)
+#define dec(a) (((a)->counter)--)
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	save_and_cli(flags);
+	if (read(&sem->waking) > 0) {
+		dec(&sem->waking);
+		ret = 1;
+	}
+	restore_flags(flags);
+	return ret;
+}
+
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+						struct task_struct *tsk)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	save_and_cli(flags);
+	if (read(&sem->waking) > 0) {
+		dec(&sem->waking);
+		ret = 1;
+	} else if (signal_pending(tsk)) {
+		inc(&sem->count);
+		ret = -EINTR;
+	}
+	restore_flags(flags);
+	return ret;
+}
+
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+	int ret = 1;
+	unsigned long flags;
+
+	save_and_cli(flags);
+	if (read(&sem->waking) <= 0)
+		inc(&sem->count);
+	else {
+		dec(&sem->waking);
+		ret = 0;
+	}
+	restore_flags(flags);
+	return ret;
+}
+
+#else
+
 static inline int waking_non_zero(struct semaphore *sem)
 {
@@ -150,4 +210,6 @@ static inline int waking_non_zero_trylock(struct semaphore *sem)
 	return ret;
 }
 
+#endif
+
 #endif /* __ASM_MIPS_SEMAPHORE_HELPER_H */
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index 1f713fe49..c6486fe5b 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,4 +1,4 @@
-/* $Id: semaphore.h,v 1.6 1999/06/17 13:30:38 ralf Exp $
+/* $Id: semaphore.h,v 1.7 1999/06/22 22:12:59 tsbogend Exp $
 *
 * SMP- and interrupt-safe semaphores..
 *
@@ -92,6 +92,18 @@ extern inline int down_interruptible(struct semaphore * sem)
 	return ret;
 }
 
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
+
+extern inline int down_trylock(struct semaphore * sem)
+{
+	int ret = 0;
+	if (atomic_dec_return(&sem->count) < 0)
+		ret = __down_trylock(sem);
+	return ret;
+}
+
+#else
+
 /*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
@@ -162,6 +174,8 @@ extern inline int down_trylock(struct semaphore * sem)
 	return ret;
 }
 
+#endif
+
 /*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
diff --git a/include/asm-mips/softirq.h b/include/asm-mips/softirq.h
index 7a6e4ff5c..1e5f4a754 100644
--- a/include/asm-mips/softirq.h
+++ b/include/asm-mips/softirq.h
@@ -1,4 +1,4 @@
-/* $Id: softirq.h,v 1.5 1999/02/15 02:22:12 ralf Exp $
+/* $Id: softirq.h,v 1.6 1999/06/17 13:30:38 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -28,6 +28,12 @@ extern unsigned int local_bh_count[NR_CPUS];
 
 #define get_active_bhs()	(bh_mask & bh_active)
 
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
+
+#define clear_active_bhs(x)	atomic_clear_mask((x),&bh_active)
+
+#else
+
 static inline void clear_active_bhs(unsigned long x)
 {
 	unsigned long temp;
@@ -41,8 +47,11 @@ static inline void clear_active_bhs(unsigned long x)
 	"=m" (bh_active)
 	:"Ir" (~x),
 	 "m" (bh_active));
+
 }
 
+#endif
+
 extern inline void init_bh(int nr, void (*routine)(void))
 {
 	bh_base[nr] = routine;
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 4b3a74043..68c8eabb6 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -3,7 +3,7 @@
 *
 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Paul M. Antoine.
 *
- * $Id: stackframe.h,v 1.7 1998/05/04 09:13:01 ralf Exp $
+ * $Id: stackframe.h,v 1.8 1999/05/01 10:08:19 harald Exp $
 */
 #ifndef __ASM_MIPS_STACKFRAME_H
 #define __ASM_MIPS_STACKFRAME_H
@@ -102,9 +102,6 @@
 #define RESTORE_AT \
 	lw	$1,  PT_R1(sp); \
 
-#define RESTORE_SP \
-	lw	sp,  PT_R29(sp)
-
 #define RESTORE_TEMP \
 	lw	$24, PT_LO(sp); \
 	lw	$8, PT_R8(sp); \
@@ -131,6 +128,44 @@
 	lw	$23, PT_R23(sp); \
 	lw	$30, PT_R30(sp)
 
+#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
+
+#define RESTORE_SOME \
+	.set	push; \
+	.set	reorder; \
+	mfc0	t0, CP0_STATUS; \
+	.set	pop; \
+	ori	t0, 0x1f; \
+	xori	t0, 0x1f; \
+	mtc0	t0, CP0_STATUS; \
+	li	v1, 0xff00; \
+	and	t0, v1; \
+	lw	v0, PT_STATUS(sp); \
+	nor	v1, $0, v1; \
+	and	v0, v1; \
+	or	v0, t0; \
+	mtc0	v0, CP0_STATUS; \
+	lw	$31, PT_R31(sp); \
+	lw	$28, PT_R28(sp); \
+	lw	$25, PT_R25(sp); \
+	lw	$7,  PT_R7(sp); \
+	lw	$6,  PT_R6(sp); \
+	lw	$5,  PT_R5(sp); \
+	lw	$4,  PT_R4(sp); \
+	lw	$3,  PT_R3(sp); \
+	lw	$2,  PT_R2(sp)
+
+#define RESTORE_SP_AND_RET \
+	.set	push; \
+	.set	noreorder; \
+	lw	k0, PT_EPC(sp); \
+	lw	sp,  PT_R29(sp); \
+	jr	k0; \
+	rfe; \
+	.set	pop
+
+#else
+
 #define RESTORE_SOME \
 	.set	push; \
 	.set	reorder; \
@@ -158,12 +193,20 @@
 	lw	$3,  PT_R3(sp); \
 	lw	$2,  PT_R2(sp)
 
-#define RESTORE_ALL \
+#define RESTORE_SP_AND_RET \
+	lw	sp,  PT_R29(sp); \
+	.set	mips3; \
+	eret; \
+	.set	mips0
+
+#endif
+
+#define RESTORE_ALL_AND_RET \
 	RESTORE_SOME; \
 	RESTORE_AT; \
 	RESTORE_TEMP; \
 	RESTORE_STATIC; \
-	RESTORE_SP
+	RESTORE_SP_AND_RET
 
 /*
  * Move to kernel mode and disable interrupts.
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index ab62be3d4..a397989a0 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -1,4 +1,4 @@
-/* $Id: system.h,v 1.11 1999/06/17 13:30:39 ralf Exp $
+/* $Id: system.h,v 1.8 1999/02/15 02:22:13 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
@@ -17,14 +17,14 @@ extern __inline__ void
 __sti(void)
 {
 	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
+		".set\tpush\n\t"
+		".set\treorder\n\t"
 		".set\tnoat\n\t"
 		"mfc0\t$1,$12\n\t"
 		"ori\t$1,0x1f\n\t"
 		"xori\t$1,0x1e\n\t"
 		"mtc0\t$1,$12\n\t"
-		".set\tat\n\t"
-		".set\treorder"
+		".set\tpop\n\t"
 		: /* no outputs */
 		: /* no inputs */
 		: "$1", "memory");
@@ -41,17 +41,18 @@ extern __inline__ void
 __cli(void)
 {
 	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
+		".set\tpush\n\t"
+		".set\treorder\n\t"
 		".set\tnoat\n\t"
 		"mfc0\t$1,$12\n\t"
 		"ori\t$1,1\n\t"
 		"xori\t$1,1\n\t"
+		".set\tnoreorder\n\t"
 		"mtc0\t$1,$12\n\t"
 		"nop\n\t"
 		"nop\n\t"
 		"nop\n\t"
-		".set\tat\n\t"
-		".set\treorder"
+		".set\tpop\n\t"
 		: /* no outputs */
 		: /* no inputs */
 		: "$1", "memory");
@@ -59,26 +60,28 @@ __cli(void)
 
 #define __save_flags(x) \
 __asm__ __volatile__( \
-	".set\tnoreorder\n\t" \
+	".set\tpush\n\t" \
+	".set\treorder\n\t" \
 	"mfc0\t%0,$12\n\t" \
-	".set\treorder" \
+	".set\tpop\n\t" \
 	: "=r" (x) \
 	: /* no inputs */ \
 	: "memory")
 
 #define __save_and_cli(x) \
 __asm__ __volatile__( \
-	".set\tnoreorder\n\t" \
+	".set\tpush\n\t" \
+	".set\treorder\n\t" \
 	".set\tnoat\n\t" \
 	"mfc0\t%0,$12\n\t" \
 	"ori\t$1,%0,1\n\t" \
 	"xori\t$1,1\n\t" \
+	".set\tnoreorder\n\t" \
 	"mtc0\t$1,$12\n\t" \
 	"nop\n\t" \
 	"nop\n\t" \
 	"nop\n\t" \
-	".set\tat\n\t" \
-	".set\treorder" \
+	".set\tpop\n\t" \
 	: "=r" (x) \
 	: /* no inputs */ \
 	: "$1", "memory")
@@ -87,19 +90,21 @@ extern void __inline__
 __restore_flags(int flags)
 {
 	__asm__ __volatile__(
-		".set\tnoreorder\n\t"
+		".set\tpush\n\t"
+		".set\treorder\n\t"
 		"mfc0\t$8,$12\n\t"
 		"li\t$9,0xff00\n\t"
 		"and\t$8,$9\n\t"
 		"nor\t$9,$0,$9\n\t"
 		"and\t%0,$9\n\t"
 		"or\t%0,$8\n\t"
+		".set\tnoreorder\n\t"
 		"mtc0\t%0,$12\n\t"
 		"nop\n\t"
 		"nop\n\t"
 		"nop\n\t"
-		".set\treorder"
-		: /* no output */
+		".set\tpop\n\t"
+		:
 		: "r" (flags)
 		: "$8", "$9", "memory");
 }
@@ -171,6 +176,8 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 		: "=r" (val), "=r" (m), "=r" (dummy)
 		: "1" (m), "2" (val)
 		: "memory");
+
+	return val;
 #else
 	unsigned long flags, retval;
 
@@ -179,9 +186,9 @@ extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
 	retval = *m;
 	*m = val;
 	restore_flags(flags);
+	return retval;
 #endif /* Processor-dependent optimization */
 
-	return val;
 }
 
 /*
```
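Editor's note: the other recurring change in this patch is in the inline assembly. The old `.set noreorder`/`.set reorder` and `.set mips3`/`.set mips0` pairs are replaced by `.set push`/`.set pop`, which save and restore the assembler's option state instead of forcing a mode the surrounding code (or an R3000, in the mips3 case) may not want. A minimal sketch of the idiom, not part of the patch; the function name is invented for illustration and $12 (the CP0 status register) is used only as an example:

```c
/*
 * Illustrative sketch, not part of the patch: reading a coprocessor 0
 * register bracketed by .set push / .set pop, as done throughout the
 * diff above.  Whatever reorder/ISA settings were active before the
 * access are restored afterwards.  MIPS targets only.
 */
static __inline__ unsigned long read_c0_status_example(void)
{
	unsigned long val;

	__asm__ __volatile__(
		".set\tpush\n\t"	/* save current assembler options    */
		".set\treorder\n\t"	/* let the assembler fill delay slots */
		"mfc0\t%0, $12\n\t"	/* CP0 $12 is the status register    */
		".set\tpop"		/* restore the saved options         */
		: "=r" (val));

	return val;
}
```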