author    Ralf Baechle <ralf@linux-mips.org>  1997-04-29 21:13:14 +0000
committer <ralf@linux-mips.org>  1997-04-29 21:13:14 +0000
commit    19c9bba94152148523ba0f7ef7cffe3d45656b11 (patch)
tree      40b1cb534496a7f1ca0f5c314a523c69f1fee464 /include/asm-sparc/system.h
parent    7206675c40394c78a90e74812bbdbf8cf3cca1be (diff)
Import of Linux/MIPS 2.1.36
Diffstat (limited to 'include/asm-sparc/system.h')
-rw-r--r--  include/asm-sparc/system.h  255
1 file changed, 161 insertions(+), 94 deletions(-)
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 1ec202c8e..e1d33e436 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -1,4 +1,4 @@
-/* $Id: system.h,v 1.42 1996/09/30 02:23:21 davem Exp $ */
+/* $Id: system.h,v 1.58 1997/04/18 05:44:54 davem Exp $ */
#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H
@@ -10,10 +10,11 @@
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/psr.h>
-#endif
+#include <asm/ptrace.h>
#define EMPTY_PGT (&empty_bad_page)
#define EMPTY_PGE (&empty_bad_page_table)
+#endif /* __KERNEL__ */
#ifndef __ASSEMBLY__
@@ -28,8 +29,12 @@ enum sparc_cpu {
sun4e = 0x04,
sun4u = 0x05, /* V8 ploos ploos */
sun_unknown = 0x06,
+ ap1000 = 0x07, /* almost a sun4m */
};
+/* Really, userland should not be looking at any of this... */
+#ifdef __KERNEL__
+
extern enum sparc_cpu sparc_cpu_model;
extern unsigned long empty_bad_page;
@@ -45,28 +50,24 @@ extern struct linux_romvec *romvec;
* frames are up to date.
*/
extern void flush_user_windows(void);
+extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth);
#ifdef __SMP__
#define SWITCH_ENTER \
- cli(); \
if(prev->flags & PF_USEDFPU) { \
+ put_psr(get_psr() | PSR_EF); \
fpsave(&prev->tss.float_regs[0], &prev->tss.fsr, \
&prev->tss.fpqueue[0], &prev->tss.fpqdepth); \
prev->flags &= ~PF_USEDFPU; \
prev->tss.kregs->psr &= ~PSR_EF; \
- } \
- prev->lock_depth = syscall_count; \
- kernel_counter += (next->lock_depth - prev->lock_depth); \
- syscall_count = next->lock_depth;
+ }
-#define SWITCH_EXIT sti();
#define SWITCH_DO_LAZY_FPU
#else
#define SWITCH_ENTER
-#define SWITCH_EXIT
#define SWITCH_DO_LAZY_FPU if(last_task_used_math != next) next->tss.kregs->psr&=~PSR_EF;
#endif
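
The new SMP SWITCH_ENTER above no longer toggles interrupts or juggles the
global-lock counters; it only dumps the FPU state of the outgoing task, and it
first raises PSR_EF so the FPU is actually enabled while fpsave() runs. Only
tasks flagged PF_USEDFPU pay that cost. A minimal user-space model of the
lazy-save bookkeeping (the struct, array and function names here are invented
for illustration and merely stand in for task_struct, tss.float_regs and
fpsave(); this is a sketch, not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define PF_USEDFPU 0x1              /* task touched the FPU since last switch */

    struct fake_task {                  /* stand-in for the relevant task_struct bits */
            unsigned long flags;
            double fpregs[32];          /* stand-in for tss.float_regs */
    };

    static double live_fpu[32];         /* stand-in for the hardware FPU registers */

    /* Model of SWITCH_ENTER: save FPU state only if the outgoing task used it. */
    static void switch_enter(struct fake_task *prev)
    {
            if (prev->flags & PF_USEDFPU) {
                    memcpy(prev->fpregs, live_fpu, sizeof(live_fpu)); /* "fpsave" */
                    prev->flags &= ~PF_USEDFPU;  /* a later FPU trap reloads lazily */
            }
    }

    int main(void)
    {
            struct fake_task a = { PF_USEDFPU };
            live_fpu[0] = 3.14;
            switch_enter(&a);
            printf("saved f0 = %f, flags = %lx\n", a.fpregs[0], a.flags);
            return 0;
    }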
@@ -87,76 +88,81 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
switch_to_context(next); \
task_pc = ((unsigned long) &&here) - 0x8; \
__asm__ __volatile__( \
- "rd\t%%psr, %%g4\n\t" \
- "std\t%%sp, [%%g6 + %3]\n\t" \
- "rd\t%%wim, %%g5\n\t" \
- "wr\t%%g4, 0x20, %%psr\n\t" \
- "std\t%%g4, [%%g6 + %2]\n\t" \
- "mov\t%1, %%g6\n\t" \
- "ldd\t[%%g6 + %2], %%g4\n\t" \
- "st\t%1, [%0]\n\t" \
- "wr\t%%g4, 0x20, %%psr\n\t" \
- "ldd\t[%%g6 + %3], %%sp\n\t" \
- "wr\t%%g5, 0x0, %%wim\n\t" \
- "ldd\t[%%sp + 0x00], %%l0\n\t" \
- "ldd\t[%%sp + 0x08], %%l2\n\t" \
- "ldd\t[%%sp + 0x10], %%l4\n\t" \
- "ldd\t[%%sp + 0x18], %%l6\n\t" \
- "ldd\t[%%sp + 0x20], %%i0\n\t" \
- "ldd\t[%%sp + 0x28], %%i2\n\t" \
- "ldd\t[%%sp + 0x30], %%i4\n\t" \
- "ldd\t[%%sp + 0x38], %%i6\n\t" \
- "wr\t%%g4, 0x0, %%psr\n\t" \
+ "rd %%psr, %%g4\n\t" \
+ "std %%sp, [%%g6 + %3]\n\t" \
+ "rd %%wim, %%g5\n\t" \
+ "wr %%g4, 0x20, %%psr\n\t" \
+ "nop\n\t" \
+ "std %%g4, [%%g6 + %2]\n\t" \
+ "ldd [%1 + %2], %%g4\n\t" \
+ "mov %1, %%g6\n\t" \
+ "st %1, [%0]\n\t" \
+ "wr %%g4, 0x20, %%psr\n\t" \
+ "nop\n\t" \
+ "nop\n\t" \
+ "ldd [%%g6 + %3], %%sp\n\t" \
+ "wr %%g5, 0x0, %%wim\n\t" \
+ "ldd [%%sp + 0x00], %%l0\n\t" \
+ "ldd [%%sp + 0x38], %%i6\n\t" \
+ "wr %%g4, 0x0, %%psr\n\t" \
+ "nop\n\t" \
"nop\n\t" \
- "jmpl\t%%o7 + 0x8, %%g0\n\t" \
+ "jmpl %%o7 + 0x8, %%g0\n\t" \
" nop\n\t" : : "r" (&(current_set[smp_processor_id()])), "r" (next), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.kpsr)), \
- "i" ((const unsigned long)(&((struct task_struct *)0)->tss.ksp)), \
- "r" (task_pc) : "g4", "g5"); \
-here: SWITCH_EXIT } while(0)
+ "i" ((const unsigned long)(&((struct task_struct *)0)->tss.kpsr)), \
+ "i" ((const unsigned long)(&((struct task_struct *)0)->tss.ksp)), \
+ "r" (task_pc) \
+ : "g1", "g2", "g3", "g4", "g5", "g7", "l2", "l3", \
+ "l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "o0", "o1", "o2", \
+ "o3"); \
+here: } while(0)
/* Changing the IRQ level on the Sparc. We now avoid writing the psr
* whenever possible.
*/
extern __inline__ void setipl(unsigned long __orig_psr)
{
- __asm__ __volatile__("wr\t%0, 0x0, %%psr\n\t"
- "nop; nop; nop;" : : "r" (__orig_psr) : "memory");
+ __asm__ __volatile__("
+ wr %0, 0x0, %%psr
+ nop; nop; nop
+" : /* no outputs */
+ : "r" (__orig_psr)
+ : "memory", "cc");
}
-extern __inline__ void cli(void)
+extern __inline__ void __cli(void)
{
unsigned long tmp;
- __asm__ __volatile__("rd\t%%psr, %0\n\t"
- "andcc\t%0, %1, %%g0\n\t"
- "be,a\t1f\n\t"
- " wr\t%0, %1, %%psr\n"
- "1:\tnop; nop"
- : "=r" (tmp)
- : "i" (PSR_PIL)
- : "memory");
+ __asm__ __volatile__("
+ rd %%psr, %0
+ or %0, %1, %0
+ wr %0, 0x0, %%psr
+ nop; nop; nop
+" : "=r" (tmp)
+ : "i" (PSR_PIL)
+ : "memory");
}
-extern __inline__ void sti(void)
+extern __inline__ void __sti(void)
{
unsigned long tmp;
- __asm__ __volatile__("rd\t%%psr, %0\n\t"
- "andcc\t%0, %1, %%g0\n\t"
- "bne,a\t1f\n\t"
- " wr\t%0, %1, %%psr\n"
- "1:\tnop; nop"
- : "=r" (tmp)
- : "i" (PSR_PIL)
- : "memory");
+ __asm__ __volatile__("
+ rd %%psr, %0
+ andn %0, %1, %0
+ wr %0, 0x0, %%psr
+ nop; nop; nop
+" : "=r" (tmp)
+ : "i" (PSR_PIL)
+ : "memory");
}
extern __inline__ unsigned long getipl(void)
{
unsigned long retval;
- __asm__ __volatile__("rd\t%%psr, %0" : "=r" (retval));
+ __asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
return retval;
}
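
Both __cli() and __sti() above now work purely on the interrupt-level field of
%psr: __cli() ORs PSR_PIL in (masking device interrupts), __sti() clears it
with andn, and the three nops cover the write-PSR delay slots. A small
host-side model of just that bit manipulation, assuming the usual SPARC v8
layout with the PIL field at bits 8-11 (a simulation, not kernel code):

    #include <stdio.h>

    #define PSR_PIL 0x00000f00UL        /* processor interrupt level field of %psr */

    static unsigned long psr = 0x04000080UL;    /* arbitrary starting value */

    static void model_cli(void) { psr |= PSR_PIL; }     /* PIL -> 15: IRQs masked   */
    static void model_sti(void) { psr &= ~PSR_PIL; }    /* PIL -> 0: IRQs unmasked  */

    int main(void)
    {
            model_cli();
            printf("after cli: psr=%08lx pil=%lu\n", psr, (psr & PSR_PIL) >> 8);
            model_sti();
            printf("after sti: psr=%08lx pil=%lu\n", psr, (psr & PSR_PIL) >> 8);
            return 0;
    }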
@@ -164,16 +170,22 @@ extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
{
unsigned long retval, tmp1, tmp2;
- __asm__ __volatile__("rd\t%%psr, %0\n\t"
- "and\t%0, %4, %1\n\t"
- "and\t%3, %4, %2\n\t"
- "xorcc\t%1, %2, %%g0\n\t"
- "bne,a\t1f\n\t"
- " wr %0, %4, %%psr\n"
- "1:\tnop; nop"
- : "=r" (retval), "=r" (tmp1), "=r" (tmp2)
- : "r" (__new_psr), "i" (PSR_PIL)
- : "memory");
+ __asm__ __volatile__("
+ rd %%psr, %0
+ and %0, %4, %1
+ and %3, %4, %2
+ xorcc %1, %2, %%g0
+ be 1f
+ nop
+ wr %0, %4, %%psr
+ nop
+ nop
+ nop
+1:
+" : "=r" (retval), "=r" (tmp1), "=r" (tmp2)
+ : "r" (__new_psr), "i" (PSR_PIL)
+ : "memory", "cc");
+
return retval;
}
@@ -181,22 +193,76 @@ extern __inline__ unsigned long read_psr_and_cli(void)
{
unsigned long retval;
- __asm__ __volatile__("rd\t%%psr, %0\n\t"
- "andcc\t%0, %1, %%g0\n\t"
- "be,a\t1f\n\t"
- " wr\t%0, %1, %%psr\n"
- "1:\tnop; nop"
- : "=r" (retval)
- : "i" (PSR_PIL)
- : "memory");
+ __asm__ __volatile__("
+ rd %%psr, %0
+ or %0, %1, %%g1
+ wr %%g1, 0x0, %%psr
+ nop; nop; nop
+" : "=r" (retval)
+ : "i" (PSR_PIL)
+ : "g1", "memory");
+
return retval;
}
-extern char spdeb_buf[256];
+#define __save_flags(flags) ((flags) = getipl())
+#define __save_and_cli(flags) ((flags) = read_psr_and_cli())
+#define __restore_flags(flags) setipl((flags))
+
+#ifdef __SMP__
+
+/* Visit arch/sparc/lib/irqlock.S for all the fun details... */
+#define cli() __asm__ __volatile__("mov %%o7, %%g4\n\t" \
+ "call ___global_cli\n\t" \
+ " rd %%tbr, %%g7" : : \
+ : "g1", "g2", "g3", "g4", "g5", "g7", \
+ "memory", "cc")
+
+#define sti() \
+do { register unsigned long bits asm("g7"); \
+ bits = 0; \
+ __asm__ __volatile__("mov %%o7, %%g4\n\t" \
+ "call ___global_sti\n\t" \
+ " rd %%tbr, %%g2" \
+ : /* no outputs */ \
+ : "r" (bits) \
+ : "g1", "g2", "g3", "g4", "g5", \
+ "memory", "cc"); \
+} while(0)
+
+extern unsigned char global_irq_holder;
+
+#define save_flags(x) \
+do { int cpuid; \
+ __asm__ __volatile__("rd %%tbr, %0; srl %0, 12, %0; and %0, 3, %0" \
+ : "=r" (cpuid)); \
+ ((x) = ((global_irq_holder == (unsigned char) cpuid) ? 1 : \
+ ((getipl() & PSR_PIL) ? 2 : 0))); \
+} while(0)
+
+#define restore_flags(flags) \
+do { register unsigned long bits asm("g7"); \
+ bits = flags; \
+ __asm__ __volatile__("mov %%o7, %%g4\n\t" \
+ "call ___global_restore_flags\n\t" \
+ " andcc %%g7, 0x1, %%g0" \
+ : "=&r" (bits) \
+ : "0" (bits) \
+ : "g1", "g2", "g3", "g4", "g5", \
+ "memory", "cc"); \
+} while(0)
+
+#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
+
+#else
+
+#define cli() __cli()
+#define sti() __sti()
+#define save_flags(x) __save_flags(x)
+#define restore_flags(x) __restore_flags(x)
+#define save_and_cli(x) __save_and_cli(x)
-#define save_flags(flags) ((flags) = getipl())
-#define save_and_cli(flags) ((flags) = read_psr_and_cli())
-#define restore_flags(flags) setipl((flags))
+#endif
/* XXX Change this if we ever use a PSO mode kernel. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
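
On SMP the cli()/sti()/save_flags()/restore_flags() definitions above funnel
through the global IRQ lock helpers in arch/sparc/lib/irqlock.S, while the
uniprocessor build maps them straight onto the local __cli()/__sti()
primitives; either way callers follow the same pattern. A hypothetical driver
fragment showing that calling convention (shared_count and bump_shared_count()
are invented for illustration; only the flag macros come from this header,
assumed to be pulled in via <asm/system.h> inside kernel code):

    static int shared_count;            /* data also touched from an IRQ handler */

    static void bump_shared_count(void)
    {
            unsigned long flags;

            save_flags(flags);          /* remember the current IRQ state         */
            cli();                      /* local PIL mask, or global lock on SMP  */
            shared_count++;             /* critical section                       */
            restore_flags(flags);       /* put the previous IRQ state back        */
    }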
@@ -205,24 +271,21 @@ extern char spdeb_buf[256];
extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
+ register unsigned long *ptr asm("g1");
+ register unsigned long ret asm("g2");
+
+ ptr = (unsigned long *) m;
+ ret = val;
+
__asm__ __volatile__("
- rd %%psr, %%g3
- andcc %%g3, %3, %%g0
- be,a 1f
- wr %%g3, %3, %%psr
-1: ld [%1], %%g2
- andcc %%g3, %3, %%g0
- st %2, [%1]
- be,a 1f
- wr %%g3, 0x0, %%psr
-1: nop
- mov %%g2, %0
- "
- : "=&r" (val)
- : "r" (m), "0" (val), "i" (PSR_PIL)
- : "g2", "g3");
-
- return val;
+ mov %%o7, %%g4
+ call ___xchg32
+ add %%o7, 8, %%o7
+" : "=&r" (ret)
+ : "0" (ret), "r" (ptr)
+ : "g3", "g4", "g7", "memory", "cc");
+
+ return ret;
}
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
@@ -240,6 +303,10 @@ static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
return x;
}
+extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
+
+#endif /* __KERNEL__ */
+
#endif /* __ASSEMBLY__ */
#endif /* !(__SPARC_SYSTEM_H) */
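
The rewritten xchg_u32() above branches out of line to the ___xchg32 helper
instead of open-coding a PIL-protected load/store, but the xchg() interface is
unchanged: it atomically stores the new value and hands back the old one. A
hypothetical test-and-set use of that interface (resource_busy and the two
helpers are invented for illustration; only xchg() comes from this header):

    static volatile unsigned long resource_busy = 0;

    static int try_grab_resource(void)
    {
            /* xchg() swaps in 1 and returns the previous value atomically;
             * a returned 0 means this caller won the race. */
            return xchg(&resource_busy, 1UL) == 0;
    }

    static void release_resource(void)
    {
            resource_busy = 0;
    }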