From e308faf24f68e262d92d294a01ddca7a17e76762 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Sun, 20 Jul 1997 14:56:40 +0000
Subject: Sync with Linux 2.1.46.

---
 include/asm-sparc64/asm_offsets.h |  80 +++++------
 include/asm-sparc64/atomic.h      |  86 +++++------
 include/asm-sparc64/bitops.h      |  74 ++++------
 include/asm-sparc64/byteorder.h   |  14 +-
 include/asm-sparc64/checksum.h    |  51 ++++---
 include/asm-sparc64/delay.h       |   6 +-
 include/asm-sparc64/elf.h         |   4 +-
 include/asm-sparc64/fbio.h        |  49 +++++--
 include/asm-sparc64/floppy.h      |   4 +-
 include/asm-sparc64/fpumacro.h    |  68 +++------
 include/asm-sparc64/fs_mount.h    |  44 ------
 include/asm-sparc64/hardirq.h     |   4 +-
 include/asm-sparc64/head.h        |  71 ++++-----
 include/asm-sparc64/ioctls.h      |  13 +-
 include/asm-sparc64/mmu_context.h |  77 ++++------
 include/asm-sparc64/namei.h       |   5 +-
 include/asm-sparc64/page.h        |  17 ++-
 include/asm-sparc64/pgtable.h     | 294 ++++++++++----------------------------
 include/asm-sparc64/processor.h   |  70 ++++-----
 include/asm-sparc64/psrcompat.h   |  28 +---
 include/asm-sparc64/pstate.h      |   8 +-
 include/asm-sparc64/ptrace.h      |  28 +++-
 include/asm-sparc64/reg.h         |  31 +++-
 include/asm-sparc64/resource.h    |   3 +-
 include/asm-sparc64/sigcontext.h  |  31 ++--
 include/asm-sparc64/softirq.h     |   2 +
 include/asm-sparc64/spinlock.h    |  62 +++++---
 include/asm-sparc64/string.h      |  31 ++--
 include/asm-sparc64/system.h      |  63 +++----
 include/asm-sparc64/uaccess.h     | 104 +++++++-----
 include/asm-sparc64/uctx.h        |  71 +++++++
 include/asm-sparc64/unistd.h      |  38 ++---
 include/asm-sparc64/vaddrs.h      |  17 ++-
 33 files changed, 706 insertions(+), 842 deletions(-)
 delete mode 100644 include/asm-sparc64/fs_mount.h
 create mode 100644 include/asm-sparc64/uctx.h
(limited to 'include/asm-sparc64')

diff --git a/include/asm-sparc64/asm_offsets.h b/include/asm-sparc64/asm_offsets.h
index 70e6c37a4..18cf7c541 100644
--- a/include/asm-sparc64/asm_offsets.h
+++ b/include/asm-sparc64/asm_offsets.h
@@ -104,14 +104,8 @@
 #define ASIZ_task_it_virt_incr 0x00000008
 #define AOFF_task_real_timer 0x000001f8
 #define ASIZ_task_real_timer 0x00000028
-#define AOFF_task_utime 0x00000220
-#define ASIZ_task_utime 0x00000008
-#define AOFF_task_stime 0x00000228
-#define ASIZ_task_stime 0x00000008
-#define AOFF_task_cutime 0x00000230
-#define ASIZ_task_cutime 0x00000008
-#define AOFF_task_cstime 0x00000238
-#define ASIZ_task_cstime 0x00000008
+#define AOFF_task_times 0x00000220
+#define ASIZ_task_times 0x00000020
 #define AOFF_task_start_time 0x00000240
 #define ASIZ_task_start_time 0x00000008
 #define AOFF_task_min_flt 0x00000248
@@ -151,24 +145,24 @@
 #define AOFF_task_ldt 0x00000370
 #define ASIZ_task_ldt 0x00000008
 #define AOFF_task_tss 0x00000380
-#define ASIZ_task_tss 0x00000600
-#define AOFF_task_fs 0x00000980
+#define ASIZ_task_tss 0x000004c0
+#define AOFF_task_fs 0x00000840
 #define ASIZ_task_fs 0x00000008
-#define AOFF_task_files 0x00000988
+#define AOFF_task_files 0x00000848
 #define ASIZ_task_files 0x00000008
-#define AOFF_task_mm 0x00000990
+#define AOFF_task_mm 0x00000850
 #define ASIZ_task_mm 0x00000008
-#define AOFF_task_sig 0x00000998
+#define AOFF_task_sig 0x00000858
 #define ASIZ_task_sig 0x00000008
-#define AOFF_task_has_cpu 0x000009a0
+#define AOFF_task_has_cpu 0x00000860
 #define ASIZ_task_has_cpu 0x00000004
-#define AOFF_task_processor 0x000009a4
+#define AOFF_task_processor 0x00000864
 #define ASIZ_task_processor 0x00000004
-#define AOFF_task_last_processor 0x000009a8
+#define AOFF_task_last_processor 0x00000868
 #define ASIZ_task_last_processor 0x00000004
-#define AOFF_task_lock_depth
0x000009ac +#define AOFF_task_lock_depth 0x0000086c #define ASIZ_task_lock_depth 0x00000004 -#define AOFF_task_sigmask_lock 0x000009b0 +#define AOFF_task_sigmask_lock 0x00000870 #define ASIZ_task_sigmask_lock 0x00000000 #define AOFF_mm_mmap 0x00000000 #define ASIZ_mm_mmap 0x00000008 @@ -216,41 +210,37 @@ #define ASIZ_mm_def_flags 0x00000008 #define AOFF_mm_cpu_vm_mask 0x000000b8 #define ASIZ_mm_cpu_vm_mask 0x00000008 -#define AOFF_thread_float_regs 0x00000000 -#define ASIZ_thread_float_regs 0x00000100 -#define AOFF_thread_fsr 0x00000100 -#define ASIZ_thread_fsr 0x00000008 -#define AOFF_thread_ksp 0x00000108 +#define AOFF_thread_ksp 0x00000000 #define ASIZ_thread_ksp 0x00000008 -#define AOFF_thread_kpc 0x00000110 +#define AOFF_thread_kpc 0x00000008 #define ASIZ_thread_kpc 0x00000008 -#define AOFF_thread_wstate 0x00000118 +#define AOFF_thread_wstate 0x00000010 #define ASIZ_thread_wstate 0x00000008 -#define AOFF_thread_cwp 0x00000120 -#define ASIZ_thread_cwp 0x00000008 -#define AOFF_thread_ctx 0x00000128 -#define ASIZ_thread_ctx 0x00000008 -#define AOFF_thread_reg_window 0x00000130 +#define AOFF_thread_cwp 0x00000018 +#define ASIZ_thread_cwp 0x00000004 +#define AOFF_thread_ctx 0x0000001c +#define ASIZ_thread_ctx 0x00000004 +#define AOFF_thread_flags 0x00000020 +#define ASIZ_thread_flags 0x00000004 +#define AOFF_thread_new_signal 0x00000024 +#define ASIZ_thread_new_signal 0x00000004 +#define AOFF_thread_current_ds 0x00000028 +#define ASIZ_thread_current_ds 0x00000008 +#define AOFF_thread_w_saved 0x00000030 +#define ASIZ_thread_w_saved 0x00000008 +#define AOFF_thread_kregs 0x00000038 +#define ASIZ_thread_kregs 0x00000008 +#define AOFF_thread_reg_window 0x00000040 #define ASIZ_thread_reg_window 0x00000400 -#define AOFF_thread_rwbuf_stkptrs 0x00000530 +#define AOFF_thread_rwbuf_stkptrs 0x00000440 #define ASIZ_thread_rwbuf_stkptrs 0x00000040 -#define AOFF_thread_w_saved 0x00000570 -#define ASIZ_thread_w_saved 0x00000008 -#define AOFF_thread_flags 0x00000578 -#define ASIZ_thread_flags 0x00000008 -#define AOFF_thread_sig_address 0x00000580 +#define AOFF_thread_sig_address 0x00000480 #define ASIZ_thread_sig_address 0x00000008 -#define AOFF_thread_sig_desc 0x00000588 +#define AOFF_thread_sig_desc 0x00000488 #define ASIZ_thread_sig_desc 0x00000008 -#define AOFF_thread_sstk_info 0x00000590 +#define AOFF_thread_sstk_info 0x00000490 #define ASIZ_thread_sstk_info 0x00000010 -#define AOFF_thread_current_ds 0x000005a0 -#define ASIZ_thread_current_ds 0x00000004 -#define AOFF_thread_new_signal 0x000005a4 -#define ASIZ_thread_new_signal 0x00000004 -#define AOFF_thread_kregs 0x000005a8 -#define ASIZ_thread_kregs 0x00000008 -#define AOFF_thread_core_exec 0x000005b0 +#define AOFF_thread_core_exec 0x000004a0 #define ASIZ_thread_core_exec 0x00000020 #endif /* __ASM_OFFSETS_H__ */ diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index ec496fa17..12baf0222 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -1,8 +1,8 @@ -/* $Id: atomic.h,v 1.14 1997/04/16 05:57:06 davem Exp $ +/* $Id: atomic.h,v 1.15 1997/07/03 09:18:09 davem Exp $ * atomic.h: Thankfully the V9 is at least reasonable for this * stuff. * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1996, 1997 David S. 
Miller (davem@caip.rutgers.edu) */ #ifndef __ARCH_SPARC64_ATOMIC__ @@ -22,73 +22,63 @@ typedef struct { int counter; } atomic_t; extern __inline__ void atomic_add(int i, atomic_t *v) { - unsigned long temp0, temp1; __asm__ __volatile__(" - lduw [%3], %0 -1: - add %0, %2, %1 - cas [%3], %0, %1 - cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%3], %0 -2: -" : "=&r" (temp0), "=&r" (temp1) +1: lduw [%1], %%g1 + add %%g1, %0, %%g2 + cas [%1], %%g1, %%g2 + sub %%g1, %%g2, %%g1 + brnz,pn %%g1, 1b + nop" + : /* No outputs */ : "HIr" (i), "r" (__atomic_fool_gcc(v)) - : "cc"); + : "g1", "g2"); } extern __inline__ void atomic_sub(int i, atomic_t *v) { - unsigned long temp0, temp1; __asm__ __volatile__(" - lduw [%3], %0 -1: - sub %0, %2, %1 - cas [%3], %0, %1 - cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%3], %0 -2: -" : "=&r" (temp0), "=&r" (temp1) +1: lduw [%1], %%g1 + sub %%g1, %0, %%g2 + cas [%1], %%g1, %%g2 + sub %%g1, %%g2, %%g1 + brnz,pn %%g1, 1b + nop" + : /* No outputs */ : "HIr" (i), "r" (__atomic_fool_gcc(v)) - : "cc"); + : "g1", "g2"); } /* Same as above, but return the result value. */ extern __inline__ int atomic_add_return(int i, atomic_t *v) { - unsigned long temp0, oldval; + unsigned long oldval; __asm__ __volatile__(" - lduw [%3], %0 -1: - add %0, %2, %1 - cas [%3], %0, %1 - cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%3], %0 -2: -" : "=&r" (temp0), "=&r" (oldval) +1: lduw [%2], %%g1 + add %%g1, %1, %%g2 + cas [%2], %%g1, %%g2 + sub %%g1, %%g2, %%g1 + brnz,pn %%g1, 1b + add %%g2, %1, %0" + : "=&r" (oldval) : "HIr" (i), "r" (__atomic_fool_gcc(v)) - : "cc"); - return (((int)oldval) + 1); + : "g1", "g2"); + return (int)oldval; } extern __inline__ int atomic_sub_return(int i, atomic_t *v) { - unsigned long temp0, oldval; + unsigned long oldval; __asm__ __volatile__(" - lduw [%3], %0 -1: - sub %0, %2, %1 - cas [%3], %0, %1 - cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%3], %0 -2: -" : "=&r" (temp0), "=&r" (oldval) +1: lduw [%2], %%g1 + sub %%g1, %1, %%g2 + cas [%2], %%g1, %%g2 + sub %%g1, %%g2, %%g1 + brnz,pn %%g1, 1b + sub %%g2, %1, %0" + : "=&r" (oldval) : "HIr" (i), "r" (__atomic_fool_gcc(v)) - : "cc"); - return (((int)oldval) - 1); + : "g1", "g2"); + return (int)oldval; } #define atomic_dec_return(v) atomic_sub_return(1,(v)) diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 5060d88ae..f0d11e6ef 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -1,4 +1,4 @@ -/* $Id: bitops.h,v 1.16 1997/05/28 13:48:56 jj Exp $ +/* $Id: bitops.h,v 1.19 1997/07/08 10:17:37 davem Exp $ * bitops.h: Bit string operations on the V9. * * Copyright 1996 David S. 
Miller (davem@caip.rutgers.edu) @@ -23,21 +23,21 @@ extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void *addr) { unsigned long oldbit; unsigned long temp0, temp1; - unsigned int * m = ((unsigned int *) addr) + (nr >> 5); + unsigned long * m = ((unsigned long *) addr) + (nr >> 6); __asm__ __volatile__(" - lduw [%4], %0 + ldx [%4], %0 1: andcc %0, %3, %2 - bne,pn %%icc, 2f + bne,pn %%xcc, 2f xor %0, %3, %1 - cas [%4], %0, %1 + casx [%4], %0, %1 cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%4], %0 + bne,a,pn %%xcc, 1b + ldx [%4], %0 2: " : "=&r" (temp0), "=&r" (temp1), "=&r" (oldbit) - : "HIr" (1UL << (nr & 31)), "r" (m) + : "HIr" (1UL << (nr & 63)), "r" (m) : "cc"); return oldbit != 0; } @@ -51,21 +51,21 @@ extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void *addr) { unsigned long oldbit; unsigned long temp0, temp1; - unsigned int * m = ((unsigned int *) addr) + (nr >> 5); + unsigned long * m = ((unsigned long *) addr) + (nr >> 6); __asm__ __volatile__(" - lduw [%4], %0 + ldx [%4], %0 1: andcc %0, %3, %2 - be,pn %%icc, 2f + be,pn %%xcc, 2f xor %0, %3, %1 - cas [%4], %0, %1 + casx [%4], %0, %1 cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%4], %0 + bne,a,pn %%xcc, 1b + ldx [%4], %0 2: " : "=&r" (temp0), "=&r" (temp1), "=&r" (oldbit) - : "HIr" (1UL << (nr & 31)), "r" (m) + : "HIr" (1UL << (nr & 63)), "r" (m) : "cc"); return oldbit != 0; } @@ -79,19 +79,19 @@ extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void *addr { unsigned long oldbit; unsigned long temp0, temp1; - unsigned int * m = ((unsigned int *) addr) + (nr >> 5); + unsigned long * m = ((unsigned long *) addr) + (nr >> 6); __asm__ __volatile__(" - lduw [%4], %0 + ldx [%4], %0 1: and %0, %3, %2 xor %0, %3, %1 - cas [%4], %0, %1 + casx [%4], %0, %1 cmp %0, %1 - bne,a,pn %%icc, 1b - lduw [%4], %0 + bne,a,pn %%xcc, 1b + ldx [%4], %0 " : "=&r" (temp0), "=&r" (temp1), "=&r" (oldbit) - : "HIr" (1UL << (nr & 31)), "r" (m) + : "HIr" (1UL << (nr & 63)), "r" (m) : "cc"); return oldbit != 0; } @@ -103,7 +103,7 @@ extern __inline__ void change_bit(unsigned long nr, void *addr) extern __inline__ unsigned long test_bit(int nr, __const__ void *addr) { - return 1UL & (((__const__ int *) addr)[nr >> 5] >> (nr & 31)); + return 1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63)); } /* The easy/cheese version for now. 
*/ @@ -121,7 +121,7 @@ extern __inline__ unsigned long ffz(unsigned long word) : "0" (word) : "g1", "g2"); #else -#ifdef EASY_CHEESE_VERSION +#if 1 /* def EASY_CHEESE_VERSION */ result = 0; while(word & 1) { result++; @@ -177,13 +177,11 @@ extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long siz size -= 64; result += 64; } - offset = size >> 6; - size &= 63UL; - while (offset) { + while (size & ~63UL) { if (~(tmp = *(p++))) goto found_middle; result += 64; - offset--; + size -= 64; } if (!size) return result; @@ -260,22 +258,12 @@ extern __inline__ int test_le_bit(int nr, __const__ void * addr) #define find_first_zero_le_bit(addr, size) \ find_next_zero_le_bit((addr), (size), 0) -extern __inline__ unsigned long __swab64(unsigned long value) -{ - return (((value>>56) & 0x00000000000000ff) | - ((value>>40) & 0x000000000000ff00) | - ((value>>24) & 0x0000000000ff0000) | - ((value>>8) & 0x00000000ff000000) | - ((value<<8) & 0x000000ff00000000) | - ((value<<24) & 0x0000ff0000000000) | - ((value<<40) & 0x00ff000000000000) | - ((value<<56) & 0xff00000000000000)); -} - extern __inline__ unsigned long __swab64p(unsigned long *addr) { unsigned long ret; - __asm__ __volatile__ ("ldxa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL)); + __asm__ __volatile__ ("ldxa [%1] %2, %0" + : "=r" (ret) + : "r" (addr), "i" (ASI_PL)); return ret; } @@ -299,13 +287,11 @@ extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size -= 64; result += 64; } - offset = size >> 6; - size &= 63UL; - while(offset) { + while(size & ~63) { if(~(tmp = __swab64p(p++))) goto found_middle; result += 64; - offset--; + size -= 64; } if(!size) return result; diff --git a/include/asm-sparc64/byteorder.h b/include/asm-sparc64/byteorder.h index 2325ef29c..dce2db246 100644 --- a/include/asm-sparc64/byteorder.h +++ b/include/asm-sparc64/byteorder.h @@ -1,4 +1,4 @@ -/* $Id: byteorder.h,v 1.5 1997/05/28 11:35:41 jj Exp $ */ +/* $Id: byteorder.h,v 1.6 1997/06/14 17:35:07 davem Exp $ */ #ifndef _SPARC64_BYTEORDER_H #define _SPARC64_BYTEORDER_H @@ -56,21 +56,27 @@ extern __inline__ __u64 cpu_to_le64(__u64 value) extern __inline__ __u16 cpu_to_le16p(__u16 *addr) { __u16 ret; - __asm__ __volatile__ ("lduha [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL)); + __asm__ __volatile__ ("lduha [%1] %2, %0" + : "=r" (ret) + : "r" (addr), "i" (ASI_PL)); return ret; } extern __inline__ __u32 cpu_to_le32p(__u32 *addr) { __u32 ret; - __asm__ __volatile__ ("lduwa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL)); + __asm__ __volatile__ ("lduwa [%1] %2, %0" + : "=r" (ret) + : "r" (addr), "i" (ASI_PL)); return ret; } extern __inline__ __u64 cpu_to_le64p(__u64 *addr) { __u64 ret; - __asm__ __volatile__ ("ldxa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL)); + __asm__ __volatile__ ("ldxa [%1] %2, %0" + : "=r" (ret) + : "r" (addr), "i" (ASI_PL)); return ret; } extern __inline__ __u16 cpu_to_be16p(__u16 *addr) { return *addr; } diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h index d04abac7e..b1ff474c3 100644 --- a/include/asm-sparc64/checksum.h +++ b/include/asm-sparc64/checksum.h @@ -1,4 +1,4 @@ -/* $Id: checksum.h,v 1.8 1997/05/29 12:45:03 jj Exp $ */ +/* $Id: checksum.h,v 1.9 1997/06/26 04:05:17 davem Exp $ */ #ifndef __SPARC64_CHECKSUM_H #define __SPARC64_CHECKSUM_H @@ -108,31 +108,30 @@ extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph, * both operands. 
*/ __asm__ __volatile__(" - sub %2, 4, %%g7 - lduw [%1 + 0x00], %0 - lduw [%1 + 0x04], %%g2 - lduw [%1 + 0x08], %%g3 - addcc %%g2, %0, %0 - addccc %%g3, %0, %0 - lduw [%1 + 0x0c], %%g2 - lduw [%1 + 0x10], %%g3 - addccc %%g2, %0, %0 - addc %0, %%g0, %0 -1: - addcc %%g3, %0, %0 - add %1, 4, %1 - addccc %0, %%g0, %0 - subcc %%g7, 1, %%g7 - be,a,pt %%icc, 2f - sll %0, 16, %%g2 - ba,pt %%xcc, 1b - lduw [%1 + 0x10], %%g3 -2: - addcc %0, %%g2, %%g2 - srl %%g2, 16, %0 - addc %0, %%g0, %0 - xnor %%g0, %0, %0 - srl %0, 0, %0 + sub %2, 4, %%g7 ! IEU0 + lduw [%1 + 0x00], %0 ! Load Group + lduw [%1 + 0x04], %%g2 ! Load Group + lduw [%1 + 0x08], %%g3 ! Load Group + addcc %%g2, %0, %0 ! IEU1 1 Load Bubble + Group + lduw [%1 + 0x0c], %%g2 ! Load + addccc %%g3, %0, %0 ! Sngle Group no Bubble + lduw [%1 + 0x10], %%g3 ! Load Group + addccc %%g2, %0, %0 ! Sngle Group no Bubble + addc %0, %%g0, %0 ! Sngle Group +1: addcc %%g3, %0, %0 ! IEU1 Group no Bubble + add %1, 4, %1 ! IEU0 + addccc %0, %%g0, %0 ! Sngle Group no Bubble + subcc %%g7, 1, %%g7 ! IEU1 Group + be,a,pt %%icc, 2f ! CTI + sll %0, 16, %%g2 ! IEU0 + lduw [%1 + 0x10], %%g3 ! Load Group + ba,pt %%xcc, 1b ! CTI + nop ! IEU0 +2: addcc %0, %%g2, %%g2 ! IEU1 Group + srl %%g2, 16, %0 ! IEU0 Group regdep XXX Scheisse! + addc %0, %%g0, %0 ! Sngle Group + xnor %%g0, %0, %0 ! IEU0 Group + srl %0, 0, %0 ! IEU0 Group XXX Scheisse! " : "=r" (sum), "=&r" (iph) : "r" (ihl), "1" (iph) : "g2", "g3", "g7", "cc"); diff --git a/include/asm-sparc64/delay.h b/include/asm-sparc64/delay.h index 7923b5014..f70d99b68 100644 --- a/include/asm-sparc64/delay.h +++ b/include/asm-sparc64/delay.h @@ -1,4 +1,4 @@ -/* $Id: delay.h,v 1.4 1997/04/10 23:32:44 davem Exp $ +/* $Id: delay.h,v 1.5 1997/06/18 12:36:23 jj Exp $ * delay.h: Linux delay routines on the V9. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu). @@ -12,7 +12,9 @@ extern unsigned long loops_per_sec; extern __inline__ void __delay(unsigned long loops) { __asm__ __volatile__(" - cmp %0, 0 + b,pt %%xcc, 1f + cmp %0, 0 + .align 32 1: bne,pt %%xcc, 1b subcc %0, 1, %0 diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h index 9a43b6c3f..1cef89ff1 100644 --- a/include/asm-sparc64/elf.h +++ b/include/asm-sparc64/elf.h @@ -1,4 +1,4 @@ -/* $Id: elf.h,v 1.6 1997/05/17 11:51:27 davem Exp $ */ +/* $Id: elf.h,v 1.7 1997/06/14 21:28:07 davem Exp $ */ #ifndef __ASM_SPARC64_ELF_H #define __ASM_SPARC64_ELF_H @@ -32,7 +32,9 @@ typedef unsigned long elf_fpregset_t; /* * This is used to ensure we don't load something for the wrong architecture. */ +#ifndef elf_check_arch #define elf_check_arch(x) ((x) == ELF_ARCH) /* Might be EM_SPARC64 or EM_SPARC */ +#endif #define USE_ELF_CORE_DUMP #define ELF_EXEC_PAGESIZE 8192 diff --git a/include/asm-sparc64/fbio.h b/include/asm-sparc64/fbio.h index 3d8713468..6d2f1e730 100644 --- a/include/asm-sparc64/fbio.h +++ b/include/asm-sparc64/fbio.h @@ -48,10 +48,6 @@ struct fbtype { }; #define FBIOGTYPE _IOR('F', 0, struct fbtype) -/* Used by FBIOPUTCMAP - * - * XXX 32-bit binary compatability item... -DaveM - */ struct fbcmap { int index; /* first element (0 origin) */ int count; @@ -104,7 +100,6 @@ struct fbcurpos { #define FB_CUR_SETSHAPE 0x10 /* set shape */ #define FB_CUR_SETALL 0x1F /* all of the above */ -/* XXX 32-bit binary compatability item... 
-DaveM */ struct fbcursor { short set; /* what to set, choose from the list above */ short enable; /* cursor on/off */ @@ -143,7 +138,6 @@ struct fb_wid_item { __u32 wi_attrs; __u32 wi_values[32]; }; -/* XXX 32-bit binary compatability item... -DaveM */ struct fb_wid_list { __u32 wl_flags; __u32 wl_count; @@ -155,6 +149,21 @@ struct fb_wid_list { #define FBIO_WID_PUT _IOW('F', 32, struct fb_wid_list) #define FBIO_WID_GET _IOWR('F', 33, struct fb_wid_list) +/* Creator ioctls */ +#define FFB_IOCTL ('F'<<8) +#define FFB_SYS_INFO (FFB_IOCTL|80) +#define FFB_CLUTREAD (FFB_IOCTL|81) +#define FFB_CLUTPOST (FFB_IOCTL|82) +#define FFB_SETDIAGMODE (FFB_IOCTL|83) +#define FFB_GETMONITORID (FFB_IOCTL|84) +#define FFB_GETVIDEOMODE (FFB_IOCTL|85) +#define FFB_SETVIDEOMODE (FFB_IOCTL|86) +#define FFB_SETSERVER (FFB_IOCTL|87) +#define FFB_SETOVCTL (FFB_IOCTL|88) +#define FFB_GETOVCTL (FFB_IOCTL|89) +#define FFB_GETSAXNUM (FFB_IOCTL|90) +#define FFB_FBDEBUG (FFB_IOCTL|91) + /* Cg14 ioctls */ #define MDI_IOCTL ('M'<<8) #define MDI_RESET (MDI_IOCTL|1) @@ -179,16 +188,15 @@ struct mdi_cfginfo { */ #define MDI_CLEAR_XLUT (MDI_IOCTL|9) -/* leo ioctls */ -struct leo_clut_alloc { +/* leo & ffb ioctls */ +struct fb_clut_alloc { __u32 clutid; /* Set on return */ __u32 flag; __u32 index; }; -/* XXX 32-bit binary compatability item... -DaveM */ -struct leo_clut { -#define LEO_CLUT_WAIT 0x00000001 /* Not yet implemented */ +struct fb_clut { +#define FB_CLUT_WAIT 0x00000001 /* Not yet implemented */ __u32 flag; __u32 clutid; __u32 offset; @@ -197,10 +205,21 @@ struct leo_clut { char * green; char * blue; }; -#define LEO_CLUTALLOC _IOWR('L', 53, struct leo_clut_alloc) -#define LEO_CLUTFREE _IOW('L', 54, struct leo_clut_alloc) -#define LEO_CLUTREAD _IOW('L', 55, struct leo_clut) -#define LEO_CLUTPOST _IOW('L', 56, struct leo_clut) + +struct fb_clut32 { + __u32 flag; + __u32 clutid; + __u32 offset; + __u32 count; + __u32 red; + __u32 green; + __u32 blue; +}; + +#define LEO_CLUTALLOC _IOWR('L', 53, struct fb_clut_alloc) +#define LEO_CLUTFREE _IOW('L', 54, struct fb_clut_alloc) +#define LEO_CLUTREAD _IOW('L', 55, struct fb_clut) +#define LEO_CLUTPOST _IOW('L', 56, struct fb_clut) #define LEO_SETGAMMA _IOW('L', 68, int) /* Not yet implemented */ #define LEO_GETGAMMA _IOR('L', 69, int) /* Not yet implemented */ diff --git a/include/asm-sparc64/floppy.h b/include/asm-sparc64/floppy.h index c7aa7cc81..bbef85483 100644 --- a/include/asm-sparc64/floppy.h +++ b/include/asm-sparc64/floppy.h @@ -1,4 +1,4 @@ -/* $Id: floppy.h,v 1.2 1997/03/14 21:05:25 jj Exp $ +/* $Id: floppy.h,v 1.3 1997/07/11 03:03:22 davem Exp $ * asm-sparc64/floppy.h: Sparc specific parts of the Floppy driver. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -315,7 +315,7 @@ static int sun_floppy_init(void) /* printk("DOR @0x%p\n", &sun_fdc->dor_82077); */ /* P3 */ /* Success... 
*/ - return (int) sun_fdc; + return (int) ((unsigned long)sun_fdc); } static int sparc_eject(void) diff --git a/include/asm-sparc64/fpumacro.h b/include/asm-sparc64/fpumacro.h index f6323254d..dab134472 100644 --- a/include/asm-sparc64/fpumacro.h +++ b/include/asm-sparc64/fpumacro.h @@ -21,68 +21,44 @@ extern __inline__ void fprs_write(unsigned long val) __asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val)); } -extern __inline__ void fpsave32(unsigned int *fpregs, unsigned long *fsr) +extern __inline__ void fpsave(unsigned long *fpregs, + unsigned long *fsr, + unsigned long *gsr) { __asm__ __volatile__ (" - wr %%g0, %2, %%asi - stx %%fsr, [%1] - stda %%f0, [%0] %%asi - stda %%f16, [%0 + 64] %%asi - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); -} - -extern __inline__ void fpload32(unsigned int *fpregs, unsigned long *fsr) -{ - __asm__ __volatile__ (" - wr %%g0, %2, %%asi - ldda [%0] %%asi, %%f0 - ldda [%0 + 64] %%asi, %%f16 - ldx [%1], %%fsr - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); -} - -extern __inline__ void fpsave64hi(unsigned int *fpregs, unsigned long *fsr) -{ - __asm__ __volatile__ (" - wr %%g0, %2, %%asi - stx %%fsr, [%1] - stda %%f32, [%0 + 128] %%asi - stda %%f48, [%0 + 192] %%asi - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); -} - -extern __inline__ void fpload64hi(unsigned int *fpregs, unsigned long *fsr) -{ - __asm__ __volatile__ (" - wr %%g0, %2, %%asi - ldda [%0 + 128] %%asi, %%f32 - ldda [%0 + 192] %%asi, %%f48 - ldx [%1], %%fsr - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); -} - -extern __inline__ void fpsave(unsigned int *fpregs, unsigned long *fsr) -{ - __asm__ __volatile__ (" - wr %%g0, %2, %%asi + wr %%g0, %3, %%asi + rd %%gsr, %%g1 + membar #LoadStore | #StoreStore stx %%fsr, [%1] + stx %%g1, [%2] stda %%f0, [%0] %%asi stda %%f16, [%0 + 64] %%asi stda %%f32, [%0 + 128] %%asi stda %%f48, [%0 + 192] %%asi - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); + membar #Sync +" : /* No outputs */ + : "r" (fpregs), "r" (fsr), "r" (gsr), "i" (ASI_BLK_P) + : "g1"); } -extern __inline__ void fpload(unsigned int *fpregs, unsigned long *fsr) +extern __inline__ void fpload(unsigned long *fpregs, + unsigned long *fsr, + unsigned long *gsr) { __asm__ __volatile__ (" - wr %%g0, %2, %%asi + wr %%g0, %3, %%asi + membar #StoreLoad | #LoadLoad ldda [%0] %%asi, %%f0 ldda [%0 + 64] %%asi, %%f16 ldda [%0 + 128] %%asi, %%f32 ldda [%0 + 192] %%asi, %%f48 ldx [%1], %%fsr - " : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P)); + ldx [%2], %%g1 + wr %%g1, 0, %%gsr + membar #Sync +" : /* No outputs */ + : "r" (fpregs), "r" (fsr), "r" (gsr), "i" (ASI_BLK_P) + : "g1"); } #endif /* !(_SPARC64_FPUMACRO_H) */ diff --git a/include/asm-sparc64/fs_mount.h b/include/asm-sparc64/fs_mount.h deleted file mode 100644 index 3ad7ad698..000000000 --- a/include/asm-sparc64/fs_mount.h +++ /dev/null @@ -1,44 +0,0 @@ -/* $Id: fs_mount.h,v 1.2 1997/04/18 14:34:46 jj Exp $ - * fs_mount.h: Definitions for mount structure conversions. 
- * - * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - */ - -#ifndef __ASM_FS_MOUNT_H -#define __ASM_FS_MOUNT_H - -#if defined(CONFIG_SPARC32_COMPAT) || defined(CONFIG_SPARC32_COMPAT_MODULE) - -#include - -/* We need this to convert 32bit mount structures to 64bit */ - -extern void *do_ncp_super_data_conv(void *raw_data); -extern void *do_smb_super_data_conv(void *raw_data); - -extern __inline__ void *ncp_super_data_conv(void *raw_data) -{ - if (current->tss.flags & SPARC_FLAG_32BIT) - return do_ncp_super_data_conv(raw_data); - else - return raw_data; -} - -extern __inline__ void *smb_super_data_conv(void *raw_data) -{ - if (current->tss.flags & SPARC_FLAG_32BIT) - return do_smb_super_data_conv(raw_data); - else - return raw_data; -} - -#else /* CONFIG_SPARC32_COMPAT* */ - -#define ncp_super_data_conv(__x) __x -#define smb_super_data_conv(__x) __x - -#endif /* CONFIG_SPARC32_COMPAT* */ - -#define nfs_super_data_conv(__x) __x - -#endif /* __ASM_FS_MOUNT_H */ diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h index 4680a4095..03ee543b1 100644 --- a/include/asm-sparc64/hardirq.h +++ b/include/asm-sparc64/hardirq.h @@ -13,8 +13,8 @@ extern unsigned int local_irq_count[NR_CPUS]; #ifndef __SMP__ -#define hardirq_trylock(cpu) (++local_irq_count[cpu], (cpu)==0) -#define hardirq_endlock(cpu) (--local_irq_count[cpu]) +#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0) +#define hardirq_endlock(cpu) do { } while(0) #define hardirq_enter(cpu) (local_irq_count[cpu]++) #define hardirq_exit(cpu) (local_irq_count[cpu]--) diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h index 62fe9a08f..e3ff51686 100644 --- a/include/asm-sparc64/head.h +++ b/include/asm-sparc64/head.h @@ -1,10 +1,10 @@ -/* $Id: head.h,v 1.22 1997/06/02 06:33:40 davem Exp $ */ +/* $Id: head.h,v 1.27 1997/07/13 17:30:43 davem Exp $ */ #ifndef _SPARC64_HEAD_H #define _SPARC64_HEAD_H #include -#define KERNBASE 0xFFFFF80000000000 +#define KERNBASE 0x400000 #define BOOT_KERNEL b sparc64_boot; nop; nop; nop; nop; nop; nop; nop; /* We need a "cleaned" instruction... */ @@ -43,17 +43,6 @@ nop; \ nop; -/* Just for testing */ -#define PROM_TRAP \ - rd %pc, %g1; \ - sethi %uhi(KERNBASE), %g4; \ - sethi %hi(0xf0000000-0x8000), %g2; \ - sllx %g4, 32, %g4; \ - add %g1, %g2, %g1; \ - sub %g1, %g4, %g1; \ - jmpl %g1 + %g0, %g0; \ - nop; - #define TRAP_ARG(routine, arg) \ ba,pt %xcc, etrap; \ rd %pc, %g7; \ @@ -105,12 +94,12 @@ #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sunos_sys_table) #define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table32) #define LINUX_64BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table64) +#define GETCC_TRAP TRAP(getcc) +#define SETCC_TRAP TRAP(setcc) /* FIXME: Write these actually */ #define NETBSD_SYSCALL_TRAP TRAP(netbsd_syscall) #define SOLARIS_SYSCALL_TRAP TRAP(solaris_syscall) #define BREAKPOINT_TRAP TRAP(breakpoint_trap) -#define GETCC_TRAP TRAP(getcc) -#define SETCC_TRAP TRAP(setcc) #define INDIRECT_SOLARIS_SYSCALL(tlvl) TRAP_ARG(indirect_syscall, tlvl) #define TRAP_IRQ(routine, level) \ @@ -126,7 +115,7 @@ /* On UP this is ok, and worth the effort, for SMP we need * a different mechanism and thus cannot do it all in trap table. 
-DaveM */ -#if 0 /* ndef __SMP__ */ +#ifndef __SMP__ #define TRAP_IVEC \ ldxa [%g2] ASI_UDB_INTR_R, %g3; \ and %g3, 0x7ff, %g3; \ @@ -207,16 +196,23 @@ #define SPILL_2_GENERIC(xxx) \ wr %g0, xxx, %asi; \ srl %sp, 0, %sp; \ - stda %l0, [%sp + 0x00] %asi; \ - stda %l2, [%sp + 0x08] %asi; \ - stda %l4, [%sp + 0x10] %asi; \ - stda %l6, [%sp + 0x18] %asi; \ - stda %i0, [%sp + 0x20] %asi; \ - stda %i2, [%sp + 0x28] %asi; \ - stda %i4, [%sp + 0x30] %asi; \ - stda %i6, [%sp + 0x38] %asi; \ + stwa %l0, [%sp + 0x00] %asi; \ + stwa %l1, [%sp + 0x04] %asi; \ + stwa %l2, [%sp + 0x08] %asi; \ + stwa %l3, [%sp + 0x0c] %asi; \ + stwa %l4, [%sp + 0x10] %asi; \ + stwa %l5, [%sp + 0x14] %asi; \ + stwa %l6, [%sp + 0x18] %asi; \ + stwa %l7, [%sp + 0x1c] %asi; \ + stwa %i0, [%sp + 0x20] %asi; \ + stwa %i1, [%sp + 0x24] %asi; \ + stwa %i2, [%sp + 0x28] %asi; \ + stwa %i3, [%sp + 0x2c] %asi; \ + stwa %i4, [%sp + 0x30] %asi; \ + stwa %i5, [%sp + 0x34] %asi; \ + stwa %i6, [%sp + 0x38] %asi; \ + stwa %i7, [%sp + 0x3c] %asi; \ saved; retry; nop; nop; nop; nop; \ - nop; nop; nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; nop; \ b,a,pt %xcc, spill_fixup_mna; \ b,a,pt %xcc, spill_fixup; @@ -287,16 +283,23 @@ #define FILL_2_GENERIC(xxx) \ wr %g0, xxx, %asi; \ srl %sp, 0, %sp; \ - ldda [%sp + 0x00] %asi, %l0; \ - ldda [%sp + 0x08] %asi, %l2; \ - ldda [%sp + 0x10] %asi, %l4; \ - ldda [%sp + 0x18] %asi, %l6; \ - ldda [%sp + 0x20] %asi, %i0; \ - ldda [%sp + 0x28] %asi, %i2; \ - ldda [%sp + 0x30] %asi, %i4; \ - ldda [%sp + 0x38] %asi, %i6; \ + lduwa [%sp + 0x00] %asi, %l0; \ + lduwa [%sp + 0x04] %asi, %l1; \ + lduwa [%sp + 0x08] %asi, %l2; \ + lduwa [%sp + 0x0c] %asi, %l3; \ + lduwa [%sp + 0x10] %asi, %l4; \ + lduwa [%sp + 0x14] %asi, %l5; \ + lduwa [%sp + 0x18] %asi, %l6; \ + lduwa [%sp + 0x1c] %asi, %l7; \ + lduwa [%sp + 0x20] %asi, %i0; \ + lduwa [%sp + 0x24] %asi, %i1; \ + lduwa [%sp + 0x28] %asi, %i2; \ + lduwa [%sp + 0x2c] %asi, %i3; \ + lduwa [%sp + 0x30] %asi, %i4; \ + lduwa [%sp + 0x34] %asi, %i5; \ + lduwa [%sp + 0x38] %asi, %i6; \ + lduwa [%sp + 0x3c] %asi, %i7; \ restored; retry; nop; nop; nop; nop; \ - nop; nop; nop; nop; nop; nop; nop; nop; \ nop; nop; nop; nop; nop; nop; \ b,a,pt %xcc, fill_fixup_mna; \ b,a,pt %xcc, fill_fixup; diff --git a/include/asm-sparc64/ioctls.h b/include/asm-sparc64/ioctls.h index 0432cb46f..1d6c1cace 100644 --- a/include/asm-sparc64/ioctls.h +++ b/include/asm-sparc64/ioctls.h @@ -1,14 +1,9 @@ -/* $Id: ioctls.h,v 1.2 1997/04/04 00:50:18 davem Exp $ */ +/* $Id: ioctls.h,v 1.4 1997/06/23 07:26:03 davem Exp $ */ #ifndef _ASM_SPARC64_IOCTLS_H #define _ASM_SPARC64_IOCTLS_H #include -/* XXX 32-bit binary compatability issues, I am sure that - * XXX only IOCTL's which reference structures will be of - * XXX concern and these are easily fabricated using wrappers. 
- */ - /* Big T */ #define TCGETA _IOR('T', 1, struct termio) #define TCSETA _IOW('T', 2, struct termio) @@ -24,7 +19,7 @@ /* Note that all the ioctls that are not available in Linux have a * double underscore on the front to: a) avoid some programs to - * thing we support some ioctls under Linux (autoconfiguration stuff) + * think we support some ioctls under Linux (autoconfiguration stuff) */ /* Little t */ #define TIOCGETD _IOR('t', 0, int) @@ -69,8 +64,8 @@ /* 119 is the non-posix getpgrp tty ioctl */ #define __TIOCCDTR _IO('t', 120) /* SunOS Specific */ #define __TIOCSDTR _IO('t', 121) /* SunOS Specific */ -#define __TIOCCBRK _IO('t', 122) /* SunOS Specific */ -#define __TIOCSBRK _IO('t', 123) /* SunOS Specific */ +#define TIOCCBRK _IO('t', 122) +#define TIOCSBRK _IO('t', 123) #define __TIOCLGET _IOW('t', 124, int) /* SunOS Specific */ #define __TIOCLSET _IOW('t', 125, int) /* SunOS Specific */ #define __TIOCLBIC _IOW('t', 126, int) /* SunOS Specific */ diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h index 9a5b10458..7e7aa0433 100644 --- a/include/asm-sparc64/mmu_context.h +++ b/include/asm-sparc64/mmu_context.h @@ -1,4 +1,4 @@ -/* $Id: mmu_context.h,v 1.10 1997/05/23 09:35:55 jj Exp $ */ +/* $Id: mmu_context.h,v 1.17 1997/07/13 19:13:39 davem Exp $ */ #ifndef __SPARC64_MMU_CONTEXT_H #define __SPARC64_MMU_CONTEXT_H @@ -11,11 +11,6 @@ #ifndef __ASSEMBLY__ -/* Initialize the context related info for a new mm_struct - * instance. - */ -#define init_new_context(mm) ((mm)->context = NO_CONTEXT) - #define destroy_context(mm) do { } while(0) extern unsigned long tlb_context_cache; @@ -24,61 +19,41 @@ extern unsigned long tlb_context_cache; #define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT) #define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL) -extern __inline__ void get_new_mmu_context(struct mm_struct *mm, - unsigned long ctx) -{ - if((ctx & ~(CTX_VERSION_MASK)) == 0) { - unsigned long flags; - int entry; - - save_and_cli(flags); - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "stxa %%g0, [%0] %2" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU), - "i" (ASI_DMMU)); - for(entry = 0; entry < 62; entry++) { - spitfire_put_dtlb_data(entry, 0x0UL); - spitfire_put_itlb_data(entry, 0x0UL); - } - membar("#Sync"); - flushi(PAGE_OFFSET); - restore_flags(flags); +extern void get_new_mmu_context(struct mm_struct *mm, unsigned long ctx); - ctx = (ctx & CTX_VERSION_MASK) + CTX_FIRST_VERSION; - if(!ctx) - ctx = CTX_FIRST_VERSION; - } - tlb_context_cache = ctx + 1; - mm->context = ctx; -} +/* Initialize the context related info for a new mm_struct + * instance. + */ +#define init_new_context(mm) get_new_mmu_context((mm), tlb_context_cache) extern __inline__ void get_mmu_context(struct task_struct *tsk) { + register unsigned long paddr asm("o5"); struct mm_struct *mm = tsk->mm; - if(mm && - !(tsk->tss.flags & SPARC_FLAG_KTHREAD) && + flushw_user(); + if(!(tsk->tss.flags & SPARC_FLAG_KTHREAD) && !(tsk->flags & PF_EXITING)) { unsigned long ctx = tlb_context_cache; - register unsigned long paddr asm("o5"); - - flushw_user(); if((mm->context ^ ctx) & CTX_VERSION_MASK) get_new_mmu_context(mm, ctx); - tsk->tss.ctx = (mm->context & 0x1fff); - spitfire_set_secondary_context(tsk->tss.current_ds ? 
- mm->context : 0); - paddr = __pa(mm->pgd); - __asm__ __volatile__(" - rdpr %%pstate, %%o4 - wrpr %%o4, %1, %%pstate - mov %0, %%g7 - wrpr %%o4, 0x0, %%pstate - " : /* no outputs */ - : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE) - : "o4"); - } + + /* Don't worry, set_fs() will restore it... */ + tsk->tss.ctx = (tsk->tss.current_ds ? + (mm->context & 0x1fff) : 0); + } else + tsk->tss.ctx = 0; + spitfire_set_secondary_context(tsk->tss.ctx); + __asm__ __volatile__("flush %g6"); + paddr = __pa(mm->pgd); + __asm__ __volatile__(" + rdpr %%pstate, %%o4 + wrpr %%o4, %1, %%pstate + mov %0, %%g7 + wrpr %%o4, 0x0, %%pstate + " : /* no outputs */ + : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE) + : "o4"); } #endif /* !(__ASSEMBLY__) */ diff --git a/include/asm-sparc64/namei.h b/include/asm-sparc64/namei.h index af5afb721..e80c11979 100644 --- a/include/asm-sparc64/namei.h +++ b/include/asm-sparc64/namei.h @@ -1,4 +1,4 @@ -/* $Id: namei.h,v 1.4 1997/06/07 08:32:56 ecd Exp $ +/* $Id: namei.h,v 1.5 1997/07/17 02:24:28 davem Exp $ * linux/include/asm-sparc64/namei.h * * Routines to handle famous /usr/gnemul/s*. @@ -11,6 +11,7 @@ #define SPARC_BSD_EMUL "usr/gnemul/sunos/" #define SPARC_SOL_EMUL "usr/gnemul/solaris/" +#if 0 /* XXX FIXME */ extern int __namei(int, const char *, struct inode *, char *, struct inode **, struct inode **, struct qstr *, struct dentry **, int *); @@ -44,4 +45,6 @@ __prefix_namei(int retrieve_mode, const char * name, struct inode * base, return 0; } +#endif /* XXX FIXME */ + #endif /* __SPARC64_NAMEI_H */ diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 71679e351..d39d3d494 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -1,4 +1,4 @@ -/* $Id: page.h,v 1.8 1997/03/26 12:24:21 davem Exp $ */ +/* $Id: page.h,v 1.14 1997/06/26 22:32:03 davem Exp $ */ #ifndef _SPARC64_PAGE_H #define _SPARC64_PAGE_H @@ -18,10 +18,15 @@ #ifndef __ASSEMBLY__ -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE) +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) -#define STRICT_MM_TYPECHECKS +extern void copy_page(unsigned long to, unsigned long from); + +/* GROSS, defining this makes gcc pass these types as aggregates, + * and thus on the stack, turn this crap off... -DaveM + */ + +/* #define STRICT_MM_TYPECHECKS */ #ifdef STRICT_MM_TYPECHECKS /* These are used to make use of C type-checking.. */ @@ -89,7 +94,9 @@ typedef unsigned long iopgprot_t; #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) #ifndef __ASSEMBLY__ -#define PAGE_OFFSET 0xFFFFF80000000000UL +/* Do prdele, look what happens to be in %g4... */ +register unsigned long page_offset asm("g4"); +#define PAGE_OFFSET page_offset #else #define PAGE_OFFSET 0xFFFFF80000000000 #endif diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index e56a4024d..5cbd9a3c5 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -1,4 +1,4 @@ -/* $Id: pgtable.h,v 1.34 1997/06/02 06:33:41 davem Exp $ +/* $Id: pgtable.h,v 1.49 1997/06/30 09:24:12 jj Exp $ * pgtable.h: SpitFire page table operations. * * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu) @@ -51,7 +51,7 @@ #define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3)) /* NOTE: TLB miss handlers depend heavily upon where this is. 
*/ -#define VMALLOC_START 0xFFFFFc0000000000UL +#define VMALLOC_START 0x0000000800000000UL #define VMALLOC_VMADDR(x) ((unsigned long)(x)) #endif /* !(__ASSEMBLY__) */ @@ -78,18 +78,17 @@ #define _PAGE_G 0x0000000000000001 /* Global */ /* Here are the SpitFire software bits we use in the TTE's. */ -#define _PAGE_PRESENT 0x0000000000001000 /* Present Page (ie. not swapped out) */ #define _PAGE_MODIFIED 0x0000000000000800 /* Modified Page (ie. dirty) */ #define _PAGE_ACCESSED 0x0000000000000400 /* Accessed Page (ie. referenced) */ #define _PAGE_READ 0x0000000000000200 /* Readable SW Bit */ #define _PAGE_WRITE 0x0000000000000100 /* Writable SW Bit */ -#define _PAGE_PRIV 0x0000000000000080 /* Software privilege bit */ +#define _PAGE_PRESENT 0x0000000000000080 /* Present Page (ie. not swapped out) */ #define _PAGE_CACHE (_PAGE_CP | _PAGE_CV) #define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W) #define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R) -#define __PRIV_BITS (_PAGE_P | _PAGE_PRIV) +#define __PRIV_BITS _PAGE_P #define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \ __PRIV_BITS | __ACCESS_BITS) @@ -112,7 +111,7 @@ #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_MODIFIED | _PAGE_ACCESSED | _PAGE_PRESENT) -#define pg_iobits (_PAGE_VALID | __PRIV_BITS | __ACCESS_BITS | _PAGE_E) +#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E) #define __P000 PAGE_NONE #define __P001 PAGE_READONLY @@ -147,8 +146,7 @@ extern pte_t *__bad_pte(void); * hit for all __pa()/__va() operations. */ extern unsigned long phys_base; - -#define ZERO_PAGE (PAGE_OFFSET + phys_base) +#define ZERO_PAGE ((unsigned long)__va(phys_base)) /* This is for making TLB miss faster to process. */ extern unsigned long null_pmd_table; @@ -160,156 +158,47 @@ extern void *sparc_init_alloc(unsigned long *kbrk, unsigned long size); /* Cache and TLB flush operations. */ -extern __inline__ void flush_cache_all(void) -{ - unsigned long addr; - - flushw_all(); - for(addr = 0; addr < (PAGE_SIZE << 1); addr += 32) - spitfire_put_icache_tag(addr, 0x0UL); -} - -extern __inline__ void flush_cache_mm(struct mm_struct *mm) -{ - if(mm->context != NO_CONTEXT) { - unsigned long addr; - - flushw_user(); - for(addr = 0; addr < (PAGE_SIZE << 1); addr += 32) - spitfire_put_icache_tag(addr, 0x0UL); - } -} - -extern __inline__ void flush_cache_range(struct mm_struct *mm, unsigned long start, - unsigned long end) -{ - if(mm->context != NO_CONTEXT) { - unsigned long addr; +#define flush_cache_all() \ +do { unsigned long va; \ + flushw_all(); \ + for(va = 0; \ + va<(PAGE_SIZE<<1); \ + va += 32) \ +spitfire_put_icache_tag(va,0x0);\ +} while(0) - flushw_user(); - for(addr = 0; addr < (PAGE_SIZE << 1); addr += 32) - spitfire_put_icache_tag(addr, 0x0UL); - } -} - -extern __inline__ void flush_cache_page(struct vm_area_struct *vma, unsigned long page) -{ - struct mm_struct *mm = vma->vm_mm; - - if(mm->context != NO_CONTEXT) { - unsigned long addr; - - flushw_user(); - for(addr = 0; addr < (PAGE_SIZE << 1); addr += 32) - spitfire_put_icache_tag(addr, 0x0UL); - } -} +#define flush_cache_mm(mm) do { } while(0) +#define flush_cache_range(mm, start, end) do { } while(0) +#define flush_cache_page(vma, page) do { } while(0) /* This operation in unnecessary on the SpitFire since D-CACHE is write-through. */ #define flush_page_to_ram(page) do { } while (0) -extern __inline__ void flush_tlb_all(void) -{ - unsigned long flags; - int entry; - - /* Invalidate all non-locked TTE's in both the dtlb and itlb. 
*/ - save_and_cli(flags); - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "stxa %%g0, [%0] %2" - : /* No outputs */ - : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU), "i" (ASI_DMMU)); - for(entry = 0; entry < 62; entry++) { - spitfire_put_dtlb_data(entry, 0x0UL); - spitfire_put_itlb_data(entry, 0x0UL); - } - membar("#Sync"); - flushi(PAGE_OFFSET); - restore_flags(flags); -} +extern void flush_tlb_all(void); +extern void __flush_tlb_mm(unsigned long context); extern __inline__ void flush_tlb_mm(struct mm_struct *mm) { - if(mm->context != NO_CONTEXT) { - __asm__ __volatile__(" - /* flush_tlb_mm() */ - rdpr %%pil, %%g1 - mov %1, %%g7 - wrpr %%g0, 15, %%pil - ldxa [%%g7] %2, %%g2 - cmp %%g2, %0 - be,pt %%icc, 1f - mov 0x50, %%g3 - stxa %0, [%%g7] %2 -1: - stxa %%g0, [%%g3] %3 - stxa %%g0, [%%g3] %4 - be,a,pt %%icc, 1f - nop - stxa %%g2, [%%g7] %2 -1: - flush %%g4 - wrpr %%g1, 0x0, %%pil -" : /* no outputs */ - : "r" (mm->context & 0x1fff), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU), - "i" (ASI_DMMU_DEMAP), "i" (ASI_IMMU_DEMAP) - : "g1", "g2", "g3", "g7", "cc"); - } + if(mm->context != NO_CONTEXT) + __flush_tlb_mm(mm->context & 0x1fff); } +extern void __flush_tlb_range(unsigned long context, unsigned long start, + unsigned long end); extern __inline__ void flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) { - if(mm->context != NO_CONTEXT) { - unsigned long old_ctx = spitfire_get_secondary_context(); - unsigned long new_ctx = (mm->context & 0x1fff); - unsigned long flags; - - start &= PAGE_MASK; - save_and_cli(flags); - if(new_ctx != old_ctx) - spitfire_set_secondary_context(mm->context); - while(start < end) { - spitfire_flush_dtlb_secondary_page(start); - spitfire_flush_itlb_secondary_page(start); - start += PAGE_SIZE; - } - if(new_ctx != old_ctx) - spitfire_set_secondary_context(old_ctx); - __asm__ __volatile__("flush %g4"); - restore_flags(flags); - } + if(mm->context != NO_CONTEXT) + __flush_tlb_range(mm->context & 0x1fff, start, end); } +extern void __flush_tlb_page(unsigned long context, unsigned long page); extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { struct mm_struct *mm = vma->vm_mm; - if(mm->context != NO_CONTEXT) { - __asm__ __volatile__(" - /* flush_tlb_page() */ - rdpr %%pil, %%g1 - mov %1, %%g7 - wrpr %%g0, 15, %%pil - ldxa [%%g7] %2, %%g2 - cmp %%g2, %0 - be,pt %%icc, 1f - or %5, 0x10, %%g3 - stxa %0, [%%g7] %2 -1: - stxa %%g0, [%%g3] %3 - stxa %%g0, [%%g3] %4 - be,a,pt %%icc, 1f - nop - stxa %%g2, [%%g7] %2 -1: - flush %%g4 - wrpr %%g1, 0x0, %%pil -" : /* no outputs */ - : "r" (mm->context & 0x1fff), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU), - "i" (ASI_DMMU_DEMAP), "i" (ASI_IMMU_DEMAP), "r" (page & PAGE_MASK) - : "g1", "g2", "g3", "g7", "cc"); - } + if(mm->context != NO_CONTEXT) + __flush_tlb_page(mm->context & 0x1fff, page & PAGE_MASK); } extern inline pte_t mk_pte(unsigned long page, pgprot_t pgprot) @@ -394,24 +283,6 @@ extern inline pte_t pte_mkyoung(pte_t pte) return __pte(pte_val(pte) | (_PAGE_ACCESSED)); } -extern inline void SET_PAGE_DIR(struct task_struct *tsk, pgd_t *pgdir) -{ - register unsigned long paddr asm("o5"); - - paddr = __pa(pgdir); - - if(tsk == current) { - __asm__ __volatile__ (" - rdpr %%pstate, %%o4 - wrpr %%o4, %1, %%pstate - mov %0, %%g7 - wrpr %%o4, 0x0, %%pstate - " : /* No outputs */ - : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE) - : "o4"); - } -} - /* to find an entry in a page-table-directory. 
*/ extern inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address) { return mm->pgd + ((address >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); } @@ -429,11 +300,16 @@ extern inline pte_t *pte_offset(pmd_t *dir, unsigned long address) extern __inline__ void __init_pmd(pmd_t *pmdp) { - extern void __bfill64(void *, unsigned long); + extern void __bfill64(void *, unsigned long *); - __bfill64((void *)pmdp, null_pte_table); + __bfill64((void *)pmdp, &null_pte_table); } +/* Turning this off makes things much faster, but eliminates some + * sanity checking as well. + */ +/* #define PGTABLE_SANITY_CHECKS */ + /* Allocate and free page tables. The xxx_kernel() versions are * used to allocate a kernel page table - this turns on supervisor * bits if any. @@ -456,11 +332,13 @@ extern inline pte_t * pte_alloc_kernel(pmd_t *pmd, unsigned long address) } free_page((unsigned long) page); } +#ifdef PGTABLE_SANITY_CHECKS if (pmd_bad(*pmd)) { printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd)); pmd_set(pmd, BAD_PTE); return NULL; } +#endif return (pte_t *) pmd_page(*pmd) + address; } @@ -483,11 +361,13 @@ extern inline pmd_t * pmd_alloc_kernel(pgd_t *pgd, unsigned long address) } free_page((unsigned long) page); } +#ifdef PGTABLE_SANITY_CHECKS if (pgd_bad(*pgd)) { printk("Bad pgd in pmd_alloc_kernel: %08lx\n", pgd_val(*pgd)); pgd_set(pgd, BAD_PMD); return NULL; } +#endif return (pmd_t *) pgd_page(*pgd) + address; } @@ -509,11 +389,13 @@ extern inline pte_t * pte_alloc(pmd_t *pmd, unsigned long address) } free_page((unsigned long) page); } +#ifdef PGTABLE_SANITY_CHECKS if (pmd_bad(*pmd)) { printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd)); pmd_set(pmd, BAD_PTE); return NULL; } +#endif return (pte_t *) pmd_page(*pmd) + address; } @@ -536,11 +418,13 @@ extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address) } free_page((unsigned long) page); } +#ifdef PGTABLE_SANITY_CHECKS if (pgd_bad(*pgd)) { printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd)); pgd_set(pgd, BAD_PMD); return NULL; } +#endif return (pmd_t *) pgd_page(*pgd) + address; } @@ -549,16 +433,33 @@ extern inline void pgd_free(pgd_t * pgd) extern inline pgd_t * pgd_alloc(void) { - extern void __bfill64(void *, unsigned long); + extern void __bfill64(void *, unsigned long *); pgd_t *pgd = (pgd_t *) __get_free_page(GFP_KERNEL); if (pgd) - __bfill64((void *)pgd, null_pmd_table); + __bfill64((void *)pgd, &null_pmd_table); return pgd; } extern pgd_t swapper_pg_dir[1024]; +extern inline void SET_PAGE_DIR(struct task_struct *tsk, pgd_t *pgdir) +{ + if(pgdir != swapper_pg_dir && tsk == current) { + register unsigned long paddr asm("o5"); + + paddr = __pa(pgdir); + __asm__ __volatile__ (" + rdpr %%pstate, %%o4 + wrpr %%o4, %1, %%pstate + mov %0, %%g7 + wrpr %%o4, 0x0, %%pstate + " : /* No outputs */ + : "r" (paddr), "i" (PSTATE_MG|PSTATE_IE) + : "o4"); + } +} + /* Routines for getting a dvma scsi buffer. */ struct mmu_sglist { char *addr; @@ -576,61 +477,15 @@ extern void mmu_get_scsi_sgl(struct mmu_sglist *, int, struct linux_sbus *sbus) #define mmu_lockarea(vaddr, len) (vaddr) #define mmu_unlockarea(vaddr, len) do { } while(0) +extern void fixup_dcache_alias(struct vm_area_struct *vma, unsigned long address, + pte_t pte); + extern inline void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) { /* Find and fix bad virutal cache aliases. 
*/ - if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) { - struct vm_area_struct *vmaring; - struct inode *inode; - unsigned long vaddr, offset, start; - pgd_t *pgdp; - pmd_t *pmdp; - pte_t *ptep; - int alias_found = 0; - - inode = vma->vm_inode; - if(!inode) - return; - - offset = (address & PAGE_MASK) - vma->vm_start; - vmaring = inode->i_mmap; - do { - vaddr = vmaring->vm_start + offset; - - /* This conditional is misleading... */ - if((vaddr ^ address) & PAGE_SIZE) { - alias_found++; - start = vmaring->vm_start; - while(start < vmaring->vm_end) { - pgdp = pgd_offset(vmaring->vm_mm, start); - if(!pgdp) goto next; - pmdp = pmd_offset(pgdp, start); - if(!pmdp) goto next; - ptep = pte_offset(pmdp, start); - if(!ptep) goto next; - - if(pte_val(*ptep) & _PAGE_PRESENT) { - flush_cache_page(vmaring, start); - *ptep = __pte(pte_val(*ptep) & - ~(_PAGE_CV)); - flush_tlb_page(vmaring, start); - } - next: - start += PAGE_SIZE; - } - } - } while((vmaring = vmaring->vm_next_share) != NULL); - - if(alias_found && (pte_val(pte) & _PAGE_CV)) { - pgdp = pgd_offset(vma->vm_mm, address); - pmdp = pmd_offset(pgdp, address); - ptep = pte_offset(pmdp, address); - flush_cache_page(vma, address); - *ptep = __pte(pte_val(*ptep) & ~(_PAGE_CV)); - flush_tlb_page(vma, address); - } - } + if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) + fixup_dcache_alias(vma, address, pte); } /* Make a non-present pseudo-TTE. */ @@ -638,7 +493,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) { pte_t pte; pte_val(pte) = (type<>PAGE_SHIFT) & 0xff)) #define SWP_OFFSET(entry) ((entry) >> (PAGE_SHIFT+8)) @@ -650,17 +505,19 @@ sun4u_get_pte (unsigned long addr) pgd_t *pgdp; pmd_t *pmdp; pte_t *ptep; - - pgdp = pgd_offset (current->mm, addr); + + if (addr >= PAGE_OFFSET) + return addr & _PAGE_PADDR; + pgdp = pgd_offset_k (addr); pmdp = pmd_offset (pgdp, addr); ptep = pte_offset (pmdp, addr); return pte_val (*ptep) & _PAGE_PADDR; } -extern __inline__ unsigned int +extern __inline__ unsigned long __get_phys (unsigned long addr) { - return (sun4u_get_pte (addr) & 0x0fffffff); + return sun4u_get_pte (addr); } extern __inline__ int @@ -669,6 +526,9 @@ __get_iospace (unsigned long addr) return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); } +extern void * module_map (unsigned long size); +extern void module_unmap (void *addr); + #endif /* !(__ASSEMBLY__) */ #endif /* !(_SPARC64_PGTABLE_H) */ diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index f58c9da70..019bbf600 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -1,4 +1,4 @@ -/* $Id: processor.h,v 1.27 1997/05/23 09:35:52 jj Exp $ +/* $Id: processor.h,v 1.32 1997/07/01 21:59:38 davem Exp $ * include/asm-sparc64/processor.h * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -32,34 +32,24 @@ /* The Sparc processor specific thread struct. */ struct thread_struct { - /* Floating point regs */ - /* Please check asm_offsets, so that not to much precious space - is wasted by this alignment and move the float_regs wherever - is better in this structure. Remember every byte of alignment - is multiplied by 512 to get the amount of wasted kernel memory. */ - unsigned int float_regs[64] __attribute__ ((aligned (64))); - unsigned long fsr; - - /* Context switch saved kernel state. 
*/ - unsigned long ksp, kpc, wstate, cwp, ctx; +/*DC1*/ unsigned long ksp __attribute__ ((aligned(16))); + unsigned long kpc; +/*DC2*/ unsigned long wstate; + unsigned int cwp; + unsigned int ctx; + +/*DC3*/ unsigned int flags; + unsigned int new_signal; + unsigned long current_ds; +/*DC4*/ unsigned long w_saved; + struct pt_regs *kregs; - /* Storage for windows when user stack is bogus. */ struct reg_window reg_window[NSWINS] __attribute__ ((aligned (16))); unsigned long rwbuf_stkptrs[NSWINS] __attribute__ ((aligned (8))); - unsigned long w_saved; - /* Arch-specific task state flags, see below. */ - unsigned long flags; - - /* For signal handling */ unsigned long sig_address __attribute__ ((aligned (8))); unsigned long sig_desc; - struct sigstack sstk_info; - int current_ds, new_signal; - - struct pt_regs *kregs; - struct exec core_exec; /* just what it says. */ }; @@ -74,30 +64,18 @@ struct thread_struct { PAGE_SHARED , VM_READ | VM_WRITE | VM_EXEC, NULL, &init_mm.mmap } #define INIT_TSS { \ -/* FPU regs */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \ -/* FPU status */ \ - 0, \ /* ksp, kpc, wstate, cwp, secctx */ \ 0, 0, 0, 0, 0, \ +/* flags, new_signal, current_ds, */ \ + SPARC_FLAG_KTHREAD, 0, USER_DS, \ +/* w_saved, kregs, */ \ + 0, 0, \ /* reg_window */ \ -{ { { 0, }, { 0, } }, }, \ + { { { 0, }, { 0, } }, }, \ /* rwbuf_stkptrs */ \ -{ 0, 0, 0, 0, 0, 0, 0, 0, }, \ -/* w_saved */ \ - 0, \ -/* flags */ \ - SPARC_FLAG_KTHREAD, \ -/* sig_address, sig_desc */ \ - 0, 0, \ -/* ex, sstk_info, current_ds, */ \ - { 0, 0, }, USER_DS, \ -/* new_signal, kregs */ \ - 0, 0, \ -/* core_exec */ \ -{ 0, }, \ + { 0, 0, 0, 0, 0, 0, 0, 0, }, \ +/* sig_address, sig_desc, sstk_info, core_exec */ \ + 0, 0, { 0, 0, }, { 0, }, \ } #ifndef __ASSEMBLY__ @@ -111,11 +89,12 @@ extern __inline__ unsigned long thread_saved_pc(struct thread_struct *t) /* Do necessary setup to start up a newly executed thread. 
*/ #define start_thread(regs, pc, sp) \ do { \ - regs->tstate = (regs->tstate & (TSTATE_CWP)) | TSTATE_IE; \ + regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_IE|TSTATE_PEF); \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ current->tss.flags &= ~SPARC_FLAG_32BIT; \ + current->tss.wstate = (1 << 3); \ __asm__ __volatile__( \ "stx %%g0, [%0 + %2 + 0x00]\n\t" \ "stx %%g0, [%0 + %2 + 0x08]\n\t" \ @@ -135,7 +114,7 @@ do { \ "stx %%g0, [%0 + %2 + 0x78]\n\t" \ "wrpr %%g0, (1 << 3), %%wstate\n\t" \ : \ - : "r" (regs), "r" (sp - REGWIN_SZ), \ + : "r" (regs), "r" (sp - REGWIN_SZ - STACK_BIAS), \ "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))); \ } while(0) @@ -146,11 +125,12 @@ do { \ pc &= 0x00000000ffffffffUL; \ sp &= 0x00000000ffffffffUL; \ \ - regs->tstate = (regs->tstate & (TSTATE_CWP)) | (TSTATE_IE | TSTATE_AM); \ + regs->tstate = (regs->tstate & (TSTATE_CWP))|(TSTATE_IE|TSTATE_AM|TSTATE_PEF); \ regs->tpc = ((pc & (~3)) - 4); \ regs->tnpc = regs->tpc + 4; \ regs->y = 0; \ current->tss.flags |= SPARC_FLAG_32BIT; \ + current->tss.wstate = (2 << 3); \ zero = 0; \ __asm__ __volatile__( \ "stx %%g0, [%0 + %2 + 0x00]\n\t" \ diff --git a/include/asm-sparc64/psrcompat.h b/include/asm-sparc64/psrcompat.h index b971514d6..22e9da3d6 100644 --- a/include/asm-sparc64/psrcompat.h +++ b/include/asm-sparc64/psrcompat.h @@ -1,4 +1,4 @@ -/* $Id: psrcompat.h,v 1.3 1997/06/05 06:22:54 davem Exp $ */ +/* $Id: psrcompat.h,v 1.4 1997/06/20 11:54:39 davem Exp $ */ #ifndef _SPARC64_PSRCOMPAT_H #define _SPARC64_PSRCOMPAT_H @@ -23,33 +23,19 @@ extern inline unsigned int tstate_to_psr(unsigned long tstate) { - unsigned int psr; unsigned long vers; - /* These fields are in the same place. */ - psr = (tstate & (TSTATE_CWP | TSTATE_PEF)); - - /* This is what the user would have always seen. */ - psr |= PSR_S; - - /* Slam in the 32-bit condition codes. */ - psr |= ((tstate & TSTATE_ICC) >> 12); - - /* This is completely arbitrary. */ __asm__ __volatile__("rdpr %%ver, %0" : "=r" (vers)); - psr |= ((vers << 8) >> 32) & PSR_IMPL; - psr |= ((vers << 24) >> 36) & PSR_VERS; - - return psr; + return ((tstate & TSTATE_CWP) | + PSR_S | + ((tstate & TSTATE_ICC) >> 12) | + (((vers << 8) >> 32) & PSR_IMPL) | + (((vers << 24) >> 36) & PSR_VERS)); } extern inline unsigned long psr_to_tstate_icc(unsigned int psr) { - unsigned long tstate; - - tstate = ((unsigned long)(psr & PSR_ICC)) << 12; - - return tstate; + return ((unsigned long)(psr & PSR_ICC)) << 12; } #endif /* !(_SPARC64_PSRCOMPAT_H) */ diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h index 2233ee7f0..a1e1414d6 100644 --- a/include/asm-sparc64/pstate.h +++ b/include/asm-sparc64/pstate.h @@ -1,4 +1,4 @@ -/* $Id: pstate.h,v 1.4 1997/05/29 12:45:02 jj Exp $ */ +/* $Id: pstate.h,v 1.6 1997/06/25 07:39:45 jj Exp $ */ #ifndef _SPARC64_PSTATE_H #define _SPARC64_PSTATE_H @@ -14,6 +14,9 @@ #define PSTATE_CLE 0x0000000000000200 /* Current Little Endian. */ #define PSTATE_TLE 0x0000000000000100 /* Trap Little Endian. */ #define PSTATE_MM 0x00000000000000c0 /* Memory Model. */ +#define PSTATE_TSO 0x0000000000000000 /* MM: Total Store Order */ +#define PSTATE_PSO 0x0000000000000040 /* MM: Partial Store Order */ +#define PSTATE_RMO 0x0000000000000080 /* MM: Relaxed Memory Order */ #define PSTATE_RED 0x0000000000000020 /* Reset Error Debug State. */ #define PSTATE_PEF 0x0000000000000010 /* Floating Point Enable. */ #define PSTATE_AM 0x0000000000000008 /* Address Mask. 
*/ @@ -47,6 +50,9 @@ #define TSTATE_CLE 0x0000000000020000 /* Current Little Endian. */ #define TSTATE_TLE 0x0000000000010000 /* Trap Little Endian. */ #define TSTATE_MM 0x000000000000c000 /* Memory Model. */ +#define TSTATE_TSO 0x0000000000000000 /* MM: Total Store Order */ +#define TSTATE_PSO 0x0000000000004000 /* MM: Partial Store Order */ +#define TSTATE_RMO 0x0000000000008000 /* MM: Relaxed Memory Order */ #define TSTATE_RED 0x0000000000002000 /* Reset Error Debug State. */ #define TSTATE_PEF 0x0000000000001000 /* Floating Point Enable. */ #define TSTATE_AM 0x0000000000000800 /* Address Mask. */ diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h index 5da6f6dd1..a4784d41e 100644 --- a/include/asm-sparc64/ptrace.h +++ b/include/asm-sparc64/ptrace.h @@ -1,4 +1,4 @@ -/* $Id: ptrace.h,v 1.8 1997/05/27 19:30:27 jj Exp $ */ +/* $Id: ptrace.h,v 1.12 1997/06/24 16:30:35 davem Exp $ */ #ifndef _SPARC64_PTRACE_H #define _SPARC64_PTRACE_H @@ -15,7 +15,8 @@ struct pt_regs { unsigned long tstate; unsigned long tpc; unsigned long tnpc; - unsigned long y; + unsigned int y; + unsigned int fprs; }; struct pt_regs32 { @@ -137,6 +138,7 @@ extern void show_regs(struct pt_regs *); #define PT_V9_TPC 0x88 #define PT_V9_TNPC 0x90 #define PT_V9_Y 0x98 +#define PT_V9_FPRS 0x9c #define PT_TSTATE PT_V9_TSTATE #define PT_TPC PT_V9_TPC #define PT_TNPC PT_V9_TNPC @@ -265,6 +267,28 @@ extern void show_regs(struct pt_regs *); #define PTRACE_GETFPAREGS 20 #define PTRACE_SETFPAREGS 21 +/* There are for debugging 64-bit processes, either from a 32 or 64 bit + * parent. Thus their compliments are for debugging 32-bit processes only. + */ + +#define PTRACE_GETREGS64 22 +#define PTRACE_SETREGS64 23 +/* PTRACE_SYSCALL is 24 */ +#define PTRACE_GETFPREGS64 25 +#define PTRACE_SETFPREGS64 26 + #define PTRACE_GETUCODE 29 /* stupid bsd-ism */ +/* These are for 32-bit processes debugging 64-bit ones. + * Here addr and addr2 are passed in %g2 and %g3 respectively. 
+ */ +#define PTRACE_PEEKTEXT64 (30 + PTRACE_PEEKTEXT) +#define PTRACE_POKETEXT64 (30 + PTRACE_POKETEXT) +#define PTRACE_PEEKDATA64 (30 + PTRACE_PEEKDATA) +#define PTRACE_POKEDATA64 (30 + PTRACE_POKEDATA) +#define PTRACE_READDATA64 (30 + PTRACE_READDATA) +#define PTRACE_WRITEDATA64 (30 + PTRACE_WRITEDATA) +#define PTRACE_READTEXT64 (30 + PTRACE_READTEXT) +#define PTRACE_WRITETEXT64 (30 + PTRACE_WRITETEXT) + #endif /* !(_SPARC64_PTRACE_H) */ diff --git a/include/asm-sparc64/reg.h b/include/asm-sparc64/reg.h index 716b8f8c6..ea3fc6e9c 100644 --- a/include/asm-sparc64/reg.h +++ b/include/asm-sparc64/reg.h @@ -1,4 +1,4 @@ -/* $Id: reg.h,v 1.1 1996/12/26 14:22:34 davem Exp $ +/* $Id: reg.h,v 1.2 1997/06/24 23:19:55 davem Exp $ * linux/asm-sparc64/reg.h * Layout of the registers as expected by gdb on the Sparc * we should replace the user.h definitions with those in @@ -76,4 +76,33 @@ struct fpu { struct fp_status f_fpstatus; }; +struct regs64 { + unsigned long r_g1; + unsigned long r_g2; + unsigned long r_g3; + unsigned long r_g4; + unsigned long r_g5; + unsigned long r_g6; + unsigned long r_g7; + unsigned long r_o0; + unsigned long r_o1; + unsigned long r_o2; + unsigned long r_o3; + unsigned long r_o4; + unsigned long r_o5; + unsigned long r_o6; + unsigned long r_o7; + unsigned long tstate; + unsigned long tpc; + unsigned long tnpc; + unsigned int y; + unsigned int fprs; +}; + +struct fp_status64 { + unsigned long regs[32]; + unsigned long fsr; +}; + + #endif /* __SPARC64_REG_H */ diff --git a/include/asm-sparc64/resource.h b/include/asm-sparc64/resource.h index 5e7a7f8c1..b3aedd4ee 100644 --- a/include/asm-sparc64/resource.h +++ b/include/asm-sparc64/resource.h @@ -1,4 +1,4 @@ -/* $Id: resource.h,v 1.2 1997/04/04 00:50:27 davem Exp $ +/* $Id: resource.h,v 1.3 1997/06/14 17:35:09 davem Exp $ * resource.h: Resource definitions. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) @@ -25,7 +25,6 @@ #define RLIM_NLIMITS 10 #ifdef __KERNEL__ -/* XXX 32-bit binary compatability... */ #define INIT_RLIMITS \ { \ {LONG_MAX, LONG_MAX}, {LONG_MAX, LONG_MAX}, \ diff --git a/include/asm-sparc64/sigcontext.h b/include/asm-sparc64/sigcontext.h index 9d35493d9..3fba2f834 100644 --- a/include/asm-sparc64/sigcontext.h +++ b/include/asm-sparc64/sigcontext.h @@ -1,14 +1,9 @@ -/* $Id: sigcontext.h,v 1.4 1997/04/04 00:50:28 davem Exp $ */ +/* $Id: sigcontext.h,v 1.8 1997/06/20 11:54:41 davem Exp $ */ #ifndef __SPARC64_SIGCONTEXT_H #define __SPARC64_SIGCONTEXT_H #include -/* XXX This gets exported to userland as well as kernel, it is probably - * XXX riddled with many hard to find 32-bit binary compatability issues. - * XXX Signals and this file need to be investigated heavily. -DaveM - */ - #define SUNOS_MAXWIN 31 #ifndef __ASSEMBLY__ @@ -47,12 +42,12 @@ struct sigcontext32 { struct sigcontext { int sigc_onstack; /* state to restore */ int sigc_mask; /* sigmask to restore */ - int sigc_sp; /* stack pointer */ - int sigc_pc; /* program counter */ - int sigc_npc; /* next program counter */ - int sigc_psr; /* for condition codes etc */ - int sigc_g1; /* User uses these two registers */ - int sigc_o0; /* within the trampoline code. */ + unsigned long sigc_sp; /* stack pointer */ + unsigned long sigc_pc; /* program counter */ + unsigned long sigc_npc; /* next program counter */ + unsigned long sigc_psr; /* for condition codes etc */ + unsigned long sigc_g1; /* User uses these two registers */ + unsigned long sigc_o0; /* within the trampoline code. 
*/ /* Now comes information regarding the users window set * at the time of the signal. @@ -71,17 +66,6 @@ typedef struct { int si_mask; } __siginfo32_t; -typedef struct { - unsigned int si_float_regs [32]; - unsigned int si_fsr; - unsigned int si_fpqdepth; - struct { - unsigned int *insn_addr; - unsigned int insn; - } si_fpqueue [16]; -} __siginfo_fpu32_t; - - typedef struct { struct pt_regs si_regs; int si_mask; @@ -90,6 +74,7 @@ typedef struct { typedef struct { unsigned int si_float_regs [64]; unsigned long si_fsr; + unsigned long si_gsr; unsigned int si_fpqdepth; struct { unsigned int *insn_addr; diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h index fa32f67e5..8386e4a15 100644 --- a/include/asm-sparc64/softirq.h +++ b/include/asm-sparc64/softirq.h @@ -43,10 +43,12 @@ do { int ent = nr; \ do { int ent = nr; \ bh_mask &= ~(1 << ent); \ bh_mask_count[ent]++; \ + barrier(); \ } while(0) #define enable_bh(nr) \ do { int ent = nr; \ + barrier(); \ if (!--bh_mask_count[ent]) \ bh_mask |= 1 << ent; \ } while(0) diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index ec1ad2ea0..cefd43309 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h @@ -53,6 +53,11 @@ typedef struct { } rwlock_t; #else /* !(__SMP__) */ +/* All of these locking primitives are expected to work properly + * even in an RMO memory model, which currently is what the kernel + * runs in. + */ + typedef unsigned char spinlock_t; #define SPIN_LOCK_UNLOCKED 0 #define spin_lock_init(lock) (*(lock) = 0) @@ -64,6 +69,7 @@ extern __inline__ void spin_lock(spinlock_t *lock) 1: ldstub [%0], %%g2 brnz,a,pn %%g2, 2f ldub [%0], %%g2 + membar #LoadLoad | #LoadStore .text 2 2: brnz,a,pt 2b ldub [%0], %%g2 @@ -77,7 +83,8 @@ extern __inline__ void spin_lock(spinlock_t *lock) extern __inline__ int spin_trylock(spinlock_t *lock) { unsigned int result; - __asm__ __volatile__("ldstub [%1], %0" + __asm__ __volatile__("ldstub [%1], %0\n\t" + "membar #LoadLoad | #LoadStore" : "=r" (result) : "r" (lock) : "memory"); @@ -86,7 +93,11 @@ extern __inline__ int spin_trylock(spinlock_t *lock) extern __inline__ void spin_unlock(spinlock_t *lock) { - __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); + __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t" + "stb %%g0, [%0]" + : /* No outputs */ + : "r" (lock) + : "memory"); } extern __inline__ void spin_lock_irq(spinlock_t *lock) @@ -96,6 +107,7 @@ extern __inline__ void spin_lock_irq(spinlock_t *lock) ldstub [%0], %%g2 brnz,a,pn %%g2, 2f ldub [%0], %%g2 + membar #LoadLoad | #LoadStore .text 2 2: brnz,a,pt 2b ldub [%0], %%g2 @@ -109,6 +121,7 @@ extern __inline__ void spin_lock_irq(spinlock_t *lock) extern __inline__ void spin_unlock_irq(spinlock_t *lock) { __asm__ __volatile__(" + membar #StoreStore | #LoadStore stb %%g0, [%0] wrpr %%g0, 0x0, %%pil " : /* no outputs */ @@ -116,28 +129,30 @@ extern __inline__ void spin_unlock_irq(spinlock_t *lock) : "memory"); } -#define spin_lock_irqsave(lock, flags) \ -do { register spinlock_t *lp asm("g1"); \ - lp = lock; \ - __asm__ __volatile__( \ - " rdpr %%pil, %0\n\t" \ - " wrpr %%g0, 15, %%pil\n\t" \ - "1: ldstub [%1], %%g2\n\t" \ - " brnz,a,pnt %%g2, 2f\n\t" \ - " ldub [%1], %%g2\n\t" \ - " .text 2\n\t" \ - "2: brnz,a,pt %%g2, 2b\n\t" \ - " ldub [%1], %%g2\n\t" \ - " b,a,pt %%xcc, 1b\n\t" \ - " .previous\n" \ - : "=r" (flags) \ - : "r" (lp) \ - : "g2", "memory"); \ +#define spin_lock_irqsave(lock, flags) \ +do { register spinlock_t *lp asm("g1"); \ + lp = lock; \ + 
__asm__ __volatile__( \ + " rdpr %%pil, %0\n\t" \ + " wrpr %%g0, 15, %%pil\n\t" \ + "1: ldstub [%1], %%g2\n\t" \ + " brnz,a,pnt %%g2, 2f\n\t" \ + " ldub [%1], %%g2\n\t" \ + " membar #LoadLoad | #LoadStore\n\t" \ + " .text 2\n\t" \ + "2: brnz,a,pt %%g2, 2b\n\t" \ + " ldub [%1], %%g2\n\t" \ + " b,a,pt %%xcc, 1b\n\t" \ + " .previous\n" \ + : "=r" (flags) \ + : "r" (lp) \ + : "g2", "memory"); \ } while(0) extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) { __asm__ __volatile__(" + membar #StoreStore | #LoadStore stb %%g0, [%0] wrpr %1, 0x0, %%pil " : /* no outputs */ @@ -161,6 +176,7 @@ extern __inline__ void read_lock(rwlock_t *rw) cmp %%g2, %%g3 bne,a,pn %%xcc, 1b ldx [%0],%%g2 + membar #LoadLoad | #LoadStore .text 2 2: ldx [%0], %%g2 3: brlz,pt %%g2, 3b @@ -169,12 +185,13 @@ extern __inline__ void read_lock(rwlock_t *rw) .previous " : /* no outputs */ : "r" (rw) - : "g2", "g3", "memory"); + : "g2", "g3", "cc", "memory"); } extern __inline__ void read_unlock(rwlock_t *rw) { __asm__ __volatile__(" + membar #StoreStore | #LoadStore ldx [%0], %%g2 1: sub %%g2, 1, %%g3 @@ -184,7 +201,7 @@ extern __inline__ void read_unlock(rwlock_t *rw) ldx [%0], %%g2 " : /* no outputs */ : "r" (rw) - : "g2", "g3", "memory"); + : "g2", "g3", "cc", "memory"); } extern __inline__ void write_lock(rwlock_t *rw) @@ -203,6 +220,7 @@ extern __inline__ void write_lock(rwlock_t *rw) andncc %%g3, %%g5, %%g0 bne,a,pn %%xcc, 3f ldx [%0], %%g2 + membar #LoadLoad | #LoadStore .text 2 3: andn %%g2, %%g5, %%g3 @@ -210,6 +228,7 @@ extern __inline__ void write_lock(rwlock_t *rw) cmp %%g2, %%g3 bne,a,pn %%xcc, 3b ldx [%0], %%g2 + membar #LoadLoad | #LoadStore 5: ldx [%0], %%g2 6: brlz,pt %%g2, 6b ldx [%0], %%g2 @@ -222,6 +241,7 @@ extern __inline__ void write_lock(rwlock_t *rw) extern __inline__ void write_unlock(rwlock_t *rw) { __asm__ __volatile__(" + membar #StoreStore | #LoadStore sethi %%uhi(0x8000000000000000), %%g5 ldx [%0], %%g2 sllx %%g5, 32, %%g5 diff --git a/include/asm-sparc64/string.h b/include/asm-sparc64/string.h index b420d80bb..45b166c91 100644 --- a/include/asm-sparc64/string.h +++ b/include/asm-sparc64/string.h @@ -1,4 +1,4 @@ -/* $Id: string.h,v 1.5 1997/05/18 04:16:57 davem Exp $ +/* $Id: string.h,v 1.7 1997/07/13 18:23:44 davem Exp $ * string.h: External definitions for optimized assembly string * routines for the Linux Kernel. 
* @@ -13,8 +13,14 @@ #ifdef __KERNEL__ +#include + extern void __memmove(void *,const void *,__kernel_size_t); extern __kernel_size_t __memcpy(void *,const void *,__kernel_size_t); +extern __kernel_size_t __memcpy_short(void *,const void *,__kernel_size_t,long,long); +extern __kernel_size_t __memcpy_entry(void *,const void *,__kernel_size_t,long,long); +extern __kernel_size_t __memcpy_16plus(void *,const void *,__kernel_size_t,long,long); +extern __kernel_size_t __memcpy_384plus(void *,const void *,__kernel_size_t,long,long); extern __kernel_size_t __memset(void *,int,__kernel_size_t); #ifndef EXPORT_SYMTAB @@ -35,24 +41,11 @@ extern __kernel_size_t __memset(void *,int,__kernel_size_t); extern inline void *__constant_memcpy(void *to, const void *from, __kernel_size_t n) { - extern void __copy_1page(void *, const void *); - if(n) { if(n <= 32) { __builtin_memcpy(to, from, n); } else { -#if 0 - switch(n) { - case 8192: - __copy_1page(to, from); - break; - default: -#endif - __memcpy(to, from, n); -#if 0 - break; - } -#endif + __memcpy(to, from, n); } } return to; @@ -74,15 +67,13 @@ extern inline void *__nonconstant_memcpy(void *to, const void *from, __kernel_si extern inline void *__constant_c_and_count_memset(void *s, char c, __kernel_size_t count) { - extern void *bzero_1page(void *); + extern void *__bzero_1page(void *); extern __kernel_size_t __bzero(void *, __kernel_size_t); if(!c) { -#if 0 - if(count == 8192) - bzero_1page(s); + if (count == 8192) + __bzero_1page(s); else -#endif __bzero(s, count); } else { __memset(s, c, count); diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index d0d88fa5c..6e7c42e55 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -1,4 +1,4 @@ -/* $Id: system.h,v 1.22 1997/06/01 10:27:28 davem Exp $ */ +/* $Id: system.h,v 1.26 1997/06/28 10:04:03 davem Exp $ */ #ifndef __SPARC64_SYSTEM_H #define __SPARC64_SYSTEM_H @@ -95,45 +95,15 @@ extern __inline__ void flushw_user(void) { __asm__ __volatile__(" rdpr %%otherwin, %%g1 - brz,pt %%g1, 2f + brz,pt %%g1, 1f + mov %%o7, %%g3 + call __flushw_user clr %%g2 -1: - save %%sp, %0, %%sp - rdpr %%otherwin, %%g1 - brnz,pt %%g1, 1b - add %%g2, 1, %%g2 -1: - subcc %%g2, 1, %%g2 - bne,pt %%xcc, 1b - restore %%g0, %%g0, %%g0 -2: - " : : "i" (-REGWIN_SZ) - : "g1", "g2", "cc"); +1:" : : : "g1", "g2", "g3"); } #define flush_user_windows flushw_user -#ifdef __SMP__ - -#include - -#define SWITCH_ENTER(prev) \ - if((prev)->flags & PF_USEDFPU) { \ - fprs_write(FPRS_FEF); \ - fpsave((unsigned long *) &(prev)->tss.float_regs[0], \ - &(prev)->tss.fsr); \ - (prev)->flags &= ~PF_USEDFPU; \ - (prev)->tss.kregs->tstate &= ~TSTATE_PEF; \ - } - -#define SWITCH_DO_LAZY_FPU(next) -#else -#define SWITCH_ENTER(prev) -#define SWITCH_DO_LAZY_FPU(next) \ - if(last_task_used_math != (next)) \ - (next)->tss.kregs->tstate &= ~TSTATE_PEF -#endif - /* See what happens when you design the chip correctly? * NOTE NOTE NOTE this is extremely non-trivial what I * am doing here. 
GCC needs only one register to stuff @@ -146,13 +116,13 @@ extern __inline__ void flushw_user(void) do { \ __label__ switch_continue; \ register unsigned long task_pc asm("o7"); \ - SWITCH_ENTER(prev) \ - SWITCH_DO_LAZY_FPU(next); \ + (prev)->tss.kregs->fprs = 0; \ task_pc = ((unsigned long) &&switch_continue) - 0x8; \ __asm__ __volatile__( \ "rdpr %%pstate, %%g2\n\t" \ - "wrpr %%g2, 0x2, %%pstate\n\t" \ + "wrpr %%g2, 0x3, %%pstate\n\t" \ "flushw\n\t" \ +/*XXX*/ "wr %%g0, 0, %%fprs\n\t" \ "stx %%i6, [%%sp + 2047 + 0x70]\n\t" \ "stx %%i7, [%%sp + 2047 + 0x78]\n\t" \ "rdpr %%wstate, %%o5\n\t" \ @@ -160,19 +130,20 @@ do { \ "stx %%o5, [%%g6 + %2]\n\t" \ "rdpr %%cwp, %%o5\n\t" \ "stx %%o7, [%%g6 + %4]\n\t" \ - "stx %%o5, [%%g6 + %5]\n\t" \ + "st %%o5, [%%g6 + %5]\n\t" \ "mov %0, %%g6\n\t" \ - "ldx [%0 + %5], %%g1\n\t" \ - "wr %0, 0x0, %%pic\n\t" \ + "ld [%0 + %5], %%g1\n\t" \ "wrpr %%g1, %%cwp\n\t" \ "ldx [%%g6 + %2], %%o5\n\t" \ "ldx [%%g6 + %3], %%o6\n\t" \ "ldx [%%g6 + %4], %%o7\n\t" \ + "mov %%g6, %0\n\t" \ "wrpr %%o5, 0x0, %%wstate\n\t" \ "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ + "wrpr %%g0, 0x96, %%pstate\n\t" \ "jmpl %%o7 + 0x8, %%g0\n\t" \ - " wrpr %%g2, 0x0, %%pstate\n\t" \ + " mov %0, %%g6\n\t" \ : /* No outputs */ \ : "r" (next), "r" (task_pc), \ "i" ((const unsigned long)(&((struct task_struct *)0)->tss.wstate)), \ @@ -200,15 +171,15 @@ extern __inline__ unsigned long xchg_u64(__volatile__ unsigned long *m, { unsigned long temp; __asm__ __volatile__(" - ldx [%3], %1 -1: + mov %0, %%g1 +1: ldx [%3], %1 casx [%3], %1, %0 cmp %1, %0 bne,a,pn %%xcc, 1b - ldx [%3], %1 + mov %%g1, %0 " : "=&r" (val), "=&r" (temp) : "0" (val), "r" (m) - : "cc"); + : "g1", "cc"); return val; } diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h index 40ad3ee21..c0668e3f2 100644 --- a/include/asm-sparc64/uaccess.h +++ b/include/asm-sparc64/uaccess.h @@ -1,4 +1,4 @@ -/* $Id: uaccess.h,v 1.13 1997/05/29 12:45:04 jj Exp $ */ +/* $Id: uaccess.h,v 1.20 1997/07/13 18:23:45 davem Exp $ */ #ifndef _ASM_UACCESS_H #define _ASM_UACCESS_H @@ -22,26 +22,26 @@ * * "For historical reasons, these macros are grossly misnamed." 
-Linus */ -#define KERNEL_DS 0 -#define USER_DS -1 +#define KERNEL_DS 0x00 +#define USER_DS 0x2B /* har har har */ #define VERIFY_READ 0 #define VERIFY_WRITE 1 #define get_fs() (current->tss.current_ds) #define get_ds() (KERNEL_DS) -extern __inline__ void set_fs(int val) -{ - if (val != current->tss.current_ds) { - if (val == KERNEL_DS) { - flushw_user (); - spitfire_set_secondary_context (0); - } else { - spitfire_set_secondary_context (current->mm->context); - } - current->tss.current_ds = val; - } -} +#define set_fs(val) \ +do { \ + current->tss.current_ds = (val); \ + if ((val) == KERNEL_DS) { \ + flushw_user (); \ + current->tss.ctx = 0; \ + } else { \ + current->tss.ctx = (current->mm->context & 0x1fff); \ + } \ + spitfire_set_secondary_context(current->tss.ctx); \ + __asm__ __volatile__("flush %g6"); \ +} while(0) #define __user_ok(addr,size) 1 #define __kernel_ok (get_fs() == KERNEL_DS) @@ -255,8 +255,44 @@ __asm__ __volatile__( \ extern int __get_user_bad(void); -extern __kernel_size_t __copy_to_user(void *to, void *from, __kernel_size_t size); -extern __kernel_size_t __copy_from_user(void *to, void *from, __kernel_size_t size); +extern __kernel_size_t __memcpy_short(void *to, const void *from, + __kernel_size_t size, + long asi_src, long asi_dst); + +extern __kernel_size_t __memcpy_entry(void *to, const void *from, + __kernel_size_t size, + long asi_src, long asi_dst); + +extern __kernel_size_t __memcpy_16plus(void *to, const void *from, + __kernel_size_t size, + long asi_src, long asi_dst); + +extern __kernel_size_t __memcpy_386plus(void *to, const void *from, + __kernel_size_t size, + long asi_src, long asi_dst); + +extern __kernel_size_t __copy_from_user(void *to, const void *from, + __kernel_size_t size); + +extern __kernel_size_t __copy_to_user(void *to, const void *from, + __kernel_size_t size); + +extern __kernel_size_t __copy_in_user(void *to, const void *from, + __kernel_size_t size); + +#define copy_from_user(to,from,n) \ + __copy_from_user((void *)(to), \ + (void *)(from), (__kernel_size_t)(n)) + +#define copy_from_user_ret(to,from,n,retval) ({ \ +if (copy_from_user(to,from,n)) \ + return retval; \ +}) + +#define __copy_from_user_ret(to,from,n,retval) ({ \ +if (__copy_from_user(to,from,n)) \ + return retval; \ +}) #define copy_to_user(to,from,n) \ __copy_to_user((void *)(to), \ @@ -272,37 +308,27 @@ if (__copy_to_user(to,from,n)) \ return retval; \ }) -#define copy_from_user(to,from,n) \ - __copy_from_user((void *)(to), \ - (void *)(from), (__kernel_size_t)(n)) +#define copy_in_user(to,from,n) \ + __copy_in_user((void *)(to), \ + (void *) (from), (__kernel_size_t)(n)) -#define copy_from_user_ret(to,from,n,retval) ({ \ -if (copy_from_user(to,from,n)) \ +#define copy_in_user_ret(to,from,n,retval) ({ \ +if (copy_in_user(to,from,n)) \ return retval; \ }) -#define __copy_from_user_ret(to,from,n,retval) ({ \ -if (__copy_from_user(to,from,n)) \ +#define __copy_in_user_ret(to,from,n,retval) ({ \ +if (__copy_in_user(to,from,n)) \ return retval; \ }) extern __inline__ __kernel_size_t __clear_user(void *addr, __kernel_size_t size) { - __kernel_size_t ret; - __asm__ __volatile__ (" - .section __ex_table,#alloc - .align 8 - .xword 1f,3 - .previous -1: - wr %%g0, %3, %%asi - mov %2, %%o1 - call __bzero_noasi - mov %1, %%o0 - mov %%o0, %0 - " : "=r" (ret) : "r" (addr), "r" (size), "i" (ASI_S) : - "cc", "o0", "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7"); - return ret; + extern __kernel_size_t __bzero_noasi(void *addr, __kernel_size_t size); + + + __asm__ 
__volatile__ ("wr %%g0, %0, %%asi" : : "i" (ASI_S)); + return __bzero_noasi(addr, size); } #define clear_user(addr,n) \ diff --git a/include/asm-sparc64/uctx.h b/include/asm-sparc64/uctx.h new file mode 100644 index 000000000..1899ff971 --- /dev/null +++ b/include/asm-sparc64/uctx.h @@ -0,0 +1,71 @@ +/* $Id: uctx.h,v 1.1 1997/07/18 06:29:24 ralf Exp $ + * uctx.h: Sparc64 {set,get}context() register state layouts. + * + * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) + */ + +#ifndef __SPARC64_UCTX_H +#define __SPARC64_UCTX_H + +#define MC_TSTATE 0 +#define MC_PC 1 +#define MC_NPC 2 +#define MC_Y 3 +#define MC_G1 4 +#define MC_G2 5 +#define MC_G3 6 +#define MC_G4 7 +#define MC_G5 8 +#define MC_G6 9 +#define MC_G7 10 +#define MC_O0 11 +#define MC_O1 12 +#define MC_O2 13 +#define MC_O3 14 +#define MC_O4 15 +#define MC_O5 16 +#define MC_O6 17 +#define MC_O7 18 +#define MC_NGREG 19 + +typedef unsigned long mc_greg_t; +typedef mc_greg_t mc_gregset_t[MC_NGREG]; + +#define MC_MAXFPQ 16 +struct mc_fq { + unsigned long *mcfq_addr; + unsigned int mcfq_insn; +}; + +struct mc_fpu { + union { + unsigned int sregs[32]; + unsigned long dregs[32]; + long double qregs[16]; + } mcfpu_fregs; + unsigned long mcfpu_fsr; + unsigned long mcfpu_fprs; + unsigned long mcfpu_gsr; + struct mc_fq *mcfpu_fq; + unsigned char mcfpu_qcnt; + unsigned char mcfpu_qentsz; + unsigned char mcfpu_enab; +}; +typedef struct mc_fpu mc_fpu_t; + +typedef struct { + mc_gregset_t mc_gregs; + mc_greg_t mc_fp; + mc_greg_t mc_i7; + mc_fpu_t mc_fpregs; +} mcontext_t; + +struct ucontext { + struct ucontext *uc_link; + unsigned long uc_flags; + sigset_t uc_sigmask; + mcontext_t uc_mcontext; +}; +typedef struct ucontext ucontext_t; + +#endif /* __SPARC64_UCTX_H */ diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index cb17f1888..27afe645e 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -1,4 +1,4 @@ -/* $Id: unistd.h,v 1.5 1997/05/21 10:21:57 jj Exp $ */ +/* $Id: unistd.h,v 1.7 1997/06/16 05:37:44 davem Exp $ */ #ifndef _SPARC64_UNISTD_H #define _SPARC64_UNISTD_H @@ -113,35 +113,35 @@ #define __NR_setdopt 94 /* SunOS Specific */ #define __NR_fsync 95 /* Common */ #define __NR_setpriority 96 /* Common */ -#define __NR_socket 97 /* SunOS Specific */ -#define __NR_connect 98 /* SunOS Specific */ -#define __NR_accept 99 /* SunOS Specific */ +#define __NR_socket 97 /* Common */ +#define __NR_connect 98 /* Common */ +#define __NR_accept 99 /* Common */ #define __NR_getpriority 100 /* Common */ -#define __NR_send 101 /* SunOS Specific */ -#define __NR_recv 102 /* SunOS Specific */ +#define __NR_send 101 /* Common */ +#define __NR_recv 102 /* Common */ /* #define __NR_ni_syscall 103 ENOSYS under SunOS */ -#define __NR_bind 104 /* SunOS Specific */ -#define __NR_setsockopt 105 /* SunOS Specific */ -#define __NR_listen 106 /* SunOS Specific */ +#define __NR_bind 104 /* Common */ +#define __NR_setsockopt 105 /* Common */ +#define __NR_listen 106 /* Common */ /* #define __NR_ni_syscall 107 ENOSYS under SunOS */ #define __NR_sigvec 108 /* SunOS Specific */ #define __NR_sigblock 109 /* SunOS Specific */ #define __NR_sigsetmask 110 /* SunOS Specific */ #define __NR_sigpause 111 /* SunOS Specific */ #define __NR_sigstack 112 /* SunOS Specific */ -#define __NR_recvmsg 113 /* SunOS Specific */ -#define __NR_sendmsg 114 /* SunOS Specific */ +#define __NR_recvmsg 113 /* Common */ +#define __NR_sendmsg 114 /* Common */ #define __NR_vtrace 115 /* SunOS Specific */ #define __NR_gettimeofday 116 /* 
Common */ #define __NR_getrusage 117 /* Common */ -#define __NR_getsockopt 118 /* SunOS Specific */ +#define __NR_getsockopt 118 /* Common */ /* #define __NR_ni_syscall 119 ENOSYS under SunOS */ #define __NR_readv 120 /* Common */ #define __NR_writev 121 /* Common */ #define __NR_settimeofday 122 /* Common */ #define __NR_fchown 123 /* Common */ #define __NR_fchmod 124 /* Common */ -#define __NR_recvfrom 125 /* SunOS Specific */ +#define __NR_recvfrom 125 /* Common */ #define __NR_setreuid 126 /* Common */ #define __NR_setregid 127 /* Common */ #define __NR_rename 128 /* Common */ @@ -149,15 +149,15 @@ #define __NR_ftruncate 130 /* Common */ #define __NR_flock 131 /* Common */ /* #define __NR_ni_syscall 132 ENOSYS under SunOS */ -#define __NR_sendto 133 /* SunOS Specific */ -#define __NR_shutdown 134 /* SunOS Specific */ -#define __NR_socketpair 135 /* SunOS Specific */ +#define __NR_sendto 133 /* Common */ +#define __NR_shutdown 134 /* Common */ +#define __NR_socketpair 135 /* Common */ #define __NR_mkdir 136 /* Common */ #define __NR_rmdir 137 /* Common */ #define __NR_utimes 138 /* SunOS Specific */ /* #define __NR_ni_syscall 139 ENOSYS under SunOS */ #define __NR_adjtime 140 /* SunOS Specific */ -#define __NR_getpeername 141 /* SunOS Specific */ +#define __NR_getpeername 141 /* Common */ #define __NR_gethostid 142 /* SunOS Specific */ /* #define __NR_ni_syscall 143 ENOSYS under SunOS */ #define __NR_getrlimit 144 /* Common */ @@ -166,7 +166,7 @@ /* #define __NR_ni_syscall 147 ENOSYS under SunOS */ /* #define __NR_ni_syscall 148 ENOSYS under SunOS */ /* #define __NR_ni_syscall 149 ENOSYS under SunOS */ -#define __NR_getsockname 150 /* SunOS Specific */ +#define __NR_getsockname 150 /* Common */ #define __NR_getmsg 151 /* SunOS Specific */ #define __NR_putmsg 152 /* SunOS Specific */ #define __NR_poll 153 /* SunOS Specific */ @@ -467,6 +467,7 @@ static __inline__ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned lo #endif /* __KERNEL_SYSCALLS__ */ +#ifdef __KERNEL__ /* sysconf options, for SunOS compatibility */ #define _SC_ARG_MAX 1 #define _SC_CHILD_MAX 2 @@ -476,5 +477,6 @@ static __inline__ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned lo #define _SC_JOB_CONTROL 6 #define _SC_SAVED_IDS 7 #define _SC_VERSION 8 +#endif #endif /* _SPARC64_UNISTD_H */ diff --git a/include/asm-sparc64/vaddrs.h b/include/asm-sparc64/vaddrs.h index cd82abb06..b88085668 100644 --- a/include/asm-sparc64/vaddrs.h +++ b/include/asm-sparc64/vaddrs.h @@ -1,4 +1,4 @@ -/* $Id: vaddrs.h,v 1.6 1997/04/04 00:50:31 davem Exp $ */ +/* $Id: vaddrs.h,v 1.8 1997/06/27 14:55:13 jj Exp $ */ #ifndef _SPARC64_VADDRS_H #define _SPARC64_VADDRS_H @@ -14,12 +14,15 @@ * mappings for devices and is the speedup improvements of not loading * a pointer and then the value in the assembly code */ -#define IOBASE_VADDR 0xfffffd0000000000ULL /* Base for mapping pages */ -#define IOBASE_LEN 0x0000008000000000ULL /* Length of the IO area */ -#define IOBASE_END 0xfffffd8000000000ULL -#define DVMA_VADDR 0xfffffd8000000000ULL /* Base area of the DVMA on suns */ -#define DVMA_LEN 0x0000004000000000ULL /* Size of the DVMA address space */ -#define DVMA_END 0xfffffdc000000000ULL +#define IOBASE_VADDR 0x0000006000000000ULL /* Base for mapping pages */ +#define IOBASE_LEN 0x0000001000000000ULL /* Length of the IO area */ +#define IOBASE_END 0x0000007000000000ULL +#define DVMA_VADDR 0x0000007000000000ULL /* Base area of the DVMA on suns */ +#define DVMA_LEN 0x0000001000000000ULL /* Size of the DVMA address space */ +#define 
DVMA_END 0x0000008000000000ULL +#define MODULES_VADDR 0x0000000001000000ULL /* Where to map modules */ +#define MODULES_LEN 0x000000007f000000ULL +#define MODULES_END 0x0000000080000000ULL #endif /* !(_SPARC_VADDRS_H) */ -- cgit v1.2.3
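
A note on the tstate_to_psr() rewrite in psrcompat.h above: the 32-bit integer condition codes sit twelve bits higher in the V9 TSTATE register than in the V8 PSR, which is where the ">> 12" comes from. The following is only an illustrative sketch of that one step; the DEMO_* masks are assumptions based on the V8/V9 register layouts (icc in tstate bits 35..32, in the PSR bits 23..20) and are not taken from this patch.

/* Hedged sketch: move the icc field from a V9 tstate into PSR position. */
#define DEMO_TSTATE_ICC 0x0000000f00000000UL    /* assumed: tstate bits 35..32 */
#define DEMO_PSR_ICC    0x0000000000f00000UL    /* assumed: PSR bits 23..20    */

static unsigned int demo_tstate_icc_to_psr(unsigned long tstate)
{
        /* bit 32 of tstate must land on bit 20 of the PSR: shift by 12 */
        return (unsigned int)((tstate & DEMO_TSTATE_ICC) >> 12);
}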
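
The extra membar instructions added to spinlock.h above follow from the comment introduced there: the kernel now runs the CPU in RMO, so taking the lock needs a #LoadLoad|#LoadStore barrier behind it and dropping the lock needs a #StoreStore|#LoadStore barrier in front of it. Below is a rough C-level sketch of the same acquire/release discipline, written with GCC atomic builtins purely for illustration; the patch itself uses ldstub/stb inline assembly, and the demo_* names are hypothetical.

/* Illustrative only: a byte lock with explicit acquire/release barriers. */
typedef volatile unsigned char demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *lock)
{
        while (__sync_lock_test_and_set(lock, 0xff))    /* like ldstub        */
                while (*lock)                           /* spin on plain load */
                        ;
        __sync_synchronize();   /* stands in for membar #LoadLoad | #LoadStore */
}

static void demo_spin_unlock(demo_spinlock_t *lock)
{
        __sync_synchronize();   /* stands in for membar #StoreStore | #LoadStore */
        *lock = 0;              /* like stb %g0, [lock]                          */
}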
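
Two things in the uaccess.h hunk above are worth a usage sketch: set_fs() now also reloads the secondary context register (so the KERNEL_DS/USER_DS selection is visible to the ASI-based copy routines), and a copy_in_user() helper is added for user-to-user copies. The sketch below shows the typical calling patterns only; the demo_*() helpers and the error handling are hypothetical, and it assumes <asm/uaccess.h> and <linux/errno.h> are included.

/* Hedged sketch; not part of the patch. */
static int demo_dup_user_buffer(void *dst_user, const void *src_user,
                                __kernel_size_t len)
{
        /* user-to-user copy with the new helper; nonzero means a fault */
        if (copy_in_user(dst_user, src_user, len))
                return -EFAULT;
        return 0;
}

static void demo_call_with_kernel_ds(void (*helper)(void *), void *kernel_buf)
{
        unsigned long old_fs = get_fs();

        set_fs(KERNEL_DS);      /* address checks (and now the secondary
                                 * context) accept kernel pointers       */
        helper(kernel_buf);     /* may use copy_{to,from}_user() internally */
        set_fs(old_fs);
}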
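
The new uctx.h defines the register-state layout used by the sparc64 {set,get}context() interface. As a quick orientation aid, here is a hypothetical helper (not from the patch) showing how the MC_* indices address the general registers inside mcontext_t:

/* Hypothetical helper, shown only to illustrate the mcontext_t layout. */
static void demo_read_context(const struct ucontext *uc,
                              unsigned long *pc, unsigned long *sp)
{
        *pc = uc->uc_mcontext.mc_gregs[MC_PC];
        *sp = uc->uc_mcontext.mc_gregs[MC_O6];  /* %o6 is the stack pointer */
        /* the frame pointer and return address are kept separately in
         * uc->uc_mcontext.mc_fp and uc->uc_mcontext.mc_i7               */
}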