author     Ralf Baechle <ralf@linux-mips.org>   1997-06-17 13:20:30 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1997-06-17 13:20:30 +0000
commit     7acb77a6e7bddd4c4c5aa975bbf976927c013798 (patch)
tree       4139829ec6edb85f73774bb95cdec376758bfc73 /include/asm-sparc64
parent     64d58d4c8cd6a89ee218301ec0dc0ebfec91a4db (diff)
Merge with 2.1.43.
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/bitops.h      74
-rw-r--r--  include/asm-sparc64/byteorder.h   85
-rw-r--r--  include/asm-sparc64/checksum.h    76
-rw-r--r--  include/asm-sparc64/fpumacro.h    43
-rw-r--r--  include/asm-sparc64/head.h        35
-rw-r--r--  include/asm-sparc64/namei.h       73
-rw-r--r--  include/asm-sparc64/pgtable.h     68
-rw-r--r--  include/asm-sparc64/psrcompat.h    4
-rw-r--r--  include/asm-sparc64/pstate.h      30
-rw-r--r--  include/asm-sparc64/system.h      36
-rw-r--r--  include/asm-sparc64/uaccess.h     30
-rw-r--r--  include/asm-sparc64/vuid_event.h   2
12 files changed, 332 insertions, 224 deletions
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index b76772016..5060d88ae 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.13 1997/05/27 06:47:16 davem Exp $
+/* $Id: bitops.h,v 1.16 1997/05/28 13:48:56 jj Exp $
  * bitops.h: Bit string operations on the V9.
  *
  * Copyright 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -121,11 +121,33 @@ extern __inline__ unsigned long ffz(unsigned long word)
 	: "0" (word) : "g1", "g2");
 #else
+#ifdef EASY_CHEESE_VERSION
 	result = 0;
 	while(word & 1) {
 		result++;
 		word >>= 1;
 	}
+#else
+	unsigned long tmp;
+
+	result = 0;
+	tmp = ~word & -~word;
+	if (!(unsigned)tmp) {
+		tmp >>= 32;
+		result = 32;
+	}
+	if (!(unsigned short)tmp) {
+		tmp >>= 16;
+		result += 16;
+	}
+	if (!(unsigned char)tmp) {
+		tmp >>= 8;
+		result += 8;
+	}
+	if (tmp & 0xf0) result += 4;
+	if (tmp & 0xcc) result += 2;
+	if (tmp & 0xaa) result ++;
+#endif
 #endif
 	return result;
 }
@@ -137,29 +159,31 @@ extern __inline__ unsigned long ffz(unsigned long word)
 extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
 {
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
-	unsigned long result = offset & ~31UL;
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+	unsigned long result = offset & ~63UL;
 	unsigned long tmp;
 
 	if (offset >= size)
 		return size;
 	size -= result;
-	offset &= 31UL;
+	offset &= 63UL;
 	if (offset) {
 		tmp = *(p++);
-		tmp |= ~0UL >> (32-offset);
-		if (size < 32)
+		tmp |= ~0UL >> (64-offset);
+		if (size < 64)
 			goto found_first;
 		if (~tmp)
 			goto found_middle;
-		size -= 32;
-		result += 32;
+		size -= 64;
+		result += 64;
 	}
-	while (size & ~31UL) {
+	offset = size >> 6;
+	size &= 63UL;
+	while (offset) {
 		if (~(tmp = *(p++)))
 			goto found_middle;
-		result += 32;
-		size -= 32;
+		result += 64;
+		offset--;
 	}
 	if (!size)
 		return result;
@@ -248,9 +272,16 @@ extern __inline__ unsigned long __swab64(unsigned long value)
 		((value<<56) & 0xff00000000000000));
 }
 
+extern __inline__ unsigned long __swab64p(unsigned long *addr)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__ ("ldxa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL));
+	return ret;
+}
+
 extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
 {
-	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
+	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
 	unsigned long result = offset & ~63UL;
 	unsigned long tmp;
 
@@ -259,8 +290,8 @@ extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long
 	size -= result;
 	offset &= 63UL;
 	if(offset) {
-		tmp = *(p++);
-		tmp |= __swab64((~0UL >> (64-offset)));
+		tmp = __swab64p(p++);
+		tmp |= (~0UL >> (64-offset));
 		if(size < 64)
 			goto found_first;
 		if(~tmp)
@@ -268,20 +299,21 @@ extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long
 		size -= 64;
 		result += 64;
 	}
-	while(size & ~63UL) {
-		if(~(tmp = *(p++)))
+	offset = size >> 6;
+	size &= 63UL;
+	while(offset) {
+		if(~(tmp = __swab64p(p++)))
 			goto found_middle;
 		result += 64;
-		size -= 64;
+		offset--;
 	}
 	if(!size)
 		return result;
-	tmp = *p;
-
+	tmp = __swab64p(p);
 found_first:
-	return result + ffz(__swab64(tmp) | (~0UL << size));
+	tmp |= (~0UL << size);
 found_middle:
-	return result + ffz(__swab64(tmp));
+	return result + ffz(tmp);
 }
 
 #ifdef __KERNEL__
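A note on the ffz() rewrite above: `~word & -~word` isolates the lowest *clear* bit of `word` as a one-hot value, and the cascade of width tests converts it to an index without a data-dependent loop. A stand-alone sketch of the same trick (my naming; it assumes a 64-bit `unsigned long`, as on sparc64, and that `word` has at least one zero bit):

```c
#include <stdio.h>

static unsigned long ffz_branchless(unsigned long word)
{
	unsigned long n = ~word;
	unsigned long tmp = n & -n;	/* one-hot: lowest set bit of ~word */
	unsigned long result = 0;

	if (!(unsigned int)tmp)   { tmp >>= 32; result = 32; }
	if (!(unsigned short)tmp) { tmp >>= 16; result += 16; }
	if (!(unsigned char)tmp)  { tmp >>= 8;  result += 8; }
	if (tmp & 0xf0) result += 4;
	if (tmp & 0xcc) result += 2;
	if (tmp & 0xaa) result += 1;
	return result;
}

int main(void)
{
	unsigned long samples[] = { 0x0UL, 0x1UL, 0xffUL, 0xffffffffUL };

	for (int i = 0; i < 4; i++)	/* expect 0, 1, 8, 32 */
		printf("ffz(%#lx) = %lu\n", samples[i], ffz_branchless(samples[i]));
	return 0;
}
```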
diff --git a/include/asm-sparc64/byteorder.h b/include/asm-sparc64/byteorder.h
index 21f4b0ba0..2325ef29c 100644
--- a/include/asm-sparc64/byteorder.h
+++ b/include/asm-sparc64/byteorder.h
@@ -1,7 +1,9 @@
-/* $Id: byteorder.h,v 1.4 1997/05/26 23:37:47 davem Exp $ */
+/* $Id: byteorder.h,v 1.5 1997/05/28 11:35:41 jj Exp $ */
 #ifndef _SPARC64_BYTEORDER_H
 #define _SPARC64_BYTEORDER_H
 
+#include <asm/asi.h>
+
 #define ntohl(x) ((unsigned long int)(x))
 #define ntohs(x) ((unsigned short int)(x))
 #define htonl(x) ((unsigned long int)(x))
@@ -34,22 +36,87 @@ extern __inline__ __u32 cpu_to_le32(__u32 value)
 	return((value>>24) | ((value>>8)&0xff00) |
 	       ((value<<8)&0xff0000) | (value<<24));
 }
+
+extern __inline__ __u64 cpu_to_le64(__u64 value)
+{
+	return (((value>>56) & 0x00000000000000ffUL) |
+		((value>>40) & 0x000000000000ff00UL) |
+		((value>>24) & 0x0000000000ff0000UL) |
+		((value>>8)  & 0x00000000ff000000UL) |
+		((value<<8)  & 0x000000ff00000000UL) |
+		((value<<24) & 0x0000ff0000000000UL) |
+		((value<<40) & 0x00ff000000000000UL) |
+		((value<<56) & 0xff00000000000000UL));
+}
 
 #define cpu_to_be16(x) (x)
 #define cpu_to_be32(x) (x)
+#define cpu_to_be64(x) (x)
 
-/* Convert from specified byte order, to CPU byte order. */
-extern __inline__ __u16 le16_to_cpu(__u16 value)
+/* The same, but returns converted value from the location pointer by addr. */
+extern __inline__ __u16 cpu_to_le16p(__u16 *addr)
 {
-	return (value >> 8) | (value << 8);
+	__u16 ret;
+
+	__asm__ __volatile__ ("lduha [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL));
+	return ret;
 }
 
-extern __inline__ __u32 le32_to_cpu(__u32 value)
+extern __inline__ __u32 cpu_to_le32p(__u32 *addr)
 {
-	return((value>>24) | ((value>>8)&0xff00) |
-	       ((value<<8)&0xff0000) | (value<<24));
+	__u32 ret;
+
+	__asm__ __volatile__ ("lduwa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL));
+	return ret;
+}
+
+extern __inline__ __u64 cpu_to_le64p(__u64 *addr)
+{
+	__u64 ret;
+
+	__asm__ __volatile__ ("ldxa [%1] %2, %0" : "=r" (ret) : "r" (addr), "i" (ASI_PL));
+	return ret;
+}
+
+extern __inline__ __u16 cpu_to_be16p(__u16 *addr) { return *addr; }
+extern __inline__ __u32 cpu_to_be32p(__u32 *addr) { return *addr; }
+extern __inline__ __u64 cpu_to_be64p(__u64 *addr) { return *addr; }
+
+/* The same, but do the conversion in situ, ie. put the value back to addr. */
+extern __inline__ void cpu_to_le16s(__u16 *addr)
+{
+	*addr = cpu_to_le16p(addr);
+}
+
+extern __inline__ void cpu_to_le32s(__u32 *addr)
+{
+	*addr = cpu_to_le32p(addr);
+}
+
+extern __inline__ void cpu_to_le64s(__u64 *addr)
+{
+	*addr = cpu_to_le64p(addr);
 }
 
-#define be16_to_cpu(x) (x)
-#define be32_to_cpu(x) (x)
+#define cpu_to_be16s(x) do { } while (0)
+#define cpu_to_be32s(x) do { } while (0)
+#define cpu_to_be64s(x) do { } while (0)
+
+/* Convert from specified byte order, to CPU byte order. */
+#define le16_to_cpu(x) cpu_to_le16(x)
+#define le32_to_cpu(x) cpu_to_le32(x)
+#define le64_to_cpu(x) cpu_to_le64(x)
+#define be16_to_cpu(x) cpu_to_be16(x)
+#define be32_to_cpu(x) cpu_to_be32(x)
+#define be64_to_cpu(x) cpu_to_be64(x)
+
+#define le16_to_cpup(x) cpu_to_le16p(x)
+#define le32_to_cpup(x) cpu_to_le32p(x)
+#define le64_to_cpup(x) cpu_to_le64p(x)
+#define be16_to_cpup(x) cpu_to_be16p(x)
+#define be32_to_cpup(x) cpu_to_be32p(x)
+#define be64_to_cpup(x) cpu_to_be64p(x)
+
+#define le16_to_cpus(x) cpu_to_le16s(x)
+#define le32_to_cpus(x) cpu_to_le32s(x)
+#define le64_to_cpus(x) cpu_to_le64s(x)
+#define be16_to_cpus(x) cpu_to_be16s(x)
+#define be32_to_cpus(x) cpu_to_be32s(x)
+#define be64_to_cpus(x) cpu_to_be64s(x)
 
 #endif /* __KERNEL__ */
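Context for the byteorder.h additions: sparc64 is big-endian, so the cpu_to_le*/le*_to_cpu family must byte-swap, and the new pointer variants get the swap for free by loading through the little-endian ASI (ASI_PL) instead of shifting. A portable restatement of the eight-shift swap in cpu_to_le64(), with a round-trip check; illustrative only, not the kernel interface:

```c
#include <stdio.h>
#include <stdint.h>

static uint64_t swab64(uint64_t v)
{
	return ((v >> 56) & 0x00000000000000ffULL) |
	       ((v >> 40) & 0x000000000000ff00ULL) |
	       ((v >> 24) & 0x0000000000ff0000ULL) |
	       ((v >>  8) & 0x00000000ff000000ULL) |
	       ((v <<  8) & 0x000000ff00000000ULL) |
	       ((v << 24) & 0x0000ff0000000000ULL) |
	       ((v << 40) & 0x00ff000000000000ULL) |
	       ((v << 56) & 0xff00000000000000ULL);
}

int main(void)
{
	uint64_t x = 0x0123456789abcdefULL;

	printf("%016llx -> %016llx\n",
	       (unsigned long long)x, (unsigned long long)swab64(x));
	return swab64(swab64(x)) == x ? 0 : 1;	/* double swap is identity */
}
```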
diff --git a/include/asm-sparc64/checksum.h b/include/asm-sparc64/checksum.h
index 63dbfec3d..d04abac7e 100644
--- a/include/asm-sparc64/checksum.h
+++ b/include/asm-sparc64/checksum.h
@@ -1,4 +1,4 @@
-/* $Id: checksum.h,v 1.7 1997/05/14 07:02:44 davem Exp $ */
+/* $Id: checksum.h,v 1.8 1997/05/29 12:45:03 jj Exp $ */
 #ifndef __SPARC64_CHECKSUM_H
 #define __SPARC64_CHECKSUM_H
 
@@ -41,7 +41,7 @@ extern unsigned int csum_partial(unsigned char * buff, int len, unsigned int sum
 #define csum_partial_copy(src, dst, len, sum) \
 		csum_partial_copy_nocheck(src,dst,len,sum)
 #define csum_partial_copy_fromuser(s, d, l, w) \
-		csum_partial_copy((char *) (s), (d), (l), (w))
+		csum_partial_copy_from_user((char *) (s), (d), (l), (w), NULL)
 
 extern __inline__ unsigned int
 csum_partial_copy_nocheck (const char *src, char *dst, int len,
@@ -50,12 +50,13 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len,
 	register unsigned long ret asm("o0") = (unsigned long)src;
 	register char *d asm("o1") = dst;
 	register unsigned long l asm("g1") = len;
-	
+
 	__asm__ __volatile__ ("
+	wr	%%g0, %5, %%asi
 	call	__csum_partial_copy_sparc_generic
 	 mov	%4, %%g7
 	srl	%%o0, 0, %%o0
-	" : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (sum) :
+	" : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (sum), "i" (ASI_P) :
 	"o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
 	return (unsigned int)ret;
 }
@@ -64,58 +65,35 @@ extern __inline__ unsigned int
 csum_partial_copy_from_user(const char *src, char *dst, int len,
 			    unsigned int sum, int *err)
 {
-	if (!access_ok (VERIFY_READ, src, len)) {
-		*err = -EFAULT;
-		memset (dst, 0, len);
-		return sum;
-	} else {
-		register unsigned long ret asm("o0") = (unsigned long)src;
-		register char *d asm("o1") = dst;
-		register unsigned long l asm("g1") = len;
-		register unsigned long s asm("g7") = sum;
-
-		__asm__ __volatile__ ("
-		.section __ex_table,#alloc
-		.align 4
-		.word 1f,2
-		.previous
+	register unsigned long ret asm("o0") = (unsigned long)src;
+	register char *d asm("o1") = dst;
+	register unsigned long l asm("g1") = len;
+	register unsigned long s asm("g7") = sum;
+
+	__asm__ __volatile__ ("
+	.section __ex_table,#alloc
+	.align 8
+	.xword 1f,2
+	.previous
+	wr	%%g0, %6, %%asi
 1:
-		call __csum_partial_copy_sparc_generic
-		 stx %5, [%%sp + 0x7ff + 128]
-		srl %%o0, 0, %%o0
-		" : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
-		"o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
-		return (unsigned int)ret;
-	}
+	call	__csum_partial_copy_sparc_generic
+	 stx	%5, [%%sp + 0x7ff + 128]
+	srl	%%o0, 0, %%o0
+	" : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err), "i" (ASI_S) :
+	"o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
+	return (unsigned int)ret;
 }
-
+
+#if 0
+/* Not implemented, but nobody uses it yet... */
 extern __inline__ unsigned int
 csum_partial_copy_to_user(const char *src, char *dst, int len,
 			  unsigned int sum, int *err)
 {
-	if (!access_ok (VERIFY_WRITE, dst, len)) {
-		*err = -EFAULT;
-		return sum;
-	} else {
-		register unsigned long ret asm("o0") = (unsigned long)src;
-		register char *d asm("o1") = dst;
-		register unsigned long l asm("g1") = len;
-		register unsigned long s asm("g7") = sum;
-
-		__asm__ __volatile__ ("
-		.section __ex_table,#alloc
-		.align 4
-		.word 1f,1
-		.previous
-1:
-		call __csum_partial_copy_sparc_generic
-		 stx %5, [%%sp + 0x7ff + 128]
-		srl %%o0, 0, %%o0
-		" : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
-		"o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g5", "g7");
-		return (unsigned int)ret;
-	}
+	return 0;
 }
+#endif
 
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
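For reference, the assembly helper __csum_partial_copy_sparc_generic that these wrappers set up computes the Internet one's-complement checksum while copying. A plain-C version of that checksum (RFC 1071 style, not the kernel's interface) shows the fold step any such partial sum ultimately needs:

```c
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t inet_checksum(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {		/* sum 16-bit big-endian words */
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)			/* pad an odd trailing byte */
		sum += (uint32_t)(p[0] << 8);
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* the worked example from RFC 1071; expected checksum 0x220d */
	uint8_t data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

	printf("checksum = %#06x\n", inet_checksum(data, sizeof(data), 0));
	return 0;
}
```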
diff --git a/include/asm-sparc64/fpumacro.h b/include/asm-sparc64/fpumacro.h
index 9928a38c2..f6323254d 100644
--- a/include/asm-sparc64/fpumacro.h
+++ b/include/asm-sparc64/fpumacro.h
@@ -1,12 +1,27 @@
 /* fpumacro.h: FPU related macros.
  *
  * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  */
 
 #ifndef _SPARC64_FPUMACRO_H
 #define _SPARC64_FPUMACRO_H
 
-extern __inline__ void fpsave32(unsigned long *fpregs, unsigned long *fsr)
+extern __inline__ unsigned long fprs_read(void)
+{
+	unsigned long retval;
+
+	__asm__ __volatile__("rd %%fprs, %0" : "=r" (retval));
+
+	return retval;
+}
+
+extern __inline__ void fprs_write(unsigned long val)
+{
+	__asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val));
+}
+
+extern __inline__ void fpsave32(unsigned int *fpregs, unsigned long *fsr)
 {
 	__asm__ __volatile__ ("
 	wr	%%g0, %2, %%asi
@@ -16,7 +31,7 @@ extern __inline__ void fpsave32(unsigned long *fpregs, unsigned long *fsr)
 	" : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P));
 }
 
-extern __inline__ void fpload32(unsigned long *fpregs, unsigned long *fsr)
+extern __inline__ void fpload32(unsigned int *fpregs, unsigned long *fsr)
 {
 	__asm__ __volatile__ ("
 	wr	%%g0, %2, %%asi
@@ -26,7 +41,27 @@ extern __inline__ void fpload32(unsigned long *fpregs, unsigned long *fsr)
 	" : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P));
 }
 
-extern __inline__ void fpsave(unsigned long *fpregs, unsigned long *fsr)
+extern __inline__ void fpsave64hi(unsigned int *fpregs, unsigned long *fsr)
+{
+	__asm__ __volatile__ ("
+	wr	%%g0, %2, %%asi
+	stx	%%fsr, [%1]
+	stda	%%f32, [%0 + 128] %%asi
+	stda	%%f48, [%0 + 192] %%asi
+	" : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P));
+}
+
+extern __inline__ void fpload64hi(unsigned int *fpregs, unsigned long *fsr)
+{
+	__asm__ __volatile__ ("
+	wr	%%g0, %2, %%asi
+	ldda	[%0 + 128] %%asi, %%f32
+	ldda	[%0 + 192] %%asi, %%f48
+	ldx	[%1], %%fsr
+	" : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P));
+}
+
+extern __inline__ void fpsave(unsigned int *fpregs, unsigned long *fsr)
 {
 	__asm__ __volatile__ ("
 	wr	%%g0, %2, %%asi
@@ -38,7 +73,7 @@ extern __inline__ void fpsave(unsigned long *fpregs, unsigned long *fsr)
 	" : : "r" (fpregs), "r" (fsr), "i" (ASI_BLK_P));
 }
 
-extern __inline__ void fpload(unsigned long *fpregs, unsigned long *fsr)
+extern __inline__ void fpload(unsigned int *fpregs, unsigned long *fsr)
 {
 	__asm__ __volatile__ ("
 	wr	%%g0, %2, %%asi
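The fpsave/fpload prototypes above switch from `unsigned long *` to `unsigned int *` because the register image is naturally an array of 32-bit `%f` registers; the new 64hi variants touch only the upper half of the image, at byte offsets 128 and 192 (one 64-byte block store each). A hypothetical struct mirroring that 256-byte layout — the type and field names here are mine, not the kernel's:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fpu_image {
	uint32_t lower[32];	/* %f0..%f31, offsets 0..124 */
	uint64_t upper[16];	/* %f32,%f34,..,%f62, offsets 128..248 */
};

int main(void)
{
	assert(sizeof(struct fpu_image) == 256);
	assert(offsetof(struct fpu_image, upper[0]) == 128);	/* stda %f32 */
	assert(offsetof(struct fpu_image, upper[8]) == 192);	/* stda %f48 */
	return 0;
}
```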
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 7127ca74c..62fe9a08f 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -1,4 +1,4 @@
-/* $Id: head.h,v 1.21 1997/05/27 06:28:17 davem Exp $ */
+/* $Id: head.h,v 1.22 1997/06/02 06:33:40 davem Exp $ */
 #ifndef _SPARC64_HEAD_H
 #define _SPARC64_HEAD_H
 
@@ -9,12 +9,13 @@
 
 /* We need a "cleaned" instruction... */
 #define CLEAN_WINDOW \
+	rdpr %cleanwin, %l0; add %l0, 1, %l0; \
+	wrpr %l0, 0x0, %cleanwin; \
 	clr %o0; clr %o1; clr %o2; clr %o3; \
 	clr %o4; clr %o5; clr %o6; clr %o7; \
 	clr %l0; clr %l1; clr %l2; clr %l3; \
 	clr %l4; clr %l5; clr %l6; clr %l7; \
-	rdpr %cleanwin, %g1; add %g1, 1, %g1; \
-	wrpr %g1, 0x0, %cleanwin; retry; \
+	retry; \
 	nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
 
 #define TRAP(routine) \
@@ -23,7 +24,7 @@
 	call routine; \
 	 add %sp, STACK_BIAS + REGWIN_SZ, %o0; \
 	ba,pt %xcc, rtrap; \
-	 nop; \
+	 clr %l6; \
 	nop; \
 	nop;
@@ -38,7 +39,7 @@
 	call routine; \
 	 add %sp, STACK_BIAS + REGWIN_SZ, %o0; \
 	ba,pt %xcc, rtrap; \
-	 nop; \
+	 clr %l6; \
 	nop; \
 	nop;
@@ -60,7 +61,7 @@
 	call routine; \
 	 mov arg, %o1; \
 	ba,pt %xcc, rtrap; \
-	 nop; \
+	 clr %l6; \
 	nop;
 
 #define TRAPTL1_ARG(routine, arg) \
@@ -70,7 +71,7 @@
 	call routine; \
 	 mov arg, %o1; \
 	ba,pt %xcc, rtrap; \
-	 nop; \
+	 clr %l6; \
 	nop;
 
 #define SYSCALL_TRAP(routine, systbl) \
@@ -89,7 +90,7 @@
 	call routine; \
 	 add %sp, STACK_BIAS + REGWIN_SZ, %o0; \
 	ba,pt %xcc, rtrap; \
-	 nop;
+	 clr %l6;
 
 #define ACCESS_EXCEPTION_TRAPTL1(routine) \
 	rdpr %pstate, %g1; \
@@ -99,7 +100,7 @@
 	call routine; \
 	 add %sp, STACK_BIAS + REGWIN_SZ, %o0; \
 	ba,pt %xcc, rtrap; \
-	 nop;
+	 clr %l6;
 
 #define SUNOS_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sunos_sys_table)
 #define LINUX_32BIT_SYSCALL_TRAP SYSCALL_TRAP(linux_sparc_syscall, sys_call_table32)
@@ -120,7 +121,7 @@
 	 mov level, %o0; \
 	call routine; \
 	 add %sp, STACK_BIAS + REGWIN_SZ, %o1; \
-	ba,a,pt %xcc, rtrap;
+	ba,a,pt %xcc, rtrap_clr_l6;
 
 /* On UP this is ok, and worth the effort, for SMP we need
  * a different mechanism and thus cannot do it all in trap table. -DaveM
@@ -150,7 +151,7 @@
 	ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1; \
 	add %l1, 4, %l2; \
 	stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]; \
-	ba,pt %xcc, rtrap; \
+	ba,pt %xcc, rtrap_clr_l6; \
 	 stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC];
 
 /* Before touching these macros, you owe it to yourself to go and
@@ -198,7 +199,8 @@
 	stxa %i6, [%sp + STACK_BIAS + 0x70] %asi; \
 	stxa %i7, [%sp + STACK_BIAS + 0x78] %asi; \
 	saved; retry; nop; nop; nop; nop; nop; nop; \
-	nop; nop; nop; nop; nop; nop; \
+	nop; nop; nop; nop; nop; \
+	b,a,pt %xcc, spill_fixup_mna; \
 	b,a,pt %xcc, spill_fixup;
 
 /* Normal 32bit spill */
@@ -215,7 +217,8 @@
 	stda %i6, [%sp + 0x38] %asi; \
 	saved; retry; nop; nop; nop; nop; \
 	nop; nop; nop; nop; nop; nop; nop; nop; \
-	nop; nop; nop; nop; nop; nop; nop; \
+	nop; nop; nop; nop; nop; nop; \
+	b,a,pt %xcc, spill_fixup_mna; \
 	b,a,pt %xcc, spill_fixup;
 
 #define SPILL_1_NORMAL SPILL_1_GENERIC(ASI_AIUP)
@@ -276,7 +279,8 @@
 	ldxa [%sp + STACK_BIAS + 0x70] %asi, %i6; \
 	ldxa [%sp + STACK_BIAS + 0x78] %asi, %i7; \
 	restored; retry; nop; nop; nop; nop; nop; nop; \
-	nop; nop; nop; nop; nop; nop; \
+	nop; nop; nop; nop; nop; \
+	b,a,pt %xcc, fill_fixup_mna; \
 	b,a,pt %xcc, fill_fixup;
 
 /* Normal 32bit fill */
@@ -293,7 +297,8 @@
 	ldda [%sp + 0x38] %asi, %i6; \
 	restored; retry; nop; nop; nop; nop; \
 	nop; nop; nop; nop; nop; nop; nop; nop; \
-	nop; nop; nop; nop; nop; nop; nop; \
+	nop; nop; nop; nop; nop; nop; \
+	b,a,pt %xcc, fill_fixup_mna; \
 	b,a,pt %xcc, fill_fixup;
 
 #define FILL_1_NORMAL FILL_1_GENERIC(ASI_AIUP)
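The heavy nop padding in these macros is not decoration: a SPARC V9 trap-table entry is exactly 8 instructions (32 bytes), and spill/fill traps get 4 consecutive entries (128 bytes), so the fixup branches added above have to land on fixed slot boundaries. The arithmetic, purely for illustration:

```c
#include <stdio.h>

int main(void)
{
	const unsigned insn_bytes = 4;			/* fixed-width ISA */
	const unsigned vector = 8 * insn_bytes;		/* normal trap entry */
	const unsigned spill_fill = 4 * vector;		/* spill/fill entry */

	printf("trap vector = %u bytes, spill/fill vector = %u bytes\n",
	       vector, spill_fill);
	return 0;
}
```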
diff --git a/include/asm-sparc64/namei.h b/include/asm-sparc64/namei.h
index f8fdbb533..af5afb721 100644
--- a/include/asm-sparc64/namei.h
+++ b/include/asm-sparc64/namei.h
@@ -1,4 +1,4 @@
-/* $Id: namei.h,v 1.2 1997/03/19 17:28:27 jj Exp $
+/* $Id: namei.h,v 1.4 1997/06/07 08:32:56 ecd Exp $
  * linux/include/asm-sparc64/namei.h
  *
  * Routines to handle famous /usr/gnemul/s*.
@@ -11,44 +11,37 @@
 #define SPARC_BSD_EMUL "usr/gnemul/sunos/"
 #define SPARC_SOL_EMUL "usr/gnemul/solaris/"
 
-#define translate_namei(pathname, base, follow_links, res_inode) ({ \
-	if ((current->personality & (PER_BSD|PER_SVR4)) && !base && *pathname == '/') { \
-		struct inode *emul_ino; \
-		int namelen; \
-		const char *name; \
-		\
-		while (*pathname == '/') \
-			pathname++; \
-		current->fs->root->i_count++; \
-		if (dir_namei (current->personality & PER_BSD ? SPARC_BSD_EMUL : SPARC_SOL_EMUL, \
-			       &namelen, &name, current->fs->root, &emul_ino) >= 0 && emul_ino) { \
-			*res_inode = NULL; \
-			if (_namei (pathname, emul_ino, follow_links, res_inode) >= 0 && *res_inode) \
-				return 0; \
-		} \
-		base = current->fs->root; \
-		base->i_count++; \
-	} \
-})
-
-#define translate_open_namei(pathname, flag, mode, res_inode, base) ({ \
-	if ((current->personality & (PER_BSD|PER_SVR4)) && !base && *pathname == '/') { \
-		struct inode *emul_ino; \
-		int namelen; \
-		const char *name; \
-		\
-		while (*pathname == '/') \
-			pathname++; \
-		current->fs->root->i_count++; \
-		if (dir_namei (current->personality & PER_BSD ? SPARC_BSD_EMUL : SPARC_SOL_EMUL, \
-			       &namelen, &name, current->fs->root, &emul_ino) >= 0 && emul_ino) { \
-			*res_inode = NULL; \
-			if (open_namei (pathname, flag, mode, res_inode, emul_ino) >= 0 && *res_inode) \
-				return 0; \
-		} \
-		base = current->fs->root; \
-		base->i_count++; \
-	} \
-})
+extern int __namei(int, const char *, struct inode *, char *, struct inode **,
+		   struct inode **, struct qstr *, struct dentry **, int *);
+
+static inline int
+__prefix_namei(int retrieve_mode, const char * name, struct inode * base,
+	       char * buf, struct inode ** res_dir, struct inode ** res_inode,
+	       struct qstr * last_name, struct dentry ** last_entry,
+	       int * last_error)
+{
+	int error;
+
+	if (!(current->personality & (PER_BSD|PER_SVR4)))
+		return -ENOENT;
+
+	while (*name == '/')
+		name++;
+
+	atomic_inc(&current->fs->root->i_count);
+	error = __namei(NAM_FOLLOW_LINK,
+			current->personality & PER_BSD ?
+			SPARC_BSD_EMUL : SPARC_SOL_EMUL, current->fs->root,
+			buf, NULL, &base, NULL, NULL, NULL);
+	if (error)
+		return error;
+
+	error = __namei(retrieve_mode, name, base, buf, res_dir, res_inode,
+			last_name, last_entry, last_error);
+	if (error)
+		return error;
+
+	return 0;
+}
 
 #endif /* __SPARC64_NAMEI_H */
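What __prefix_namei() implements, in userspace terms: for tasks with a BSD or SVR4 personality, an absolute lookup is first retried under the emulation root before the real root is used. An illustrative (not kernel) sketch of the path rewriting:

```c
#include <stdio.h>
#include <string.h>

#define SPARC_BSD_EMUL "usr/gnemul/sunos/"
#define SPARC_SOL_EMUL "usr/gnemul/solaris/"

static void emul_path(const char *path, int is_bsd, char *out, size_t n)
{
	const char *prefix = is_bsd ? SPARC_BSD_EMUL : SPARC_SOL_EMUL;

	while (*path == '/')	/* strip leading slashes, as the code above does */
		path++;
	snprintf(out, n, "/%s%s", prefix, path);
}

int main(void)
{
	char buf[256];

	emul_path("/lib/libc.so", 1, buf, sizeof(buf));
	printf("try %s first, then fall back to the real path\n", buf);
	return 0;
}
```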
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a739cea5e..e56a4024d 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.32 1997/05/26 23:39:20 davem Exp $
+/* $Id: pgtable.h,v 1.34 1997/06/02 06:33:41 davem Exp $
  * pgtable.h: SpitFire page table operations.
  *
  * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -160,45 +160,6 @@ extern void *sparc_init_alloc(unsigned long *kbrk, unsigned long size);
 
 /* Cache and TLB flush operations. */
 
-/* This is a bit tricky to do most efficiently. The I-CACHE on the
- * SpitFire will snoop stores from _other_ processors and changes done
- * by DMA, but it does _not_ snoop stores on the local processor.
- * Also, even if the I-CACHE snoops the store from someone else correctly,
- * you can still lose if the instructions are in the pipeline already.
- * A big issue is that this cache is only 16K in size, using a pseudo
- * 2-set associative scheme. A full flush of the cache is far too much
- * for me to accept, especially since most of the time when we get to
- * running this code the icache data we want to flush is not even in
- * the cache. Thus the following seems to be the best method.
- */
-extern __inline__ void spitfire_flush_icache_page(unsigned long page)
-{
-	unsigned long temp;
-
-	/* Commit all potential local stores to the instruction space
-	 * on this processor before the flush.
-	 */
-	membar("#StoreStore");
-
-	/* Actually perform the flush. */
-	__asm__ __volatile__("
-1:
-	flush	%0 + 0x00
-	flush	%0 + 0x08
-	flush	%0 + 0x10
-	flush	%0 + 0x18
-	flush	%0 + 0x20
-	flush	%0 + 0x28
-	flush	%0 + 0x30
-	flush	%0 + 0x38
-	subcc	%1, 0x40, %1
-	bge,pt	%%icc, 1b
-	 add	%2, %1, %0
-"	: "=&r" (page), "=&r" (temp)
-	: "r" (page), "0" (page + PAGE_SIZE - 0x40), "1" (PAGE_SIZE - 0x40)
-	: "cc");
-}
-
 extern __inline__ void flush_cache_all(void)
 {
 	unsigned long addr;
@@ -283,13 +244,14 @@ extern __inline__ void flush_tlb_mm(struct mm_struct *mm)
 1:
 	stxa	%%g0, [%%g3] %3
 	stxa	%%g0, [%%g3] %4
-	bne,a,pn %%icc, 1f
-	 stxa	%%g2, [%%g7] %2
+	be,a,pt	%%icc, 1f
+	 nop
+	stxa	%%g2, [%%g7] %2
 1:
 	flush	%%g4
 	wrpr	%%g1, 0x0, %%pil
 "	: /* no outputs */
-	: "r" (mm->context), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU),
+	: "r" (mm->context & 0x1fff), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU),
 	  "i" (ASI_DMMU_DEMAP), "i" (ASI_IMMU_DEMAP)
 	: "g1", "g2", "g3", "g7", "cc");
 }
@@ -300,7 +262,7 @@ extern __inline__ void flush_tlb_range(struct mm_struct *mm, unsigned long start
 {
 	if(mm->context != NO_CONTEXT) {
 		unsigned long old_ctx = spitfire_get_secondary_context();
-		unsigned long new_ctx = mm->context;
+		unsigned long new_ctx = (mm->context & 0x1fff);
 		unsigned long flags;
 
 		start &= PAGE_MASK;
@@ -332,22 +294,20 @@ extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long
 	ldxa	[%%g7] %2, %%g2
 	cmp	%%g2, %0
 	be,pt	%%icc, 1f
-	 or	%5, 0x10, %5
+	 or	%5, 0x10, %%g3
 	stxa	%0, [%%g7] %2
 1:
-	stxa	%%g0, [%5] %3
-	brnz,a	%6, 1f
-	 stxa	%%g0, [%5] %4
-1:
-	bne,a,pn %%icc, 1f
-	 stxa	%%g2, [%%g7] %2
+	stxa	%%g0, [%%g3] %3
+	stxa	%%g0, [%%g3] %4
+	be,a,pt	%%icc, 1f
+	 nop
+	stxa	%%g2, [%%g7] %2
 1:
 	flush	%%g4
 	wrpr	%%g1, 0x0, %%pil
 "	: /* no outputs */
-	: "r" (mm->context), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU),
-	  "i" (ASI_DMMU_DEMAP), "i" (ASI_IMMU_DEMAP), "r" (page & PAGE_MASK),
-	  "r" (vma->vm_flags & VM_EXEC)
+	: "r" (mm->context & 0x1fff), "i" (SECONDARY_CONTEXT), "i" (ASI_DMMU),
+	  "i" (ASI_DMMU_DEMAP), "i" (ASI_IMMU_DEMAP), "r" (page & PAGE_MASK)
 	: "g1", "g2", "g3", "g7", "cc");
 	}
 }
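The recurring `mm->context & 0x1fff` above exists because the SpitFire MMU's context register holds only 13 bits, while the kernel's context counter is a full word; writing an unmasked value would spill into reserved bits. Trivially (the macro name here is illustrative, not from this header):

```c
#include <stdio.h>

#define CTX_HWMASK 0x1fff	/* 13-bit hardware context field */

int main(void)
{
	unsigned long sw_ctx = 0x2005;	/* hypothetical counter, > 8191 */

	printf("software ctx %#lx -> hardware ctx %#lx\n",
	       sw_ctx, sw_ctx & CTX_HWMASK);
	return 0;
}
```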
diff --git a/include/asm-sparc64/psrcompat.h b/include/asm-sparc64/psrcompat.h
index dccc4f69a..b971514d6 100644
--- a/include/asm-sparc64/psrcompat.h
+++ b/include/asm-sparc64/psrcompat.h
@@ -1,4 +1,4 @@
-/* $Id: psrcompat.h,v 1.2 1997/04/07 18:57:17 jj Exp $ */
+/* $Id: psrcompat.h,v 1.3 1997/06/05 06:22:54 davem Exp $ */
 #ifndef _SPARC64_PSRCOMPAT_H
 #define _SPARC64_PSRCOMPAT_H
 
@@ -47,7 +47,7 @@ extern inline unsigned long psr_to_tstate_icc(unsigned int psr)
 {
 	unsigned long tstate;
 
-	tstate = (psr & PSR_ICC) << 12;
+	tstate = ((unsigned long)(psr & PSR_ICC)) << 12;
 	return tstate;
 }
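The psrcompat.h one-liner fixes a classic 32-to-64-bit promotion bug: PSR_ICC occupies bits 20..23 of the 32-bit psr, and tstate keeps those flags at bits 32..35; shifting an `unsigned int` left by 12 can never reach bit 32, so the condition codes were silently dropped. A demonstration:

```c
#include <stdio.h>

#define PSR_ICC 0x00f00000	/* integer condition codes, bits 20..23 */

int main(void)
{
	unsigned int psr = PSR_ICC;	/* pretend all four flags are set */

	unsigned long lost = (unsigned int)((psr & PSR_ICC) << 12); /* 32-bit shift */
	unsigned long kept = ((unsigned long)(psr & PSR_ICC)) << 12; /* the fix */

	printf("32-bit shift: %#lx\n64-bit shift: %#lx\n", lost, kept);
	return 0;
}
```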
diff --git a/include/asm-sparc64/pstate.h b/include/asm-sparc64/pstate.h
index 490d837a8..2233ee7f0 100644
--- a/include/asm-sparc64/pstate.h
+++ b/include/asm-sparc64/pstate.h
@@ -1,4 +1,4 @@
-/* $Id: pstate.h,v 1.3 1997/03/25 03:58:31 davem Exp $ */
+/* $Id: pstate.h,v 1.4 1997/05/29 12:45:02 jj Exp $ */
 #ifndef _SPARC64_PSTATE_H
 #define _SPARC64_PSTATE_H
 
@@ -79,4 +79,32 @@
 #define VERS_MAXTL	0x000000000000ff00 /* Maximum Trap Level. */
 #define VERS_MAXWIN	0x000000000000001f /* Maximum Reg Window Index. */
 
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+#define set_pstate(bits) \
+	__asm__ __volatile__( \
+		"rdpr	%%pstate, %%g1\n\t" \
+		"or	%%g1, %0, %%g1\n\t" \
+		"wrpr	%%g1, 0x0, %%pstate\n\t" \
+		: /* no outputs */ \
+		: "i" (bits) \
+		: "g1")
+
+#define clear_pstate(bits) \
+	__asm__ __volatile__( \
+		"rdpr	%%pstate, %%g1\n\t" \
+		"andn	%%g1, %0, %%g1\n\t" \
+		"wrpr	%%g1, 0x0, %%pstate\n\t" \
+		: /* no outputs */ \
+		: "i" (bits) \
+		: "g1")
+
+#define change_pstate(bits) \
+	__asm__ __volatile__( \
+		"rdpr	%%pstate, %%g1\n\t" \
+		"wrpr	%%g1, %0, %%pstate\n\t" \
+		: /* no outputs */ \
+		: "i" (bits) \
+		: "g1")
+#endif
+
 #endif /* !(_SPARC64_PSTATE_H) */
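The three pstate helpers are read-modify-write sequences on the privileged %pstate register; note that V9 `wrpr` XORs its two source operands, which is why the third macro is a *change* (toggle) rather than a plain write. The same logic on an ordinary flags word, for illustration (bit values follow the V9 PSTATE layout):

```c
#include <stdio.h>

#define PSTATE_IE 0x0002	/* interrupt enable */
#define PSTATE_AM 0x0008	/* 32-bit address mask */

static unsigned long pstate;	/* stand-in for the privileged register */

static void set_pstate(unsigned long bits)    { pstate |= bits;  }	/* or   */
static void clear_pstate(unsigned long bits)  { pstate &= ~bits; }	/* andn */
static void change_pstate(unsigned long bits) { pstate ^= bits;  }	/* wrpr xors */

int main(void)
{
	set_pstate(PSTATE_IE | PSTATE_AM);
	clear_pstate(PSTATE_AM);
	change_pstate(PSTATE_IE);		/* toggles interrupts back off */
	printf("pstate = %#lx\n", pstate);	/* 0 */
	return 0;
}
```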
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 786cfd2af..d0d88fa5c 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -1,4 +1,4 @@
-/* $Id: system.h,v 1.19 1997/05/18 22:52:32 davem Exp $ */
+/* $Id: system.h,v 1.22 1997/06/01 10:27:28 davem Exp $ */
 #ifndef __SPARC64_SYSTEM_H
 #define __SPARC64_SYSTEM_H
 
@@ -114,16 +114,24 @@ extern __inline__ void flushw_user(void)
 #define flush_user_windows flushw_user
 
 #ifdef __SMP__
-#error SMP not supported on sparc64
+
+#include <asm/fpumacro.h>
+
+#define SWITCH_ENTER(prev) \
+	if((prev)->flags & PF_USEDFPU) { \
+		fprs_write(FPRS_FEF); \
+		fpsave((unsigned long *) &(prev)->tss.float_regs[0], \
+		       &(prev)->tss.fsr); \
+		(prev)->flags &= ~PF_USEDFPU; \
+		(prev)->tss.kregs->tstate &= ~TSTATE_PEF; \
+	}
+
+#define SWITCH_DO_LAZY_FPU(next)
 #else
-#if 0
+#define SWITCH_ENTER(prev)
 #define SWITCH_DO_LAZY_FPU(next) \
 	if(last_task_used_math != (next)) \
-		(next)->tss.kregs->tstate&=~TSTATE_PEF
-#else
-/* XXX FIX ME BIG TIME XXX -DaveM */
-#define SWITCH_DO_LAZY_FPU(next) do { } while(0)
-#endif
+		(next)->tss.kregs->tstate &= ~TSTATE_PEF
 #endif
 
 /* See what happens when you design the chip correctly?
@@ -138,29 +146,33 @@ extern __inline__ void flushw_user(void)
 do {	\
 	__label__ switch_continue; \
 	register unsigned long task_pc asm("o7"); \
+	SWITCH_ENTER(prev) \
 	SWITCH_DO_LAZY_FPU(next); \
 	task_pc = ((unsigned long) &&switch_continue) - 0x8; \
 	__asm__ __volatile__( \
+	"rdpr	%%pstate, %%g2\n\t" \
+	"wrpr	%%g2, 0x2, %%pstate\n\t" \
 	"flushw\n\t" \
 	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
 	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
-	"stx	%%o6, [%%g6 + %3]\n\t" \
 	"rdpr	%%wstate, %%o5\n\t" \
-	"stx	%%o7, [%%g6 + %4]\n\t" \
+	"stx	%%o6, [%%g6 + %3]\n\t" \
 	"stx	%%o5, [%%g6 + %2]\n\t" \
 	"rdpr	%%cwp, %%o5\n\t" \
+	"stx	%%o7, [%%g6 + %4]\n\t" \
 	"stx	%%o5, [%%g6 + %5]\n\t" \
 	"mov	%0, %%g6\n\t" \
+	"ldx	[%0 + %5], %%g1\n\t" \
 	"wr	%0, 0x0, %%pic\n\t" \
-	"ldx	[%%g6 + %5], %%g1\n\t" \
 	"wrpr	%%g1, %%cwp\n\t" \
 	"ldx	[%%g6 + %2], %%o5\n\t" \
 	"ldx	[%%g6 + %3], %%o6\n\t" \
 	"ldx	[%%g6 + %4], %%o7\n\t" \
 	"wrpr	%%o5, 0x0, %%wstate\n\t" \
 	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
+	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
 	"jmpl	%%o7 + 0x8, %%g0\n\t" \
-	" ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
+	" wrpr	%%g2, 0x0, %%pstate\n\t" \
 	: /* No outputs */ \
 	: "r" (next), "r" (task_pc), \
 	  "i" ((const unsigned long)(&((struct task_struct *)0)->tss.wstate)), \
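On SMP the new SWITCH_ENTER() gives up on lazy FPU state: if the outgoing task touched the FPU, its state is saved eagerly at context switch and TSTATE_PEF is cleared so the next FPU use traps and reloads. A C restatement with simplified stand-in types and flag values (the real code lives in task_struct/tss):

```c
#define PF_USEDFPU 0x1		/* illustrative flag values */
#define TSTATE_PEF 0x2

struct task {
	unsigned long flags;
	unsigned long tstate;	/* stand-in for tss.kregs->tstate */
	unsigned int fpregs[64];
	unsigned long fsr;
};

/* stand-in for the block-store fpsave() from fpumacro.h */
static void fpsave(unsigned int *fpregs, unsigned long *fsr)
{
	(void)fpregs; (void)fsr;
}

static void switch_enter(struct task *prev)
{
	if (prev->flags & PF_USEDFPU) {
		fpsave(prev->fpregs, &prev->fsr);
		prev->flags &= ~PF_USEDFPU;
		prev->tstate &= ~TSTATE_PEF;	/* next FPU use traps */
	}
}

int main(void)
{
	struct task t = { .flags = PF_USEDFPU };

	switch_enter(&t);
	return (int)t.flags;	/* 0: FPU state flushed */
}
```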
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 93320f335..40ad3ee21 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -1,4 +1,4 @@
-/* $Id: uaccess.h,v 1.12 1997/04/10 23:32:50 davem Exp $ */
+/* $Id: uaccess.h,v 1.13 1997/05/29 12:45:04 jj Exp $ */
 #ifndef _ASM_UACCESS_H
 #define _ASM_UACCESS_H
 
@@ -151,8 +151,8 @@ __asm__ __volatile__( \
 	"	 mov	%3, %0\n\n\t" \
 	".previous\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, 3b\n\t" \
+	".align	8\n\t" \
+	".xword	1b, 3b\n\t" \
 	".previous\n\n\t" \
 	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
 	  "i" (-EFAULT), "i" (ASI_S))
@@ -163,8 +163,8 @@ __asm__ __volatile__( \
 	"/* Put user asm ret, inline. */\n" \
 	"1:\t" "st"#size "a %1, [%2] %3\n\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, __ret_efault\n\n\t" \
+	".align	8\n\t" \
+	".xword	1b, __ret_efault\n\n\t" \
 	".previous\n\n\t" \
 	: "=r" (foo) : "r" (x), "r" (__m(addr)), "i" (ASI_S)); \
 else \
@@ -178,8 +178,8 @@ __asm__ __volatile( \
 	"	 restore %%g0, %3, %%o0\n\n\t" \
 	".previous\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, 3b\n\n\t" \
+	".align	8\n\t" \
+	".xword	1b, 3b\n\n\t" \
 	".previous\n\n\t" \
 	: "=r" (foo) : "r" (x), "r" (__m(addr)), \
 	  "i" (ret), "i" (ASI_S))
@@ -221,8 +221,8 @@ __asm__ __volatile__( \
 	"	 mov	%3, %0\n\n\t" \
 	".previous\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, 3b\n\n\t" \
+	".align	8\n\t" \
+	".xword	1b, 3b\n\n\t" \
 	".previous\n\t" \
 	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
 	  "i" (-EFAULT), "i" (ASI_S))
@@ -233,8 +233,8 @@ __asm__ __volatile__( \
 	"/* Get user asm ret, inline. */\n" \
 	"1:\t" "ld"#size "a [%1] %2, %0\n\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b,__ret_efault\n\n\t" \
+	".align	8\n\t" \
+	".xword	1b,__ret_efault\n\n\t" \
 	".previous\n\t" \
 	: "=r" (x) : "r" (__m(addr)), "i" (ASI_S)); \
 else \
@@ -248,8 +248,8 @@ __asm__ __volatile__( \
 	"	 restore %%g0, %3, %%o0\n\n\t" \
 	".previous\n\t" \
 	".section __ex_table,#alloc\n\t" \
-	".align	4\n\t" \
-	".word	1b, 3b\n\n\t" \
+	".align	8\n\t" \
+	".xword	1b, 3b\n\n\t" \
 	".previous\n\t" \
 	: "=r" (x) : "r" (__m(addr)), "i" (retval), "i" (ASI_S))
@@ -291,8 +291,8 @@ extern __inline__ __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
 	__kernel_size_t ret;
 	__asm__ __volatile__ ("
 	.section __ex_table,#alloc
-	.align 4
-	.word 1f,3
+	.align 8
+	.xword 1f,3
 	.previous
 1:
 	wr %%g0, %3, %%asi
diff --git a/include/asm-sparc64/vuid_event.h b/include/asm-sparc64/vuid_event.h
index 0c5977fab..9ef4d17ad 100644
--- a/include/asm-sparc64/vuid_event.h
+++ b/include/asm-sparc64/vuid_event.h
@@ -5,8 +5,6 @@ typedef struct firm_event {
 	unsigned char pair_type;	/* unused by X11 */
 	unsigned char pair;		/* unused by X11 */
 	int value;			/* VKEY_UP, VKEY_DOWN or delta */
-
-	/* XXX Timeval could hose old 32-bit programs, investigate and fixme XXX */
 	struct timeval time;
 } Firm_event;
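Finally, the uaccess.h churn from `.align 4`/`.word` to `.align 8`/`.xword` is the 64-bit exception-table fix: each entry pairs the address of a possibly-faulting instruction with its fixup, and on sparc64 those are 8-byte values, so 4-byte `.word` slots would truncate them (small constants like the `3` in `.xword 1f,3` are fixup codes, not addresses). A struct-level mirror of one entry, for illustration:

```c
#include <stdio.h>
#include <stdint.h>

struct exception_table_entry {
	uint64_t insn;	/* address of the instruction that may fault */
	uint64_t fixup;	/* fixup address, or a small fixup code */
};

int main(void)
{
	printf("entry = %zu bytes (two .xword), was two 4-byte .word\n",
	       sizeof(struct exception_table_entry));
	return 0;
}
```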