summary refs log tree commit diff stats
path: root/arch/sparc64/kernel/entry.S
diff options
context:
space:
mode:
author	Ralf Baechle <ralf@linux-mips.org>	1997-09-12 01:29:55 +0000
committer	Ralf Baechle <ralf@linux-mips.org>	1997-09-12 01:29:55 +0000
commit	545f435ebcfd94a1e7c20b46efe81b4d6ac4e698 (patch)
tree	e9ce4bc598d06374bda906f18365984bf22a526a /arch/sparc64/kernel/entry.S
parent	4291a610eef89d0d5c69d9a10ee6560e1aa36c74 (diff)
Merge with Linux 2.1.55. More bugfixes and goodies from my private
CVS archive.
Diffstat (limited to 'arch/sparc64/kernel/entry.S')
-rw-r--r--	arch/sparc64/kernel/entry.S	203
1 file changed, 129 insertions, 74 deletions
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 425c2d873..a6e2d6da7 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1,9 +1,9 @@
-/* $Id: entry.S,v 1.51 1997/07/24 12:15:04 davem Exp $
+/* $Id: entry.S,v 1.65 1997/08/29 15:51:29 jj Exp $
* arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points.
*
* Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
- * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
@@ -17,6 +17,7 @@
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/pgtable.h>
+#include <asm/processor.h>
/* #define SYSCALL_TRACING */
@@ -50,41 +51,39 @@ sparc64_dtlb_prot_catch:
bgu,a,pn %icc, winfix_trampoline
rdpr %tpc, %g3
+ sethi %hi(109f), %g7
ba,pt %xcc, etrap
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
b,pt %xcc, 1f
mov 1, %o2
+ .align 32
sparc64_dtlb_refbit_catch:
srlx %g5, 9, %g4
and %g4, ((_PAGE_PRESENT | _PAGE_READ) >> 9), %g4
-
cmp %g4, ((_PAGE_PRESENT | _PAGE_READ) >> 9)
be,a,pt %xcc, 2f
mov 1, %g4
wr %g0, ASI_DMMU, %asi
rdpr %pstate, %g1
wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
+
rdpr %tl, %g3
ldxa [%g0 + TLB_TAG_ACCESS] %asi, %g5
-
cmp %g3, 1
bgu,pn %icc, winfix_trampoline
rdpr %tpc, %g3
+ sethi %hi(109f), %g7
b,pt %xcc, etrap
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
+
clr %o2
1: srlx %l5, PAGE_SHIFT, %o1
add %sp, STACK_BIAS + REGWIN_SZ, %o0
-
call do_sparc64_fault
sllx %o1, PAGE_SHIFT, %o1
b,pt %xcc, rtrap
clr %l6
- nop
- nop
- nop
- nop
-
+ .align 32
sparc64_itlb_refbit_catch:
srlx %g5, 9, %g4
and %g4, ((_PAGE_PRESENT | _PAGE_READ) >> 9), %g4
@@ -95,17 +94,21 @@ sparc64_itlb_refbit_catch:
wrpr %g1, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tpc, %g5
+ sethi %hi(109f), %g7
b,pt %xcc, etrap
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
b,pt %xcc, 1b
clr %o2
+
+ .align 32
2: sllx %g4, 63, %g4 ! _PAGE_VALID
or %g5, _PAGE_ACCESSED, %g5
or %g5, %g4, %g5
stxa %g5, [%g3 + %g1] ASI_PHYS_USE_EC ! store new PTE
-
stxa %g5, [%g0] ASI_DTLB_DATA_IN ! TLB load
retry
+
+ .align 32
3: sllx %g4, 63, %g4 ! _PAGE_VALID
or %g5, _PAGE_ACCESSED, %g5
or %g5, %g4, %g5
@@ -113,24 +116,33 @@ sparc64_itlb_refbit_catch:
stxa %g5, [%g0] ASI_ITLB_DATA_IN ! TLB load
retry
+#define FPDIS_OFF (((PAGE_SIZE<<1)-((64*4)+(2*8))) & ~(64 - 1))
/* This is trivial with the new code... */
.align 32
.globl do_fpdis
do_fpdis:
- wr %g0, FPRS_FEF, %fprs
- ldx [%g6 + AOFF_task_flags], %g2
- sethi %hi(0x00100000), %g4 ! XXX PF_USEDFPU
- andcc %g2, %g4, %g0
-
- bne,a,pt %xcc, fpload_fromkstk
- sethi %hi((((PAGE_SIZE<<1)-((64*4)+(2*8))) & ~(64 - 1))), %g2
- fzero %f0
- fzero %f2
+ ldx [%g6 + AOFF_task_tss + AOFF_thread_flags], %g5 ! Load Group
+ sethi %hi(TSTATE_PEF), %g4 ! IEU0
+ sethi %hi(FPDIS_OFF), %g3 ! IEU1
+ wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
+ andcc %g5, SPARC_FLAG_USEDFPU, %g0 ! IEU1 Group
+ or %g3, %lo(FPDIS_OFF), %g2 ! IEU0
+ sethi %hi(empty_zero_page), %g1 ! IEU0 Group
+ add %g6, %g2, %g2 ! IEU1
+ be,a,pn %icc, 1f ! CTI
+ clr %g7 ! IEU0 Group
+ add %g2, 0x100, %g1 ! IEU1
+ ldx [%g2 + 0x108], %g7 ! Load
+1: andcc %g5, SPARC_FLAG_USEDFPUL, %g0 ! IEU1 Group
+ bne,pn %icc, 2f ! CTI
+ fzero %f0 ! FPA
+ andcc %g5, SPARC_FLAG_USEDFPUU, %g0 ! IEU1 Group
+ bne,pn %icc, 1f ! CTI
+ fzero %f2 ! FPA
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
-
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
@@ -139,7 +151,6 @@ do_fpdis:
fmuld %f0, %f2, %f22
faddd %f0, %f2, %f24
fmuld %f0, %f2, %f26
-
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
faddd %f0, %f2, %f32
@@ -148,54 +159,92 @@ do_fpdis:
fmuld %f0, %f2, %f38
faddd %f0, %f2, %f40
fmuld %f0, %f2, %f42
-
faddd %f0, %f2, %f44
fmuld %f0, %f2, %f46
- ldx [%g6 + AOFF_task_flags], %g2
faddd %f0, %f2, %f48
fmuld %f0, %f2, %f50
- or %g2, %g4, %g2
faddd %f0, %f2, %f52
fmuld %f0, %f2, %f54
-
- stx %g2, [%g6 + AOFF_task_flags]
faddd %f0, %f2, %f56
- sethi %hi(empty_zero_page), %g3
fmuld %f0, %f2, %f58
-
- faddd %f0, %f2, %f60
- ldx [%g3], %fsr ! wheee, empty_zero_page
+ b,pt %xcc, fpdis_exit2
+ faddd %f0, %f2, %f60
+1: mov SECONDARY_CONTEXT, %g3
+ faddd %f0, %f2, %f4
+ fmuld %f0, %f2, %f6
+ ldxa [%g3] ASI_DMMU, %g5
+ stxa %g0, [%g3] ASI_DMMU
+ faddd %f0, %f2, %f8
+ fmuld %f0, %f2, %f10
+ flush %g2
+ wr %g0, ASI_BLK_S, %asi ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #StoreLoad | #LoadLoad
+ ldda [%g2 + 0x080] %asi, %f32
+ ldda [%g2 + 0x0c0] %asi, %f48
+ faddd %f0, %f2, %f12
+ fmuld %f0, %f2, %f14
+ faddd %f0, %f2, %f16
+ fmuld %f0, %f2, %f18
+ faddd %f0, %f2, %f20
+ fmuld %f0, %f2, %f22
+ faddd %f0, %f2, %f24
+ fmuld %f0, %f2, %f26
+ faddd %f0, %f2, %f28
+ fmuld %f0, %f2, %f30
b,pt %xcc, fpdis_exit
- wr %g0, 0, %gsr
-
-fpload_fromkstk:
- or %g2, %lo((((PAGE_SIZE<<1)-((64*4)+(2*8))) & ~(64 - 1))), %g2
- add %g6, %g2, %g2
+ membar #Sync
+2: andcc %g5, SPARC_FLAG_USEDFPUU, %g0
+ bne,pt %icc, 3f
+ fzero %f32
mov SECONDARY_CONTEXT, %g3
+ fzero %f34
+ ldxa [%g3] ASI_DMMU, %g5
+ stxa %g0, [%g3] ASI_DMMU
+ faddd %f32, %f34, %f36
+ fmuld %f32, %f34, %f38
+ flush %g2
+ wr %g0, ASI_BLK_S, %asi ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #StoreLoad | #LoadLoad
+ ldda [%g2 + 0x000] %asi, %f0
+ ldda [%g2 + 0x040] %asi, %f16
+ faddd %f32, %f34, %f40
+ fmuld %f32, %f34, %f42
+ faddd %f32, %f34, %f44
+ fmuld %f32, %f34, %f46
+ faddd %f32, %f34, %f48
+ fmuld %f32, %f34, %f50
+ faddd %f32, %f34, %f52
+ fmuld %f32, %f34, %f54
+ faddd %f32, %f34, %f56
+ fmuld %f32, %f34, %f58
+ faddd %f32, %f34, %f60
+ fmuld %f32, %f34, %f62
+ b,pt %xcc, fpdis_exit
+ membar #Sync
+3: mov SECONDARY_CONTEXT, %g3
+ ldxa [%g3] ASI_DMMU, %g5
stxa %g0, [%g3] ASI_DMMU
flush %g2
wr %g0, ASI_BLK_S, %asi ! grrr, where is ASI_BLK_NUCLEUS 8-(
membar #StoreLoad | #LoadLoad
-
ldda [%g2 + 0x000] %asi, %f0
ldda [%g2 + 0x040] %asi, %f16
ldda [%g2 + 0x080] %asi, %f32
ldda [%g2 + 0x0c0] %asi, %f48
- ldx [%g2 + 0x100], %fsr
- ldx [%g2 + 0x108], %g2
membar #Sync
- wr %g2, 0, %gsr
fpdis_exit:
+ stxa %g5, [%g3] ASI_DMMU
+ flush %g2
+fpdis_exit2:
+ wr %g7, 0, %gsr
+ ldx [%g1], %fsr
rdpr %tstate, %g3
- sethi %hi(TSTATE_PEF), %g4
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
+ wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits
retry
-#ifdef __SMP__
- /* Note check out head.h, this code isn't even used for UP,
- * for SMP things will be different. In particular the data
- * registers for cross calls will be:
+ /* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
@@ -205,11 +254,17 @@ fpdis_exit:
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*/
+ .data
+ .align 8
+ .globl ivec_spurious_cookie
+ivec_spurious_cookie: .xword 0
+
+ .text
.align 32
- .globl do_ivec, do_ivec_return
+ .globl do_ivec
do_ivec:
- ldxa [%g0] ASI_INTR_RECEIVE, %g1
- andcc %g1, 0x20, %g0
+ ldxa [%g0] ASI_INTR_RECEIVE, %g5
+ andcc %g5, 0x20, %g0
be,pn %xcc, do_ivec_return
mov 0x40, %g2
@@ -223,39 +278,45 @@ do_ivec:
sllx %g3, 3, %g3
ldx [%g1 + %g3], %g2
brz,pn %g2, do_ivec_spurious
- nop
+ sethi %hi(0x80000000), %g5
+
+ or %g2, %g5, %g2
+ stx %g2, [%g1 + %g3]
/* No branches, worse case we don't know about this interrupt
* yet, so we would just write a zero into the softint register
* which is completely harmless.
*/
wr %g2, 0x0, %set_softint
-
do_ivec_return:
- /* Acknowledge the UPA */
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
retry
do_ivec_xcall:
srlx %g3, 32, %g5
add %g2, 0x10, %g2
- sra %g3, 0, %g3
+ srl %g3, 0, %g3
ldxa [%g2] ASI_UDB_INTR_R, %g6
add %g2, 0x10, %g2
+ ldxa [%g2] ASI_UDB_INTR_R, %g7
+ stxa %g0, [%g0] ASI_INTR_RECEIVE
jmpl %g3, %g0
- ldxa [%g2] ASI_UDB_INTR_R, %g7
+ membar #Sync
do_ivec_spurious:
+ srl %g3, 3, %g3
+ sethi %hi(ivec_spurious_cookie), %g2
+ stx %g3, [%g2 + %lo(ivec_spurious_cookie)]
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
- rdpr %pstate, %g1
- wrpr %g1, PSTATE_IG | PSTATE_AG, %pstate
+ rdpr %pstate, %g5
+ wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate
+ sethi %hi(109f), %g7
ba,pt %xcc, etrap
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
call report_spurious_ivec
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,pt %xcc, rtrap
clr %l6
-#endif /* __SMP__ */
.globl getcc, setcc
getcc:
@@ -359,8 +420,9 @@ floppy_overrun:
floppy_dosoftint:
rdpr %pil, %g2
wrpr %g0, 15, %pil
+ sethi %hi(109f), %g7
b,pt %xcc, etrap_irq
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
mov 11, %o0
mov 0, %o1
@@ -373,10 +435,8 @@ floppy_dosoftint:
#endif /* CONFIG_BLK_DEV_FD */
/* XXX Here is stuff we still need to write... -DaveM XXX */
- .globl indirect_syscall, netbsd_syscall, solaris_syscall
-indirect_syscall:
+ .globl netbsd_syscall
netbsd_syscall:
-solaris_syscall:
retl
nop
@@ -386,8 +446,9 @@ do_mna:
cmp %g3, 1
bgu,a,pn %icc, winfix_mna
rdpr %tpc, %g3
+ sethi %hi(109f), %g7
ba,pt %xcc, etrap
- rd %pc, %g7
+109: or %g7, %lo(109b), %g7
call mem_address_unaligned
add %sp, STACK_BIAS + REGWIN_SZ, %o0
ba,pt %xcc, rtrap
@@ -573,14 +634,8 @@ sys_fork:
sys_vfork: mov SIGCHLD, %o0
clr %o1
sys_clone: mov %o7, %l5
-/*???*/ save %sp, -REGWIN_SZ, %sp
- flushw
-/*???*/ restore %g0, %g0, %g0
- rdpr %cwp, %o4
add %sp, STACK_BIAS + REGWIN_SZ, %o2
-
movrz %o1, %fp, %o1
- stx %o4, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_G0]
call do_fork
mov %l5, %o7
#ifdef __SMP__
@@ -611,9 +666,9 @@ linux_sparc_syscall:
cmp %g1, NR_SYSCALLS ! IEU1 Group
bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI
mov %i0, %o0 ! IEU0
- sll %g1, 3, %l4 ! IEU0 Group
+ sll %g1, 2, %l4 ! IEU0 Group
mov %i1, %o1 ! IEU1
- ldx [%l7 + %l4], %l7 ! Load
+ lduw [%l7 + %l4], %l7 ! Load
syscall_is_too_hard:
mov %i2, %o2 ! IEU0 Group
ldx [%curptr + AOFF_task_flags], %l5 ! Load