Diffstat (limited to 'arch/ppc/kernel/head.S')
-rw-r--r--  arch/ppc/kernel/head.S  351
1 file changed, 213 insertions, 138 deletions
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index f88c5383d..8c5911d30 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -36,7 +36,19 @@
#include <asm/amigappc.h>
#endif
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC64BRIDGE
+CACHELINE_BYTES = 32
+LG_CACHELINE_BYTES = 5
+CACHELINE_MASK = 0x1f
+CACHELINE_WORDS = 8
+#else
+CACHELINE_BYTES = 128
+LG_CACHELINE_BYTES = 7
+CACHELINE_MASK = 0x7f
+CACHELINE_WORDS = 32
+#endif /* CONFIG_PPC64BRIDGE */
+
+#ifdef CONFIG_PPC64BRIDGE
#define LOAD_BAT(n, reg, RA, RB) \
ld RA,(n*32)+0(reg); \
ld RB,(n*32)+8(reg); \
@@ -47,7 +59,7 @@
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
-#else /* CONFIG_PPC64 */
+#else /* CONFIG_PPC64BRIDGE */
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -65,7 +77,7 @@
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
1:
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64BRIDGE */
.text
.globl _stext
@@ -125,16 +137,6 @@ _start:
.globl __start
__start:
-#ifdef CONFIG_PPC64
-/*
- * Go into 32-bit mode to boot. OF should do this for
- * us already but just in case...
- * -- Cort
- */
- mfmsr r10
- clrldi r10,r10,3
- mtmsr r10
-#endif
/*
* We have to do any OF calls before we map ourselves to KERNELBASE,
* because OF may have I/O devices mapped into that area
@@ -166,67 +168,23 @@ __after_prom_start:
bl flush_tlbs
#endif
+#ifndef CONFIG_POWER4
+ /* POWER4 doesn't have BATs */
+ bl initial_bats
+#else /* CONFIG_POWER4 */
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to KERNELBASE. From this point on we can't safely
- * call OF any more.
+ * Load up the SDR1 and segment register values now
+ * since we don't have the BATs.
*/
- lis r11,KERNELBASE@h
-#ifndef CONFIG_PPC64
- mfspr r9,PVR
- rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
- cmpi 0,r9,1
- bne 4f
- ori r11,r11,4 /* set up BAT registers for 601 */
- li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
- mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
- mtspr IBAT0L,r8 /* lower BAT register */
- mtspr IBAT1U,r9
- mtspr IBAT1L,r10
- b 5f
-#endif /* CONFIG_PPC64 */
-
-4: tophys(r8,r11)
-#ifdef CONFIG_SMP
- ori r8,r8,0x12 /* R/W access, M=1 */
-#else
- ori r8,r8,2 /* R/W access */
-#endif /* CONFIG_SMP */
-#ifdef CONFIG_APUS
- ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
-#else
- ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
-#endif /* CONFIG_APUS */
-
-#ifdef CONFIG_PPC64
- /* clear out the high 32 bits in the BAT */
- clrldi r11,r11,32
- clrldi r8,r8,32
- /* turn off the pagetable mappings just in case */
- clrldi r16,r16,63
- mtsdr1 r16
-#else /* CONFIG_PPC64 */
- /*
- * If the MMU is off clear the bats. See clear_bat() -- Cort
- */
- mfmsr r20
- andi. r20,r20,MSR_DR
- bne 100f
- bl clear_bats
-100:
-#endif /* CONFIG_PPC64 */
- mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
- mtspr DBAT0U,r11 /* bit in upper BAT register */
- mtspr IBAT0L,r8
- mtspr IBAT0U,r11
-#if 0 /* Useful debug code, please leave in for now so I don't have to
- * look at docs when I need to setup a BAT ...
- */
- bl setup_screen_bat
-#endif
-5: isync
+ bl reloc_offset
+ addis r4,r3,_SDR1@ha /* get the value from _SDR1 */
+ lwz r4,_SDR1@l(r4) /* assume hash table below 4GB */
+ mtspr SDR1,r4
+ slbia
+ lis r5,0x2000 /* set pseudo-segment reg 12 */
+ ori r5,r5,12
+ mtsr 12,r5
+#endif /* CONFIG_POWER4 */
#ifndef CONFIG_APUS
/*
@@ -267,7 +225,21 @@ turn_on_mmu:
ori r0,r0,start_here@l
mtspr SRR0,r0
SYNC
- rfi /* enables MMU */
+ RFI /* enables MMU */
+
+#ifdef CONFIG_SMP
+ .globl __secondary_hold
+__secondary_hold:
+ /* tell the master we're here */
+ stw r3,4(0)
+100: lwz r4,0(0)
+ /* wait until we're told to start */
+ cmpw 0,r4,r3
+ bne 100b
+ /* our cpu # was at addr 0 - go */
+ mr r24,r3 /* cpu # */
+ b __secondary_start
+#endif
/*
* Exception entry code. This code runs with address translation
@@ -284,7 +256,8 @@ turn_on_mmu:
bne 1f; \
tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
-1: stw r20,_CCR(r21); /* save registers */ \
+1: CLR_TOP32(r21); \
+ stw r20,_CCR(r21); /* save registers */ \
stw r22,GPR22(r21); \
stw r23,GPR23(r21); \
mfspr r20,SPRG0; \
@@ -341,8 +314,13 @@ label: \
/* Data access exception. */
. = 0x300
+#ifdef CONFIG_PPC64BRIDGE
+ b DataAccess
+DataAccessCont:
+#else
DataAccess:
EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
mfspr r20,DSISR
andis. r0,r20,0xa470 /* weird error? */
bne 1f /* if not, try to put a PTE */
@@ -361,10 +339,30 @@ DataAccess:
.long do_page_fault
.long ret_from_except
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on data access. */
+ . = 0x380
+ b DataSegment
+DataSegmentCont:
+ mfspr r4,DAR
+ stw r4,_DAR(r21)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long UnknownException
+ .long ret_from_except
+#endif /* CONFIG_PPC64BRIDGE */
+
/* Instruction access exception. */
. = 0x400
+#ifdef CONFIG_PPC64BRIDGE
+ b InstructionAccess
+InstructionAccessCont:
+#else
InstructionAccess:
EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
andis. r0,r23,0x4000 /* no pte found? */
beq 1f /* if so, try to put a PTE */
mr r3,r22 /* into the hash table */
@@ -380,6 +378,19 @@ InstructionAccess:
.long do_page_fault
.long ret_from_except
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on instruction access. */
+ . = 0x480
+ b InstructionSegment
+InstructionSegmentCont:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long UnknownException
+ .long ret_from_except
+#endif /* CONFIG_PPC64BRIDGE */
+
/* External interrupt */
. = 0x500;
HardwareInterrupt:
@@ -526,7 +537,7 @@ InstructionTLBMiss:
tlbli r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
InstructionAddressInvalid:
mfspr r3,SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -593,7 +604,7 @@ DataLoadTLBMiss:
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
DataAddressInvalid:
mfspr r3,SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -658,7 +669,7 @@ DataStoreTLBMiss:
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
STD_EXCEPTION(0x1400, SMI, SMIException)
@@ -706,7 +717,22 @@ Trap_0f:
EXCEPTION_PROLOG
b trap_0f_cont
#endif /* CONFIG_ALTIVEC */
-
+
+#ifdef CONFIG_PPC64BRIDGE
+DataAccess:
+ EXCEPTION_PROLOG
+ b DataAccessCont
+InstructionAccess:
+ EXCEPTION_PROLOG
+ b InstructionAccessCont
+DataSegment:
+ EXCEPTION_PROLOG
+ b DataSegmentCont
+InstructionSegment:
+ EXCEPTION_PROLOG
+ b InstructionSegmentCont
+#endif /* CONFIG_PPC64BRIDGE */
+
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
@@ -741,11 +767,12 @@ transfer_to_handler:
bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
+ FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
mtlr r23
SYNC
- rfi /* jump to handler, enable MMU */
+ RFI /* jump to handler, enable MMU */
/*
* On kernel stack overflow, load up an initial stack pointer
@@ -759,10 +786,11 @@ stack_ovf:
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
+ FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
SYNC
- rfi
+ RFI
/*
* Disable FP for the task which had the FPU previously,
@@ -774,8 +802,11 @@ stack_ovf:
load_up_fpu:
mfmsr r5
ori r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+ clrldi r5,r5,1 /* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
SYNC
- mtmsr r5 /* enable use of fpu now */
+ MTMSRD(r5) /* enable use of fpu now */
SYNC
/*
* For SMP, we don't do lazy FPU switching because it just gets too
@@ -827,7 +858,7 @@ load_up_fpu:
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
SYNC
- rfi
+ RFI
/*
* FP unavailable trap from kernel - print a message, but let
@@ -919,7 +950,7 @@ load_up_altivec:
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
SYNC
- rfi
+ RFI
/*
* AltiVec unavailable trap from kernel - print a message, but let
@@ -1046,7 +1077,7 @@ relocate_kernel:
copy_and_flush:
addi r5,r5,-4
addi r6,r6,-4
-4: li r0,8
+4: li r0,CACHELINE_WORDS
mtctr r0
3: addi r6,r6,4 /* copy a cache line */
lwzx r0,r6,r4
@@ -1195,27 +1226,6 @@ apus_interrupt_entry:
#endif /* CONFIG_APUS */
#ifdef CONFIG_SMP
- .globl __secondary_hold
-__secondary_hold:
- /* tell the master we're here */
- lis r5,0x4@h
- ori r5,r5,0x4@l
- stw r3,0(r5)
- dcbf 0,r5
-100:
- lis r5,0
- dcbi 0,r5
- lwz r4,0(r5)
- /* wait until we're told to start */
- cmp 0,r4,r3
- bne 100b
- /* our cpu # was at addr 0 - go */
- lis r5,__secondary_start@h
- ori r5,r5,__secondary_start@l
- tophys(r5,r5)
- mtlr r5
- mr r24,r3 /* cpu # */
- blr
#ifdef CONFIG_GEMINI
.globl __secondary_start_gemini
__secondary_start_gemini:
@@ -1243,7 +1253,15 @@ __secondary_start_psurge:
.globl __secondary_start
__secondary_start:
+#ifdef CONFIG_PPC64BRIDGE
+ mfmsr r0
+ clrldi r0,r0,1 /* make sure it's in 32-bit mode */
+ sync
+ MTMSRD(r0)
+ isync
+#else
bl enable_caches
+#endif
/* get current */
lis r2,current_set@h
@@ -1264,6 +1282,7 @@ __secondary_start:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
+ CLR_TOP32(r4)
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
@@ -1275,7 +1294,7 @@ __secondary_start:
mtspr SRR0,r3
mtspr SRR1,r4
SYNC
- rfi
+ RFI
#endif /* CONFIG_SMP */
/*
@@ -1333,14 +1352,11 @@ load_up_mmu:
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SDR1,r6
-#ifdef CONFIG_PPC64
- /* clear the v bit in the ASR so we can
- * behave as if we have segment registers
- * -- Cort
- */
- clrldi r6,r6,63
+#ifdef CONFIG_PPC64BRIDGE
+ /* clear the ASR so we only use the pseudo-segment registers. */
+ li r6,0
mtasr r6
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64BRIDGE */
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
@@ -1349,6 +1365,7 @@ load_up_mmu:
addi r3,r3,1 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
+#ifndef CONFIG_POWER4
/* Load the BAT registers with the values set up by MMU_init.
MMU_init takes care of whether we're on a 601 or not. */
mfpvr r3
@@ -1361,17 +1378,29 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
+#endif /* CONFIG_POWER4 */
blr
/*
* This is where the main kernel code starts.
*/
start_here:
+#ifndef CONFIG_PPC64BRIDGE
bl enable_caches
+#endif
/* ptr to current */
lis r2,init_task_union@h
ori r2,r2,init_task_union@l
+ /* Set up for using our exception vectors */
+ /* ptr to phys current thread */
+ tophys(r4,r2)
+ addi r4,r4,THREAD /* init task's THREAD */
+ CLR_TOP32(r4)
+ mtspr SPRG3,r4
+ li r3,0
+ mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+
/* Clear out the BSS */
lis r11,_end@ha
addi r11,r11,_end@l
@@ -1424,10 +1453,11 @@ start_here:
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ FIX_SRR1(r3,r5)
mtspr SRR0,r4
mtspr SRR1,r3
SYNC
- rfi
+ RFI
/* Load up the kernel context */
2:
SYNC /* Force all PTE updates to finish */
@@ -1439,34 +1469,30 @@ start_here:
#endif
bl load_up_mmu
-
-/* Set up for using our exception vectors */
- /* ptr to phys current thread */
- tophys(r4,r2)
- addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
- li r3,0
- mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
+ FIX_SRR1(r4,r5)
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
mtspr SRR0,r3
mtspr SRR1,r4
SYNC
- rfi /* enable MMU and jump to start_kernel */
+ RFI
/*
* Set up the segment registers for a new context.
*/
- .globl set_context
-set_context:
+_GLOBAL(set_context)
rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
addis r3,r3,0x6000 /* Set Ks, Ku bits */
li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
mtctr r0
li r4,0
-3: mtsrin r3,r4
+3:
+#ifdef CONFIG_PPC64BRIDGE
+ slbie r4
+#endif /* CONFIG_PPC64BRIDGE */
+ mtsrin r3,r4
addi r3,r3,1 /* next VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
@@ -1511,7 +1537,7 @@ clear_bats:
#ifndef CONFIG_GEMINI
flush_tlbs:
- lis r20, 0x1000
+ lis r20, 0x40
1: addic. r20, r20, -0x1000
tlbie r20
blt 1b
@@ -1522,30 +1548,79 @@ mmu_off:
addi r4, r3, __after_prom_start - _start
mfmsr r3
andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
- beq 1f
+ beqlr
ori r3,r3,MSR_DR|MSR_IR
xori r3,r3,MSR_DR|MSR_IR
mtspr SRR0,r4
mtspr SRR1,r3
sync
- rfi
-1: blr
+ RFI
#endif
-#if 0 /* That's useful debug stuff */
+#ifndef CONFIG_POWER4
+/*
+ * Use the first pair of BAT registers to map the 1st 16MB
+ * of RAM to KERNELBASE. From this point on we can't safely
+ * call OF any more.
+ */
+initial_bats:
+ lis r11,KERNELBASE@h
+#ifndef CONFIG_PPC64BRIDGE
+ mfspr r9,PVR
+ rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
+ cmpi 0,r9,1
+ bne 4f
+ ori r11,r11,4 /* set up BAT registers for 601 */
+ li r8,0x7f /* valid, block length = 8MB */
+ oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
+ oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
+ mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
+ mtspr IBAT0L,r8 /* lower BAT register */
+ mtspr IBAT1U,r9
+ mtspr IBAT1L,r10
+ isync
+ blr
+#endif /* CONFIG_PPC64BRIDGE */
+
+4: tophys(r8,r11)
+#ifdef CONFIG_SMP
+ ori r8,r8,0x12 /* R/W access, M=1 */
+#else
+ ori r8,r8,2 /* R/W access */
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_APUS
+ ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
+#else
+ ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
+#endif /* CONFIG_APUS */
+
+#ifdef CONFIG_PPC64BRIDGE
+ /* clear out the high 32 bits in the BAT */
+ clrldi r11,r11,32
+ clrldi r8,r8,32
+#endif /* CONFIG_PPC64BRIDGE */
+ mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+ mtspr DBAT0U,r11 /* bit in upper BAT register */
+ mtspr IBAT0L,r8
+ mtspr IBAT0U,r11
+#if 0 /* Useful debug code, please leave in for now so I don't have to
+ * look at docs when I need to setup a BAT ...
+ */
setup_screen_bat:
li r3,0
mtspr DBAT1U,r3
- mtspr IBAT1U,r3
- lis r3, 0x8200
- ori r4,r3,0x2a
+ lis r3,0xfa00
+ CLR_TOP32(r3)
+ lis r4,0xfa00
+ CLR_TOP32(r4)
+ ori r4,r4,0x2a
mtspr DBAT1L,r4
- mtspr IBAT1L,r4
ori r3,r3,(BL_16M<<2)|0x2 /* set up BAT registers for 604 */
mtspr DBAT1U,r3
- mtspr IBAT1U,r3
- blr
#endif
+ isync
+ blr
+#endif /* CONFIG_POWER4 */
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
@@ -1568,7 +1643,7 @@ m8260_gorom:
mtlr r4
blr
#endif
-
+
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,