Diffstat (limited to 'arch/ppc')
-rw-r--r--  arch/ppc/coffboot/misc.S            |    2
-rw-r--r--  arch/ppc/config.in                  |    2
-rw-r--r--  arch/ppc/configs/common_defconfig   |   66
-rw-r--r--  arch/ppc/configs/gemini_defconfig   |   13
-rw-r--r--  arch/ppc/defconfig                  |   66
-rw-r--r--  arch/ppc/kernel/entry.S             |   33
-rw-r--r--  arch/ppc/kernel/head.S              |  234
-rw-r--r--  arch/ppc/kernel/irq.c               |   23
-rw-r--r--  arch/ppc/kernel/misc.S              |   57
-rw-r--r--  arch/ppc/kernel/mk_defs.c           |    7
-rw-r--r--  arch/ppc/kernel/open_pic.c          |    6
-rw-r--r--  arch/ppc/kernel/pci-dma.c           |   18
-rw-r--r--  arch/ppc/kernel/pmac_pic.c          |   21
-rw-r--r--  arch/ppc/kernel/pmac_setup.c        |    5
-rw-r--r--  arch/ppc/kernel/pmac_time.c         |    4
-rw-r--r--  arch/ppc/kernel/ppc_asm.h           |   22
-rw-r--r--  arch/ppc/kernel/ppc_asm.tmpl        |   33
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c         |   11
-rw-r--r--  arch/ppc/kernel/process.c           |   51
-rw-r--r--  arch/ppc/kernel/prom.c              |   63
-rw-r--r--  arch/ppc/kernel/setup.c             |    3
-rw-r--r--  arch/ppc/kernel/smp.c               |    1
-rw-r--r--  arch/ppc/kernel/syscalls.c          |   17
-rw-r--r--  arch/ppc/kernel/traps.c             |   42
-rw-r--r--  arch/ppc/lib/string.S               |  385
-rw-r--r--  arch/ppc/mm/init.c                  |    4
-rw-r--r--  arch/ppc/xmon/start.c               |   61
27 files changed, 851 insertions(+), 399 deletions(-)
diff --git a/arch/ppc/coffboot/misc.S b/arch/ppc/coffboot/misc.S
index 7defc69e8..05639bdd1 100644
--- a/arch/ppc/coffboot/misc.S
+++ b/arch/ppc/coffboot/misc.S
@@ -9,7 +9,7 @@
.text
/*
- * Use the BAT0 registers to map the 1st 8MB of RAM to
+ * Use the BAT3 registers to map the 1st 8MB of RAM to
* the address given as the 1st argument.
*/
.globl setup_bats
diff --git a/arch/ppc/config.in b/arch/ppc/config.in
index 8ea7e9000..8bb23afa2 100644
--- a/arch/ppc/config.in
+++ b/arch/ppc/config.in
@@ -97,7 +97,7 @@ fi
if [ "$CONFIG_6xx" = "y" -a "$CONFIG_APUS" != "y" ]; then
define_bool CONFIG_PCI y
fi
-if [ "$CONFIG_PREP" = "y" -o "$CONFIG_PMAC" = "y" -o "$CONFIG_CHRP" = "y" -o "$CONFIG_ALL_PPC" = "y"]; then
+if [ "$CONFIG_PREP" = "y" -o "$CONFIG_PMAC" = "y" -o "$CONFIG_CHRP" = "y" -o "$CONFIG_ALL_PPC" = "y" ]; then
define_bool CONFIG_PCI y
fi
diff --git a/arch/ppc/configs/common_defconfig b/arch/ppc/configs/common_defconfig
index ac258f16e..17217702f 100644
--- a/arch/ppc/configs/common_defconfig
+++ b/arch/ppc/configs/common_defconfig
@@ -24,7 +24,7 @@ CONFIG_ALL_PPC=y
# CONFIG_GEMINI is not set
# CONFIG_APUS is not set
# CONFIG_SMP is not set
-# CONFIG_ALTIVEC is not set
+CONFIG_ALTIVEC=y
#
# Loadable module support
@@ -76,7 +76,7 @@ CONFIG_BOOTX_TEXT=y
#
# Block devices
#
-CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_FD is not set
CONFIG_BLK_DEV_IDE=y
#
@@ -178,9 +178,12 @@ CONFIG_SCSI=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=y
+CONFIG_ST_EXTRA_DEVS=2
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
CONFIG_CHR_DEV_SG=y
#
@@ -239,6 +242,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_SEAGATE is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
@@ -275,6 +279,7 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MACE=y
CONFIG_BMAC=y
+CONFIG_GMAC=y
# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
@@ -472,6 +477,7 @@ CONFIG_USB=y
# USB Controllers
#
# CONFIG_USB_UHCI is not set
+# CONFIG_USB_UHCI_ALT is not set
CONFIG_USB_OHCI=y
#
@@ -491,8 +497,7 @@ CONFIG_USB_OHCI=y
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
# CONFIG_USB_DC2XX is not set
-CONFIG_USB_SCSI=m
-CONFIG_USB_SCSI_DEBUG=y
+# CONFIG_USB_STORAGE is not set
# CONFIG_USB_DABUSB is not set
#
@@ -513,11 +518,15 @@ CONFIG_USB_MOUSE=y
#
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
+CONFIG_HFS_FS=y
# CONFIG_BFS_FS is not set
-# CONFIG_FAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=y
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
CONFIG_ISO9660_FS=y
@@ -549,10 +558,51 @@ CONFIG_LOCKD=y
#
# Partition Types
#
-# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
#
# Sound
diff --git a/arch/ppc/configs/gemini_defconfig b/arch/ppc/configs/gemini_defconfig
index 6fe267c9a..90cc5b71f 100644
--- a/arch/ppc/configs/gemini_defconfig
+++ b/arch/ppc/configs/gemini_defconfig
@@ -24,7 +24,7 @@ CONFIG_6xx=y
CONFIG_GEMINI=y
# CONFIG_APUS is not set
# CONFIG_SMP is not set
-# CONFIG_ALTIVEC is not set
+CONFIG_ALTIVEC=y
CONFIG_MACH_SPECIFIC=y
#
@@ -87,7 +87,6 @@ CONFIG_KERNEL_ELF=y
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_XD is not set
# CONFIG_BLK_DEV_DAC960 is not set
-CONFIG_PARIDE_PARPORT=y
# CONFIG_PARIDE is not set
# CONFIG_BLK_DEV_IDE_MODES is not set
# CONFIG_BLK_DEV_HD is not set
@@ -151,9 +150,12 @@ CONFIG_SCSI=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
# CONFIG_CHR_DEV_ST is not set
+CONFIG_ST_EXTRA_DEVS=2
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
# CONFIG_CHR_DEV_SG is not set
#
@@ -208,6 +210,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_SEAGATE is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
@@ -216,6 +219,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_MESH is not set
# CONFIG_SCSI_MAC53C94 is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
#
# IEEE 1394 (FireWire) support
@@ -242,6 +246,7 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
# CONFIG_MACE is not set
# CONFIG_BMAC is not set
+# CONFIG_GMAC is not set
CONFIG_NCR885E=y
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
@@ -369,6 +374,7 @@ CONFIG_UNIX98_PTY_COUNT=256
#
# CONFIG_QUOTA is not set
# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -405,10 +411,7 @@ CONFIG_LOCKD=y
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
-CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_SGI_PARTITION is not set
-# CONFIG_SUN_PARTITION is not set
# CONFIG_NLS is not set
#
diff --git a/arch/ppc/defconfig b/arch/ppc/defconfig
index ac258f16e..17217702f 100644
--- a/arch/ppc/defconfig
+++ b/arch/ppc/defconfig
@@ -24,7 +24,7 @@ CONFIG_ALL_PPC=y
# CONFIG_GEMINI is not set
# CONFIG_APUS is not set
# CONFIG_SMP is not set
-# CONFIG_ALTIVEC is not set
+CONFIG_ALTIVEC=y
#
# Loadable module support
@@ -76,7 +76,7 @@ CONFIG_BOOTX_TEXT=y
#
# Block devices
#
-CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_FD is not set
CONFIG_BLK_DEV_IDE=y
#
@@ -178,9 +178,12 @@ CONFIG_SCSI=y
# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
+CONFIG_SD_EXTRA_DEVS=40
CONFIG_CHR_DEV_ST=y
+CONFIG_ST_EXTRA_DEVS=2
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_SR_EXTRA_DEVS=2
CONFIG_CHR_DEV_SG=y
#
@@ -239,6 +242,7 @@ CONFIG_SCSI_NCR53C8XX_SYNC=20
# CONFIG_SCSI_QLOGIC_FAS is not set
# CONFIG_SCSI_QLOGIC_ISP is not set
# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
# CONFIG_SCSI_SEAGATE is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_T128 is not set
@@ -275,6 +279,7 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MACE=y
CONFIG_BMAC=y
+CONFIG_GMAC=y
# CONFIG_NCR885E is not set
# CONFIG_OAKNET is not set
# CONFIG_NET_VENDOR_3COM is not set
@@ -472,6 +477,7 @@ CONFIG_USB=y
# USB Controllers
#
# CONFIG_USB_UHCI is not set
+# CONFIG_USB_UHCI_ALT is not set
CONFIG_USB_OHCI=y
#
@@ -491,8 +497,7 @@ CONFIG_USB_OHCI=y
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
# CONFIG_USB_DC2XX is not set
-CONFIG_USB_SCSI=m
-CONFIG_USB_SCSI_DEBUG=y
+# CONFIG_USB_STORAGE is not set
# CONFIG_USB_DABUSB is not set
#
@@ -513,11 +518,15 @@ CONFIG_USB_MOUSE=y
#
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
+# CONFIG_AUTOFS4_FS is not set
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_HFS_FS is not set
+CONFIG_HFS_FS=y
# CONFIG_BFS_FS is not set
-# CONFIG_FAT_FS is not set
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_UMSDOS_FS is not set
+CONFIG_VFAT_FS=y
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
CONFIG_ISO9660_FS=y
@@ -549,10 +558,51 @@ CONFIG_LOCKD=y
#
# Partition Types
#
-# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
CONFIG_MAC_PARTITION=y
CONFIG_MSDOS_PARTITION=y
-# CONFIG_NLS is not set
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_NLS=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_1 is not set
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
#
# Sound
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 2d1238a6b..013812afc 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -176,7 +176,7 @@ ret_from_syscall_2:
7: .string "syscall %d(%x, %x, %x, %x, %x, "
77: .string "%x, %x), current=%p\n"
79: .string " -> %x\n"
- .align 2
+ .align 2,0
#endif
/*
@@ -209,6 +209,9 @@ _GLOBAL(_switch)
mflr r20 /* Return to switch caller */
mfmsr r22
li r0,MSR_FP /* Disable floating-point */
+#ifdef CONFIG_ALTIVEC
+ oris r0,r0,MSR_VEC@h
+#endif /* CONFIG_ALTIVEC */
andc r22,r22,r0
stw r20,_NIP(r1)
stw r22,_MSR(r1)
@@ -274,7 +277,7 @@ _GLOBAL(_switch)
SYNC
rfi
-#ifdef __SMP__
+#ifdef CONFIG_SMP
.globl ret_from_smpfork
ret_from_smpfork:
bl schedule_tail
@@ -310,22 +313,32 @@ ret_from_except:
lwz r5,_MSR(r1)
andi. r5,r5,MSR_EE
beq 2f
+ .globl lost_irq_ret
+lost_irq_ret:
3: lis r4,ppc_n_lost_interrupts@ha
lwz r4,ppc_n_lost_interrupts@l(r4)
cmpi 0,r4,0
beq+ 1f
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- .globl lost_irq_ret
-lost_irq_ret:
b 3b
-1: lis r4,bh_mask@ha
- lwz r4,bh_mask@l(r4)
- lis r5,bh_active@ha
- lwz r5,bh_active@l(r5)
- and. r4,r4,r5
+1: lis r4,softirq_state@ha
+ addi r4,r4,softirq_state@l
+#ifdef CONFIG_SMP
+ /* get processor # */
+ lwz r3,PROCESSOR(r2)
+#ifndef CONFIG_PPC64
+ slwi r3,r3,5
+#else
+#error not 64-bit ready
+#endif
+ add r4,r4,r3
+#endif /* CONFIG_SMP */
+ lwz r5,0(r4)
+ lwz r4,4(r4)
+ and. r5,r5,r4
beq+ 2f
- bl do_bottom_half
+ bl do_softirq
.globl do_bottom_half_ret
do_bottom_half_ret:
2: /* disable interrupts */
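
The rewritten interrupt-return path above polls the per-CPU softirq state instead of the old bh_mask/bh_active pair. A minimal C sketch of the check it performs; the field names and 32-byte per-CPU stride are inferred from the assembly and are assumptions, not the kernel's actual declarations:

#include <stdint.h>

#define NR_CPUS 32

/* Assumed layout: one 32-byte entry per CPU (hence "slwi r3,r3,5"),
 * whose first two words are ANDed before do_softirq() is called. */
struct softirq_state_sketch {
	uint32_t active;	/* offset 0, first lwz   */
	uint32_t mask;		/* offset 4, second lwz  */
	uint32_t pad[6];	/* pad the entry to 32 bytes */
};

extern struct softirq_state_sketch softirq_state[NR_CPUS];
extern void do_softirq(void);

static void maybe_run_softirqs(int cpu)
{
	if (softirq_state[cpu].active & softirq_state[cpu].mask)
		do_softirq();
}
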
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index 8b56c635c..dd16b8c27 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -460,8 +460,24 @@ SystemCall:
STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
-#ifdef CONFIG_ALTIVEC
- STD_EXCEPTION(0xf20, AltiVec, AltiVecUnavailable)
+#ifndef CONFIG_ALTIVEC
+ STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
+#else
+/*
+ * The Altivec unavailable trap is at 0x0f20. Foo.
+ * We effectively remap it to 0x3000.
+ */
+ . = 0xf00
+ b Trap_0f
+trap_0f_cont:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ bl transfer_to_handler
+ .long UnknownException
+ .long ret_from_except
+
+ . = 0xf20
+ b AltiVecUnavailable
#endif /* CONFIG_ALTIVEC */
/*
@@ -674,6 +690,21 @@ DataStoreTLBMiss:
. = 0x3000
+#ifdef CONFIG_ALTIVEC
+AltiVecUnavailable:
+ EXCEPTION_PROLOG
+ bne load_up_altivec /* if from user, just load it up */
+ li r20,MSR_KERNEL
+ bl transfer_to_handler /* if from kernel, take a trap */
+ .long KernelAltiVec
+ .long ret_from_except
+
+/* here are the bits of trap 0xf00 which got displaced */
+Trap_0f:
+ EXCEPTION_PROLOG
+ b trap_0f_cont
+#endif /* CONFIG_ALTIVEC */
+
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
@@ -813,72 +844,134 @@ KernelFP:
86: .string "floating point used in kernel (task=%p, pc=%x)\n"
.align 4
+#ifdef CONFIG_ALTIVEC
+/* Note that the AltiVec support is closely modeled after the FP
+ * support. Changes to one are likely to be applicable to the
+ * other! */
+load_up_altivec:
/*
- * Take away the altivec regs.
- *
- * For now, ignore the vrsave regs and save them all
- * -- Cort
+ * Disable AltiVec for the task which had AltiVec previously,
+ * and save its AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ * On SMP we know the AltiVec units are free, since we give it up every
+ * switch. -- Kumar
*/
- .globl giveup_altivec
-giveup_altivec:
-#ifdef CONFIG_ALTIVEC
- /* check for altivec */
- mfspr r4,PVR
- srwi r4,r4,16
- cmpi 0,r4,12
- bnelr
-
- /* enable altivec so we can save */
- mfmsr r4
- oris r4,r4,MSR_VEC@h
- mtmsr r4
+ mfmsr r5
+ oris r5,r5,MSR_VEC@h
+ SYNC
+ mtmsr r5 /* enable use of AltiVec now */
+ SYNC
+/*
+ * For SMP, we don't do lazy AltiVec switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altivec in switch_to.
+ */
+#ifndef __SMP__
+#ifndef CONFIG_APUS
+ lis r6,-KERNELBASE@h
+#else
+ lis r6,CYBERBASEp@h
+ lwz r6,0(r6)
+#endif
+ addis r3,r6,last_task_used_altivec@ha
+ lwz r4,last_task_used_altivec@l(r3)
+ cmpi 0,r4,0
+ beq 1f
+ add r4,r4,r6
+ addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
+ SAVE_32VR(0,r20,r4)
+ MFVSCR(vr0)
+ li r20,THREAD_VSCR
+ STVX(vr0,r20,r4)
+ lwz r5,PT_REGS(r4)
+ add r5,r5,r6
+ lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r20,MSR_VEC@h
+ andc r4,r4,r20 /* disable altivec for previous task */
+ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* __SMP__ */
+ /* enable use of AltiVec after return */
+ oris r23,r23,MSR_VEC@h
+ mfspr r5,SPRG3 /* current task's THREAD (phys) */
+ li r20,THREAD_VSCR
+ LVX(vr0,r20,r5)
+ MTVSCR(vr0)
+ REST_32VR(0,r20,r5)
+#ifndef __SMP__
+ subi r4,r5,THREAD
+ sub r4,r4,r6
+ stw r4,last_task_used_altivec@l(r3)
+#endif /* __SMP__ */
+ /* restore registers and return */
+ lwz r3,_CCR(r21)
+ lwz r4,_LINK(r21)
+ mtcrf 0xff,r3
+ mtlr r4
+ REST_GPR(1, r21)
+ REST_4GPRS(3, r21)
+ /* we haven't used ctr or xer */
+ mtspr SRR1,r23
+ mtspr SRR0,r22
+ REST_GPR(20, r21)
+ REST_2GPRS(22, r21)
+ lwz r21,GPR21(r21)
+ SYNC
+ rfi
- /* make sure our tsk pointer is valid */
- cmpi 0,r3,0
- beqlr
+/*
+ * AltiVec unavailable trap from kernel - print a message, but let
+ * the task use AltiVec in the kernel until it returns to user mode.
+ */
+KernelAltiVec:
+ lwz r3,_MSR(r1)
+ oris r3,r3,MSR_VEC@h
+ stw r3,_MSR(r1) /* enable use of AltiVec after return */
+ lis r3,87f@h
+ ori r3,r3,87f@l
+ mr r4,r2 /* current */
+ lwz r5,_NIP(r1)
+ bl printk
+ b ret_from_except
+87: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
+ .align 4
- /* save altivec regs */
- addi r4,r3,THREAD+THREAD_VRSAVE
- mfspr r5,256 /* vrsave */
- stw r5,0(r4)
-
- /* get regs for the task */
- addi r4,r3,THREAD+PT_REGS
- /* turn off the altivec bit in the tasks regs */
- lwz r5,_MSR(r4)
- lis r6,MSR_VEC@h
- andi. r5,r5,r6
- stw r5,_MSR(r4)
-
- /* we've given up the altivec - clear the pointer */
- li r3,0
- lis r4,last_task_used_altivec@h
- stw r3,last_task_used_altivec@l(r4)
-#endif /* CONFIG_ALTIVEC */
- blr
-
- .globl load_up_altivec
-load_up_altivec:
-#ifdef CONFIG_ALTIVEC
- /* check for altivec */
- mfspr r4,PVR
- srwi r4,r4,16
- cmpi 0,r4,12
- bnelr
-
- /* restore altivec regs */
- addi r4,r3,THREAD+THREAD_VRSAVE
- lwz r5,0(r4)
- mtspr 256,r5 /* vrsave */
-
- /* get regs for the task */
- addi r4,r3,THREAD+PT_REGS
- /* turn on the altivec bit in the tasks regs */
- lwz r5,_MSR(r4)
+/*
+ * giveup_altivec(tsk)
+ * Disable AltiVec for the task given as the argument,
+ * and save the AltiVec registers in its thread_struct.
+ * Enables AltiVec for use in the kernel on return.
+ */
+
+ .globl giveup_altivec
+giveup_altivec:
+ mfmsr r5
oris r5,r5,MSR_VEC@h
- stw r5,_MSR(r4)
-#endif /* CONFIG_ALTIVEC */
+ SYNC
+ mtmsr r5 /* enable use of AltiVec now */
+ SYNC
+ cmpi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ lwz r5,PT_REGS(r3)
+ cmpi 0,r5,0
+ SAVE_32VR(0, r4, r3)
+ MFVSCR(vr0)
+ li r4,THREAD_VSCR
+ STVX(vr0, r4, r3)
+ beq 1f
+ lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r3,MSR_VEC@h
+ andc r4,r4,r3 /* disable AltiVec for previous task */
+ stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef __SMP__
+ li r5,0
+ lis r4,last_task_used_altivec@ha
+ stw r5,last_task_used_altivec@l(r4)
+#endif /* __SMP__ */
blr
+#endif /* CONFIG_ALTIVEC */
/*
* giveup_fpu(tsk)
@@ -1437,17 +1530,16 @@ mmu_off:
#if 0 /* That's useful debug stuff */
setup_screen_bat:
+ li r3,0
+ mtspr DBAT1U,r3
+ mtspr IBAT1U,r3
lis r3, 0x9100
-#ifdef __SMP__
- ori r3,r3,0x12
-#else
- ori r3,r3,0x2
-#endif
- mtspr DBAT1L, r3
- mtspr IBAT1L, r3
+ ori r4,r3,0x2a
+ mtspr DBAT1L,r4
+ mtspr IBAT1L,r4
ori r3,r3,(BL_8M<<2)|0x2 /* set up BAT registers for 604 */
- mtspr DBAT1U, r3
- mtspr IBAT1U, r3
+ mtspr DBAT1U,r3
+ mtspr IBAT1U,r3
blr
#endif
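
For reference, a rough C rendering of the lazy-AltiVec ownership policy that load_up_altivec/giveup_altivec implement on UP. The struct below is a simplified stand-in for the thread_struct fields (vr[], vscr, saved MSR) reached through the THREAD_VR0/THREAD_VSCR offsets from mk_defs.c; it is illustrative only, not the kernel's declaration:

typedef struct { unsigned int w[4]; } vector128;

#define MSR_VEC 0x02000000	/* MSR[VEC], the bit tested and set above */

struct vec_thread_sketch {
	vector128 vr[32];	/* SAVE_32VR / REST_32VR target   */
	vector128 vscr;		/* THREAD_VSCR                    */
	unsigned long *msr;	/* saved MSR word in the pt_regs  */
};

static struct vec_thread_sketch *last_task_used_altivec;

static void load_up_altivec_sketch(struct vec_thread_sketch *curr)
{
	struct vec_thread_sketch *prev = last_task_used_altivec;

	if (prev) {
		/* ... save prev->vr[] and prev->vscr from the vector unit */
		*prev->msr &= ~MSR_VEC;	/* prev re-traps through 0xf20 on next use */
	}
	/* ... reload curr->vr[] and curr->vscr into the vector unit */
	*curr->msr |= MSR_VEC;		/* curr keeps the unit until someone steals it */
	last_task_used_altivec = curr;
}
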
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 8b5f590fb..fd77fbc36 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -74,8 +74,8 @@ volatile unsigned char *chrp_int_ack_special;
irq_desc_t irq_desc[NR_IRQS];
int ppc_spurious_interrupts = 0;
-unsigned int ppc_local_bh_count[NR_CPUS];
-unsigned int ppc_local_irq_count[NR_CPUS];
+unsigned int local_bh_count[NR_CPUS];
+unsigned int local_irq_count[NR_CPUS];
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned int ppc_cached_irq_mask[NR_MASK_WORDS];
unsigned int ppc_lost_interrupts[NR_MASK_WORDS];
@@ -350,7 +350,6 @@ unsigned volatile int global_irq_lock;
atomic_t global_irq_count;
atomic_t global_bh_count;
-atomic_t global_bh_lock;
static void show(char * str)
{
@@ -361,12 +360,12 @@ static void show(char * str)
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [%d %d]\n",
atomic_read(&global_irq_count),
- ppc_local_irq_count[0],
- ppc_local_irq_count[1]);
+ local_irq_count[0],
+ local_irq_count[1]);
printk("bh: %d [%d %d]\n",
atomic_read(&global_bh_count),
- ppc_local_bh_count[0],
- ppc_local_bh_count[1]);
+ local_bh_count[0],
+ local_bh_count[1]);
stack = (unsigned long *) &str;
for (i = 40; i ; i--) {
unsigned long x = *++stack;
@@ -401,7 +400,7 @@ static inline void wait_on_irq(int cpu)
* already executing in one..
*/
if (!atomic_read(&global_irq_count)) {
- if (ppc_local_bh_count[cpu]
+ if (local_bh_count[cpu]
|| !atomic_read(&global_bh_count))
break;
}
@@ -423,7 +422,7 @@ static inline void wait_on_irq(int cpu)
continue;
if (global_irq_lock)
continue;
- if (!ppc_local_bh_count[cpu]
+ if (!local_bh_count[cpu]
&& atomic_read(&global_bh_count))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
@@ -514,7 +513,7 @@ void __global_cli(void)
if (flags & (1 << 15)) {
int cpu = smp_processor_id();
__cli();
- if (!ppc_local_irq_count[cpu])
+ if (!local_irq_count[cpu])
get_irqlock(cpu);
}
}
@@ -523,7 +522,7 @@ void __global_sti(void)
{
int cpu = smp_processor_id();
- if (!ppc_local_irq_count[cpu])
+ if (!local_irq_count[cpu])
release_irqlock(cpu);
__sti();
}
@@ -547,7 +546,7 @@ unsigned long __global_save_flags(void)
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
- if (!ppc_local_irq_count[smp_processor_id()]) {
+ if (!local_irq_count[smp_processor_id()]) {
if (local_enabled)
retval = 1;
if (global_irq_holder == (unsigned char) smp_processor_id())
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index fde7112c7..50f63eeb4 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -229,7 +229,7 @@ _GLOBAL(flush_dcache_range)
blr
/*
- * Flush a particular page from the DATA cache
+ * Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
* snoop from the data cache.
* This is a no-op on the 601 which has a unified cache.
@@ -241,18 +241,31 @@ _GLOBAL(__flush_page_to_ram)
rlwinm r5,r5,16,16,31
cmpi 0,r5,1
beqlr /* for 601, do nothing */
- li r4,0x0FFF
- andc r3,r3,r4 /* Get page base address */
li r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4
- mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
addi r3,r3,CACHE_LINE_SIZE
bdnz 0b
sync
+ blr
+
+/*
+ * Flush a particular page from the instruction cache.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ * This is a no-op on the 601 which has a unified cache.
+ *
+ * void __flush_icache_page(void *page)
+ */
+_GLOBAL(__flush_icache_page)
+ mfspr r5,PVR
+ rlwinm r5,r5,16,16,31
+ cmpi 0,r5,1
+ beqlr /* for 601, do nothing */
+ li r4,4096/CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4
-1: icbi 0,r6
- addi r6,r6,CACHE_LINE_SIZE
+1: icbi 0,r3
+ addi r3,r3,CACHE_LINE_SIZE
bdnz 1b
sync
isync
@@ -272,6 +285,38 @@ _GLOBAL(clear_page)
blr
/*
+ * Copy a whole page. We use the dcbz instruction on the destination
+ * to reduce memory traffic (it eliminates the unnecessary reads of
+ * the destination into cache). This requires that the destination
+ * is cacheable.
+ */
+_GLOBAL(copy_page)
+ li r0,4096/CACHE_LINE_SIZE
+ mtctr r0
+ addi r3,r3,-4
+ addi r4,r4,-4
+ li r5,4
+1: dcbz r5,r3
+ lwz r6,4(r4)
+ lwz r7,8(r4)
+ lwz r8,12(r4)
+ lwzu r9,16(r4)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ stwu r9,16(r3)
+ lwz r6,4(r4)
+ lwz r7,8(r4)
+ lwz r8,12(r4)
+ lwzu r9,16(r4)
+ stw r6,4(r3)
+ stw r7,8(r3)
+ stw r8,12(r3)
+ stwu r9,16(r3)
+ bdnz 1b
+ blr
+
+/*
* Atomic [test&set] exchange
*
* unsigned long xchg_u32(void *ptr, unsigned long val)
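
The new copy_page relies on dcbz to zero-allocate each destination cache line, so the old destination contents are never read in from memory. A minimal C sketch of that structure, assuming a PowerPC target with GCC inline asm (the real routine stays in assembly and unrolls the inner copy into four-word load/store groups):

#define PAGE_SIZE	4096
#define CACHE_LINE_SIZE	32

static void copy_page_sketch(void *to, const void *from)
{
	unsigned long *d = to;
	const unsigned long *s = from;
	const int words = CACHE_LINE_SIZE / sizeof(unsigned long);
	int line, w;

	for (line = 0; line < PAGE_SIZE / CACHE_LINE_SIZE; line++) {
		/* establish the destination line as zero in the data cache */
		__asm__ __volatile__("dcbz 0,%0" : : "r" (d) : "memory");
		for (w = 0; w < words; w++)
			d[w] = s[w];
		d += words;
		s += words;
	}
}
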
diff --git a/arch/ppc/kernel/mk_defs.c b/arch/ppc/kernel/mk_defs.c
index 34682bd2a..4f3c6834d 100644
--- a/arch/ppc/kernel/mk_defs.c
+++ b/arch/ppc/kernel/mk_defs.c
@@ -9,6 +9,7 @@
*/
#include <stddef.h>
+#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -48,9 +49,11 @@ main(void)
DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
- DEFINE(THREAD_VRF, offsetof(struct thread_struct, vrf));
- DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+#ifdef CONFIG_ALTIVEC
+ DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
+ DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
+#endif /* CONFIG_ALTIVEC */
/* Interrupt register frame */
DEFINE(TASK_UNION_SIZE, sizeof(union task_union));
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
diff --git a/arch/ppc/kernel/open_pic.c b/arch/ppc/kernel/open_pic.c
index d4dbe05e5..301a82ba8 100644
--- a/arch/ppc/kernel/open_pic.c
+++ b/arch/ppc/kernel/open_pic.c
@@ -279,7 +279,7 @@ void __init openpic_init(int main_pic)
/* Initialize the spurious interrupt */
if ( ppc_md.progress ) ppc_md.progress("openpic spurious",0x3bd);
openpic_set_spurious(OPENPIC_VEC_SPURIOUS);
- if ( !(_machine && (_MACH_gemini|_MACH_Pmac)) )
+ if ( !(_machine & (_MACH_gemini|_MACH_Pmac)) )
{
if (request_irq(IRQ_8259_CASCADE, no_action, SA_INTERRUPT,
"82c59 cascade", NULL))
@@ -490,7 +490,7 @@ void openpic_enable_irq(u_int irq)
/* make sure mask gets to controller before we return to user */
do {
mb(); /* sync is probably useless here */
- } while(openpic_readfield(&OpenPIC->Source[irq].Vector_Priority,
+ } while(openpic_readfield(&ISU[irq - open_pic_irq_offset].Vector_Priority,
OPENPIC_MASK));
}
@@ -501,7 +501,7 @@ void openpic_disable_irq(u_int irq)
/* make sure mask gets to controller before we return to user */
do {
mb(); /* sync is probably useless here */
- } while(!openpic_readfield(&OpenPIC->Source[irq].Vector_Priority,
+ } while(!openpic_readfield(&ISU[irq - open_pic_irq_offset].Vector_Priority,
OPENPIC_MASK));
}
diff --git a/arch/ppc/kernel/pci-dma.c b/arch/ppc/kernel/pci-dma.c
index 089566908..174de223f 100644
--- a/arch/ppc/kernel/pci-dma.c
+++ b/arch/ppc/kernel/pci-dma.c
@@ -14,20 +14,6 @@
#include <linux/pci.h>
#include <asm/io.h>
-/* Pure 2^n version of get_order */
-extern __inline__ int __get_order(unsigned long size)
-{
- int order;
-
- size = (size-1) >> (PAGE_SHIFT-1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
-
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
@@ -36,7 +22,7 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
gfp |= GFP_DMA;
- ret = (void *)__get_free_pages(gfp, __get_order(size));
+ ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
@@ -48,5 +34,5 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
void pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
- free_pages((unsigned long)vaddr, __get_order(size));
+ free_pages((unsigned long)vaddr, get_order(size));
}
diff --git a/arch/ppc/kernel/pmac_pic.c b/arch/ppc/kernel/pmac_pic.c
index d2d5e6b25..d13875c9f 100644
--- a/arch/ppc/kernel/pmac_pic.c
+++ b/arch/ppc/kernel/pmac_pic.c
@@ -31,8 +31,6 @@ static int max_irqs;
static int max_real_irqs;
static int has_openpic = 0;
-#define MAXCOUNT 10000000
-
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
@@ -187,25 +185,6 @@ pmac_get_irq(struct pt_regs *regs)
smp_message_recv();
return -2; /* ignore, already handled */
}
-
- {
- unsigned int loops = MAXCOUNT;
- while (test_bit(0, &global_irq_lock)) {
- if (smp_processor_id() == global_irq_holder) {
- printk("uh oh, interrupt while we hold global irq lock!\n");
-#ifdef CONFIG_XMON
- xmon(0);
-#endif
- break;
- }
- if (loops-- == 0) {
- printk("do_IRQ waiting for irq lock (holder=%d)\n", global_irq_holder);
-#ifdef CONFIG_XMON
- xmon(0);
-#endif
- }
- }
- }
#endif /* __SMP__ */
/* Yeah, I know, this could be a separate do_IRQ function */
diff --git a/arch/ppc/kernel/pmac_setup.c b/arch/ppc/kernel/pmac_setup.c
index e1c1815ac..5fef07e89 100644
--- a/arch/ppc/kernel/pmac_setup.c
+++ b/arch/ppc/kernel/pmac_setup.c
@@ -60,6 +60,7 @@
#include <asm/machdep.h>
#include <asm/keyboard.h>
#include <asm/dma.h>
+#include <asm/bootx.h>
#include "time.h"
#include "local_irq.h"
@@ -440,6 +441,7 @@ kdev_t __init find_ide_boot(void)
{
char *p;
int n;
+ kdev_t __init pmac_find_ide_boot(char *bootdevice, int n);
if (bootdevice == NULL)
return 0;
@@ -695,9 +697,12 @@ pmac_init(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_BOOTX_TEXT
extern void drawchar(char c);
extern void drawstring(const char *c);
+extern boot_infos_t *disp_bi;
void
pmac_progress(char *s, unsigned short hex)
{
+ if (disp_bi == 0)
+ return;
drawstring(s);
drawchar('\n');
}
diff --git a/arch/ppc/kernel/pmac_time.c b/arch/ppc/kernel/pmac_time.c
index 1c935a625..3b7dd283f 100644
--- a/arch/ppc/kernel/pmac_time.c
+++ b/arch/ppc/kernel/pmac_time.c
@@ -71,8 +71,8 @@ unsigned long pmac_get_rtc_time(void)
if (req.reply_len != 7)
printk(KERN_ERR "pmac_get_rtc_time: got %d byte reply\n",
req.reply_len);
- return (unsigned long)(req.reply[1] << 24) + (req.reply[2] << 16)
- + (req.reply[3] << 8) + (unsigned long)req.reply[4] - RTC_OFFSET;
+ return (req.reply[3] << 24) + (req.reply[4] << 16)
+ + (req.reply[5] << 8) + req.reply[6] - RTC_OFFSET;
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
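
The pmac_get_rtc_time fix changes which bytes of the Cuda reply form the clock value: the 32-bit big-endian seconds counter now comes from reply bytes 3 through 6 (the earlier bytes are packet header), with RTC_OFFSET subtracted afterwards. A hypothetical helper, shown only to make the byte arithmetic explicit:

#include <stdint.h>

static uint32_t rtc_seconds_from_reply(const unsigned char *reply)
{
	return ((uint32_t)reply[3] << 24) | ((uint32_t)reply[4] << 16) |
	       ((uint32_t)reply[5] <<  8) |  (uint32_t)reply[6];
}
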
diff --git a/arch/ppc/kernel/ppc_asm.h b/arch/ppc/kernel/ppc_asm.h
index 2b999ab36..d9093c9e1 100644
--- a/arch/ppc/kernel/ppc_asm.h
+++ b/arch/ppc/kernel/ppc_asm.h
@@ -44,6 +44,28 @@
#define REST_16FPRS(n, base) REST_8FPRS(n, base); REST_8FPRS(n+8, base)
#define REST_32FPRS(n, base) REST_16FPRS(n, base); REST_16FPRS(n+16, base)
+/*
+ * Once a version of gas that understands the AltiVec instructions
+ * is freely available, we can do this the normal way... - paulus
+ */
+#define LVX(r,a,b) .long (31<<26)+((r)<<21)+((a)<<16)+((b)<<11)+(103<<1)
+#define STVX(r,a,b) .long (31<<26)+((r)<<21)+((a)<<16)+((b)<<11)+(231<<1)
+#define MFVSCR(r) .long (4<<26)+((r)<<21)+(1540<<1)
+#define MTVSCR(r) .long (4<<26)+((r)<<11)+(802<<1)
+
+#define SAVE_VR(n,b,base) li b,THREAD_VR0+(16*(n)); STVX(n,b,base)
+#define SAVE_2VR(n,b,base) SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
+#define SAVE_4VR(n,b,base) SAVE_2VR(n,b,base); SAVE_2VR(n+2,b,base)
+#define SAVE_8VR(n,b,base) SAVE_4VR(n,b,base); SAVE_4VR(n+4,b,base)
+#define SAVE_16VR(n,b,base) SAVE_8VR(n,b,base); SAVE_8VR(n+8,b,base)
+#define SAVE_32VR(n,b,base) SAVE_16VR(n,b,base); SAVE_16VR(n+16,b,base)
+#define REST_VR(n,b,base) li b,THREAD_VR0+(16*(n)); LVX(n,b,base)
+#define REST_2VR(n,b,base) REST_VR(n,b,base); REST_VR(n+1,b,base)
+#define REST_4VR(n,b,base) REST_2VR(n,b,base); REST_2VR(n+2,b,base)
+#define REST_8VR(n,b,base) REST_4VR(n,b,base); REST_4VR(n+4,b,base)
+#define REST_16VR(n,b,base) REST_8VR(n,b,base); REST_8VR(n+8,b,base)
+#define REST_32VR(n,b,base) REST_16VR(n,b,base); REST_16VR(n+16,b,base)
+
#define SYNC \
sync; \
isync
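
The LVX/STVX/MFVSCR/MTVSCR macros hand-assemble the AltiVec instruction words because the assembler of the day cannot. A small, self-contained cross-check of the LVX() encoding (primary opcode 31 in the top six bits, three 5-bit register fields, extended opcode 103 shifted left by one); illustrative only:

#include <stdio.h>
#include <stdint.h>

static uint32_t encode_lvx(unsigned vrt, unsigned ra, unsigned rb)
{
	return (31u << 26) | (vrt << 21) | (ra << 16) | (rb << 11) | (103u << 1);
}

int main(void)
{
	/* lvx vr0,r20,r5, as REST_32VR generates in load_up_altivec */
	printf("lvx vr0,r20,r5 -> 0x%08x\n", encode_lvx(0, 20, 5));
	return 0;
}
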
diff --git a/arch/ppc/kernel/ppc_asm.tmpl b/arch/ppc/kernel/ppc_asm.tmpl
index 94a5bd74c..c35192bb4 100644
--- a/arch/ppc/kernel/ppc_asm.tmpl
+++ b/arch/ppc/kernel/ppc_asm.tmpl
@@ -80,3 +80,36 @@
#define fr29 29
#define fr30 30
#define fr31 31
+
+#define vr0 0
+#define vr1 1
+#define vr2 2
+#define vr3 3
+#define vr4 4
+#define vr5 5
+#define vr6 6
+#define vr7 7
+#define vr8 8
+#define vr9 9
+#define vr10 10
+#define vr11 11
+#define vr12 12
+#define vr13 13
+#define vr14 14
+#define vr15 15
+#define vr16 16
+#define vr17 17
+#define vr18 18
+#define vr19 19
+#define vr20 20
+#define vr21 21
+#define vr22 22
+#define vr23 23
+#define vr24 24
+#define vr25 25
+#define vr26 26
+#define vr27 27
+#define vr28 28
+#define vr29 29
+#define vr30 30
+#define vr31 31
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 9a5444a51..757715512 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -11,6 +11,7 @@
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/irq.h>
+#include <linux/pci.h>
#include <asm/page.h>
#include <asm/semaphore.h>
@@ -72,8 +73,8 @@ EXPORT_SYMBOL(do_lost_interrupts);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
-EXPORT_SYMBOL(ppc_local_irq_count);
-EXPORT_SYMBOL(ppc_local_bh_count);
+EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(local_bh_count);
#ifdef __SMP__
EXPORT_SYMBOL(kernel_flag);
#endif /* __SMP__ */
@@ -171,6 +172,11 @@ EXPORT_SYMBOL(chrp_ide_regbase);
EXPORT_SYMBOL(chrp_ide_probe);
#endif
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+#endif /* CONFIG_PCI */
+
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);
@@ -272,4 +278,3 @@ EXPORT_SYMBOL(ppc_irq_dispatch_handler);
EXPORT_SYMBOL(decrementer_count);
EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(console_drivers);
-EXPORT_SYMBOL(do_bottom_half);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index ed98ba6f0..41382b2d7 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -19,6 +19,7 @@
*
*/
+#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -140,15 +141,31 @@ int check_stack(struct task_struct *tsk)
}
#endif /* defined(CHECK_STACK) */
+#ifdef CONFIG_ALTIVEC
int
-dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
- if (regs->msr & MSR_FP)
- giveup_fpu(current);
- memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
return 1;
}
+void
+enable_kernel_altivec(void)
+{
+#ifdef __SMP__
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
+ giveup_altivec(current);
+ else
+ giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
+#else
+ giveup_altivec(last_task_used_altivec);
+#endif /* __SMP__ */
+ printk("MSR_VEC in enable_altivec_kernel\n");
+}
+#endif /* CONFIG_ALTIVEC */
+
void
enable_kernel_fp(void)
{
@@ -162,6 +179,15 @@ enable_kernel_fp(void)
#endif /* __SMP__ */
}
+int
+dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+{
+ if (regs->msr & MSR_FP)
+ giveup_fpu(current);
+ memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
+ return 1;
+}
+
void
_switch_to(struct task_struct *prev, struct task_struct *new,
struct task_struct **last)
@@ -194,6 +220,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
*/
if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
giveup_fpu(prev);
+#ifdef CONFIG_ALTIVEC
/*
* If the previous thread 1) has some altivec regs it wants saved
* (has bits in vrsave set) and 2) used altivec in the last quantum
@@ -206,6 +233,7 @@ _switch_to(struct task_struct *prev, struct task_struct *new,
if ( (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) &&
prev->thread.vrsave )
giveup_altivec(prev);
+#endif /* CONFIG_ALTIVEC */
prev->last_processor = prev->processor;
current_set[smp_processor_id()] = new;
#endif /* __SMP__ */
@@ -337,13 +365,18 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
p->thread.fpscr = current->thread.fpscr;
childregs->msr &= ~MSR_FP;
+#ifdef CONFIG_ALTIVEC
+ /*
+ * copy altiVec info - assume lazy altiVec switch
+ * - kumar
+ */
if (regs->msr & MSR_VEC)
giveup_altivec(current);
- if ( p->thread.vrsave )
- memcpy(&p->thread.vrf, &current->thread.vrf, sizeof(p->thread.vrf));
+
+ memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
p->thread.vscr = current->thread.vscr;
- p->thread.vrsave = current->thread.vrsave;
childregs->msr &= ~MSR_VEC;
+#endif /* CONFIG_ALTIVEC */
#ifdef __SMP__
p->last_processor = NO_PROC_ID;
@@ -463,6 +496,10 @@ asmlinkage int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
goto out;
if (regs->msr & MSR_FP)
giveup_fpu(current);
+#ifdef CONFIG_ALTIVEC
+ if (regs->msr & MSR_VEC)
+ giveup_altivec(current);
+#endif /* CONFIG_ALTIVEC */
error = do_execve(filename, (char **) a1, (char **) a2, regs);
putname(filename);
out:
diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c
index a52bdd804..b86e2a153 100644
--- a/arch/ppc/kernel/prom.c
+++ b/arch/ppc/kernel/prom.c
@@ -802,42 +802,19 @@ setup_disp_fake_bi(ihandle dp)
{
unsigned int len;
int width = 640, height = 480, depth = 8, pitch;
- unsigned address;
+ unsigned address;
boot_infos_t* bi;
unsigned long offset = reloc_offset();
- prom_print(RELOC("Initing fake screen\n"));
+ prom_print(RELOC("Initializing fake screen\n"));
- len = 0;
+ call_prom(RELOC("getprop"), 4, 1, dp, RELOC("width"), &width, sizeof(width));
+ call_prom(RELOC("getprop"), 4, 1, dp, RELOC("height"), &height, sizeof(height));
call_prom(RELOC("getprop"), 4, 1, dp, RELOC("depth"), &len, sizeof(len));
- if (len == 0)
- prom_print(RELOC("Warning: assuming display depth = 8\n"));
- else
- depth = len;
- width = len = 0;
- call_prom(RELOC("getprop"), 4, 1, dp, RELOC("width"), &len, sizeof(len));
- width = len;
- if (width == 0) {
- prom_print(RELOC("Failed to get width\n"));
- return;
- }
- height = len = 0;
- call_prom(RELOC("getprop"), 4, 1, dp, RELOC("height"), &len, sizeof(len));
- height = len;
- if (height == 0) {
- prom_print(RELOC("Failed to get height\n"));
- return;
- }
- pitch = len = 0;
+ pitch = width * ((depth + 7) / 8);
call_prom(RELOC("getprop"), 4, 1, dp, RELOC("linebytes"), &len, sizeof(len));
- pitch = len;
- if (pitch == 0) {
- prom_print(RELOC("Failed to get pitch\n"));
- return;
- }
- address = len = 0;
+ address = 0;
call_prom(RELOC("getprop"), 4, 1, dp, RELOC("address"), &len, sizeof(len));
- address = len;
if (address == 0) {
prom_print(RELOC("Failed to get address\n"));
return;
@@ -846,22 +823,22 @@ setup_disp_fake_bi(ihandle dp)
/* kludge for valkyrie */
if (strcmp(dp->name, "valkyrie") == 0)
address += 0x1000;
- }
#endif
- RELOC(disp_bi) = &fake_bi;
- bi = PTRRELOC((&fake_bi));
- RELOC(g_loc_X) = 0;
- RELOC(g_loc_Y) = 0;
- RELOC(g_max_loc_X) = width / 8;
- RELOC(g_max_loc_Y) = height / 16;
- bi->logicalDisplayBase = (unsigned char *)address;
- bi->dispDeviceBase = (unsigned char *)address;
- bi->dispDeviceRowBytes = pitch;
- bi->dispDeviceDepth = depth;
- bi->dispDeviceRect[0] = bi->dispDeviceRect[1] = 0;
- bi->dispDeviceRect[2] = width;
- bi->dispDeviceRect[3] = height;
+ RELOC(disp_bi) = &fake_bi;
+ bi = PTRRELOC((&fake_bi));
+ RELOC(g_loc_X) = 0;
+ RELOC(g_loc_Y) = 0;
+ RELOC(g_max_loc_X) = width / 8;
+ RELOC(g_max_loc_Y) = height / 16;
+ bi->logicalDisplayBase = (unsigned char *)address;
+ bi->dispDeviceBase = (unsigned char *)address;
+ bi->dispDeviceRowBytes = pitch;
+ bi->dispDeviceDepth = depth;
+ bi->dispDeviceRect[0] = bi->dispDeviceRect[1] = 0;
+ bi->dispDeviceRect[2] = width;
+ bi->dispDeviceRect[3] = height;
+ RELOC(disp_bi) = 0;
}
#endif
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 19ce0a25e..7502ad08e 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -221,7 +221,7 @@ int get_cpuinfo(char *buffer)
if ( i )
len += sprintf(len+buffer,"\n");
len += sprintf(len+buffer,"processor\t: %lu\n",i);
- len += sprintf(len+buffer,"cpu\t\t: ");
+ len += sprintf(len+buffer,"cpu\t\t: ");
pvr = GET_PVR;
@@ -656,7 +656,6 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BOOTX_TEXT
map_bootx_text();
- prom_print("identify machine\n");
#endif
#ifdef CONFIG_XMON
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 386764ddd..83dff9246 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -53,7 +53,6 @@ unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time;
/* all cpu mappings are 1-1 -- Cort */
-int cpu_number_map[NR_CPUS] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,};
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
int start_secondary(void *);
diff --git a/arch/ppc/kernel/syscalls.c b/arch/ppc/kernel/syscalls.c
index e1a3fdcbb..e31b34cc9 100644
--- a/arch/ppc/kernel/syscalls.c
+++ b/arch/ppc/kernel/syscalls.c
@@ -252,14 +252,13 @@ asmlinkage int sys_pause(void)
asmlinkage int sys_uname(struct old_utsname * name)
{
- int err;
-
- if (!name)
- return -EFAULT;
+ int err = -EFAULT;
+
down_read(&uts_sem);
- err = copy_to_user(name, &system_utsname, sizeof (*name));
- up(&uts_sem);
- return err ? -EFAULT : 0;
+ if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
+ err = 0;
+ up_read(&uts_sem);
+ return err;
}
asmlinkage int sys_olduname(struct oldold_utsname * name)
@@ -282,8 +281,8 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
error -= __put_user(0,name->version+__OLD_UTS_LEN);
error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
error = __put_user(0,name->machine+__OLD_UTS_LEN);
- error = error ? -EFAULT : 0;
- up(&uts_sem);
+ up_read(&uts_sem);
+ error = error ? -EFAULT : 0;
return error;
}
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 5cc34c5a5..ac7f47602 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -118,11 +118,11 @@ MachineCheckException(struct pt_regs *regs)
default:
printk("Unknown values in msr\n");
}
+ show_regs(regs);
+ print_backtrace((unsigned long *)regs->gpr[1]);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
- show_regs(regs);
- print_backtrace((unsigned long *)regs->gpr[1]);
panic("machine check");
}
_exception(SIGSEGV, regs);
@@ -142,44 +142,6 @@ SMIException(struct pt_regs *regs)
panic("System Management Interrupt");
}
-#if defined(CONFIG_ALTIVEC)
-void
-AltiVecUnavailable(struct pt_regs *regs)
-{
- /*
- * This should be changed so we don't take a trap if coming
- * back when last_task_used_altivec == current. We should also
- * allow the kernel to use the altivec regs on UP to store tasks
- * regs during switch
- * -- Cort
- */
- if ( regs->msr & MSR_VEC )
- {
- show_regs(regs);
- panic("AltiVec trap with Altivec enabled!\n");
- }
-
- if ( !user_mode(regs) )
- {
- show_regs(regs);
- panic("Kernel Used Altivec with MSR_VEC off!\n");
- }
-
- if ( last_task_used_altivec != current )
- {
- if ( last_task_used_altivec )
- giveup_altivec(current);
- load_up_altivec(current);
- /* on SMP we always save/restore on switch */
-#ifndef __SMP__
- last_task_used_altivec = current;
-#endif
- }
- /* enable altivec for the task on return */
- regs->msr |= MSR_VEC;
-}
-#endif /* CONFIG_ALTIVEC */
-
void
UnknownException(struct pt_regs *regs)
{
diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S
index 4ab90f8b7..1c4f1f78e 100644
--- a/arch/ppc/lib/string.S
+++ b/arch/ppc/lib/string.S
@@ -12,6 +12,11 @@
#include <asm/processor.h>
#include <asm/errno.h>
+CACHELINE_BYTES = 32
+LG_CACHELINE_BYTES = 5
+CACHELINE_MASK = 0x1f
+CACHELINE_WORDS = 8
+
.globl strcpy
strcpy:
addi r5,r3,-1
@@ -70,6 +75,55 @@ strlen:
subf r3,r3,r4
blr
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero. This requires that the destination
+ * area is cacheable. -- paulus
+ */
+ .globl cacheable_memzero
+cacheable_memzero:
+ mr r5,r4
+ li r4,0
+ addi r6,r3,-4
+ cmplwi 0,r5,4
+ blt 7f
+ stwu r4,4(r6)
+ beqlr
+ andi. r0,r6,3
+ add r5,r0,r5
+ subf r6,r0,r6
+ clrlwi r7,r6,32-LG_CACHELINE_BYTES
+ add r8,r7,r5
+ srwi r9,r8,LG_CACHELINE_BYTES
+ addic. r9,r9,-1 /* total number of complete cachelines */
+ ble 2f
+ xori r0,r7,CACHELINE_MASK & ~3
+ srwi. r0,r0,2
+ beq 3f
+ mtctr r0
+4: stwu r4,4(r6)
+ bdnz 4b
+3: mtctr r9
+ li r7,4
+10: dcbz r7,r6
+ addi r6,r6,CACHELINE_BYTES
+ bdnz 10b
+ clrlwi r5,r8,32-LG_CACHELINE_BYTES
+ addi r5,r5,4
+2: srwi r0,r5,2
+ mtctr r0
+ bdz 6f
+1: stwu r4,4(r6)
+ bdnz 1b
+6: andi. r5,r5,3
+7: cmpwi 0,r5,0
+ beqlr
+ mtctr r5
+ addi r6,r6,3
+8: stbu r4,1(r6)
+ bdnz 8b
+ blr
+
.globl memset
memset:
rlwimi r4,r4,8,16,23
@@ -82,7 +136,7 @@ memset:
andi. r0,r6,3
add r5,r0,r5
subf r6,r0,r6
- rlwinm r0,r5,32-2,2,31
+ srwi r0,r5,2
mtctr r0
bdz 6f
1: stwu r4,4(r6)
@@ -103,6 +157,87 @@ bcopy:
mr r4,r6
b memcpy
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic. This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
+ .global cacheable_memcpy
+cacheable_memcpy:
+ add r7,r3,r5 /* test if the src & dst overlap */
+ add r8,r4,r5
+ cmplw 0,r4,r7
+ cmplw 1,r3,r8
+ crand 0,0,4 /* cr0.lt &= cr1.lt */
+ blt memcpy /* if regions overlap */
+
+ addi r4,r4,-4
+ addi r6,r3,-4
+ neg r0,r3
+ andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
+ beq 58f
+
+ cmplw 0,r5,r0 /* is this more than total to do? */
+ blt 63f /* if not much to do */
+ andi. r8,r0,3 /* get it word-aligned first */
+ subf r5,r0,r5
+ mtctr r8
+ beq+ 61f
+70: lbz r9,4(r4) /* do some bytes */
+ stb r9,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 70b
+61: srwi. r0,r0,2
+ mtctr r0
+ beq 58f
+72: lwzu r9,4(r4) /* do some words */
+ stwu r9,4(r6)
+ bdnz 72b
+
+58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+ clrlwi r5,r5,32-LG_CACHELINE_BYTES
+ li r11,4
+ mtctr r0
+ beq 63f
+53: dcbz r11,r6
+ lwz r7,4(r4)
+ lwz r8,8(r4)
+ lwz r9,12(r4)
+ lwzu r10,16(r4)
+ stw r7,4(r6)
+ stw r8,8(r6)
+ stw r9,12(r6)
+ stwu r10,16(r6)
+ lwz r7,4(r4)
+ lwz r8,8(r4)
+ lwz r9,12(r4)
+ lwzu r10,16(r4)
+ stw r7,4(r6)
+ stw r8,8(r6)
+ stw r9,12(r6)
+ stwu r10,16(r6)
+ bdnz 53b
+
+63: srwi. r0,r5,2
+ mtctr r0
+ beq 64f
+30: lwzu r0,4(r4)
+ stwu r0,4(r6)
+ bdnz 30b
+
+64: andi. r0,r5,3
+ mtctr r0
+ beq+ 65f
+40: lbz r0,4(r4)
+ stb r0,4(r6)
+ addi r4,r4,1
+ addi r6,r6,1
+ bdnz 40b
+65: blr
+
.globl memmove
memmove:
cmplw 0,r3,r4
@@ -111,7 +246,7 @@ memmove:
.globl memcpy
memcpy:
- rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
+ srwi. r7,r5,3
addi r6,r3,-4
addi r4,r4,-4
beq 2f /* if less than 8 bytes to do */
@@ -218,106 +353,167 @@ memchr:
.globl __copy_tofrom_user
__copy_tofrom_user:
- srwi. r7,r5,3
- addi r6,r3,-4
addi r4,r4,-4
- li r3,0 /* success return value */
- beq 2f /* if less than 8 bytes to do */
- andi. r0,r6,3 /* get dest word aligned */
- mtctr r7
- bne 5f
-1: lwz r7,4(r4)
-11: lwzu r8,8(r4)
-12: stw r7,4(r6)
-13: stwu r8,8(r6)
- bdnz 1b
- andi. r5,r5,7
-2: cmplwi 0,r5,4
- blt 3f
-14: lwzu r0,4(r4)
- addi r5,r5,-4
-15: stwu r0,4(r6)
-3: cmpwi 0,r5,0 /* do 1 byte at a time for the remainder */
- beqlr
- mtctr r5
- addi r4,r4,3
- addi r6,r6,3
-4: lbzu r0,1(r4)
-16: stbu r0,1(r6)
- bdnz 4b
- blr
-5: subfic r0,r0,4 /* copy bytes until we have the */
- mtctr r0 /* destination 4-byte aligned */
- subf r5,r0,r5
-6: lbz r7,4(r4)
+ addi r6,r3,-4
+ neg r0,r3
+ andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
+ beq 58f
+
+ cmplw 0,r5,r0 /* is this more than total to do? */
+ blt 63f /* if not much to do */
+ andi. r8,r0,3 /* get it word-aligned first */
+ mtctr r8
+ beq+ 61f
+70: lbz r9,4(r4) /* do some bytes */
+71: stb r9,4(r6)
addi r4,r4,1
-17: stb r7,4(r6)
addi r6,r6,1
- bdnz 6b
- srwi. r7,r5,3
- beq 2b
- mtctr r7
- b 1b
-/* we come here on a fault in the 8-byte-at-a-time loop */
-88: subi r4,r4,8 /* compensate for the lwzu */
-98: mfctr r0
- rlwimi r5,r0,3,0,28 /* use the byte-at-a-time loop to */
- b 3b /* copy up to the byte at fault */
-/* here on a write fault in the single-word copy */
-96: subi r4,r4,4
- b 3b
-/* here on a read fault in the initial single-byte copy */
-90: mfctr r3
- add r3,r3,r5
- b 70f
-/* here on a read fault in the final single-byte copy */
-99: mfctr r3
- subi r6,r6,3
-/* clear out the rest of the destination: r3 bytes starting at 4(r6) */
-70: li r0,0
- mr. r5,r3
- beq 76f
-71: andi. r4,r6,3
- beq 72f
-77: stb r0,4(r6)
+ bdnz 70b
+61: subf r5,r0,r5
+ srwi. r0,r0,2
+ mtctr r0
+ beq 58f
+72: lwzu r9,4(r4) /* do some words */
+73: stwu r9,4(r6)
+ bdnz 72b
+
+58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+ clrlwi r5,r5,32-LG_CACHELINE_BYTES
+ li r11,4
+ mtctr r0
+ beq 63f
+53: dcbz r11,r6
+10: lwz r7,4(r4)
+11: lwz r8,8(r4)
+12: lwz r9,12(r4)
+13: lwzu r10,16(r4)
+14: stw r7,4(r6)
+15: stw r8,8(r6)
+16: stw r9,12(r6)
+17: stwu r10,16(r6)
+20: lwz r7,4(r4)
+21: lwz r8,8(r4)
+22: lwz r9,12(r4)
+23: lwzu r10,16(r4)
+24: stw r7,4(r6)
+25: stw r8,8(r6)
+26: stw r9,12(r6)
+27: stwu r10,16(r6)
+ bdnz 53b
+
+63: srwi. r0,r5,2
+ mtctr r0
+ beq 64f
+30: lwzu r0,4(r4)
+31: stwu r0,4(r6)
+ bdnz 30b
+
+64: andi. r0,r5,3
+ mtctr r0
+ beq+ 65f
+40: lbz r0,4(r4)
+41: stb r0,4(r6)
+ addi r4,r4,1
addi r6,r6,1
- addic. r5,r5,-1
- bne 71b
-72: srwi. r7,r5,2
- beq 73f
- mtctr r7
-74: stwu r0,4(r6)
- bdnz 74b
-73: andi. r5,r5,3
- beq 76f
- mtctr r5
- addi r6,r6,3
-75: stbu r0,1(r6)
- bdnz 75b
-76: blr
-/* here on a write fault in the initial single-byte copy */
-80: mfctr r3
- add r3,r3,r5
- blr
-/* here on a write fault in the final single-byte copy */
-81: mfctr r3
+ bdnz 40b
+65: li r3,0
blr
+/* read fault, initial single-byte copy */
+100: li r4,0
+ b 90f
+/* write fault, initial single-byte copy */
+101: li r4,1
+90: subf r5,r8,r5
+ li r3,0
+ b 99f
+/* read fault, initial word copy */
+102: li r4,0
+ b 91f
+/* write fault, initial word copy */
+103: li r4,1
+91: li r3,2
+ b 99f
+/* read fault in 2nd half of cacheline loop */
+106: addi r5,r5,-16
+/* read fault in 1st half of cacheline loop */
+104: li r4,0
+ b 92f
+/* write fault in 2nd half of cacheline loop */
+107: addi r5,r5,-16
+/* fault on dcbz (effectively a write fault) */
+/* or write fault in 1st half of cacheline loop */
+105: li r4,1
+92: li r3,LG_CACHELINE_BYTES
+ b 99f
+/* read fault in final word loop */
+108: li r4,0
+ b 93f
+/* write fault in final word loop */
+109: li r4,1
+93: andi. r5,r5,3
+ li r3,2
+ b 99f
+/* read fault in final byte loop */
+110: li r4,0
+ b 94f
+/* write fault in final byte loop */
+111: li r4,1
+94: li r5,0
+ li r3,0
+/*
+ * At this stage the number of bytes not copied is
+ * r5 + (ctr << r3), and r4 is 0 for read or 1 for write.
+ */
+99: mfctr r0
+ slw r3,r0,r3
+ add r3,r3,r5
+ cmpwi 0,r4,0
+ bne 120f
+/* for read fault, clear out the destination: r3 bytes starting at 4(r6) */
+ srwi. r0,r3,2
+ li r9,0
+ mtctr r0
+ beq 113f
+112: stwu r9,4(r6)
+ bdnz 112b
+113: andi. r0,r3,3
+ mtctr r0
+ beq 120f
+114: stb r9,4(r6)
+ addi r6,r6,1
+ bdnz 114b
+120: blr
+
.section __ex_table,"a"
.align 2
- .long 1b,98b
- .long 11b,98b
- .long 12b,88b
- .long 13b,88b
- .long 14b,3b
- .long 15b,96b
- .long 4b,99b
- .long 16b,81b
- .long 6b,90b
- .long 17b,80b
- .long 77b,76b
- .long 74b,76b
- .long 75b,76b
+ .long 70b,100b
+ .long 71b,101b
+ .long 72b,102b
+ .long 73b,103b
+ .long 53b,105b
+ .long 10b,104b
+ .long 11b,104b
+ .long 12b,104b
+ .long 13b,104b
+ .long 14b,105b
+ .long 15b,105b
+ .long 16b,105b
+ .long 17b,105b
+ .long 20b,106b
+ .long 21b,106b
+ .long 22b,106b
+ .long 23b,106b
+ .long 24b,107b
+ .long 25b,107b
+ .long 26b,107b
+ .long 27b,107b
+ .long 30b,108b
+ .long 31b,109b
+ .long 40b,110b
+ .long 41b,111b
+ .long 112b,120b
+ .long 114b,120b
.text
.globl __clear_user
@@ -334,7 +530,6 @@ __clear_user:
andi. r0,r6,3
add r4,r0,r4
subf r6,r0,r6
- /*rlwinm r0,r4,32-2,2,31*/
srwi r0,r4,2
mtctr r0
bdz 6f
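
Every fault in the rewritten __copy_tofrom_user funnels into the handler at label 99, which rebuilds the uncopied byte count from three values the fixup stubs leave behind: r5 (bytes outside the faulting loop), ctr (iterations remaining) and r3 (log2 of the bytes moved per iteration). A sketch of that arithmetic with invented names:

static unsigned long uncopied_bytes(unsigned long ctr_remaining,
				    unsigned int log2_bytes_per_iter,
				    unsigned long tail_bytes)
{
	/* mirrors "mfctr r0; slw r3,r0,r3; add r3,r3,r5" at label 99 */
	return tail_bytes + (ctr_remaining << log2_bytes_per_iter);
}
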
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 216527e34..25d728fdd 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -1107,7 +1107,7 @@ unsigned long __init find_available_memory(void)
*/
void __init paging_init(void)
{
- unsigned int zones_size[MAX_NR_ZONES], i;
+ unsigned long zones_size[MAX_NR_ZONES], i;
/*
* Grab some memory for bad_page and bad_pagetable to use.
@@ -1197,7 +1197,7 @@ unsigned long __init *pmac_find_end_of_memory(void)
unsigned long a, total;
/* max amount of RAM we allow -- Cort */
-#define RAM_LIMIT (64<<20)
+#define RAM_LIMIT (768<<20)
memory_node = find_devices("memory");
if (memory_node == NULL) {
diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c
index f25060c94..8e924699f 100644
--- a/arch/ppc/xmon/start.c
+++ b/arch/ppc/xmon/start.c
@@ -115,42 +115,41 @@ extern void pmu_poll(void);
int
xmon_write(void *handle, void *ptr, int nb)
{
- char *p = ptr;
- int i, c, ct;
+ char *p = ptr;
+ int i, c, ct;
#ifdef CONFIG_BOOTX_TEXT
- if (use_screen) {
- /* write it on the screen */
- for (i = 0; i < nb; ++i)
- drawchar(*p++);
- return nb;
- }
+ if (use_screen) {
+ /* write it on the screen */
+ for (i = 0; i < nb; ++i)
+ drawchar(*p++);
+ return nb;
+ }
#endif
- if (!scc_initialized)
- xmon_init_scc();
- for (i = 0; i < nb; ++i) {
- ct = 0;
- while ((*sccc & TXRDY) == 0)
+ if (!scc_initialized)
+ xmon_init_scc();
+ ct = 0;
+ for (i = 0; i < nb; ++i) {
+ while ((*sccc & TXRDY) == 0) {
#ifdef CONFIG_ADB
- if (sys_ctrler == SYS_CTRLER_PMU)
- pmu_poll();
-#else
- ;
+ if (sys_ctrler == SYS_CTRLER_PMU)
+ pmu_poll();
#endif /* CONFIG_ADB */
- c = p[i];
- if (c == '\n' && !ct) {
- c = '\r';
- ct = 1;
- --i;
- } else {
- if (console)
- printk("%c", c);
- ct = 0;
+ }
+ c = p[i];
+ if (c == '\n' && !ct) {
+ c = '\r';
+ ct = 1;
+ --i;
+ } else {
+ if (console)
+ printk("%c", c);
+ ct = 0;
+ }
+ buf_access();
+ *sccd = c;
}
- buf_access();
- *sccd = c;
- }
- return i;
+ return i;
}
int xmon_wants_key;
@@ -285,7 +284,7 @@ xmon_init_scc()
{
int i, x;
- if (macio_node != 0) {
+ if (via_modem && macio_node != 0) {
unsigned int t0;
feature_set(macio_node, FEATURE_Modem_power);