summaryrefslogtreecommitdiffstats
path: root/arch/ppc/kernel
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-06-19 22:45:37 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-06-19 22:45:37 +0000
commit6d403070f28cd44860fdb3a53be5da0275c65cf4 (patch)
tree0d0e7fe7b5fb7568d19e11d7d862b77a866ce081 /arch/ppc/kernel
parentecf1bf5f6c2e668d03b0a9fb026db7aa41e292e1 (diff)
Merge with 2.4.0-test1-ac21 + pile of MIPS cleanups to make merging
possible. Chainsawed RM200 kernel to compile again. Jazz machine status unknown.
Diffstat (limited to 'arch/ppc/kernel')
-rw-r--r--arch/ppc/kernel/Makefile9
-rw-r--r--arch/ppc/kernel/align.c8
-rw-r--r--arch/ppc/kernel/chrp_pci.c151
-rw-r--r--arch/ppc/kernel/chrp_setup.c41
-rw-r--r--arch/ppc/kernel/entry.S38
-rw-r--r--arch/ppc/kernel/hashtable.S322
-rw-r--r--arch/ppc/kernel/head.S351
-rw-r--r--arch/ppc/kernel/head_8xx.S45
-rw-r--r--arch/ppc/kernel/i8259.c5
-rw-r--r--arch/ppc/kernel/irq.c19
-rw-r--r--arch/ppc/kernel/m8260_setup.c6
-rw-r--r--arch/ppc/kernel/misc.S119
-rw-r--r--arch/ppc/kernel/mk_defs.c3
-rw-r--r--arch/ppc/kernel/open_pic.c6
-rw-r--r--arch/ppc/kernel/pmac_time.c7
-rw-r--r--arch/ppc/kernel/ppc_asm.h28
-rw-r--r--arch/ppc/kernel/ppc_htab.c29
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c6
-rw-r--r--arch/ppc/kernel/process.c2
-rw-r--r--arch/ppc/kernel/prom.c635
-rw-r--r--arch/ppc/kernel/ptrace.c30
-rw-r--r--arch/ppc/kernel/setup.c49
-rw-r--r--arch/ppc/kernel/signal.c2
-rw-r--r--arch/ppc/kernel/smp.c39
-rw-r--r--arch/ppc/kernel/time.c19
-rw-r--r--arch/ppc/kernel/xics.c214
-rw-r--r--arch/ppc/kernel/xics.h23
27 files changed, 1612 insertions, 594 deletions
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index ed16da557..501ed931a 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -7,8 +7,13 @@
#
# Note 2! The CFLAGS definitions are now in the main makefile...
+ifdef CONFIG_PPC64BRIDGE
+.S.o:
+ $(CC) $(CFLAGS) -D__ASSEMBLY__ -mppc64bridge -c $< -o $*.o
+else
.S.o:
$(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $*.o
+endif
O_TARGET := kernel.o
OX_OBJS := ppc_ksyms.o setup.o
@@ -33,6 +38,10 @@ endif
O_OBJS := entry.o traps.o irq.o idle.o time.o process.o signal.o syscalls.o \
misc.o ptrace.o align.o ppc_htab.o semaphore.o bitops.o
+ifdef CONFIG_POWER4
+O_OBJS += xics.o
+endif
+
ifndef CONFIG_8xx
O_OBJS += hashtable.o
endif
diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c
index 5b7366adc..cfa2fe0aa 100644
--- a/arch/ppc/kernel/align.c
+++ b/arch/ppc/kernel/align.c
@@ -21,7 +21,7 @@ struct aligninfo {
unsigned char flags;
};
-#if defined(CONFIG_4xx)
+#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
#define OPCD(inst) (((inst) & 0xFC000000) >> 26)
#define RS(inst) (((inst) & 0x03E00000) >> 21)
#define RA(inst) (((inst) & 0x001F0000) >> 16)
@@ -184,7 +184,7 @@ int
fix_alignment(struct pt_regs *regs)
{
int instr, nb, flags;
-#if defined(CONFIG_4xx)
+#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
int opcode, f1, f2, f3;
#endif
int i, t;
@@ -197,9 +197,11 @@ fix_alignment(struct pt_regs *regs)
unsigned char v[8];
} data;
-#if defined(CONFIG_4xx)
+#if defined(CONFIG_4xx) || defined(CONFIG_POWER4)
/* The 4xx-family processors have no DSISR register,
* so we emulate it.
+ * The POWER4 has a DSISR register but doesn't set it on
+ * an alignment fault. -- paulus
*/
instr = *((unsigned int *)regs->nip);
diff --git a/arch/ppc/kernel/chrp_pci.c b/arch/ppc/kernel/chrp_pci.c
index e609906cb..0d614fba1 100644
--- a/arch/ppc/kernel/chrp_pci.c
+++ b/arch/ppc/kernel/chrp_pci.c
@@ -2,6 +2,7 @@
* CHRP pci routines.
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -21,6 +22,10 @@
#include "pci.h"
+#ifdef CONFIG_POWER4
+static unsigned long pci_address_offset(int, unsigned int);
+#endif /* CONFIG_POWER4 */
+
/* LongTrail */
#define pci_config_addr(bus, dev, offset) \
(GG2_PCI_CONFIG_BASE | ((bus)<<16) | ((dev)<<8) | (offset))
@@ -172,8 +177,11 @@ int __chrp rtas_pcibios_read_config_byte(unsigned char bus, unsigned char dev_fn
unsigned char offset, unsigned char *val)
{
unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 1 ) != 0 )
+ unsigned long ret;
+
+ if (call_rtas( "read-pci-config", 2, 2, &ret, addr, 1) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
+ *val = ret;
return PCIBIOS_SUCCESSFUL;
}
@@ -181,8 +189,11 @@ int __chrp rtas_pcibios_read_config_word(unsigned char bus, unsigned char dev_fn
unsigned char offset, unsigned short *val)
{
unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 2 ) != 0 )
+ unsigned long ret;
+
+ if (call_rtas("read-pci-config", 2, 2, &ret, addr, 2) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
+ *val = ret;
return PCIBIOS_SUCCESSFUL;
}
@@ -191,8 +202,11 @@ int __chrp rtas_pcibios_read_config_dword(unsigned char bus, unsigned char dev_f
unsigned char offset, unsigned int *val)
{
unsigned long addr = (offset&0xff) | ((dev_fn&0xff)<<8) | ((bus & 0xff)<<16);
- if ( call_rtas( "read-pci-config", 2, 2, (ulong *)&val, addr, 4 ) != 0 )
+ unsigned long ret;
+
+ if (call_rtas("read-pci-config", 2, 2, &ret, addr, 4) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
+ *val = ret;
return PCIBIOS_SUCCESSFUL;
}
@@ -275,14 +289,33 @@ chrp_pcibios_fixup(void)
{
struct pci_dev *dev;
int i;
+ int *brp;
+ struct device_node *np;
extern struct pci_ops generic_pci_ops;
- /* Some IBM's with the python have >1 bus, this finds them */
- for ( i = 0; i < python_busnr ; i++ )
- pci_scan_bus(i+1, &generic_pci_ops, NULL);
+#ifndef CONFIG_POWER4
+ np = find_devices("device-tree");
+ if (np != 0) {
+ for (np = np->child; np != NULL; np = np->sibling) {
+ if (np->type == NULL || strcmp(np->type, "pci") != 0)
+ continue;
+ if ((brp = (int *) get_property(np, "bus-range", NULL)) == 0)
+ continue;
+ if (brp[0] != 0) /* bus 0 is already done */
+ pci_scan_bus(brp[0], &generic_pci_ops, NULL);
+ }
+ }
+#else
+ /* XXX kludge for now because we can't properly handle
+ physical addresses > 4GB. -- paulus */
+ pci_scan_bus(0x1e, &generic_pci_ops, NULL);
+#endif /* CONFIG_POWER4 */
/* PCI interrupts are controlled by the OpenPIC */
pci_for_each_dev(dev) {
+ np = find_pci_device_OFnode(dev->bus->number, dev->devfn);
+ if ( (np != 0) && (np->n_intrs > 0) && (np->intrs[0].line != 0))
+ dev->irq = np->intrs[0].line;
if ( dev->irq )
dev->irq = openpic_to_irq( dev->irq );
/* these need to be absolute addrs for OF and Matrox FB -- Cort */
@@ -301,10 +334,30 @@ chrp_pcibios_fixup(void)
pcibios_write_config_word(dev->bus->number,
dev->devfn, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
}
- if ( (dev->bus->number > 0) &&
- ((dev->vendor == PCI_VENDOR_ID_NCR) ||
- (dev->vendor == PCI_VENDOR_ID_AMD)))
- dev->resource[0].start += (dev->bus->number*0x08000000);
+#ifdef CONFIG_POWER4
+ for (i = 0; i < 6; ++i) {
+ unsigned long offset;
+ if (dev->resource[i].start == 0)
+ continue;
+ offset = pci_address_offset(dev->bus->number,
+ dev->resource[i].flags);
+ if (offset) {
+ dev->resource[i].start += offset;
+ dev->resource[i].end += offset;
+ printk("device %x.%x[%d] now [%lx..%lx]\n",
+ dev->bus->number, dev->devfn, i,
+ dev->resource[i].start,
+ dev->resource[i].end);
+ }
+ /* zap the 2nd function of the winbond chip */
+ if (dev->resource[i].flags & IORESOURCE_IO
+ && dev->bus->number == 0 && dev->devfn == 0x81)
+ dev->resource[i].flags &= ~IORESOURCE_IO;
+ }
+#else
+ if (dev->bus->number > 0 && python_busnr > 0)
+ dev->resource[0].start += dev->bus->number*0x01000000;
+#endif
}
}
@@ -316,7 +369,11 @@ void __init
chrp_setup_pci_ptrs(void)
{
struct device_node *py;
-
+
+#ifdef CONFIG_POWER4
+ set_config_access_method(rtas);
+ pci_dram_offset = 0;
+#else /* CONFIG_POWER4 */
if ( !strncmp("MOT",
get_property(find_path_device("/"), "model", NULL),3) )
{
@@ -327,23 +384,27 @@ chrp_setup_pci_ptrs(void)
}
else
{
- if ( (py = find_compatible_devices( "pci", "IBM,python" )) )
+ if ((py = find_compatible_devices("pci", "IBM,python")) != 0
+ || (py = find_compatible_devices("pci", "IBM,python3.0")) != 0)
{
+ char *name = get_property(find_path_device("/"), "name", NULL);
+
/* find out how many pythons */
while ( (py = py->next) ) python_busnr++;
set_config_access_method(python);
+
/*
* We base these values on the machine type but should
* try to read them from the python controller itself.
* -- Cort
*/
- if ( !strncmp("IBM,7025-F50", get_property(find_path_device("/"), "name", NULL),12) )
+ if ( !strncmp("IBM,7025-F50", name, 12) )
{
pci_dram_offset = 0x80000000;
isa_mem_base = 0xa0000000;
isa_io_base = 0x88000000;
- } else if ( !strncmp("IBM,7043-260",
- get_property(find_path_device("/"), "name", NULL),12) )
+ } else if ( !strncmp("IBM,7043-260", name, 12)
+ || !strncmp("IBM,7044-270", name, 12))
{
pci_dram_offset = 0x0;
isa_mem_base = 0xc0000000;
@@ -372,6 +433,66 @@ chrp_setup_pci_ptrs(void)
}
}
}
+#endif /* CONFIG_POWER4 */
ppc_md.pcibios_fixup = chrp_pcibios_fixup;
}
+
+#ifdef CONFIG_PPC64BRIDGE
+/*
+ * Hack alert!!!
+ * 64-bit machines like POWER3 and POWER4 have > 32 bit
+ * physical addresses. For now we remap particular parts
+ * of the 32-bit physical address space that the Linux
+ * page table gives us into parts of the physical address
+ * space above 4GB so we can access the I/O devices.
+ */
+
+#ifdef CONFIG_POWER4
+static unsigned long pci_address_offset(int busnr, unsigned int flags)
+{
+ unsigned long offset = 0;
+
+ if (busnr >= 0x1e) {
+ if (flags & IORESOURCE_IO)
+ offset = -0x100000;
+ else if (flags & IORESOURCE_MEM)
+ offset = 0x38000000;
+	} else if (busnr <= 0xf) {
+		if (flags & IORESOURCE_MEM)
+			offset = -0x40000000;
+	}
+ return offset;
+}
+
+unsigned long phys_to_bus(unsigned long pa)
+{
+ if (pa >= 0xf8000000)
+ pa -= 0x38000000;
+ else if (pa >= 0x80000000 && pa < 0xc0000000)
+ pa += 0x40000000;
+ return pa;
+}
+
+unsigned long bus_to_phys(unsigned int ba, int busnr)
+{
+ return ba + pci_address_offset(busnr, IORESOURCE_MEM);
+}
+
+#else /* CONFIG_POWER4 */
+/*
+ * For now assume I/O addresses are < 4GB and PCI bridges don't
+ * remap addresses on POWER3 machines.
+ */
+unsigned long phys_to_bus(unsigned long pa)
+{
+ return pa;
+}
+
+unsigned long bus_to_phys(unsigned int ba, int busnr)
+{
+ return ba;
+}
+#endif /* CONFIG_POWER4 */
+#endif /* CONFIG_PPC64BRIDGE */
diff --git a/arch/ppc/kernel/chrp_setup.c b/arch/ppc/kernel/chrp_setup.c
index d8c22e1a6..d75ccaf5c 100644
--- a/arch/ppc/kernel/chrp_setup.c
+++ b/arch/ppc/kernel/chrp_setup.c
@@ -55,6 +55,7 @@
#include "local_irq.h"
#include "i8259.h"
#include "open_pic.h"
+#include "xics.h"
extern volatile unsigned char *chrp_int_ack_special;
@@ -259,6 +260,7 @@ chrp_setup_arch(void)
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
+#ifndef CONFIG_PPC64BRIDGE
/* PCI bridge config space access area -
* appears to be not in devtree on longtrail. */
ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
@@ -267,14 +269,23 @@ chrp_setup_arch(void)
* -- Geert
*/
hydra_init(); /* Mac I/O */
+#endif /* CONFIG_PPC64BRIDGE */
+#ifndef CONFIG_POWER4
/* Some IBM machines don't have the hydra -- Cort */
if ( !OpenPIC )
{
- OpenPIC = (struct OpenPIC *)*(unsigned long *)get_property(
- find_path_device("/"), "platform-open-pic", NULL);
- OpenPIC = ioremap((unsigned long)OpenPIC, sizeof(struct OpenPIC));
+ unsigned long *opprop;
+
+ opprop = (unsigned long *)get_property(find_path_device("/"),
+ "platform-open-pic", NULL);
+ if (opprop != 0) {
+ printk("OpenPIC addrs: %lx %lx %lx\n",
+ opprop[0], opprop[1], opprop[2]);
+ OpenPIC = ioremap(opprop[0], sizeof(struct OpenPIC));
+ }
}
+#endif
/*
* Fix the Super I/O configuration
@@ -283,7 +294,10 @@ chrp_setup_arch(void)
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
+
+#ifndef CONFIG_PPC64BRIDGE
pmac_find_bridges();
+#endif /* CONFIG_PPC64BRIDGE */
/* Get the event scan rate for the rtas so we know how
* often it expects a heartbeat. -- Cort
@@ -402,15 +416,15 @@ void __init chrp_init_IRQ(void)
{
struct device_node *np;
int i;
+ unsigned long *addrp;
- if ( !(np = find_devices("pci") ) )
+ if (!(np = find_devices("pci"))
+ || !(addrp = (unsigned long *)
+ get_property(np, "8259-interrupt-acknowledge", NULL)))
printk("Cannot find pci to get ack address\n");
else
- {
chrp_int_ack_special = (volatile unsigned char *)
- (*(unsigned long *)get_property(np,
- "8259-interrupt-acknowledge", NULL));
- }
+ ioremap(*addrp, 1);
open_pic_irq_offset = 16;
for ( i = 16 ; i < NR_IRQS ; i++ )
irq_desc[i].handler = &open_pic;
@@ -435,6 +449,8 @@ chrp_init2(void)
#ifdef CONFIG_NVRAM
pmac_nvram_init();
#endif
+ if (ppc_md.progress)
+ ppc_md.progress(" Have fun! ", 0x7777);
}
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_IDE_MODULE)
@@ -560,10 +576,16 @@ void __init
ppc_md.setup_residual = NULL;
ppc_md.get_cpuinfo = chrp_get_cpuinfo;
ppc_md.irq_cannonicalize = chrp_irq_cannonicalize;
+#ifndef CONFIG_POWER4
ppc_md.init_IRQ = chrp_init_IRQ;
ppc_md.get_irq = chrp_get_irq;
ppc_md.post_irq = chrp_post_irq;
-
+#else
+ ppc_md.init_IRQ = xics_init_IRQ;
+ ppc_md.get_irq = xics_get_irq;
+ ppc_md.post_irq = NULL;
+#endif /* CONFIG_POWER4 */
+
ppc_md.init = chrp_init2;
ppc_md.restart = chrp_restart;
@@ -652,6 +674,7 @@ chrp_progress(char *s, unsigned short hex)
if ( call_rtas( "display-character", 1, 1, NULL, '\r' ) )
{
/* assume no display-character RTAS method - use hex display */
+ call_rtas("set-indicator", 3, 1, NULL, 6, 0, hex);
return;
}
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index ad467894f..354686c2d 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -31,8 +31,8 @@
#include <linux/sys.h>
#include <linux/config.h>
-#define SHOW_SYSCALLS
-#define SHOW_SYSCALLS_TASK
+#undef SHOW_SYSCALLS
+#undef SHOW_SYSCALLS_TASK
#ifdef SHOW_SYSCALLS_TASK
.data
@@ -83,8 +83,8 @@ _GLOBAL(DoSyscall)
#endif /* SHOW_SYSCALLS */
cmpi 0,r0,0x7777 /* Special case for 'sys_sigreturn' */
beq- 10f
- lwz r10,TASK_FLAGS(r2)
- andi. r10,r10,PF_TRACESYS
+ lwz r10,TASK_PTRACE(r2)
+ andi. r10,r10,PT_TRACESYS
bne- 50f
cmpli 0,r0,NR_syscalls
bge- 66f
@@ -227,12 +227,15 @@ _GLOBAL(_switch)
stw r1,KSP(r3) /* Set old stack pointer */
sync
tophys(r0,r4)
+ CLR_TOP32(r0)
mtspr SPRG3,r0 /* Update current THREAD phys addr */
#ifdef CONFIG_8xx
/* XXX it would be nice to find a SPRGx for this on 6xx,7xx too */
lwz r9,PGDIR(r4) /* cache the page table root */
tophys(r9,r9) /* convert to phys addr */
mtspr M_TWB,r9 /* Update MMU base address */
+ tlbia
+ SYNC
#endif /* CONFIG_8xx */
lwz r1,KSP(r4) /* Load new stack pointer */
/* save the old current 'last' for return value */
@@ -244,6 +247,7 @@ _GLOBAL(_switch)
8: addi r4,r1,INT_FRAME_SIZE /* size of frame */
stw r4,THREAD+KSP(r2) /* save kernel stack pointer */
tophys(r9,r1)
+ CLR_TOP32(r9)
mtspr SPRG2,r9 /* phys exception stack pointer */
10: lwz r2,_CTR(r1)
lwz r0,_LINK(r1)
@@ -270,12 +274,13 @@ _GLOBAL(_switch)
lwz r0,_MSR(r1)
mtspr SRR0,r2
+ FIX_SRR1(r0,r2)
mtspr SRR1,r0
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
SYNC
- rfi
+ RFI
#ifdef CONFIG_SMP
.globl ret_from_smpfork
@@ -311,11 +316,7 @@ lost_irq_ret:
#ifdef CONFIG_SMP
/* get processor # */
lwz r3,PROCESSOR(r2)
-#ifndef CONFIG_PPC64
slwi r3,r3,5
-#else
-#error not 64-bit ready
-#endif
add r4,r4,r3
#endif /* CONFIG_SMP */
lwz r5,0(r4)
@@ -365,14 +366,17 @@ restore:
/* if returning to user mode, set new sprg2 and save kernel SP */
lwz r0,_MSR(r1)
- mtspr SRR1,r0
andi. r0,r0,MSR_PR
beq+ 1f
addi r0,r1,INT_FRAME_SIZE /* size of frame */
stw r0,THREAD+KSP(r2) /* save kernel stack pointer */
tophys(r2,r1)
+ CLR_TOP32(r2)
mtspr SPRG2,r2 /* phys exception stack pointer */
-1:
+1:
+ lwz r0,_MSR(r1)
+ FIX_SRR1(r0,r2)
+ mtspr SRR1,r0
lwz r2,_CCR(r1)
mtcrf 0xFF,r2
lwz r2,_NIP(r1)
@@ -381,7 +385,7 @@ restore:
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
SYNC
- rfi
+ RFI
/*
* Fake an interrupt from kernel mode.
@@ -423,7 +427,6 @@ enter_rtas:
stw r0,20(r1)
lis r4,rtas_data@ha
lwz r4,rtas_data@l(r4)
- addis r4,r4,-KERNELBASE@h
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
addis r6,r6,-KERNELBASE@h
@@ -436,20 +439,23 @@ enter_rtas:
li r0,0
ori r0,r0,MSR_EE|MSR_SE|MSR_BE
andc r0,r9,r0
- andi. r9,r9,MSR_ME|MSR_RI
+ li r10,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
+ andc r9,r0,r10
sync /* disable interrupts so SRR0/1 */
mtmsr r0 /* don't get trashed */
mtlr r6
+ CLR_TOP32(r7)
mtspr SPRG2,r7
mtspr SRR0,r8
mtspr SRR1,r9
- rfi
+ RFI
1: addis r9,r1,-KERNELBASE@h
lwz r8,20(r9) /* get return address */
lwz r9,8(r9) /* original msr value */
+ FIX_SRR1(r9,r0)
li r0,0
mtspr SPRG2,r0
mtspr SRR0,r8
mtspr SRR1,r9
- rfi /* return to caller */
+ RFI /* return to caller */
#endif /* CONFIG_ALL_PPC */
diff --git a/arch/ppc/kernel/hashtable.S b/arch/ppc/kernel/hashtable.S
index 58045be1d..be86a1503 100644
--- a/arch/ppc/kernel/hashtable.S
+++ b/arch/ppc/kernel/hashtable.S
@@ -52,6 +52,13 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
.globl hash_page
hash_page:
+#ifdef CONFIG_PPC64BRIDGE
+ mfmsr r0
+ clrldi r0,r0,1 /* make sure it's in 32-bit mode */
+ sync
+ MTMSRD(r0)
+ isync
+#endif
#ifdef CONFIG_SMP
SAVE_2GPRS(7,r21)
eieio
@@ -120,28 +127,183 @@ hash_page:
ori r4,r4,0xe04 /* clear out reserved bits */
andc r6,r6,r4 /* PP=2 or 0, when _PAGE_HWWRITE */
+#ifdef CONFIG_POWER4
+ /*
+ * XXX hack hack hack - translate 32-bit "physical" addresses
+ * in the linux page tables to 42-bit real addresses in such
+ * a fashion that we can get at the I/O we need to access.
+ * -- paulus
+ */
+ cmpwi 0,r6,0
+ rlwinm r4,r6,16,16,30
+ bge 57f
+ cmplwi 0,r4,0xfe00
+ li r5,0x3fd
+ bne 56f
+ li r5,0x3ff
+56: sldi r5,r5,32
+ or r6,r6,r5
+57:
+#endif
+
+#ifdef CONFIG_PPC64BRIDGE
/* Construct the high word of the PPC-style PTE */
mfsrin r5,r3 /* get segment reg for segment */
-#ifdef CONFIG_PPC64
+ rlwinm r5,r5,0,5,31
sldi r5,r5,12
-#else /* CONFIG_PPC64 */
- rlwinm r5,r5,7,1,24 /* put VSID in 0x7fffff80 bits */
-#endif /* CONFIG_PPC64 */
#ifndef CONFIG_SMP /* do this later for SMP */
-#ifdef CONFIG_PPC64
ori r5,r5,1 /* set V (valid) bit */
-#else /* CONFIG_PPC64 */
+#endif
+
+ rlwimi r5,r3,16,20,24 /* put in API (abbrev page index) */
+ /* Get the address of the primary PTE group in the hash table */
+ .globl hash_page_patch_A
+hash_page_patch_A:
+ lis r4,Hash_base@h /* base address of hash table */
+ rlwimi r4,r5,32-5,25-Hash_bits,24 /* (VSID & hash_mask) << 7 */
+ rlwinm r0,r3,32-5,25-Hash_bits,24 /* (PI & hash_mask) << 7 */
+ xor r4,r4,r0 /* make primary hash */
+
+ /* See whether it was a PTE not found exception or a
+ protection violation. */
+ andis. r0,r20,0x4000
+ li r2,8 /* PTEs/group */
+ bne 10f /* no PTE: go look for an empty slot */
+ tlbie r3 /* invalidate TLB entry */
+
+ /* Search the primary PTEG for a PTE whose 1st dword matches r5 */
+ mtctr r2
+ addi r3,r4,-16
+1: ldu r0,16(r3) /* get next PTE */
+ cmpd 0,r0,r5
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ found_slot
+
+ /* Search the secondary PTEG for a matching PTE */
+ ori r5,r5,0x2 /* set H (secondary hash) bit */
+ .globl hash_page_patch_B
+hash_page_patch_B:
+ xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
+ xori r3,r3,0xff80
+ addi r3,r3,-16
+ mtctr r2
+2: ldu r0,16(r3)
+ cmpd 0,r0,r5
+ bdnzf 2,2b
+ beq+ found_slot
+ xori r5,r5,0x2 /* clear H bit again */
+
+ /* Search the primary PTEG for an empty slot */
+10: mtctr r2
+ addi r3,r4,-16 /* search primary PTEG */
+1: ldu r0,16(r3) /* get next PTE */
+ andi. r0,r0,1
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ found_empty
+
+ /* Search the secondary PTEG for an empty slot */
+ ori r5,r5,0x2 /* set H (secondary hash) bit */
+ .globl hash_page_patch_C
+hash_page_patch_C:
+ xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
+ xori r3,r3,0xff80
+ addi r3,r3,-16
+ mtctr r2
+2: ldu r0,16(r3)
+ andi. r0,r0,1
+ bdnzf 2,2b
+ beq+ found_empty
+
+ /*
+ * Choose an arbitrary slot in the primary PTEG to overwrite.
+ * Since both the primary and secondary PTEGs are full, and we
+ * have no information that the PTEs in the primary PTEG are
+ * more important or useful than those in the secondary PTEG,
+ * and we know there is a definite (although small) speed
+ * advantage to putting the PTE in the primary PTEG, we always
+ * put the PTE in the primary PTEG.
+ */
+ xori r5,r5,0x2 /* clear H bit again */
+ lis r3,next_slot@ha
+ tophys(r3,r3)
+ lwz r2,next_slot@l(r3)
+ addi r2,r2,16
+ andi. r2,r2,0x70
+#ifdef CONFIG_POWER4
+ /*
+ * Since we don't have BATs on POWER4, we rely on always having
+ * PTEs in the hash table to map the hash table and the code
+ * that manipulates it in virtual mode, namely flush_hash_page and
+ * flush_hash_segments. Otherwise we can get a DSI inside those
+ * routines which leads to a deadlock on the hash_table_lock on
+ * SMP machines. We avoid this by never overwriting the first
+ * PTE of each PTEG if it is already valid.
+ * -- paulus.
+ */
+ bne 102f
+ li r2,0x10
+102:
+#endif /* CONFIG_POWER4 */
+ stw r2,next_slot@l(r3)
+ add r3,r4,r2
+11:
+ /* update counter of evicted pages */
+ lis r2,htab_evicts@ha
+ tophys(r2,r2)
+ lwz r4,htab_evicts@l(r2)
+ addi r4,r4,1
+ stw r4,htab_evicts@l(r2)
+
+#ifndef CONFIG_SMP
+ /* Store PTE in PTEG */
+found_empty:
+ std r5,0(r3)
+found_slot:
+ std r6,8(r3)
+ sync
+
+#else /* CONFIG_SMP */
+/*
+ * Between the tlbie above and updating the hash table entry below,
+ * another CPU could read the hash table entry and put it in its TLB.
+ * There are 3 cases:
+ * 1. using an empty slot
+ * 2. updating an earlier entry to change permissions (i.e. enable write)
+ * 3. taking over the PTE for an unrelated address
+ *
+ * In each case it doesn't really matter if the other CPUs have the old
+ * PTE in their TLB. So we don't need to bother with another tlbie here,
+ * which is convenient as we've overwritten the register that had the
+ * address. :-) The tlbie above is mainly to make sure that this CPU comes
+ * and gets the new PTE from the hash table.
+ *
+ * We do however have to make sure that the PTE is never in an invalid
+ * state with the V bit set.
+ */
+found_empty:
+found_slot:
+ std r5,0(r3) /* clear V (valid) bit in PTE */
+ sync
+ tlbsync
+ sync
+ std r6,8(r3) /* put in correct RPN, WIMG, PP bits */
+ sync
+ ori r5,r5,1
+ std r5,0(r3) /* finally set V bit in PTE */
+#endif /* CONFIG_SMP */
+
+#else /* CONFIG_PPC64BRIDGE */
+
+ /* Construct the high word of the PPC-style PTE */
+ mfsrin r5,r3 /* get segment reg for segment */
+ rlwinm r5,r5,7,1,24 /* put VSID in 0x7fffff80 bits */
+
+#ifndef CONFIG_SMP /* do this later for SMP */
oris r5,r5,0x8000 /* set V (valid) bit */
-#endif /* CONFIG_PPC64 */
#endif
-#ifdef CONFIG_PPC64
-/* XXX: does this insert the api correctly? -- Cort */
- rlwimi r5,r3,17,21,25 /* put in API (abbrev page index) */
-#else /* CONFIG_PPC64 */
rlwimi r5,r3,10,26,31 /* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64 */
/* Get the address of the primary PTE group in the hash table */
.globl hash_page_patch_A
hash_page_patch_A:
@@ -160,89 +322,44 @@ hash_page_patch_A:
/* Search the primary PTEG for a PTE whose 1st word matches r5 */
mtctr r2
addi r3,r4,-8
-1:
-#ifdef CONFIG_PPC64
- lwzu r0,16(r3) /* get next PTE */
-#else
- lwzu r0,8(r3) /* get next PTE */
-#endif
+1: lwzu r0,8(r3) /* get next PTE */
cmp 0,r0,r5
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ found_slot
/* Search the secondary PTEG for a matching PTE */
-#ifdef CONFIG_PPC64
- ori r5,r5,0x2 /* set H (secondary hash) bit */
-#else
ori r5,r5,0x40 /* set H (secondary hash) bit */
-#endif
.globl hash_page_patch_B
hash_page_patch_B:
xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
xori r3,r3,0xffc0
-#ifdef CONFIG_PPC64
- addi r3,r3,-16
-#else
addi r3,r3,-8
-#endif
mtctr r2
-2:
-#ifdef CONFIG_PPC64
- lwzu r0,16(r3)
-#else
- lwzu r0,8(r3)
-#endif
+2: lwzu r0,8(r3)
cmp 0,r0,r5
bdnzf 2,2b
beq+ found_slot
-#ifdef CONFIG_PPC64
- xori r5,r5,0x2 /* clear H bit again */
-#else
xori r5,r5,0x40 /* clear H bit again */
-#endif
/* Search the primary PTEG for an empty slot */
10: mtctr r2
-#ifdef CONFIG_PPC64
- addi r3,r4,-16 /* search primary PTEG */
-#else
addi r3,r4,-8 /* search primary PTEG */
-#endif
-1:
-#ifdef CONFIG_PPC64
- lwzu r0,16(r3) /* get next PTE */
- andi. r0,r0,1
-#else
- lwzu r0,8(r3) /* get next PTE */
+1: lwzu r0,8(r3) /* get next PTE */
rlwinm. r0,r0,0,0,0 /* only want to check valid bit */
-#endif
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ found_empty
/* Search the secondary PTEG for an empty slot */
-#ifdef CONFIG_PPC64
- ori r5,r5,0x2 /* set H (secondary hash) bit */
-#else
ori r5,r5,0x40 /* set H (secondary hash) bit */
-#endif
.globl hash_page_patch_C
hash_page_patch_C:
xoris r3,r4,Hash_msk>>16 /* compute secondary hash */
xori r3,r3,0xffc0
-#ifdef CONFIG_PPC64
- addi r3,r3,-16
-#else
addi r3,r3,-8
-#endif
mtctr r2
2:
-#ifdef CONFIG_PPC64
- lwzu r0,16(r3)
- andi. r0,r0,1
-#else
lwzu r0,8(r3)
rlwinm. r0,r0,0,0,0 /* only want to check valid bit */
-#endif
bdnzf 2,2b
beq+ found_empty
@@ -255,21 +372,12 @@ hash_page_patch_C:
* advantage to putting the PTE in the primary PTEG, we always
* put the PTE in the primary PTEG.
*/
-#ifdef CONFIG_PPC64
- xori r5,r5,0x2 /* clear H bit again */
-#else
xori r5,r5,0x40 /* clear H bit again */
-#endif
lis r3,next_slot@ha
tophys(r3,r3)
lwz r2,next_slot@l(r3)
-#ifdef CONFIG_PPC64
- addi r2,r2,16
- andi. r2,r2,0x78
-#else
addi r2,r2,8
andi. r2,r2,0x38
-#endif
stw r2,next_slot@l(r3)
add r3,r4,r2
11:
@@ -283,17 +391,9 @@ hash_page_patch_C:
#ifndef CONFIG_SMP
/* Store PTE in PTEG */
found_empty:
-#ifdef CONFIG_PPC64
- std r5,0(r3)
-#else
stw r5,0(r3)
-#endif
found_slot:
-#ifdef CONFIG_PPC64
- std r6,8(r3)
-#else
stw r6,4(r3)
-#endif
sync
#else /* CONFIG_SMP */
@@ -325,6 +425,7 @@ found_slot:
oris r5,r5,0x8000
stw r5,0(r3) /* finally set V bit in PTE */
#endif /* CONFIG_SMP */
+#endif /* CONFIG_PPC64BRIDGE */
/*
* Update the hash table miss count. We only want misses here
@@ -371,7 +472,7 @@ found_slot:
lwz r22,GPR22(r21)
lwz r23,GPR23(r21)
lwz r21,GPR21(r21)
- rfi
+ RFI
#ifdef CONFIG_SMP
hash_page_out:
@@ -410,7 +511,7 @@ _GLOBAL(flush_hash_segments)
#endif
blr
99:
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC64BRIDGE)
/* Note - we had better not do anything which could generate
a hash table miss while we have the hash table locked,
or we'll get a deadlock. -paulus */
@@ -419,6 +520,8 @@ _GLOBAL(flush_hash_segments)
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
+#endif
+#ifdef CONFIG_SMP
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
lwz r8,PROCESSOR(r2)
@@ -430,6 +533,7 @@ _GLOBAL(flush_hash_segments)
bne- 10b
eieio
#endif
+#ifndef CONFIG_PPC64BRIDGE
rlwinm r3,r3,7,1,24 /* put VSID lower limit in position */
oris r3,r3,0x8000 /* set V bit */
rlwinm r4,r4,7,1,24 /* put VSID upper limit in position */
@@ -448,6 +552,26 @@ _GLOBAL(flush_hash_segments)
blt 2f /* branch if out of range */
stw r0,0(r5) /* invalidate entry */
2: bdnz 1b /* continue with loop */
+#else /* CONFIG_PPC64BRIDGE */
+ rldic r3,r3,12,20 /* put VSID lower limit in position */
+ ori r3,r3,1 /* set V bit */
+ rldic r4,r4,12,20 /* put VSID upper limit in position */
+ ori r4,r4,0xfff /* set V bit, API etc. */
+ lis r6,Hash_size@ha
+ lwz r6,Hash_size@l(r6) /* size in bytes */
+ srwi r6,r6,4 /* # PTEs */
+ mtctr r6
+ addi r5,r5,-16
+ li r0,0
+1: ldu r6,16(r5) /* get next tag word */
+ cmpld 0,r6,r3
+ cmpld 1,r6,r4
+ cror 0,0,5 /* set cr0.lt if out of range */
+ blt 2f /* branch if out of range */
+ std r0,0(r5) /* invalidate entry */
+2: bdnz 1b /* continue with loop */
+#endif /* CONFIG_PPC64BRIDGE */
+
sync
tlbia
sync
@@ -456,6 +580,8 @@ _GLOBAL(flush_hash_segments)
sync
lis r3,hash_table_lock@ha
stw r0,hash_table_lock@l(r3)
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC64BRIDGE)
mtmsr r10
SYNC
#endif
@@ -479,7 +605,7 @@ _GLOBAL(flush_hash_page)
#endif
blr
99:
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC64BRIDGE)
/* Note - we had better not do anything which could generate
a hash table miss while we have the hash table locked,
or we'll get a deadlock. -paulus */
@@ -488,6 +614,8 @@ _GLOBAL(flush_hash_page)
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
mtmsr r0
SYNC
+#endif
+#ifdef CONFIG_SMP
lis r9,hash_table_lock@h
ori r9,r9,hash_table_lock@l
lwz r8,PROCESSOR(r2)
@@ -499,6 +627,7 @@ _GLOBAL(flush_hash_page)
bne- 10b
eieio
#endif
+#ifndef CONFIG_PPC64BRIDGE
rlwinm r3,r3,11,1,20 /* put context into vsid */
rlwimi r3,r4,11,21,24 /* put top 4 bits of va into vsid */
oris r3,r3,0x8000 /* set V (valid) bit */
@@ -528,6 +657,37 @@ _GLOBAL(flush_hash_page)
bne 4f /* if we didn't find it */
3: li r0,0
stw r0,0(r7) /* invalidate entry */
+#else /* CONFIG_PPC64BRIDGE */
+ rldic r3,r3,16,16 /* put context into vsid (<< 12) */
+ rlwimi r3,r4,16,16,24 /* top 4 bits of va and API */
+ ori r3,r3,1 /* set V (valid) bit */
+ rlwinm r7,r4,32-5,9,24 /* get page index << 7 */
+ srdi r5,r3,5 /* vsid << 7 */
+ rlwinm r5,r5,0,1,24 /* vsid << 7 (limited to 24 bits) */
+ xor r7,r7,r5 /* primary hash << 7 */
+ lis r5,Hash_mask@ha
+ lwz r5,Hash_mask@l(r5) /* hash mask */
+ slwi r5,r5,7 /* << 7 */
+ and r7,r7,r5
+ add r6,r6,r7 /* address of primary PTEG */
+ li r8,8
+ mtctr r8
+ addi r7,r6,-16
+1: ldu r0,16(r7) /* get next PTE */
+ cmpd 0,r0,r3 /* see if tag matches */
+ bdnzf 2,1b /* while --ctr != 0 && !cr0.eq */
+ beq 3f /* if we found it */
+ ori r3,r3,2 /* set H (alt. hash) bit */
+ xor r6,r6,r5 /* address of secondary PTEG */
+ mtctr r8
+ addi r7,r6,-16
+2: ldu r0,16(r7) /* get next PTE */
+ cmpd 0,r0,r3 /* see if tag matches */
+ bdnzf 2,2b /* while --ctr != 0 && !cr0.eq */
+ bne 4f /* if we didn't find it */
+3: li r0,0
+ std r0,0(r7) /* invalidate entry */
+#endif /* CONFIG_PPC64BRIDGE */
4: sync
tlbie r4 /* in hw tlb too */
sync
@@ -536,6 +696,8 @@ _GLOBAL(flush_hash_page)
sync
li r0,0
stw r0,0(r9) /* clear hash_table_lock */
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC64BRIDGE)
mtmsr r10
SYNC
#endif
diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S
index f88c5383d..8c5911d30 100644
--- a/arch/ppc/kernel/head.S
+++ b/arch/ppc/kernel/head.S
@@ -36,7 +36,19 @@
#include <asm/amigappc.h>
#endif
-#ifdef CONFIG_PPC64
+#ifndef CONFIG_PPC64BRIDGE
+CACHELINE_BYTES = 32
+LG_CACHELINE_BYTES = 5
+CACHELINE_MASK = 0x1f
+CACHELINE_WORDS = 8
+#else
+CACHELINE_BYTES = 128
+LG_CACHELINE_BYTES = 7
+CACHELINE_MASK = 0x7f
+CACHELINE_WORDS = 32
+#endif /* CONFIG_PPC64BRIDGE */
+
+#ifdef CONFIG_PPC64BRIDGE
#define LOAD_BAT(n, reg, RA, RB) \
ld RA,(n*32)+0(reg); \
ld RB,(n*32)+8(reg); \
@@ -47,7 +59,7 @@
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
-#else /* CONFIG_PPC64 */
+#else /* CONFIG_PPC64BRIDGE */
/* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB) \
@@ -65,7 +77,7 @@
mtspr DBAT##n##U,RA; \
mtspr DBAT##n##L,RB; \
1:
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64BRIDGE */
.text
.globl _stext
@@ -125,16 +137,6 @@ _start:
.globl __start
__start:
-#ifdef CONFIG_PPC64
-/*
- * Go into 32-bit mode to boot. OF should do this for
- * us already but just in case...
- * -- Cort
- */
- mfmsr r10
- clrldi r10,r10,3
- mtmsr r10
-#endif
/*
* We have to do any OF calls before we map ourselves to KERNELBASE,
* because OF may have I/O devices mapped into that area
@@ -166,67 +168,23 @@ __after_prom_start:
bl flush_tlbs
#endif
+#ifndef CONFIG_POWER4
+ /* POWER4 doesn't have BATs */
+ bl initial_bats
+#else /* CONFIG_POWER4 */
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to KERNELBASE. From this point on we can't safely
- * call OF any more.
+ * Load up the SDR1 and segment register values now
+ * since we don't have the BATs.
*/
- lis r11,KERNELBASE@h
-#ifndef CONFIG_PPC64
- mfspr r9,PVR
- rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
- cmpi 0,r9,1
- bne 4f
- ori r11,r11,4 /* set up BAT registers for 601 */
- li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
- mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
- mtspr IBAT0L,r8 /* lower BAT register */
- mtspr IBAT1U,r9
- mtspr IBAT1L,r10
- b 5f
-#endif /* CONFIG_PPC64 */
-
-4: tophys(r8,r11)
-#ifdef CONFIG_SMP
- ori r8,r8,0x12 /* R/W access, M=1 */
-#else
- ori r8,r8,2 /* R/W access */
-#endif /* CONFIG_SMP */
-#ifdef CONFIG_APUS
- ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
-#else
- ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
-#endif /* CONFIG_APUS */
-
-#ifdef CONFIG_PPC64
- /* clear out the high 32 bits in the BAT */
- clrldi r11,r11,32
- clrldi r8,r8,32
- /* turn off the pagetable mappings just in case */
- clrldi r16,r16,63
- mtsdr1 r16
-#else /* CONFIG_PPC64 */
- /*
- * If the MMU is off clear the bats. See clear_bat() -- Cort
- */
- mfmsr r20
- andi. r20,r20,MSR_DR
- bne 100f
- bl clear_bats
-100:
-#endif /* CONFIG_PPC64 */
- mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
- mtspr DBAT0U,r11 /* bit in upper BAT register */
- mtspr IBAT0L,r8
- mtspr IBAT0U,r11
-#if 0 /* Useful debug code, please leave in for now so I don't have to
- * look at docs when I need to setup a BAT ...
- */
- bl setup_screen_bat
-#endif
-5: isync
+ bl reloc_offset
+ addis r4,r3,_SDR1@ha /* get the value from _SDR1 */
+ lwz r4,_SDR1@l(r4) /* assume hash table below 4GB */
+ mtspr SDR1,r4
+ slbia
+ lis r5,0x2000 /* set pseudo-segment reg 12 */
+ ori r5,r5,12
+ mtsr 12,r5
+#endif /* CONFIG_POWER4 */
#ifndef CONFIG_APUS
/*
@@ -267,7 +225,21 @@ turn_on_mmu:
ori r0,r0,start_here@l
mtspr SRR0,r0
SYNC
- rfi /* enables MMU */
+ RFI /* enables MMU */
+
+#ifdef CONFIG_SMP
+ .globl __secondary_hold
+__secondary_hold:
+ /* tell the master we're here */
+ stw r3,4(0)
+100: lwz r4,0(0)
+ /* wait until we're told to start */
+ cmpw 0,r4,r3
+ bne 100b
+ /* our cpu # was at addr 0 - go */
+ mr r24,r3 /* cpu # */
+ b __secondary_start
+#endif
/*
* Exception entry code. This code runs with address translation
@@ -284,7 +256,8 @@ turn_on_mmu:
bne 1f; \
tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
-1: stw r20,_CCR(r21); /* save registers */ \
+1: CLR_TOP32(r21); \
+ stw r20,_CCR(r21); /* save registers */ \
stw r22,GPR22(r21); \
stw r23,GPR23(r21); \
mfspr r20,SPRG0; \
@@ -341,8 +314,13 @@ label: \
/* Data access exception. */
. = 0x300
+#ifdef CONFIG_PPC64BRIDGE
+ b DataAccess
+DataAccessCont:
+#else
DataAccess:
EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
mfspr r20,DSISR
andis. r0,r20,0xa470 /* weird error? */
bne 1f /* if not, try to put a PTE */
@@ -361,10 +339,30 @@ DataAccess:
.long do_page_fault
.long ret_from_except
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on data access. */
+ . = 0x380
+ b DataSegment
+DataSegmentCont:
+ mfspr r4,DAR
+ stw r4,_DAR(r21)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long UnknownException
+ .long ret_from_except
+#endif /* CONFIG_PPC64BRIDGE */
+
/* Instruction access exception. */
. = 0x400
+#ifdef CONFIG_PPC64BRIDGE
+ b InstructionAccess
+InstructionAccessCont:
+#else
InstructionAccess:
EXCEPTION_PROLOG
+#endif /* CONFIG_PPC64BRIDGE */
andis. r0,r23,0x4000 /* no pte found? */
beq 1f /* if so, try to put a PTE */
mr r3,r22 /* into the hash table */
@@ -380,6 +378,19 @@ InstructionAccess:
.long do_page_fault
.long ret_from_except
+#ifdef CONFIG_PPC64BRIDGE
+/* SLB fault on instruction access. */
+ . = 0x480
+ b InstructionSegment
+InstructionSegmentCont:
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r20,MSR_KERNEL
+ rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
+ bl transfer_to_handler
+ .long UnknownException
+ .long ret_from_except
+#endif /* CONFIG_PPC64BRIDGE */
+
/* External interrupt */
. = 0x500;
HardwareInterrupt:
@@ -526,7 +537,7 @@ InstructionTLBMiss:
tlbli r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
InstructionAddressInvalid:
mfspr r3,SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -593,7 +604,7 @@ DataLoadTLBMiss:
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
DataAddressInvalid:
mfspr r3,SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -658,7 +669,7 @@ DataStoreTLBMiss:
tlbld r3
mfspr r3,SRR1 /* Need to restore CR0 */
mtcrf 0x80,r3
- rfi
+ rfi
STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
STD_EXCEPTION(0x1400, SMI, SMIException)
@@ -706,7 +717,22 @@ Trap_0f:
EXCEPTION_PROLOG
b trap_0f_cont
#endif /* CONFIG_ALTIVEC */
-
+
+#ifdef CONFIG_PPC64BRIDGE
+DataAccess:
+ EXCEPTION_PROLOG
+ b DataAccessCont
+InstructionAccess:
+ EXCEPTION_PROLOG
+ b InstructionAccessCont
+DataSegment:
+ EXCEPTION_PROLOG
+ b DataSegmentCont
+InstructionSegment:
+ EXCEPTION_PROLOG
+ b InstructionSegmentCont
+#endif /* CONFIG_PPC64BRIDGE */
+
/*
* This code finishes saving the registers to the exception frame
* and jumps to the appropriate handler for the exception, turning
@@ -741,11 +767,12 @@ transfer_to_handler:
bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
lwz r24,0(r23) /* virtual address of handler */
lwz r23,4(r23) /* where to go when done */
+ FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
mtlr r23
SYNC
- rfi /* jump to handler, enable MMU */
+ RFI /* jump to handler, enable MMU */
/*
* On kernel stack overflow, load up an initial stack pointer
@@ -759,10 +786,11 @@ stack_ovf:
lis r24,StackOverflow@ha
addi r24,r24,StackOverflow@l
li r20,MSR_KERNEL
+ FIX_SRR1(r20,r22)
mtspr SRR0,r24
mtspr SRR1,r20
SYNC
- rfi
+ RFI
/*
* Disable FP for the task which had the FPU previously,
@@ -774,8 +802,11 @@ stack_ovf:
load_up_fpu:
mfmsr r5
ori r5,r5,MSR_FP
+#ifdef CONFIG_PPC64BRIDGE
+ clrldi r5,r5,1 /* turn off 64-bit mode */
+#endif /* CONFIG_PPC64BRIDGE */
SYNC
- mtmsr r5 /* enable use of fpu now */
+ MTMSRD(r5) /* enable use of fpu now */
SYNC
/*
* For SMP, we don't do lazy FPU switching because it just gets too
@@ -827,7 +858,7 @@ load_up_fpu:
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
SYNC
- rfi
+ RFI
/*
* FP unavailable trap from kernel - print a message, but let
@@ -919,7 +950,7 @@ load_up_altivec:
REST_2GPRS(22, r21)
lwz r21,GPR21(r21)
SYNC
- rfi
+ RFI
/*
* AltiVec unavailable trap from kernel - print a message, but let
@@ -1046,7 +1077,7 @@ relocate_kernel:
copy_and_flush:
addi r5,r5,-4
addi r6,r6,-4
-4: li r0,8
+4: li r0,CACHELINE_WORDS
mtctr r0
3: addi r6,r6,4 /* copy a cache line */
lwzx r0,r6,r4
@@ -1195,27 +1226,6 @@ apus_interrupt_entry:
#endif /* CONFIG_APUS */
#ifdef CONFIG_SMP
- .globl __secondary_hold
-__secondary_hold:
- /* tell the master we're here */
- lis r5,0x4@h
- ori r5,r5,0x4@l
- stw r3,0(r5)
- dcbf 0,r5
-100:
- lis r5,0
- dcbi 0,r5
- lwz r4,0(r5)
- /* wait until we're told to start */
- cmp 0,r4,r3
- bne 100b
- /* our cpu # was at addr 0 - go */
- lis r5,__secondary_start@h
- ori r5,r5,__secondary_start@l
- tophys(r5,r5)
- mtlr r5
- mr r24,r3 /* cpu # */
- blr
#ifdef CONFIG_GEMINI
.globl __secondary_start_gemini
__secondary_start_gemini:
@@ -1243,7 +1253,15 @@ __secondary_start_psurge:
.globl __secondary_start
__secondary_start:
+#ifdef CONFIG_PPC64BRIDGE
+ mfmsr r0
+ clrldi r0,r0,1 /* make sure it's in 32-bit mode */
+ sync
+ MTMSRD(r0)
+ isync
+#else
bl enable_caches
+#endif
/* get current */
lis r2,current_set@h
@@ -1264,6 +1282,7 @@ __secondary_start:
/* ptr to phys current thread */
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
+ CLR_TOP32(r4)
mtspr SPRG3,r4
li r3,0
mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
@@ -1275,7 +1294,7 @@ __secondary_start:
mtspr SRR0,r3
mtspr SRR1,r4
SYNC
- rfi
+ RFI
#endif /* CONFIG_SMP */
/*
@@ -1333,14 +1352,11 @@ load_up_mmu:
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SDR1,r6
-#ifdef CONFIG_PPC64
- /* clear the v bit in the ASR so we can
- * behave as if we have segment registers
- * -- Cort
- */
- clrldi r6,r6,63
+#ifdef CONFIG_PPC64BRIDGE
+ /* clear the ASR so we only use the pseudo-segment registers. */
+ li r6,0
mtasr r6
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC64BRIDGE */
li r0,16 /* load up segment register values */
mtctr r0 /* for context 0 */
lis r3,0x2000 /* Ku = 1, VSID = 0 */
@@ -1349,6 +1365,7 @@ load_up_mmu:
addi r3,r3,1 /* increment VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
+#ifndef CONFIG_POWER4
/* Load the BAT registers with the values set up by MMU_init.
MMU_init takes care of whether we're on a 601 or not. */
mfpvr r3
@@ -1361,17 +1378,29 @@ load_up_mmu:
LOAD_BAT(1,r3,r4,r5)
LOAD_BAT(2,r3,r4,r5)
LOAD_BAT(3,r3,r4,r5)
+#endif /* CONFIG_POWER4 */
blr
/*
* This is where the main kernel code starts.
*/
start_here:
+#ifndef CONFIG_PPC64BRIDGE
bl enable_caches
+#endif
/* ptr to current */
lis r2,init_task_union@h
ori r2,r2,init_task_union@l
+ /* Set up for using our exception vectors */
+ /* ptr to phys current thread */
+ tophys(r4,r2)
+ addi r4,r4,THREAD /* init task's THREAD */
+ CLR_TOP32(r4)
+ mtspr SPRG3,r4
+ li r3,0
+ mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
+
/* Clear out the BSS */
lis r11,_end@ha
addi r11,r11,_end@l
@@ -1424,10 +1453,11 @@ start_here:
ori r4,r4,2f@l
tophys(r4,r4)
li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ FIX_SRR1(r3,r5)
mtspr SRR0,r4
mtspr SRR1,r3
SYNC
- rfi
+ RFI
/* Load up the kernel context */
2:
SYNC /* Force all PTE updates to finish */
@@ -1439,34 +1469,30 @@ start_here:
#endif
bl load_up_mmu
-
-/* Set up for using our exception vectors */
- /* ptr to phys current thread */
- tophys(r4,r2)
- addi r4,r4,THREAD /* init task's THREAD */
- mtspr SPRG3,r4
- li r3,0
- mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
/* Now turn on the MMU for real! */
li r4,MSR_KERNEL
+ FIX_SRR1(r4,r5)
lis r3,start_kernel@h
ori r3,r3,start_kernel@l
mtspr SRR0,r3
mtspr SRR1,r4
SYNC
- rfi /* enable MMU and jump to start_kernel */
+ RFI
/*
* Set up the segment registers for a new context.
*/
- .globl set_context
-set_context:
+_GLOBAL(set_context)
rlwinm r3,r3,4,8,27 /* VSID = context << 4 */
addis r3,r3,0x6000 /* Set Ks, Ku bits */
li r0,12 /* TASK_SIZE / SEGMENT_SIZE */
mtctr r0
li r4,0
-3: mtsrin r3,r4
+3:
+#ifdef CONFIG_PPC64BRIDGE
+ slbie r4
+#endif /* CONFIG_PPC64BRIDGE */
+ mtsrin r3,r4
addi r3,r3,1 /* next VSID */
addis r4,r4,0x1000 /* address of next segment */
bdnz 3b
@@ -1511,7 +1537,7 @@ clear_bats:
#ifndef CONFIG_GEMINI
flush_tlbs:
- lis r20, 0x1000
+ lis r20, 0x40
1: addic. r20, r20, -0x1000
tlbie r20
blt 1b
@@ -1522,30 +1548,79 @@ mmu_off:
addi r4, r3, __after_prom_start - _start
mfmsr r3
andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
- beq 1f
+ beqlr
ori r3,r3,MSR_DR|MSR_IR
xori r3,r3,MSR_DR|MSR_IR
mtspr SRR0,r4
mtspr SRR1,r3
sync
- rfi
-1: blr
+ RFI
#endif
-#if 0 /* That's useful debug stuff */
+#ifndef CONFIG_POWER4
+/*
+ * Use the first pair of BAT registers to map the 1st 16MB
+ * of RAM to KERNELBASE. From this point on we can't safely
+ * call OF any more.
+ */
+initial_bats:
+ lis r11,KERNELBASE@h
+#ifndef CONFIG_PPC64BRIDGE
+ mfspr r9,PVR
+ rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
+ cmpi 0,r9,1
+ bne 4f
+ ori r11,r11,4 /* set up BAT registers for 601 */
+ li r8,0x7f /* valid, block length = 8MB */
+ oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
+ oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
+ mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
+ mtspr IBAT0L,r8 /* lower BAT register */
+ mtspr IBAT1U,r9
+ mtspr IBAT1L,r10
+ isync
+ blr
+#endif /* CONFIG_PPC64BRIDGE */
+
+4: tophys(r8,r11)
+#ifdef CONFIG_SMP
+ ori r8,r8,0x12 /* R/W access, M=1 */
+#else
+ ori r8,r8,2 /* R/W access */
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_APUS
+ ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
+#else
+ ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
+#endif /* CONFIG_APUS */
+
+#ifdef CONFIG_PPC64BRIDGE
+ /* clear out the high 32 bits in the BAT */
+ clrldi r11,r11,32
+ clrldi r8,r8,32
+#endif /* CONFIG_PPC64BRIDGE */
+ mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+ mtspr DBAT0U,r11 /* bit in upper BAT register */
+ mtspr IBAT0L,r8
+ mtspr IBAT0U,r11
+#if 0 /* Useful debug code, please leave in for now so I don't have to
+ * look at docs when I need to setup a BAT ...
+ */
setup_screen_bat:
li r3,0
mtspr DBAT1U,r3
- mtspr IBAT1U,r3
- lis r3, 0x8200
- ori r4,r3,0x2a
+ lis r3,0xfa00
+ CLR_TOP32(r3)
+ lis r4,0xfa00
+ CLR_TOP32(r4)
+ ori r4,r4,0x2a
mtspr DBAT1L,r4
- mtspr IBAT1L,r4
ori r3,r3,(BL_16M<<2)|0x2 /* set up BAT registers for 604 */
mtspr DBAT1U,r3
- mtspr IBAT1U,r3
- blr
#endif
+ isync
+ blr
+#endif /* CONFIG_POWER4 */
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
@@ -1568,7 +1643,7 @@ m8260_gorom:
mtlr r4
blr
#endif
-
+
/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,
diff --git a/arch/ppc/kernel/head_8xx.S b/arch/ppc/kernel/head_8xx.S
index 59b8a49c6..a35f6e2a1 100644
--- a/arch/ppc/kernel/head_8xx.S
+++ b/arch/ppc/kernel/head_8xx.S
@@ -31,6 +31,13 @@
#include <asm/cache.h>
#include <asm/pgtable.h>
+/* XXX need definitions here for 16 byte cachelines on some/all 8xx
+ -- paulus */
+CACHELINE_BYTES = 32
+LG_CACHELINE_BYTES = 5
+CACHELINE_MASK = 0x1f
+CACHELINE_WORDS = 8
+
.text
.globl _stext
_stext:
@@ -90,6 +97,9 @@ __start:
li r8, 0
mtspr MI_CTR, r8 /* Set instruction control to zero */
lis r8, MD_RESETVAL@h
+#ifndef CONFIG_8xx_COPYBACK
+ oris r8, r8, MD_WTDEF@h
+#endif
mtspr MD_CTR, r8 /* Set data TLB control */
/* Now map the lower 8 Meg into the TLBs. For this quick hack,
@@ -374,6 +384,16 @@ InstructionTLBMiss:
#endif
mtspr MD_EPN, r20 /* Have to use MD_EPN for walk, MI_EPN can't */
mfspr r20, M_TWB /* Get level 1 table entry address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ andi. r21, r20, 0x0800 /* Address >= 0x80000000 */
+ beq 3f
+ lis r21, swapper_pg_dir@h
+ ori r21, r21, swapper_pg_dir@l
+ rlwimi r20, r21, 0, 2, 19
+3:
lwz r21, 0(r20) /* Get the level 1 entry */
rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
@@ -445,6 +465,16 @@ DataStoreTLBMiss:
stw r20, 0(r0)
stw r21, 4(r0)
mfspr r20, M_TWB /* Get level 1 table entry address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ andi. r21, r20, 0x0800
+ beq 3f
+ lis r21, swapper_pg_dir@h
+ ori r21, r21, swapper_pg_dir@l
+ rlwimi r20, r21, 0, 2, 19
+3:
lwz r21, 0(r20) /* Get the level 1 entry */
rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, don't try to find a pte */
@@ -546,6 +576,16 @@ DataTLBError:
beq 2f
mfspr r20, M_TWB /* Get level 1 table entry address */
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ andi. r21, r20, 0x0800
+ beq 3f
+ lis r21, swapper_pg_dir@h
+ ori r21, r21, swapper_pg_dir@l
+ rlwimi r20, r21, 0, 2, 19
+3:
lwz r21, 0(r20) /* Get the level 1 entry */
rlwinm. r20, r21,0,0,19 /* Extract page descriptor page address */
beq 2f /* If zero, bail */
@@ -717,7 +757,7 @@ relocate_kernel:
copy_and_flush:
addi r5,r5,-4
addi r6,r6,-4
-4: li r0,8
+4: li r0,CACHELINE_WORDS
mtctr r0
3: addi r6,r6,4 /* copy a cache line */
lwzx r0,r6,r4
@@ -901,6 +941,8 @@ start_here:
*/
_GLOBAL(set_context)
mtspr M_CASID,r3 /* Update context */
+ tophys (r4, r4)
+ mtspr M_TWB, r4 /* and pgd */
tlbia
SYNC
blr
@@ -948,3 +990,4 @@ swapper_pg_dir:
.globl cmd_line
cmd_line:
.space 512
+
diff --git a/arch/ppc/kernel/i8259.c b/arch/ppc/kernel/i8259.c
index 5dfe902df..bdb6ec844 100644
--- a/arch/ppc/kernel/i8259.c
+++ b/arch/ppc/kernel/i8259.c
@@ -104,11 +104,6 @@ struct hw_interrupt_type i8259_pic = {
0
};
-static void
-no_action(int cpl, void *dev_id, struct pt_regs *regs)
-{
-}
-
void __init i8259_init(void)
{
/* init master interrupt controller */
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
index 28be8bf46..b055f23ec 100644
--- a/arch/ppc/kernel/irq.c
+++ b/arch/ppc/kernel/irq.c
@@ -286,10 +286,18 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
action = action->next;
} while ( action );
__cli();
- unmask_irq(irq);
+ if (irq_desc[irq].handler) {
+ if (irq_desc[irq].handler->end)
+ irq_desc[irq].handler->end(irq);
+ else if (irq_desc[irq].handler->enable)
+ irq_desc[irq].handler->enable(irq);
+ }
} else {
ppc_spurious_interrupts++;
- disable_irq( irq );
+ printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
+ disable_irq(irq);
+ if (irq_desc[irq].handler->end)
+ irq_desc[irq].handler->end(irq);
}
}
@@ -301,6 +309,7 @@ asmlinkage int do_IRQ(struct pt_regs *regs, int isfake)
/* every arch is required to have a get_irq -- Cort */
irq = ppc_md.get_irq( regs );
+
if ( irq < 0 )
{
/* -2 means ignore, already handled */
@@ -313,7 +322,7 @@ asmlinkage int do_IRQ(struct pt_regs *regs, int isfake)
goto out;
}
ppc_irq_dispatch_handler( regs, irq );
- if ( ppc_md.post_irq )
+ if (ppc_md.post_irq)
ppc_md.post_irq( regs, irq );
out:
@@ -770,3 +779,7 @@ void init_irq_proc (void)
register_irq_proc(i);
}
}
+
+void no_action(int irq, void *dev, struct pt_regs *regs)
+{
+}
diff --git a/arch/ppc/kernel/m8260_setup.c b/arch/ppc/kernel/m8260_setup.c
index 65ea973d8..2ce3790c3 100644
--- a/arch/ppc/kernel/m8260_setup.c
+++ b/arch/ppc/kernel/m8260_setup.c
@@ -167,9 +167,11 @@ int m8260_setup_residual(char *buffer)
bp = (bd_t *)__res;
- len += sprintf(len+buffer,"clock\t\t: %dMHz\n"
- "bus clock\t: %dMHz\n",
+ len += sprintf(len+buffer,"core clock\t: %d MHz\n"
+ "CPM clock\t: %d MHz\n"
+ "bus clock\t: %d MHz\n",
bp->bi_intfreq /*/ 1000000*/,
+ bp->bi_cpmfreq /*/ 1000000*/,
bp->bi_busfreq /*/ 1000000*/);
return len;
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index 96adb96cd..f8d230c7e 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -22,11 +22,14 @@
#include "ppc_asm.h"
#if defined(CONFIG_4xx) || defined(CONFIG_8xx)
-CACHE_LINE_SIZE = 16
-LG_CACHE_LINE_SIZE = 4
+#define CACHE_LINE_SIZE 16
+#define LG_CACHE_LINE_SIZE 4
+#elif !defined(CONFIG_PPC64BRIDGE)
+#define CACHE_LINE_SIZE 32
+#define LG_CACHE_LINE_SIZE 5
#else
-CACHE_LINE_SIZE = 32
-LG_CACHE_LINE_SIZE = 5
+#define CACHE_LINE_SIZE 128
+#define LG_CACHE_LINE_SIZE 7
#endif /* CONFIG_4xx || CONFIG_8xx */
.text
@@ -140,12 +143,33 @@ _GLOBAL(do_lost_interrupts)
* Flush MMU TLB
*/
_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ mfmsr r10
+ sync
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ mtmsr r0
+ SYNC
+ lis r9,hash_table_lock@h
+ ori r9,r9,hash_table_lock@l
+ lwz r8,PROCESSOR(r2)
+ oris r8,r8,10
+10: lwarx r7,0,r9
+ cmpi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+#endif /* CONFIG_SMP */
sync
tlbia
sync
#ifdef CONFIG_SMP
tlbsync
sync
+ li r0,0
+ stw r0,0(r9) /* clear hash_table_lock */
+ mtmsr r10
+ SYNC
#endif
blr
@@ -153,11 +177,32 @@ _GLOBAL(_tlbia)
* Flush MMU TLB for a particular address
*/
_GLOBAL(_tlbie)
+#if defined(CONFIG_SMP)
+ mfmsr r10
+ sync
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ mtmsr r0
+ SYNC
+ lis r9,hash_table_lock@h
+ ori r9,r9,hash_table_lock@l
+ lwz r8,PROCESSOR(r2)
+ oris r8,r8,11
+10: lwarx r7,0,r9
+ cmpi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+#endif /* CONFIG_SMP */
tlbie r3
sync
#ifdef CONFIG_SMP
tlbsync
sync
+ li r0,0
+ stw r0,0(r9) /* clear hash_table_lock */
+ mtmsr r10
+ SYNC
#endif
blr
@@ -305,6 +350,16 @@ _GLOBAL(clear_page)
* the destination into cache). This requires that the destination
* is cacheable.
*/
+#define COPY_16_BYTES \
+ lwz r6,4(r4); \
+ lwz r7,8(r4); \
+ lwz r8,12(r4); \
+ lwzu r9,16(r4); \
+ stw r6,4(r3); \
+ stw r7,8(r3); \
+ stw r8,12(r3); \
+ stwu r9,16(r3)
+
_GLOBAL(copy_page)
li r0,4096/CACHE_LINE_SIZE
mtctr r0
@@ -312,22 +367,20 @@ _GLOBAL(copy_page)
addi r4,r4,-4
li r5,4
1: dcbz r5,r3
- lwz r6,4(r4)
- lwz r7,8(r4)
- lwz r8,12(r4)
- lwzu r9,16(r4)
- stw r6,4(r3)
- stw r7,8(r3)
- stw r8,12(r3)
- stwu r9,16(r3)
- lwz r6,4(r4)
- lwz r7,8(r4)
- lwz r8,12(r4)
- lwzu r9,16(r4)
- stw r6,4(r3)
- stw r7,8(r3)
- stw r8,12(r3)
- stwu r9,16(r3)
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 32
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 64
+ COPY_16_BYTES
+ COPY_16_BYTES
+#if CACHE_LINE_SIZE >= 128
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+ COPY_16_BYTES
+#endif
+#endif
+#endif
bdnz 1b
blr
@@ -464,7 +517,7 @@ _GLOBAL(atomic_set_mask)
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
@@ -475,7 +528,7 @@ _GLOBAL(_insb)
blr
_GLOBAL(_outsb)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
@@ -486,7 +539,7 @@ _GLOBAL(_outsb)
blr
_GLOBAL(_insw)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
@@ -497,7 +550,7 @@ _GLOBAL(_insw)
blr
_GLOBAL(_outsw)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
@@ -508,7 +561,7 @@ _GLOBAL(_outsw)
blr
_GLOBAL(_insl)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
@@ -519,7 +572,7 @@ _GLOBAL(_insl)
blr
_GLOBAL(_outsl)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
@@ -531,7 +584,7 @@ _GLOBAL(_outsl)
_GLOBAL(ide_insw)
_GLOBAL(_insw_ns)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
@@ -543,7 +596,7 @@ _GLOBAL(_insw_ns)
_GLOBAL(ide_outsw)
_GLOBAL(_outsw_ns)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
@@ -554,7 +607,7 @@ _GLOBAL(_outsw_ns)
blr
_GLOBAL(_insl_ns)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
@@ -565,7 +618,7 @@ _GLOBAL(_insl_ns)
blr
_GLOBAL(_outsl_ns)
- cmpw 0,r5,0
+ cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
@@ -650,6 +703,12 @@ _GLOBAL(_set_THRM3)
_GLOBAL(_get_PVR)
mfspr r3,PVR
blr
+
+#ifdef CONFIG_8xx
+_GLOBAL(_get_IMMR)
+ mfspr r3, 638
+ blr
+#endif
_GLOBAL(_get_HID0)
mfspr r3,HID0
diff --git a/arch/ppc/kernel/mk_defs.c b/arch/ppc/kernel/mk_defs.c
index c381ea073..0313fb1b2 100644
--- a/arch/ppc/kernel/mk_defs.c
+++ b/arch/ppc/kernel/mk_defs.c
@@ -44,8 +44,9 @@ main(void)
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
- DEFINE(PF_TRACESYS, PF_TRACESYS);
+ DEFINE(PT_TRACESYS, PT_TRACESYS);
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
diff --git a/arch/ppc/kernel/open_pic.c b/arch/ppc/kernel/open_pic.c
index a3c6cc4dd..21001a7ce 100644
--- a/arch/ppc/kernel/open_pic.c
+++ b/arch/ppc/kernel/open_pic.c
@@ -97,10 +97,6 @@ struct hw_interrupt_type open_pic = {
#define check_arg_cpu(cpu) do {} while (0)
#endif
-void no_action(int ir1, void *dev, struct pt_regs *regs)
-{
-}
-
#ifdef CONFIG_SMP
void openpic_ipi_action(int cpl, void *dev_id, struct pt_regs *regs)
{
@@ -293,7 +289,7 @@ void __init openpic_init(int main_pic)
void find_ISUs(void)
{
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC64BRIDGE
/* hardcode this for now since the IBM 260 is the only thing with
* a distributed openpic right now. -- Cort
*/
diff --git a/arch/ppc/kernel/pmac_time.c b/arch/ppc/kernel/pmac_time.c
index 3b7dd283f..ebd0037de 100644
--- a/arch/ppc/kernel/pmac_time.c
+++ b/arch/ppc/kernel/pmac_time.c
@@ -28,6 +28,8 @@
#include "time.h"
+extern rwlock_t xtime_lock;
+
/* Apparently the RTC stores seconds since 1 Jan 1904 */
#define RTC_OFFSET 2082844800
@@ -151,16 +153,21 @@ int __init via_calibrate_decr(void)
static int time_sleep_notify(struct pmu_sleep_notifier *self, int when)
{
static unsigned long time_diff;
+ unsigned long flags;
switch (when) {
case PBOOK_SLEEP_NOW:
+ read_lock_irqsave(&xtime_lock, flags);
time_diff = xtime.tv_sec - pmac_get_rtc_time();
+ read_unlock_irqrestore(&xtime_lock, flags);
break;
case PBOOK_WAKE:
+ write_lock_irqsave(&xtime_lock, flags);
xtime.tv_sec = pmac_get_rtc_time() + time_diff;
xtime.tv_usec = 0;
set_dec(decrementer_count);
last_rtc_update = xtime.tv_sec;
+ write_unlock_irqrestore(&xtime_lock, flags);
break;
}
return PBOOK_SLEEP_OK;
diff --git a/arch/ppc/kernel/ppc_asm.h b/arch/ppc/kernel/ppc_asm.h
index d9093c9e1..42b8c9c39 100644
--- a/arch/ppc/kernel/ppc_asm.h
+++ b/arch/ppc/kernel/ppc_asm.h
@@ -73,11 +73,13 @@
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
* the 403GCX and 405GP tlbia IS defined and tlbie is not.
+ * All of these instructions exist in the 8xx, they have magical powers,
+ * and they must be used.
*/
-#if !defined(CONFIG_4xx)
+#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
#define tlbia \
- li r4,128; \
+ li r4,1024; \
mtctr r4; \
lis r4,KERNELBASE@h; \
0: tlbie r4; \
@@ -102,3 +104,25 @@
.align 1; \
.long 0b; \
.previous
+
+/*
+ * On 64-bit cpus, we use the rfid instruction instead of rfi, but
+ * we then have to make sure we preserve the top 32 bits except for
+ * the 64-bit mode bit, which we clear.
+ */
+#ifdef CONFIG_PPC64BRIDGE
+#define FIX_SRR1(ra, rb) \
+ mr rb,ra; \
+ mfmsr ra; \
+ clrldi ra,ra,1; /* turn off 64-bit mode */ \
+ rldimi ra,rb,0,32
+#define RFI .long 0x4c000024 /* rfid instruction */
+#define MTMSRD(r) .long (0x7c000164 + ((r) << 21)) /* mtmsrd */
+#define CLR_TOP32(r) rlwinm (r),(r),0,0,31 /* clear top 32 bits */
+
+#else
+#define FIX_SRR1(ra, rb)
+#define RFI rfi
+#define MTMSRD(r) mtmsr r
+#define CLR_TOP32(r)
+#endif /* CONFIG_PPC64BRIDGE */
diff --git a/arch/ppc/kernel/ppc_htab.c b/arch/ppc/kernel/ppc_htab.c
index 655eb4390..32f99ce0e 100644
--- a/arch/ppc/kernel/ppc_htab.c
+++ b/arch/ppc/kernel/ppc_htab.c
@@ -165,7 +165,7 @@ static ssize_t ppc_htab_read(struct file * file, char * buf,
valid = 0;
for_each_task(p)
{
- if ( (ptr->vsid >> 4) == p->mm->context )
+ if (p->mm && (ptr->vsid >> 4) == p->mm->context)
{
valid = 1;
break;
@@ -565,17 +565,22 @@ int proc_dol2crvec(ctl_table *table, int write, struct file *filp,
if (!first)
*p++ = '\t';
val = _get_L2CR();
- p += sprintf(p, "%08x: ", val);
- p += sprintf(p, " %s",
- (val&0x80000000)?"enabled":"disabled");
- p += sprintf(p,",%sparity",(val&0x40000000)?"":"no ");
- p += sprintf(p, ",%s", sizestrings[(val >> 28) & 3]);
- p += sprintf(p, ",%s", clockstrings[(val >> 25) & 7]);
- p += sprintf(p, ",%s", typestrings[(val >> 23) & 0x2]);
- p += sprintf(p,"%s",(val>>22)&1?"":",data only");
- p += sprintf(p,"%s",(val>>20)&1?",ZZ enabled":"");
- p += sprintf(p,",%s",(val>>19)&1?"write-through":"copy-back");
- p += sprintf(p,",%sns hold", holdstrings[(val>>16)&3]);
+ p += sprintf(p, "0x%08x: ", val);
+ p += sprintf(p, " %s", (val >> 31) & 1 ? "enabled" :
+ "disabled");
+ p += sprintf(p, ", %sparity", (val>>30)&1 ? "" : "no ");
+ p += sprintf(p, ", %s", sizestrings[(val >> 28) & 3]);
+ p += sprintf(p, ", %s", clockstrings[(val >> 25) & 7]);
+ p += sprintf(p, ", %s", typestrings[(val >> 23) & 2]);
+ p += sprintf(p, "%s", (val>>22)&1 ? ", data only" : "");
+ p += sprintf(p, "%s", (val>>20)&1 ? ", ZZ enabled": "");
+ p += sprintf(p, ", %s", (val>>19)&1 ? "write-through" :
+ "copy-back");
+ p += sprintf(p, "%s", (val>>18)&1 ? ", testing" : "");
+ p += sprintf(p, ", %sns hold",holdstrings[(val>>16)&3]);
+ p += sprintf(p, "%s", (val>>15)&1 ? ", DLL slow" : "");
+ p += sprintf(p, "%s", (val>>14)&1 ? ", diff clock" :"");
+ p += sprintf(p, "%s", (val>>13)&1 ? ", DLL bypass" :"");
p += sprintf(p,"\n");
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 6d7f2aff7..76809881b 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -97,12 +97,14 @@ EXPORT_SYMBOL(ucSystemType);
#endif
#endif
+#if !__INLINE_BITOPS
EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(test_and_set_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(test_and_change_bit);
+#endif /* __INLINE_BITOPS */
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
@@ -267,11 +269,13 @@ EXPORT_SYMBOL(ppc_irq_dispatch_handler);
EXPORT_SYMBOL(decrementer_count);
EXPORT_SYMBOL(get_wchan);
EXPORT_SYMBOL(console_drivers);
+EXPORT_SYMBOL(console_lock);
#ifdef CONFIG_XMON
EXPORT_SYMBOL(xmon);
#endif
EXPORT_SYMBOL(down_read_failed);
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
@@ -285,5 +289,7 @@ EXPORT_SYMBOL(debugger_sstep);
EXPORT_SYMBOL(debugger_iabr_match);
EXPORT_SYMBOL(debugger_dabr_match);
EXPORT_SYMBOL(debugger_fault_handler);
+#endif
EXPORT_SYMBOL(ret_to_user_hook);
+EXPORT_SYMBOL(do_softirq);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
index e1f1b4983..7bc5cb82f 100644
--- a/arch/ppc/kernel/process.c
+++ b/arch/ppc/kernel/process.c
@@ -501,6 +501,8 @@ asmlinkage int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
error = do_execve(filename, (char **) a1, (char **) a2, regs);
+ if (error == 0)
+ current->ptrace &= ~PT_DTRACE;
putname(filename);
out:
return error;
diff --git a/arch/ppc/kernel/prom.c b/arch/ppc/kernel/prom.c
index 31fc85068..7f51ca13f 100644
--- a/arch/ppc/kernel/prom.c
+++ b/arch/ppc/kernel/prom.c
@@ -29,6 +29,9 @@
#include <asm/bootx.h>
#include <asm/system.h>
#include <asm/gemini.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/bitops.h>
#ifdef CONFIG_FB
#include <asm/linux_logo.h>
@@ -80,7 +83,8 @@ struct pci_intr_map {
unsigned intr;
};
-typedef unsigned long interpret_func(struct device_node *, unsigned long);
+typedef unsigned long interpret_func(struct device_node *, unsigned long,
+ int, int);
static interpret_func interpret_pci_props;
static interpret_func interpret_dbdma_props;
static interpret_func interpret_isa_props;
@@ -101,7 +105,7 @@ extern char *klimit;
char *bootpath = 0;
char *bootdevice = 0;
-unsigned int rtas_data = 0; /* virtual pointer */
+unsigned int rtas_data = 0; /* physical pointer */
unsigned int rtas_entry = 0; /* physical pointer */
unsigned int rtas_size = 0;
unsigned int old_rtas = 0;
@@ -145,7 +149,7 @@ static unsigned long copy_device_tree(unsigned long, unsigned long);
static unsigned long inspect_node(phandle, struct device_node *, unsigned long,
unsigned long, struct device_node ***);
static unsigned long finish_node(struct device_node *, unsigned long,
- interpret_func *);
+ interpret_func *, int, int);
static unsigned long finish_node_interrupts(struct device_node *, unsigned long);
static unsigned long check_display(unsigned long);
static int prom_next_node(phandle *);
@@ -158,6 +162,7 @@ static void prom_welcome(boot_infos_t* bi, unsigned long phys);
extern void enter_rtas(void *);
extern unsigned long reloc_offset(void);
+void phys_call_rtas(int, int, int, ...);
extern char cmd_line[512]; /* XXX */
boot_infos_t *boot_infos = 0; /* init it so it's in data segment not bss */
@@ -279,7 +284,267 @@ prom_print(const char *msg)
}
}
-unsigned long smp_chrp_cpu_nr __initdata = 1;
+void
+prom_print_hex(unsigned int v)
+{
+ char buf[16];
+ int i, c;
+
+ for (i = 0; i < 8; ++i) {
+ c = (v >> ((7-i)*4)) & 0xf;
+ c += (c >= 10)? ('a' - 10): '0';
+ buf[i] = c;
+ }
+ buf[i] = ' ';
+ buf[i+1] = 0;
+ prom_print(buf);
+}
+
+void
+prom_print_nl(void)
+{
+ unsigned long offset = reloc_offset();
+ prom_print(RELOC("\n"));
+}
+
+unsigned long smp_chrp_cpu_nr __initdata = 0;
+
+#ifdef CONFIG_SMP
+/*
+ * With CHRP SMP we need to use the OF to start the other
+ * processors so we can't wait until smp_boot_cpus (the OF is
+ * trashed by then) so we have to put the processors into
+ * a holding pattern controlled by the kernel (not OF) before
+ * we destroy the OF.
+ *
+ * This uses a chunk of high memory, puts some holding pattern
+ * code there and sends the other processors off to there until
+ * smp_boot_cpus tells them to do something. We do that by using
+ * physical address 0x0. The holding pattern checks that address
+ * until its cpu # is there, when it is that cpu jumps to
+ * __secondary_start(). smp_boot_cpus() takes care of setting those
+ * values.
+ *
+ * We also use physical address 0x4 here to tell when a cpu
+ * is in its holding pattern code.
+ *
+ * -- Cort
+ */
+static void
+prom_hold_cpus(unsigned long mem)
+{
+ extern void __secondary_hold(void);
+ unsigned long i;
+ int cpu;
+ phandle node;
+ unsigned long offset = reloc_offset();
+ char type[16], *path;
+ unsigned int reg;
+
+ /*
+ * XXX: hack to make sure we're chrp, assume that if we're
+ * chrp we have a device_type property -- Cort
+ */
+ node = call_prom(RELOC("finddevice"), 1, 1, RELOC("/"));
+ if ( (int)call_prom(RELOC("getprop"), 4, 1, node,
+ RELOC("device_type"),type, sizeof(type)) <= 0)
+ return;
+
+ /* copy the holding pattern code to someplace safe (0) */
+ /* the holding pattern is now within the first 0x100
+ bytes of the kernel image -- paulus */
+ memcpy((void *)0, KERNELBASE + offset, 0x100);
+ flush_icache_range(0, 0x100);
+
+ /* look for cpus */
+ *(unsigned long *)(0x0) = 0;
+ asm volatile("dcbf 0,%0": : "r" (0) : "memory");
+ for (node = 0; prom_next_node(&node); ) {
+ type[0] = 0;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
+ type, sizeof(type));
+ if (strcmp(type, RELOC("cpu")) != 0)
+ continue;
+ path = (char *) mem;
+ memset(path, 0, 256);
+ if ((int) call_prom(RELOC("package-to-path"), 3, 1,
+ node, path, 255) < 0)
+ continue;
+ reg = -1;
+ call_prom(RELOC("getprop"), 4, 1, node, RELOC("reg"),
+ &reg, sizeof(reg));
+ cpu = RELOC(smp_chrp_cpu_nr)++;
+ RELOC(smp_hw_index)[cpu] = reg;
+ /* XXX: hack - don't start cpu 0, this cpu -- Cort */
+ if (cpu == 0)
+ continue;
+ prom_print(RELOC("starting cpu "));
+ prom_print(path);
+ *(ulong *)(0x4) = 0;
+ call_prom(RELOC("start-cpu"), 3, 0, node,
+ __pa(__secondary_hold), cpu);
+ prom_print(RELOC("..."));
+ for ( i = 0 ; (i < 10000) && (*(ulong *)(0x4) == 0); i++ )
+ ;
+ if (*(ulong *)(0x4) == cpu)
+ prom_print(RELOC("ok\n"));
+ else {
+ prom_print(RELOC("failed: "));
+ prom_print_hex(*(ulong *)0x4);
+ prom_print_nl();
+ }
+ }
+}
+#endif /* CONFIG_SMP */
+
+void
+bootx_init(unsigned long r4, unsigned long phys)
+{
+ boot_infos_t *bi = (boot_infos_t *) r4;
+ unsigned long space;
+ unsigned long ptr, x;
+ char *model;
+ unsigned long offset = reloc_offset();
+
+ RELOC(boot_infos) = PTRUNRELOC(bi);
+ if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
+ bi->logicalDisplayBase = 0;
+
+#ifdef CONFIG_BOOTX_TEXT
+ RELOC(g_loc_X) = 0;
+ RELOC(g_loc_Y) = 0;
+ RELOC(g_max_loc_X) = (bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) / 8;
+ RELOC(g_max_loc_Y) = (bi->dispDeviceRect[3] - bi->dispDeviceRect[1]) / 16;
+ RELOC(disp_bi) = PTRUNRELOC(bi);
+
+ clearscreen();
+
+ /* Test if boot-info is compatible. Done only in config CONFIG_BOOTX_TEXT since
+ there is nothing much we can do with an incompatible version, except display
+ a message and eventually hang the processor...
+
+ I'll try to keep enough of boot-info compatible in the future to always allow
+ display of this message;
+ */
+ if (!BOOT_INFO_IS_COMPATIBLE(bi))
+ prom_print(RELOC(" !!! WARNING - Incompatible version of BootX !!!\n\n\n"));
+
+ prom_welcome(bi, phys);
+ flushscreen();
+#endif /* CONFIG_BOOTX_TEXT */
+
+ /* New BootX enters kernel with MMU off, i/os are not allowed
+ here. This hack will have been done by the bootstrap anyway.
+ */
+ if (bi->version < 4) {
+ /*
+ * XXX If this is an iMac, turn off the USB controller.
+ */
+ model = (char *) early_get_property
+ (r4 + bi->deviceTreeOffset, 4, RELOC("model"));
+ if (model
+ && (strcmp(model, RELOC("iMac,1")) == 0
+ || strcmp(model, RELOC("PowerMac1,1")) == 0)) {
+ out_le32((unsigned *)0x80880008, 1); /* XXX */
+ }
+ }
+
+ /* Move klimit to enclose device tree, args, ramdisk, etc... */
+ if (bi->version < 5) {
+ space = bi->deviceTreeOffset + bi->deviceTreeSize;
+ if (bi->ramDisk)
+ space = bi->ramDisk + bi->ramDiskSize;
+ } else
+ space = bi->totalParamsSize;
+ RELOC(klimit) = PTRUNRELOC((char *) bi + space);
+
+ /* New BootX will have flushed all TLBs and enters kernel with
+ MMU switched OFF, so this should not be useful anymore.
+ */
+ if (bi->version < 4) {
+ /*
+ * Touch each page to make sure the PTEs for them
+ * are in the hash table - the aim is to try to avoid
+ * getting DSI exceptions while copying the kernel image.
+ */
+ for (ptr = (KERNELBASE + offset) & PAGE_MASK;
+ ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
+ x = *(volatile unsigned long *)ptr;
+ }
+
+#ifdef CONFIG_BOOTX_TEXT
+ prom_drawstring(RELOC("booting...\n"));
+ flushscreen();
+ RELOC(bootx_text_mapped) = 0;
+#endif
+}
+
+#ifdef CONFIG_PPC64BRIDGE
+/*
+ * Set up a hash table with a set of entries in it to map the
+ * first 64MB of RAM. This is used on 64-bit machines since
+ * some of them don't have BATs.
+ * We assume the PTE will fit in the primary PTEG.
+ */
+
+static inline void make_pte(unsigned long htab, unsigned int hsize,
+ unsigned int va, unsigned int pa, int mode)
+{
+ unsigned int *pteg;
+ unsigned int hash, i;
+
+ hash = ((va >> 5) ^ (va >> 21)) & 0x7fff80;
+ pteg = (unsigned int *)(htab + (hash & (hsize - 1)));
+ for (i = 0; i < 8; ++i, pteg += 4) {
+ if ((pteg[1] & 1) == 0) {
+ pteg[1] = ((va >> 16) & 0xff80) | 1;
+ pteg[3] = pa | mode;
+ break;
+ }
+ }
+}
+
+extern unsigned long _SDR1;
+extern PTE *Hash;
+extern unsigned long Hash_size;
+
+void
+prom_alloc_htab(void)
+{
+ unsigned int hsize;
+ unsigned long htab;
+ unsigned int addr;
+ unsigned long offset = reloc_offset();
+
+ /*
+ * Because of OF bugs we can't use the "claim" client
+ * interface to allocate memory for the hash table.
+ * This code is only used on 64-bit PPCs, and the only
+ * 64-bit PPCs at the moment are RS/6000s, and their
+ * OF is based at 0xc00000 (the 12M point), so we just
+ * arbitrarily use the 0x800000 - 0xc00000 region for the
+ * hash table.
+ * -- paulus.
+ */
+#ifdef CONFIG_POWER4
+ hsize = 4 << 20; /* POWER4 has no BATs */
+#else
+ hsize = 2 << 20;
+#endif /* CONFIG_POWER4 */
+ htab = (8 << 20);
+ RELOC(Hash) = (void *)(htab + KERNELBASE);
+ RELOC(Hash_size) = hsize;
+ RELOC(_SDR1) = htab + __ilog2(hsize) - 18;
+
+ /*
+ * Put in PTEs for the first 64MB of RAM
+ */
+ cacheable_memzero((void *)htab, hsize);
+ for (addr = 0; addr < 0x4000000; addr += 0x1000)
+ make_pte(htab, hsize, addr + KERNELBASE, addr,
+ _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX);
+}
+#endif /* CONFIG_PPC64BRIDGE */
/*
* We enter here early on, when the Open Firmware prom is still
@@ -289,11 +554,6 @@ __init
unsigned long
prom_init(int r3, int r4, prom_entry pp)
{
-#ifdef CONFIG_SMP
- int i;
- phandle node;
- char type[16], *path;
-#endif
int chrp = 0;
unsigned long mem;
ihandle prom_rtas, prom_mmu, prom_op;
@@ -313,82 +573,7 @@ prom_init(int r3, int r4, prom_entry pp)
/* If we came here from BootX, clear the screen,
* set up some pointers and return. */
if (r3 == 0x426f6f58 && pp == NULL) {
- boot_infos_t *bi = (boot_infos_t *) r4;
- unsigned long space;
- unsigned long ptr, x;
- char *model;
-
- RELOC(boot_infos) = PTRUNRELOC(bi);
- if (!BOOT_INFO_IS_V2_COMPATIBLE(bi))
- bi->logicalDisplayBase = 0;
-
-#ifdef CONFIG_BOOTX_TEXT
- RELOC(g_loc_X) = 0;
- RELOC(g_loc_Y) = 0;
- RELOC(g_max_loc_X) = (bi->dispDeviceRect[2] - bi->dispDeviceRect[0]) / 8;
- RELOC(g_max_loc_Y) = (bi->dispDeviceRect[3] - bi->dispDeviceRect[1]) / 16;
- RELOC(disp_bi) = PTRUNRELOC(bi);
-
- clearscreen();
-
- /* Test if boot-info is compatible. Done only in config CONFIG_BOOTX_TEXT since
- there is nothing much we can do with an incompatible version, except display
- a message and eventually hang the processor...
-
- I'll try to keep enough of boot-info compatible in the future to always allow
- display of this message;
- */
- if (!BOOT_INFO_IS_COMPATIBLE(bi))
- prom_print(RELOC(" !!! WARNING - Incompatible version of BootX !!!\n\n\n"));
-
- prom_welcome(bi, phys);
- flushscreen();
-#endif /* CONFIG_BOOTX_TEXT */
-
- /* New BootX enters kernel with MMU off, i/os are not allowed
- here. This hack will have been done by the boostrap anyway.
- */
- if (bi->version < 4) {
- /*
- * XXX If this is an iMac, turn off the USB controller.
- */
- model = (char *) early_get_property
- (r4 + bi->deviceTreeOffset, 4, RELOC("model"));
- if (model
- && (strcmp(model, RELOC("iMac,1")) == 0
- || strcmp(model, RELOC("PowerMac1,1")) == 0)) {
- out_le32((unsigned *)0x80880008, 1); /* XXX */
- }
- }
-
- /* Move klimit to enclose device tree, args, ramdisk, etc... */
- if (bi->version < 5) {
- space = bi->deviceTreeOffset + bi->deviceTreeSize;
- if (bi->ramDisk)
- space = bi->ramDisk + bi->ramDiskSize;
- } else
- space = bi->totalParamsSize;
- RELOC(klimit) = PTRUNRELOC((char *) bi + space);
-
- /* New BootX will have flushed all TLBs and enters kernel with
- MMU switched OFF, so this should not be useful anymore.
- */
- if (bi->version < 4) {
- /*
- * Touch each page to make sure the PTEs for them
- * are in the hash table - the aim is to try to avoid
- * getting DSI exceptions while copying the kernel image.
- */
- for (ptr = (KERNELBASE + offset) & PAGE_MASK;
- ptr < (unsigned long)bi + space; ptr += PAGE_SIZE)
- x = *(volatile unsigned long *)ptr;
- }
-
-#ifdef CONFIG_BOOTX_TEXT
- prom_print(RELOC("booting...\n"));
- flushscreen();
- RELOC(bootx_text_mapped) = 0;
-#endif
+ bootx_init(r4, phys);
return phys;
}
@@ -421,7 +606,8 @@ prom_init(int r3, int r4, prom_entry pp)
if (prom_op != (void*)-1) {
char model[64];
int sz;
- sz = (int)call_prom(RELOC("getprop"), 4, 1, prom_op, RELOC("model"), model, 64);
+ sz = (int)call_prom(RELOC("getprop"), 4, 1, prom_op,
+ RELOC("model"), model, 64);
if (sz > 0) {
char *c;
/* hack to skip the ibm chrp firmware # */
@@ -454,62 +640,68 @@ prom_init(int r3, int r4, prom_entry pp)
mem = ALIGN(mem + strlen(d) + 1);
}
- mem = check_display(mem);
-
- prom_print(RELOC("copying OF device tree..."));
- mem = copy_device_tree(mem, mem + (1<<20));
- prom_print(RELOC("done\n"));
-
-
- RELOC(klimit) = (char *) (mem - offset);
-
prom_rtas = call_prom(RELOC("finddevice"), 1, 1, RELOC("/rtas"));
if (prom_rtas != (void *) -1) {
+ int i, nargs;
+ struct prom_args prom_args;
+
RELOC(rtas_size) = 0;
call_prom(RELOC("getprop"), 4, 1, prom_rtas,
RELOC("rtas-size"), &RELOC(rtas_size), sizeof(rtas_size));
- prom_print(RELOC("instantiating rtas..."));
+ prom_print(RELOC("instantiating rtas"));
if (RELOC(rtas_size) == 0) {
RELOC(rtas_data) = 0;
} else {
/*
- * We do _not_ want the rtas_data inside the klimit
- * boundry since it'll be squashed when we do the
- * relocate of the kernel on chrp right after prom_init()
- * in head.S. So, we just pick a spot in memory.
- * -- Cort
+ * Ask OF for some space for RTAS.
+ * Actually OF has bugs so we just arbitrarily
+ * use memory at the 6MB point.
*/
-#if 0
- mem = (mem + 4095) & -4096;
- RELOC(rtas_data) = mem + KERNELBASE;
- mem += RELOC(rtas_size);
-#endif
- RELOC(rtas_data) = (6<<20) + KERNELBASE;
+ RELOC(rtas_data) = 6 << 20;
+ prom_print(RELOC(" at "));
+ prom_print_hex(RELOC(rtas_data));
}
prom_rtas = call_prom(RELOC("open"), 1, 1, RELOC("/rtas"));
- {
- int i, nargs;
- struct prom_args prom_args;
- nargs = 3;
- prom_args.service = RELOC("call-method");
- prom_args.nargs = nargs;
- prom_args.nret = 2;
- prom_args.args[0] = RELOC("instantiate-rtas");
- prom_args.args[1] = prom_rtas;
- prom_args.args[2] = ((void *)(RELOC(rtas_data)-KERNELBASE));
- RELOC(prom)(&prom_args);
- if (prom_args.args[nargs] != 0)
- i = 0;
- else
- i = (int)prom_args.args[nargs+1];
- RELOC(rtas_entry) = i;
- }
+ prom_print(RELOC("..."));
+ nargs = 3;
+ prom_args.service = RELOC("call-method");
+ prom_args.nargs = nargs;
+ prom_args.nret = 2;
+ prom_args.args[0] = RELOC("instantiate-rtas");
+ prom_args.args[1] = prom_rtas;
+ prom_args.args[2] = (void *) RELOC(rtas_data);
+ RELOC(prom)(&prom_args);
+ if (prom_args.args[nargs] != 0)
+ i = 0;
+ else
+ i = (int)prom_args.args[nargs+1];
+ RELOC(rtas_entry) = i;
if ((RELOC(rtas_entry) == -1) || (RELOC(rtas_entry) == 0))
prom_print(RELOC(" failed\n"));
else
prom_print(RELOC(" done\n"));
}
+#ifdef CONFIG_PPC64BRIDGE
+ /*
+ * Find out how much memory we have and allocate a
+ * suitably-sized hash table.
+ */
+ prom_alloc_htab();
+#endif
+
+#ifdef CONFIG_SMP
+ prom_hold_cpus(mem);
+#endif
+
+ mem = check_display(mem);
+
+ prom_print(RELOC("copying OF device tree..."));
+ mem = copy_device_tree(mem, mem + (1<<20));
+ prom_print(RELOC("done\n"));
+
+ RELOC(klimit) = (char *) (mem - offset);
+
/* If we are already running at 0xc0000000, we assume we were loaded by
* an OF bootloader which did set a BAT for us. This breaks OF translate
* so we force phys to be 0
@@ -542,85 +734,10 @@ prom_init(int r3, int r4, prom_entry pp)
}
#ifdef CONFIG_BOOTX_TEXT
- if (!chrp && RELOC(prom_disp_node) != 0)
+ if (RELOC(prom_disp_node) != 0)
setup_disp_fake_bi(RELOC(prom_disp_node));
#endif
-#ifdef CONFIG_SMP
- /*
- * With CHRP SMP we need to use the OF to start the other
- * processors so we can't wait until smp_boot_cpus (the OF is
- * trashed by then) so we have to put the processors into
- * a holding pattern controlled by the kernel (not OF) before
- * we destroy the OF.
- *
- * This uses a chunk of high memory, puts some holding pattern
- * code there and sends the other processors off to there until
- * smp_boot_cpus tells them to do something. We do that by using
- * physical address 0x0. The holding pattern checks that address
- * until its cpu # is there, when it is that cpu jumps to
- * __secondary_start(). smp_boot_cpus() takes care of setting those
- * values.
- *
- * We also use physical address 0x4 here to tell when a cpu
- * is in its holding pattern code.
- *
- * -- Cort
- */
- {
- extern void __secondary_hold(void);
- unsigned long i;
- char type[16];
-
-
- /*
- * XXX: hack to make sure we're chrp, assume that if we're
- * chrp we have a device_type property -- Cort
- */
- node = call_prom(RELOC("finddevice"), 1, 1, RELOC("/"));
- if ( (int)call_prom(RELOC("getprop"), 4, 1, node,
- RELOC("device_type"),type, sizeof(type)) <= 0)
- return phys;
-
- /* copy the holding pattern code to someplace safe (8M) */
- memcpy( (void *)(8<<20), RELOC(__secondary_hold), 0x100 );
- for (i = 8<<20; i < ((8<<20)+0x100); i += 32)
- {
- asm volatile("dcbf 0,%0" : : "r" (i) : "memory");
- asm volatile("icbi 0,%0" : : "r" (i) : "memory");
- }
- }
-
- /* look for cpus */
- for (node = 0; prom_next_node(&node);)
- {
- type[0] = 0;
- call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
- type, sizeof(type));
- if (strcmp(type, RELOC("cpu")) != 0)
- continue;
- path = (char *) mem;
- memset(path, 0, 256);
- if ((int) call_prom(RELOC("package-to-path"), 3, 1,
- node, path, 255) < 0)
- continue;
- /* XXX: hack - don't start cpu 0, this cpu -- Cort */
- if ( smp_chrp_cpu_nr++ == 0 )
- continue;
- prom_print(RELOC("starting cpu "));
- prom_print(path);
- *(unsigned long *)(0x4) = 0;
- asm volatile("dcbf 0,%0": : "r" (0x4) : "memory");
- call_prom(RELOC("start-cpu"), 3, 0, node, 8<<20, smp_chrp_cpu_nr-1);
- for ( i = 0 ; (i < 10000) &&
- (*(ulong *)(0x4) == (ulong)0); i++ )
- ;
- if (*(ulong *)(0x4) == (ulong)smp_chrp_cpu_nr-1 )
- prom_print(RELOC("...ok\n"));
- else
- prom_print(RELOC("...failed\n"));
- }
-#endif
/* If OpenFirmware version >= 3, then use quiesce call */
if (prom_version >= 3) {
prom_print(RELOC("Calling quiesce ...\n"));
@@ -631,17 +748,41 @@ prom_init(int r3, int r4, prom_entry pp)
#ifdef CONFIG_BOOTX_TEXT
if (!chrp && RELOC(disp_bi)) {
- RELOC(prom_stdout) = 0;
clearscreen();
prom_welcome(PTRRELOC(RELOC(disp_bi)), phys);
- prom_print(RELOC("booting...\n"));
+ prom_drawstring(RELOC("booting...\n"));
}
RELOC(bootx_text_mapped) = 0;
#endif
+ prom_print(RELOC("returning from prom_init\n"));
+ RELOC(prom_stdout) = 0;
return phys;
}
+void phys_call_rtas(int service, int nargs, int nret, ...)
+{
+ va_list list;
+ union {
+ unsigned long words[16];
+ double align;
+ } u;
+ unsigned long offset = reloc_offset();
+ void (*rtas)(void *, unsigned long);
+ int i;
+
+ u.words[0] = service;
+ u.words[1] = nargs;
+ u.words[2] = nret;
+ va_start(list, nret);
+ for (i = 0; i < nargs; ++i)
+ u.words[i+3] = va_arg(list, unsigned long);
+ va_end(list);
+
+ rtas = (void (*)(void *, unsigned long)) RELOC(rtas_entry);
+ rtas(&u, RELOC(rtas_data));
+}
+
#ifdef CONFIG_BOOTX_TEXT
__init static void
prom_welcome(boot_infos_t* bi, unsigned long phys)
@@ -650,34 +791,34 @@ prom_welcome(boot_infos_t* bi, unsigned long phys)
unsigned long flags;
unsigned long pvr;
- prom_print(RELOC("Welcome to Linux, kernel " UTS_RELEASE "\n"));
- prom_print(RELOC("\nstarted at : 0x"));
+ prom_drawstring(RELOC("Welcome to Linux, kernel " UTS_RELEASE "\n"));
+ prom_drawstring(RELOC("\nstarted at : 0x"));
prom_drawhex(phys);
- prom_print(RELOC("\nlinked at : 0x"));
+ prom_drawstring(RELOC("\nlinked at : 0x"));
prom_drawhex(KERNELBASE);
- prom_print(RELOC("\nframe buffer at : 0x"));
+ prom_drawstring(RELOC("\nframe buffer at : 0x"));
prom_drawhex((unsigned long)bi->dispDeviceBase);
- prom_print(RELOC(" (phys), 0x"));
+ prom_drawstring(RELOC(" (phys), 0x"));
prom_drawhex((unsigned long)bi->logicalDisplayBase);
- prom_print(RELOC(" (log)"));
- prom_print(RELOC("\nklimit : 0x"));
- prom_drawhex(RELOC(klimit));
- prom_print(RELOC("\nMSR : 0x"));
+ prom_drawstring(RELOC(" (log)"));
+ prom_drawstring(RELOC("\nklimit : 0x"));
+ prom_drawhex((unsigned long)RELOC(klimit));
+ prom_drawstring(RELOC("\nMSR : 0x"));
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
prom_drawhex(flags);
__asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr));
pvr >>= 16;
if (pvr > 1) {
- prom_print(RELOC("\nHID0 : 0x"));
+ prom_drawstring(RELOC("\nHID0 : 0x"));
__asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags));
prom_drawhex(flags);
}
if (pvr == 8 || pvr == 12) {
- prom_print(RELOC("\nICTC : 0x"));
+ prom_drawstring(RELOC("\nICTC : 0x"));
__asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags));
prom_drawhex(flags);
}
- prom_print(RELOC("\n\n"));
+ prom_drawstring(RELOC("\n\n"));
}
#endif
@@ -822,6 +963,10 @@ setup_disp_fake_bi(ihandle dp)
call_prom(RELOC("getprop"), 4, 1, dp, RELOC("linebytes"),
&pitch, sizeof(pitch));
address = 0;
+ if (pitch == 1) {
+ address = 0xfa000000;
+ pitch = 0x1000; /* for strange IBM display */
+ }
call_prom(RELOC("getprop"), 4, 1, dp, RELOC("address"),
&address, sizeof(address));
if (address == 0) {
@@ -987,6 +1132,7 @@ finish_device_tree(void)
/* All newworld machines now use the interrupt tree */
struct device_node *np = allnodes;
+
while(np) {
if (get_property(np, "interrupt-parent", 0)) {
pmac_newworld = 1;
@@ -997,7 +1143,7 @@ finish_device_tree(void)
if (boot_infos == 0 && pmac_newworld)
use_of_interrupt_tree = 1;
- mem = finish_node(allnodes, mem, NULL);
+ mem = finish_node(allnodes, mem, NULL, 0, 0);
dev_tree_size = mem - (unsigned long) allnodes;
klimit = (char *) mem;
}
@@ -1025,21 +1171,30 @@ early_get_property(unsigned long base, unsigned long node, char *prop)
__init
static unsigned long
finish_node(struct device_node *np, unsigned long mem_start,
- interpret_func *ifunc)
+ interpret_func *ifunc, int naddrc, int nsizec)
{
struct device_node *child;
+ int *ip;
np->name = get_property(np, "name", 0);
np->type = get_property(np, "device_type", 0);
/* get the device addresses and interrupts */
if (ifunc != NULL) {
- mem_start = ifunc(np, mem_start);
+ mem_start = ifunc(np, mem_start, naddrc, nsizec);
}
if (use_of_interrupt_tree) {
mem_start = finish_node_interrupts(np, mem_start);
}
+ /* Look for #address-cells and #size-cells properties. */
+ ip = (int *) get_property(np, "#address-cells", 0);
+ if (ip != NULL)
+ naddrc = *ip;
+ ip = (int *) get_property(np, "#size-cells", 0);
+ if (ip != NULL)
+ nsizec = *ip;
+
/* the f50 sets the name to 'display' and 'compatible' to what we
* expect for the name -- Cort
*/
@@ -1080,7 +1235,8 @@ finish_node(struct device_node *np, unsigned long mem_start,
}
for (child = np->child; child != NULL; child = child->sibling)
- mem_start = finish_node(child, mem_start, ifunc);
+ mem_start = finish_node(child, mem_start, ifunc,
+ naddrc, nsizec);
return mem_start;
}
@@ -1246,7 +1402,8 @@ void relocate_nodes(void)
__init
static unsigned long
-interpret_pci_props(struct device_node *np, unsigned long mem_start)
+interpret_pci_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
{
struct address_range *adr;
struct pci_reg_property *pci_addrs;
@@ -1329,7 +1486,8 @@ interpret_pci_props(struct device_node *np, unsigned long mem_start)
__init
static unsigned long
-interpret_dbdma_props(struct device_node *np, unsigned long mem_start)
+interpret_dbdma_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
{
struct reg_property *rp;
struct address_range *adr;
@@ -1381,7 +1539,8 @@ interpret_dbdma_props(struct device_node *np, unsigned long mem_start)
__init
static unsigned long
-interpret_macio_props(struct device_node *np, unsigned long mem_start)
+interpret_macio_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
{
struct reg_property *rp;
struct address_range *adr;
@@ -1450,7 +1609,8 @@ interpret_macio_props(struct device_node *np, unsigned long mem_start)
__init
static unsigned long
-interpret_isa_props(struct device_node *np, unsigned long mem_start)
+interpret_isa_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
{
struct isa_reg_property *rp;
struct address_range *adr;
@@ -1491,21 +1651,24 @@ interpret_isa_props(struct device_node *np, unsigned long mem_start)
__init
static unsigned long
-interpret_root_props(struct device_node *np, unsigned long mem_start)
+interpret_root_props(struct device_node *np, unsigned long mem_start,
+ int naddrc, int nsizec)
{
- struct reg_property *rp;
struct address_range *adr;
int i, l, *ip;
+ unsigned int *rp;
+ int rpsize = (naddrc + nsizec) * sizeof(unsigned int);
- rp = (struct reg_property *) get_property(np, "reg", &l);
- if (rp != 0 && l >= sizeof(struct reg_property)) {
+ rp = (unsigned int *) get_property(np, "reg", &l);
+ if (rp != 0 && l >= rpsize) {
i = 0;
adr = (struct address_range *) mem_start;
- while ((l -= sizeof(struct reg_property)) >= 0) {
+ while ((l -= rpsize) >= 0) {
adr[i].space = 0;
- adr[i].address = rp[i].address;
- adr[i].size = rp[i].size;
+ adr[i].address = rp[naddrc - 1];
+ adr[i].size = rp[naddrc + nsizec - 1];
++i;
+ rp += naddrc + nsizec;
}
np->addrs = adr;
np->n_addrs = i;
@@ -1583,9 +1746,8 @@ find_pci_device_OFnode(unsigned char bus, unsigned char dev_fn)
int l;
for (np = allnodes; np != 0; np = np->allnext) {
- char *pname = np->parent ?
- (char *)get_property(np->parent, "name", &l) : 0;
- if (pname && strcmp(pname, "mac-io") == 0)
+ if (np->parent == NULL || np->parent->type == NULL
+ || strcmp(np->parent->type, "pci") != 0)
continue;
reg = (unsigned int *) get_property(np, "reg", &l);
if (reg == 0 || l < sizeof(struct reg_property))
@@ -1781,6 +1943,8 @@ print_properties(struct device_node *np)
}
#endif
+spinlock_t rtas_lock = SPIN_LOCK_UNLOCKED;
+
/* this can be called after setup -- Cort */
__openfirmware
int
@@ -1813,11 +1977,10 @@ call_rtas(const char *service, int nargs, int nret,
u.words[i+3] = va_arg(list, unsigned long);
va_end(list);
- save_flags(s);
- cli();
-
+ spin_lock_irqsave(&rtas_lock, s);
enter_rtas((void *)__pa(&u));
- restore_flags(s);
+ spin_unlock_irqrestore(&rtas_lock, s);
+
if (nret > 1 && outputs != NULL)
for (i = 0; i < nret-1; ++i)
outputs[i] = u.words[i+nargs+4];
diff --git a/arch/ppc/kernel/ptrace.c b/arch/ppc/kernel/ptrace.c
index 3ccc8f518..e618f22f6 100644
--- a/arch/ppc/kernel/ptrace.c
+++ b/arch/ppc/kernel/ptrace.c
@@ -89,10 +89,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
lock_kernel();
if (request == PTRACE_TRACEME) {
/* are we already being traced? */
- if (current->flags & PF_PTRACED)
+ if (current->ptrace & PT_PTRACED)
goto out;
/* set the ptrace bit in the process flags. */
- current->flags |= PF_PTRACED;
+ current->ptrace |= PT_PTRACED;
ret = 0;
goto out;
}
@@ -123,9 +123,9 @@ int sys_ptrace(long request, long pid, long addr, long data)
&& !capable(CAP_SYS_PTRACE))
goto out_tsk;
/* the same process cannot be attached many times */
- if (child->flags & PF_PTRACED)
+ if (child->ptrace & PT_PTRACED)
goto out_tsk;
- child->flags |= PF_PTRACED;
+ child->ptrace |= PT_PTRACED;
write_lock_irq(&tasklist_lock);
if (child->p_pptr != current) {
@@ -140,7 +140,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
goto out_tsk;
}
ret = -ESRCH;
- if (!(child->flags & PF_PTRACED))
+ if (!(child->ptrace & PT_PTRACED))
goto out_tsk;
if (child->state != TASK_STOPPED) {
if (request != PTRACE_KILL)
@@ -175,7 +175,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
if ((addr & 3) || index > PT_FPSCR)
break;
- if (addr < PT_FPR0) {
+ if (index < PT_FPR0) {
tmp = get_reg(child, (int) index);
} else {
if (child->thread.regs->msr & MSR_FP)
@@ -206,10 +206,10 @@ int sys_ptrace(long request, long pid, long addr, long data)
if ((addr & 3) || index > PT_FPSCR)
break;
- if (addr == PT_ORIG_R3)
+ if (index == PT_ORIG_R3)
break;
- if (addr < PT_FPR0) {
- ret = put_reg(child, addr, data);
+ if (index < PT_FPR0) {
+ ret = put_reg(child, index, data);
} else {
if (child->thread.regs->msr & MSR_FP)
giveup_fpu(child);
@@ -225,9 +225,9 @@ int sys_ptrace(long request, long pid, long addr, long data)
if ((unsigned long) data > _NSIG)
break;
if (request == PTRACE_SYSCALL)
- child->flags |= PF_TRACESYS;
+ child->ptrace |= PT_TRACESYS;
else
- child->flags &= ~PF_TRACESYS;
+ child->ptrace &= ~PT_TRACESYS;
child->exit_code = data;
/* make sure the single step bit is not set. */
clear_single_step(child);
@@ -256,7 +256,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- child->flags &= ~PF_TRACESYS;
+ child->ptrace &= ~PT_TRACESYS;
set_single_step(child);
child->exit_code = data;
/* give it a chance to run. */
@@ -269,7 +269,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- child->flags &= ~(PF_PTRACED|PF_TRACESYS);
+ child->ptrace &= ~(PT_PTRACED|PT_TRACESYS);
child->exit_code = data;
write_lock_irq(&tasklist_lock);
REMOVE_LINKS(child);
@@ -296,8 +296,8 @@ out:
void syscall_trace(void)
{
- if ((current->flags & (PF_PTRACED|PF_TRACESYS))
- != (PF_PTRACED|PF_TRACESYS))
+ if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
+ != (PT_PTRACED|PT_TRACESYS))
return;
current->exit_code = SIGTRAP;
current->state = TASK_STOPPED;
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 67895b87d..6c0c38273 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -23,6 +23,7 @@
#include <asm/setup.h>
#include <asm/amigappc.h>
#include <asm/smp.h>
+#include <asm/elf.h>
#ifdef CONFIG_8xx
#include <asm/mpc8xx.h>
#include <asm/8xx_immap.h>
@@ -130,6 +131,14 @@ struct screen_info screen_info = {
};
/*
+ * These are used in binfmt_elf.c to put aux entries on the stack
+ * for each elf executable being started.
+ */
+int dcache_bsize;
+int icache_bsize;
+int ucache_bsize;
+
+/*
* I really need to add multiple-console support... -- Cort
*/
int __init pmac_display_supported(char *name)
@@ -275,7 +284,11 @@ int get_cpuinfo(char *buffer)
}
break;
case 0x000C:
- len += sprintf(len+buffer, "7400 (G4)\n");
+ len += sprintf(len+buffer, "7400 (G4");
+#ifdef CONFIG_ALTIVEC
+ len += sprintf(len+buffer, ", altivec enabled");
+#endif /* CONFIG_ALTIVEC */
+ len += sprintf(len+buffer, ")\n");
break;
case 0x0020:
len += sprintf(len+buffer, "403G");
@@ -288,6 +301,15 @@ int get_cpuinfo(char *buffer)
break;
}
break;
+ case 0x0035:
+ len += sprintf(len+buffer, "POWER4\n");
+ break;
+ case 0x0040:
+ len += sprintf(len+buffer, "POWER3 (630)\n");
+ break;
+ case 0x0041:
+ len += sprintf(len+buffer, "POWER3 (630+)\n");
+ break;
case 0x0050:
len += sprintf(len+buffer, "8xx\n");
break;
@@ -458,7 +480,6 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
intuit_machine_type();
#endif /* CONFIG_MACH_SPECIFIC */
finish_device_tree();
-
/*
* If we were booted via quik, r3 points to the physical
* address of the command-line parameters.
@@ -494,6 +515,8 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
#ifdef CONFIG_BLK_DEV_INITRD
if (r3 && r4 && r4 != 0xdeadbeef)
{
+ if (r3 < KERNELBASE)
+ r3 += KERNELBASE;
initrd_start = r3;
initrd_end = r3 + r4;
ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
@@ -574,7 +597,7 @@ identify_machine(unsigned long r3, unsigned long r4, unsigned long r5,
}
__max_memory = maxmem;
}
-
+
/* this is for modules since _machine can be a define -- Cort */
ppc_md.ppc_machine = _machine;
@@ -684,6 +707,24 @@ void __init setup_arch(char **cmdline_p)
breakpoint();
#endif
+ /*
+ * Set cache line size based on type of cpu as a default.
+ * Systems with OF can look in the properties on the cpu node(s)
+ * for a possibly more accurate value.
+ */
+ dcache_bsize = icache_bsize = 32; /* most common value */
+ switch (_get_PVR() >> 16) {
+ case 1: /* 601, with unified cache */
+ ucache_bsize = 32;
+ break;
+ /* XXX need definitions in here for 8xx etc. */
+ case 0x40:
+ case 0x41:
+ case 0x35: /* 64-bit POWER3, POWER3+, POWER4 */
+ dcache_bsize = icache_bsize = 128;
+ break;
+ }
+
/* reboot on panic */
panic_timeout = 180;
@@ -779,7 +820,7 @@ void ppc_generic_ide_fix_driveid(struct hd_driveid *id)
for (i = 0; i < 26; i++)
id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
id->word156 = __le16_to_cpu(id->word156);
- for (i = 0; i < 4; i++)
+ for (i = 0; i < 3; i++)
id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
for (i = 0; i < 96; i++)
id->words160_255[i] = __le16_to_cpu(id->words160_255[i]);
diff --git a/arch/ppc/kernel/signal.c b/arch/ppc/kernel/signal.c
index e5452fedb..ee7ba852c 100644
--- a/arch/ppc/kernel/signal.c
+++ b/arch/ppc/kernel/signal.c
@@ -411,7 +411,7 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
if (!signr)
break;
- if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
+ if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
/* Let the debugger run. */
current->exit_code = signr;
current->state = TASK_STOPPED;
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index 97543348b..78157554d 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -53,6 +53,9 @@ unsigned int prof_multiplier[NR_CPUS];
unsigned int prof_counter[NR_CPUS];
cycles_t cacheflush_time;
+/* this has to go in the data section because it is accessed from prom_init */
+int smp_hw_index[NR_CPUS] = {0};
+
/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
@@ -238,6 +241,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
case _MACH_chrp:
case _MACH_prep:
case _MACH_gemini:
+#ifndef CONFIG_POWER4
/* make sure we're sending something that translates to an IPI */
if ( msg > 0x3 )
break;
@@ -254,6 +258,18 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait)
openpic_cause_IPI(smp_processor_id(), msg, 1<<target);
break;
}
+#else /* CONFIG_POWER4 */
+ /* for now, only do reschedule messages
+ since we only have one IPI */
+ if (msg != MSG_RESCHEDULE)
+ break;
+ for (i = 0; i < smp_num_cpus; ++i) {
+ if (target == MSG_ALL || target == i
+ || (target == MSG_ALL_BUT_SELF
+ && i != smp_processor_id()))
+ xics_cause_IPI(i);
+ }
+#endif /* CONFIG_POWER4 */
break;
}
}
@@ -306,8 +322,9 @@ void __init smp_boot_cpus(void)
cpu_nr = 2;
break;
case _MACH_chrp:
- for ( i = 0; i < 4 ; i++ )
- openpic_enable_IPI(i);
+ if (OpenPIC)
+ for ( i = 0; i < 4 ; i++ )
+ openpic_enable_IPI(i);
cpu_nr = smp_chrp_cpu_nr;
break;
case _MACH_gemini:
@@ -390,10 +407,10 @@ void __init smp_boot_cpus(void)
printk("Processor %d is stuck.\n", i);
}
}
-
- if ( _machine & (_MACH_gemini|_MACH_chrp|_MACH_prep) )
+
+ if (OpenPIC && (_machine & (_MACH_gemini|_MACH_chrp|_MACH_prep)))
do_openpic_setup_cpu();
-
+
if ( _machine == _MACH_Pmac )
{
/* reset the entry point so if we get another intr we won't
@@ -433,19 +450,19 @@ void __init smp_callin(void)
set_dec(decrementer_count);
init_idle();
-#if 0
- current->mm->mmap->vm_page_prot = PAGE_SHARED;
- current->mm->mmap->vm_start = PAGE_OFFSET;
- current->mm->mmap->vm_end = init_mm.mmap->vm_end;
-#endif
cpu_callin_map[current->processor] = 1;
+
+#ifndef CONFIG_POWER4
/*
* Each processor has to do this and this is the best
* place to stick it for now.
* -- Cort
*/
- if ( _machine & (_MACH_gemini|_MACH_chrp|_MACH_prep) )
+ if (OpenPIC && _machine & (_MACH_gemini|_MACH_chrp|_MACH_prep))
do_openpic_setup_cpu();
+#else
+ xics_setup_cpu();
+#endif /* CONFIG_POWER4 */
#ifdef CONFIG_GEMINI
if ( _machine == _MACH_gemini )
gemini_init_l2();
diff --git a/arch/ppc/kernel/time.c b/arch/ppc/kernel/time.c
index 3877d11dc..3303bf785 100644
--- a/arch/ppc/kernel/time.c
+++ b/arch/ppc/kernel/time.c
@@ -50,6 +50,7 @@ void smp_local_timer_interrupt(struct pt_regs *);
/* keep track of when we need to update the rtc */
time_t last_rtc_update = 0;
+extern rwlock_t xtime_lock;
/* The decrementer counts down by 128 every 128ns on a 601. */
#define DECREMENTER_COUNT_601 (1000000000 / HZ)
@@ -69,6 +70,7 @@ unsigned long last_tb;
int timer_interrupt(struct pt_regs * regs)
{
int dval, d;
+ unsigned long flags;
unsigned long cpu = smp_processor_id();
hardirq_enter(cpu);
@@ -102,7 +104,6 @@ int timer_interrupt(struct pt_regs * regs)
while ((d = get_dec()) == dval)
;
asm volatile("mftb %0" : "=r" (last_tb) );
-
/*
* Don't play catchup between the call to time_init()
* and sti() in init/main.c.
@@ -122,6 +123,7 @@ int timer_interrupt(struct pt_regs * regs)
/*
* update the rtc when needed
*/
+ read_lock_irqsave(&xtime_lock, flags);
if ( (time_status & STA_UNSYNC) &&
((xtime.tv_sec > last_rtc_update + 60) ||
(xtime.tv_sec < last_rtc_update)) )
@@ -132,6 +134,7 @@ int timer_interrupt(struct pt_regs * regs)
/* do it again in 60 s */
last_rtc_update = xtime.tv_sec;
}
+ read_unlock_irqrestore(&xtime_lock, flags);
}
#ifdef CONFIG_SMP
smp_local_timer_interrupt(regs);
@@ -153,17 +156,18 @@ void do_gettimeofday(struct timeval *tv)
save_flags(flags);
cli();
+ read_lock_irqsave(&xtime_lock, flags);
*tv = xtime;
+ read_unlock_irqrestore(&xtime_lock, flags);
/* XXX we don't seem to have the decrementers synced properly yet */
#ifndef CONFIG_SMP
asm volatile("mftb %0" : "=r" (diff) );
diff -= last_tb;
-
tv->tv_usec += diff * count_period_num / count_period_den;
tv->tv_sec += tv->tv_usec / 1000000;
tv->tv_usec = tv->tv_usec % 1000000;
#endif
-
+
restore_flags(flags);
}
@@ -177,8 +181,10 @@ void do_settimeofday(struct timeval *tv)
frac_tick = tv->tv_usec % (1000000 / HZ);
save_flags(flags);
cli();
+ write_lock_irqsave(&xtime_lock, flags);
xtime.tv_sec = tv->tv_sec;
xtime.tv_usec = tv->tv_usec - frac_tick;
+ write_unlock_irqrestore(&xtime_lock, flags);
set_dec(frac_tick * count_period_den / count_period_num);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
@@ -191,6 +197,7 @@ void do_settimeofday(struct timeval *tv)
void __init time_init(void)
{
+ unsigned long flags;
if (ppc_md.time_init != NULL)
{
ppc_md.time_init();
@@ -205,8 +212,10 @@ void __init time_init(void)
ppc_md.calibrate_decr();
}
- xtime.tv_sec = ppc_md.get_rtc_time();
- xtime.tv_usec = 0;
+ write_lock_irqsave(&xtime_lock, flags);
+ xtime.tv_sec = ppc_md.get_rtc_time();
+ xtime.tv_usec = 0;
+ write_unlock_irqrestore(&xtime_lock, flags);
set_dec(decrementer_count);
/* allow setting the time right away */
diff --git a/arch/ppc/kernel/xics.c b/arch/ppc/kernel/xics.c
new file mode 100644
index 000000000..a9772821b
--- /dev/null
+++ b/arch/ppc/kernel/xics.c
@@ -0,0 +1,214 @@
+/*
+ * arch/ppc/kernel/xics.c
+ *
+ * Copyright 2000 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include "i8259.h"
+#include "xics.h"
+
/* irq-controller operations implemented below */
void xics_enable_irq(u_int irq);
void xics_disable_irq(u_int irq);
void xics_mask_and_ack_irq(u_int irq);
void xics_end_irq(u_int irq);

/* ops for sources presented directly by the XICS presentation layer */
struct hw_interrupt_type xics_pic = {
	" XICS ",
	NULL,
	NULL,
	xics_enable_irq,
	xics_disable_irq,
	xics_mask_and_ack_irq,
	xics_end_irq
};

/*
 * Ops for the 16 irqs routed through the cascaded i8259.  The enable
 * and disable hooks are left NULL here and copied over from i8259_pic
 * in xics_init_IRQ().
 */
struct hw_interrupt_type xics_8259_pic = {
	" XICS/8259",
	NULL,
	NULL,
	NULL,
	NULL,
	xics_mask_and_ack_irq,
	NULL
};
+
/* hardware source number used for inter-processor interrupts */
#define XICS_IPI 2
/* source number of the cascaded i8259 controller */
#define XICS_IRQ_8259_CASCADE 0x2c
/* linux irq = XICS source + this offset; irqs 0-15 belong to the 8259 */
#define XICS_IRQ_OFFSET 16
/* reading the XIRR yields this source when nothing is pending */
#define XICS_IRQ_SPURIOUS 0

/* server/priority handed to ibm,set-xive when a source is enabled */
/* NOTE(review): presumably "boot cpu, most favored" - confirm vs firmware */
#define DEFAULT_SERVER 0
#define DEFAULT_PRIORITY 0

/*
 * Per-cpu XICS interrupt presentation layer registers, mapped with
 * ioremap() in xics_init_IRQ().  The unions allow both word-size and
 * byte-size access (the accessor macros below use xirr.bytes[0] as
 * the CPPR byte).
 */
struct xics_ipl {
	union {
		u32 word;
		u8 bytes[4];
	} xirr_poll;
	union {
		u32 word;
		u8 bytes[4];
	} xirr;
	u32 dummy;
	union {
		u32 word;
		u8 bytes[4];
	} qirr;
};

struct xics_info {
	volatile struct xics_ipl * per_cpu[NR_CPUS];
};

struct xics_info xics_info;

/* accessors for one cpu's presentation registers */
#define xirr_info(n_cpu) (xics_info.per_cpu[n_cpu]->xirr.word)
#define cppr_info(n_cpu) (xics_info.per_cpu[n_cpu]->xirr.bytes[0])
#define poll_info(n_cpu) (xics_info.per_cpu[n_cpu]->xirr_poll.word)
#define qirr_info(n_cpu) (xics_info.per_cpu[n_cpu]->qirr.bytes[0])
+
+void
+xics_enable_irq(
+ u_int irq
+ )
+{
+ int status;
+ int call_status;
+
+ irq -= XICS_IRQ_OFFSET;
+ if (irq == XICS_IPI)
+ return;
+ call_status = call_rtas("ibm,set-xive", 3, 1, (ulong*)&status,
+ irq, DEFAULT_SERVER, DEFAULT_PRIORITY);
+ if( call_status != 0 ) {
+ printk("xics_enable_irq: irq=%x: call_rtas failed; retn=%x, status=%x\n",
+ irq, call_status, status);
+ return;
+ }
+}
+
+void
+xics_disable_irq(
+ u_int irq
+ )
+{
+ int status;
+ int call_status;
+
+ irq -= XICS_IRQ_OFFSET;
+ call_status = call_rtas("ibm,int-off", 1, 1, (ulong*)&status, irq);
+ if( call_status != 0 ) {
+ printk("xics_disable_irq: irq=%x: call_rtas failed, retn=%x\n",
+ irq, call_status);
+ return;
+ }
+}
+
+void
+xics_end_irq(
+ u_int irq
+ )
+{
+ int cpu = smp_processor_id();
+
+ cppr_info(cpu) = 0; /* actually the value overwritten by ack */
+ xirr_info(cpu) = (0xff<<24) | (irq-XICS_IRQ_OFFSET);
+}
+
+void
+xics_mask_and_ack_irq(
+ u_int irq
+ )
+{
+ int cpu = smp_processor_id();
+
+ if( irq < XICS_IRQ_OFFSET ) {
+ i8259_pic.ack(irq);
+ xirr_info(cpu) = (0xff<<24) | XICS_IRQ_8259_CASCADE;
+ }
+ else {
+ cppr_info(cpu) = 0xff;
+ }
+}
+
+int
+xics_get_irq(struct pt_regs *regs)
+{
+ u_int cpu = smp_processor_id();
+ u_int vec;
+ int irq;
+
+ vec = xirr_info(cpu);
+ /* (vec >> 24) == old priority */
+ vec &= 0x00ffffff;
+ /* for sanity, this had better be < NR_IRQS - 16 */
+ if( vec == XICS_IRQ_8259_CASCADE )
+ irq = i8259_irq(cpu);
+ else if( vec == XICS_IRQ_SPURIOUS )
+ irq = -1;
+ else
+ irq = vec + XICS_IRQ_OFFSET;
+ return irq;
+}
+
#ifdef CONFIG_SMP
/*
 * IPI handler: clear this cpu's queued-interrupt request byte, then
 * hand off to the generic SMP message code.  Only MSG_RESCHEDULE is
 * delivered over this single IPI source.
 */
void xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	qirr_info(smp_processor_id()) = 0xff;
	smp_message_recv(MSG_RESCHEDULE);
}

/* raise an IPI on @cpu by writing its queued-interrupt request byte */
void xics_cause_IPI(int cpu)
{
	qirr_info(cpu) = 0;
}

/* per-cpu bring-up: set the CPPR to 0xff so interrupts are accepted */
void xics_setup_cpu(void)
{
	cppr_info(smp_processor_id()) = 0xff;
}
#endif /* CONFIG_SMP */
+
+void
+xics_init_IRQ( void )
+{
+ int i;
+ extern unsigned long smp_chrp_cpu_nr;
+
+#ifdef CONFIG_SMP
+ for (i = 0; i < smp_chrp_cpu_nr; ++i)
+ xics_info.per_cpu[i] =
+ ioremap(0xfe000000 + smp_hw_index[i] * 0x1000, 0x20);
+#else
+ xics_info.per_cpu[0] = ioremap(0xfe000000, 0x20);
+#endif /* CONFIG_SMP */
+ xics_8259_pic.enable = i8259_pic.enable;
+ xics_8259_pic.disable = i8259_pic.disable;
+ for (i = 0; i < 16; ++i)
+ irq_desc[i].handler = &xics_8259_pic;
+ for (; i < NR_IRQS; ++i)
+ irq_desc[i].handler = &xics_pic;
+
+ cppr_info(0) = 0xff;
+ if (request_irq(XICS_IRQ_8259_CASCADE + XICS_IRQ_OFFSET, no_action,
+ 0, "8259 cascade", 0))
+ printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
+ i8259_init();
+
+#ifdef CONFIG_SMP
+ request_irq(XICS_IPI + XICS_IRQ_OFFSET, xics_ipi_action, 0, "IPI", 0);
+#endif
+}
diff --git a/arch/ppc/kernel/xics.h b/arch/ppc/kernel/xics.h
new file mode 100644
index 000000000..88b4d4790
--- /dev/null
+++ b/arch/ppc/kernel/xics.h
@@ -0,0 +1,23 @@
+/*
+ * arch/ppc/kernel/xics.h
+ *
+ * Copyright 2000 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
#ifndef _PPC_KERNEL_XICS_H
#define _PPC_KERNEL_XICS_H

#include "local_irq.h"

/* irq_desc handler tables installed by xics_init_IRQ() */
extern struct hw_interrupt_type xics_pic;
extern struct hw_interrupt_type xics_8259_pic;

/* map the per-cpu presentation areas and install the irq handlers */
void xics_init_IRQ(void);
/* read/ack the next pending interrupt; returns -1 if spurious */
int xics_get_irq(struct pt_regs *);

#endif /* _PPC_KERNEL_XICS_H */