author     Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
commit     529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree       78f1c0b805f5656aa7b0417a043c5346f700a2cf /arch/ppc/mm
parent     0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I ignored all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'arch/ppc/mm')
-rw-r--r--  arch/ppc/mm/4xx_tlb.c  561
-rw-r--r--  arch/ppc/mm/init.c      86
2 files changed, 326 insertions, 321 deletions
diff --git a/arch/ppc/mm/4xx_tlb.c b/arch/ppc/mm/4xx_tlb.c
index b9d9d2119..69bf88320 100644
--- a/arch/ppc/mm/4xx_tlb.c
+++ b/arch/ppc/mm/4xx_tlb.c
@@ -1,6 +1,9 @@
/*
*
- * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * Copyright (c) 1998-1999 TiVo, Inc.
+ * Original implementation.
+ * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
+ * Minor rework.
*
* Module name: 4xx_tlb.c
*
@@ -9,7 +12,10 @@
*
*/
+#include <linux/mm.h>
+
#include <asm/processor.h>
+#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -26,372 +32,327 @@
#endif
-/* Function Macros */
-
-
-/* Type Definitios */
-
-typedef struct pin_entry_s {
- unsigned int e_pinned: 1, /* This TLB entry is pinned down. */
- e_used: 23; /* Number of users for this mapping. */
-} pin_entry_t;
-
-
/* Global Variables */
-static pin_entry_t pin_table[PPC4XX_TLB_SIZE];
+static int pinned = 0;
/* Function Prototypes */
+static int PPC4xx_tlb_miss(struct pt_regs *, unsigned long, int);
-void
-PPC4xx_tlb_pin(unsigned long va, unsigned long pa, int pagesz, int cache)
-{
- int i, found = FALSE;
- unsigned long tag, data;
- unsigned long opid;
-
- opid = mfspr(SPRN_PID);
- mtspr(SPRN_PID, 0);
-
- data = (pa & TLB_RPN_MASK) | TLB_WR;
-
- if (cache)
- data |= (TLB_EX | TLB_I);
- else
- data |= (TLB_G | TLB_I);
-
- tag = (va & TLB_EPN_MASK) | TLB_VALID | pagesz;
-
- for (i = 0; i < PPC4XX_TLB_SIZE; i++) {
- if (pin_table[i].e_pinned == FALSE) {
- found = TRUE;
- break;
- }
- }
+extern void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
- if (found) {
- /* printk("Pinning %#x -> %#x in entry %d...\n", va, pa, i); */
- asm("tlbwe %0,%1,1" : : "r" (data), "r" (i));
- asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i));
- asm("isync");
- pin_table[i].e_pinned = found;
- }
- mtspr(SPRN_PID, opid);
- return;
-}
-
-void
-PPC4xx_tlb_unpin(unsigned long va, unsigned long pa, int size)
+/*
+ * PPC4xx_tlb_write()
+ *
+ * Description:
+ *   This routine writes the specified tag and data words into the TLB
+ *   entry at the given index.
+ *
+ * Input(s):
+ *   tag   - TLB tag word (EPN, page size, and valid bit).
+ *   data  - TLB data word (RPN and attribute bits).
+ *   index - Index of the TLB entry to write.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
+ */
+static inline void
+PPC4xx_tlb_write(unsigned long tag, unsigned long data, unsigned int index)
{
- /* XXX - To beimplemented. */
+ asm("tlbwe %0,%1,1" : : "r" (data), "r" (index));
+ asm("tlbwe %0,%1,0" : : "r" (tag), "r" (index));
}
+/*
+ * PPC4xx_flush_tlb_all()
+ *
+ * Description:
+ *   This routine invalidates every TLB entry which is not pinned,
+ *   running with interrupts disabled and PID temporarily set to 0.
+ *
+ * Input(s):
+ *   N/A
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
+ */
void
-PPC4xx_tlb_flush_all(void)
+PPC4xx_flush_tlb_all(void)
{
int i;
- unsigned long flags, opid;
+ unsigned long flags, pid;
save_flags(flags);
cli();
- opid = mfspr(SPRN_PID);
+ pid = mfspr(SPRN_PID);
mtspr(SPRN_PID, 0);
- for (i = 0; i < PPC4XX_TLB_SIZE; i++) {
- unsigned long ov = 0;
-
- if (pin_table[i].e_pinned)
- continue;
-
- asm("tlbwe %0,%1,0" : : "r" (ov), "r" (i));
- asm("tlbwe %0,%1,1" : : "r" (ov), "r" (i));
+ for (i = pinned; i < PPC4XX_TLB_SIZE; i++) {
+ PPC4xx_tlb_write(0, 0, i);
}
-
asm("sync;isync");
- mtspr(SPRN_PID, opid);
+ mtspr(SPRN_PID, pid);
restore_flags(flags);
}
+/*
+ * PPC4xx_dtlb_miss()
+ *
+ * Description:
+ *   This routine handles a data TLB miss exception. It tries to load
+ *   a TLB entry from the Linux page tables; if that fails, the fault
+ *   is passed on to do_page_fault().
+ *
+ * Input(s):
+ *   regs - Register frame at the time of the exception.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
+ */
void
-PPC4xx_tlb_flush(unsigned long va, int pid)
+PPC4xx_dtlb_miss(struct pt_regs *regs)
{
- unsigned long i, tag, flags, found = 1, opid;
-
- save_flags(flags);
- cli();
+ unsigned long addr = mfspr(SPRN_DEAR);
+ int write = mfspr(SPRN_ESR) & ESR_DST;
- opid = mfspr(SPRN_PID);
- mtspr(SPRN_PID, pid);
-
- asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (i), "=r" (found) : "r" (va));
-
- if (found && pin_table[i].e_pinned == 0) {
- asm("tlbre %0,%1,0" : "=r" (tag) : "r" (i));
- tag &= ~ TLB_VALID;
- asm("tlbwe %0,%1,0" : : "r" (tag), "r" (i));
+ if (PPC4xx_tlb_miss(regs, addr, write) < 0) {
+ sti();
+ do_page_fault(regs, addr, write);
+ cli();
}
-
- mtspr(SPRN_PID, opid);
-
- restore_flags(flags);
+
}
-#if 0
/*
- * TLB miss handling code.
+ * PPC4xx_itlb_miss()
+ *
+ * Description:
+ *   This routine handles an instruction TLB miss exception. It tries
+ *   to load a TLB entry from the Linux page tables; if that fails,
+ *   the fault is passed on to do_page_fault().
+ *
+ * Input(s):
+ *   regs - Register frame at the time of the exception.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
*/
+void
+PPC4xx_itlb_miss(struct pt_regs *regs)
+{
+ unsigned long addr = regs->nip;
+
+ if (PPC4xx_tlb_miss(regs, addr, 0) < 0) {
+ sti();
+ do_page_fault(regs, addr, 0);
+ cli();
+ }
+}
/*
- * Handle TLB faults. We should push this back to assembly code eventually.
- * Caller is responsible for turning off interrupts ...
+ * PPC4xx_tlb_pin()
+ *
+ * Description:
+ *   This routine pins the translation of virtual address va to
+ *   physical address pa into the next available pinned TLB entry so
+ *   that it is never replaced or flushed.
+ *
+ * Input(s):
+ *   va     - Virtual address of the mapping.
+ *   pa     - Physical address of the mapping.
+ *   pagesz - Page size of the mapping.
+ *   cache  - Non-zero if the mapping is cacheable.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
*/
-static inline void
-tlbDropin(unsigned long tlbhi, unsigned long tlblo) {
- /*
- * Avoid the divide at the slight cost of a little too
- * much emphasis on the last few entries.
- */
- unsigned long rand = mfspr(SPRN_TBLO);
- rand &= 0x3f;
- rand += NTLB_WIRED;
- if (rand >= NTLB)
- rand -= NTLB_WIRED;
-
- asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (rand));
- asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (rand));
- asm("isync;sync");
-}
+void
+PPC4xx_tlb_pin(unsigned long va, unsigned long pa, int pagesz, int cache)
+{
+ unsigned long tag, data;
+ unsigned long opid;
-static inline void
-mkTlbEntry(unsigned long addr, pte_t *pte) {
- unsigned long tlbhi;
- unsigned long tlblo;
- int found = 1;
- int idx;
+ if (pinned >= PPC4XX_TLB_SIZE)
+ return;
- /*
- * Construct the TLB entry.
- */
- tlbhi = addr & ~(PAGE_SIZE-1);
- tlblo = virt_to_phys(pte_page(*pte)) & TLBLO_RPN;
- if (pte_val(*pte) & _PAGE_HWWRITE)
- tlblo |= TLBLO_WR;
- if (pte_val(*pte) & _PAGE_NO_CACHE)
- tlblo |= TLBLO_I;
- tlblo |= TLBLO_EX;
- if (addr < KERNELBASE)
- tlblo |= TLBLO_Z_USER;
- tlbhi |= TLBHI_PGSZ_4K;
- tlbhi |= TLBHI_VALID;
+ opid = mfspr(SPRN_PID);
+ mtspr(SPRN_PID, 0);
- /*
- * See if a match already exists in the TLB.
- */
- asm("tlbsx. %0,0,%2;beq 1f;li %1,0;1:" : "=r" (idx), "=r" (found) : "r" (tlbhi));
- if (found) {
- /*
- * Found an existing entry. Just reuse the index.
- */
- asm("tlbwe %0,%1,0" : : "r" (tlbhi), "r" (idx));
- asm("tlbwe %0,%1,1" : : "r" (tlblo), "r" (idx));
- }
- else {
- /*
- * Do the more expensive operation
- */
- tlbDropin(tlbhi, tlblo);
- }
+ data = (pa & TLB_RPN_MASK) | TLB_WR;
+
+ if (cache)
+ data |= (TLB_EX);
+ else
+ data |= (TLB_G | TLB_I);
+
+ tag = (va & TLB_EPN_MASK) | TLB_VALID | pagesz;
+
+ PPC4xx_tlb_write(tag, data, pinned++);
+
+ mtspr(SPRN_PID, opid);
+ return;
}
/*
- * Mainline of the TLB miss handler. The above inline routines should fold into
- * this one, eliminating most function call overhead.
+ * PPC4xx_tlb_unpin()
+ *
+ * Description:
+ *   This routine is intended to unpin a previously pinned TLB entry;
+ *   it is not yet implemented.
+ *
+ * Input(s):
+ *   va   - Virtual address of the mapping.
+ *   pa   - Physical address of the mapping.
+ *   size - Page size of the mapping.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
*/
-#ifdef TLBMISS_DEBUG
-volatile unsigned long miss_start;
-volatile unsigned long miss_end;
-#endif
+void
+PPC4xx_tlb_unpin(unsigned long va, unsigned long pa, int size)
+{
+ /* XXX - To be implemented. */
+}
-static inline int tlbMiss(struct pt_regs *regs, unsigned long badaddr, int wasWrite)
+/*
+ * PPC4xx_tlb_update()
+ *
+ * Description:
+ *   This routine builds a 4xx hardware TLB entry from the Linux PTE
+ *   for addr and writes it into the TLB, reusing a matching entry if
+ *   one exists or replacing a pseudo-randomly chosen non-pinned entry
+ *   otherwise.
+ *
+ * Input(s):
+ *   addr - Virtual address which missed in the TLB.
+ *   pte  - Linux PTE describing the mapping for addr.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   N/A
+ */
+static inline void
+PPC4xx_tlb_update(unsigned long addr, pte_t *pte)
{
- int spid, ospid;
- struct mm_struct *mm;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
-
- if (!user_mode(regs) && (badaddr >= KERNELBASE)) {
- mm = task[0]->mm;
- spid = 0;
-#ifdef TLBMISS_DEBUG
- miss_start = 0;
-#endif
- }
- else {
- mm = current->mm;
- spid = mfspr(SPRN_PID);
-#ifdef TLBMISS_DEBUG
- miss_start = 1;
-#endif
- }
-#ifdef TLBMISS_DEBUG
- store_cache_range((unsigned long)&miss_start, sizeof(miss_start));
-#endif
+ unsigned long data, tag, rand;
+ int i, found = 1;
- pgd = pgd_offset(mm, badaddr);
- if (pgd_none(*pgd))
- goto NOGOOD;
-
- pmd = pmd_offset(pgd, badaddr);
- if (pmd_none(*pmd))
- goto NOGOOD;
-
- pte = pte_offset(pmd, badaddr);
- if (pte_none(*pte))
- goto NOGOOD;
- if (!pte_present(*pte))
- goto NOGOOD;
-#if 1
- prohibit_if_guarded(badaddr, sizeof(int));
-#endif
- if (wasWrite) {
- if (!pte_write(*pte)) {
- goto NOGOOD;
- }
- set_pte(pte, pte_mkdirty(*pte));
- }
- set_pte(pte, pte_mkyoung(*pte));
+ /* Construct the hardware TLB entry from the Linux-style PTE */
- ospid = mfspr(SPRN_PID);
- mtspr(SPRN_PID, spid);
- mkTlbEntry(badaddr, pte);
- mtspr(SPRN_PID, ospid);
+ tag = (addr & PAGE_MASK) | TLB_VALID | TLB_PAGESZ(PAGESZ_4K);
+ data = (pte_val(*pte) & PAGE_MASK) | TLB_EX | TLB_WR;
-#ifdef TLBMISS_DEBUG
- miss_end = 0;
- store_cache_range((unsigned long)&miss_end, sizeof(miss_end));
+#if 0
+ if (pte_val(*pte) & _PAGE_HWWRITE)
+ data |= TLB_WR;
#endif
- return 0;
-NOGOOD:
-#ifdef TLBMISS_DEBUG
- miss_end = 1;
- store_cache_range((unsigned long)&miss_end, sizeof(miss_end));
-#endif
- return 1;
-}
+ if (pte_val(*pte) & _PAGE_NO_CACHE)
+ data |= TLB_I;
-/*
- * End TLB miss handling code.
- */
-/* ---------- */
+ if (pte_val(*pte) & _PAGE_GUARDED)
+ data |= TLB_G;
-/*
- * Used to flush the TLB if the page fault handler decides to change
- * something.
- */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) {
- int spid;
- unsigned long flags;
+ if (addr < KERNELBASE)
+ data |= TLB_ZSEL(1);
- save_flags(flags);
- cli();
+ /* Attempt to match the new tag to an existing entry in the TLB. */
- if (addr >= KERNELBASE)
- spid = 0;
- else
- spid = vma->vm_mm->context;
- tlbFlush1(addr, spid);
+ asm("tlbsx. %0,0,%2;"
+ "beq 1f;"
+ "li %1,0;1:" : "=r" (i), "=r" (found) : "r" (tag));
- restore_flags(flags);
+ /*
+ * If we found a match for the tag, reuse the entry index and update
+ * the tag and data portions. Otherwise, we did not find a match. Use
+ * the low-order bits of the lower time base register as a pseudo-random
+ * index into the TLB and replace the entry at that index.
+ */
+
+ if (found) {
+ PPC4xx_tlb_write(tag, data, i);
+ } else {
+ rand = mfspr(SPRN_TBLO) & (PPC4XX_TLB_SIZE - 1);
+ rand += pinned;
+ if (rand >= PPC4XX_TLB_SIZE)
+ rand -= pinned;
+
+ PPC4xx_tlb_write(tag, data, rand);
+ asm("isync;sync");
+ }
}
/*
- * Given a virtual address in the current address space, make
- * sure the associated physical page is present in memory,
- * and if the data is to be modified, that any copy-on-write
- * actions have taken place.
+ * PPC4xx_tlb_miss()
+ *
+ * Description:
+ *   This routine walks the Linux page tables for addr and, if a valid
+ *   (and writable, when required) PTE is found, marks it young and
+ *   dirty as appropriate and loads it into the TLB.
+ *
+ * Input(s):
+ *   regs  - Register frame at the time of the exception.
+ *   addr  - Virtual address which missed in the TLB.
+ *   write - Non-zero if the access was a store.
+ *
+ * Output(s):
+ *   N/A
+ *
+ * Returns:
+ *   0 if a TLB entry was loaded, -1 if no valid translation was found
+ *   and the caller must invoke the page fault handler.
*/
-unsigned long make_page_present(unsigned long p, int rw) {
+static int
+PPC4xx_tlb_miss(struct pt_regs *regs, unsigned long addr, int write)
+{
+ unsigned long spid, ospid;
+ struct mm_struct *mm;
+ pgd_t *pgd;
+ pmd_t *pmd;
pte_t *pte;
- char c;
- get_user(c, (char *) p);
+ if (!user_mode(regs) && (addr >= KERNELBASE)) {
+ mm = &init_mm;
+ spid = 0;
+ } else {
+ mm = current->mm;
+ spid = mfspr(SPRN_PID);
+ }
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none(*pgd))
+ goto bad;
+
+ pmd = pmd_offset(pgd, addr);
+ if (pmd_none(*pmd))
+ goto bad;
- pte = findPTE(current->mm, p);
+ pte = pte_offset(pmd, addr);
if (pte_none(*pte) || !pte_present(*pte))
- debug("make_page_present didn't load page", 0);
-
- if (rw) {
- /*
- * You have to write-touch the page, so that
- * zero-filled pages are forced to be copied
- * rather than still pointing at the zero
- * page.
- */
- extern void tlbFlush1(unsigned long, int);
- tlbFlush1(p, get_context());
- put_user(c, (char *) p);
- if (!pte_write(*pte))
- debug("make_page_present didn't make page writable", 0);
-
- tlbFlush1(p, get_context());
- }
- return pte_page(*pte);
-}
+ goto bad;
-void DataTLBMissException(struct pt_regs *regs)
-{
- unsigned long badaddr = mfspr(SPRN_DEAR);
- int wasWrite = mfspr(SPRN_ESR) & 0x800000;
- if (tlbMiss(regs, badaddr, wasWrite)) {
- sti();
- do_page_fault(regs, badaddr, wasWrite);
- cli();
- }
-}
+ if (write) {
+ if (!pte_write(*pte))
+ goto bad;
-void InstructionTLBMissException(struct pt_regs *regs)
-{
- if (!current) {
- debug("ITLB Miss with no current task", regs);
- sti();
- bad_page_fault(regs, regs->nip);
- cli();
- return;
- }
- if (tlbMiss(regs, regs->nip, 0)) {
- sti();
- do_page_fault(regs, regs->nip, 0);
- cli();
- }
-}
+ set_pte(pte, pte_mkdirty(*pte));
+ }
+ set_pte(pte, pte_mkyoung(*pte));
-void DataPageFault(struct pt_regs *regs)
-{
- unsigned long badaddr = mfspr(SPRN_DEAR);
- int wasWrite = mfspr(SPRN_ESR) & 0x800000;
- sti();
- do_page_fault(regs, badaddr, wasWrite);
- cli();
-}
+ ospid = mfspr(SPRN_PID);
+ mtspr(SPRN_PID, spid);
+ PPC4xx_tlb_update(addr, pte);
+ mtspr(SPRN_PID, ospid);
-void InstructionPageFault(struct pt_regs *regs)
-{
- if (!current) {
- debug("ITLB fault with no current task", regs);
- sti();
- bad_page_fault(regs, regs->nip);
- cli();
- return;
- }
- sti();
- do_page_fault(regs, regs->nip, 0);
- cli();
+ return (0);
+bad:
+ return (-1);
}
-#endif
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index c558ef051..216527e34 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -107,7 +107,6 @@ unsigned long *oak_find_end_of_memory(void);
static void mapin_ram(void);
void map_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *,struct pt_regs *,long);
-extern void show_net_buffers(void);
struct mem_pieces phys_mem;
@@ -281,9 +280,6 @@ void show_mem(void)
printk("%d pages swap cached\n",cached);
printk("%d pages in page table cache\n",(int)pgtable_cache_size);
show_buffers();
-#ifdef CONFIG_NET
- show_net_buffers();
-#endif
printk("%-8s %3s %8s %8s %8s %9s %8s", "Process", "Pid",
"Ctx", "Ctx<<4", "Last Sys", "pc", "task");
#ifdef __SMP__
@@ -643,7 +639,9 @@ void __init setbat(int index, unsigned long virt, unsigned long phys,
wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
bat[1].word[1] = phys | wimgxpp;
+#ifndef CONFIG_KGDB /* want user access for breakpoints */
if (flags & _PAGE_USER)
+#endif
bat[1].bat.batu.vp = 1;
if (flags & _PAGE_GUARDED) {
/* G bit must be zero in IBATs */
@@ -732,6 +730,10 @@ static void __init mapin_ram(void)
* don't get ASID compares on kernel space.
*/
f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED;
+#ifdef CONFIG_KGDB
+ /* Allows stub to set breakpoints everywhere */
+ f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
+#else
if ((char *) v < _stext || (char *) v >= etext)
f |= _PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE;
#ifndef CONFIG_8xx
@@ -740,6 +742,7 @@ static void __init mapin_ram(void)
forces R/W kernel access */
f |= _PAGE_USER;
#endif /* CONFIG_8xx */
+#endif /* CONFIG_KGDB */
map_page(v, p, f);
v += PAGE_SIZE;
p += PAGE_SIZE;
@@ -844,6 +847,8 @@ void free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
+extern boot_infos_t *disp_bi;
+
/*
* Do very early mm setup such as finding the size of memory
* and setting up the hash table.
@@ -855,25 +860,45 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void __init
MMU_init(void)
{
+ /*
+ * The Zone Protection Register (ZPR) defines how protection will
+ * be applied to every page which is a member of a given zone. At
+ * present, we utilize only two of the 4xx's zones. The first, zone
+ * 0, is set at '00b and only allows access in supervisor-mode based
+ * on the EX and WR bits. No user-mode access is allowed. The second,
+ * zone 1, is set at '10b and in supervisor-mode allows access
+ * without regard to the EX and WR bits. In user-mode, access is
+ * allowed based on the EX and WR bits.
+ */
+
+ mtspr(SPRN_ZPR, 0x2aaaaaaa);
+
+ /* Hardwire any TLB entries necessary here. */
+
PPC4xx_tlb_pin(KERNELBASE, 0, TLB_PAGESZ(PAGESZ_16M), 1);
- PPC4xx_tlb_pin(OAKNET_IO_BASE, OAKNET_IO_BASE, TLB_PAGESZ(PAGESZ_4K), 0);
- end_of_DRAM = oak_find_end_of_memory();
- /* Map in all of RAM starting at KERNELBASE */
+ /*
+ * Find the top of physical memory and map all of it in starting
+ * at KERNELBASE.
+ */
+ end_of_DRAM = oak_find_end_of_memory();
mapin_ram();
- /* Zone 0 - kernel (above 0x80000000), zone 1 - user */
+ /*
+ * Set up the real-mode cache parameters for the exception vector
+ * handlers (which are run in real-mode).
+ */
- mtspr(SPRN_ZPR, 0x2aaaaaaa);
- mtspr(SPRN_DCWR, 0x00000000); /* all caching is write-back */
+ mtspr(SPRN_DCWR, 0x00000000); /* All caching is write-back */
- /* Cache 128MB of space starting at KERNELBASE. */
+ /*
+ * Cache instruction and data space where the exception
+ * vectors and the kernel live in real-mode.
+ */
- mtspr(SPRN_DCCR, 0x00000000);
- /* flush_instruction_cache(); XXX */
- mtspr(SPRN_ICCR, 0x00000000);
-
+ mtspr(SPRN_DCCR, 0x80000000); /* 128 MB of data space at 0x0. */
+ mtspr(SPRN_ICCR, 0x80000000); /* 128 MB of instr. space at 0x0. */
}
#else
void __init MMU_init(void)
@@ -895,7 +920,11 @@ void __init MMU_init(void)
if ( ppc_md.progress ) ppc_md.progress("MMU:hash init", 0x300);
hash_init();
+#ifdef CONFIG_PPC64
+ _SDR1 = 0; /* temporary hack to just use bats -- Cort */
+#else
_SDR1 = __pa(Hash) | (Hash_mask >> 10);
+#endif
ioremap_base = 0xf8000000;
if ( ppc_md.progress ) ppc_md.progress("MMU:mapin", 0x301);
@@ -916,8 +945,14 @@ void __init MMU_init(void)
break;
case _MACH_chrp:
setbat(0, 0xf8000000, 0xf8000000, 0x08000000, IO_PAGE);
+#ifdef CONFIG_PPC64
+ /* temporary hack to get working until page tables are stable -- Cort*/
+ setbat(1, 0x80000000, 0xc0000000, 0x10000000, IO_PAGE);
+ setbat(3, 0xd0000000, 0xd0000000, 0x10000000, IO_PAGE);
+#else
setbat(1, 0x80000000, 0x80000000, 0x10000000, IO_PAGE);
setbat(3, 0x90000000, 0x90000000, 0x10000000, IO_PAGE);
+#endif
break;
case _MACH_Pmac:
#if 0
@@ -929,6 +964,10 @@ void __init MMU_init(void)
setbat(0, base, base, 0x100000, IO_PAGE);
}
#endif
+#if 0
+ setbat(0, disp_bi->dispDeviceBase, disp_bi->dispDeviceBase, 0x100000, IO_PAGE);
+ disp_bi->logicalDisplayBase = disp_bi->dispDeviceBase;
+#endif
ioremap_base = 0xf0000000;
break;
case _MACH_apus:
@@ -1087,6 +1126,8 @@ void __init paging_init(void)
void __init mem_init(void)
{
+ extern char *sysmap;
+ extern unsigned long sysmap_size;
unsigned long addr;
int codepages = 0;
int datapages = 0;
@@ -1116,6 +1157,11 @@ void __init mem_init(void)
addr += PAGE_SIZE)
SetPageReserved(mem_map + MAP_NR(addr));
#endif /* defined(CONFIG_CHRP) || defined(CONFIG_ALL_PPC) */
+ if ( sysmap_size )
+ for (addr = (unsigned long)sysmap;
+ addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
+ addr += PAGE_SIZE)
+ SetPageReserved(mem_map + MAP_NR(addr));
for (addr = PAGE_OFFSET; addr < (unsigned long)end_of_DRAM;
addr += PAGE_SIZE) {
@@ -1131,10 +1177,8 @@ void __init mem_init(void)
}
printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08x,%08lx]\n",
- (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
- codepages << (PAGE_SHIFT-10),
- datapages << (PAGE_SHIFT-10),
- initpages << (PAGE_SHIFT-10),
+ (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
+ codepages, datapages, initpages,
PAGE_OFFSET, (unsigned long) end_of_DRAM);
mem_init_done = 1;
}
@@ -1153,7 +1197,7 @@ unsigned long __init *pmac_find_end_of_memory(void)
unsigned long a, total;
/* max amount of RAM we allow -- Cort */
-#define RAM_LIMIT (768<<20)
+#define RAM_LIMIT (64<<20)
memory_node = find_devices("memory");
if (memory_node == NULL) {
@@ -1384,7 +1428,7 @@ static void __init hash_init(void)
{
if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
- __clear_user(Hash, Hash_size);
+ /*__clear_user(Hash, Hash_size);*/
/*
* Patch up the instructions in head.S:hash_page