summaryrefslogtreecommitdiffstats
path: root/arch/sparc/mm/sun4c.c
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-02-05 06:47:02 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-02-05 06:47:02 +0000
commit99a7e12f34b3661a0d1354eef83a0eef4df5e34c (patch)
tree3560aca9ca86792f9ab7bd87861ea143a1b3c7a3 /arch/sparc/mm/sun4c.c
parente73a04659c0b8cdee4dd40e58630e2cf63afb316 (diff)
Merge with Linux 2.3.38.
Diffstat (limited to 'arch/sparc/mm/sun4c.c')
-rw-r--r--arch/sparc/mm/sun4c.c1285
1 files changed, 542 insertions, 743 deletions
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index f91ab1ce6..0530e635f 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1,19 +1,24 @@
-/* $Id: sun4c.c,v 1.176 1999/08/31 06:54:42 davem Exp $
+/* $Id: sun4c.c,v 1.182 1999/12/27 06:30:04 anton Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
- * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1997,99 Anton Blanchard (anton@progsoc.uts.edu.au)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#define NR_TASK_BUCKETS 512
+
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <asm/scatterlist.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/idprom.h>
@@ -27,42 +32,19 @@
#include <asm/mmu_context.h>
#include <asm/sun4paddr.h>
-/* TODO: Make it such that interrupt handlers cannot dick with
- * the user segment lists, most of the cli/sti pairs can
- * disappear once that is taken care of.
- */
-
-/* XXX Ok the real performance win, I figure, will be to use a combined hashing
- * XXX and bitmap scheme to keep track of what we have mapped where. The whole
- * XXX incentive is to make it such that the range flushes can be serviced
- * XXX always in near constant time. --DaveM
+/* Because of our dynamic kernel TLB miss strategy, and how
+ * our DVMA mapping allocation works, you _MUST_:
+ *
+ * 1) Disable interrupts _and_ not touch any dynamic kernel
+ * memory while messing with kernel MMU state. By
+ * dynamic memory I mean any object which is not in
+ * the kernel image itself or a task_struct (both of
+ * which are locked into the MMU).
+ * 2) Disable interrupts while messing with user MMU state.
*/
extern int num_segmaps, num_contexts;
-/* Define this to get extremely anal debugging, undefine for performance. */
-/* #define DEBUG_SUN4C_MM */
-
-#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
-
-/* This is used in many routines below. */
-#define FUW_INLINE do { \
- register int ctr asm("g5"); \
- ctr = 0; \
- __asm__ __volatile__("\n" \
- "1: ld [%%g6 + %2], %%g4 ! flush user windows\n" \
- " orcc %%g0, %%g4, %%g0\n" \
- " add %0, 1, %0\n" \
- " bne 1b\n" \
- " save %%sp, -64, %%sp\n" \
- "2: subcc %0, 1, %0\n" \
- " bne 2b\n" \
- " restore %%g0, %%g0, %%g0\n" \
- : "=&r" (ctr) \
- : "0" (ctr), "i" (UWINMASK_OFFSET) \
- : "g4", "cc"); \
-} while(0);
-
#ifdef CONFIG_SUN4
#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
#else
@@ -82,58 +64,21 @@ extern int num_segmaps, num_contexts;
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
-
-#define KGPROF_PROFILING 0
-#if KGPROF_PROFILING
-#define KGPROF_DEPTH 3 /* this needs to match the code below */
-#define KGPROF_SIZE 100
-static struct {
- unsigned addr[KGPROF_DEPTH];
- unsigned count;
-} kgprof_counters[KGPROF_SIZE];
-
-/* just call this function from whatever function you think needs it then
- look at /proc/cpuinfo to see where the function is being called from
- and how often. This gives a type of "kernel gprof" */
-#define NEXT_PROF(prev,lvl) (prev>PAGE_OFFSET?__builtin_return_address(lvl):0)
-static inline void kgprof_profile(void)
-{
- unsigned ret[KGPROF_DEPTH];
- int i,j;
- /* you can't use a variable argument to __builtin_return_address() */
- ret[0] = (unsigned)__builtin_return_address(0);
- ret[1] = (unsigned)NEXT_PROF(ret[0],1);
- ret[2] = (unsigned)NEXT_PROF(ret[1],2);
-
- for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
- for (j=0;j<KGPROF_DEPTH;j++)
- if (ret[j] != kgprof_counters[i].addr[j]) break;
- if (j==KGPROF_DEPTH) break;
- }
- if (i<KGPROF_SIZE) {
- for (j=0;j<KGPROF_DEPTH;j++)
- kgprof_counters[i].addr[j] = ret[j];
- kgprof_counters[i].count++;
- }
-}
-#endif
-
-
/* Flushing the cache. */
struct sun4c_vac_props sun4c_vacinfo;
-static int ctxflushes, segflushes, pageflushes;
unsigned long sun4c_kernel_faults;
/* convert a virtual address to a physical address and vice
- versa. Easy on the 4c */
+ * versa. Easy on the 4c
+ */
static unsigned long sun4c_v2p(unsigned long vaddr)
{
- return(vaddr - PAGE_OFFSET);
+ return (vaddr - PAGE_OFFSET);
}
static unsigned long sun4c_p2v(unsigned long vaddr)
{
- return(vaddr + PAGE_OFFSET);
+ return (vaddr + PAGE_OFFSET);
}
@@ -142,44 +87,64 @@ void sun4c_flush_all(void)
{
unsigned long begin, end;
- if(sun4c_vacinfo.on)
+ if (sun4c_vacinfo.on)
panic("SUN4C: AIEEE, trying to invalidate vac while"
" it is on.");
/* Clear 'valid' bit in all cache line tags */
begin = AC_CACHETAGS;
end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
- while(begin < end) {
+ while (begin < end) {
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (begin), "i" (ASI_CONTROL));
begin += sun4c_vacinfo.linesize;
}
}
-/* Context level flush. */
-static inline void sun4c_flush_context_hw(void)
+static __inline__ void sun4c_flush_context_hw(void)
{
unsigned long end = SUN4C_VAC_SIZE;
- unsigned pgsz = PAGE_SIZE;
- ctxflushes++;
- __asm__ __volatile__("
-1: subcc %0, %2, %0
- bg 1b
- sta %%g0, [%0] %3
- nop; nop; nop; ! Weitek hwbug
-" : "=&r" (end)
- : "0" (end), "r" (pgsz), "i" (ASI_HWFLUSHCONTEXT)
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%0] %2"
+ : "=&r" (end)
+ : "0" (end), "i" (ASI_HWFLUSHCONTEXT)
: "cc");
}
+/* Must be called minimally with IRQs disabled. */
+static void sun4c_flush_segment_hw(unsigned long addr)
+{
+ if (sun4c_get_segmap(addr) != invalid_segment) {
+ unsigned long vac_size = SUN4C_VAC_SIZE;
+
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%2 + %0] %3"
+ : "=&r" (vac_size)
+ : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
+ : "cc");
+ }
+}
+
+/* Must be called minimally with interrupts disabled. */
+static __inline__ void sun4c_flush_page_hw(unsigned long addr)
+{
+ addr &= PAGE_MASK;
+ if ((int)sun4c_get_pte(addr) < 0)
+ __asm__ __volatile__("sta %%g0, [%0] %1"
+ : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
+}
+
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_context_sw(void)
{
unsigned long nbytes = SUN4C_VAC_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- ctxflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -203,72 +168,13 @@ static void sun4c_flush_context_sw(void)
: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
}
-/* Scrape the segment starting at ADDR from the virtual cache. */
-static inline void sun4c_flush_segment(unsigned long addr)
-{
- if(sun4c_get_segmap(addr) == invalid_segment)
- return;
-
- segflushes++;
- if(sun4c_vacinfo.do_hwflushes) {
- unsigned long end = (addr + SUN4C_VAC_SIZE);
-
- for( ; addr < end; addr += PAGE_SIZE)
- __asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
- "r" (addr), "i" (ASI_HWFLUSHSEG));
- } else {
- unsigned long nbytes = SUN4C_VAC_SIZE;
- unsigned long lsize = sun4c_vacinfo.linesize;
-
- __asm__ __volatile__("add %2, %2, %%g1\n\t"
- "add %2, %%g1, %%g2\n\t"
- "add %2, %%g2, %%g3\n\t"
- "add %2, %%g3, %%g4\n\t"
- "add %2, %%g4, %%g5\n\t"
- "add %2, %%g5, %%o4\n\t"
- "add %2, %%o4, %%o5\n"
- "1:\n\t"
- "subcc %1, %%o5, %1\n\t"
- "sta %%g0, [%0] %6\n\t"
- "sta %%g0, [%0 + %2] %6\n\t"
- "sta %%g0, [%0 + %%g1] %6\n\t"
- "sta %%g0, [%0 + %%g2] %6\n\t"
- "sta %%g0, [%0 + %%g3] %6\n\t"
- "sta %%g0, [%0 + %%g4] %6\n\t"
- "sta %%g0, [%0 + %%g5] %6\n\t"
- "sta %%g0, [%0 + %%o4] %6\n\t"
- "bg 1b\n\t"
- " add %0, %%o5, %0\n\t"
- : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
- : "0" (addr), "1" (nbytes), "2" (lsize),
- "i" (ASI_FLUSHSEG)
- : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
- }
-}
-
-/* Call this version when you know hardware flushes are available. */
-static inline void sun4c_flush_segment_hw(unsigned long addr)
-{
- if(sun4c_get_segmap(addr) != invalid_segment) {
- unsigned long end;
-
- segflushes++;
- for(end = addr + SUN4C_VAC_SIZE; addr < end; addr += PAGE_SIZE)
- __asm__ __volatile__("sta %%g0, [%0] %1"
- : : "r" (addr), "i" (ASI_HWFLUSHSEG));
- /* Weitek POWER-UP hwbug workaround. */
- __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug");
- }
-}
-
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_segment_sw(unsigned long addr)
{
- if(sun4c_get_segmap(addr) != invalid_segment) {
+ if (sun4c_get_segmap(addr) != invalid_segment) {
unsigned long nbytes = SUN4C_VAC_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- segflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -300,12 +206,11 @@ static void sun4c_flush_page(unsigned long addr)
{
addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
- _SUN4C_PAGE_VALID)
+ if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
+ _SUN4C_PAGE_VALID)
return;
- pageflushes++;
- if(sun4c_vacinfo.do_hwflushes) {
+ if (sun4c_vacinfo.do_hwflushes) {
__asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
"r" (addr), "i" (ASI_HWFLUSHPAGE));
} else {
@@ -338,30 +243,15 @@ static void sun4c_flush_page(unsigned long addr)
}
}
-/* Again, hw-only and sw-only cache page-level flush variants. */
-static inline void sun4c_flush_page_hw(unsigned long addr)
-{
- addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
- _SUN4C_PAGE_VALID) {
- pageflushes++;
- __asm__ __volatile__("sta %%g0, [%0] %1"
- : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
- /* Weitek POWER-UP hwbug workaround. */
- __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug");
- }
-}
-
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_page_sw(unsigned long addr)
{
addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
- _SUN4C_PAGE_VALID) {
+ if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
+ _SUN4C_PAGE_VALID) {
unsigned long left = PAGE_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- pageflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -411,7 +301,7 @@ static inline void sun4c_init_clean_segmap(unsigned char pseg)
unsigned long vaddr;
sun4c_put_segmap(0, pseg);
- for(vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr+=PAGE_SIZE)
+ for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
sun4c_put_pte(vaddr, 0);
sun4c_put_segmap(0, invalid_segment);
}
@@ -423,15 +313,15 @@ static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
savectx = sun4c_get_context();
kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
- for(vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
}
sun4c_set_context(savectx);
@@ -442,7 +332,7 @@ void __init sun4c_probe_vac(void)
sun4c_disable_vac();
if (ARCH_SUN4) {
- switch(idprom->id_machtype) {
+ switch (idprom->id_machtype) {
case (SM_SUN4|SM_4_110):
sun4c_vacinfo.type = NONE;
@@ -477,12 +367,12 @@ void __init sun4c_probe_vac(void)
default:
prom_printf("Cannot initialize VAC - wierd sun4 model idprom->id_machtype = %d", idprom->id_machtype);
prom_halt();
- }
+ };
} else {
sun4c_vacinfo.type = WRITE_THROUGH;
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
/* PROM on SS1 lacks this info, to be super safe we
* hard code it here since this arch is cast in stone.
*/
@@ -497,7 +387,7 @@ void __init sun4c_probe_vac(void)
sun4c_vacinfo.do_hwflushes =
prom_getintdefault(prom_root_node, "vac-hwflush", 0);
- if(sun4c_vacinfo.do_hwflushes == 0)
+ if (sun4c_vacinfo.do_hwflushes == 0)
sun4c_vacinfo.do_hwflushes =
prom_getintdefault(prom_root_node, "vac_hwflush", 0);
@@ -509,7 +399,7 @@ void __init sun4c_probe_vac(void)
sun4c_vacinfo.num_lines =
(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
- switch(sun4c_vacinfo.linesize) {
+ switch (sun4c_vacinfo.linesize) {
case 16:
sun4c_vacinfo.log2lsize = 4;
break;
@@ -566,7 +456,7 @@ static void patch_kernel_fault_handler(void)
prom_printf("Unhandled number of segmaps: %d\n",
num_segmaps);
prom_halt();
- }
+ };
switch (num_contexts) {
case 8:
/* Default, nothing to do. */
@@ -574,19 +464,22 @@ static void patch_kernel_fault_handler(void)
case 16:
PATCH_INSN(num_context_patch1_16,
num_context_patch1);
+#if 0
PATCH_INSN(num_context_patch2_16,
num_context_patch2);
+#endif
break;
default:
prom_printf("Unhandled number of contexts: %d\n",
num_contexts);
prom_halt();
- }
- if(sun4c_vacinfo.do_hwflushes != 0) {
+ };
+
+ if (sun4c_vacinfo.do_hwflushes != 0) {
PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
} else {
- switch(sun4c_vacinfo.linesize) {
+ switch (sun4c_vacinfo.linesize) {
case 16:
/* Default, nothing to do. */
break;
@@ -604,7 +497,7 @@ static void patch_kernel_fault_handler(void)
static void __init sun4c_probe_mmu(void)
{
if (ARCH_SUN4) {
- switch(idprom->id_machtype) {
+ switch (idprom->id_machtype) {
case (SM_SUN4|SM_4_110):
prom_printf("No support for 4100 yet\n");
prom_halt();
@@ -631,10 +524,10 @@ static void __init sun4c_probe_mmu(void)
default:
prom_printf("Invalid SUN4 model\n");
prom_halt();
- }
+ };
} else {
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
/* Hardcode these just to be safe, PROM on SS1 does
* not have this info available in the root node.
*/
@@ -658,20 +551,15 @@ void __init sun4c_probe_memerr_reg(void)
struct linux_prom_registers regs[1];
if (ARCH_SUN4) {
- sun4c_memerr_reg = sparc_alloc_io(sun4_memreg_physaddr, 0,
- PAGE_SIZE,
- "memory parity error",
- 0x0, 0);
+ sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE);
} else {
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(prom_root_node, "memory-error");
if (!node)
return;
prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
- sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
- regs[0].reg_size,
- "memory parity error",
- regs[0].which_io, 0);
+ /* hmm I think regs[0].which_io is zero here anyways */
+ sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
}
}
@@ -679,10 +567,10 @@ static inline void sun4c_init_ss2_cache_bug(void)
{
extern unsigned long start;
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
- (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
+ (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
/* Whee.. */
printk("SS2 cache bug detected, uncaching trap table page\n");
sun4c_flush_page((unsigned int) &start);
@@ -692,17 +580,13 @@ static inline void sun4c_init_ss2_cache_bug(void)
}
/* Addr is always aligned on a page boundary for us already. */
-static void sun4c_map_dma_area(unsigned long addr, int len)
+static void sun4c_map_dma_area(unsigned long va, u32 addr, int len)
{
unsigned long page, end;
end = PAGE_ALIGN((addr + len));
- while(addr < end) {
- page = get_free_page(GFP_KERNEL);
- if(!page) {
- prom_printf("alloc_dvma: Cannot get a dvma page\n");
- prom_halt();
- }
+ while (addr < end) {
+ page = va;
sun4c_flush_page(page);
page -= PAGE_OFFSET;
page >>= PAGE_SHIFT;
@@ -710,9 +594,21 @@ static void sun4c_map_dma_area(unsigned long addr, int len)
_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
sun4c_put_pte(addr, page);
addr += PAGE_SIZE;
+ va += PAGE_SIZE;
}
}
+static void sun4c_unmap_dma_area(unsigned long addr, int len)
+{
+}
+
+static void sun4c_inval_dma_area(unsigned long addr, int len)
+{
+}
+
+static void sun4c_flush_dma_area(unsigned long addr, int len)
+{
+}
/* TLB management. */
@@ -726,6 +622,13 @@ struct sun4c_mmu_entry {
unsigned long vaddr;
unsigned char pseg;
unsigned char locked;
+
+ /* For user mappings only, and completely hidden from kernel
+ * TLB miss code.
+ */
+ unsigned char ctx;
+ struct sun4c_mmu_entry *lru_next;
+ struct sun4c_mmu_entry *lru_prev;
};
static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
@@ -734,12 +637,15 @@ static void __init sun4c_init_mmu_entry_pool(void)
{
int i;
- for(i=0; i < SUN4C_MAX_SEGMAPS; i++) {
+ for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
mmu_entry_pool[i].pseg = i;
mmu_entry_pool[i].next = 0;
mmu_entry_pool[i].prev = 0;
mmu_entry_pool[i].vaddr = 0;
mmu_entry_pool[i].locked = 0;
+ mmu_entry_pool[i].ctx = 0;
+ mmu_entry_pool[i].lru_next = 0;
+ mmu_entry_pool[i].lru_prev = 0;
}
mmu_entry_pool[invalid_segment].locked = 1;
}
@@ -750,8 +656,8 @@ static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
unsigned long start, end;
end = vaddr + SUN4C_REAL_PGDIR_SIZE;
- for(start = vaddr; start < end; start += PAGE_SIZE)
- if(sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
+ for (start = vaddr; start < end; start += PAGE_SIZE)
+ if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
~bits_off);
}
@@ -762,16 +668,16 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
unsigned char pseg, ctx;
#ifdef CONFIG_SUN4
/* sun4/110 and 260 have no kadb. */
- if((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
- (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
+ if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
+ (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
#endif
- for(vaddr = KADB_DEBUGGER_BEGVM;
- vaddr < LINUX_OPPROM_ENDVM;
- vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ for (vaddr = KADB_DEBUGGER_BEGVM;
+ vaddr < LINUX_OPPROM_ENDVM;
+ vaddr += SUN4C_REAL_PGDIR_SIZE) {
pseg = sun4c_get_segmap(vaddr);
- if(pseg != invalid_segment) {
+ if (pseg != invalid_segment) {
mmu_entry_pool[pseg].locked = 1;
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, vaddr, pseg);
fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
}
@@ -779,10 +685,10 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
#ifdef CONFIG_SUN4
}
#endif
- for(vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
pseg = sun4c_get_segmap(vaddr);
mmu_entry_pool[pseg].locked = 1;
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, vaddr, pseg);
fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
}
@@ -792,13 +698,13 @@ static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
{
int i, ctx;
- while(start < end) {
- for(i=0; i < invalid_segment; i++)
- if(!mmu_entry_pool[i].locked)
+ while (start < end) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
break;
mmu_entry_pool[i].locked = 1;
sun4c_init_clean_segmap(i);
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
start += SUN4C_REAL_PGDIR_SIZE;
}
@@ -815,13 +721,15 @@ struct sun4c_mmu_ring {
static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
+static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */
struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
-static inline void sun4c_init_rings(unsigned long *mempool)
+static inline void sun4c_init_rings(void)
{
int i;
- for(i=0; i<SUN4C_MAX_CONTEXTS; i++) {
+
+ for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
sun4c_context_ring[i].ringhd.next =
sun4c_context_ring[i].ringhd.prev =
&sun4c_context_ring[i].ringhd;
@@ -830,6 +738,9 @@ static inline void sun4c_init_rings(unsigned long *mempool)
sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
&sun4c_ufree_ring.ringhd;
sun4c_ufree_ring.num_entries = 0;
+ sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
+ &sun4c_ulru_ring.ringhd;
+ sun4c_ulru_ring.num_entries = 0;
sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
&sun4c_kernel_ring.ringhd;
sun4c_kernel_ring.num_entries = 0;
@@ -838,8 +749,8 @@ static inline void sun4c_init_rings(unsigned long *mempool)
sun4c_kfree_ring.num_entries = 0;
}
-static inline void add_ring(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static void add_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *head = &ring->ringhd;
@@ -849,49 +760,58 @@ static inline void add_ring(struct sun4c_mmu_ring *ring,
ring->num_entries++;
}
-static inline void add_ring_ordered(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static __inline__ void add_lru(struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+
+ entry->lru_next = head;
+ (entry->lru_prev = head->lru_prev)->lru_next = entry;
+ head->lru_prev = entry;
+}
+
+static void add_ring_ordered(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *head = &ring->ringhd;
unsigned long addr = entry->vaddr;
- if(head->next != &ring->ringhd) {
- while((head->next != &ring->ringhd) && (head->next->vaddr < addr))
- head = head->next;
- }
+ while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
+ head = head->next;
+
entry->prev = head;
(entry->next = head->next)->prev = entry;
head->next = entry;
ring->num_entries++;
+
+ add_lru(entry);
}
-static inline void remove_ring(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static __inline__ void remove_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *next = entry->next;
(next->prev = entry->prev)->next = next;
ring->num_entries--;
-#ifdef DEBUG_SUN4C_MM
- if(ring->num_entries < 0)
- panic("sun4c: Ring num_entries < 0!");
-#endif
}
-static inline void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+static void remove_lru(struct sun4c_mmu_entry *entry)
{
- remove_ring(sun4c_context_ring+ctx, entry);
- add_ring(&sun4c_ufree_ring, entry);
+ struct sun4c_mmu_entry *next = entry->lru_next;
+
+ (next->lru_prev = entry->lru_prev)->lru_next = next;
}
-static inline void assign_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
{
- remove_ring(&sun4c_ufree_ring, entry);
- add_ring_ordered(sun4c_context_ring+ctx, entry);
+ remove_ring(sun4c_context_ring+ctx, entry);
+ remove_lru(entry);
+ add_ring(&sun4c_ufree_ring, entry);
}
-static inline void free_kernel_entry(struct sun4c_mmu_entry *entry,
- struct sun4c_mmu_ring *ring)
+static void free_kernel_entry(struct sun4c_mmu_entry *entry,
+ struct sun4c_mmu_ring *ring)
{
remove_ring(ring, entry);
add_ring(&sun4c_kfree_ring, entry);
@@ -901,9 +821,9 @@ static void __init sun4c_init_fill_kernel_ring(int howmany)
{
int i;
- while(howmany) {
- for(i=0; i < invalid_segment; i++)
- if(!mmu_entry_pool[i].locked)
+ while (howmany) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
break;
mmu_entry_pool[i].locked = 1;
sun4c_init_clean_segmap(i);
@@ -916,54 +836,40 @@ static void __init sun4c_init_fill_user_ring(void)
{
int i;
- for(i=0; i < invalid_segment; i++) {
- if(mmu_entry_pool[i].locked)
+ for (i = 0; i < invalid_segment; i++) {
+ if (mmu_entry_pool[i].locked)
continue;
sun4c_init_clean_segmap(i);
add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
}
}
-static inline void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
+static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
{
int savectx, ctx;
savectx = sun4c_get_context();
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(kentry->vaddr, invalid_segment);
}
sun4c_set_context(savectx);
}
-static inline void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
+static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
{
int savectx, ctx;
savectx = sun4c_get_context();
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(kentry->vaddr, kentry->pseg);
}
sun4c_set_context(savectx);
}
-static inline void sun4c_user_unmap(struct sun4c_mmu_entry *uentry)
-{
- sun4c_put_segmap(uentry->vaddr, invalid_segment);
-}
-
-static inline void sun4c_user_map(struct sun4c_mmu_entry *uentry)
-{
- unsigned long start = uentry->vaddr;
- unsigned long end = start + SUN4C_REAL_PGDIR_SIZE;
-
- sun4c_put_segmap(uentry->vaddr, uentry->pseg);
- while(start < end) {
- sun4c_put_pte(start, 0);
- start += PAGE_SIZE;
- }
-}
+#define sun4c_user_unmap(__entry) \
+ sun4c_put_segmap((__entry)->vaddr, invalid_segment)
static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
@@ -971,11 +877,11 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
+ flush_user_windows();
sun4c_set_context(ctx);
sun4c_flush_context_hw();
do {
@@ -985,7 +891,7 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
free_user_entry(ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -997,11 +903,11 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
+ flush_user_windows();
sun4c_set_context(ctx);
sun4c_flush_context_sw();
do {
@@ -1011,49 +917,31 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx
free_user_entry(ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
}
-static inline void sun4c_demap_one(struct sun4c_mmu_ring *crp, unsigned char ctx)
-{
- /* by using .prev we get a kind of "lru" algorithm */
- struct sun4c_mmu_entry *entry = crp->ringhd.prev;
- unsigned long flags;
- int savectx = sun4c_get_context();
-
-#ifdef DEBUG_SUN4C_MM
- if(entry == &crp->ringhd)
- panic("sun4c_demap_one: Freeing from empty ctx ring.");
-#endif
- FUW_INLINE
- save_and_cli(flags);
- sun4c_set_context(ctx);
- sun4c_flush_segment(entry->vaddr);
- sun4c_user_unmap(entry);
- free_user_entry(ctx, entry);
- sun4c_set_context(savectx);
- restore_flags(flags);
-}
-
static int sun4c_user_taken_entries = 0; /* This is how much we have. */
static int max_user_taken_entries = 0; /* This limits us and prevents deadlock. */
-static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
+static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
{
struct sun4c_mmu_entry *this_entry;
/* If some are free, return first one. */
- if(sun4c_kfree_ring.num_entries) {
+ if (sun4c_kfree_ring.num_entries) {
this_entry = sun4c_kfree_ring.ringhd.next;
return this_entry;
}
/* Else free one up. */
this_entry = sun4c_kernel_ring.ringhd.prev;
- sun4c_flush_segment(this_entry->vaddr);
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(this_entry->vaddr);
+ else
+ sun4c_flush_segment_sw(this_entry->vaddr);
sun4c_kernel_unmap(this_entry);
free_kernel_entry(this_entry, &sun4c_kernel_ring);
this_entry = sun4c_kfree_ring.ringhd.next;
@@ -1061,141 +949,73 @@ static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
return this_entry;
}
-void sun4c_shrink_kernel_ring(void)
-{
- struct sun4c_mmu_entry *entry;
- unsigned long flags;
-
- /* If an interrupt comes in here, we die... */
- save_and_cli(flags);
-
- if (sun4c_user_taken_entries) {
- entry = sun4c_kernel_strategy();
- remove_ring(&sun4c_kfree_ring, entry);
- add_ring(&sun4c_ufree_ring, entry);
- sun4c_user_taken_entries--;
-#if 0
- printk("shrink: ufree= %d, kfree= %d, kernel= %d\n",
- sun4c_ufree_ring.num_entries,
- sun4c_kfree_ring.num_entries,
- sun4c_kernel_ring.num_entries);
-#endif
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_user_taken_entries < 0)
- panic("sun4c_shrink_kernel_ring: taken < 0.");
-#endif
- }
- restore_flags(flags);
-}
-
/* Using this method to free up mmu entries eliminates a lot of
* potential races since we have a kernel that incurs tlb
* replacement faults. There may be performance penalties.
+ *
+ * NOTE: Must be called with interrupts disabled.
*/
-static inline struct sun4c_mmu_entry *sun4c_user_strategy(void)
+static struct sun4c_mmu_entry *sun4c_user_strategy(void)
{
- struct ctx_list *next_one;
- struct sun4c_mmu_ring *rp = 0;
+ struct sun4c_mmu_entry *entry;
unsigned char ctx;
-#ifdef DEBUG_SUN4C_MM
- int lim = num_contexts;
-#endif
+ int savectx;
/* If some are free, return first one. */
- if(sun4c_ufree_ring.num_entries) {
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: num_entries!=0 but ring empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+ if (sun4c_ufree_ring.num_entries) {
+ entry = sun4c_ufree_ring.ringhd.next;
+ goto unlink_out;
}
if (sun4c_user_taken_entries) {
- sun4c_shrink_kernel_ring();
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: kernel shrunk but ufree empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+ entry = sun4c_kernel_strategy();
+ sun4c_user_taken_entries--;
+ goto kunlink_out;
}
- /* Grab one from the LRU context. */
- next_one = ctx_used.next;
- while ((sun4c_context_ring[next_one->ctx_number].num_entries == 0)
-#ifdef DEBUG_SUN4C_MM
- && (--lim >= 0)
-#endif
- )
- next_one = next_one->next;
+ /* Grab from the beginning of the LRU list. */
+ entry = sun4c_ulru_ring.ringhd.lru_next;
+ ctx = entry->ctx;
-#ifdef DEBUG_SUN4C_MM
- if(lim < 0)
- panic("No user segmaps!");
-#endif
+ savectx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(ctx);
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(entry->vaddr);
+ else
+ sun4c_flush_segment_sw(entry->vaddr);
+ sun4c_user_unmap(entry);
+ remove_ring(sun4c_context_ring + ctx, entry);
+ remove_lru(entry);
+ sun4c_set_context(savectx);
- ctx = next_one->ctx_number;
- rp = &sun4c_context_ring[ctx];
+ return entry;
- sun4c_demap_one(rp, ctx);
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: demapped one but ufree empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+unlink_out:
+ remove_ring(&sun4c_ufree_ring, entry);
+ return entry;
+kunlink_out:
+ remove_ring(&sun4c_kfree_ring, entry);
+ return entry;
}
+/* NOTE: Must be called with interrupts disabled. */
void sun4c_grow_kernel_ring(void)
{
struct sun4c_mmu_entry *entry;
-#if 0
- printk("grow: ");
-#endif
-
/* Prevent deadlock condition. */
- if(sun4c_user_taken_entries >= max_user_taken_entries) {
-#if 0
- printk("deadlock avoidance, taken= %d max= %d\n",
- sun4c_user_taken_entries, max_user_taken_entries);
-#endif
+ if (sun4c_user_taken_entries >= max_user_taken_entries)
return;
- }
if (sun4c_ufree_ring.num_entries) {
entry = sun4c_ufree_ring.ringhd.next;
-#ifdef DEBUG_SUN4C_MM
- if(entry == &sun4c_ufree_ring.ringhd)
- panic("\nsun4c_grow_kernel_ring: num_entries!=0, ring empty.");
-#endif
remove_ring(&sun4c_ufree_ring, entry);
add_ring(&sun4c_kfree_ring, entry);
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_user_taken_entries < 0)
- panic("\nsun4c_grow_kernel_ring: taken < 0.");
-#endif
sun4c_user_taken_entries++;
-#if 0
- printk("ufree= %d, kfree= %d, kernel= %d\n",
- sun4c_ufree_ring.num_entries,
- sun4c_kfree_ring.num_entries,
- sun4c_kernel_ring.num_entries);
-#endif
}
}
-static inline void alloc_user_segment(unsigned long address, unsigned char ctx)
-{
- struct sun4c_mmu_entry *entry;
- unsigned long flags;
-
- save_and_cli(flags);
- entry = sun4c_user_strategy();
- entry->vaddr = (address & SUN4C_REAL_PGDIR_MASK);
- assign_user_entry(ctx, entry);
- sun4c_user_map(entry);
- restore_flags(flags);
-}
-
/* This is now a fast in-window trap handler to avoid any and all races. */
static void sun4c_quick_kernel_fault(unsigned long address)
{
@@ -1209,8 +1029,8 @@ static void sun4c_quick_kernel_fault(unsigned long address)
* bucket[0]
* bucket[1]
* [ ... ]
- * bucket[NR_TASKS-1]
- * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASKS)
+ * bucket[NR_TASK_BUCKETS-1]
+ * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
*
* Each slot looks like:
*
@@ -1218,7 +1038,7 @@ static void sun4c_quick_kernel_fault(unsigned long address)
* page 2 -- rest of kernel stack
*/
-union task_union *sun4c_bucket[NR_TASKS];
+union task_union *sun4c_bucket[NR_TASK_BUCKETS];
static int sun4c_lowbucket_avail;
@@ -1232,7 +1052,7 @@ static int sun4c_lowbucket_avail;
#define BUCKET_PTE_PAGE(pte) \
(PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
-static inline void get_locked_segment(unsigned long addr)
+static void get_locked_segment(unsigned long addr)
{
struct sun4c_mmu_entry *stolen;
unsigned long flags;
@@ -1240,19 +1060,14 @@ static inline void get_locked_segment(unsigned long addr)
save_and_cli(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
stolen = sun4c_user_strategy();
- remove_ring(&sun4c_ufree_ring, stolen);
max_user_taken_entries--;
-#ifdef DEBUG_SUN4C_MM
- if(max_user_taken_entries < 0)
- panic("get_locked_segment: max_user_taken < 0.");
-#endif
stolen->vaddr = addr;
- FUW_INLINE
+ flush_user_windows();
sun4c_kernel_map(stolen);
restore_flags(flags);
}
-static inline void free_locked_segment(unsigned long addr)
+static void free_locked_segment(unsigned long addr)
{
struct sun4c_mmu_entry *entry;
unsigned long flags;
@@ -1263,14 +1078,13 @@ static inline void free_locked_segment(unsigned long addr)
pseg = sun4c_get_segmap(addr);
entry = &mmu_entry_pool[pseg];
- FUW_INLINE
- sun4c_flush_segment(addr);
+ flush_user_windows();
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(addr);
+ else
+ sun4c_flush_segment_sw(addr);
sun4c_kernel_unmap(entry);
add_ring(&sun4c_ufree_ring, entry);
-#ifdef DEBUG_SUN4C_MM
- if(max_user_taken_entries < 0)
- panic("free_locked_segment: max_user_taken < 0.");
-#endif
max_user_taken_entries++;
restore_flags(flags);
}
@@ -1282,8 +1096,8 @@ static inline void garbage_collect(int entry)
/* 32 buckets per segment... */
entry &= ~31;
start = entry;
- for(end = (start + 32); start < end; start++)
- if(sun4c_bucket[start] != BUCKET_EMPTY)
+ for (end = (start + 32); start < end; start++)
+ if (sun4c_bucket[start] != BUCKET_EMPTY)
return;
/* Entire segment empty, release it. */
@@ -1302,23 +1116,39 @@ static struct task_struct *sun4c_alloc_task_struct(void)
int entry;
pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER);
- if(!pages)
+ if (!pages)
return (struct task_struct *) 0;
- for(entry = sun4c_lowbucket_avail; entry < NR_TASKS; entry++)
- if(sun4c_bucket[entry] == BUCKET_EMPTY)
+ for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
+ if (sun4c_bucket[entry] == BUCKET_EMPTY)
break;
- if(entry == NR_TASKS) {
+ if (entry == NR_TASK_BUCKETS) {
free_pages(pages, TASK_STRUCT_ORDER);
return (struct task_struct *) 0;
}
- if(entry >= sun4c_lowbucket_avail)
+ if (entry >= sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry + 1;
addr = BUCKET_ADDR(entry);
sun4c_bucket[entry] = (union task_union *) addr;
if(sun4c_get_segmap(addr) == invalid_segment)
get_locked_segment(addr);
+
+ /* We are changing the virtual color of the page(s)
+	 * so we must flush the cache to guarantee consistency.
+ */
+ if (sun4c_vacinfo.do_hwflushes) {
+ sun4c_flush_page_hw(pages);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page_hw(pages + PAGE_SIZE);
+#endif
+ } else {
+ sun4c_flush_page_sw(pages);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page_sw(pages + PAGE_SIZE);
+#endif
+ }
+
sun4c_put_pte(addr, BUCKET_PTE(pages));
#ifndef CONFIG_SUN4
sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
@@ -1342,7 +1172,7 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk)
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
- if(entry < sun4c_lowbucket_avail)
+ if (entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
free_pages(pages, TASK_STRUCT_ORDER);
@@ -1365,7 +1195,7 @@ static void sun4c_free_task_struct_sw(struct task_struct *tsk)
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
- if(entry < sun4c_lowbucket_avail)
+ if (entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
free_pages(pages, TASK_STRUCT_ORDER);
@@ -1376,10 +1206,10 @@ static void __init sun4c_init_buckets(void)
{
int entry;
- if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
+ if (sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER);
}
- for(entry = 0; entry < NR_TASKS; entry++)
+ for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
sun4c_bucket[entry] = BUCKET_EMPTY;
sun4c_lowbucket_avail = 0;
}
@@ -1494,37 +1324,38 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
* by implication and fool the page locking code above
* if passed to by mistake.
*/
-static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
+static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
{
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
- if(MAP_NR(page) > max_mapnr) {
+ if (MAP_NR(page) > max_mapnr) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
return (__u32)sun4c_lockarea(bufptr, len);
}
-static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
- while(sz >= 0) {
- sg[sz].dvma_addr = (__u32)sun4c_lockarea(sg[sz].addr, sg[sz].len);
+ while (sz >= 0) {
+ sg[sz].dvma_address = (__u32)sun4c_lockarea(sg[sz].address, sg[sz].length);
+ sg[sz].dvma_length = sg[sz].length;
sz--;
}
}
-static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct linux_sbus *sbus)
+static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
{
- if(bufptr < sun4c_iobuffer_start)
+ if (bufptr < sun4c_iobuffer_start)
return; /* On kernel stack or similar, see above */
sun4c_unlockarea((char *)bufptr, len);
}
-static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
- while(sz >= 0) {
- sun4c_unlockarea((char *)sg[sz].dvma_addr, sg[sz].len);
+ while (sz >= 0) {
+ sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
sz--;
}
}
@@ -1534,7 +1365,7 @@ static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_s
struct vm_area_struct sun4c_kstack_vma;
-static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
+static void __init sun4c_init_lock_areas(void)
{
unsigned long sun4c_taskstack_start;
unsigned long sun4c_taskstack_end;
@@ -1543,9 +1374,9 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
sun4c_init_buckets();
sun4c_taskstack_start = SUN4C_LOCK_VADDR;
sun4c_taskstack_end = (sun4c_taskstack_start +
- (TASK_ENTRY_SIZE * NR_TASKS));
- if(sun4c_taskstack_end >= SUN4C_LOCK_END) {
- prom_printf("Too many tasks, decrease NR_TASKS please.\n");
+ (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
+ if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
+ prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
prom_halt();
}
@@ -1556,9 +1387,8 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
bitmap_size = (bitmap_size + 7) >> 3;
bitmap_size = LONG_ALIGN(bitmap_size);
iobuffer_map_size = bitmap_size << 3;
- sun4c_iobuffer_map = (unsigned long *) start_mem;
- memset((void *) start_mem, 0, bitmap_size);
- start_mem += bitmap_size;
+ sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
+ memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
sun4c_kstack_vma.vm_mm = &init_mm;
sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
@@ -1566,7 +1396,6 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
insert_vm_struct(&init_mm, &sun4c_kstack_vma);
- return start_mem;
}
/* Cache flushing on the sun4c. */
@@ -1574,12 +1403,12 @@ static void sun4c_flush_cache_all(void)
{
unsigned long begin, end;
- FUW_INLINE
+ flush_user_windows();
begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
end = (begin + SUN4C_VAC_SIZE);
- if(sun4c_vacinfo.linesize == 32) {
- while(begin < end) {
+ if (sun4c_vacinfo.linesize == 32) {
+ while (begin < end) {
__asm__ __volatile__("
ld [%0 + 0x00], %%g0
ld [%0 + 0x20], %%g0
@@ -1601,7 +1430,7 @@ static void sun4c_flush_cache_all(void)
begin += 512;
}
} else {
- while(begin < end) {
+ while (begin < end) {
__asm__ __volatile__("
ld [%0 + 0x00], %%g0
ld [%0 + 0x10], %%g0
@@ -1629,29 +1458,31 @@ static void sun4c_flush_cache_mm_hw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
- struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
- unsigned long flags;
+ if (new_ctx != NO_CONTEXT) {
+ flush_user_windows();
+ if (sun4c_context_ring[new_ctx].num_entries) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
- save_and_cli(flags);
- if(head->next != head) {
- struct sun4c_mmu_entry *entry = head->next;
- int savectx = sun4c_get_context();
+ save_and_cli(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
- FUW_INLINE
- sun4c_set_context(new_ctx);
- sun4c_flush_context_hw();
- do {
- struct sun4c_mmu_entry *next = entry->next;
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context_hw();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
- sun4c_user_unmap(entry);
- free_user_entry(new_ctx, entry);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
- entry = next;
- } while(entry != head);
- sun4c_set_context(savectx);
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ restore_flags(flags);
}
- restore_flags(flags);
}
}
@@ -1659,29 +1490,28 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
{
int new_ctx = mm->context;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+
save_and_cli(flags);
/* All user segmap chains are ordered on entry->vaddr. */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
/* Tracing various job mixtures showed that this conditional
* only passes ~35% of the time for most worse case situations,
* therefore we avoid all of this gross overhead ~65% of the time.
*/
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
+
sun4c_set_context(new_ctx);
/* At this point, always, (start >= entry->vaddr) and
@@ -1696,11 +1526,11 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
/* "realstart" is always >= entry->vaddr */
realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
- if(end < realend)
+ if (end < realend)
realend = end;
- if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
+ if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
unsigned long page = entry->vaddr;
- while(page < realend) {
+ while (page < realend) {
sun4c_flush_page_hw(page);
page += PAGE_SIZE;
}
@@ -1710,14 +1540,13 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
free_user_entry(new_ctx, entry);
}
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
}
}
-/* XXX no save_and_cli/restore_flags needed, but put here if darkside still crashes */
static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
@@ -1726,76 +1555,85 @@ static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long
/* Sun4c has no separate I/D caches so cannot optimize for non
* text page flushes.
*/
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int octx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page_hw(page);
sun4c_set_context(octx);
+ restore_flags(flags);
}
}
-static void sun4c_flush_page_to_ram_hw(unsigned long page)
+static void sun4c_flush_page_to_ram_hw(struct page *page)
{
- sun4c_flush_page_hw(page);
+ unsigned long flags;
+ unsigned long addr = page_address(page);
+
+ save_and_cli(flags);
+ sun4c_flush_page_hw(addr);
+ restore_flags(flags);
}
static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
- struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
- unsigned long flags;
+ if (new_ctx != NO_CONTEXT) {
+ flush_user_windows();
- save_and_cli(flags);
- if(head->next != head) {
- struct sun4c_mmu_entry *entry = head->next;
- int savectx = sun4c_get_context();
+ if (sun4c_context_ring[new_ctx].num_entries) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
- FUW_INLINE
- sun4c_set_context(new_ctx);
- sun4c_flush_context_sw();
- do {
- struct sun4c_mmu_entry *next = entry->next;
+ save_and_cli(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
- sun4c_user_unmap(entry);
- free_user_entry(new_ctx, entry);
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context_sw();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
- entry = next;
- } while(entry != head);
- sun4c_set_context(savectx);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ restore_flags(flags);
}
- restore_flags(flags);
}
}
static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
int new_ctx = mm->context;
-
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
- if(new_ctx != NO_CONTEXT) {
+
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+
save_and_cli(flags);
/* All user segmap chains are ordered on entry->vaddr. */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
/* Tracing various job mixtures showed that this conditional
* only passes ~35% of the time for most worse case situations,
* therefore we avoid all of this gross overhead ~65% of the time.
*/
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
sun4c_set_context(new_ctx);
@@ -1811,11 +1649,11 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start
/* "realstart" is always >= entry->vaddr */
realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
- if(end < realend)
+ if (end < realend)
realend = end;
- if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
+ if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
unsigned long page = entry->vaddr;
- while(page < realend) {
+ while (page < realend) {
sun4c_flush_page_sw(page);
page += PAGE_SIZE;
}
@@ -1825,7 +1663,7 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start
free_user_entry(new_ctx, entry);
}
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -1840,19 +1678,27 @@ static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long
/* Sun4c has no separate I/D caches so cannot optimize for non
* text page flushes.
*/
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int octx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page_sw(page);
sun4c_set_context(octx);
+ restore_flags(flags);
}
}
-static void sun4c_flush_page_to_ram_sw(unsigned long page)
+static void sun4c_flush_page_to_ram_sw(struct page *page)
{
- sun4c_flush_page_sw(page);
+ unsigned long flags;
+ unsigned long addr = page_address(page);
+
+ save_and_cli(flags);
+ sun4c_flush_page_sw(addr);
+ restore_flags(flags);
}
/* Sun4c cache is unified, both instructions and data live there, so
@@ -1879,8 +1725,11 @@ static void sun4c_flush_tlb_all(void)
flush_user_windows();
while (sun4c_kernel_ring.num_entries) {
next_entry = this_entry->next;
- sun4c_flush_segment(this_entry->vaddr);
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(this_entry->vaddr);
+ else
+ sun4c_flush_segment_sw(this_entry->vaddr);
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(this_entry->vaddr, invalid_segment);
}
@@ -1895,16 +1744,15 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
sun4c_set_context(new_ctx);
sun4c_flush_context_hw();
do {
@@ -1914,7 +1762,7 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
free_user_entry(new_ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -1925,26 +1773,21 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start,
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
save_and_cli(flags);
/* See commentary in sun4c_flush_cache_range_*(). */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
- /* This window flush is paranoid I think... -DaveM */
- FUW_INLINE
sun4c_set_context(new_ctx);
do {
struct sun4c_mmu_entry *next = entry->next;
@@ -1954,7 +1797,7 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start,
free_user_entry(new_ctx, entry);
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -1966,15 +1809,17 @@ static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long pa
struct mm_struct *mm = vma->vm_mm;
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int savectx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page_hw(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
+ restore_flags(flags);
}
}
@@ -1982,16 +1827,15 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
sun4c_set_context(new_ctx);
sun4c_flush_context_sw();
do {
@@ -2001,7 +1845,7 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
free_user_entry(new_ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -2012,27 +1856,21 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start,
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
-
save_and_cli(flags);
/* See commentary in sun4c_flush_cache_range_*(). */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
- /* This window flush is paranoid I think... -DaveM */
- FUW_INLINE
sun4c_set_context(new_ctx);
do {
struct sun4c_mmu_entry *next = entry->next;
@@ -2042,7 +1880,7 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start,
free_user_entry(new_ctx, entry);
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -2054,15 +1892,17 @@ static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long pa
struct mm_struct *mm = vma->vm_mm;
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int savectx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page_sw(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
+ restore_flags(flags);
}
}
@@ -2075,7 +1915,6 @@ static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}
-
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus_type, int rdonly)
{
@@ -2083,7 +1922,7 @@ void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
- if(rdonly)
+ if (rdonly)
page_entry &= ~_SUN4C_WRITEABLE;
sun4c_put_pte(virt_addr, page_entry);
}
@@ -2093,12 +1932,12 @@ void sun4c_unmapioaddr(unsigned long virt_addr)
sun4c_put_pte(virt_addr, 0);
}
-static void sun4c_alloc_context_hw(struct mm_struct *mm)
+static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
+ if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number;
@@ -2106,40 +1945,33 @@ static void sun4c_alloc_context_hw(struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
+ if (ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
-#ifdef DEBUG_SUN4C_MM
- if(ctxp == &ctx_used)
- panic("out of mmu contexts");
-#endif
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
ctxp->ctx_mm->context = NO_CONTEXT;
ctxp->ctx_mm = mm;
mm->context = ctxp->ctx_number;
sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number],
- ctxp->ctx_number);
+ ctxp->ctx_number);
}
-static void sun4c_switch_to_context_hw(struct task_struct *tsk)
+/* Switch the current MM context. */
+static void sun4c_switch_mm_hw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
struct ctx_list *ctx;
+ int dirty = 0;
- if(tsk->mm->context == NO_CONTEXT) {
- sun4c_alloc_context_hw(tsk->mm);
+ if (mm->context == NO_CONTEXT) {
+ dirty = 1;
+ sun4c_alloc_context_hw(old_mm, mm);
} else {
/* Update the LRU ring of contexts. */
- ctx = ctx_list_pool + tsk->mm->context;
+ ctx = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx);
add_to_used_ctxlist(ctx);
}
- sun4c_set_context(tsk->mm->context);
-}
-
-static void sun4c_init_new_context_hw(struct mm_struct *mm)
-{
- sun4c_alloc_context_hw(mm);
- if(mm == current->mm)
+ if (dirty || old_mm != mm)
sun4c_set_context(mm->context);
}
@@ -2147,7 +1979,7 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm)
{
struct ctx_list *ctx_old;
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+ if (mm->context != NO_CONTEXT) {
sun4c_demap_context_hw(&sun4c_context_ring[mm->context], mm->context);
ctx_old = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx_old);
@@ -2156,12 +1988,12 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm)
}
}
-static void sun4c_alloc_context_sw(struct mm_struct *mm)
+static void sun4c_alloc_context_sw(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
+ if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number;
@@ -2169,40 +2001,34 @@ static void sun4c_alloc_context_sw(struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
+ if(ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
-#ifdef DEBUG_SUN4C_MM
- if(ctxp == &ctx_used)
- panic("out of mmu contexts");
-#endif
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
ctxp->ctx_mm->context = NO_CONTEXT;
ctxp->ctx_mm = mm;
mm->context = ctxp->ctx_number;
sun4c_demap_context_sw(&sun4c_context_ring[ctxp->ctx_number],
- ctxp->ctx_number);
+ ctxp->ctx_number);
}
-static void sun4c_switch_to_context_sw(struct task_struct *tsk)
+/* Switch the current MM context. */
+static void sun4c_switch_mm_sw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
struct ctx_list *ctx;
+ int dirty = 0;
- if(tsk->mm->context == NO_CONTEXT) {
- sun4c_alloc_context_sw(tsk->mm);
+ if (mm->context == NO_CONTEXT) {
+ dirty = 1;
+ sun4c_alloc_context_sw(old_mm, mm);
} else {
/* Update the LRU ring of contexts. */
- ctx = ctx_list_pool + tsk->mm->context;
+ ctx = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx);
add_to_used_ctxlist(ctx);
}
- sun4c_set_context(tsk->mm->context);
-}
-static void sun4c_init_new_context_sw(struct mm_struct *mm)
-{
- sun4c_alloc_context_sw(mm);
- if(mm == current->mm)
+ if (dirty || old_mm != mm)
sun4c_set_context(mm->context);
}
@@ -2210,7 +2036,7 @@ static void sun4c_destroy_context_sw(struct mm_struct *mm)
{
struct ctx_list *ctx_old;
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+ if (mm->context != NO_CONTEXT) {
sun4c_demap_context_sw(&sun4c_context_ring[mm->context], mm->context);
ctx_old = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx_old);
@@ -2225,7 +2051,7 @@ static int sun4c_mmu_info(char *buf)
int len;
used_user_entries = 0;
- for(i=0; i < num_contexts; i++)
+ for (i = 0; i < num_contexts; i++)
used_user_entries += sun4c_context_ring[i].num_entries;
len = sprintf(buf,
@@ -2239,10 +2065,7 @@ static int sun4c_mmu_info(char *buf)
"usedpsegs\t: %d\n"
"ufreepsegs\t: %d\n"
"user_taken\t: %d\n"
- "max_taken\t: %d\n"
- "context\t\t: %d flushes\n"
- "segment\t\t: %d flushes\n"
- "page\t\t: %d flushes\n",
+ "max_taken\t: %d\n",
sun4c_vacinfo.num_bytes,
(sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
sun4c_vacinfo.linesize,
@@ -2253,22 +2076,7 @@ static int sun4c_mmu_info(char *buf)
used_user_entries,
sun4c_ufree_ring.num_entries,
sun4c_user_taken_entries,
- max_user_taken_entries,
- ctxflushes, segflushes, pageflushes);
-
-#if KGPROF_PROFILING
- {
- int i,j;
- len += sprintf(buf + len,"kgprof profiling:\n");
- for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
- len += sprintf(buf + len,"%5d ",kgprof_counters[i].count);
- for (j=0;j<KGPROF_DEPTH;j++) {
- len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]);
- }
- len += sprintf(buf + len,"\n");
- }
- }
-#endif
+ max_user_taken_entries);
return len;
}
@@ -2277,13 +2085,6 @@ static int sun4c_mmu_info(char *buf)
* data structures.
*/
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
-#endif
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
-#endif
-
/* First the functions which the mid-level code uses to directly
* manipulate the software page tables. Some defines since we are
* emulating the i386 page directory layout.
@@ -2295,17 +2096,6 @@ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_AL
#define PGD_DIRTY 0x040
#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned long sun4c_vmalloc_start(void)
-{
- return SUN4C_VMALLOC_START;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
-#endif
-
static int sun4c_pte_present(pte_t pte)
{
return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
@@ -2334,48 +2124,6 @@ static void sun4c_pgd_clear(pgd_t * pgdp) { }
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_write(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_WRITE;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_dirty(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_MODIFIED;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_young(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_ACCESSED;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE));
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_mkclean(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE));
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_mkold(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ));
-}
-#endif
-
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
@@ -2404,9 +2152,9 @@ static pte_t sun4c_pte_mkyoung(pte_t pte)
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-static pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot)
+static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
- return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
+ return __pte((page - mem_map) | pgprot_val(pgprot));
}
static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
@@ -2419,17 +2167,9 @@ static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
-{
- return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) |
- pgprot_val(newprot));
-}
-#endif
-
-static unsigned long sun4c_pte_page(pte_t pte)
+static unsigned long sun4c_pte_pagenr(pte_t pte)
{
- return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT)));
+ return (pte_val(pte) & SUN4C_PFN_MASK);
}
static inline unsigned long sun4c_pmd_page(pmd_t pmd)
@@ -2460,11 +2200,6 @@ pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}
-/* Update the root mmu directory. */
-static void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
-{
-}
-
/* Please take special note on the foo_kernel() routines below, our
* fast in window fault handler wants to get at the pte's for vmalloc
* area with traps off, therefore they _MUST_ be locked down to prevent
@@ -2487,7 +2222,7 @@ static void sun4c_pte_free_kernel(pte_t *pte)
static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
- if(address >= SUN4C_LOCK_VADDR)
+ if (address >= SUN4C_LOCK_VADDR)
return NULL;
address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
if (sun4c_pmd_none(*pmd))
@@ -2527,7 +2262,7 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
{
unsigned long *ret;
- if((ret = pgd_quicklist) != NULL) {
+ if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
@@ -2546,15 +2281,15 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
static int sun4c_check_pgt_cache(int low, int high)
{
int freed = 0;
- if(pgtable_cache_size > high) {
+ if (pgtable_cache_size > high) {
do {
- if(pgd_quicklist)
+ if (pgd_quicklist)
free_pgd_slow(get_pgd_fast()), freed++;
- if(pmd_quicklist)
+ if (pmd_quicklist)
free_pmd_slow(get_pmd_fast()), freed++;
- if(pte_quicklist)
+ if (pte_quicklist)
free_pte_slow(get_pte_fast()), freed++;
- } while(pgtable_cache_size > low);
+ } while (pgtable_cache_size > low);
}
return freed;
}
@@ -2575,7 +2310,7 @@ extern __inline__ pte_t *sun4c_get_pte_fast(void)
{
unsigned long *ret;
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
@@ -2691,19 +2426,21 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
unsigned long start;
/* Do not mistake ourselves as another mapping. */
- if(vmaring == vma)
+ if (vmaring == vma)
continue;
if (S4CVAC_BADALIAS(vaddr, address)) {
alias_found++;
start = vmaring->vm_start;
- while(start < vmaring->vm_end) {
+ while (start < vmaring->vm_end) {
pgdp = sun4c_pgd_offset(vmaring->vm_mm, start);
- if(!pgdp) goto next;
+ if (!pgdp)
+ goto next;
ptep = sun4c_pte_offset((pmd_t *) pgdp, start);
- if(!ptep) goto next;
+ if (!ptep)
+ goto next;
- if(pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
+ if (pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
flush_cache_page(vmaring, start);
*ptep = __pte(pte_val(*ptep) |
_SUN4C_PAGE_NOCACHE);
@@ -2716,54 +2453,112 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
} while ((vmaring = vmaring->vm_next_share) != NULL);
spin_unlock(&inode->i_shared_lock);
- if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
+ if (alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
pgdp = sun4c_pgd_offset(vma->vm_mm, address);
ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
- pte = pte_val(*ptep);
+ pte = *ptep;
}
}
}
+/* An experiment, turn off by default for now... -DaveM */
+#define SUN4C_PRELOAD_PSEG
+
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long flags;
+ int pseg;
save_and_cli(flags);
address &= PAGE_MASK;
- if(sun4c_get_segmap(address) == invalid_segment)
- alloc_user_segment(address, sun4c_get_context());
+ if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
+ struct sun4c_mmu_entry *entry = sun4c_user_strategy();
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long start, end;
+
+ entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
+ entry->ctx = mm->context;
+ add_ring_ordered(sun4c_context_ring + mm->context, entry);
+ sun4c_put_segmap(entry->vaddr, entry->pseg);
+ end = start + SUN4C_REAL_PGDIR_SIZE;
+ while (start < end) {
+#ifdef SUN4C_PRELOAD_PSEG
+ pgd_t *pgdp = sun4c_pgd_offset(mm, start);
+ pte_t *ptep;
+
+ if (!pgdp)
+ goto no_mapping;
+ ptep = sun4c_pte_offset((pmd_t *) pgdp, start);
+ if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
+ goto no_mapping;
+ sun4c_put_pte(start, pte_val(*ptep));
+ goto next;
+
+ no_mapping:
+#endif
+ sun4c_put_pte(start, 0);
+#ifdef SUN4C_PRELOAD_PSEG
+ next:
+#endif
+ start += PAGE_SIZE;
+ }
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
+ sun4c_vac_alias_fixup(vma, address, pte);
+#ifndef SUN4C_PRELOAD_PSEG
+ sun4c_put_pte(address, pte_val(pte));
+#endif
+ restore_flags(flags);
+ return;
+ } else {
+ struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
+
+ remove_lru(entry);
+ add_lru(entry);
+ }
- if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
sun4c_vac_alias_fixup(vma, address, pte);
sun4c_put_pte(address, pte_val(pte));
restore_flags(flags);
}
-extern unsigned long free_area_init(unsigned long, unsigned long);
-extern unsigned long sparc_context_init(unsigned long, int);
+extern void sparc_context_init(int);
extern unsigned long end;
+extern unsigned long bootmem_init(void);
+extern unsigned long last_valid_pfn;
+extern void sun_serial_setup(void);
-unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
+void __init sun4c_paging_init(void)
{
int i, cnt;
unsigned long kernel_end, vaddr;
- extern unsigned long sparc_iobase_vaddr;
+ extern struct resource sparc_iomap;
+ unsigned long end_pfn;
kernel_end = (unsigned long) &end;
kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
+
+ last_valid_pfn = end_pfn = bootmem_init();
+
+ /* This does not logically belong here, but we need to
+ * call it at the moment we are able to use the bootmem
+ * allocator.
+ */
+ sun_serial_setup();
+
sun4c_probe_mmu();
invalid_segment = (num_segmaps - 1);
sun4c_init_mmu_entry_pool();
- sun4c_init_rings(&start_mem);
+ sun4c_init_rings();
sun4c_init_map_kernelprom(kernel_end);
sun4c_init_clean_mmu(kernel_end);
sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
- sun4c_init_lock_area(sparc_iobase_vaddr, IOBASE_END);
+ sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
- start_mem = sun4c_init_lock_areas(start_mem);
+ sun4c_init_lock_areas();
sun4c_init_fill_user_ring();
sun4c_set_context(0);
@@ -2783,18 +2578,23 @@ unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long en
vaddr += SUN4C_PGDIR_SIZE;
swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
sun4c_init_ss2_cache_bug();
- start_mem = PAGE_ALIGN(start_mem);
- start_mem = sparc_context_init(start_mem, num_contexts);
- start_mem = free_area_init(start_mem, end_mem);
+ sparc_context_init(num_contexts);
+
+ {
+ unsigned int zones_size[MAX_NR_ZONES] = { 0, 0, 0};
+
+ zones_size[ZONE_DMA] = end_pfn;
+ free_area_init(zones_size);
+ }
+
cnt = 0;
- for(i = 0; i < num_segmaps; i++)
- if(mmu_entry_pool[i].locked)
+ for (i = 0; i < num_segmaps; i++)
+ if (mmu_entry_pool[i].locked)
cnt++;
max_user_taken_entries = num_segmaps - cnt - 40 - 1;
printk("SUN4C: %d mmu entries for the kernel\n", cnt);
- return start_mem;
}
/* Load up routines and constants for sun4c mmu */
@@ -2839,7 +2639,7 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
- if(sun4c_vacinfo.do_hwflushes) {
+ if (sun4c_vacinfo.do_hwflushes) {
BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM);
@@ -2848,9 +2648,8 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM);
} else {
BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM);
@@ -2860,9 +2659,8 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM);
}
BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
@@ -2871,15 +2669,13 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
- BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
#if PAGE_SHIFT <= 12
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
#endif
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP);
-
BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
@@ -2931,6 +2727,9 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_flush_dma_area, sun4c_flush_dma_area, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(mmu_inval_dma_area, sun4c_inval_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM);