author    Ralf Baechle <ralf@linux-mips.org>    2000-06-19 22:45:37 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-06-19 22:45:37 +0000
commit    6d403070f28cd44860fdb3a53be5da0275c65cf4 (patch)
tree      0d0e7fe7b5fb7568d19e11d7d862b77a866ce081 /arch/sparc
parent    ecf1bf5f6c2e668d03b0a9fb026db7aa41e292e1 (diff)
Merge with 2.4.0-test1-ac21 + pile of MIPS cleanups to make merging
possible. Chainsawed RM200 kernel to compile again. Jazz machine status unknown.
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/config.in           |  22
-rw-r--r--  arch/sparc/kernel/entry.S      |   6
-rw-r--r--  arch/sparc/kernel/ioport.c     |  12
-rw-r--r--  arch/sparc/kernel/signal.c     |   2
-rw-r--r--  arch/sparc/kernel/sys_sparc.c  |   4
-rw-r--r--  arch/sparc/kernel/sys_sunos.c  |   4
-rw-r--r--  arch/sparc/kernel/traps.c      |   4
-rw-r--r--  arch/sparc/mm/hypersparc.S     |   5
-rw-r--r--  arch/sparc/mm/init.c           |  14
-rw-r--r--  arch/sparc/mm/srmmu.c          | 892
-rw-r--r--  arch/sparc/mm/sun4c.c          |  65
-rw-r--r--  arch/sparc/mm/swift.S          |   7
-rw-r--r--  arch/sparc/mm/tsunami.S        |   5
-rw-r--r--  arch/sparc/mm/viking.S         |   8
14 files changed, 392 insertions(+), 658 deletions(-)
diff --git a/arch/sparc/config.in b/arch/sparc/config.in
index 9faf12053..6d07d32c6 100644
--- a/arch/sparc/config.in
+++ b/arch/sparc/config.in
@@ -1,6 +1,6 @@
-# $Id: config.in,v 1.93 2000/05/22 08:12:19 davem Exp $
+# $Id: config.in,v 1.94 2000/06/04 22:23:10 anton Exp $
# For a description of the syntax of this configuration file,
-# see the Configure script.
+# see Documentation/kbuild/config-language.txt.
#
mainmenu_name "Linux/SPARC Kernel Configuration"
@@ -12,6 +12,15 @@ bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
endmenu
mainmenu_option next_comment
+comment 'Loadable module support'
+bool 'Enable loadable module support' CONFIG_MODULES
+if [ "$CONFIG_MODULES" = "y" ]; then
+ bool ' Set version information on all symbols for modules' CONFIG_MODVERSIONS
+ bool ' Kernel module loader' CONFIG_KMOD
+fi
+endmenu
+
+mainmenu_option next_comment
comment 'General setup'
define_bool CONFIG_VT y
@@ -59,15 +68,6 @@ dep_tristate ' Parallel printer support' CONFIG_PRINTER $CONFIG_PARPORT
endmenu
mainmenu_option next_comment
-comment 'Loadable module support'
-bool 'Enable loadable module support' CONFIG_MODULES
-if [ "$CONFIG_MODULES" = "y" ]; then
- bool ' Set version information on all symbols for modules' CONFIG_MODVERSIONS
- bool ' Kernel module loader' CONFIG_KMOD
-fi
-endmenu
-
-mainmenu_option next_comment
comment 'Console drivers'
bool 'PROM console' CONFIG_PROM_CONSOLE
source drivers/video/Config.in
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 61518872f..7b3855dda 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1,4 +1,4 @@
-/* $Id: entry.S,v 1.164 2000/05/09 17:40:13 davem Exp $
+/* $Id: entry.S,v 1.165 2000/06/05 06:08:45 anton Exp $
* arch/sparc/kernel/entry.S: Sparc trap low-level entry points.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -945,7 +945,7 @@ sun4c_fault:
and %l5, %l4, %l5
/* Test for NULL pte_t * in vmalloc area. */
- sethi %hi(SUN4C_VMALLOC_START), %l4
+ sethi %hi(VMALLOC_START), %l4
cmp %l5, %l4
blu,a C_LABEL(invalid_segment_patch1)
lduXa [%l5] ASI_SEGMAP, %l4
@@ -1072,7 +1072,7 @@ C_LABEL(invalid_segment_patch2):
andn %l4, 0x1ff, %l5
1:
- sethi %hi(SUN4C_VMALLOC_START), %l4
+ sethi %hi(VMALLOC_START), %l4
cmp %l5, %l4
bgeu 1f
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 20c141a00..71f5dbc88 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -1,4 +1,4 @@
-/* $Id: ioport.c,v 1.37 2000/03/28 06:38:19 davem Exp $
+/* $Id: ioport.c,v 1.38 2000/06/04 06:23:52 anton Exp $
* ioport.c: Simple io mapping allocator.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -52,11 +52,11 @@ static void _sparc_free_io(struct resource *res);
/* This points to the next to use virtual memory for DVMA mappings */
static struct resource _sparc_dvma = {
- "sparc_dvma", DVMA_VADDR, DVMA_VADDR + DVMA_LEN - 1
+ "sparc_dvma", DVMA_VADDR, DVMA_END - 1
};
/* This points to the start of I/O mappings, cluable from outside. */
/*ext*/ struct resource sparc_iomap = {
- "sparc_iomap", IOBASE_VADDR, IOBASE_END-1
+ "sparc_iomap", IOBASE_VADDR, IOBASE_END - 1
};
/*
@@ -453,7 +453,11 @@ void sbus_dma_sync_single(struct sbus_dev *sdev, u32 ba, long size, int directio
panic("sbus_dma_sync_single: 0x%x\n", ba);
va = (unsigned long) phys_to_virt(mmu_translate_dvma(ba));
- mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK);
+ /*
+ * XXX This bogosity will be fixed with the iommu rewrite coming soon
+ * to a kernel near you. - Anton
+ */
+ /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */
}
void sbus_dma_sync_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction)
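For reference, the call being commented out above rounds the sync length up to
a whole number of pages before invalidating. A minimal sketch of that rounding
arithmetic, using the usual 4 KB page constants (the demo harness is ours, only
the expression comes from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Round a byte count up to whole pages, as in
 * mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK). */
static unsigned long pages_to_sync(unsigned long size)
{
	return (size + PAGE_SIZE - 1) & PAGE_MASK;
}

int main(void)
{
	printf("%lu\n", pages_to_sync(1));      /* 4096 */
	printf("%lu\n", pages_to_sync(4096));   /* 4096 */
	printf("%lu\n", pages_to_sync(4097));   /* 8192 */
	return 0;
}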
diff --git a/arch/sparc/kernel/signal.c b/arch/sparc/kernel/signal.c
index e807683ea..e7afd3d19 100644
--- a/arch/sparc/kernel/signal.c
+++ b/arch/sparc/kernel/signal.c
@@ -1,4 +1,4 @@
-/* $Id: signal.c,v 1.103 2000/05/09 17:40:13 davem Exp $
+/* $Id: signal.c,v 1.104 2000/05/27 00:49:35 davem Exp $
* linux/arch/sparc/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 5beb3adf0..427208d5f 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -215,7 +215,6 @@ static unsigned long do_mmap2(unsigned long addr, unsigned long len,
goto out;
}
- down(&current->mm->mmap_sem);
lock_kernel();
retval = -EINVAL;
len = PAGE_ALIGN(len);
@@ -230,11 +229,12 @@ static unsigned long do_mmap2(unsigned long addr, unsigned long len,
goto out_putf;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down(&current->mm->mmap_sem);
retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up(&current->mm->mmap_sem);
out_putf:
unlock_kernel();
- up(&current->mm->mmap_sem);
if (file)
fput(file);
out:
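The hunks above (and the matching ones in sys_sunos.c below) narrow the scope
of mmap_sem: before the patch it was taken ahead of lock_kernel() and released
after unlock_kernel(); afterwards it is held only across the do_mmap_pgoff()
call itself. A schematic of the new nesting order, with stubs standing in for
the kernel primitives (illustrative only, not the kernel's code):

#include <stdio.h>

static void lock_kernel(void)   { puts("lock_kernel"); }
static void unlock_kernel(void) { puts("unlock_kernel"); }
static void down(void)          { puts("down(mmap_sem)"); }
static void up(void)            { puts("up(mmap_sem)"); }
static unsigned long do_mmap_pgoff_stub(void) { return 0; }

int main(void)
{
	unsigned long retval;

	lock_kernel();              /* argument checking under the BKL */
	down();                     /* held only across the mapping    */
	retval = do_mmap_pgoff_stub();
	up();
	unlock_kernel();
	return (int)retval;
}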
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index 0dba9b1ca..8591f6085 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -68,7 +68,6 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
struct file * file = NULL;
unsigned long retval, ret_type;
- down(&current->mm->mmap_sem);
lock_kernel();
if(flags & MAP_NORESERVE) {
static int cnt;
@@ -118,7 +117,9 @@ asmlinkage unsigned long sunos_mmap(unsigned long addr, unsigned long len,
}
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down(&current->mm->mmap_sem);
retval = do_mmap(file, addr, len, prot, flags, off);
+ up(&current->mm->mmap_sem);
if(!ret_type)
retval = ((retval < PAGE_OFFSET) ? 0 : retval);
@@ -127,7 +128,6 @@ out_putf:
fput(file);
out:
unlock_kernel();
- up(&current->mm->mmap_sem);
return retval;
}
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index ca74c09fc..3564a4517 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -1,4 +1,4 @@
-/* $Id: traps.c,v 1.62 2000/05/09 17:40:13 davem Exp $
+/* $Id: traps.c,v 1.63 2000/06/04 06:23:52 anton Exp $
* arch/sparc/kernel/traps.c
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -499,8 +499,6 @@ void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, unsigned long npc
extern void sparc_cpu_startup(void);
-extern ctxd_t *srmmu_ctx_table_phys;
-
int linux_smp_still_initting;
unsigned int thiscpus_tbr;
int thiscpus_mid;
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 10812273b..e5920a801 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -1,4 +1,4 @@
-/* $Id: hypersparc.S,v 1.15 2000/05/09 17:40:13 davem Exp $
+/* $Id: hypersparc.S,v 1.16 2000/06/04 06:23:52 anton Exp $
* hypersparc.S: High speed Hypersparc mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -28,7 +28,7 @@
.globl hypersparc_flush_cache_all, hypersparc_flush_cache_mm
.globl hypersparc_flush_cache_range, hypersparc_flush_cache_page
- .globl hypersparc_flush_page_to_ram, hypersparc_flush_chunk
+ .globl hypersparc_flush_page_to_ram
.globl hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
.globl hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
.globl hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
@@ -229,7 +229,6 @@ hypersparc_flush_sig_insns:
/* HyperSparc is copy-back. */
hypersparc_flush_page_to_ram:
-hypersparc_flush_chunk:
sethi %hi(vac_line_size), %g1
ld [%g1 + %lo(vac_line_size)], %o4
andn %o0, (PAGE_SIZE - 1), %o0
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index 3f3500046..2847bd443 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,10 +1,10 @@
-/* $Id: init.c,v 1.85 2000/05/09 17:40:13 davem Exp $
+/* $Id: init.c,v 1.86 2000/06/04 06:23:52 anton Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 2000 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com)
*/
#include <linux/config.h>
@@ -125,9 +125,13 @@ unsigned long __init bootmem_init(void)
unsigned long bootmap_pfn;
int i;
- /* Limit maximum memory until we implement highmem for sparc */
- if (!cmdline_memory_size || cmdline_memory_size > 0x0d000000)
- cmdline_memory_size = 0x0d000000;
+ /*
+ * XXX Limit maximum memory until we implement highmem for sparc.
+ * The nocache region has taken up some room but I'll rearrange
+ * the virtual address regions soon - Anton
+ */
+ if (!cmdline_memory_size || cmdline_memory_size > 0x0c000000)
+ cmdline_memory_size = 0x0c000000;
/* XXX It is a bit ambiguous here, whether we should
* XXX treat the user specified mem=xxx as total wanted
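The init.c hunk above lowers the no-highmem memory cap from 0x0d000000 (208 MB)
to 0x0c000000 (192 MB) because the new nocache region takes a slice of the
virtual address map. The clamp itself is just this (constants from the patch,
harness ours):

#include <stdio.h>

/* 0x0c000000 (192 MB) is the new cap; a value of 0 means no mem=
 * option was given on the command line. */
static unsigned long clamp_memory(unsigned long cmdline_memory_size)
{
	if (!cmdline_memory_size || cmdline_memory_size > 0x0c000000)
		cmdline_memory_size = 0x0c000000;
	return cmdline_memory_size;
}

int main(void)
{
	printf("%#lx\n", clamp_memory(0));           /* 0xc000000 */
	printf("%#lx\n", clamp_memory(0x04000000));  /* 0x4000000 */
	printf("%#lx\n", clamp_memory(0x10000000));  /* 0xc000000 */
	return 0;
}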
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 0b46e7d25..a69bf6b12 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,11 +1,11 @@
-/* $Id: srmmu.c,v 1.209 2000/05/09 17:40:13 davem Exp $
+/* $Id: srmmu.c,v 1.211 2000/06/04 06:23:52 anton Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Pete Zaitcev
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- * Copyright (C) 1999 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1999,2000 Anton Blanchard (anton@linuxcare.com)
*/
#include <linux/config.h>
@@ -57,6 +57,8 @@ extern struct resource sparc_iomap;
extern unsigned long last_valid_pfn;
+pgd_t *srmmu_swapper_pg_dir;
+
#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
@@ -65,40 +67,29 @@ extern unsigned long last_valid_pfn;
#define FLUSH_END }
#endif
-BTFIXUPDEF_CALL(void, ctxd_set, ctxd_t *, pgd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)
-
-#define ctxd_set(ctxp,pgdp) BTFIXUP_CALL(ctxd_set)(ctxp,pgdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-BTFIXUPDEF_CALL(void, flush_chunk, unsigned long)
-
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
+
int flush_page_for_dma_global = 1;
-#define flush_chunk(chunk) BTFIXUP_CALL(flush_chunk)(chunk)
+
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
-
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif
-static struct srmmu_stats {
- int invall;
- int invpg;
- int invrnge;
- int invmm;
-} module_stats;
-
char *srmmu_name;
ctxd_t *srmmu_ctx_table_phys;
ctxd_t *srmmu_context_table;
int viking_mxcc_present = 0;
-static spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED;
+spinlock_t srmmu_context_spinlock = SPIN_LOCK_UNLOCKED;
-/* In general all page table modifications should use the V8 atomic
+/*
+ * In general all page table modifications should use the V8 atomic
* swap instruction. This insures the mmu and the cpu are in sync
* with respect to ref/mod bits in the page tables.
*/
@@ -117,31 +108,55 @@ static inline int srmmu_device_memory(unsigned long x)
return ((x & 0xF0000000) != 0);
}
+int srmmu_cache_pagetables;
+
+/* XXX Make this dynamic based on ram size - Anton */
+#define SRMMU_NOCACHE_NPAGES 256
+#define SRMMU_NOCACHE_VADDR 0xfc000000
+#define SRMMU_NOCACHE_SIZE (SRMMU_NOCACHE_NPAGES*PAGE_SIZE)
+#define SRMMU_NOCACHE_END (SRMMU_NOCACHE_VADDR + SRMMU_NOCACHE_SIZE)
+#define SRMMU_NOCACHE_BITMAP_SIZE (SRMMU_NOCACHE_NPAGES * 16)
+#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
+
+void *srmmu_nocache_pool;
+void *srmmu_nocache_bitmap;
+int srmmu_nocache_low;
+int srmmu_nocache_used;
+
+/* This makes sense. Honest it does - Anton */
+#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
+#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
+#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
+
static unsigned long srmmu_pgd_page(pgd_t pgd)
-{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
+{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pmd_page(pmd_t pmd)
-{ return srmmu_device_memory(pmd_val(pmd))?~0:(unsigned long)__va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
+{ return srmmu_device_memory(pmd_val(pmd))?~0:(unsigned long)__nocache_va((pmd_val(pmd) & SRMMU_PTD_PMASK) << 4); }
static unsigned long srmmu_pte_pagenr(pte_t pte)
{ return srmmu_device_memory(pte_val(pte))?~0:(((pte_val(pte) & SRMMU_PTE_PMASK) << 4) >> PAGE_SHIFT); }
static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }
+
static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
-static inline void srmmu_pte_clear(pte_t *ptep) { set_pte(ptep, __pte(0)); }
+static inline void srmmu_pte_clear(pte_t *ptep)
+{ set_pte(ptep, __pte(0)); }
static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }
+
static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static inline void srmmu_pmd_clear(pmd_t *pmdp) { set_pte((pte_t *)pmdp, __pte(0)); }
+static inline void srmmu_pmd_clear(pmd_t *pmdp)
+{ set_pte((pte_t *)pmdp, __pte(0)); }
static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }
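The __nocache_pa()/__nocache_va() macros introduced above translate between
the fixed nocache virtual window at SRMMU_NOCACHE_VADDR and the physical pages
backing srmmu_nocache_pool. A user-space sketch of the arithmetic, modelling
__pa()/__va() as the usual linear offset (PAGE_OFFSET and the pool address
below are illustrative values, not taken from a real boot):

#include <assert.h>

#define PAGE_OFFSET          0xf0000000UL   /* illustrative */
#define SRMMU_NOCACHE_VADDR  0xfc000000UL

static unsigned long pool = 0xf0400000UL;   /* hypothetical kernel va of the pool */

static unsigned long pa(unsigned long va) { return va - PAGE_OFFSET; }
static unsigned long va(unsigned long p)  { return p + PAGE_OFFSET; }

/* Same arithmetic as the patch's macros. */
static unsigned long nocache_pa(unsigned long v)
{ return v - SRMMU_NOCACHE_VADDR + pa(pool); }

static unsigned long nocache_va(unsigned long p)
{ return va(p) - pool + SRMMU_NOCACHE_VADDR; }

int main(void)
{
	unsigned long v = SRMMU_NOCACHE_VADDR + 0x1234;
	/* the translation round-trips inside the nocache window */
	assert(nocache_va(nocache_pa(v)) == v);
	return 0;
}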
@@ -152,18 +167,35 @@ static inline int srmmu_pgd_bad(pgd_t pgd)
static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
-static inline void srmmu_pgd_clear(pgd_t * pgdp) { set_pte((pte_t *)pgdp, __pte(0)); }
+static inline void srmmu_pgd_clear(pgd_t * pgdp)
+{ set_pte((pte_t *)pgdp, __pte(0)); }
+
+static inline int srmmu_pte_write(pte_t pte)
+{ return pte_val(pte) & SRMMU_WRITE; }
+
+static inline int srmmu_pte_dirty(pte_t pte)
+{ return pte_val(pte) & SRMMU_DIRTY; }
+
+static inline int srmmu_pte_young(pte_t pte)
+{ return pte_val(pte) & SRMMU_REF; }
+
+static inline pte_t srmmu_pte_wrprotect(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
-static inline int srmmu_pte_write(pte_t pte) { return pte_val(pte) & SRMMU_WRITE; }
-static inline int srmmu_pte_dirty(pte_t pte) { return pte_val(pte) & SRMMU_DIRTY; }
-static inline int srmmu_pte_young(pte_t pte) { return pte_val(pte) & SRMMU_REF; }
+static inline pte_t srmmu_pte_mkclean(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
-static inline pte_t srmmu_pte_wrprotect(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_WRITE);}
-static inline pte_t srmmu_pte_mkclean(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
-static inline pte_t srmmu_pte_mkold(pte_t pte) { return __pte(pte_val(pte) & ~SRMMU_REF);}
-static inline pte_t srmmu_pte_mkwrite(pte_t pte) { return __pte(pte_val(pte) | SRMMU_WRITE);}
-static inline pte_t srmmu_pte_mkdirty(pte_t pte) { return __pte(pte_val(pte) | SRMMU_DIRTY);}
-static inline pte_t srmmu_pte_mkyoung(pte_t pte) { return __pte(pte_val(pte) | SRMMU_REF);}
+static inline pte_t srmmu_pte_mkold(pte_t pte)
+{ return __pte(pte_val(pte) & ~SRMMU_REF);}
+
+static inline pte_t srmmu_pte_mkwrite(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_WRITE);}
+
+static inline pte_t srmmu_pte_mkdirty(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
+
+static inline pte_t srmmu_pte_mkyoung(pte_t pte)
+{ return __pte(pte_val(pte) | SRMMU_REF);}
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -176,265 +208,233 @@ static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{
- return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
-}
+{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
+/* XXX should we hyper_flush_whole_icache here - Anton */
static void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{
- set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__pa((unsigned long) pgdp) >> 4)));
-}
+{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
static void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
-{
- set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__pa((unsigned long) pmdp) >> 4)));
-}
+{ set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
static void srmmu_pmd_set(pmd_t * pmdp, pte_t * ptep)
-{
- set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (__pa((unsigned long) ptep) >> 4)));
-}
+{ set_pte((pte_t *)pmdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) ptep) >> 4))); }
static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
-{
- return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot));
-}
+{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
/* to find an entry in a top-level page table... */
extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
-{
- return mm->pgd + (address >> SRMMU_PGDIR_SHIFT);
-}
+{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
-{
- return (pmd_t *) srmmu_pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1));
-}
+{ return (pmd_t *) pgd_page(*dir) + ((address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1)); }
/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
-{
- return (pte_t *) srmmu_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
-}
+{ return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1)); }
-static inline pte_t *srmmu_get_pte_fast(void)
+/* XXX Make this SMP safe - Anton */
+unsigned long __srmmu_get_nocache(int size, int align)
{
- struct page *ret;
-
- spin_lock(&pte_spinlock);
- if ((ret = (struct page *)pte_quicklist) != NULL) {
- unsigned int mask = (unsigned int)ret->pprev_hash;
- unsigned int tmp, off;
-
- if (mask & 0xff)
- for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 256);
- else
- for (tmp = 0x100, off = 2048; (mask & tmp) == 0; tmp <<= 1, off += 256);
- (unsigned int)ret->pprev_hash = mask & ~tmp;
- if (!(mask & ~tmp))
- pte_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(page_address(ret) + off);
- pgtable_cache_size--;
+ int offset = srmmu_nocache_low;
+ int i;
+ unsigned long va_tmp, phys_tmp;
+ int lowest_failed = 0;
+
+ size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+repeat:
+ offset = find_next_zero_bit(srmmu_nocache_bitmap, SRMMU_NOCACHE_BITMAP_SIZE, offset);
+
+ /* we align on physical address */
+ if (align) {
+ va_tmp = (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
+ phys_tmp = (__nocache_pa(va_tmp) + align - 1) & ~(align - 1);
+ va_tmp = (unsigned long)__nocache_va(phys_tmp);
+ offset = (va_tmp - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
}
- spin_unlock(&pte_spinlock);
- return (pte_t *)ret;
-}
-static inline pte_t *srmmu_get_pte_slow(void)
-{
- pte_t *ret;
- struct page *page;
-
- ret = (pte_t *)get_free_page(GFP_KERNEL);
- if (ret) {
- page = mem_map + MAP_NR(ret);
- flush_chunk((unsigned long)ret);
- (unsigned int)page->pprev_hash = 0xfffe;
- spin_lock(&pte_spinlock);
- (unsigned long *)page->next_hash = pte_quicklist;
- pte_quicklist = (unsigned long *)page;
- pgtable_cache_size += 15;
+ if ((SRMMU_NOCACHE_BITMAP_SIZE - offset) < size) {
+ printk("Run out of nocached RAM!\n");
+ return 0;
}
- return ret;
-}
-static inline pgd_t *srmmu_get_pgd_fast(void)
-{
- struct page *ret;
+ i = 0;
+ while(i < size) {
+ if (test_bit(offset + i, srmmu_nocache_bitmap)) {
+ lowest_failed = 1;
+ offset = offset + i + 1;
+ goto repeat;
+ }
+ i++;
+ }
- spin_lock(&pgd_spinlock);
- if ((ret = (struct page *)pgd_quicklist) != NULL) {
- unsigned int mask = (unsigned int)ret->pprev_hash;
- unsigned int tmp, off;
-
- for (tmp = 0x001, off = 0; (mask & tmp) == 0; tmp <<= 1, off += 1024);
- (unsigned int)ret->pprev_hash = mask & ~tmp;
- if (!(mask & ~tmp))
- pgd_quicklist = (unsigned long *)ret->next_hash;
- ret = (struct page *)(page_address(ret) + off);
- pgd_cache_size--;
+ i = 0;
+ while(i < size) {
+ set_bit(offset + i, srmmu_nocache_bitmap);
+ i++;
+ srmmu_nocache_used++;
}
- spin_unlock(&pgd_spinlock);
- return (pgd_t *)ret;
+
+ if (!lowest_failed && offset > srmmu_nocache_low)
+ srmmu_nocache_low = offset;
+
+ return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
-static inline pgd_t *srmmu_get_pgd_slow(void)
+unsigned long srmmu_get_nocache(int size, int align)
{
- pgd_t *ret;
- struct page *page;
-
- ret = (pgd_t *)__get_free_page(GFP_KERNEL);
- if (ret) {
- pgd_t *init = pgd_offset(&init_mm, 0);
- memset(ret + (0 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(ret + (0 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- memset(ret + (1 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(ret + (1 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- memset(ret + (2 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(ret + (2 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- memset(ret + (3 * PTRS_PER_PGD), 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(ret + (3 * PTRS_PER_PGD) + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
- page = mem_map + MAP_NR(ret);
- flush_chunk((unsigned long)ret);
- (unsigned int)page->pprev_hash = 0xe;
- spin_lock(&pgd_spinlock);
- (unsigned long *)page->next_hash = pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- pgd_cache_size += 3;
- spin_unlock(&pgd_spinlock);
- }
- return ret;
+ unsigned long tmp;
+
+ tmp = __srmmu_get_nocache(size, align);
+
+ memset((void *)tmp, 0, size);
+
+ return tmp;
}
-static void srmmu_free_pte_slow(pte_t *pte)
+/* XXX Make this SMP safe - Anton */
+void srmmu_free_nocache(unsigned long vaddr, int size)
{
+ int offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+ size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+ while(size--) {
+ clear_bit(offset + size, srmmu_nocache_bitmap);
+ srmmu_nocache_used--;
+ }
+
+ if (offset < srmmu_nocache_low)
+ srmmu_nocache_low = offset;
}
-static void srmmu_free_pgd_slow(pgd_t *pgd)
+void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
+
+void srmmu_nocache_init(void)
{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long paddr, vaddr;
+ unsigned long pteval;
+
+ srmmu_nocache_pool = __alloc_bootmem(SRMMU_NOCACHE_SIZE, PAGE_SIZE, 0UL);
+ memset(srmmu_nocache_pool, 0, SRMMU_NOCACHE_SIZE);
+
+ srmmu_nocache_bitmap = __alloc_bootmem(SRMMU_NOCACHE_BITMAP_SIZE, SMP_CACHE_BYTES, 0UL);
+ memset(srmmu_nocache_bitmap, 0, SRMMU_NOCACHE_BITMAP_SIZE);
+
+ srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
+ init_mm.pgd = srmmu_swapper_pg_dir;
+
+ srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, SRMMU_NOCACHE_END);
+
+ paddr = __pa((unsigned long)srmmu_nocache_pool);
+ vaddr = SRMMU_NOCACHE_VADDR;
+
+ while (vaddr < SRMMU_NOCACHE_END) {
+ pgd = pgd_offset_k(vaddr);
+ pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+ pte = pte_offset(__nocache_fix(pmd), vaddr);
+
+ pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
+
+ if (srmmu_cache_pagetables)
+ pteval |= SRMMU_CACHE;
+
+ set_pte(__nocache_fix(pte), pteval);
+
+ vaddr += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ }
+
+ flush_cache_all();
+ flush_tlb_all();
}
-static inline void srmmu_pte_free(pte_t *pte)
+static inline pgd_t *srmmu_pgd_alloc(void)
{
- struct page *page = mem_map + MAP_NR(pte);
+ pgd_t *pgd = NULL;
- spin_lock(&pte_spinlock);
- if (!page->pprev_hash) {
- (unsigned long *)page->next_hash = pte_quicklist;
- pte_quicklist = (unsigned long *)page;
+ pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+ if (pgd) {
+ pgd_t *init = pgd_offset_k(0);
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
- (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pte) >> 8) & 15));
- pgtable_cache_size++;
- spin_unlock(&pte_spinlock);
+
+ return pgd;
+}
+
+static void srmmu_pgd_free(pgd_t *pgd)
+{
+ srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}
static pte_t *srmmu_pte_alloc(pmd_t * pmd, unsigned long address)
{
address = (address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
if(srmmu_pmd_none(*pmd)) {
- pte_t *page = srmmu_get_pte_fast();
-
- if (page) {
+ pte_t *page = (pte_t *)srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+ if(page) {
+ spin_unlock(&pte_spinlock);
pmd_set(pmd, page);
return page + address;
}
- page = srmmu_get_pte_slow();
- if(srmmu_pmd_none(*pmd)) {
- if(page) {
- spin_unlock(&pte_spinlock);
- pmd_set(pmd, page);
- return page + address;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- if (page) {
- (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
- pgtable_cache_size++;
- spin_unlock(&pte_spinlock);
- }
+ /* XXX fix this - Anton */
+ pmd_set(pmd, BAD_PAGETABLE);
+ return NULL;
}
if(srmmu_pmd_bad(*pmd)) {
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ /* XXX fix this - Anton */
pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
return ((pte_t *) pmd_page(*pmd)) + address;
}
-/* Real three-level page tables on SRMMU. */
-static void srmmu_pmd_free(pmd_t * pmd)
+static inline void srmmu_pte_free(pte_t *pte)
{
- return srmmu_pte_free((pte_t *)pmd);
+ srmmu_free_nocache((unsigned long)pte, SRMMU_PTE_TABLE_SIZE);
}
static pmd_t *srmmu_pmd_alloc(pgd_t * pgd, unsigned long address)
{
address = (address >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
if(srmmu_pgd_none(*pgd)) {
- pmd_t *page = (pmd_t *)srmmu_get_pte_fast();
-
- if (page) {
+ pmd_t *page = (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ if(page) {
+ spin_unlock(&pte_spinlock);
pgd_set(pgd, page);
return page + address;
}
- page = (pmd_t *)srmmu_get_pte_slow();
- if(srmmu_pgd_none(*pgd)) {
- if(page) {
- spin_unlock(&pte_spinlock);
- pgd_set(pgd, page);
- return page + address;
- }
- pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
- return NULL;
- }
- if (page) {
- (unsigned int)(((struct page *)pte_quicklist)->pprev_hash) = 0xffff;
- pgtable_cache_size++;
- spin_unlock(&pte_spinlock);
- }
+ /* XXX fix this - Anton */
+ pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
+ return NULL;
}
if(srmmu_pgd_bad(*pgd)) {
printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
+ /* XXX fix this - Anton */
pgd_set(pgd, (pmd_t *) BAD_PAGETABLE);
return NULL;
}
return (pmd_t *) pgd_page(*pgd) + address;
}
-static void srmmu_pgd_free(pgd_t *pgd)
-{
- struct page *page = mem_map + MAP_NR(pgd);
-
- spin_lock(&pgd_spinlock);
- if (!page->pprev_hash) {
- (unsigned long *)page->next_hash = pgd_quicklist;
- pgd_quicklist = (unsigned long *)page;
- }
- (unsigned int)page->pprev_hash |= (1 << ((((unsigned long)pgd) >> 10) & 3));
- pgd_cache_size++;
- spin_unlock(&pgd_spinlock);
-}
-
-static pgd_t *srmmu_pgd_alloc(void)
+static void srmmu_pmd_free(pmd_t * pmd)
{
- pgd_t *ret;
-
- ret = srmmu_get_pgd_fast();
- if (ret) return ret;
- return srmmu_get_pgd_slow();
+ srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}
-
static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
{
struct task_struct * p;
- struct page *page;
read_lock(&tasklist_lock);
for_each_task(p) {
@@ -443,24 +443,6 @@ static void srmmu_set_pgdir(unsigned long address, pgd_t entry)
*pgd_offset(p->mm,address) = entry;
}
read_unlock(&tasklist_lock);
- spin_lock(&pgd_spinlock);
- address >>= SRMMU_PGDIR_SHIFT;
- for (page = (struct page *)pgd_quicklist; page; page = page->next_hash) {
- pgd_t *pgd = (pgd_t *)page_address(page);
- unsigned int mask = (unsigned int)page->pprev_hash;
-
- if (mask & 1)
- pgd[address + 0 * SRMMU_PTRS_PER_PGD] = entry;
- if (mask & 2)
- pgd[address + 1 * SRMMU_PTRS_PER_PGD] = entry;
- if (mask & 4)
- pgd[address + 2 * SRMMU_PTRS_PER_PGD] = entry;
- if (mask & 8)
- pgd[address + 3 * SRMMU_PTRS_PER_PGD] = entry;
- if (mask)
- flush_chunk((unsigned long)pgd);
- }
- spin_unlock(&pgd_spinlock);
}
static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
@@ -468,62 +450,6 @@ static void srmmu_set_pte_cacheable(pte_t *ptep, pte_t pteval)
srmmu_set_entry(ptep, pte_val(pteval));
}
-static void srmmu_set_pte_nocache_cypress(pte_t *ptep, pte_t pteval)
-{
- register unsigned long a, b, c, d, e, f, g;
- unsigned long line, page;
-
- srmmu_set_entry(ptep, pte_val(pteval));
- page = ((unsigned long)ptep) & PAGE_MASK;
- line = (page + PAGE_SIZE) - 0x100;
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- goto inside;
- do {
- line -= 0x100;
- inside:
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
- "sta %%g0, [%0 + %2] %1\n\t"
- "sta %%g0, [%0 + %3] %1\n\t"
- "sta %%g0, [%0 + %4] %1\n\t"
- "sta %%g0, [%0 + %5] %1\n\t"
- "sta %%g0, [%0 + %6] %1\n\t"
- "sta %%g0, [%0 + %7] %1\n\t"
- "sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (line),
- "i" (ASI_M_FLUSH_PAGE),
- "r" (a), "r" (b), "r" (c), "r" (d),
- "r" (e), "r" (f), "r" (g));
- } while(line != page);
-}
-
-static void srmmu_set_pte_nocache_viking(pte_t *ptep, pte_t pteval)
-{
- unsigned long vaddr;
- int set;
- int i;
-
- set = ((unsigned long)ptep >> 5) & 0x7f;
- vaddr = (KERNBASE + PAGE_SIZE) | (set << 5);
- srmmu_set_entry(ptep, pte_val(pteval));
- for (i = 0; i < 8; i++) {
- __asm__ __volatile__ ("ld [%0], %%g0" : : "r" (vaddr));
- vaddr += PAGE_SIZE;
- }
-}
-
-static void srmmu_quick_kernel_fault(unsigned long address)
-{
-#ifdef CONFIG_SMP
- printk("CPU[%d]: Kernel faults at addr=0x%08lx\n",
- smp_processor_id(), address);
- while (1) ;
-#else
- printk("Kernel faults at addr=0x%08lx\n", address);
- printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
- die_if_kernel("SRMMU bolixed...", current->thread.kregs);
-#endif
-}
-
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
@@ -567,8 +493,9 @@ static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
spin_lock(&srmmu_context_spinlock);
alloc_context(old_mm, mm);
spin_unlock(&srmmu_context_spinlock);
- ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+ srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
}
+ /* XXX should we hyper_flush_whole_icache() here - Anton */
srmmu_set_context(mm->context);
}
@@ -581,12 +508,13 @@ void srmmu_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_ty
unsigned long tmp;
physaddr &= PAGE_MASK;
- pgdp = srmmu_pgd_offset(&init_mm, virt_addr);
+ pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
ptep = pte_offset(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE;
- /* I need to test whether this is consistent over all
+ /*
+ * I need to test whether this is consistent over all
* sun4m's. The bus_type represents the upper 4 bits of
* 36-bit physical address on the I/O space lines...
*/
@@ -606,7 +534,7 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
pmd_t *pmdp;
pte_t *ptep;
- pgdp = srmmu_pgd_offset(&init_mm, virt_addr);
+ pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
ptep = pte_offset(pmdp, virt_addr);
@@ -615,7 +543,8 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
flush_tlb_all();
}
-/* On the SRMMU we do not have the problems with limited tlb entries
+/*
+ * On the SRMMU we do not have the problems with limited tlb entries
* for mapping kernel pages, so we just take things from the free page
* pool. As a side effect we are putting a little too much pressure
* on the gfp() subsystem. This setup also makes the logic of the
@@ -646,14 +575,14 @@ extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long p
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
-extern void tsunami_flush_chunk(unsigned long chunk);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);
-/* Workaround, until we find what's going on with Swift. When low on memory,
+/*
+ * Workaround, until we find what's going on with Swift. When low on memory,
* it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
* out it is already in page tables/ fault again on the same instruction.
* I really don't understand it, have checked it and contexts
@@ -693,7 +622,6 @@ extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long pag
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
-extern void swift_flush_chunk(unsigned long chunk);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct mm_struct *mm,
@@ -727,11 +655,11 @@ void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
/* same as above: srmmu_flush_tlb_page() */
}
}
- module_stats.invpg++;
}
#endif
-/* The following are all MBUS based SRMMU modules, and therefore could
+/*
+ * The following are all MBUS based SRMMU modules, and therefore could
* be found in a multiprocessor configuration. On the whole, these
* chips seems to be much more touchy about DVMA and page tables
* with respect to cache coherency.
@@ -900,11 +828,6 @@ static void cypress_flush_page_to_ram(unsigned long page)
} while(line != page);
}
-static void cypress_flush_chunk(unsigned long chunk)
-{
- cypress_flush_page_to_ram(chunk);
-}
-
/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
@@ -921,7 +844,6 @@ static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_add
static void cypress_flush_tlb_all(void)
{
srmmu_flush_whole_tlb();
- module_stats.invall++;
}
static void cypress_flush_tlb_mm(struct mm_struct *mm)
@@ -936,7 +858,6 @@ static void cypress_flush_tlb_mm(struct mm_struct *mm)
: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
"i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
: "g5");
- module_stats.invmm++;
FLUSH_END
}
@@ -959,7 +880,6 @@ static void cypress_flush_tlb_range(struct mm_struct *mm, unsigned long start, u
"r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
"i" (ASI_M_FLUSH_PROBE)
: "g5", "cc");
- module_stats.invrnge++;
FLUSH_END
}
@@ -977,7 +897,6 @@ static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long pag
: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
"i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
: "g5");
- module_stats.invpg++;
FLUSH_END
}
@@ -993,8 +912,6 @@ extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
-extern void viking_flush_chunk(unsigned long chunk);
-extern void viking_mxcc_flush_chunk(unsigned long chunk);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct mm_struct *mm, unsigned long start,
@@ -1014,7 +931,6 @@ extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
-extern void hypersparc_flush_chunk(unsigned long chunk);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
@@ -1023,39 +939,8 @@ extern void hypersparc_flush_tlb_range(struct mm_struct *mm, unsigned long start
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);
-static void srmmu_set_pte_nocache_hyper(pte_t *ptep, pte_t pteval)
-{
- unsigned long page = ((unsigned long)ptep) & PAGE_MASK;
-
- srmmu_set_entry(ptep, pte_val(pteval));
- hypersparc_flush_page_to_ram(page);
-}
-
-static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{
- srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (__pa((unsigned long) pgdp) >> 4))));
- hypersparc_flush_page_to_ram((unsigned long)ctxp);
- hyper_flush_whole_icache();
-}
-
-static void hypersparc_switch_mm(struct mm_struct *old_mm,
- struct mm_struct *mm, struct task_struct *tsk, int cpu)
-{
- if(mm->context == NO_CONTEXT) {
- ctxd_t *ctxp;
-
- spin_lock(&srmmu_context_spinlock);
- alloc_context(old_mm, mm);
- spin_unlock(&srmmu_context_spinlock);
- ctxp = &srmmu_context_table[mm->context];
- srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (__pa((unsigned long) mm->pgd) >> 4))));
- hypersparc_flush_page_to_ram((unsigned long)ctxp);
- }
- hyper_flush_whole_icache();
- srmmu_set_context(mm->context);
-}
-
-/* NOTE: All of this startup code assumes the low 16mb (approx.) of
+/*
+ * NOTE: All of this startup code assumes the low 16mb (approx.) of
* kernel mappings are done with one single contiguous chunk of
* ram. On small ram machines (classics mainly) we only get
* around 8mb mapped for us.
@@ -1067,35 +952,62 @@ void __init early_pgtable_allocfail(char *type)
prom_halt();
}
-static inline void srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+{
+ pgd_t *pgdp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ while(start < end) {
+ pgdp = pgd_offset_k(start);
+ if(pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+ if (pmdp == NULL)
+ early_pgtable_allocfail("pmd");
+ memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+ pgd_set(__nocache_fix(pgdp), pmdp);
+ }
+ pmdp = pmd_offset(__nocache_fix(pgdp), start);
+ if(pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
+ if (ptep == NULL)
+ early_pgtable_allocfail("pte");
+ memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
+ pmd_set(__nocache_fix(pmdp), ptep);
+ }
+ start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
+ }
+}
+
+void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
while(start < end) {
- pgdp = srmmu_pgd_offset(&init_mm, start);
- if(srmmu_pgd_none(*pgdp)) {
- pmdp = __alloc_bootmem(SRMMU_PMD_TABLE_SIZE,
- SRMMU_PMD_TABLE_SIZE, 0UL);
+ pgdp = pgd_offset_k(start);
+ if(pgd_none(*pgdp)) {
+ pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
- srmmu_pgd_set(pgdp, pmdp);
+ pgd_set(pgdp, pmdp);
}
- pmdp = srmmu_pmd_offset(pgdp, start);
- if(srmmu_pmd_none(*pmdp)) {
- ptep = __alloc_bootmem(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE, 0UL);
+ pmdp = pmd_offset(pgdp, start);
+ if(pmd_none(*pmdp)) {
+ ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
memset(ptep, 0, SRMMU_PTE_TABLE_SIZE);
- srmmu_pmd_set(pmdp, ptep);
+ pmd_set(pmdp, ptep);
}
start = (start + SRMMU_PMD_SIZE) & SRMMU_PMD_MASK;
}
}
-/* This is much cleaner than poking around physical address space
+/*
+ * This is much cleaner than poking around physical address space
* looking at the prom's page table directly which is what most
* other OS's do. Yuck... this is much better.
*/
@@ -1131,35 +1043,34 @@ void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
what = 2;
}
- pgdp = srmmu_pgd_offset(&init_mm, start);
+ pgdp = pgd_offset_k(start);
if(what == 2) {
- *pgdp = __pgd(prompte);
+ *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
start += SRMMU_PGDIR_SIZE;
continue;
}
- if(srmmu_pgd_none(*pgdp)) {
- pmdp = __alloc_bootmem(SRMMU_PMD_TABLE_SIZE,
- SRMMU_PMD_TABLE_SIZE, 0UL);
+ if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+ pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL)
early_pgtable_allocfail("pmd");
- memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
- srmmu_pgd_set(pgdp, pmdp);
+ memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+ pgd_set(__nocache_fix(pgdp), pmdp);
}
- pmdp = srmmu_pmd_offset(pgdp, start);
+ pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
if(what == 1) {
- *pmdp = __pmd(prompte);
+ *(pmd_t *)__nocache_fix(pmdp) = __pmd(prompte);
start += SRMMU_PMD_SIZE;
continue;
}
- if(srmmu_pmd_none(*pmdp)) {
- ptep = __alloc_bootmem(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE, 0UL);
+ if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+ ptep = (pte_t *)__srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE, SRMMU_PTE_TABLE_SIZE);
if (ptep == NULL)
early_pgtable_allocfail("pte");
- memset(ptep, 0, SRMMU_PTE_TABLE_SIZE);
- srmmu_pmd_set(pmdp, ptep);
+ memset(__nocache_fix(ptep), 0, SRMMU_PTE_TABLE_SIZE);
+ pmd_set(__nocache_fix(pmdp), ptep);
}
- ptep = srmmu_pte_offset(pmdp, start);
- *ptep = __pte(prompte);
+ ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
+ *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
start += PAGE_SIZE;
}
}
@@ -1171,15 +1082,14 @@ static unsigned long end_of_phys_memory __initdata = 0;
/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
- pgd_t *pgdp = srmmu_pgd_offset(&init_mm, vaddr);
+ pgd_t *pgdp = pgd_offset_k(vaddr);
unsigned long big_pte;
big_pte = KERNEL_PTE(phys_base >> 4);
- *pgdp = __pgd(big_pte);
+ *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
-/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE.
- */
+/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
@@ -1254,53 +1164,40 @@ void __init srmmu_paging_init(void)
prom_printf("Something wrong, can't find cpu node in paging_init.\n");
prom_halt();
}
-
- memset(swapper_pg_dir, 0, PAGE_SIZE);
last_valid_pfn = end_pfn = bootmem_init();
- srmmu_allocate_ptable_skeleton(KERNBASE, (unsigned long)__va(end_of_phys_memory));
-#if CONFIG_SUN_IO
- srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
- srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
-#endif
-
- /* This does not logically belong here, but we need to
- * call it at the moment we are able to use the bootmem
- * allocator.
- */
- sun_serial_setup();
-
+ srmmu_nocache_init();
srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
map_kernel();
-#define BOOTMEM_BROKEN
-#ifdef BOOTMEM_BROKEN
- srmmu_context_table = __alloc_bootmem(num_contexts*sizeof(ctxd_t)*2, SMP_CACHE_BYTES, 0UL);
- (unsigned long)srmmu_context_table += num_contexts*sizeof(ctxd_t);
- (unsigned long)srmmu_context_table &= ~(num_contexts*sizeof(ctxd_t)-1);
-#else
- srmmu_context_table = __alloc_bootmem(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t), 0UL);
-#endif
+ /* ctx table has to be physically aligned to its size */
+ srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
+ srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
- srmmu_ctx_table_phys = (ctxd_t *) __pa((unsigned long) srmmu_context_table);
for(i = 0; i < num_contexts; i++)
- ctxd_set(&srmmu_context_table[i], swapper_pg_dir);
+ srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
flush_cache_all();
- if(BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page) {
- unsigned long start = (unsigned long)srmmu_context_table;
- unsigned long end = PAGE_ALIGN((unsigned long)srmmu_context_table + num_contexts*sizeof(ctxd_t));
-
- while(start < end) {
- viking_flush_page(start);
- start += PAGE_SIZE;
- }
- }
- srmmu_set_ctable_ptr((unsigned long) srmmu_ctx_table_phys);
+ srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
flush_tlb_all();
poke_srmmu();
+#if CONFIG_SUN_IO
+ srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
+ srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
+#endif
+
+ flush_cache_all();
+ flush_tlb_all();
+
+ /*
+ * This does not logically belong here, but we need to
+ * call it at the moment we are able to use the bootmem
+ * allocator.
+ */
+ sun_serial_setup();
+
sparc_context_init(num_contexts);
{
@@ -1309,36 +1206,19 @@ void __init srmmu_paging_init(void)
zones_size[ZONE_DMA] = end_pfn;
free_area_init(zones_size);
}
-
-#ifdef CONFIG_BLK_DEV_INITRD
- /* If initial ramdisk was specified with physical address,
- translate it here, as the p2v translation in srmmu
- is not straightforward. */
- if (initrd_start && initrd_start < KERNBASE) {
- initrd_start = __va(initrd_start);
- initrd_end = __va(initrd_end);
- if (initrd_end <= initrd_start)
- initrd_start = 0;
- }
-#endif
-
}
static int srmmu_mmu_info(char *buf)
{
return sprintf(buf,
"MMU type\t: %s\n"
- "invall\t\t: %d\n"
- "invmm\t\t: %d\n"
- "invrnge\t\t: %d\n"
- "invpg\t\t: %d\n"
"contexts\t: %d\n"
+ "nocache total\t: %d\n"
+ "nocache used\t: %d\n"
, srmmu_name,
- module_stats.invall,
- module_stats.invmm,
- module_stats.invrnge,
- module_stats.invpg,
- num_contexts
+ num_contexts,
+ SRMMU_NOCACHE_SIZE,
+ (srmmu_nocache_used << SRMMU_NOCACHE_BITMAP_SHIFT)
);
}
@@ -1351,7 +1231,7 @@ static void srmmu_destroy_context(struct mm_struct *mm)
if(mm->context != NO_CONTEXT) {
flush_cache_mm(mm);
- ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
+ srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
flush_tlb_mm(mm);
spin_lock(&srmmu_context_spinlock);
free_context(mm->context);
@@ -1429,30 +1309,6 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
}
}
-static void hypersparc_destroy_context(struct mm_struct *mm)
-{
- if(mm->context != NO_CONTEXT) {
- ctxd_t *ctxp;
-
- /* HyperSparc is copy-back, any data for this
- * process in a modified cache line is stale
- * and must be written back to main memory now
- * else we eat shit later big time.
- */
- flush_cache_mm(mm);
-
- ctxp = &srmmu_context_table[mm->context];
- srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (__pa((unsigned long) swapper_pg_dir) >> 4))));
- hypersparc_flush_page_to_ram((unsigned long)ctxp);
-
- flush_tlb_mm(mm);
- spin_lock(&srmmu_context_spinlock);
- free_context(mm->context);
- spin_unlock(&srmmu_context_spinlock);
- mm->context = NO_CONTEXT;
- }
-}
-
/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
@@ -1527,7 +1383,7 @@ static void __init poke_hypersparc(void)
srmmu_set_mmureg(mreg);
-#if 0 /* I think this is bad news... -DaveM */
+#if 0 /* XXX I think this is bad news... -DaveM */
hyper_clear_all_tags();
#endif
@@ -1543,7 +1399,6 @@ static void __init init_hypersparc(void)
init_vac_layout();
- BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_hyper, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
@@ -1561,11 +1416,7 @@ static void __init init_hypersparc(void)
BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
- BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_mm, hypersparc_switch_mm, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
poke_srmmu = poke_hypersparc;
@@ -1615,7 +1466,6 @@ static void __init init_cypress_common(void)
{
init_vac_layout();
- BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_cypress, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
@@ -1629,7 +1479,6 @@ static void __init init_cypress_common(void)
BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, cypress_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
@@ -1673,7 +1522,8 @@ static void __init poke_swift(void)
/* Enable I & D caches */
mreg = srmmu_get_mmureg();
mreg |= (SWIFT_IE | SWIFT_DE);
- /* The Swift branch folding logic is completely broken. At
+ /*
+ * The Swift branch folding logic is completely broken. At
* trap time, if things are just right, if can mistakenly
* think that a trap is coming from kernel mode when in fact
* it is coming from user mode (it mis-executes the branch in
@@ -1702,7 +1552,8 @@ static void __init init_swift(void)
case 0x30:
srmmu_modtype = Swift_lots_o_bugs;
hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
- /* Gee george, I wonder why Sun is so hush hush about
+ /*
+ * Gee george, I wonder why Sun is so hush hush about
* this hardware bug... really braindamage stuff going
* on here. However I think we can find a way to avoid
* all of the workaround overhead under Linux. Basically,
@@ -1723,7 +1574,8 @@ static void __init init_swift(void)
case 0x31:
srmmu_modtype = Swift_bad_c;
hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
- /* You see Sun allude to this hardware bug but never
+ /*
+ * You see Sun allude to this hardware bug but never
* admit things directly, they'll say things like,
* "the Swift chip cache problems" or similar.
*/
@@ -1738,7 +1590,6 @@ static void __init init_swift(void)
BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
@@ -1753,7 +1604,8 @@ static void __init init_swift(void)
flush_page_for_dma_global = 0;
- /* Are you now convinced that the Swift is one of the
+ /*
+ * Are you now convinced that the Swift is one of the
* biggest VLSI abortions of all time? Bravo Fujitsu!
* Fujitsu, the !#?!%$'d up processor people. I bet if
* you examined the microcode of the Swift you'd find
@@ -1815,21 +1667,15 @@ static void turbosparc_flush_page_for_dma(unsigned long page)
turbosparc_flush_dcache();
}
-static void turbosparc_flush_chunk(unsigned long chunk)
-{
-}
-
static void turbosparc_flush_tlb_all(void)
{
srmmu_flush_whole_tlb();
- module_stats.invall++;
}
static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
FLUSH_BEGIN(mm)
srmmu_flush_whole_tlb();
- module_stats.invmm++;
FLUSH_END
}
@@ -1837,7 +1683,6 @@ static void turbosparc_flush_tlb_range(struct mm_struct *mm, unsigned long start
{
FLUSH_BEGIN(mm)
srmmu_flush_whole_tlb();
- module_stats.invrnge++;
FLUSH_END
}
@@ -1845,7 +1690,6 @@ static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long
{
FLUSH_BEGIN(vma->vm_mm)
srmmu_flush_whole_tlb();
- module_stats.invpg++;
FLUSH_END
}
@@ -1906,7 +1750,6 @@ static void __init init_turbosparc(void)
BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
@@ -1927,7 +1770,8 @@ static void __init poke_tsunami(void)
static void __init init_tsunami(void)
{
- /* Tsunami's pretty sane, Sun and TI actually got it
+ /*
+ * Tsunami's pretty sane, Sun and TI actually got it
* somewhat right this time. Fujitsu should have
* taken some lessons from them.
*/
@@ -1940,7 +1784,6 @@ static void __init init_tsunami(void)
BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
@@ -1968,7 +1811,8 @@ static void __init poke_viking(void)
mxcc_control &= ~(MXCC_CTL_RRC);
mxcc_set_creg(mxcc_control);
- /* We don't need memory parity checks.
+ /*
+ * We don't need memory parity checks.
* XXX This is a mess, have to dig out later. ecd.
viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
*/
@@ -1980,9 +1824,7 @@ static void __init poke_viking(void)
mreg &= ~(VIKING_TCENABLE);
if(smp_catch++) {
- /* Must disable mixed-cmd mode here for
- * other cpu's.
- */
+ /* Must disable mixed-cmd mode here for other cpu's. */
bpreg = viking_get_bpreg();
bpreg &= ~(VIKING_ACTION_MIX);
viking_set_bpreg(bpreg);
@@ -2021,14 +1863,12 @@ static void __init init_viking(void)
viking_mxcc_present = 0;
msi_set_sync();
- BTFIXUPSET_CALL(set_pte, srmmu_set_pte_nocache_viking, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
-
- /* We need this to make sure old viking takes no hits
+ /*
+ * We need this to make sure old viking takes no hits
* on it's cache for dma snoops to workaround the
* "load from non-cacheable memory" interrupt bug.
* This is only necessary because of the new way in
@@ -2041,7 +1881,7 @@ static void __init init_viking(void)
srmmu_name = "TI Viking/MXCC";
viking_mxcc_present = 1;
- BTFIXUPSET_CALL(flush_chunk, viking_mxcc_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
+ srmmu_cache_pagetables = 1;
/* MXCC vikings lack the DMA snooping bug. */
BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
@@ -2118,8 +1958,10 @@ static void __init get_srmmu_type(void)
return;
}
- /* Now Fujitsu TurboSparc. It might happen that it is
- in Swift emulation mode, so we will check later... */
+ /*
+ * Now Fujitsu TurboSparc. It might happen that it is
+ * in Swift emulation mode, so we will check later...
+ */
if (psr_typ == 0 && psr_vers == 5) {
init_turbosparc();
return;
@@ -2166,74 +2008,10 @@ static void __init get_srmmu_type(void)
srmmu_is_bad();
}
+/* don't laugh: page tables are static now, so there is nothing to reclaim */
static int srmmu_check_pgt_cache(int low, int high)
{
- struct page *page, *page2;
- int freed = 0;
-
- if (pgtable_cache_size > high) {
- spin_lock(&pte_spinlock);
- for (page2 = NULL, page = (struct page *)pte_quicklist; page;) {
- if ((unsigned int)page->pprev_hash == 0xffff) {
- if (page2)
- page2->next_hash = page->next_hash;
- else
- (struct page *)pte_quicklist = page->next_hash;
- page->next_hash = NULL;
- page->pprev_hash = NULL;
- pgtable_cache_size -= 16;
- __free_page(page);
- freed++;
- if (page2)
- page = page2->next_hash;
- else
- page = (struct page *)pte_quicklist;
- if (pgtable_cache_size <= low)
- break;
- continue;
- }
- page2 = page;
- page = page->next_hash;
- }
- spin_unlock(&pte_spinlock);
- }
- if (pgd_cache_size > high / 4) {
- spin_lock(&pgd_spinlock);
- for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
- if ((unsigned int)page->pprev_hash == 0xf) {
- if (page2)
- page2->next_hash = page->next_hash;
- else
- (struct page *)pgd_quicklist = page->next_hash;
- page->next_hash = NULL;
- page->pprev_hash = NULL;
- pgd_cache_size -= 4;
- __free_page(page);
- freed++;
- if (page2)
- page = page2->next_hash;
- else
- page = (struct page *)pgd_quicklist;
- if (pgd_cache_size <= low / 4)
- break;
- continue;
- }
- page2 = page;
- page = page->next_hash;
- }
- spin_unlock(&pgd_spinlock);
- }
- return freed;
-}
-
-static void srmmu_flush_dma_area(unsigned long addr, int len)
-{
- /* XXX Later */
-}
-
-static void srmmu_inval_dma_area(unsigned long addr, int len)
-{
- /* XXX Later */
+ return 0;
}
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
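The deleted loops above document the old reclaim policy: a pte page carried a free-mask in page->pprev_hash, 0xffff meaning all 16 of its 256-byte pte tables were free (hence pgtable_cache_size -= 16), while a pgd page used 0xf for its four 1 KB pgds. Both loops were the same unlink-and-free scan over a quicklist; a condensed sketch of that shared pattern, with the helper name and fully_free parameter invented for illustration:

/* Condensed sketch of the removed quicklist reclaim (illustrative
 * name; fully_free is the all-tables-free mask, 0xffff for pte pages,
 * 0xf for pgd pages).
 */
static int drain_quicklist_sketch(struct page **list,
				  unsigned long fully_free,
				  int tables_per_page, int low)
{
	struct page *prev = NULL, *p = *list;
	int freed = 0;

	while (p) {
		struct page *next = p->next_hash;
		if ((unsigned long)p->pprev_hash == fully_free) {
			/* Every table in this page is free: unlink the
			 * page and give it back to the page allocator. */
			if (prev)
				prev->next_hash = next;
			else
				*list = next;
			pgtable_cache_size -= tables_per_page;
			__free_page(p);
			freed++;
			if (pgtable_cache_size <= low)
				break;
		} else
			prev = p;
		p = next;
	}
	return freed;
}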
@@ -2302,10 +2080,6 @@ void __init ld_mmu_srmmu(void)
#ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
- BTFIXUPSET_CALL(get_pte_fast, srmmu_get_pte_fast, BTFIXUPCALL_RETINT(0));
- BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_RETINT(0));
- BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
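These BTFIXUPSET_CALL lines pick, once at boot, which processor-specific routine each generic MMU entry point resolves to; BTFIXUPCALL_NORM patches in a normal call, BTFIXUPCALL_NOP turns the call site into a no-op. As a rough analogy only, since the real mechanism rewrites the call instructions in place rather than indirecting through pointers, the effect is that of filling an ops table:

/* Pointer-table analogy for BTFIXUP, sketch only: the kernel's real
 * mechanism patches call sites at boot instead of paying an indirect
 * call on every invocation.
 */
struct srmmu_ops_sketch {
	int  (*do_check_pgt_cache)(int low, int high);
	void (*set_pgdir)(unsigned long address, pgd_t entry);
};

static struct srmmu_ops_sketch mmu_ops;	/* filled once during boot */

static void __init ld_mmu_srmmu_sketch(void)
{
	mmu_ops.do_check_pgt_cache = srmmu_check_pgt_cache;
	mmu_ops.set_pgdir = srmmu_set_pgdir;
}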
@@ -2317,7 +2091,7 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
- BTFIXUPSET_SETHI(none_mask, 0xF0000000); /* P3: is it used? */
+ BTFIXUPSET_SETHI(none_mask, 0xF0000000); /* XXX P3: is it used? */
BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
@@ -2369,17 +2143,9 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(free_task_struct, srmmu_free_task_struct, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(get_task_struct, srmmu_get_task_struct, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(quick_kernel_fault, srmmu_quick_kernel_fault, BTFIXUPCALL_NORM);
-
/* SRMMU specific. */
- BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
-/* hmm isn't flush_dma_area the same thing as flush_page_for_dma? */
-/* It is, except flush_page_for_dma was local to srmmu.c */
- BTFIXUPSET_CALL(mmu_flush_dma_area, srmmu_flush_dma_area, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(mmu_inval_dma_area, srmmu_inval_dma_area, BTFIXUPCALL_NORM);
-
get_srmmu_type();
patch_window_trap_handlers();
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index b4b9a8276..944195694 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1,10 +1,10 @@
-/* $Id: sun4c.c,v 1.192 2000/05/09 17:40:13 davem Exp $
+/* $Id: sun4c.c,v 1.194 2000/06/05 06:08:45 anton Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
- * Copyright (C) 1997,99 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1997-2000 Anton Blanchard (anton@linuxcare.com)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
@@ -597,14 +597,6 @@ static void sun4c_unmap_dma_area(unsigned long busa, int len)
/* XXX Implement this */
}
-static void sun4c_inval_dma_area(unsigned long virt, int len)
-{
-}
-
-static void sun4c_flush_dma_area(unsigned long virt, int len)
-{
-}
-
/* TLB management. */
/* Don't change this struct without changing entry.S. This is used
@@ -1011,13 +1003,6 @@ void sun4c_grow_kernel_ring(void)
}
}
-/* This is now a fast in-window trap handler to avoid any and all races. */
-static void sun4c_quick_kernel_fault(unsigned long address)
-{
- printk("Kernel faults at addr 0x%08lx\n", address);
- panic("sun4c kernel fault handler bolixed...");
-}
-
/* 2 page buckets for task struct and kernel stack allocation.
*
* TASK_STACK_BEGIN
@@ -2280,22 +2265,6 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
return (pgd_t *)ret;
}
-static int sun4c_check_pgt_cache(int low, int high)
-{
- int freed = 0;
- if (pgtable_cache_size > high) {
- do {
- if (pgd_quicklist)
- free_pgd_slow(get_pgd_fast()), freed++;
- if (pmd_quicklist)
- free_pmd_slow(get_pmd_fast()), freed++;
- if (pte_quicklist)
- free_pte_slow(get_pte_fast()), freed++;
- } while (pgtable_cache_size > low);
- }
- return freed;
-}
-
static void sun4c_set_pgdir(unsigned long address, pgd_t entry)
{
/* Nothing to do */
@@ -2361,11 +2330,6 @@ static pte_t *sun4c_pte_alloc(pmd_t * pmd, unsigned long address)
return (pte_t *) sun4c_pmd_page(*pmd) + address;
}
-static pte_t *sun4c_pte_get(void)
-{
- return sun4c_get_pte_fast();
-}
-
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
@@ -2389,6 +2353,22 @@ static pgd_t *sun4c_pgd_alloc(void)
return sun4c_get_pgd_fast();
}
+static int sun4c_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+ if (pgtable_cache_size > high) {
+ do {
+ if (pgd_quicklist)
+ sun4c_free_pgd_slow(sun4c_get_pgd_fast()), freed++;
+ if (pmd_quicklist)
+ sun4c_free_pmd_slow(sun4c_get_pmd_fast()), freed++;
+ if (pte_quicklist)
+ sun4c_free_pte_slow(sun4c_get_pte_fast()), freed++;
+ } while (pgtable_cache_size > low);
+ }
+ return freed;
+}
+
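With the helpers now called under their sun4c_ names, the function no longer depends on the BTFIXUP-dispatched generics being patched first. The caller side is the generic check_pgt_cache() hook run from the idle loop; a sketch of that call site, with the 25/50 watermarks assumed for illustration rather than taken from this patch:

/* Assumed caller (sketch): trim the quicklists back toward the low
 * watermark whenever the cache grows past the high one.
 */
static inline void check_pgt_cache(void)
{
	do_check_pgt_cache(25, 50);	/* low, high: values assumed */
}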
/* There are really two cases of aliases to watch out for, and these
* are:
*
@@ -2568,7 +2548,7 @@ void __init sun4c_paging_init(void)
memset(pg3, 0, PAGE_SIZE);
/* Save work later. */
- vaddr = SUN4C_VMALLOC_START;
+ vaddr = VMALLOC_START;
swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg0);
vaddr += SUN4C_PGDIR_SIZE;
swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg1);
@@ -2628,10 +2608,6 @@ void __init ld_mmu_sun4c(void)
#ifndef CONFIG_SMP
BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4c, BTFIXUPCALL_NORM);
#endif
- BTFIXUPSET_CALL(get_pte_fast, sun4c_pte_get, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(get_pgd_fast, sun4c_pgd_alloc, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(free_pte_slow, sun4c_free_pte_slow, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(free_pgd_slow, sun4c_free_pgd_slow, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(do_check_pgt_cache, sun4c_check_pgt_cache, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(set_pgdir, sun4c_set_pgdir, BTFIXUPCALL_NOP);
@@ -2727,14 +2703,11 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_translate_dvma, sun4c_translate_dvma, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(mmu_flush_dma_area, sun4c_flush_dma_area, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(mmu_inval_dma_area, sun4c_inval_dma_area, BTFIXUPCALL_NORM);
/* Task struct and kernel stack allocating/freeing. */
BTFIXUPSET_CALL(alloc_task_struct, sun4c_alloc_task_struct, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(get_task_struct, sun4c_get_task_struct, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(quick_kernel_fault, sun4c_quick_kernel_fault, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_info, sun4c_mmu_info, BTFIXUPCALL_NORM);
/* These should _never_ get called with two level tables. */
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
index 2d952f5ad..2ba8a6610 100644
--- a/arch/sparc/mm/swift.S
+++ b/arch/sparc/mm/swift.S
@@ -1,4 +1,4 @@
-/* $Id: swift.S,v 1.5 2000/05/09 17:40:13 davem Exp $
+/* $Id: swift.S,v 1.6 2000/06/04 06:23:53 anton Exp $
* swift.S: MicroSparc-II mmu/cache operations.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -30,7 +30,7 @@
*/
.globl swift_flush_cache_all, swift_flush_cache_mm
.globl swift_flush_cache_range, swift_flush_cache_page
- .globl swift_flush_page_for_dma, swift_flush_chunk
+ .globl swift_flush_page_for_dma
.globl swift_flush_page_to_ram
swift_flush_cache_all:
@@ -38,7 +38,6 @@ swift_flush_cache_mm:
swift_flush_cache_range:
swift_flush_cache_page:
swift_flush_page_for_dma:
-swift_flush_chunk:
swift_flush_page_to_ram:
sethi %hi(0x2000), %o0
1: subcc %o0, 0x10, %o0
@@ -190,10 +189,8 @@ swift_flush_cache_page_out:
* caches which are virtually indexed and tagged.
*/
.globl swift_flush_page_for_dma
- .globl swift_flush_chunk
.globl swift_flush_page_to_ram
swift_flush_page_for_dma:
-swift_flush_chunk:
swift_flush_page_to_ram:
andn %o0, (PAGE_SIZE - 1), %o1
#if 1
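The combined flush entry at the top of this file walks the whole cache by address: 0x2000 bytes (8 KB) in 0x10-byte (16-byte) lines, issuing one flush store per line from the top down. Rendered as C, with flush_line() a hypothetical stand-in for the sta to the flush ASI:

/* C rendering of the flush-all loop (sketch; flush_line() stands in
 * for the "sta %g0, [addr] ASI_..." flush store in the real code).
 */
static void swift_flush_all_sketch(void)
{
	unsigned long addr;

	for (addr = 0x2000; addr != 0; ) {	/* 8 KB cache */
		addr -= 0x10;			/* 16-byte line */
		flush_line(addr);		/* hypothetical helper */
	}
}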
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index 1eb8fd6da..316e00e3a 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -1,4 +1,4 @@
-/* $Id: tsunami.S,v 1.4 2000/05/09 17:40:13 davem Exp $
+/* $Id: tsunami.S,v 1.5 2000/06/04 06:23:53 anton Exp $
* tsunami.S: High speed MicroSparc-I mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -28,7 +28,7 @@
.globl tsunami_flush_cache_all, tsunami_flush_cache_mm
.globl tsunami_flush_cache_range, tsunami_flush_cache_page
.globl tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
- .globl tsunami_flush_sig_insns, tsunami_flush_chunk
+ .globl tsunami_flush_sig_insns
.globl tsunami_flush_tlb_all, tsunami_flush_tlb_mm
.globl tsunami_flush_tlb_range, tsunami_flush_tlb_page
@@ -46,7 +46,6 @@ tsunami_flush_cache_all:
WINDOW_FLUSH(%g4, %g5)
tsunami_flush_page_for_dma:
sta %g0, [%g0] ASI_M_IC_FLCLEAR
-tsunami_flush_chunk:
sta %g0, [%g0] ASI_M_DC_FLCLEAR
tsunami_flush_cache_out:
tsunami_flush_page_to_ram:
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
index 4e0832257..ffbec2342 100644
--- a/arch/sparc/mm/viking.S
+++ b/arch/sparc/mm/viking.S
@@ -1,4 +1,4 @@
-/* $Id: viking.S,v 1.16 2000/05/09 17:40:13 davem Exp $
+/* $Id: viking.S,v 1.17 2000/06/04 06:23:53 anton Exp $
* viking.S: High speed Viking cache/mmu operations
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
@@ -31,12 +31,10 @@ sun4dsmp_flush_tlb_spin:
.globl viking_flush_cache_range, viking_flush_cache_page
.globl viking_flush_page, viking_mxcc_flush_page
.globl viking_flush_page_for_dma, viking_flush_page_to_ram
- .globl viking_flush_chunk, viking_mxcc_flush_chunk
.globl viking_flush_sig_insns
.globl viking_flush_tlb_all, viking_flush_tlb_mm
.globl viking_flush_tlb_range, viking_flush_tlb_page
-viking_flush_chunk:
viking_flush_page:
sethi %hi(PAGE_OFFSET), %g2
sub %o0, %g2, %g3
@@ -109,10 +107,6 @@ viking_mxcc_flush_page:
9: retl
nop
-viking_mxcc_flush_chunk:
- retl
- nop
-
#define WINDOW_FLUSH(tmp1, tmp2) \
mov 0, tmp1; \
98: ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], tmp2; \