author     Ralf Baechle <ralf@linux-mips.org>  2000-02-05 06:47:02 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-02-05 06:47:02 +0000
commit     99a7e12f34b3661a0d1354eef83a0eef4df5e34c
tree       3560aca9ca86792f9ab7bd87861ea143a1b3c7a3  /arch/sparc/mm
parent     e73a04659c0b8cdee4dd40e58630e2cf63afb316
Merge with Linux 2.3.38.
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--  arch/sparc/mm/Makefile       7
-rw-r--r--  arch/sparc/mm/asyncd.c       3
-rw-r--r--  arch/sparc/mm/btfixup.c      3
-rw-r--r--  arch/sparc/mm/fault.c       37
-rw-r--r--  arch/sparc/mm/generic.c     32
-rw-r--r--  arch/sparc/mm/init.c       430
-rw-r--r--  arch/sparc/mm/io-unit.c     58
-rw-r--r--  arch/sparc/mm/iommu.c      104
-rw-r--r--  arch/sparc/mm/nosrmmu.c      8
-rw-r--r--  arch/sparc/mm/srmmu.c      387
-rw-r--r--  arch/sparc/mm/sun4c.c     1285
-rw-r--r--  arch/sparc/mm/swift.S      275
-rw-r--r--  arch/sparc/mm/tsunami.S     64
13 files changed, 1420 insertions, 1273 deletions
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 7caf69e90..5e304411c 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -1,4 +1,4 @@
-# $Id: Makefile,v 1.34 1999/08/14 03:51:42 anton Exp $
+# $Id: Makefile,v 1.35 1999/10/09 05:32:01 zaitcev Exp $
# Makefile for the linux Sparc-specific parts of the memory manager.
#
# Note! Dependencies are done automagically by 'make dep', which also
@@ -15,7 +15,7 @@ endif
ifeq ($(CONFIG_SUN4),y)
O_OBJS += nosrmmu.o
else
-O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o
+O_OBJS += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
endif
ifdef CONFIG_SMP
O_OBJS += nosun4c.o
@@ -33,3 +33,6 @@ viking.o: viking.S
tsunami.o: tsunami.S
$(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o tsunami.o tsunami.S
+
+swift.o: swift.S
+ $(CC) -D__ASSEMBLY__ $(AFLAGS) -ansi -c -o swift.o swift.S
diff --git a/arch/sparc/mm/asyncd.c b/arch/sparc/mm/asyncd.c
index d17979cd4..6ed8a3c99 100644
--- a/arch/sparc/mm/asyncd.c
+++ b/arch/sparc/mm/asyncd.c
@@ -1,4 +1,4 @@
-/* $Id: asyncd.c,v 1.17 1999/08/14 03:51:44 anton Exp $
+/* $Id: asyncd.c,v 1.18 1999/12/27 06:30:02 anton Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
@@ -25,6 +25,7 @@
#include <asm/system.h> /* for cli()/sti() */
#include <asm/segment.h> /* for memcpy_to/fromfs */
#include <asm/bitops.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#define DEBUG 0
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 4ecf94360..9b766f4eb 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -1,4 +1,4 @@
-/* $Id: btfixup.c,v 1.8 1999/08/31 06:54:31 davem Exp $
+/* $Id: btfixup.c,v 1.9 1999/12/27 06:30:02 anton Exp $
* btfixup.c: Boot time code fixup and relocator, so that
* we can get rid of most indirect calls to achieve single
* image sun4c and srmmu kernel.
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <asm/btfixup.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/system.h>
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index c400a0179..ba75681b1 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.107 1999/08/14 03:51:46 anton Exp $
+/* $Id: fault.c,v 1.111 1999/10/24 13:45:59 anton Exp $
* fault.c: Page fault handlers for the Sparc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -146,10 +146,11 @@ static void unhandled_fault(unsigned long address, struct task_struct *tsk,
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %08lx\n", address);
}
- printk(KERN_ALERT "tsk->mm->context = %08lx\n",
- (unsigned long) tsk->mm->context);
- printk(KERN_ALERT "tsk->mm->pgd = %08lx\n",
- (unsigned long) tsk->mm->pgd);
+ printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
+ (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
+ printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
+ (tsk->mm ? (unsigned long) tsk->mm->pgd :
+ (unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
@@ -309,8 +310,18 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
pgd_t *pgdp;
pte_t *ptep;
- if (text_fault)
+ if (text_fault) {
address = regs->pc;
+ } else if (!write &&
+ !(regs->psr & PSR_PS)) {
+ unsigned int insn, *ip;
+
+ ip = (unsigned int *)regs->pc;
+ if (! get_user(insn, ip)) {
+ if ((insn & 0xc1680000) == 0xc0680000)
+ write = 1;
+ }
+ }
pgdp = sun4c_pgd_offset(mm, address);
ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
@@ -319,28 +330,36 @@ asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
if (write) {
if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
+ unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_MODIFIED |
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
+ save_and_cli(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
+ restore_flags(flags);
return;
}
+ restore_flags(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
+ unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
+ save_and_cli(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
+ restore_flags(flags);
return;
}
+ restore_flags(flags);
}
}
}
@@ -415,31 +434,25 @@ void window_overflow_fault(void)
{
unsigned long sp;
- lock_kernel();
sp = current->thread.rwbuf_stkptrs[0];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
- unlock_kernel();
}
void window_underflow_fault(unsigned long sp)
{
- lock_kernel();
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
- unlock_kernel();
}
void window_ret_fault(struct pt_regs *regs)
{
unsigned long sp;
- lock_kernel();
sp = regs->u_regs[UREG_FP];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
- unlock_kernel();
}
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 9669f5111..9e599fd9d 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -1,4 +1,4 @@
-/* $Id: generic.c,v 1.6 1998/10/27 23:28:00 davem Exp $
+/* $Id: generic.c,v 1.9 1999/12/27 06:30:03 anton Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
@@ -9,46 +9,26 @@
#include <linux/mm.h>
#include <linux/swap.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-
-/* Allocate a block of RAM which is aligned to its size.
- * This procedure can be used until the call to mem_init().
- */
-void *sparc_init_alloc(unsigned long *kbrk, unsigned long size)
-{
- unsigned long mask = size - 1;
- unsigned long ret;
-
- if(!size)
- return 0x0;
- if(size & mask) {
- prom_printf("panic: sparc_init_alloc botch\n");
- prom_halt();
- }
- ret = (*kbrk + mask) & ~mask;
- *kbrk = ret + size;
- memset((void*) ret, 0, size);
- return (void*) ret;
-}
-
static inline void forget_pte(pte_t page)
{
if (pte_none(page))
return;
if (pte_present(page)) {
- unsigned long addr = pte_page(page);
- if (MAP_NR(addr) >= max_mapnr || PageReserved(mem_map+MAP_NR(addr)))
+ unsigned long nr = pte_pagenr(page);
+ if (nr >= max_mapnr || PageReserved(mem_map+nr))
return;
/*
* free_page() used to be able to clear swap cache
* entries. We may now have to do it manually.
*/
- free_page_and_swap_cache(addr);
+ free_page_and_swap_cache(mem_map+nr);
return;
}
- swap_free(pte_val(page));
+ swap_free(pte_to_swp_entry(page));
}
/* Remap IO memory, the same way as remap_page_range(), but use
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index 40aab1d66..221496f98 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -1,4 +1,4 @@
-/* $Id: init.c,v 1.69 1999/09/06 22:56:17 ecd Exp $
+/* $Id: init.c,v 1.72 1999/12/27 06:30:06 anton Exp $
* linux/arch/sparc/mm/init.c
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -22,6 +22,8 @@
#include <linux/blk.h>
#endif
#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
#include <asm/system.h>
#include <asm/segment.h>
@@ -30,22 +32,21 @@
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
-/* Turn this off if you suspect some place in some physical memory hole
- might get into page tables (something would be broken very much). */
-
-#define FREE_UNUSED_MEM_MAP
-
extern void show_net_buffers(void);
unsigned long *sparc_valid_addr_bitmap;
+unsigned long phys_base;
+
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
unsigned long sparc_unmapped_base;
struct pgtable_cache_struct pgt_quicklists;
/* References to section boundaries */
-extern char __init_begin, __init_end, etext;
+extern char __init_begin, __init_end, _start, _end, etext , edata;
+
+static unsigned long totalram_pages = 0;
/*
* BAD_PAGE is the page that is used for page faults when linux
@@ -62,50 +63,31 @@ extern char __init_begin, __init_end, etext;
*/
pte_t *__bad_pagetable(void)
{
- memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
- return (pte_t *) EMPTY_PGT;
+ memset((void *) &empty_bad_page_table, 0, PAGE_SIZE);
+ return (pte_t *) &empty_bad_page_table;
}
pte_t __bad_page(void)
{
- memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
- return pte_mkdirty(mk_pte((unsigned long) EMPTY_PGE, PAGE_SHARED));
+ memset((void *) &empty_bad_page, 0, PAGE_SIZE);
+ return pte_mkdirty(mk_pte_phys((((unsigned long) &empty_bad_page)
+ - PAGE_OFFSET + phys_base),
+ PAGE_SHARED));
}
void show_mem(void)
{
- int free = 0,total = 0,reserved = 0;
- int shared = 0, cached = 0;
- struct page *page, *end;
-
- printk("\nMem-info:\n");
+ printk("Mem-info:\n");
show_free_areas();
- printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- for (page = mem_map, end = mem_map + max_mapnr;
- page < end; page++) {
- if (PageSkip(page)) {
- if (page->next_hash < page)
- break;
- page = page->next_hash;
- }
- total++;
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (!atomic_read(&page->count))
- free++;
- else
- shared += atomic_read(&page->count) - 1;
- }
- printk("%d pages of RAM\n",total);
- printk("%d free pages\n",free);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
- printk("%ld page tables cached\n",pgtable_cache_size);
+ printk("Free swap: %6dkB\n",
+ nr_swap_pages << (PAGE_SHIFT-10));
+ printk("%ld pages of RAM\n", totalram_pages);
+ printk("%d free pages\n", nr_free_pages());
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+#ifndef __SMP__
if (sparc_cpu_model == sun4m || sparc_cpu_model == sun4d)
- printk("%ld page dirs cached\n", pgd_cache_size);
+ printk("%ld entries in page dir cache\n",pgd_cache_size);
+#endif
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
@@ -114,12 +96,12 @@ void show_mem(void)
extern pgprot_t protection_map[16];
-unsigned long __init sparc_context_init(unsigned long start_mem, int numctx)
+void __init sparc_context_init(int numctx)
{
int ctx;
- ctx_list_pool = (struct ctx_list *) start_mem;
- start_mem += (numctx * sizeof(struct ctx_list));
+ ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
+
for(ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;
@@ -131,7 +113,98 @@ unsigned long __init sparc_context_init(unsigned long start_mem, int numctx)
ctx_used.next = ctx_used.prev = &ctx_used;
for(ctx = 0; ctx < numctx; ctx++)
add_to_free_ctxlist(ctx_list_pool + ctx);
- return start_mem;
+}
+
+#undef DEBUG_BOOTMEM
+
+extern unsigned long cmdline_memory_size;
+
+unsigned long __init bootmem_init(void)
+{
+ unsigned long bootmap_size, start_pfn, end_pfn;
+ unsigned long end_of_phys_memory = 0UL;
+ int i;
+
+ /* XXX It is a bit ambiguous here, whether we should
+ * XXX treat the user specified mem=xxx as total wanted
+ * XXX physical memory, or as a limit to the upper
+ * XXX physical address we allow. For now it is the
+ * XXX latter. -DaveM
+ */
+#ifdef DEBUG_BOOTMEM
+ prom_printf("bootmem_init: Scan sp_banks, ");
+#endif
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ end_of_phys_memory = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ if (cmdline_memory_size) {
+ if (end_of_phys_memory > cmdline_memory_size) {
+ if (cmdline_memory_size > sp_banks[i].base_addr) {
+ end_of_phys_memory =
+ sp_banks[i-1].base_addr +
+ sp_banks[i-1].num_bytes;
+ sp_banks[i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+ } else {
+ sp_banks[i].num_bytes -=
+ (end_of_phys_memory -
+ cmdline_memory_size);
+ end_of_phys_memory = cmdline_memory_size;
+ sp_banks[++i].base_addr = 0xdeadbeef;
+ sp_banks[i].num_bytes = 0;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Start with page aligned address of last symbol in kernel
+ * image.
+ */
+ start_pfn = PAGE_ALIGN((unsigned long) &_end) - PAGE_OFFSET;
+
+ /* Adjust up to the physical address where the kernel begins. */
+ start_pfn += phys_base;
+
+ /* Now shift down to get the real physical page frame number. */
+ start_pfn >>= PAGE_SHIFT;
+
+ end_pfn = end_of_phys_memory >> PAGE_SHIFT;
+
+ /* Initialize the boot-time allocator. */
+#ifdef DEBUG_BOOTMEM
+ prom_printf("init_bootmem(spfn[%lx],epfn[%lx])\n",
+ start_pfn, end_pfn);
+#endif
+ bootmap_size = init_bootmem(start_pfn, end_pfn);
+
+ /* Now register the available physical memory with the
+ * allocator.
+ */
+ for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+#ifdef DEBUG_BOOTMEM
+ prom_printf("free_bootmem: base[%lx] size[%lx]\n",
+ sp_banks[i].base_addr,
+ sp_banks[i].num_bytes);
+#endif
+ free_bootmem(sp_banks[i].base_addr,
+ sp_banks[i].num_bytes);
+ }
+
+ /* Reserve the kernel text/data/bss and the bootmem bitmap. */
+#ifdef DEBUG_BOOTMEM
+ prom_printf("reserve_bootmem: base[%lx] size[%lx]\n",
+ phys_base,
+ (((start_pfn << PAGE_SHIFT) +
+ bootmap_size) - phys_base));
+#endif
+ reserve_bootmem(phys_base, (((start_pfn << PAGE_SHIFT) +
+ bootmap_size) - phys_base));
+
+#ifdef DEBUG_BOOTMEM
+ prom_printf("init_bootmem: return end_pfn[%lx]\n", end_pfn);
+#endif
+ return end_pfn;
}
/*
@@ -139,31 +212,32 @@ unsigned long __init sparc_context_init(unsigned long start_mem, int numctx)
* init routine based upon the Sun model type on the Sparc.
*
*/
-extern unsigned long sun4c_paging_init(unsigned long, unsigned long);
-extern unsigned long srmmu_paging_init(unsigned long, unsigned long);
-extern unsigned long device_scan(unsigned long);
+extern void sun4c_paging_init(void);
+extern void srmmu_paging_init(void);
+extern void device_scan(void);
+
+unsigned long last_valid_pfn;
-unsigned long __init
-paging_init(unsigned long start_mem, unsigned long end_mem)
+void __init paging_init(void)
{
switch(sparc_cpu_model) {
case sun4c:
case sun4e:
case sun4:
- start_mem = sun4c_paging_init(start_mem, end_mem);
+ sun4c_paging_init();
sparc_unmapped_base = 0xe0000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
break;
case sun4m:
case sun4d:
- start_mem = srmmu_paging_init(start_mem, end_mem);
+ srmmu_paging_init();
sparc_unmapped_base = 0x50000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
break;
case ap1000:
#if CONFIG_AP1000
- start_mem = apmmu_paging_init(start_mem, end_mem);
+ apmmu_paging_init();
sparc_unmapped_base = 0x50000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
break;
@@ -194,74 +268,121 @@ paging_init(unsigned long start_mem, unsigned long end_mem)
protection_map[14] = PAGE_SHARED;
protection_map[15] = PAGE_SHARED;
btfixup();
- return device_scan(start_mem);
+ device_scan();
}
struct cache_palias *sparc_aliases;
-extern void srmmu_frob_mem_map(unsigned long);
+static void __init taint_real_pages(void)
+{
+ int i;
-int physmem_mapped_contig __initdata = 1;
+ for (i = 0; sp_banks[i].num_bytes; i++) {
+ unsigned long start, end;
-static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem)
-{
- unsigned long addr, tmp2 = 0;
-
- if(physmem_mapped_contig) {
- for(addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
- if(addr >= KERNBASE && addr < start_mem)
- addr = start_mem;
- for(tmp2=0; sp_banks[tmp2].num_bytes != 0; tmp2++) {
- unsigned long phys_addr = (addr - PAGE_OFFSET);
- unsigned long base = sp_banks[tmp2].base_addr;
- unsigned long limit = base + sp_banks[tmp2].num_bytes;
-
- if((phys_addr >= base) && (phys_addr < limit) &&
- ((phys_addr + PAGE_SIZE) < limit)) {
- mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
- set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
- }
- }
+ start = sp_banks[i].base_addr;
+ end = start +
+ sp_banks[i].num_bytes;
+ while (start < end) {
+ set_bit (start >> 20,
+ sparc_valid_addr_bitmap);
+ start += PAGE_SIZE;
}
- } else {
- if((sparc_cpu_model == sun4m) || (sparc_cpu_model == sun4d)) {
- srmmu_frob_mem_map(start_mem);
+ }
+}
+
+void __init free_mem_map_range(struct page *first, struct page *last)
+{
+ first = (struct page *) PAGE_ALIGN((unsigned long)first);
+ last = (struct page *) ((unsigned long)last & PAGE_MASK);
+#ifdef DEBUG_BOOTMEM
+ prom_printf("[%p,%p] ", first, last);
+#endif
+ while (first < last) {
+ ClearPageReserved(mem_map + MAP_NR(first));
+ set_page_count(mem_map + MAP_NR(first), 1);
+ free_page((unsigned long)first);
+ totalram_pages++;
+ num_physpages++;
+
+ first = (struct page *)((unsigned long)first + PAGE_SIZE);
+ }
+}
+
+/* Walk through holes in sp_banks regions, if the mem_map array
+ * areas representing those holes consume a page or more, free
+ * up such pages. This helps a lot on machines where physical
+ * ram is configured such that it begins at some hugh value.
+ *
+ * The sp_banks array is sorted by base address.
+ */
+void __init free_unused_mem_map(void)
+{
+ int i;
+
+#ifdef DEBUG_BOOTMEM
+ prom_printf("free_unused_mem_map: ");
+#endif
+ for (i = 0; sp_banks[i].num_bytes; i++) {
+ if (i == 0) {
+ struct page *first, *last;
+
+ first = mem_map;
+ last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];
+ free_mem_map_range(first, last);
} else {
- for(addr = start_mem; addr < end_mem; addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
- set_bit(MAP_NR(addr) >> 8, sparc_valid_addr_bitmap);
+ struct page *first, *last;
+ unsigned long prev_end;
+
+ prev_end = sp_banks[i-1].base_addr +
+ sp_banks[i-1].num_bytes;
+ prev_end = PAGE_ALIGN(prev_end);
+ first = &mem_map[prev_end >> PAGE_SHIFT];
+ last = &mem_map[sp_banks[i].base_addr >> PAGE_SHIFT];
+
+ free_mem_map_range(first, last);
+
+ if (!sp_banks[i+1].num_bytes) {
+ prev_end = sp_banks[i].base_addr +
+ sp_banks[i].num_bytes;
+ first = &mem_map[prev_end >> PAGE_SHIFT];
+ last = &mem_map[last_valid_pfn];
+ free_mem_map_range(first, last);
}
}
}
+#ifdef DEBUG_BOOTMEM
+ prom_printf("\n");
+#endif
}
-void __init mem_init(unsigned long start_mem, unsigned long end_mem)
+void __init mem_init(void)
{
int codepages = 0;
int datapages = 0;
int initpages = 0;
int i;
- unsigned long addr;
- struct page *page, *end;
+ unsigned long addr, last;
/* Saves us work later. */
memset((void *) ZERO_PAGE(0), 0, PAGE_SIZE);
- end_mem &= PAGE_MASK;
- max_mapnr = MAP_NR(end_mem);
- high_memory = (void *) end_mem;
-
- sparc_valid_addr_bitmap = (unsigned long *)start_mem;
- i = max_mapnr >> (8 + 5);
+ i = last_valid_pfn >> (8 + 5);
i += 1;
- memset(sparc_valid_addr_bitmap, 0, i << 2);
- start_mem += i << 2;
- start_mem = PAGE_ALIGN(start_mem);
- num_physpages = 0;
+ sparc_valid_addr_bitmap = (unsigned long *)
+ __alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+
+ if (sparc_valid_addr_bitmap == NULL) {
+ prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
+ prom_halt();
+ }
+ memset(sparc_valid_addr_bitmap, 0, i << 2);
addr = KERNBASE;
- while(addr < start_mem) {
+ last = PAGE_ALIGN((unsigned long)&_end);
+ /* fix this */
+ while(addr < last) {
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_below_start_ok && addr >= initrd_start && addr < initrd_end)
mem_map[MAP_NR(addr)].flags &= ~(1<<PG_reserved);
@@ -272,69 +393,39 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem)
addr += PAGE_SIZE;
}
- taint_real_pages(start_mem, end_mem);
-
-#ifdef FREE_UNUSED_MEM_MAP
- end = mem_map + max_mapnr;
- for (page = mem_map; page < end; page++) {
- if (PageSkip(page)) {
- unsigned long low, high;
-
- /* See srmmu_frob_mem_map() for why this is done. -DaveM */
- page++;
-
- low = PAGE_ALIGN((unsigned long)(page+1));
- if (page->next_hash < page)
- high = ((unsigned long)end) & PAGE_MASK;
- else
- high = ((unsigned long)page->next_hash) & PAGE_MASK;
- while (low < high) {
- mem_map[MAP_NR(low)].flags &= ~(1<<PG_reserved);
- low += PAGE_SIZE;
- }
- }
- }
+ taint_real_pages();
+
+ max_mapnr = last_valid_pfn;
+ high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+
+#ifdef DEBUG_BOOTMEM
+ prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
-
- for (addr = PAGE_OFFSET; addr < end_mem; addr += PAGE_SIZE) {
- if (PageSkip(mem_map + MAP_NR(addr))) {
- unsigned long next = mem_map[MAP_NR(addr)].next_hash - mem_map;
+ num_physpages = totalram_pages = free_all_bootmem();
- next = (next << PAGE_SHIFT) + PAGE_OFFSET;
- if (next < addr || next >= end_mem)
- break;
- addr = next;
- }
- num_physpages++;
- if(PageReserved(mem_map + MAP_NR(addr))) {
- if ((addr < (unsigned long) &etext) && (addr >= KERNBASE))
- codepages++;
- else if((addr >= (unsigned long)&__init_begin && addr < (unsigned long)&__init_end))
- initpages++;
- else if((addr < start_mem) && (addr >= KERNBASE))
- datapages++;
- continue;
- }
- atomic_set(&mem_map[MAP_NR(addr)].count, 1);
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_start ||
- (addr < initrd_start || addr >= initrd_end))
+#if 0
+ free_unused_mem_map();
#endif
- free_page(addr);
- }
+
+ codepages = (((unsigned long) &etext) - ((unsigned long)&_start));
+ codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
+ datapages = (((unsigned long) &edata) - ((unsigned long)&etext));
+ datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
+ initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
+ initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
printk("Memory: %dk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
- nr_free_pages << (PAGE_SHIFT-10),
+ nr_free_pages() << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
- (unsigned long)PAGE_OFFSET, end_mem);
+ (unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
/* NOTE NOTE NOTE NOTE
* Please keep track of things and make sure this
* always matches the code in mm/page_alloc.c -DaveM
*/
- i = nr_free_pages >> 7;
+ i = nr_free_pages() >> 7;
if (i < 48)
i = 48;
if (i > 256)
@@ -347,39 +438,34 @@ void __init mem_init(unsigned long start_mem, unsigned long end_mem)
void free_initmem (void)
{
unsigned long addr;
-
+
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
- atomic_set(&mem_map[MAP_NR(addr)].count, 1);
- free_page(addr);
+ unsigned long page;
+ struct page *p;
+
+ page = (addr +
+ ((unsigned long) __va(phys_base)) -
+ PAGE_OFFSET);
+ p = mem_map + MAP_NR(page);
+
+ ClearPageReserved(p);
+ set_page_count(p, 1);
+ __free_page(p);
+ totalram_pages++;
+ num_physpages++;
}
}
void si_meminfo(struct sysinfo *val)
{
- struct page *page, *end;
-
- val->totalram = 0;
+ val->totalram = totalram_pages;
val->sharedram = 0;
- val->freeram = nr_free_pages << PAGE_SHIFT;
- val->bufferram = atomic_read(&buffermem);
- for (page = mem_map, end = mem_map + max_mapnr;
- page < end; page++) {
- if (PageSkip(page)) {
- if (page->next_hash < page)
- break;
- page = page->next_hash;
- }
- if (PageReserved(page))
- continue;
- val->totalram++;
- if (!atomic_read(&page->count))
- continue;
- val->sharedram += atomic_read(&page->count) - 1;
- }
- val->totalram <<= PAGE_SHIFT;
- val->sharedram <<= PAGE_SHIFT;
- val->totalbig = 0;
- val->freebig = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+
+ val->totalhigh = 0;
+ val->freehigh = 0;
+
+ val->mem_unit = PAGE_SIZE;
}
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
index 45c74c0aa..1a3476a16 100644
--- a/arch/sparc/mm/io-unit.c
+++ b/arch/sparc/mm/io-unit.c
@@ -1,4 +1,4 @@
-/* $Id: io-unit.c,v 1.15 1999/09/10 10:40:38 davem Exp $
+/* $Id: io-unit.c,v 1.18 1999/12/28 04:28:55 anton Exp $
* io-unit.c: IO-UNIT specific routines for memory management.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
@@ -9,6 +9,8 @@
#include <linux/init.h>
#include <linux/malloc.h>
#include <linux/spinlock.h>
+#include <asm/scatterlist.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
@@ -27,14 +29,15 @@
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
void __init
-iounit_init(int sbi_node, int io_node, struct linux_sbus *sbus)
+iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
iopte_t *xpt, *xptend;
struct iounit_struct *iounit;
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
-
+ struct resource r;
+
iounit = kmalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
-
+
memset(iounit, 0, sizeof(*iounit));
iounit->limit[0] = IOUNIT_BMAP1_START;
iounit->limit[1] = IOUNIT_BMAP2_START;
@@ -42,13 +45,14 @@ iounit_init(int sbi_node, int io_node, struct linux_sbus *sbus)
iounit->limit[3] = IOUNIT_BMAPM_END;
iounit->rotor[1] = IOUNIT_BMAP2_START;
iounit->rotor[2] = IOUNIT_BMAPM_START;
-
+
prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
sizeof(iommu_promregs));
prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
- xpt = (iopte_t *)
- sparc_alloc_io(iommu_promregs[2].phys_addr, 0, (PAGE_SIZE * 16),
- "XPT", iommu_promregs[2].which_io, 0x0);
+ memset(&r, 0, sizeof(r));
+ r.flags = iommu_promregs[2].which_io;
+ r.start = iommu_promregs[2].phys_addr;
+ xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
if(!xpt) panic("Cannot map External Page Table.");
sbus->iommu = (struct iommu_struct *)iounit;
@@ -108,7 +112,7 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
return vaddr;
}
-static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
unsigned long ret, flags;
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
@@ -119,7 +123,7 @@ static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct linux_sb
return ret;
}
-static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
unsigned long flags;
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
@@ -127,12 +131,13 @@ static void iounit_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus
/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
spin_lock_irqsave(&iounit->lock, flags);
for (; sz >= 0; sz--) {
- sg[sz].dvma_addr = iounit_get_area(iounit, (unsigned long)sg[sz].addr, sg[sz].len);
+ sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)sg[sz].address, sg[sz].length);
+ sg[sz].dvma_length = sg[sz].length;
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
-static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
+static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
unsigned long flags;
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
@@ -146,16 +151,16 @@ static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct linux
spin_unlock_irqrestore(&iounit->lock, flags);
}
-static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
unsigned long flags;
unsigned long vaddr, len;
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
-
+
spin_lock_irqsave(&iounit->lock, flags);
for (; sz >= 0; sz--) {
- len = ((sg[sz].dvma_addr & ~PAGE_MASK) + sg[sz].len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- vaddr = (sg[sz].dvma_addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
@@ -164,21 +169,18 @@ static void iounit_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_
}
#ifdef CONFIG_SBUS
-static void iounit_map_dma_area(unsigned long addr, int len)
+static void iounit_map_dma_area(unsigned long va, __u32 addr, int len)
{
unsigned long page, end;
pgprot_t dvma_prot;
iopte_t *iopte;
- struct linux_sbus *sbus;
+ struct sbus_bus *sbus;
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
end = PAGE_ALIGN((addr + len));
while(addr < end) {
- page = get_free_page(GFP_KERNEL);
- if(!page) {
- prom_printf("alloc_dvma: Cannot get a dvma page\n");
- prom_halt();
- } else {
+ page = va;
+ {
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
@@ -200,10 +202,15 @@ static void iounit_map_dma_area(unsigned long addr, int len)
}
}
addr += PAGE_SIZE;
+ va += PAGE_SIZE;
}
flush_cache_all();
flush_tlb_all();
}
+
+static void iounit_unmap_dma_area(unsigned long addr, int len)
+{
+}
#endif
static char *iounit_lockarea(char *vaddr, unsigned long len)
@@ -229,10 +236,11 @@ void __init ld_mmu_iounit(void)
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
-__u32 iounit_map_dma_init(struct linux_sbus *sbus, int size)
+__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
int i, j, k, npages;
unsigned long rotor, scan, limit;
@@ -271,7 +279,7 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
return ret;
}
-__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct linux_sbus *sbus)
+__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index 99f29c655..041e00c6a 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -1,8 +1,8 @@
-/* $Id: iommu.c,v 1.11 1999/08/31 06:54:34 davem Exp $
+/* $Id: iommu.c,v 1.16 1999/12/28 04:28:54 anton Exp $
* iommu.c: IOMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
+ * Copyright (C) 1995 Pete Zaitcev
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
@@ -12,10 +12,13 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/malloc.h>
+#include <asm/scatterlist.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
#include <asm/io.h>
#include <asm/mxcc.h>
+#include <asm/mbus.h>
/* srmmu.c */
extern int viking_mxcc_present;
@@ -45,20 +48,23 @@ static inline void iommu_map_dvma_pages_for_iommu(struct iommu_struct *iommu)
}
void __init
-iommu_init(int iommund, struct linux_sbus *sbus)
+iommu_init(int iommund, struct sbus_bus *sbus)
{
unsigned int impl, vers, ptsize;
unsigned long tmp;
struct iommu_struct *iommu;
struct linux_prom_registers iommu_promregs[PROMREG_MAX];
+ struct resource r;
int i;
iommu = kmalloc(sizeof(struct iommu_struct), GFP_ATOMIC);
prom_getproperty(iommund, "reg", (void *) iommu_promregs,
sizeof(iommu_promregs));
+ memset(&r, 0, sizeof(r));
+ r.flags = iommu_promregs[0].which_io;
+ r.start = iommu_promregs[0].phys_addr;
iommu->regs = (struct iommu_regs *)
- sparc_alloc_io(iommu_promregs[0].phys_addr, 0, (PAGE_SIZE * 3),
- "IOMMU registers", iommu_promregs[0].which_io, 0x0);
+ sbus_ioremap(&r, 0, PAGE_SIZE * 3, "iommu_regs");
if(!iommu->regs)
panic("Cannot map IOMMU registers.");
impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
@@ -137,18 +143,18 @@ iommu_init(int iommund, struct linux_sbus *sbus)
impl, vers, iommu->page_table, ptsize);
}
-static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static __u32 iommu_get_scsi_one_noflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
return (__u32)vaddr;
}
-static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static __u32 iommu_get_scsi_one_gflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
flush_page_for_dma(0);
return (__u32)vaddr;
}
-static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct linux_sbus *sbus)
+static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct sbus_bus *sbus)
{
unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
@@ -159,81 +165,110 @@ static __u32 iommu_get_scsi_one_pflush(char *vaddr, unsigned long len, struct li
return (__u32)vaddr;
}
-static void iommu_get_scsi_sgl_noflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iommu_get_scsi_sgl_noflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
- for (; sz >= 0; sz--)
- sg[sz].dvma_addr = (__u32) (sg[sz].addr);
+ for (; sz >= 0; sz--) {
+ sg[sz].dvma_address = (__u32) (sg[sz].address);
+ sg[sz].dvma_length = (__u32) (sg[sz].length);
+ }
}
-static void iommu_get_scsi_sgl_gflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iommu_get_scsi_sgl_gflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
flush_page_for_dma(0);
- for (; sz >= 0; sz--)
- sg[sz].dvma_addr = (__u32) (sg[sz].addr);
+ for (; sz >= 0; sz--) {
+ sg[sz].dvma_address = (__u32) (sg[sz].address);
+ sg[sz].dvma_length = (__u32) (sg[sz].length);
+ }
}
-static void iommu_get_scsi_sgl_pflush(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iommu_get_scsi_sgl_pflush(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
unsigned long page, oldpage = 0;
while(sz >= 0) {
- page = ((unsigned long) sg[sz].addr) & PAGE_MASK;
+ page = ((unsigned long) sg[sz].address) & PAGE_MASK;
if (oldpage == page)
page += PAGE_SIZE; /* We flushed that page already */
- while(page < (unsigned long)(sg[sz].addr + sg[sz].len)) {
+ while(page < (unsigned long)(sg[sz].address + sg[sz].length)) {
flush_page_for_dma(page);
page += PAGE_SIZE;
}
- sg[sz].dvma_addr = (__u32) (sg[sz].addr);
+ sg[sz].dvma_address = (__u32) (sg[sz].address);
+ sg[sz].dvma_length = (__u32) (sg[sz].length);
sz--;
oldpage = page - PAGE_SIZE;
}
}
-static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct linux_sbus *sbus)
+static void iommu_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
{
}
-static void iommu_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void iommu_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
}
#ifdef CONFIG_SBUS
-static void iommu_map_dma_area(unsigned long addr, int len)
+static void iommu_map_dma_area(unsigned long va, __u32 addr, int len)
{
- unsigned long page, end;
+ unsigned long page, end, ipte_cache;
pgprot_t dvma_prot;
- struct iommu_struct *iommu = SBus_chain->iommu;
+ struct iommu_struct *iommu = sbus_root->iommu;
iopte_t *iopte = iommu->page_table;
iopte_t *first;
- if(viking_mxcc_present)
+ if(viking_mxcc_present || srmmu_modtype == HyperSparc) {
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
- else
+ ipte_cache = 1;
+ } else {
dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
+ ipte_cache = 0;
+ }
iopte += ((addr - iommu->start) >> PAGE_SHIFT);
first = iopte;
end = PAGE_ALIGN((addr + len));
while(addr < end) {
- page = get_free_page(GFP_KERNEL);
- if(!page) {
- prom_printf("alloc_dvma: Cannot get a dvma page\n");
- prom_halt();
- } else {
+ page = va;
+ {
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
- pgdp = pgd_offset(init_task.mm, addr);
+ if (viking_mxcc_present)
+ viking_mxcc_flush_page(page);
+ else if (viking_flush)
+ viking_flush_page(page);
+ else
+ flush_page_to_ram(page);
+
+ pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr);
ptep = pte_offset(pmdp, addr);
set_pte(ptep, pte_val(mk_pte(page, dvma_prot)));
- iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page));
+ if (ipte_cache != 0) {
+ iopte_val(*iopte++) = MKIOPTE(mmu_v2p(page));
+ } else {
+ iopte_val(*iopte++) =
+ MKIOPTE(mmu_v2p(page)) & ~IOPTE_CACHE;
+ }
}
addr += PAGE_SIZE;
+ va += PAGE_SIZE;
}
+ /* P3: why do we need this?
+ *
+ * DAVEM: Because there are several aspects, none of which
+ * are handled by a single interface. Some cpus are
+ * completely not I/O DMA coherent, and some have
+ * virtually indexed caches. The driver DMA flushing
+ * methods handle the former case, but here during
+ * IOMMU page table modifications, and usage of non-cacheable
+ * cpu mappings of pages potentially in the cpu caches, we have
+ * to handle the latter case as well.
+ */
flush_cache_all();
if(viking_mxcc_present) {
unsigned long start = ((unsigned long) first) & PAGE_MASK;
@@ -253,6 +288,10 @@ static void iommu_map_dma_area(unsigned long addr, int len)
flush_tlb_all();
iommu_invalidate(iommu->regs);
}
+
+static void iommu_unmap_dma_area(unsigned long addr, int len)
+{
+}
#endif
static char *iommu_lockarea(char *vaddr, unsigned long len)
@@ -287,5 +326,6 @@ void __init ld_mmu_iommu(void)
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
diff --git a/arch/sparc/mm/nosrmmu.c b/arch/sparc/mm/nosrmmu.c
index bae3bd140..4e49380d2 100644
--- a/arch/sparc/mm/nosrmmu.c
+++ b/arch/sparc/mm/nosrmmu.c
@@ -1,4 +1,4 @@
-/* $Id: nosrmmu.c,v 1.3 1999/08/31 06:54:35 davem Exp $
+/* $Id: nosrmmu.c,v 1.5 1999/11/19 04:11:54 davem Exp $
* nosrmmu.c: This file is a bunch of dummies for sun4 compiles,
* so that it does not need srmmu and avoid ifdefs.
*
@@ -14,6 +14,8 @@ static char shouldnothappen[] __initdata = "SUN4 kernel can only run on SUN4\n";
enum mbus_module srmmu_modtype;
+int vac_cache_size = 0;
+
static void __init should_not_happen(void)
{
prom_printf(shouldnothappen);
@@ -49,12 +51,12 @@ void __init srmmu_end_memory(unsigned long memory_size, unsigned long *mem_end_p
return 0;
}
-__u32 iounit_map_dma_init(struct linux_sbus *sbus, int size)
+__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
return 0;
}
-__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct linux_sbus *sbus)
+__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
{
return 0;
}
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 5d99b02dd..c365cf0d5 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1,8 +1,8 @@
-/* $Id: srmmu.c,v 1.192 1999/09/10 10:40:40 davem Exp $
+/* $Id: srmmu.c,v 1.199 1999/12/23 02:00:51 davem Exp $
* srmmu.c: SRMMU specific routines for memory management.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1995 Peter A. Zaitcev (zaitcev@ithil.mcst.ru)
+ * Copyright (C) 1995 Pete Zaitcev
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
@@ -53,7 +53,7 @@ int vac_cache_size;
int vac_line_size;
int vac_badbits;
-extern unsigned long sparc_iobase_vaddr;
+extern struct resource sparc_iomap;
#ifdef __SMP__
#define FLUSH_BEGIN(mm)
@@ -284,7 +284,7 @@ void __init srmmu_frob_mem_map(unsigned long start_mem)
}
/* The very generic SRMMU page table operations. */
-static inline int srmmu_device_memory(unsigned long x)
+static inline int srmmu_device_memory(unsigned long x)
{
return ((x & 0xF0000000) != 0);
}
@@ -464,17 +464,6 @@ static inline pte_t *srmmu_s_pte_offset(pmd_t * dir, unsigned long address)
return (pte_t *) srmmu_s_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1));
}
-/* This must update the context table entry for this process. */
-static void srmmu_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
-{
- if(tsk->mm->context != NO_CONTEXT &&
- tsk->mm->pgd != pgdp) {
- flush_cache_mm(tsk->mm);
- ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
- flush_tlb_mm(tsk->mm);
- }
-}
-
static inline pte_t *srmmu_get_pte_fast(void)
{
struct page *ret;
@@ -777,11 +766,11 @@ static void srmmu_quick_kernel_fault(unsigned long address)
#else
printk("Kernel faults at addr=0x%08lx\n", address);
printk("PTE=%08lx\n", srmmu_hwprobe((address & PAGE_MASK)));
- die_if_kernel("SRMMU bolixed...", current->tss.kregs);
+ die_if_kernel("SRMMU bolixed...", current->thread.kregs);
#endif
}
-static inline void alloc_context(struct mm_struct *mm)
+static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
@@ -794,7 +783,7 @@ static inline void alloc_context(struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
+ if(ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
if(ctxp == &ctx_used)
panic("out of mmu contexts");
@@ -817,29 +806,16 @@ static inline void free_context(int context)
}
-static void srmmu_switch_to_context(struct task_struct *tsk)
+static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+ struct task_struct *tsk, int cpu)
{
- if(tsk->mm->context == NO_CONTEXT) {
+ if(mm->context == NO_CONTEXT) {
spin_lock(&srmmu_context_spinlock);
- alloc_context(tsk->mm);
+ alloc_context(old_mm, mm);
spin_unlock(&srmmu_context_spinlock);
- ctxd_set(&srmmu_context_table[tsk->mm->context], tsk->mm->pgd);
+ ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
}
- srmmu_set_context(tsk->mm->context);
-}
-
-static void srmmu_init_new_context(struct mm_struct *mm)
-{
- spin_lock(&srmmu_context_spinlock);
- alloc_context(mm);
- spin_unlock(&srmmu_context_spinlock);
-
- flush_cache_mm(mm);
- ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
- flush_tlb_mm(mm);
-
- if(mm == current->mm)
- srmmu_set_context(mm->context);
+ srmmu_set_context(mm->context);
}
/* Low level IO area allocation on the SRMMU. */
@@ -885,9 +861,6 @@ void srmmu_unmapioaddr(unsigned long virt_addr)
flush_tlb_all();
}
-/* This is used in many routines below. */
-#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
-
/* On the SRMMU we do not have the problems with limited tlb entries
* for mapping kernel pages, so we just take things from the free page
* pool. As a side effect we are putting a little too much pressure
@@ -919,110 +892,85 @@ extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-
-/* Workaround, until we find what's going on with Swift. When low on memory, it sometimes
- * loops in fault/handle_mm_fault incl. flush_tlb_page to find out it is already in page tables/
- * fault again on the same instruction. I really don't understand it, have checked it and contexts
- * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj
+extern void tsunami_setup_blockops(void);
+
+/* Workaround, until we find what's going on with Swift. When low on memory,
+ * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
+ * out it is already in page tables/ fault again on the same instruction.
+ * I really don't understand it, have checked it and contexts
+ * are right, flush_tlb_all is done as well, and it faults again...
+ * Strange. -jj
+ *
+ * The following code is a deadwood that may be necessary when
+ * we start to make precise page flushes again. --zaitcev
*/
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
+#if 0
static unsigned long last;
-
- if (last == address) viking_hwprobe(address);
+ unsigned int val;
+ /* unsigned int n; */
+
+ if (address == last) {
+ val = srmmu_hwprobe(address);
+ if (val != 0 && pte_val(pte) != val) {
+ printk("swift_update_mmu_cache: "
+ "addr %lx put %08x probed %08x from %p\n",
+ address, pte_val(pte), val,
+ __builtin_return_address(0));
+ srmmu_flush_whole_tlb();
+ }
+ }
last = address;
+#endif
}
-/* Swift flushes. It has the recommended SRMMU specification flushing
- * facilities, so we can do things in a more fine grained fashion than we
- * could on the tsunami. Let's watch out for HARDWARE BUGS...
- */
-
-static void swift_flush_cache_all(void)
-{
- flush_user_windows();
- swift_idflash_clear();
-}
-
-static void swift_flush_cache_mm(struct mm_struct *mm)
-{
- FLUSH_BEGIN(mm)
- flush_user_windows();
- swift_idflash_clear();
- FLUSH_END
-}
-
-static void swift_flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
-{
- FLUSH_BEGIN(mm)
- flush_user_windows();
- swift_idflash_clear();
- FLUSH_END
-}
-
-static void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
-{
- FLUSH_BEGIN(vma->vm_mm)
- flush_user_windows();
- if(vma->vm_flags & VM_EXEC)
- swift_flush_icache();
- swift_flush_dcache();
- FLUSH_END
-}
-
-/* Not copy-back on swift. */
-static void swift_flush_page_to_ram(unsigned long page)
-{
-}
-
-/* But not IO coherent either. */
-static void swift_flush_page_for_dma(unsigned long page)
-{
- swift_flush_dcache();
-}
-
-/* Again, Swift is non-snooping split I/D cache'd just like tsunami,
- * so have to punt the icache for on-stack signal insns. Only the
- * icache need be flushed since the dcache is write-through.
- */
-static void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
-{
- swift_flush_icache();
-}
-
-static void swift_flush_chunk(unsigned long chunk)
-{
-}
-
-static void swift_flush_tlb_all(void)
-{
- srmmu_flush_whole_tlb();
- module_stats.invall++;
-}
-
-static void swift_flush_tlb_mm(struct mm_struct *mm)
-{
- FLUSH_BEGIN(mm)
- srmmu_flush_whole_tlb();
- module_stats.invmm++;
- FLUSH_END
-}
+/* swift.S */
+extern void swift_flush_cache_all(void);
+extern void swift_flush_cache_mm(struct mm_struct *mm);
+extern void swift_flush_cache_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void swift_flush_page_to_ram(unsigned long page);
+extern void swift_flush_page_for_dma(unsigned long page);
+extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void swift_flush_chunk(unsigned long chunk);
+extern void swift_flush_tlb_all(void);
+extern void swift_flush_tlb_mm(struct mm_struct *mm);
+extern void swift_flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
-static void swift_flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+#if 0 /* P3: deadwood to debug precise flushes on Swift. */
+void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- FLUSH_BEGIN(mm)
- srmmu_flush_whole_tlb();
- module_stats.invrnge++;
- FLUSH_END
-}
+ int cctx, ctx1;
-static void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- FLUSH_BEGIN(vma->vm_mm)
- srmmu_flush_whole_tlb();
+ page &= PAGE_MASK;
+ if ((ctx1 = vma->vm_mm->context) != -1) {
+ cctx = srmmu_get_context();
+/* Is context # ever different from current context? P3 */
+ if (cctx != ctx1) {
+ printk("flush ctx %02x curr %02x\n", ctx1, cctx);
+ srmmu_set_context(ctx1);
+ swift_flush_page(page);
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (page), "i" (ASI_M_FLUSH_PROBE));
+ srmmu_set_context(cctx);
+ } else {
+ /* Rm. prot. bits from virt. c. */
+ /* swift_flush_cache_all(); */
+ /* swift_flush_cache_page(vma, page); */
+ swift_flush_page(page);
+
+ __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+ "r" (page), "i" (ASI_M_FLUSH_PROBE));
+ /* same as above: srmmu_flush_tlb_page() */
+ }
+ }
module_stats.invpg++;
- FLUSH_END
}
+#endif
/* The following are all MBUS based SRMMU modules, and therefore could
* be found in a multiprocessor configuration. On the whole, these
@@ -1333,103 +1281,21 @@ static void hypersparc_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
hyper_flush_whole_icache();
}
-static void hypersparc_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
+static void hypersparc_switch_mm(struct mm_struct *old_mm,
+ struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
- unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
-
- if(pgdp != swapper_pg_dir)
- hypersparc_flush_page_to_ram(page);
-
- if(tsk->mm->context != NO_CONTEXT &&
- tsk->mm->pgd != pgdp) {
- flush_cache_mm(tsk->mm);
- ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
- flush_tlb_mm(tsk->mm);
- }
-}
-
-static void viking_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
-{
- if(pgdp != swapper_pg_dir)
- flush_chunk((unsigned long)pgdp);
- if(tsk->mm->context != NO_CONTEXT &&
- tsk->mm->pgd != pgdp) {
- flush_cache_mm(tsk->mm);
- ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
- flush_tlb_mm(tsk->mm);
- }
-}
-
-static void cypress_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdp)
-{
- register unsigned long a, b, c, d, e, f, g;
- unsigned long page = ((unsigned long) pgdp) & PAGE_MASK;
- unsigned long line;
-
- if(pgdp == swapper_pg_dir)
- goto skip_flush;
-
- a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
- page &= PAGE_MASK;
- line = (page + PAGE_SIZE) - 0x100;
- goto inside;
- do {
- line -= 0x100;
- inside:
- __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
- "sta %%g0, [%0 + %2] %1\n\t"
- "sta %%g0, [%0 + %3] %1\n\t"
- "sta %%g0, [%0 + %4] %1\n\t"
- "sta %%g0, [%0 + %5] %1\n\t"
- "sta %%g0, [%0 + %6] %1\n\t"
- "sta %%g0, [%0 + %7] %1\n\t"
- "sta %%g0, [%0 + %8] %1\n\t" : :
- "r" (line),
- "i" (ASI_M_FLUSH_PAGE),
- "r" (a), "r" (b), "r" (c), "r" (d),
- "r" (e), "r" (f), "r" (g));
- } while(line != page);
-skip_flush:
- if(tsk->mm->context != NO_CONTEXT &&
- tsk->mm->pgd != pgdp) {
- flush_cache_mm(tsk->mm);
- ctxd_set(&srmmu_context_table[tsk->mm->context], pgdp);
- flush_tlb_mm(tsk->mm);
- }
-}
-
-static void hypersparc_switch_to_context(struct task_struct *tsk)
-{
- if(tsk->mm->context == NO_CONTEXT) {
+ if(mm->context == NO_CONTEXT) {
ctxd_t *ctxp;
spin_lock(&srmmu_context_spinlock);
- alloc_context(tsk->mm);
+ alloc_context(old_mm, mm);
spin_unlock(&srmmu_context_spinlock);
- ctxp = &srmmu_context_table[tsk->mm->context];
- srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) tsk->mm->pgd) >> 4))));
+ ctxp = &srmmu_context_table[mm->context];
+ srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
hypersparc_flush_page_to_ram((unsigned long)ctxp);
}
hyper_flush_whole_icache();
- srmmu_set_context(tsk->mm->context);
-}
-
-static void hypersparc_init_new_context(struct mm_struct *mm)
-{
- ctxd_t *ctxp;
-
- spin_lock(&srmmu_context_spinlock);
- alloc_context(mm);
- spin_unlock(&srmmu_context_spinlock);
-
- ctxp = &srmmu_context_table[mm->context];
- srmmu_set_entry((pte_t *)ctxp, __pte((SRMMU_ET_PTD | (srmmu_v2p((unsigned long) mm->pgd) >> 4))));
- hypersparc_flush_page_to_ram((unsigned long)ctxp);
-
- if(mm == current->mm) {
- hyper_flush_whole_icache();
- srmmu_set_context(mm->context);
- }
+ srmmu_set_context(mm->context);
}
static unsigned long mempool;
@@ -1694,7 +1560,8 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
srmmu_map[srmmu_bank].vbase = vbase;
srmmu_map[srmmu_bank].pbase = sp_banks[sp_entry].base_addr;
srmmu_map[srmmu_bank].size = sp_banks[sp_entry].num_bytes;
- srmmu_bank++;
+ if (srmmu_map[srmmu_bank].size)
+ srmmu_bank++;
map_spbank_last_pa = pstart - SRMMU_PGDIR_SIZE;
return vstart;
}
@@ -1949,8 +1816,8 @@ unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long en
int i, cpunode;
char node_str[128];
- sparc_iobase_vaddr = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
- physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
+ sparc_iomap.start = 0xfd000000; /* 16MB of IOSPACE on all sun4m's. */
+ physmem_mapped_contig = 0; /* for init.c:taint_real_pages() */
if (sparc_cpu_model == sun4d)
num_contexts = 65536; /* We know it is Viking */
@@ -1981,7 +1848,7 @@ unsigned long __init srmmu_paging_init(unsigned long start_mem, unsigned long en
srmmu_allocate_ptable_skeleton(KERNBASE, end_mem);
#if CONFIG_SUN_IO
- srmmu_allocate_ptable_skeleton(sparc_iobase_vaddr, IOBASE_END);
+ srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
#endif
@@ -2051,16 +1918,14 @@ static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long ad
static void srmmu_destroy_context(struct mm_struct *mm)
{
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
- /* XXX This could be drastically improved.
- * XXX We are only called from __exit_mm and it just did
- * XXX cache/tlb mm flush and right after this will (re-)
- * XXX SET_PAGE_DIR to swapper_pg_dir. -DaveM
- */
+
+ if(mm->context != NO_CONTEXT) {
flush_cache_mm(mm);
ctxd_set(&srmmu_context_table[mm->context], swapper_pg_dir);
flush_tlb_mm(mm);
+ spin_lock(&srmmu_context_spinlock);
free_context(mm->context);
+ spin_unlock(&srmmu_context_spinlock);
mm->context = NO_CONTEXT;
}
}
@@ -2136,7 +2001,7 @@ static void srmmu_vac_update_mmu_cache(struct vm_area_struct * vma,
static void hypersparc_destroy_context(struct mm_struct *mm)
{
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+ if(mm->context != NO_CONTEXT) {
ctxd_t *ctxp;
/* HyperSparc is copy-back, any data for this
@@ -2151,7 +2016,9 @@ static void hypersparc_destroy_context(struct mm_struct *mm)
hypersparc_flush_page_to_ram((unsigned long)ctxp);
flush_tlb_mm(mm);
+ spin_lock(&srmmu_context_spinlock);
free_context(mm->context);
+ spin_unlock(&srmmu_context_spinlock);
mm->context = NO_CONTEXT;
}
}
@@ -2267,11 +2134,9 @@ static void __init init_hypersparc(void)
BTFIXUPSET_CALL(flush_chunk, hypersparc_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(ctxd_set, hypersparc_ctxd_set, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, hypersparc_switch_to_context, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(init_new_context, hypersparc_init_new_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, hypersparc_switch_mm, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(destroy_context, hypersparc_destroy_context, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, hypersparc_update_rootmmu_dir, BTFIXUPCALL_NORM);
poke_srmmu = poke_hypersparc;
hypersparc_setup_blockops();
@@ -2339,7 +2204,6 @@ static void __init init_cypress_common(void)
BTFIXUPSET_CALL(flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, cypress_update_rootmmu_dir, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(update_mmu_cache, srmmu_vac_update_mmu_cache, BTFIXUPCALL_NORM);
poke_srmmu = poke_cypress;
@@ -2371,12 +2235,14 @@ static void __init init_cypress_605(unsigned long mrev)
static void __init poke_swift(void)
{
- unsigned long mreg = srmmu_get_mmureg();
+ unsigned long mreg;
/* Clear any crap from the cache or else... */
- swift_idflash_clear();
- mreg |= (SWIFT_IE | SWIFT_DE); /* I & D caches on */
+ swift_flush_cache_all();
+ /* Enable I & D caches */
+ mreg = srmmu_get_mmureg();
+ mreg |= (SWIFT_IE | SWIFT_DE);
/* The Swift branch folding logic is completely broken. At
* trap time, if things are just right, if can mistakenly
* think that a trap is coming from kernel mode when in fact
@@ -2442,19 +2308,21 @@ static void __init init_swift(void)
BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, swift_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
+ flush_page_for_dma_global = 0;
+
/* Are you now convinced that the Swift is one of the
* biggest VLSI abortions of all time? Bravo Fujitsu!
* Fujitsu, the !#?!%$'d up processor people. I bet if
@@ -2611,7 +2479,7 @@ static void __init init_turbosparc(void)
BTFIXUPSET_CALL(flush_chunk, turbosparc_flush_chunk, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
- BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
poke_srmmu = poke_turbosparc;
}
@@ -2642,7 +2510,7 @@ static void __init init_tsunami(void)
BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NOP); /* local flush _only_ */
+ BTFIXUPSET_CALL(flush_chunk, tsunami_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
@@ -2654,6 +2522,8 @@ static void __init init_tsunami(void)
BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
poke_srmmu = poke_tsunami;
+
+ tsunami_setup_blockops();
}
static void __init poke_viking(void)
@@ -2725,7 +2595,6 @@ static void __init init_viking(void)
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, viking_update_rootmmu_dir, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_chunk, viking_flush_chunk, BTFIXUPCALL_NORM); /* local flush _only_ */
@@ -2736,8 +2605,7 @@ static void __init init_viking(void)
* which we use the IOMMU.
*/
BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
- /* Also, this is so far the only chip which actually uses
- the page argument to flush_page_for_dma */
+
flush_page_for_dma_global = 0;
} else {
srmmu_name = "TI Viking/MXCC";
@@ -2928,6 +2796,16 @@ static int srmmu_check_pgt_cache(int low, int high)
return freed;
}
+static void srmmu_flush_dma_area(unsigned long addr, int len)
+{
+ /* XXX Later */
+}
+
+static void srmmu_inval_dma_area(unsigned long addr, int len)
+{
+ /* XXX Later */
+}
+
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
tsetup_mmu_patchme, rtrap_mmu_patchme;
@@ -2999,21 +2877,18 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(free_pte_slow, srmmu_free_pte_slow, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(free_pgd_slow, srmmu_free_pgd_slow, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NORM);
-
+
BTFIXUPSET_CALL(set_pgdir, srmmu_set_pgdir, BTFIXUPCALL_NORM);
-
+
BTFIXUPSET_CALL(set_pte, srmmu_set_pte_cacheable, BTFIXUPCALL_SWAPO0O1);
- BTFIXUPSET_CALL(init_new_context, srmmu_init_new_context, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, srmmu_switch_to_context, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_page, srmmu_pte_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pgd_page, srmmu_pgd_page, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, srmmu_update_rootmmu_dir, BTFIXUPCALL_NORM);
+ BTFIXUPSET_SETHI(none_mask, 0xF0000000); /* P3: is it used? */
- BTFIXUPSET_SETHI(none_mask, 0xF0000000);
-
BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);
@@ -3072,6 +2947,11 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(ctxd_set, srmmu_ctxd_set, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
+/* hmm isn't flush_dma_area the same thing as flush_page_for_dma? */
+/* It is, except flush_page_for_dma was local to srmmu.c */
+ BTFIXUPSET_CALL(mmu_flush_dma_area, srmmu_flush_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_inval_dma_area, srmmu_inval_dma_area, BTFIXUPCALL_NORM);
+
get_srmmu_type();
patch_window_trap_handlers();
@@ -3104,6 +2984,7 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
#endif
+
if (sparc_cpu_model == sun4d)
ld_mmu_iounit();
else
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index f91ab1ce6..0530e635f 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -1,19 +1,24 @@
-/* $Id: sun4c.c,v 1.176 1999/08/31 06:54:42 davem Exp $
+/* $Id: sun4c.c,v 1.182 1999/12/27 06:30:04 anton Exp $
* sun4c.c: Doing in software what should be done in hardware.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1996 Andrew Tridgell (Andrew.Tridgell@anu.edu.au)
- * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
+ * Copyright (C) 1997,99 Anton Blanchard (anton@progsoc.uts.edu.au)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#define NR_TASK_BUCKETS 512
+
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <asm/scatterlist.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/idprom.h>
@@ -27,42 +32,19 @@
#include <asm/mmu_context.h>
#include <asm/sun4paddr.h>
-/* TODO: Make it such that interrupt handlers cannot dick with
- * the user segment lists, most of the cli/sti pairs can
- * disappear once that is taken care of.
- */
-
-/* XXX Ok the real performance win, I figure, will be to use a combined hashing
- * XXX and bitmap scheme to keep track of what we have mapped where. The whole
- * XXX incentive is to make it such that the range flushes can be serviced
- * XXX always in near constant time. --DaveM
+/* Because of our dynamic kernel TLB miss strategy, and how
+ * our DVMA mapping allocation works, you _MUST_:
+ *
+ * 1) Disable interrupts _and_ not touch any dynamic kernel
+ * memory while messing with kernel MMU state. By
+ * dynamic memory I mean any object which is not in
+ * the kernel image itself or a task_struct (both of
+ * which are locked into the MMU).
+ * 2) Disable interrupts while messing with user MMU state.
*/
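
The two rules above amount to one fixed bracketing pattern around every segmap or pte update. A minimal sketch of that pattern, using the save_and_cli()/restore_flags() pair and flush_user_windows() exactly as the rest of this file does (the function name and its arguments are placeholders for illustration, not code from the patch):

static void example_remap_user_segment(unsigned long vaddr, unsigned char pseg)
{
	unsigned long flags;

	save_and_cli(flags);		/* rule 2: no interrupts while user MMU state is inconsistent */
	flush_user_windows();		/* spill register windows before the mapping changes */
	sun4c_put_segmap(vaddr, pseg);	/* rule 1: touch only locked (static) data in here */
	restore_flags(flags);
}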
extern int num_segmaps, num_contexts;
-/* Define this to get extremely anal debugging, undefine for performance. */
-/* #define DEBUG_SUN4C_MM */
-
-#define UWINMASK_OFFSET (const unsigned long)(&(((struct task_struct *)0)->tss.uwinmask))
-
-/* This is used in many routines below. */
-#define FUW_INLINE do { \
- register int ctr asm("g5"); \
- ctr = 0; \
- __asm__ __volatile__("\n" \
- "1: ld [%%g6 + %2], %%g4 ! flush user windows\n" \
- " orcc %%g0, %%g4, %%g0\n" \
- " add %0, 1, %0\n" \
- " bne 1b\n" \
- " save %%sp, -64, %%sp\n" \
- "2: subcc %0, 1, %0\n" \
- " bne 2b\n" \
- " restore %%g0, %%g0, %%g0\n" \
- : "=&r" (ctr) \
- : "0" (ctr), "i" (UWINMASK_OFFSET) \
- : "g4", "cc"); \
-} while(0);
-
#ifdef CONFIG_SUN4
#define SUN4C_VAC_SIZE sun4c_vacinfo.num_bytes
#else
@@ -82,58 +64,21 @@ extern int num_segmaps, num_contexts;
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
-
-#define KGPROF_PROFILING 0
-#if KGPROF_PROFILING
-#define KGPROF_DEPTH 3 /* this needs to match the code below */
-#define KGPROF_SIZE 100
-static struct {
- unsigned addr[KGPROF_DEPTH];
- unsigned count;
-} kgprof_counters[KGPROF_SIZE];
-
-/* just call this function from whatever function you think needs it then
- look at /proc/cpuinfo to see where the function is being called from
- and how often. This gives a type of "kernel gprof" */
-#define NEXT_PROF(prev,lvl) (prev>PAGE_OFFSET?__builtin_return_address(lvl):0)
-static inline void kgprof_profile(void)
-{
- unsigned ret[KGPROF_DEPTH];
- int i,j;
- /* you can't use a variable argument to __builtin_return_address() */
- ret[0] = (unsigned)__builtin_return_address(0);
- ret[1] = (unsigned)NEXT_PROF(ret[0],1);
- ret[2] = (unsigned)NEXT_PROF(ret[1],2);
-
- for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
- for (j=0;j<KGPROF_DEPTH;j++)
- if (ret[j] != kgprof_counters[i].addr[j]) break;
- if (j==KGPROF_DEPTH) break;
- }
- if (i<KGPROF_SIZE) {
- for (j=0;j<KGPROF_DEPTH;j++)
- kgprof_counters[i].addr[j] = ret[j];
- kgprof_counters[i].count++;
- }
-}
-#endif
-
-
/* Flushing the cache. */
struct sun4c_vac_props sun4c_vacinfo;
-static int ctxflushes, segflushes, pageflushes;
unsigned long sun4c_kernel_faults;
/* convert a virtual address to a physical address and vice
- versa. Easy on the 4c */
+ * versa. Easy on the 4c
+ */
static unsigned long sun4c_v2p(unsigned long vaddr)
{
- return(vaddr - PAGE_OFFSET);
+ return (vaddr - PAGE_OFFSET);
}
static unsigned long sun4c_p2v(unsigned long vaddr)
{
- return(vaddr + PAGE_OFFSET);
+ return (vaddr + PAGE_OFFSET);
}
@@ -142,44 +87,64 @@ void sun4c_flush_all(void)
{
unsigned long begin, end;
- if(sun4c_vacinfo.on)
+ if (sun4c_vacinfo.on)
panic("SUN4C: AIEEE, trying to invalidate vac while"
" it is on.");
/* Clear 'valid' bit in all cache line tags */
begin = AC_CACHETAGS;
end = (AC_CACHETAGS + SUN4C_VAC_SIZE);
- while(begin < end) {
+ while (begin < end) {
__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
"r" (begin), "i" (ASI_CONTROL));
begin += sun4c_vacinfo.linesize;
}
}
-/* Context level flush. */
-static inline void sun4c_flush_context_hw(void)
+static __inline__ void sun4c_flush_context_hw(void)
{
unsigned long end = SUN4C_VAC_SIZE;
- unsigned pgsz = PAGE_SIZE;
- ctxflushes++;
- __asm__ __volatile__("
-1: subcc %0, %2, %0
- bg 1b
- sta %%g0, [%0] %3
- nop; nop; nop; ! Weitek hwbug
-" : "=&r" (end)
- : "0" (end), "r" (pgsz), "i" (ASI_HWFLUSHCONTEXT)
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%0] %2"
+ : "=&r" (end)
+ : "0" (end), "i" (ASI_HWFLUSHCONTEXT)
: "cc");
}
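
For readers who do not read SPARC assembler, the rewritten hardware context flush above is, step for step, the loop below: each store into the ASI_HWFLUSHCONTEXT alternate space flushes one page worth of VAC lines, walking from the top of the cache down to address 0. This is an illustration only, since the real routine must stay in assembly so the store sits in the branch delay slot and no stray cacheable accesses happen mid-flush; sta_asi() is a made-up stand-in for the sta instruction:

static void sun4c_flush_context_hw_in_c(void)	/* illustrative equivalent, not kernel code */
{
	unsigned long addr = SUN4C_VAC_SIZE;

	do {
		addr -= 4096;				/* addcc %0, -4096, %0 */
		sta_asi(ASI_HWFLUSHCONTEXT, addr);	/* sta %g0, [%0] ASI_HWFLUSHCONTEXT */
	} while (addr != 0);				/* bne 1b */
}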
+/* Must be called minimally with IRQs disabled. */
+static void sun4c_flush_segment_hw(unsigned long addr)
+{
+ if (sun4c_get_segmap(addr) != invalid_segment) {
+ unsigned long vac_size = SUN4C_VAC_SIZE;
+
+ __asm__ __volatile__(
+ "1: addcc %0, -4096, %0\n\t"
+ " bne 1b\n\t"
+ " sta %%g0, [%2 + %0] %3"
+ : "=&r" (vac_size)
+ : "0" (vac_size), "r" (addr), "i" (ASI_HWFLUSHSEG)
+ : "cc");
+ }
+}
+
+/* Must be called minimally with interrupts disabled. */
+static __inline__ void sun4c_flush_page_hw(unsigned long addr)
+{
+ addr &= PAGE_MASK;
+ if ((int)sun4c_get_pte(addr) < 0)
+ __asm__ __volatile__("sta %%g0, [%0] %1"
+ : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
+}
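+
+The (int) cast above is a size and speed trick: the test relies on the sun4c pte valid bit being the sign bit of the 32-bit pte word, so "pte is valid" becomes "pte is negative". Assuming _SUN4C_PAGE_VALID really is that top bit (which is what the cast depends on), the spelled-out equivalent would be:
+
+	if (sun4c_get_pte(addr) & _SUN4C_PAGE_VALID)	/* same test as (int)pte < 0 */
+		__asm__ __volatile__("sta %%g0, [%0] %1"
+				     : : "r" (addr), "i" (ASI_HWFLUSHPAGE));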
+
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_context_sw(void)
{
unsigned long nbytes = SUN4C_VAC_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- ctxflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -203,72 +168,13 @@ static void sun4c_flush_context_sw(void)
: "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
}
-/* Scrape the segment starting at ADDR from the virtual cache. */
-static inline void sun4c_flush_segment(unsigned long addr)
-{
- if(sun4c_get_segmap(addr) == invalid_segment)
- return;
-
- segflushes++;
- if(sun4c_vacinfo.do_hwflushes) {
- unsigned long end = (addr + SUN4C_VAC_SIZE);
-
- for( ; addr < end; addr += PAGE_SIZE)
- __asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
- "r" (addr), "i" (ASI_HWFLUSHSEG));
- } else {
- unsigned long nbytes = SUN4C_VAC_SIZE;
- unsigned long lsize = sun4c_vacinfo.linesize;
-
- __asm__ __volatile__("add %2, %2, %%g1\n\t"
- "add %2, %%g1, %%g2\n\t"
- "add %2, %%g2, %%g3\n\t"
- "add %2, %%g3, %%g4\n\t"
- "add %2, %%g4, %%g5\n\t"
- "add %2, %%g5, %%o4\n\t"
- "add %2, %%o4, %%o5\n"
- "1:\n\t"
- "subcc %1, %%o5, %1\n\t"
- "sta %%g0, [%0] %6\n\t"
- "sta %%g0, [%0 + %2] %6\n\t"
- "sta %%g0, [%0 + %%g1] %6\n\t"
- "sta %%g0, [%0 + %%g2] %6\n\t"
- "sta %%g0, [%0 + %%g3] %6\n\t"
- "sta %%g0, [%0 + %%g4] %6\n\t"
- "sta %%g0, [%0 + %%g5] %6\n\t"
- "sta %%g0, [%0 + %%o4] %6\n\t"
- "bg 1b\n\t"
- " add %0, %%o5, %0\n\t"
- : "=&r" (addr), "=&r" (nbytes), "=&r" (lsize)
- : "0" (addr), "1" (nbytes), "2" (lsize),
- "i" (ASI_FLUSHSEG)
- : "g1", "g2", "g3", "g4", "g5", "o4", "o5", "cc");
- }
-}
-
-/* Call this version when you know hardware flushes are available. */
-static inline void sun4c_flush_segment_hw(unsigned long addr)
-{
- if(sun4c_get_segmap(addr) != invalid_segment) {
- unsigned long end;
-
- segflushes++;
- for(end = addr + SUN4C_VAC_SIZE; addr < end; addr += PAGE_SIZE)
- __asm__ __volatile__("sta %%g0, [%0] %1"
- : : "r" (addr), "i" (ASI_HWFLUSHSEG));
- /* Weitek POWER-UP hwbug workaround. */
- __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug");
- }
-}
-
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_segment_sw(unsigned long addr)
{
- if(sun4c_get_segmap(addr) != invalid_segment) {
+ if (sun4c_get_segmap(addr) != invalid_segment) {
unsigned long nbytes = SUN4C_VAC_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- segflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -300,12 +206,11 @@ static void sun4c_flush_page(unsigned long addr)
{
addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
- _SUN4C_PAGE_VALID)
+ if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) !=
+ _SUN4C_PAGE_VALID)
return;
- pageflushes++;
- if(sun4c_vacinfo.do_hwflushes) {
+ if (sun4c_vacinfo.do_hwflushes) {
__asm__ __volatile__("sta %%g0, [%0] %1;nop;nop;nop;\n\t" : :
"r" (addr), "i" (ASI_HWFLUSHPAGE));
} else {
@@ -338,30 +243,15 @@ static void sun4c_flush_page(unsigned long addr)
}
}
-/* Again, hw-only and sw-only cache page-level flush variants. */
-static inline void sun4c_flush_page_hw(unsigned long addr)
-{
- addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
- _SUN4C_PAGE_VALID) {
- pageflushes++;
- __asm__ __volatile__("sta %%g0, [%0] %1"
- : : "r" (addr), "i" (ASI_HWFLUSHPAGE));
- /* Weitek POWER-UP hwbug workaround. */
- __asm__ __volatile__("nop;nop;nop; ! Weitek hwbug");
- }
-}
-
/* Don't inline the software version as it eats too many cache lines if expanded. */
static void sun4c_flush_page_sw(unsigned long addr)
{
addr &= PAGE_MASK;
- if((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
- _SUN4C_PAGE_VALID) {
+ if ((sun4c_get_pte(addr) & (_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_VALID)) ==
+ _SUN4C_PAGE_VALID) {
unsigned long left = PAGE_SIZE;
unsigned long lsize = sun4c_vacinfo.linesize;
- pageflushes++;
__asm__ __volatile__("
add %2, %2, %%g1
add %2, %%g1, %%g2
@@ -411,7 +301,7 @@ static inline void sun4c_init_clean_segmap(unsigned char pseg)
unsigned long vaddr;
sun4c_put_segmap(0, pseg);
- for(vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr+=PAGE_SIZE)
+ for (vaddr = 0; vaddr < SUN4C_REAL_PGDIR_SIZE; vaddr += PAGE_SIZE)
sun4c_put_pte(vaddr, 0);
sun4c_put_segmap(0, invalid_segment);
}
@@ -423,15 +313,15 @@ static inline void sun4c_init_clean_mmu(unsigned long kernel_end)
savectx = sun4c_get_context();
kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
- for(vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = 0; vaddr < 0x20000000; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = 0xe0000000; vaddr < KERNBASE; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = kernel_end; vaddr < KADB_DEBUGGER_BEGVM; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
- for(vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
+ for (vaddr = LINUX_OPPROM_ENDVM; vaddr; vaddr += SUN4C_REAL_PGDIR_SIZE)
sun4c_put_segmap(vaddr, invalid_segment);
}
sun4c_set_context(savectx);
@@ -442,7 +332,7 @@ void __init sun4c_probe_vac(void)
sun4c_disable_vac();
if (ARCH_SUN4) {
- switch(idprom->id_machtype) {
+ switch (idprom->id_machtype) {
case (SM_SUN4|SM_4_110):
sun4c_vacinfo.type = NONE;
@@ -477,12 +367,12 @@ void __init sun4c_probe_vac(void)
default:
prom_printf("Cannot initialize VAC - wierd sun4 model idprom->id_machtype = %d", idprom->id_machtype);
prom_halt();
- }
+ };
} else {
sun4c_vacinfo.type = WRITE_THROUGH;
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
/* PROM on SS1 lacks this info, to be super safe we
* hard code it here since this arch is cast in stone.
*/
@@ -497,7 +387,7 @@ void __init sun4c_probe_vac(void)
sun4c_vacinfo.do_hwflushes =
prom_getintdefault(prom_root_node, "vac-hwflush", 0);
- if(sun4c_vacinfo.do_hwflushes == 0)
+ if (sun4c_vacinfo.do_hwflushes == 0)
sun4c_vacinfo.do_hwflushes =
prom_getintdefault(prom_root_node, "vac_hwflush", 0);
@@ -509,7 +399,7 @@ void __init sun4c_probe_vac(void)
sun4c_vacinfo.num_lines =
(sun4c_vacinfo.num_bytes / sun4c_vacinfo.linesize);
- switch(sun4c_vacinfo.linesize) {
+ switch (sun4c_vacinfo.linesize) {
case 16:
sun4c_vacinfo.log2lsize = 4;
break;
@@ -566,7 +456,7 @@ static void patch_kernel_fault_handler(void)
prom_printf("Unhandled number of segmaps: %d\n",
num_segmaps);
prom_halt();
- }
+ };
switch (num_contexts) {
case 8:
/* Default, nothing to do. */
@@ -574,19 +464,22 @@ static void patch_kernel_fault_handler(void)
case 16:
PATCH_INSN(num_context_patch1_16,
num_context_patch1);
+#if 0
PATCH_INSN(num_context_patch2_16,
num_context_patch2);
+#endif
break;
default:
prom_printf("Unhandled number of contexts: %d\n",
num_contexts);
prom_halt();
- }
- if(sun4c_vacinfo.do_hwflushes != 0) {
+ };
+
+ if (sun4c_vacinfo.do_hwflushes != 0) {
PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
PATCH_INSN(vac_hwflush_patch2_on, vac_hwflush_patch2);
} else {
- switch(sun4c_vacinfo.linesize) {
+ switch (sun4c_vacinfo.linesize) {
case 16:
/* Default, nothing to do. */
break;
@@ -604,7 +497,7 @@ static void patch_kernel_fault_handler(void)
static void __init sun4c_probe_mmu(void)
{
if (ARCH_SUN4) {
- switch(idprom->id_machtype) {
+ switch (idprom->id_machtype) {
case (SM_SUN4|SM_4_110):
prom_printf("No support for 4100 yet\n");
prom_halt();
@@ -631,10 +524,10 @@ static void __init sun4c_probe_mmu(void)
default:
prom_printf("Invalid SUN4 model\n");
prom_halt();
- }
+ };
} else {
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) {
/* Hardcode these just to be safe, PROM on SS1 does
* not have this info available in the root node.
*/
@@ -658,20 +551,15 @@ void __init sun4c_probe_memerr_reg(void)
struct linux_prom_registers regs[1];
if (ARCH_SUN4) {
- sun4c_memerr_reg = sparc_alloc_io(sun4_memreg_physaddr, 0,
- PAGE_SIZE,
- "memory parity error",
- 0x0, 0);
+ sun4c_memerr_reg = ioremap(sun4_memreg_physaddr, PAGE_SIZE);
} else {
node = prom_getchild(prom_root_node);
node = prom_searchsiblings(prom_root_node, "memory-error");
if (!node)
return;
prom_getproperty(node, "reg", (char *)regs, sizeof(regs));
- sun4c_memerr_reg = sparc_alloc_io(regs[0].phys_addr, 0,
- regs[0].reg_size,
- "memory parity error",
- regs[0].which_io, 0);
+ /* hmm I think regs[0].which_io is zero here anyway */
+ sun4c_memerr_reg = ioremap(regs[0].phys_addr, regs[0].reg_size);
}
}
@@ -679,10 +567,10 @@ static inline void sun4c_init_ss2_cache_bug(void)
{
extern unsigned long start;
- if((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
- (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
- (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
+ if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS2)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_IPX)) ||
+ (idprom->id_machtype == (SM_SUN4 | SM_4_330)) ||
+ (idprom->id_machtype == (SM_SUN4C | SM_4C_ELC))) {
/* Whee.. */
printk("SS2 cache bug detected, uncaching trap table page\n");
sun4c_flush_page((unsigned int) &start);
@@ -692,17 +580,13 @@ static inline void sun4c_init_ss2_cache_bug(void)
}
/* Addr is always aligned on a page boundary for us already. */
-static void sun4c_map_dma_area(unsigned long addr, int len)
+static void sun4c_map_dma_area(unsigned long va, u32 addr, int len)
{
unsigned long page, end;
end = PAGE_ALIGN((addr + len));
- while(addr < end) {
- page = get_free_page(GFP_KERNEL);
- if(!page) {
- prom_printf("alloc_dvma: Cannot get a dvma page\n");
- prom_halt();
- }
+ while (addr < end) {
+ page = va;
sun4c_flush_page(page);
page -= PAGE_OFFSET;
page >>= PAGE_SHIFT;
@@ -710,9 +594,21 @@ static void sun4c_map_dma_area(unsigned long addr, int len)
_SUN4C_PAGE_NOCACHE | _SUN4C_PAGE_PRIV);
sun4c_put_pte(addr, page);
addr += PAGE_SIZE;
+ va += PAGE_SIZE;
}
}
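
Note the ownership change in the new prototype: sun4c_map_dma_area() no longer allocates pages itself; the caller passes va, the kernel virtual address of memory it already owns, plus addr, the DVMA address the buffer should appear at. A hedged usage sketch (the DVMA address and the single-page length here are invented for illustration, not values taken from the patch):

	/* Map one existing kernel page into the DVMA window, uncached. */
	unsigned long kva  = get_free_page(GFP_KERNEL);	/* backing page, owned by the caller */
	u32           dvma = 0xfff00000;		/* hypothetical DVMA bus address */

	if (kva)
		sun4c_map_dma_area(kva, dvma, PAGE_SIZE);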
+static void sun4c_unmap_dma_area(unsigned long addr, int len)
+{
+}
+
+static void sun4c_inval_dma_area(unsigned long addr, int len)
+{
+}
+
+static void sun4c_flush_dma_area(unsigned long addr, int len)
+{
+}
/* TLB management. */
@@ -726,6 +622,13 @@ struct sun4c_mmu_entry {
unsigned long vaddr;
unsigned char pseg;
unsigned char locked;
+
+ /* For user mappings only, and completely hidden from kernel
+ * TLB miss code.
+ */
+ unsigned char ctx;
+ struct sun4c_mmu_entry *lru_next;
+ struct sun4c_mmu_entry *lru_prev;
};
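
The two new lru pointers thread every in-use user entry onto one global list (sun4c_ulru_ring below), so the replacement code can evict whatever sits right after the ring head instead of walking per-context rings. A minimal, self-contained sketch of the sentinel-ring convention these fields follow, with simplified types rather than the kernel structures:

struct lru_node {
	struct lru_node *lru_next, *lru_prev;
};

/* An empty ring is a head that points at itself. */
static struct lru_node lru_head = { &lru_head, &lru_head };

static void lru_add_tail(struct lru_node *e)	/* newest entries go just before the head */
{
	e->lru_next = &lru_head;
	(e->lru_prev = lru_head.lru_prev)->lru_next = e;
	lru_head.lru_prev = e;
}

static void lru_remove(struct lru_node *e)	/* eviction then takes lru_head.lru_next */
{
	struct lru_node *next = e->lru_next;

	(next->lru_prev = e->lru_prev)->lru_next = next;
}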
static struct sun4c_mmu_entry mmu_entry_pool[SUN4C_MAX_SEGMAPS];
@@ -734,12 +637,15 @@ static void __init sun4c_init_mmu_entry_pool(void)
{
int i;
- for(i=0; i < SUN4C_MAX_SEGMAPS; i++) {
+ for (i=0; i < SUN4C_MAX_SEGMAPS; i++) {
mmu_entry_pool[i].pseg = i;
mmu_entry_pool[i].next = 0;
mmu_entry_pool[i].prev = 0;
mmu_entry_pool[i].vaddr = 0;
mmu_entry_pool[i].locked = 0;
+ mmu_entry_pool[i].ctx = 0;
+ mmu_entry_pool[i].lru_next = 0;
+ mmu_entry_pool[i].lru_prev = 0;
}
mmu_entry_pool[invalid_segment].locked = 1;
}
@@ -750,8 +656,8 @@ static inline void fix_permissions(unsigned long vaddr, unsigned long bits_on,
unsigned long start, end;
end = vaddr + SUN4C_REAL_PGDIR_SIZE;
- for(start = vaddr; start < end; start += PAGE_SIZE)
- if(sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
+ for (start = vaddr; start < end; start += PAGE_SIZE)
+ if (sun4c_get_pte(start) & _SUN4C_PAGE_VALID)
sun4c_put_pte(start, (sun4c_get_pte(start) | bits_on) &
~bits_off);
}
@@ -762,16 +668,16 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
unsigned char pseg, ctx;
#ifdef CONFIG_SUN4
/* sun4/110 and 260 have no kadb. */
- if((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
- (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
+ if ((idprom->id_machtype != (SM_SUN4 | SM_4_260)) &&
+ (idprom->id_machtype != (SM_SUN4 | SM_4_110))) {
#endif
- for(vaddr = KADB_DEBUGGER_BEGVM;
- vaddr < LINUX_OPPROM_ENDVM;
- vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ for (vaddr = KADB_DEBUGGER_BEGVM;
+ vaddr < LINUX_OPPROM_ENDVM;
+ vaddr += SUN4C_REAL_PGDIR_SIZE) {
pseg = sun4c_get_segmap(vaddr);
- if(pseg != invalid_segment) {
+ if (pseg != invalid_segment) {
mmu_entry_pool[pseg].locked = 1;
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, vaddr, pseg);
fix_permissions(vaddr, _SUN4C_PAGE_PRIV, 0);
}
@@ -779,10 +685,10 @@ static inline void sun4c_init_map_kernelprom(unsigned long kernel_end)
#ifdef CONFIG_SUN4
}
#endif
- for(vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
+ for (vaddr = KERNBASE; vaddr < kernel_end; vaddr += SUN4C_REAL_PGDIR_SIZE) {
pseg = sun4c_get_segmap(vaddr);
mmu_entry_pool[pseg].locked = 1;
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, vaddr, pseg);
fix_permissions(vaddr, _SUN4C_PAGE_PRIV, _SUN4C_PAGE_NOCACHE);
}
@@ -792,13 +698,13 @@ static void __init sun4c_init_lock_area(unsigned long start, unsigned long end)
{
int i, ctx;
- while(start < end) {
- for(i=0; i < invalid_segment; i++)
- if(!mmu_entry_pool[i].locked)
+ while (start < end) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
break;
mmu_entry_pool[i].locked = 1;
sun4c_init_clean_segmap(i);
- for(ctx = 0; ctx < num_contexts; ctx++)
+ for (ctx = 0; ctx < num_contexts; ctx++)
prom_putsegment(ctx, start, mmu_entry_pool[i].pseg);
start += SUN4C_REAL_PGDIR_SIZE;
}
@@ -815,13 +721,15 @@ struct sun4c_mmu_ring {
static struct sun4c_mmu_ring sun4c_context_ring[SUN4C_MAX_CONTEXTS]; /* used user entries */
static struct sun4c_mmu_ring sun4c_ufree_ring; /* free user entries */
+static struct sun4c_mmu_ring sun4c_ulru_ring; /* LRU user entries */
struct sun4c_mmu_ring sun4c_kernel_ring; /* used kernel entries */
struct sun4c_mmu_ring sun4c_kfree_ring; /* free kernel entries */
-static inline void sun4c_init_rings(unsigned long *mempool)
+static inline void sun4c_init_rings(void)
{
int i;
- for(i=0; i<SUN4C_MAX_CONTEXTS; i++) {
+
+ for (i = 0; i < SUN4C_MAX_CONTEXTS; i++) {
sun4c_context_ring[i].ringhd.next =
sun4c_context_ring[i].ringhd.prev =
&sun4c_context_ring[i].ringhd;
@@ -830,6 +738,9 @@ static inline void sun4c_init_rings(unsigned long *mempool)
sun4c_ufree_ring.ringhd.next = sun4c_ufree_ring.ringhd.prev =
&sun4c_ufree_ring.ringhd;
sun4c_ufree_ring.num_entries = 0;
+ sun4c_ulru_ring.ringhd.lru_next = sun4c_ulru_ring.ringhd.lru_prev =
+ &sun4c_ulru_ring.ringhd;
+ sun4c_ulru_ring.num_entries = 0;
sun4c_kernel_ring.ringhd.next = sun4c_kernel_ring.ringhd.prev =
&sun4c_kernel_ring.ringhd;
sun4c_kernel_ring.num_entries = 0;
@@ -838,8 +749,8 @@ static inline void sun4c_init_rings(unsigned long *mempool)
sun4c_kfree_ring.num_entries = 0;
}
-static inline void add_ring(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static void add_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *head = &ring->ringhd;
@@ -849,49 +760,58 @@ static inline void add_ring(struct sun4c_mmu_ring *ring,
ring->num_entries++;
}
-static inline void add_ring_ordered(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static __inline__ void add_lru(struct sun4c_mmu_entry *entry)
+{
+ struct sun4c_mmu_ring *ring = &sun4c_ulru_ring;
+ struct sun4c_mmu_entry *head = &ring->ringhd;
+
+ entry->lru_next = head;
+ (entry->lru_prev = head->lru_prev)->lru_next = entry;
+ head->lru_prev = entry;
+}
+
+static void add_ring_ordered(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *head = &ring->ringhd;
unsigned long addr = entry->vaddr;
- if(head->next != &ring->ringhd) {
- while((head->next != &ring->ringhd) && (head->next->vaddr < addr))
- head = head->next;
- }
+ while ((head->next != &ring->ringhd) && (head->next->vaddr < addr))
+ head = head->next;
+
entry->prev = head;
(entry->next = head->next)->prev = entry;
head->next = entry;
ring->num_entries++;
+
+ add_lru(entry);
}
-static inline void remove_ring(struct sun4c_mmu_ring *ring,
- struct sun4c_mmu_entry *entry)
+static __inline__ void remove_ring(struct sun4c_mmu_ring *ring,
+ struct sun4c_mmu_entry *entry)
{
struct sun4c_mmu_entry *next = entry->next;
(next->prev = entry->prev)->next = next;
ring->num_entries--;
-#ifdef DEBUG_SUN4C_MM
- if(ring->num_entries < 0)
- panic("sun4c: Ring num_entries < 0!");
-#endif
}
-static inline void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+static void remove_lru(struct sun4c_mmu_entry *entry)
{
- remove_ring(sun4c_context_ring+ctx, entry);
- add_ring(&sun4c_ufree_ring, entry);
+ struct sun4c_mmu_entry *next = entry->lru_next;
+
+ (next->lru_prev = entry->lru_prev)->lru_next = next;
}
-static inline void assign_user_entry(int ctx, struct sun4c_mmu_entry *entry)
+static void free_user_entry(int ctx, struct sun4c_mmu_entry *entry)
{
- remove_ring(&sun4c_ufree_ring, entry);
- add_ring_ordered(sun4c_context_ring+ctx, entry);
+ remove_ring(sun4c_context_ring+ctx, entry);
+ remove_lru(entry);
+ add_ring(&sun4c_ufree_ring, entry);
}
-static inline void free_kernel_entry(struct sun4c_mmu_entry *entry,
- struct sun4c_mmu_ring *ring)
+static void free_kernel_entry(struct sun4c_mmu_entry *entry,
+ struct sun4c_mmu_ring *ring)
{
remove_ring(ring, entry);
add_ring(&sun4c_kfree_ring, entry);
@@ -901,9 +821,9 @@ static void __init sun4c_init_fill_kernel_ring(int howmany)
{
int i;
- while(howmany) {
- for(i=0; i < invalid_segment; i++)
- if(!mmu_entry_pool[i].locked)
+ while (howmany) {
+ for (i = 0; i < invalid_segment; i++)
+ if (!mmu_entry_pool[i].locked)
break;
mmu_entry_pool[i].locked = 1;
sun4c_init_clean_segmap(i);
@@ -916,54 +836,40 @@ static void __init sun4c_init_fill_user_ring(void)
{
int i;
- for(i=0; i < invalid_segment; i++) {
- if(mmu_entry_pool[i].locked)
+ for (i = 0; i < invalid_segment; i++) {
+ if (mmu_entry_pool[i].locked)
continue;
sun4c_init_clean_segmap(i);
add_ring(&sun4c_ufree_ring, &mmu_entry_pool[i]);
}
}
-static inline void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
+static void sun4c_kernel_unmap(struct sun4c_mmu_entry *kentry)
{
int savectx, ctx;
savectx = sun4c_get_context();
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(kentry->vaddr, invalid_segment);
}
sun4c_set_context(savectx);
}
-static inline void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
+static void sun4c_kernel_map(struct sun4c_mmu_entry *kentry)
{
int savectx, ctx;
savectx = sun4c_get_context();
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(kentry->vaddr, kentry->pseg);
}
sun4c_set_context(savectx);
}
-static inline void sun4c_user_unmap(struct sun4c_mmu_entry *uentry)
-{
- sun4c_put_segmap(uentry->vaddr, invalid_segment);
-}
-
-static inline void sun4c_user_map(struct sun4c_mmu_entry *uentry)
-{
- unsigned long start = uentry->vaddr;
- unsigned long end = start + SUN4C_REAL_PGDIR_SIZE;
-
- sun4c_put_segmap(uentry->vaddr, uentry->pseg);
- while(start < end) {
- sun4c_put_pte(start, 0);
- start += PAGE_SIZE;
- }
-}
+#define sun4c_user_unmap(__entry) \
+ sun4c_put_segmap((__entry)->vaddr, invalid_segment)
static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx)
{
@@ -971,11 +877,11 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
+ flush_user_windows();
sun4c_set_context(ctx);
sun4c_flush_context_hw();
do {
@@ -985,7 +891,7 @@ static void sun4c_demap_context_hw(struct sun4c_mmu_ring *crp, unsigned char ctx
free_user_entry(ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -997,11 +903,11 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
+ flush_user_windows();
sun4c_set_context(ctx);
sun4c_flush_context_sw();
do {
@@ -1011,49 +917,31 @@ static void sun4c_demap_context_sw(struct sun4c_mmu_ring *crp, unsigned char ctx
free_user_entry(ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
}
-static inline void sun4c_demap_one(struct sun4c_mmu_ring *crp, unsigned char ctx)
-{
- /* by using .prev we get a kind of "lru" algorithm */
- struct sun4c_mmu_entry *entry = crp->ringhd.prev;
- unsigned long flags;
- int savectx = sun4c_get_context();
-
-#ifdef DEBUG_SUN4C_MM
- if(entry == &crp->ringhd)
- panic("sun4c_demap_one: Freeing from empty ctx ring.");
-#endif
- FUW_INLINE
- save_and_cli(flags);
- sun4c_set_context(ctx);
- sun4c_flush_segment(entry->vaddr);
- sun4c_user_unmap(entry);
- free_user_entry(ctx, entry);
- sun4c_set_context(savectx);
- restore_flags(flags);
-}
-
static int sun4c_user_taken_entries = 0; /* This is how much we have. */
static int max_user_taken_entries = 0; /* This limits us and prevents deadlock. */
-static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
+static struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
{
struct sun4c_mmu_entry *this_entry;
/* If some are free, return first one. */
- if(sun4c_kfree_ring.num_entries) {
+ if (sun4c_kfree_ring.num_entries) {
this_entry = sun4c_kfree_ring.ringhd.next;
return this_entry;
}
/* Else free one up. */
this_entry = sun4c_kernel_ring.ringhd.prev;
- sun4c_flush_segment(this_entry->vaddr);
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(this_entry->vaddr);
+ else
+ sun4c_flush_segment_sw(this_entry->vaddr);
sun4c_kernel_unmap(this_entry);
free_kernel_entry(this_entry, &sun4c_kernel_ring);
this_entry = sun4c_kfree_ring.ringhd.next;
@@ -1061,141 +949,73 @@ static inline struct sun4c_mmu_entry *sun4c_kernel_strategy(void)
return this_entry;
}
-void sun4c_shrink_kernel_ring(void)
-{
- struct sun4c_mmu_entry *entry;
- unsigned long flags;
-
- /* If an interrupt comes in here, we die... */
- save_and_cli(flags);
-
- if (sun4c_user_taken_entries) {
- entry = sun4c_kernel_strategy();
- remove_ring(&sun4c_kfree_ring, entry);
- add_ring(&sun4c_ufree_ring, entry);
- sun4c_user_taken_entries--;
-#if 0
- printk("shrink: ufree= %d, kfree= %d, kernel= %d\n",
- sun4c_ufree_ring.num_entries,
- sun4c_kfree_ring.num_entries,
- sun4c_kernel_ring.num_entries);
-#endif
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_user_taken_entries < 0)
- panic("sun4c_shrink_kernel_ring: taken < 0.");
-#endif
- }
- restore_flags(flags);
-}
-
/* Using this method to free up mmu entries eliminates a lot of
* potential races since we have a kernel that incurs tlb
* replacement faults. There may be performance penalties.
+ *
+ * NOTE: Must be called with interrupts disabled.
*/
-static inline struct sun4c_mmu_entry *sun4c_user_strategy(void)
+static struct sun4c_mmu_entry *sun4c_user_strategy(void)
{
- struct ctx_list *next_one;
- struct sun4c_mmu_ring *rp = 0;
+ struct sun4c_mmu_entry *entry;
unsigned char ctx;
-#ifdef DEBUG_SUN4C_MM
- int lim = num_contexts;
-#endif
+ int savectx;
/* If some are free, return first one. */
- if(sun4c_ufree_ring.num_entries) {
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: num_entries!=0 but ring empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+ if (sun4c_ufree_ring.num_entries) {
+ entry = sun4c_ufree_ring.ringhd.next;
+ goto unlink_out;
}
if (sun4c_user_taken_entries) {
- sun4c_shrink_kernel_ring();
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: kernel shrunk but ufree empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+ entry = sun4c_kernel_strategy();
+ sun4c_user_taken_entries--;
+ goto kunlink_out;
}
- /* Grab one from the LRU context. */
- next_one = ctx_used.next;
- while ((sun4c_context_ring[next_one->ctx_number].num_entries == 0)
-#ifdef DEBUG_SUN4C_MM
- && (--lim >= 0)
-#endif
- )
- next_one = next_one->next;
+ /* Grab from the beginning of the LRU list. */
+ entry = sun4c_ulru_ring.ringhd.lru_next;
+ ctx = entry->ctx;
-#ifdef DEBUG_SUN4C_MM
- if(lim < 0)
- panic("No user segmaps!");
-#endif
+ savectx = sun4c_get_context();
+ flush_user_windows();
+ sun4c_set_context(ctx);
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(entry->vaddr);
+ else
+ sun4c_flush_segment_sw(entry->vaddr);
+ sun4c_user_unmap(entry);
+ remove_ring(sun4c_context_ring + ctx, entry);
+ remove_lru(entry);
+ sun4c_set_context(savectx);
- ctx = next_one->ctx_number;
- rp = &sun4c_context_ring[ctx];
+ return entry;
- sun4c_demap_one(rp, ctx);
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_ufree_ring.ringhd.next == &sun4c_ufree_ring.ringhd)
- panic("sun4c_user_strategy: demapped one but ufree empty.");
-#endif
- return sun4c_ufree_ring.ringhd.next;
+unlink_out:
+ remove_ring(&sun4c_ufree_ring, entry);
+ return entry;
+kunlink_out:
+ remove_ring(&sun4c_kfree_ring, entry);
+ return entry;
}
+/* NOTE: Must be called with interrupts disabled. */
void sun4c_grow_kernel_ring(void)
{
struct sun4c_mmu_entry *entry;
-#if 0
- printk("grow: ");
-#endif
-
/* Prevent deadlock condition. */
- if(sun4c_user_taken_entries >= max_user_taken_entries) {
-#if 0
- printk("deadlock avoidance, taken= %d max= %d\n",
- sun4c_user_taken_entries, max_user_taken_entries);
-#endif
+ if (sun4c_user_taken_entries >= max_user_taken_entries)
return;
- }
if (sun4c_ufree_ring.num_entries) {
entry = sun4c_ufree_ring.ringhd.next;
-#ifdef DEBUG_SUN4C_MM
- if(entry == &sun4c_ufree_ring.ringhd)
- panic("\nsun4c_grow_kernel_ring: num_entries!=0, ring empty.");
-#endif
remove_ring(&sun4c_ufree_ring, entry);
add_ring(&sun4c_kfree_ring, entry);
-#ifdef DEBUG_SUN4C_MM
- if(sun4c_user_taken_entries < 0)
- panic("\nsun4c_grow_kernel_ring: taken < 0.");
-#endif
sun4c_user_taken_entries++;
-#if 0
- printk("ufree= %d, kfree= %d, kernel= %d\n",
- sun4c_ufree_ring.num_entries,
- sun4c_kfree_ring.num_entries,
- sun4c_kernel_ring.num_entries);
-#endif
}
}
-static inline void alloc_user_segment(unsigned long address, unsigned char ctx)
-{
- struct sun4c_mmu_entry *entry;
- unsigned long flags;
-
- save_and_cli(flags);
- entry = sun4c_user_strategy();
- entry->vaddr = (address & SUN4C_REAL_PGDIR_MASK);
- assign_user_entry(ctx, entry);
- sun4c_user_map(entry);
- restore_flags(flags);
-}
-
/* This is now a fast in-window trap handler to avoid any and all races. */
static void sun4c_quick_kernel_fault(unsigned long address)
{
@@ -1209,8 +1029,8 @@ static void sun4c_quick_kernel_fault(unsigned long address)
* bucket[0]
* bucket[1]
* [ ... ]
- * bucket[NR_TASKS-1]
- * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASKS)
+ * bucket[NR_TASK_BUCKETS-1]
+ * TASK_STACK_BEGIN + (sizeof(struct task_bucket) * NR_TASK_BUCKETS)
*
* Each slot looks like:
*
@@ -1218,7 +1038,7 @@ static void sun4c_quick_kernel_fault(unsigned long address)
* page 2 -- rest of kernel stack
*/
-union task_union *sun4c_bucket[NR_TASKS];
+union task_union *sun4c_bucket[NR_TASK_BUCKETS];
static int sun4c_lowbucket_avail;
@@ -1232,7 +1052,7 @@ static int sun4c_lowbucket_avail;
#define BUCKET_PTE_PAGE(pte) \
(PAGE_OFFSET + (((pte) & SUN4C_PFN_MASK) << PAGE_SHIFT))
-static inline void get_locked_segment(unsigned long addr)
+static void get_locked_segment(unsigned long addr)
{
struct sun4c_mmu_entry *stolen;
unsigned long flags;
@@ -1240,19 +1060,14 @@ static inline void get_locked_segment(unsigned long addr)
save_and_cli(flags);
addr &= SUN4C_REAL_PGDIR_MASK;
stolen = sun4c_user_strategy();
- remove_ring(&sun4c_ufree_ring, stolen);
max_user_taken_entries--;
-#ifdef DEBUG_SUN4C_MM
- if(max_user_taken_entries < 0)
- panic("get_locked_segment: max_user_taken < 0.");
-#endif
stolen->vaddr = addr;
- FUW_INLINE
+ flush_user_windows();
sun4c_kernel_map(stolen);
restore_flags(flags);
}
-static inline void free_locked_segment(unsigned long addr)
+static void free_locked_segment(unsigned long addr)
{
struct sun4c_mmu_entry *entry;
unsigned long flags;
@@ -1263,14 +1078,13 @@ static inline void free_locked_segment(unsigned long addr)
pseg = sun4c_get_segmap(addr);
entry = &mmu_entry_pool[pseg];
- FUW_INLINE
- sun4c_flush_segment(addr);
+ flush_user_windows();
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(addr);
+ else
+ sun4c_flush_segment_sw(addr);
sun4c_kernel_unmap(entry);
add_ring(&sun4c_ufree_ring, entry);
-#ifdef DEBUG_SUN4C_MM
- if(max_user_taken_entries < 0)
- panic("free_locked_segment: max_user_taken < 0.");
-#endif
max_user_taken_entries++;
restore_flags(flags);
}
@@ -1282,8 +1096,8 @@ static inline void garbage_collect(int entry)
/* 32 buckets per segment... */
entry &= ~31;
start = entry;
- for(end = (start + 32); start < end; start++)
- if(sun4c_bucket[start] != BUCKET_EMPTY)
+ for (end = (start + 32); start < end; start++)
+ if (sun4c_bucket[start] != BUCKET_EMPTY)
return;
/* Entire segment empty, release it. */
@@ -1302,23 +1116,39 @@ static struct task_struct *sun4c_alloc_task_struct(void)
int entry;
pages = __get_free_pages(GFP_KERNEL, TASK_STRUCT_ORDER);
- if(!pages)
+ if (!pages)
return (struct task_struct *) 0;
- for(entry = sun4c_lowbucket_avail; entry < NR_TASKS; entry++)
- if(sun4c_bucket[entry] == BUCKET_EMPTY)
+ for (entry = sun4c_lowbucket_avail; entry < NR_TASK_BUCKETS; entry++)
+ if (sun4c_bucket[entry] == BUCKET_EMPTY)
break;
- if(entry == NR_TASKS) {
+ if (entry == NR_TASK_BUCKETS) {
free_pages(pages, TASK_STRUCT_ORDER);
return (struct task_struct *) 0;
}
- if(entry >= sun4c_lowbucket_avail)
+ if (entry >= sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry + 1;
addr = BUCKET_ADDR(entry);
sun4c_bucket[entry] = (union task_union *) addr;
if(sun4c_get_segmap(addr) == invalid_segment)
get_locked_segment(addr);
+
+ /* We are changing the virtual color of the page(s)
+ * so we must flush the cache to guarantee consistency.
+ */
+ if (sun4c_vacinfo.do_hwflushes) {
+ sun4c_flush_page_hw(pages);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page_hw(pages + PAGE_SIZE);
+#endif
+ } else {
+ sun4c_flush_page_sw(pages);
+#ifndef CONFIG_SUN4
+ sun4c_flush_page_sw(pages + PAGE_SIZE);
+#endif
+ }
+
sun4c_put_pte(addr, BUCKET_PTE(pages));
#ifndef CONFIG_SUN4
sun4c_put_pte(addr + PAGE_SIZE, BUCKET_PTE(pages + PAGE_SIZE));
@@ -1342,7 +1172,7 @@ static void sun4c_free_task_struct_hw(struct task_struct *tsk)
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
- if(entry < sun4c_lowbucket_avail)
+ if (entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
free_pages(pages, TASK_STRUCT_ORDER);
@@ -1365,7 +1195,7 @@ static void sun4c_free_task_struct_sw(struct task_struct *tsk)
sun4c_put_pte(tsaddr + PAGE_SIZE, 0);
#endif
sun4c_bucket[entry] = BUCKET_EMPTY;
- if(entry < sun4c_lowbucket_avail)
+ if (entry < sun4c_lowbucket_avail)
sun4c_lowbucket_avail = entry;
free_pages(pages, TASK_STRUCT_ORDER);
@@ -1376,10 +1206,10 @@ static void __init sun4c_init_buckets(void)
{
int entry;
- if(sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
+ if (sizeof(union task_union) != (PAGE_SIZE << TASK_STRUCT_ORDER)) {
prom_printf("task union not %d page(s)!\n", 1 << TASK_STRUCT_ORDER);
}
- for(entry = 0; entry < NR_TASKS; entry++)
+ for (entry = 0; entry < NR_TASK_BUCKETS; entry++)
sun4c_bucket[entry] = BUCKET_EMPTY;
sun4c_lowbucket_avail = 0;
}
@@ -1494,37 +1324,38 @@ static void sun4c_unlockarea(char *vaddr, unsigned long size)
* by implication and fool the page locking code above
* if passed to by mistake.
*/
-static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct linux_sbus *sbus)
+static __u32 sun4c_get_scsi_one(char *bufptr, unsigned long len, struct sbus_bus *sbus)
{
unsigned long page;
page = ((unsigned long)bufptr) & PAGE_MASK;
- if(MAP_NR(page) > max_mapnr) {
+ if (MAP_NR(page) > max_mapnr) {
sun4c_flush_page(page);
return (__u32)bufptr; /* already locked */
}
return (__u32)sun4c_lockarea(bufptr, len);
}
-static void sun4c_get_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void sun4c_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
- while(sz >= 0) {
- sg[sz].dvma_addr = (__u32)sun4c_lockarea(sg[sz].addr, sg[sz].len);
+ while (sz >= 0) {
+ sg[sz].dvma_address = (__u32)sun4c_lockarea(sg[sz].address, sg[sz].length);
+ sg[sz].dvma_length = sg[sz].length;
sz--;
}
}
-static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct linux_sbus *sbus)
+static void sun4c_release_scsi_one(__u32 bufptr, unsigned long len, struct sbus_bus *sbus)
{
- if(bufptr < sun4c_iobuffer_start)
+ if (bufptr < sun4c_iobuffer_start)
return; /* On kernel stack or similar, see above */
sun4c_unlockarea((char *)bufptr, len);
}
-static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_sbus *sbus)
+static void sun4c_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
{
- while(sz >= 0) {
- sun4c_unlockarea((char *)sg[sz].dvma_addr, sg[sz].len);
+ while (sz >= 0) {
+ sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
sz--;
}
}
@@ -1534,7 +1365,7 @@ static void sun4c_release_scsi_sgl(struct mmu_sglist *sg, int sz, struct linux_s
struct vm_area_struct sun4c_kstack_vma;
-static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
+static void __init sun4c_init_lock_areas(void)
{
unsigned long sun4c_taskstack_start;
unsigned long sun4c_taskstack_end;
@@ -1543,9 +1374,9 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
sun4c_init_buckets();
sun4c_taskstack_start = SUN4C_LOCK_VADDR;
sun4c_taskstack_end = (sun4c_taskstack_start +
- (TASK_ENTRY_SIZE * NR_TASKS));
- if(sun4c_taskstack_end >= SUN4C_LOCK_END) {
- prom_printf("Too many tasks, decrease NR_TASKS please.\n");
+ (TASK_ENTRY_SIZE * NR_TASK_BUCKETS));
+ if (sun4c_taskstack_end >= SUN4C_LOCK_END) {
+ prom_printf("Too many tasks, decrease NR_TASK_BUCKETS please.\n");
prom_halt();
}
@@ -1556,9 +1387,8 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
bitmap_size = (bitmap_size + 7) >> 3;
bitmap_size = LONG_ALIGN(bitmap_size);
iobuffer_map_size = bitmap_size << 3;
- sun4c_iobuffer_map = (unsigned long *) start_mem;
- memset((void *) start_mem, 0, bitmap_size);
- start_mem += bitmap_size;
+ sun4c_iobuffer_map = __alloc_bootmem(bitmap_size, SMP_CACHE_BYTES, 0UL);
+ memset((void *) sun4c_iobuffer_map, 0, bitmap_size);
sun4c_kstack_vma.vm_mm = &init_mm;
sun4c_kstack_vma.vm_start = sun4c_taskstack_start;
@@ -1566,7 +1396,6 @@ static unsigned long __init sun4c_init_lock_areas(unsigned long start_mem)
sun4c_kstack_vma.vm_page_prot = PAGE_SHARED;
sun4c_kstack_vma.vm_flags = VM_READ | VM_WRITE | VM_EXEC;
insert_vm_struct(&init_mm, &sun4c_kstack_vma);
- return start_mem;
}
/* Cache flushing on the sun4c. */
@@ -1574,12 +1403,12 @@ static void sun4c_flush_cache_all(void)
{
unsigned long begin, end;
- FUW_INLINE
+ flush_user_windows();
begin = (KERNBASE + SUN4C_REAL_PGDIR_SIZE);
end = (begin + SUN4C_VAC_SIZE);
- if(sun4c_vacinfo.linesize == 32) {
- while(begin < end) {
+ if (sun4c_vacinfo.linesize == 32) {
+ while (begin < end) {
__asm__ __volatile__("
ld [%0 + 0x00], %%g0
ld [%0 + 0x20], %%g0
@@ -1601,7 +1430,7 @@ static void sun4c_flush_cache_all(void)
begin += 512;
}
} else {
- while(begin < end) {
+ while (begin < end) {
__asm__ __volatile__("
ld [%0 + 0x00], %%g0
ld [%0 + 0x10], %%g0
@@ -1629,29 +1458,31 @@ static void sun4c_flush_cache_mm_hw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
- struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
- unsigned long flags;
+ if (new_ctx != NO_CONTEXT) {
+ flush_user_windows();
+ if (sun4c_context_ring[new_ctx].num_entries) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
- save_and_cli(flags);
- if(head->next != head) {
- struct sun4c_mmu_entry *entry = head->next;
- int savectx = sun4c_get_context();
+ save_and_cli(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
- FUW_INLINE
- sun4c_set_context(new_ctx);
- sun4c_flush_context_hw();
- do {
- struct sun4c_mmu_entry *next = entry->next;
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context_hw();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
- sun4c_user_unmap(entry);
- free_user_entry(new_ctx, entry);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
- entry = next;
- } while(entry != head);
- sun4c_set_context(savectx);
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ restore_flags(flags);
}
- restore_flags(flags);
}
}
@@ -1659,29 +1490,28 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
{
int new_ctx = mm->context;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+
save_and_cli(flags);
/* All user segmap chains are ordered on entry->vaddr. */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
/* Tracing various job mixtures showed that this conditional
 * only passes ~35% of the time for most worst-case situations,
* therefore we avoid all of this gross overhead ~65% of the time.
*/
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
+
sun4c_set_context(new_ctx);
/* At this point, always, (start >= entry->vaddr) and
@@ -1696,11 +1526,11 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
/* "realstart" is always >= entry->vaddr */
realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
- if(end < realend)
+ if (end < realend)
realend = end;
- if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
+ if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
unsigned long page = entry->vaddr;
- while(page < realend) {
+ while (page < realend) {
sun4c_flush_page_hw(page);
page += PAGE_SIZE;
}
@@ -1710,14 +1540,13 @@ static void sun4c_flush_cache_range_hw(struct mm_struct *mm, unsigned long start
free_user_entry(new_ctx, entry);
}
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
}
}
-/* XXX no save_and_cli/restore_flags needed, but put here if darkside still crashes */
static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long page)
{
struct mm_struct *mm = vma->vm_mm;
@@ -1726,76 +1555,85 @@ static void sun4c_flush_cache_page_hw(struct vm_area_struct *vma, unsigned long
/* Sun4c has no separate I/D caches so cannot optimize for non
* text page flushes.
*/
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int octx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page_hw(page);
sun4c_set_context(octx);
+ restore_flags(flags);
}
}
-static void sun4c_flush_page_to_ram_hw(unsigned long page)
+static void sun4c_flush_page_to_ram_hw(struct page *page)
{
- sun4c_flush_page_hw(page);
+ unsigned long flags;
+ unsigned long addr = page_address(page);
+
+ save_and_cli(flags);
+ sun4c_flush_page_hw(addr);
+ restore_flags(flags);
}
static void sun4c_flush_cache_mm_sw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT && sun4c_context_ring[new_ctx].num_entries) {
- struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
- unsigned long flags;
+ if (new_ctx != NO_CONTEXT) {
+ flush_user_windows();
- save_and_cli(flags);
- if(head->next != head) {
- struct sun4c_mmu_entry *entry = head->next;
- int savectx = sun4c_get_context();
+ if (sun4c_context_ring[new_ctx].num_entries) {
+ struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
+ unsigned long flags;
- FUW_INLINE
- sun4c_set_context(new_ctx);
- sun4c_flush_context_sw();
- do {
- struct sun4c_mmu_entry *next = entry->next;
+ save_and_cli(flags);
+ if (head->next != head) {
+ struct sun4c_mmu_entry *entry = head->next;
+ int savectx = sun4c_get_context();
- sun4c_user_unmap(entry);
- free_user_entry(new_ctx, entry);
+ sun4c_set_context(new_ctx);
+ sun4c_flush_context_sw();
+ do {
+ struct sun4c_mmu_entry *next = entry->next;
- entry = next;
- } while(entry != head);
- sun4c_set_context(savectx);
+ sun4c_user_unmap(entry);
+ free_user_entry(new_ctx, entry);
+
+ entry = next;
+ } while (entry != head);
+ sun4c_set_context(savectx);
+ }
+ restore_flags(flags);
}
- restore_flags(flags);
}
}
static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start, unsigned long end)
{
int new_ctx = mm->context;
-
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
- if(new_ctx != NO_CONTEXT) {
+
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+
save_and_cli(flags);
/* All user segmap chains are ordered on entry->vaddr. */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
/* Tracing various job mixtures showed that this conditional
 * only passes ~35% of the time for most worst-case situations,
* therefore we avoid all of this gross overhead ~65% of the time.
*/
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
sun4c_set_context(new_ctx);
@@ -1811,11 +1649,11 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start
/* "realstart" is always >= entry->vaddr */
realend = entry->vaddr + SUN4C_REAL_PGDIR_SIZE;
- if(end < realend)
+ if (end < realend)
realend = end;
- if((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
+ if ((realend - entry->vaddr) <= (PAGE_SIZE << 3)) {
unsigned long page = entry->vaddr;
- while(page < realend) {
+ while (page < realend) {
sun4c_flush_page_sw(page);
page += PAGE_SIZE;
}
@@ -1825,7 +1663,7 @@ static void sun4c_flush_cache_range_sw(struct mm_struct *mm, unsigned long start
free_user_entry(new_ctx, entry);
}
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -1840,19 +1678,27 @@ static void sun4c_flush_cache_page_sw(struct vm_area_struct *vma, unsigned long
/* Sun4c has no separate I/D caches so cannot optimize for non
* text page flushes.
*/
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int octx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ flush_user_windows();
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
sun4c_flush_page_sw(page);
sun4c_set_context(octx);
+ restore_flags(flags);
}
}
-static void sun4c_flush_page_to_ram_sw(unsigned long page)
+static void sun4c_flush_page_to_ram_sw(struct page *page)
{
- sun4c_flush_page_sw(page);
+ unsigned long flags;
+ unsigned long addr = page_address(page);
+
+ save_and_cli(flags);
+ sun4c_flush_page_sw(addr);
+ restore_flags(flags);
}
/* Sun4c cache is unified, both instructions and data live there, so
@@ -1879,8 +1725,11 @@ static void sun4c_flush_tlb_all(void)
flush_user_windows();
while (sun4c_kernel_ring.num_entries) {
next_entry = this_entry->next;
- sun4c_flush_segment(this_entry->vaddr);
- for(ctx = 0; ctx < num_contexts; ctx++) {
+ if (sun4c_vacinfo.do_hwflushes)
+ sun4c_flush_segment_hw(this_entry->vaddr);
+ else
+ sun4c_flush_segment_sw(this_entry->vaddr);
+ for (ctx = 0; ctx < num_contexts; ctx++) {
sun4c_set_context(ctx);
sun4c_put_segmap(this_entry->vaddr, invalid_segment);
}
@@ -1895,16 +1744,15 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
sun4c_set_context(new_ctx);
sun4c_flush_context_hw();
do {
@@ -1914,7 +1762,7 @@ static void sun4c_flush_tlb_mm_hw(struct mm_struct *mm)
free_user_entry(new_ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -1925,26 +1773,21 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start,
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
save_and_cli(flags);
/* See commentary in sun4c_flush_cache_range_*(). */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
- /* This window flush is paranoid I think... -DaveM */
- FUW_INLINE
sun4c_set_context(new_ctx);
do {
struct sun4c_mmu_entry *next = entry->next;
@@ -1954,7 +1797,7 @@ static void sun4c_flush_tlb_range_hw(struct mm_struct *mm, unsigned long start,
free_user_entry(new_ctx, entry);
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -1966,15 +1809,17 @@ static void sun4c_flush_tlb_page_hw(struct vm_area_struct *vma, unsigned long pa
struct mm_struct *mm = vma->vm_mm;
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int savectx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page_hw(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
+ restore_flags(flags);
}
}
@@ -1982,16 +1827,15 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
unsigned long flags;
save_and_cli(flags);
- if(head->next != head) {
+ if (head->next != head) {
struct sun4c_mmu_entry *entry = head->next;
int savectx = sun4c_get_context();
- FUW_INLINE
sun4c_set_context(new_ctx);
sun4c_flush_context_sw();
do {
@@ -2001,7 +1845,7 @@ static void sun4c_flush_tlb_mm_sw(struct mm_struct *mm)
free_user_entry(new_ctx, entry);
entry = next;
- } while(entry != head);
+ } while (entry != head);
sun4c_set_context(savectx);
}
restore_flags(flags);
@@ -2012,27 +1856,21 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start,
{
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
struct sun4c_mmu_entry *head = &sun4c_context_ring[new_ctx].ringhd;
struct sun4c_mmu_entry *entry;
unsigned long flags;
-#if KGPROF_PROFILING
- kgprof_profile();
-#endif
-
save_and_cli(flags);
/* See commentary in sun4c_flush_cache_range_*(). */
- for(entry = head->next;
- (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
- entry = entry->next)
+ for (entry = head->next;
+ (entry != head) && ((entry->vaddr+SUN4C_REAL_PGDIR_SIZE) < start);
+ entry = entry->next)
;
- if((entry != head) && (entry->vaddr < end)) {
+ if ((entry != head) && (entry->vaddr < end)) {
int octx = sun4c_get_context();
- /* This window flush is paranoid I think... -DaveM */
- FUW_INLINE
sun4c_set_context(new_ctx);
do {
struct sun4c_mmu_entry *next = entry->next;
@@ -2042,7 +1880,7 @@ static void sun4c_flush_tlb_range_sw(struct mm_struct *mm, unsigned long start,
free_user_entry(new_ctx, entry);
entry = next;
- } while((entry != head) && (entry->vaddr < end));
+ } while ((entry != head) && (entry->vaddr < end));
sun4c_set_context(octx);
}
restore_flags(flags);
@@ -2054,15 +1892,17 @@ static void sun4c_flush_tlb_page_sw(struct vm_area_struct *vma, unsigned long pa
struct mm_struct *mm = vma->vm_mm;
int new_ctx = mm->context;
- if(new_ctx != NO_CONTEXT) {
+ if (new_ctx != NO_CONTEXT) {
int savectx = sun4c_get_context();
+ unsigned long flags;
- FUW_INLINE
+ save_and_cli(flags);
sun4c_set_context(new_ctx);
page &= PAGE_MASK;
sun4c_flush_page_sw(page);
sun4c_put_pte(page, 0);
sun4c_set_context(savectx);
+ restore_flags(flags);
}
}
@@ -2075,7 +1915,6 @@ static void sun4c_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{
}
-
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus_type, int rdonly)
{
@@ -2083,7 +1922,7 @@ void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
page_entry = ((physaddr >> PAGE_SHIFT) & SUN4C_PFN_MASK);
page_entry |= ((pg_iobits | _SUN4C_PAGE_PRIV) & ~(_SUN4C_PAGE_PRESENT));
- if(rdonly)
+ if (rdonly)
page_entry &= ~_SUN4C_WRITEABLE;
sun4c_put_pte(virt_addr, page_entry);
}
@@ -2093,12 +1932,12 @@ void sun4c_unmapioaddr(unsigned long virt_addr)
sun4c_put_pte(virt_addr, 0);
}
-static void sun4c_alloc_context_hw(struct mm_struct *mm)
+static void sun4c_alloc_context_hw(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
+ if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number;
@@ -2106,40 +1945,33 @@ static void sun4c_alloc_context_hw(struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
+ if (ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
-#ifdef DEBUG_SUN4C_MM
- if(ctxp == &ctx_used)
- panic("out of mmu contexts");
-#endif
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
ctxp->ctx_mm->context = NO_CONTEXT;
ctxp->ctx_mm = mm;
mm->context = ctxp->ctx_number;
sun4c_demap_context_hw(&sun4c_context_ring[ctxp->ctx_number],
- ctxp->ctx_number);
+ ctxp->ctx_number);
}
-static void sun4c_switch_to_context_hw(struct task_struct *tsk)
+/* Switch the current MM context. */
+static void sun4c_switch_mm_hw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
struct ctx_list *ctx;
+ int dirty = 0;
- if(tsk->mm->context == NO_CONTEXT) {
- sun4c_alloc_context_hw(tsk->mm);
+ if (mm->context == NO_CONTEXT) {
+ dirty = 1;
+ sun4c_alloc_context_hw(old_mm, mm);
} else {
/* Update the LRU ring of contexts. */
- ctx = ctx_list_pool + tsk->mm->context;
+ ctx = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx);
add_to_used_ctxlist(ctx);
}
- sun4c_set_context(tsk->mm->context);
-}
-
-static void sun4c_init_new_context_hw(struct mm_struct *mm)
-{
- sun4c_alloc_context_hw(mm);
- if(mm == current->mm)
+ if (dirty || old_mm != mm)
sun4c_set_context(mm->context);
}
@@ -2147,7 +1979,7 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm)
{
struct ctx_list *ctx_old;
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+ if (mm->context != NO_CONTEXT) {
sun4c_demap_context_hw(&sun4c_context_ring[mm->context], mm->context);
ctx_old = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx_old);
@@ -2156,12 +1988,12 @@ static void sun4c_destroy_context_hw(struct mm_struct *mm)
}
}
-static void sun4c_alloc_context_sw(struct mm_struct *mm)
+static void sun4c_alloc_context_sw(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
ctxp = ctx_free.next;
- if(ctxp != &ctx_free) {
+ if (ctxp != &ctx_free) {
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
mm->context = ctxp->ctx_number;
@@ -2169,40 +2001,34 @@ static void sun4c_alloc_context_sw(struct mm_struct *mm)
return;
}
ctxp = ctx_used.next;
- if(ctxp->ctx_mm == current->mm)
+ if(ctxp->ctx_mm == old_mm)
ctxp = ctxp->next;
-#ifdef DEBUG_SUN4C_MM
- if(ctxp == &ctx_used)
- panic("out of mmu contexts");
-#endif
remove_from_ctx_list(ctxp);
add_to_used_ctxlist(ctxp);
ctxp->ctx_mm->context = NO_CONTEXT;
ctxp->ctx_mm = mm;
mm->context = ctxp->ctx_number;
sun4c_demap_context_sw(&sun4c_context_ring[ctxp->ctx_number],
- ctxp->ctx_number);
+ ctxp->ctx_number);
}
-static void sun4c_switch_to_context_sw(struct task_struct *tsk)
+/* Switch the current MM context. */
+static void sun4c_switch_mm_sw(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
struct ctx_list *ctx;
+ int dirty = 0;
- if(tsk->mm->context == NO_CONTEXT) {
- sun4c_alloc_context_sw(tsk->mm);
+ if (mm->context == NO_CONTEXT) {
+ dirty = 1;
+ sun4c_alloc_context_sw(old_mm, mm);
} else {
/* Update the LRU ring of contexts. */
- ctx = ctx_list_pool + tsk->mm->context;
+ ctx = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx);
add_to_used_ctxlist(ctx);
}
- sun4c_set_context(tsk->mm->context);
-}
-static void sun4c_init_new_context_sw(struct mm_struct *mm)
-{
- sun4c_alloc_context_sw(mm);
- if(mm == current->mm)
+ if (dirty || old_mm != mm)
sun4c_set_context(mm->context);
}
@@ -2210,7 +2036,7 @@ static void sun4c_destroy_context_sw(struct mm_struct *mm)
{
struct ctx_list *ctx_old;
- if(mm->context != NO_CONTEXT && atomic_read(&mm->count) == 1) {
+ if (mm->context != NO_CONTEXT) {
sun4c_demap_context_sw(&sun4c_context_ring[mm->context], mm->context);
ctx_old = ctx_list_pool + mm->context;
remove_from_ctx_list(ctx_old);
@@ -2225,7 +2051,7 @@ static int sun4c_mmu_info(char *buf)
int len;
used_user_entries = 0;
- for(i=0; i < num_contexts; i++)
+ for (i = 0; i < num_contexts; i++)
used_user_entries += sun4c_context_ring[i].num_entries;
len = sprintf(buf,
@@ -2239,10 +2065,7 @@ static int sun4c_mmu_info(char *buf)
"usedpsegs\t: %d\n"
"ufreepsegs\t: %d\n"
"user_taken\t: %d\n"
- "max_taken\t: %d\n"
- "context\t\t: %d flushes\n"
- "segment\t\t: %d flushes\n"
- "page\t\t: %d flushes\n",
+ "max_taken\t: %d\n",
sun4c_vacinfo.num_bytes,
(sun4c_vacinfo.do_hwflushes ? "yes" : "no"),
sun4c_vacinfo.linesize,
@@ -2253,22 +2076,7 @@ static int sun4c_mmu_info(char *buf)
used_user_entries,
sun4c_ufree_ring.num_entries,
sun4c_user_taken_entries,
- max_user_taken_entries,
- ctxflushes, segflushes, pageflushes);
-
-#if KGPROF_PROFILING
- {
- int i,j;
- len += sprintf(buf + len,"kgprof profiling:\n");
- for (i=0;i<KGPROF_SIZE && kgprof_counters[i].addr[0];i++) {
- len += sprintf(buf + len,"%5d ",kgprof_counters[i].count);
- for (j=0;j<KGPROF_DEPTH;j++) {
- len += sprintf(buf + len,"%08x ",kgprof_counters[i].addr[j]);
- }
- len += sprintf(buf + len,"\n");
- }
- }
-#endif
+ max_user_taken_entries);
return len;
}
@@ -2277,13 +2085,6 @@ static int sun4c_mmu_info(char *buf)
* data structures.
*/
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned int sun4c_pmd_align(unsigned int addr) { return SUN4C_PMD_ALIGN(addr); }
-#endif
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_ALIGN(addr); }
-#endif
-
/* First the functions which the mid-level code uses to directly
* manipulate the software page tables. Some defines since we are
* emulating the i386 page directory layout.
@@ -2295,17 +2096,6 @@ static unsigned int sun4c_pgdir_align(unsigned int addr) { return SUN4C_PGDIR_AL
#define PGD_DIRTY 0x040
#define PGD_TABLE (PGD_PRESENT | PGD_RW | PGD_USER | PGD_ACCESSED | PGD_DIRTY)
-#if 0 /* Not used due to BTFIXUPs */
-static unsigned long sun4c_vmalloc_start(void)
-{
- return SUN4C_VMALLOC_START;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_none(pte_t pte) { return !pte_val(pte); }
-#endif
-
static int sun4c_pte_present(pte_t pte)
{
return ((pte_val(pte) & (_SUN4C_PAGE_PRESENT | _SUN4C_PAGE_PRIV)) != 0);
@@ -2334,48 +2124,6 @@ static void sun4c_pgd_clear(pgd_t * pgdp) { }
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_write(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_WRITE;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_dirty(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_MODIFIED;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static int sun4c_pte_young(pte_t pte)
-{
- return pte_val(pte) & _SUN4C_PAGE_ACCESSED;
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_WRITE | _SUN4C_PAGE_SILENT_WRITE));
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_mkclean(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_MODIFIED | _SUN4C_PAGE_SILENT_WRITE));
-}
-#endif
-
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_mkold(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_SUN4C_PAGE_ACCESSED | _SUN4C_PAGE_SILENT_READ));
-}
-#endif
-
static pte_t sun4c_pte_mkwrite(pte_t pte)
{
pte = __pte(pte_val(pte) | _SUN4C_PAGE_WRITE);
@@ -2404,9 +2152,9 @@ static pte_t sun4c_pte_mkyoung(pte_t pte)
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
-static pte_t sun4c_mk_pte(unsigned long page, pgprot_t pgprot)
+static pte_t sun4c_mk_pte(struct page *page, pgprot_t pgprot)
{
- return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
+ return __pte((page - mem_map) | pgprot_val(pgprot));
}
static pte_t sun4c_mk_pte_phys(unsigned long phys_page, pgprot_t pgprot)
@@ -2419,17 +2167,9 @@ static pte_t sun4c_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
return __pte(((page - PAGE_OFFSET) >> PAGE_SHIFT) | pgprot_val(pgprot));
}
-#if 0 /* Not used due to BTFIXUPs */
-static pte_t sun4c_pte_modify(pte_t pte, pgprot_t newprot)
-{
- return __pte((pte_val(pte) & _SUN4C_PAGE_CHG_MASK) |
- pgprot_val(newprot));
-}
-#endif
-
-static unsigned long sun4c_pte_page(pte_t pte)
+static unsigned long sun4c_pte_pagenr(pte_t pte)
{
- return (PAGE_OFFSET + ((pte_val(pte) & SUN4C_PFN_MASK) << (PAGE_SHIFT)));
+ return (pte_val(pte) & SUN4C_PFN_MASK);
}
static inline unsigned long sun4c_pmd_page(pmd_t pmd)
@@ -2460,11 +2200,6 @@ pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
return (pte_t *) sun4c_pmd_page(*dir) + ((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}
-/* Update the root mmu directory. */
-static void sun4c_update_rootmmu_dir(struct task_struct *tsk, pgd_t *pgdir)
-{
-}
-
/* Please take special note on the foo_kernel() routines below, our
* fast in window fault handler wants to get at the pte's for vmalloc
* area with traps off, therefore they _MUST_ be locked down to prevent
@@ -2487,7 +2222,7 @@ static void sun4c_pte_free_kernel(pte_t *pte)
static pte_t *sun4c_pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
- if(address >= SUN4C_LOCK_VADDR)
+ if (address >= SUN4C_LOCK_VADDR)
return NULL;
address = (address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1);
if (sun4c_pmd_none(*pmd))
@@ -2527,7 +2262,7 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
{
unsigned long *ret;
- if((ret = pgd_quicklist) != NULL) {
+ if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
@@ -2546,15 +2281,15 @@ extern __inline__ pgd_t *sun4c_get_pgd_fast(void)
static int sun4c_check_pgt_cache(int low, int high)
{
int freed = 0;
- if(pgtable_cache_size > high) {
+ if (pgtable_cache_size > high) {
do {
- if(pgd_quicklist)
+ if (pgd_quicklist)
free_pgd_slow(get_pgd_fast()), freed++;
- if(pmd_quicklist)
+ if (pmd_quicklist)
free_pmd_slow(get_pmd_fast()), freed++;
- if(pte_quicklist)
+ if (pte_quicklist)
free_pte_slow(get_pte_fast()), freed++;
- } while(pgtable_cache_size > low);
+ } while (pgtable_cache_size > low);
}
return freed;
}
@@ -2575,7 +2310,7 @@ extern __inline__ pte_t *sun4c_get_pte_fast(void)
{
unsigned long *ret;
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
@@ -2691,19 +2426,21 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
unsigned long start;
/* Do not mistake ourselves as another mapping. */
- if(vmaring == vma)
+ if (vmaring == vma)
continue;
if (S4CVAC_BADALIAS(vaddr, address)) {
alias_found++;
start = vmaring->vm_start;
- while(start < vmaring->vm_end) {
+ while (start < vmaring->vm_end) {
pgdp = sun4c_pgd_offset(vmaring->vm_mm, start);
- if(!pgdp) goto next;
+ if (!pgdp)
+ goto next;
ptep = sun4c_pte_offset((pmd_t *) pgdp, start);
- if(!ptep) goto next;
+ if (!ptep)
+ goto next;
- if(pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
+ if (pte_val(*ptep) & _SUN4C_PAGE_PRESENT) {
flush_cache_page(vmaring, start);
*ptep = __pte(pte_val(*ptep) |
_SUN4C_PAGE_NOCACHE);
@@ -2716,54 +2453,112 @@ static void sun4c_vac_alias_fixup(struct vm_area_struct *vma, unsigned long addr
} while ((vmaring = vmaring->vm_next_share) != NULL);
spin_unlock(&inode->i_shared_lock);
- if(alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
+ if (alias_found && !(pte_val(pte) & _SUN4C_PAGE_NOCACHE)) {
pgdp = sun4c_pgd_offset(vma->vm_mm, address);
ptep = sun4c_pte_offset((pmd_t *) pgdp, address);
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_NOCACHE);
- pte = pte_val(*ptep);
+ pte = *ptep;
}
}
}
+/* An experiment, turn off by default for now... -DaveM */
+#define SUN4C_PRELOAD_PSEG
+
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long flags;
+ int pseg;
save_and_cli(flags);
address &= PAGE_MASK;
- if(sun4c_get_segmap(address) == invalid_segment)
- alloc_user_segment(address, sun4c_get_context());
+ if ((pseg = sun4c_get_segmap(address)) == invalid_segment) {
+ struct sun4c_mmu_entry *entry = sun4c_user_strategy();
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long start, end;
+
+ entry->vaddr = start = (address & SUN4C_REAL_PGDIR_MASK);
+ entry->ctx = mm->context;
+ add_ring_ordered(sun4c_context_ring + mm->context, entry);
+ sun4c_put_segmap(entry->vaddr, entry->pseg);
+ end = start + SUN4C_REAL_PGDIR_SIZE;
+ while (start < end) {
+#ifdef SUN4C_PRELOAD_PSEG
+ pgd_t *pgdp = sun4c_pgd_offset(mm, start);
+ pte_t *ptep;
+
+ if (!pgdp)
+ goto no_mapping;
+ ptep = sun4c_pte_offset((pmd_t *) pgdp, start);
+ if (!ptep || !(pte_val(*ptep) & _SUN4C_PAGE_PRESENT))
+ goto no_mapping;
+ sun4c_put_pte(start, pte_val(*ptep));
+ goto next;
+
+ no_mapping:
+#endif
+ sun4c_put_pte(start, 0);
+#ifdef SUN4C_PRELOAD_PSEG
+ next:
+#endif
+ start += PAGE_SIZE;
+ }
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
+ sun4c_vac_alias_fixup(vma, address, pte);
+#ifndef SUN4C_PRELOAD_PSEG
+ sun4c_put_pte(address, pte_val(pte));
+#endif
+ restore_flags(flags);
+ return;
+ } else {
+ struct sun4c_mmu_entry *entry = &mmu_entry_pool[pseg];
+
+ remove_lru(entry);
+ add_lru(entry);
+ }
- if((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
sun4c_vac_alias_fixup(vma, address, pte);
sun4c_put_pte(address, pte_val(pte));
restore_flags(flags);
}
-extern unsigned long free_area_init(unsigned long, unsigned long);
-extern unsigned long sparc_context_init(unsigned long, int);
+extern void sparc_context_init(int);
extern unsigned long end;
+extern unsigned long bootmem_init(void);
+extern unsigned long last_valid_pfn;
+extern void sun_serial_setup(void);
-unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
+void __init sun4c_paging_init(void)
{
int i, cnt;
unsigned long kernel_end, vaddr;
- extern unsigned long sparc_iobase_vaddr;
+ extern struct resource sparc_iomap;
+ unsigned long end_pfn;
kernel_end = (unsigned long) &end;
kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
+
+ last_valid_pfn = end_pfn = bootmem_init();
+
+ /* This does not logically belong here, but we need to
+ * call it at the moment we are able to use the bootmem
+ * allocator.
+ */
+ sun_serial_setup();
+
sun4c_probe_mmu();
invalid_segment = (num_segmaps - 1);
sun4c_init_mmu_entry_pool();
- sun4c_init_rings(&start_mem);
+ sun4c_init_rings();
sun4c_init_map_kernelprom(kernel_end);
sun4c_init_clean_mmu(kernel_end);
sun4c_init_fill_kernel_ring(SUN4C_KERNEL_BUCKETS);
- sun4c_init_lock_area(sparc_iobase_vaddr, IOBASE_END);
+ sun4c_init_lock_area(sparc_iomap.start, IOBASE_END);
sun4c_init_lock_area(DVMA_VADDR, DVMA_END);
- start_mem = sun4c_init_lock_areas(start_mem);
+ sun4c_init_lock_areas();
sun4c_init_fill_user_ring();
sun4c_set_context(0);
@@ -2783,18 +2578,23 @@ unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long en
vaddr += SUN4C_PGDIR_SIZE;
swapper_pg_dir[vaddr>>SUN4C_PGDIR_SHIFT] = __pgd(PGD_TABLE | (unsigned long) pg3);
sun4c_init_ss2_cache_bug();
- start_mem = PAGE_ALIGN(start_mem);
- start_mem = sparc_context_init(start_mem, num_contexts);
- start_mem = free_area_init(start_mem, end_mem);
+ sparc_context_init(num_contexts);
+
+ {
+ unsigned int zones_size[MAX_NR_ZONES] = { 0, 0, 0};
+
+ zones_size[ZONE_DMA] = end_pfn;
+ free_area_init(zones_size);
+ }
+
cnt = 0;
- for(i = 0; i < num_segmaps; i++)
- if(mmu_entry_pool[i].locked)
+ for (i = 0; i < num_segmaps; i++)
+ if (mmu_entry_pool[i].locked)
cnt++;
max_user_taken_entries = num_segmaps - cnt - 40 - 1;
printk("SUN4C: %d mmu entries for the kernel\n", cnt);
- return start_mem;
}
/* Load up routines and constants for sun4c mmu */
@@ -2839,7 +2639,7 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_cache_all, sun4c_flush_cache_all, BTFIXUPCALL_NORM);
- if(sun4c_vacinfo.do_hwflushes) {
+ if (sun4c_vacinfo.do_hwflushes) {
BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_page, sun4c_flush_cache_page_hw, BTFIXUPCALL_NORM);
@@ -2848,9 +2648,8 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_hw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_hw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_hw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_hw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_hw, BTFIXUPCALL_NORM);
} else {
BTFIXUPSET_CALL(flush_cache_mm, sun4c_flush_cache_mm_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_cache_range, sun4c_flush_cache_range_sw, BTFIXUPCALL_NORM);
@@ -2860,9 +2659,8 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(flush_tlb_range, sun4c_flush_tlb_range_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(flush_tlb_page, sun4c_flush_tlb_page_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_task_struct, sun4c_free_task_struct_sw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(switch_to_context, sun4c_switch_to_context_sw, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(switch_mm, sun4c_switch_mm_sw, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(destroy_context, sun4c_destroy_context_sw, BTFIXUPCALL_NORM);
- BTFIXUPSET_CALL(init_new_context, sun4c_init_new_context_sw, BTFIXUPCALL_NORM);
}
BTFIXUPSET_CALL(flush_tlb_all, sun4c_flush_tlb_all, BTFIXUPCALL_NORM);
@@ -2871,15 +2669,13 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(set_pte, sun4c_set_pte, BTFIXUPCALL_STO1O0);
- BTFIXUPSET_CALL(pte_page, sun4c_pte_page, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(pte_pagenr, sun4c_pte_pagenr, BTFIXUPCALL_NORM);
#if PAGE_SHIFT <= 12
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_ANDNINT(PAGE_SIZE - 1));
#else
BTFIXUPSET_CALL(pmd_page, sun4c_pmd_page, BTFIXUPCALL_NORM);
#endif
- BTFIXUPSET_CALL(sparc_update_rootmmu_dir, sun4c_update_rootmmu_dir, BTFIXUPCALL_NOP);
-
BTFIXUPSET_CALL(pte_present, sun4c_pte_present, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(pte_clear, sun4c_pte_clear, BTFIXUPCALL_STG0O0);
@@ -2931,6 +2727,9 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(mmu_release_scsi_sgl, sun4c_release_scsi_sgl, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_map_dma_area, sun4c_map_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_unmap_dma_area, sun4c_unmap_dma_area, BTFIXUPCALL_NORM);
+ BTFIXUPSET_CALL(mmu_flush_dma_area, sun4c_flush_dma_area, BTFIXUPCALL_NOP);
+ BTFIXUPSET_CALL(mmu_inval_dma_area, sun4c_inval_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_v2p, sun4c_v2p, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_p2v, sun4c_p2v, BTFIXUPCALL_NORM);
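The hunks above replace the old switch_to_context/init_new_context pair with a single switch_mm that receives both the outgoing and incoming mm, so context stealing can skip the mm being switched away from. Below is a stand-alone C sketch of that policy, not kernel code: struct mm, the flat LRU array, and the printf stand-in for sun4c_set_context() are simplifications for illustration only, assuming the behaviour visible in sun4c_alloc_context_{hw,sw}() and sun4c_switch_mm_{hw,sw}().

#include <stdio.h>

#define NR_CONTEXTS 4
#define NO_CONTEXT  (-1)

struct mm { int context; const char *name; };

static struct mm *lru[NR_CONTEXTS];	/* lru[0] is least recently used */
static int nr_used;

/* Move mm to the most-recently-used end, like remove_from_ctx_list()
 * followed by add_to_used_ctxlist(). */
static void touch(struct mm *mm)
{
	int i = 0;

	while (lru[i] != mm)
		i++;
	for (; i < nr_used - 1; i++)
		lru[i] = lru[i + 1];
	lru[nr_used - 1] = mm;
}

/* Give mm a hardware context: use a free one if any, else steal the
 * least-recently-used one, skipping old_mm so the mm we are switching
 * away from is never robbed. */
static void alloc_context(struct mm *old_mm, struct mm *mm)
{
	struct mm *victim;
	int i;

	if (nr_used < NR_CONTEXTS) {		/* a free context exists */
		mm->context = nr_used;
		lru[nr_used++] = mm;
		return;
	}
	victim = lru[0];			/* LRU candidate */
	if (victim == old_mm)			/* never steal old_mm's ctx */
		victim = lru[1];
	mm->context = victim->context;		/* inherit the context number */
	victim->context = NO_CONTEXT;		/* victim reallocates later */
	for (i = 0; i < nr_used; i++)
		if (lru[i] == victim)
			lru[i] = mm;
	touch(mm);
	/* The real code also demaps the stolen context's pseg entries here. */
}

static void switch_mm(struct mm *old_mm, struct mm *mm)
{
	int dirty = 0;

	if (mm->context == NO_CONTEXT) {
		dirty = 1;
		alloc_context(old_mm, mm);
	} else {
		touch(mm);			/* just update the LRU ring */
	}
	if (dirty || old_mm != mm)
		printf("set_context(%d) for %s\n", mm->context, mm->name);
}

int main(void)
{
	struct mm a = { NO_CONTEXT, "a" }, b = { NO_CONTEXT, "b" };

	switch_mm(&a, &a);	/* bootstrap: a gets context 0 */
	switch_mm(&a, &b);	/* b gets context 1 */
	switch_mm(&b, &a);	/* a keeps context 0, only the LRU order moves */
	return 0;
}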
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
new file mode 100644
index 000000000..e9fe43293
--- /dev/null
+++ b/arch/sparc/mm/swift.S
@@ -0,0 +1,275 @@
+/* $Id: swift.S,v 1.3 1999/11/14 06:13:56 zaitcev Exp $
+ * swift.S: MicroSparc-II mmu/cache operations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asm_offsets.h>
+
+#define WINDOW_FLUSH(tmp1, tmp2) \
+ mov 0, tmp1; \
+98: ld [%g6 + AOFF_task_thread + AOFF_thread_uwinmask], tmp2; \
+ orcc %g0, tmp2, %g0; \
+ add tmp1, 1, tmp1; \
+ bne 98b; \
+ save %sp, -64, %sp; \
+99: subcc tmp1, 1, tmp1; \
+ bne 99b; \
+ restore %g0, %g0, %g0;
+
+ .text
+ .align 4
+
+#if 1 /* XXX screw this, I can't get the VAC flushes working
+ * XXX reliably... -DaveM
+ */
+ .globl swift_flush_cache_all, swift_flush_cache_mm
+ .globl swift_flush_cache_range, swift_flush_cache_page
+ .globl swift_flush_page_for_dma, swift_flush_chunk
+ .globl swift_flush_page_to_ram
+
+swift_flush_cache_all:
+swift_flush_cache_mm:
+swift_flush_cache_range:
+swift_flush_cache_page:
+swift_flush_page_for_dma:
+swift_flush_chunk:
+swift_flush_page_to_ram:
+ sethi %hi(0x2000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o0] ASI_M_TXTC_TAG
+ sta %g0, [%o0] ASI_M_DATAC_TAG
+ bne 1b
+ nop
+ retl
+ nop
+#else
+
+ .globl swift_flush_cache_all
+swift_flush_cache_all:
+ WINDOW_FLUSH(%g4, %g5)
+
+ /* Just clear out all the tags. */
+ sethi %hi(16 * 1024), %o0
+1: subcc %o0, 16, %o0
+ sta %g0, [%o0] ASI_M_TXTC_TAG
+ bne 1b
+ sta %g0, [%o0] ASI_M_DATAC_TAG
+ retl
+ nop
+
+ .globl swift_flush_cache_mm
+swift_flush_cache_mm:
+#ifndef __SMP__
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_cache_mm_out
+#endif
+ WINDOW_FLUSH(%g4, %g5)
+ rd %psr, %g1
+ andn %g1, PSR_ET, %g3
+ wr %g3, 0x0, %psr
+ nop
+ nop
+ mov SRMMU_CTX_REG, %g7
+ lda [%g7] ASI_M_MMUREGS, %g5
+ sta %g2, [%g7] ASI_M_MMUREGS
+
+#if 1
+ sethi %hi(0x2000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o0] ASI_M_FLUSH_CTX
+ bne 1b
+ nop
+#else
+ clr %o0
+ or %g0, 2048, %g7
+ or %g0, 2048, %o1
+ add %o1, 2048, %o2
+ add %o2, 2048, %o3
+ mov 16, %o4
+ add %o4, 2048, %o5
+ add %o5, 2048, %g2
+ add %g2, 2048, %g3
+1: sta %g0, [%o0 ] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o1] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o2] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o3] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o4] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %o5] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %g2] ASI_M_FLUSH_CTX
+ sta %g0, [%o0 + %g3] ASI_M_FLUSH_CTX
+ subcc %g7, 32, %g7
+ bne 1b
+ add %o0, 32, %o0
+#endif
+
+ mov SRMMU_CTX_REG, %g7
+ sta %g5, [%g7] ASI_M_MMUREGS
+ wr %g1, 0x0, %psr
+ nop
+ nop
+swift_flush_cache_mm_out:
+ retl
+ nop
+
+ .globl swift_flush_cache_range
+swift_flush_cache_range:
+ sub %o2, %o1, %o2
+ sethi %hi(4096), %o3
+ cmp %o2, %o3
+ bgu swift_flush_cache_mm
+ nop
+ b 70f
+ nop
+
+ .globl swift_flush_cache_page
+swift_flush_cache_page:
+ ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */
+70:
+#ifndef __SMP__
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_cache_page_out
+#endif
+ WINDOW_FLUSH(%g4, %g5)
+ rd %psr, %g1
+ andn %g1, PSR_ET, %g3
+ wr %g3, 0x0, %psr
+ nop
+ nop
+ mov SRMMU_CTX_REG, %g7
+ lda [%g7] ASI_M_MMUREGS, %g5
+ sta %g2, [%g7] ASI_M_MMUREGS
+
+ andn %o1, (PAGE_SIZE - 1), %o1
+#if 1
+ sethi %hi(0x1000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ bne 1b
+ nop
+#else
+ or %g0, 512, %g7
+ or %g0, 512, %o0
+ add %o0, 512, %o2
+ add %o2, 512, %o3
+ add %o3, 512, %o4
+ add %o4, 512, %o5
+ add %o5, 512, %g3
+ add %g3, 512, %g4
+1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+ subcc %g7, 16, %g7
+ bne 1b
+ add %o1, 16, %o1
+#endif
+
+ mov SRMMU_CTX_REG, %g7
+ sta %g5, [%g7] ASI_M_MMUREGS
+ wr %g1, 0x0, %psr
+ nop
+ nop
+swift_flush_cache_page_out:
+ retl
+ nop
+
+ /* Swift is write-thru, however it is not
+ * I/O nor TLB-walk coherent. Also it has
+ * caches which are virtually indexed and tagged.
+ */
+ .globl swift_flush_page_for_dma
+ .globl swift_flush_chunk
+ .globl swift_flush_page_to_ram
+swift_flush_page_for_dma:
+swift_flush_chunk:
+swift_flush_page_to_ram:
+ andn %o0, (PAGE_SIZE - 1), %o1
+#if 1
+ sethi %hi(0x1000), %o0
+1: subcc %o0, 0x10, %o0
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ bne 1b
+ nop
+#else
+ or %g0, 512, %g7
+ or %g0, 512, %o0
+ add %o0, 512, %o2
+ add %o2, 512, %o3
+ add %o3, 512, %o4
+ add %o4, 512, %o5
+ add %o5, 512, %g3
+ add %g3, 512, %g4
+1: sta %g0, [%o1 ] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+ sta %g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+ subcc %g7, 16, %g7
+ bne 1b
+ add %o1, 16, %o1
+#endif
+ retl
+ nop
+#endif
+
+ .globl swift_flush_sig_insns
+swift_flush_sig_insns:
+ flush %o1
+ retl
+ flush %o1 + 4
+
+ .globl swift_flush_tlb_mm
+ .globl swift_flush_tlb_range
+ .globl swift_flush_tlb_all
+swift_flush_tlb_mm:
+swift_flush_tlb_range:
+#ifndef __SMP__
+ ld [%o0 + AOFF_mm_context], %g2
+ cmp %g2, -1
+ be swift_flush_tlb_all_out
+#endif
+swift_flush_tlb_all:
+ mov 0x400, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+swift_flush_tlb_all_out:
+ retl
+ nop
+
+ .globl swift_flush_tlb_page
+swift_flush_tlb_page:
+ ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */
+ mov SRMMU_CTX_REG, %g1
+ ld [%o0 + AOFF_mm_context], %o3
+ andn %o1, (PAGE_SIZE - 1), %o1
+#ifndef __SMP__
+ cmp %o3, -1
+ be swift_flush_tlb_page_out
+ nop
+#endif
+#if 1
+ mov 0x400, %o1
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+#else
+ lda [%g1] ASI_M_MMUREGS, %g5
+ sta %o3, [%g1] ASI_M_MMUREGS
+ sta %g0, [%o1] ASI_M_FLUSH_PAGE /* rem. virt. cache. prot. */
+ sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ sta %g5, [%g1] ASI_M_MMUREGS
+#endif
+swift_flush_tlb_page_out:
+ retl
+ nop
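The "#if 1" fallback at the top of swift.S routes every cache-flush entry point through one unconditional loop: walk 0x2000 bytes of tag space in 0x10-byte steps and zero both the instruction- and data-cache tag at each step. A compilable C model of just that loop follows; the arrays and the function name are illustrative stand-ins for the ASI_M_TXTC_TAG / ASI_M_DATAC_TAG stores, which on real hardware must be the "sta" instructions shown above.

#include <stdint.h>

#define TAG_SPACE   0x2000	/* sethi %hi(0x2000), %o0 */
#define TAG_STRIDE  0x10	/* subcc %o0, 0x10, %o0  */

/* Plain arrays stand in for the two tag address spaces. */
static uint32_t itag[TAG_SPACE / TAG_STRIDE];
static uint32_t dtag[TAG_SPACE / TAG_STRIDE];

static void swift_flush_all_tags(void)
{
	unsigned long off = TAG_SPACE;

	do {
		off -= TAG_STRIDE;
		itag[off / TAG_STRIDE] = 0;	/* invalidate insn-cache tag */
		dtag[off / TAG_STRIDE] = 0;	/* invalidate data-cache tag */
	} while (off != 0);			/* bne 1b */
}

int main(void)
{
	swift_flush_all_tags();
	return 0;
}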
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
index 1c4356fa0..07c5ed620 100644
--- a/arch/sparc/mm/tsunami.S
+++ b/arch/sparc/mm/tsunami.S
@@ -1,4 +1,4 @@
-/* $Id: tsunami.S,v 1.2 1999/08/14 03:51:48 anton Exp $
+/* $Id: tsunami.S,v 1.3 1999/10/09 05:32:19 zaitcev Exp $
* tsunami.S: High speed MicroSparc-I mmu/cache operations.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -44,11 +44,11 @@ tsunami_flush_cache_range:
tsunami_flush_cache_all:
WINDOW_FLUSH(%g4, %g5)
tsunami_flush_page_for_dma:
- sta %g0, [%g0] ASI_M_DC_FLCLEAR
sta %g0, [%g0] ASI_M_IC_FLCLEAR
+tsunami_flush_chunk:
+ sta %g0, [%g0] ASI_M_DC_FLCLEAR
tsunami_flush_cache_out:
tsunami_flush_page_to_ram:
-tsunami_flush_chunk:
retl
nop
@@ -68,6 +68,11 @@ tsunami_flush_tlb_range:
tsunami_flush_tlb_all:
mov 0x400, %o1
sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ nop
+ nop
+ nop
+ nop
+ nop
tsunami_flush_tlb_out:
retl
nop
@@ -85,6 +90,59 @@ tsunami_flush_tlb_page:
lda [%g1] ASI_M_MMUREGS, %g5
sta %o3, [%g1] ASI_M_MMUREGS
sta %g0, [%o1] ASI_M_FLUSH_PROBE
+ nop
+ nop
+ nop
+ nop
+ nop
tsunami_flush_tlb_page_out:
retl
sta %g5, [%g1] ASI_M_MMUREGS
+
+#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
+ ldd [src + offset + 0x18], t0; \
+ std t0, [dst + offset + 0x18]; \
+ ldd [src + offset + 0x10], t2; \
+ std t2, [dst + offset + 0x10]; \
+ ldd [src + offset + 0x08], t0; \
+ std t0, [dst + offset + 0x08]; \
+ ldd [src + offset + 0x00], t2; \
+ std t2, [dst + offset + 0x00];
+
+ .globl tsunami_copy_1page
+tsunami_copy_1page:
+/* NOTE: This routine has to be shorter than 70insns --jj */
+ or %g0, (PAGE_SIZE >> 8), %g1
+1:
+ MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
+ MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
+ subcc %g1, 1, %g1
+ add %o0, 0x100, %o0
+ bne 1b
+ add %o1, 0x100, %o1
+
+ .globl tsunami_setup_blockops
+tsunami_setup_blockops:
+ sethi %hi(__copy_1page), %o0
+ or %o0, %lo(__copy_1page), %o0
+ sethi %hi(tsunami_copy_1page), %o1
+ or %o1, %lo(tsunami_copy_1page), %o1
+ sethi %hi(tsunami_setup_blockops), %o2
+ or %o2, %lo(tsunami_setup_blockops), %o2
+ ld [%o1], %o4
+1: add %o1, 4, %o1
+ st %o4, [%o0]
+ add %o0, 4, %o0
+ cmp %o1, %o2
+ bne 1b
+ ld [%o1], %o4
+ sta %g0, [%g0] ASI_M_IC_FLCLEAR
+ sta %g0, [%g0] ASI_M_DC_FLCLEAR
+ retl
+ nop
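tsunami_setup_blockops above patches the generic __copy_1page routine at boot by copying the instructions of tsunami_copy_1page over it, and tsunami_copy_1page itself moves a page in 256-byte passes, each built from eight 32-byte MIRROR_BLOCKs of ldd/std pairs. Below is a compilable C model of that copy loop; the PAGE_SIZE value and the function name are assumptions made for illustration, and the real routine is the assembly above.

#include <stdint.h>
#include <string.h>
#include <assert.h>

#define PAGE_SIZE 4096		/* sun4m page size assumed here */

/* C model of tsunami_copy_1page: PAGE_SIZE >> 8 outer passes of 256
 * bytes, each pass made of eight MIRROR_BLOCKs that move four
 * doublewords (the ldd/std pairs at offsets 0x18, 0x10, 0x08, 0x00). */
static void copy_1page_model(void *dst, const void *src)
{
	uint64_t *d = dst;
	const uint64_t *s = src;
	unsigned int pass, block;
	int dw;

	for (pass = 0; pass < (PAGE_SIZE >> 8); pass++) {
		for (block = 0; block < 8; block++)	/* 8 x 32 bytes */
			for (dw = 3; dw >= 0; dw--)	/* 0x18 .. 0x00 */
				d[block * 4 + dw] = s[block * 4 + dw];
		d += 32;	/* add %o0, 0x100, %o0 (in doublewords) */
		s += 32;	/* add %o1, 0x100, %o1 */
	}
}

int main(void)
{
	static uint64_t from[PAGE_SIZE / 8], to[PAGE_SIZE / 8];
	unsigned int i;

	for (i = 0; i < PAGE_SIZE / 8; i++)
		from[i] = i;
	copy_1page_model(to, from);
	assert(memcmp(to, from, PAGE_SIZE) == 0);
	return 0;
}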