summary refs log tree commit diff stats
path: root/arch/sparc64/mm
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1999-10-09 00:00:47 +0000
committerRalf Baechle <ralf@linux-mips.org>1999-10-09 00:00:47 +0000
commitd6434e1042f3b0a6dfe1b1f615af369486f9b1fa (patch)
treee2be02f33984c48ec019c654051d27964e42c441 /arch/sparc64/mm
parent609d1e803baf519487233b765eb487f9ec227a18 (diff)
Merge with 2.3.19.
Diffstat (limited to 'arch/sparc64/mm')
-rw-r--r--arch/sparc64/mm/asyncd.c12
-rw-r--r--arch/sparc64/mm/fault.c64
-rw-r--r--arch/sparc64/mm/generic.c5
-rw-r--r--arch/sparc64/mm/init.c59
-rw-r--r--arch/sparc64/mm/ultra.S29
5 files changed, 111 insertions(+), 58 deletions(-)
diff --git a/arch/sparc64/mm/asyncd.c b/arch/sparc64/mm/asyncd.c
index e9607cb8f..30272e9b5 100644
--- a/arch/sparc64/mm/asyncd.c
+++ b/arch/sparc64/mm/asyncd.c
@@ -1,4 +1,4 @@
-/* $Id: asyncd.c,v 1.8 1999/07/04 04:35:55 davem Exp $
+/* $Id: asyncd.c,v 1.9 1999/07/30 09:35:43 davem Exp $
* The asyncd kernel daemon. This handles paging on behalf of
* processes that receive page faults due to remote (async) memory
* accesses.
@@ -91,7 +91,8 @@ static void add_to_async_queue(int taskid,
void async_fault(unsigned long address, int write, int taskid,
void (*callback)(int,unsigned long,int,int))
{
- struct task_struct *tsk = task[taskid];
+#warning Need some fixing here... -DaveM
+ struct task_struct *tsk = current /* XXX task[taskid] */;
struct mm_struct *mm = tsk->mm;
stats.faults++;
@@ -111,7 +112,8 @@ static int fault_in_page(int taskid,
{
static unsigned last_address;
static int last_task, loop_counter;
- struct task_struct *tsk = task[taskid];
+#warning Need some fixing here... -DaveM
+ struct task_struct *tsk = current /* XXX task[taskid] */;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
@@ -178,8 +180,8 @@ no_memory:
bad_area:
stats.failure++;
- tsk->tss.sig_address = address;
- tsk->tss.sig_desc = SUBSIG_NOMAPPING;
+ tsk->thread.sig_address = address;
+ tsk->thread.sig_desc = SUBSIG_NOMAPPING;
send_sig(SIGSEGV, tsk, 1);
return 1;
}
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 04aa8284e..1a20b399b 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -1,4 +1,4 @@
-/* $Id: fault.c,v 1.36 1999/07/04 04:35:56 davem Exp $
+/* $Id: fault.c,v 1.39 1999/08/30 10:07:09 davem Exp $
* arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -84,10 +84,11 @@ void unhandled_fault(unsigned long address, struct task_struct *tsk,
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %016lx\n", (unsigned long)address);
}
- printk(KERN_ALERT "tsk->mm->context = %016lx\n",
- (unsigned long) tsk->mm->context);
- printk(KERN_ALERT "tsk->mm->pgd = %016lx\n",
- (unsigned long) tsk->mm->pgd);
+ printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
+ (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
+ printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
+ (tsk->mm ? (unsigned long) tsk->mm->pgd :
+ (unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
@@ -154,16 +155,45 @@ asmlinkage void do_sparc64_fault(struct pt_regs *regs, unsigned long address, in
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_interrupt() || mm == &init_mm)
+ if (in_interrupt() || !mm)
goto do_kernel_fault;
down(&mm->mmap_sem);
#ifdef DEBUG_LOCKUPS
- if (regs->tpc == lastpc && address == lastaddr && write == lastwrite) {
+ if (regs->tpc == lastpc &&
+ address == lastaddr &&
+ write == lastwrite) {
lockcnt++;
if (lockcnt == 100000) {
- printk("do_sparc64_fault: possible fault loop for %016lx %s\n", address, write ? "write" : "read");
+ unsigned char tmp;
+ register unsigned long tmp1 asm("o5");
+ register unsigned long tmp2 asm("o4");
+
+ printk("do_sparc64_fault[%s:%d]: possible fault loop for %016lx %s\n",
+ current->comm, current->pid,
+ address, write ? "write" : "read");
+ printk("do_sparc64_fault: CHECK[papgd[%016lx],pcac[%016lx]]\n",
+ __pa(mm->pgd), pgd_val(mm->pgd[0])<<11UL);
+ __asm__ __volatile__(
+ "wrpr %%g0, 0x494, %%pstate\n\t"
+ "mov %3, %%g4\n\t"
+ "mov %%g7, %0\n\t"
+ "ldxa [%%g4] %2, %1\n\t"
+ "wrpr %%g0, 0x096, %%pstate"
+ : "=r" (tmp1), "=r" (tmp2)
+ : "i" (ASI_DMMU), "i" (TSB_REG));
+ printk("do_sparc64_fault: IS[papgd[%016lx],pcac[%016lx]]\n",
+ tmp1, tmp2);
+ printk("do_sparc64_fault: CHECK[ctx(%016lx)] IS[ctx(%016lx)]\n",
+ mm->context, spitfire_get_secondary_context());
+ __asm__ __volatile__("rd %%asi, %0"
+ : "=r" (tmp));
+ printk("do_sparc64_fault: CHECK[seg(%02x)] IS[seg(%02x)]\n",
+ current->thread.current_ds.seg, tmp);
show_regs(regs);
+ __sti();
+ while(1)
+ barrier();
}
} else {
lastpc = regs->tpc;
@@ -282,8 +312,18 @@ do_kernel_fault:
return;
}
} else {
- current->tss.sig_address = address;
- current->tss.sig_desc = SUBSIG_NOMAPPING;
+#if 0
+ extern void __show_regs(struct pt_regs *);
+ printk("SHIT(%s:%d:cpu(%d)): PC[%016lx] ADDR[%016lx]\n",
+ current->comm, current->pid, smp_processor_id(),
+ regs->tpc, address);
+ __show_regs(regs);
+ __sti();
+ while(1)
+ barrier();
+#endif
+ current->thread.sig_address = address;
+ current->thread.sig_desc = SUBSIG_NOMAPPING;
force_sig(SIGSEGV, current);
return;
}
@@ -293,8 +333,8 @@ do_kernel_fault:
do_sigbus:
up(&mm->mmap_sem);
- current->tss.sig_address = address;
- current->tss.sig_desc = SUBSIG_MISCERROR;
+ current->thread.sig_address = address;
+ current->thread.sig_desc = SUBSIG_MISCERROR;
force_sig(SIGBUS, current);
if (regs->tstate & TSTATE_PRIV)
goto do_kernel_fault;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index ccb0951cc..cf94f4250 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -1,4 +1,4 @@
-/* $Id: generic.c,v 1.8 1999/03/12 06:51:50 davem Exp $
+/* $Id: generic.c,v 1.9 1999/07/23 22:32:01 davem Exp $
* generic.c: Generic Sparc mm routines that are not dependent upon
* MMU type but are Sparc specific.
*
@@ -95,7 +95,8 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
space);
curend = address + 0x10000;
offset += 0x10000;
- }
+ } else
+ offset += PAGE_SIZE;
} else
offset += PAGE_SIZE;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8f176e885..c1d8d24ae 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1,4 +1,4 @@
-/* $Id: init.c,v 1.128 1999/05/25 16:53:24 jj Exp $
+/* $Id: init.c,v 1.135 1999/09/06 22:55:10 ecd Exp $
* arch/sparc64/mm/init.c
*
* Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
@@ -42,7 +42,10 @@ unsigned long *sparc64_valid_addr_bitmap;
unsigned long phys_base;
/* get_new_mmu_context() uses "cache + 1". */
+spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
+unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
/* References to section boundaries */
extern char __init_begin, __init_end, etext, __bss_start;
@@ -163,7 +166,7 @@ static int dvma_pages_current_index;
static unsigned long dvmaiobase = 0;
static unsigned long dvmaiosz __initdata = 0;
-__initfunc(void dvmaio_init(void))
+void __init dvmaio_init(void)
{
long i;
@@ -184,7 +187,7 @@ __initfunc(void dvmaio_init(void))
}
}
-__initfunc(void iommu_init(int iommu_node, struct linux_sbus *sbus))
+void __init iommu_init(int iommu_node, struct linux_sbus *sbus)
{
extern int this_is_starfire;
extern void *starfire_hookup(int);
@@ -386,7 +389,7 @@ void mmu_map_dma_area(unsigned long addr, int len, __u32 *dvma_addr,
dvma_pages_current_offset;
/* Map the CPU's view. */
- pgdp = pgd_offset(init_task.mm, addr);
+ pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_alloc_kernel(pgdp, addr);
ptep = pte_alloc_kernel(pmdp, addr);
pte = mk_pte(the_page, PAGE_KERNEL);
@@ -583,7 +586,8 @@ void mmu_set_sbus64(struct linux_sbus_device *sdev, int bursts)
struct linux_sbus *sbus = sdev->my_bus;
struct sysio_regs *sregs = sbus->iommu->sysio_regs;
int slot = sdev->slot;
- u64 *cfg, tmp;
+ volatile u64 *cfg;
+ u64 tmp;
switch(slot) {
case 0:
@@ -677,7 +681,7 @@ static inline void inherit_prom_mappings(void)
for (vaddr = trans[i].virt;
vaddr < trans[i].virt + trans[i].size;
vaddr += PAGE_SIZE) {
- pgdp = pgd_offset(init_task.mm, vaddr);
+ pgdp = pgd_offset(&init_mm, vaddr);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool,
PMD_TABLE_SIZE);
@@ -739,7 +743,7 @@ void prom_world(int enter)
int i;
if (!enter)
- set_fs(current->tss.current_ds);
+ set_fs(current->thread.current_ds);
if (!prom_ditlb_set)
return;
@@ -957,9 +961,6 @@ void __flush_tlb_all(void)
: : "r" (pstate));
}
-#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
-unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
-
/* Caller does TLB context flushing on local CPU if necessary.
*
* We must be careful about boundary cases so that we never
@@ -969,14 +970,16 @@ unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
*/
void get_new_mmu_context(struct mm_struct *mm)
{
- unsigned long ctx = (tlb_context_cache + 1) & ~(CTX_VERSION_MASK);
- unsigned long new_ctx;
+ unsigned long ctx, new_ctx;
+ spin_lock(&ctx_alloc_lock);
+ ctx = CTX_HWBITS(tlb_context_cache + 1);
if (ctx == 0)
ctx = 1;
- if ((mm->context != NO_CONTEXT) &&
- !((mm->context ^ tlb_context_cache) & CTX_VERSION_MASK))
- clear_bit(mm->context & ~(CTX_VERSION_MASK), mmu_context_bmap);
+ if (CTX_VALID(mm->context)) {
+ unsigned long nr = CTX_HWBITS(mm->context);
+ mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+ }
new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
@@ -1003,12 +1006,13 @@ void get_new_mmu_context(struct mm_struct *mm)
goto out;
}
}
- set_bit(new_ctx, mmu_context_bmap);
+ mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
tlb_context_cache = new_ctx;
+ spin_unlock(&ctx_alloc_lock);
+
mm->context = new_ctx;
- mm->cpu_vm_mask = 0;
}
#ifndef __SMP__
@@ -1041,15 +1045,15 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
return NULL;
}
-__initfunc(static void
-allocate_ptable_skeleton(unsigned long start, unsigned long end))
+static void __init
+allocate_ptable_skeleton(unsigned long start, unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
pte_t *ptep;
while (start < end) {
- pgdp = pgd_offset(init_task.mm, start);
+ pgdp = pgd_offset(&init_mm, start);
if (pgd_none(*pgdp)) {
pmdp = sparc_init_alloc(&mempool, PAGE_SIZE);
memset(pmdp, 0, PAGE_SIZE);
@@ -1073,7 +1077,7 @@ allocate_ptable_skeleton(unsigned long start, unsigned long end))
void sparc_ultra_mapioaddr(unsigned long physaddr, unsigned long virt_addr,
int bus, int rdonly)
{
- pgd_t *pgdp = pgd_offset(init_task.mm, virt_addr);
+ pgd_t *pgdp = pgd_offset(&init_mm, virt_addr);
pmd_t *pmdp = pmd_offset(pgdp, virt_addr);
pte_t *ptep = pte_offset(pmdp, virt_addr);
pte_t pte;
@@ -1095,7 +1099,7 @@ void sparc_ultra_unmapioaddr(unsigned long virt_addr)
pmd_t *pmdp;
pte_t *ptep;
- pgdp = pgd_offset(init_task.mm, virt_addr);
+ pgdp = pgd_offset(&init_mm, virt_addr);
pmdp = pmd_offset(pgdp, virt_addr);
ptep = pte_offset(pmdp, virt_addr);
@@ -1139,8 +1143,8 @@ void sparc_ultra_dump_dtlb(void)
extern unsigned long free_area_init(unsigned long, unsigned long);
extern unsigned long sun_serial_setup(unsigned long);
-__initfunc(unsigned long
-paging_init(unsigned long start_mem, unsigned long end_mem))
+unsigned long __init
+paging_init(unsigned long start_mem, unsigned long end_mem)
{
extern pmd_t swapper_pmd_dir[1024];
extern unsigned int sparc64_vpte_patchme1[1];
@@ -1259,7 +1263,7 @@ paging_init(unsigned long start_mem, unsigned long end_mem))
return device_scan (PAGE_ALIGN (start_mem));
}
-__initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long end_mem))
+static void __init taint_real_pages(unsigned long start_mem, unsigned long end_mem)
{
unsigned long tmp = 0, paddr, endaddr;
unsigned long end = __pa(end_mem);
@@ -1305,7 +1309,7 @@ __initfunc(static void taint_real_pages(unsigned long start_mem, unsigned long e
}
}
-__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
int datapages = 0;
@@ -1319,6 +1323,7 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
max_mapnr = MAP_NR(end_mem);
high_memory = (void *) end_mem;
+ start_mem = ((start_mem + 7UL) & ~7UL);
sparc64_valid_addr_bitmap = (unsigned long *)start_mem;
i = max_mapnr >> ((22 - PAGE_SHIFT) + 6);
i += 1;
@@ -1472,4 +1477,6 @@ void si_meminfo(struct sysinfo *val)
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;
+ val->totalbig = 0;
+ val->freebig = 0;
}
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 3d3d1a289..71e05dc7e 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -1,9 +1,11 @@
-/* $Id: ultra.S,v 1.32 1999/03/28 08:39:34 davem Exp $
+/* $Id: ultra.S,v 1.34 1999/09/10 10:40:51 davem Exp $
* ultra.S: Don't expand these all over the place...
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/config.h>
+
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
@@ -136,36 +138,37 @@ __flush_tlb_range_pbp_slow:
flush_icache_page: /* %o0 = phys_page */
sethi %hi(1 << 13), %o2 ! IC_set bit
mov 1, %g1
- srlx %o0, 5, %o0 ! phys-addr comparitor
+ srlx %o0, 5, %o0
clr %o1 ! IC_addr
sllx %g1, 36, %g1
sub %g1, 1, %g2
- andn %g2, 0xff, %g2 ! IC_tag mask
- nop
+ or %o0, %g1, %o0 ! VALID+phys-addr comparitor
+ sllx %g2, 1, %g2
+ andn %g2, 0xfe, %g2 ! IC_tag mask
1: ldda [%o1] ASI_IC_TAG, %o4
and %o5, %g2, %o5
cmp %o5, %o0
be,pn %xcc, iflush1
- nop
+ add %o1, 0x20, %g3
2: ldda [%o1 + %o2] ASI_IC_TAG, %o4
and %o5, %g2, %o5
- cmp %o5, %o0
+ cmp %o5, %o0
be,pn %xcc, iflush2
nop
-3: add %o1, 0x20, %o1
- cmp %o1, %o2
+3: cmp %g3, %o2
bne,pt %xcc, 1b
- nop
+ mov %g3, %o1
retl
nop
+
iflush1:stxa %g0, [%o1] ASI_IC_TAG
- ba,pt %xcc, 2b
- flush %g6
+ flush %g6
+ ba,a,pt %xcc, 2b
iflush2:stxa %g0, [%o1 + %o2] ASI_IC_TAG
- ba,pt %xcc, 3b
- flush %g6
+ flush %g6
+ ba,a,pt %xcc, 3b
#ifdef __SMP__
/* These are all called by the slaves of a cross call, at