author     Ralf Baechle <ralf@linux-mips.org>   1999-06-13 16:29:25 +0000
committer  Ralf Baechle <ralf@linux-mips.org>   1999-06-13 16:29:25 +0000
commit     db7d4daea91e105e3859cf461d7e53b9b77454b2 (patch)
tree       9bb65b95440af09e8aca63abe56970dd3360cc57 /arch/arm/mm
parent     9c1c01ead627bdda9211c9abd5b758d6c687d8ac (diff)
Merge with Linux 2.2.8.
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Makefile           27
-rw-r--r--  arch/arm/mm/fault-armo.c      161
-rw-r--r--  arch/arm/mm/fault-armv.c      525
-rw-r--r--  arch/arm/mm/fault-common.c    188
-rw-r--r--  arch/arm/mm/init.c             90
-rw-r--r--  arch/arm/mm/ioremap.c         149
-rw-r--r--  arch/arm/mm/mm-arc.c           82
-rw-r--r--  arch/arm/mm/mm-armv.c           6
-rw-r--r--  arch/arm/mm/mm-ebsa285.c       39
-rw-r--r--  arch/arm/mm/mm-footbridge.c    91
-rw-r--r--  arch/arm/mm/mm-vnc.c           31
-rw-r--r--  arch/arm/mm/proc-arm2,3.S      20
-rw-r--r--  arch/arm/mm/proc-arm6,7.S      61
-rw-r--r--  arch/arm/mm/proc-sa110.S      122
-rw-r--r--  arch/arm/mm/small_page.c       73
15 files changed, 1067 insertions(+), 598 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index fc9809144..665c7c0b1 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -8,37 +8,30 @@
# Note 2! The CFLAGS definition is now in the main makefile...
all: lib first_rule
-ifeq ($(MACHINE),a5k)
-MMARCH=arc
-else
-MMARCH=$(MACHINE)
-endif
O_TARGET := mm.o
-O_OBJS := init.o extable.o fault-$(PROCESSOR).o mm-$(MMARCH).o
+O_OBJS := init.o extable.o fault-$(PROCESSOR).o small_page.o
ifeq ($(PROCESSOR),armo)
O_OBJS += proc-arm2,3.o
endif
ifeq ($(PROCESSOR),armv)
- O_OBJS += small_page.o proc-arm6,7.o proc-sa110.o
+ O_OBJS += mm-$(MACHINE).o proc-arm6,7.o proc-sa110.o ioremap.o
endif
include $(TOPDIR)/Rules.make
-proc-arm2,3.o: ../lib/constants.h
-proc-arm6,7.o: ../lib/constants.h
-proc-sa110.o: ../lib/constants.h
-
%.o: %.S
-ifneq ($(CONFIG_BINUTILS_NEW),y)
- $(CC) $(CFLAGS) -D__ASSEMBLY__ -E $< | tr ';$$' '\n#' > ..$@.tmp.s
- $(CC) $(CFLAGS:-pipe=) -c -o $@ ..$@.tmp.s
- $(RM) ..$@.tmp.s
-else
$(CC) $(CFLAGS) -D__ASSEMBLY__ -c -o $@ $<
-endif
.PHONY: lib
lib:; @$(MAKE) -C ../lib constants.h
+
+# Special dependencies
+fault-armv.o: fault-common.c
+fault-armo.o: fault-common.c
+proc-arm2,3.o: ../lib/constants.h
+proc-arm6,7.o: ../lib/constants.h
+proc-sa110.o: ../lib/constants.h
+
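The new "Special dependencies" block exists because fault-armv.o and fault-armo.o no longer link against a common object: each front-end file defines its fault-mode macros (DO_COW, READ_FAULT) and then textually #includes fault-common.c, so the shared body is compiled twice with different semantics. A minimal sketch of the idiom, with invented names (two source files shown in one listing):

    /* common.c -- never compiled on its own, only via #include */
    #include <stdio.h>

    static void do_fault(int mode)
    {
            if (READ_FAULT(mode))   /* macro supplied by the including file */
                    printf("read fault, mode %d\n", mode);
            else
                    printf("write fault, mode %d\n", mode);
    }

    /* variant.c -- one of the two front-ends; the other flips the macro */
    #define READ_FAULT(m)   ((m) & 0x02)
    #include "common.c"

    int main(void)
    {
            do_fault(0x02);         /* prints "read fault, mode 2" */
            return 0;
    }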
diff --git a/arch/arm/mm/fault-armo.c b/arch/arm/mm/fault-armo.c
index 6fe1f30ff..c51980771 100644
--- a/arch/arm/mm/fault-armo.c
+++ b/arch/arm/mm/fault-armo.c
@@ -1,11 +1,10 @@
/*
- * linux/arch/arm/mm/fault.c
+ * linux/arch/arm/mm/fault-armo.c
*
* Copyright (C) 1995 Linus Torvalds
- * Modifications for ARM processor (c) 1995, 1996 Russell King
+ * Modifications for ARM processor (c) 1995-1999 Russell King
*/
-#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -15,8 +14,7 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -27,35 +25,32 @@
#define FAULT_CODE_WRITE 0x02
#define FAULT_CODE_USER 0x01
-struct pgtable_cache_struct quicklists;
+#define DO_COW(m) ((m) & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW))
+#define READ_FAULT(m) (!((m) & FAULT_CODE_WRITE))
-void __bad_pmd(pmd_t *pmd)
+#include "fault-common.c"
+
+static void *alloc_table(int size, int prio)
{
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
-#ifdef CONFIG_DEBUG_ERRORS
- __backtrace();
-#endif
- set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
+ if (size != 128)
+ printk("invalid table size\n");
+ return (void *)get_page_8k(prio);
}
-void __bad_pmd_kernel(pmd_t *pmd)
+void free_table(void *table)
{
- printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
-#ifdef CONFIG_DEBUG_ERRORS
- __backtrace();
-#endif
- set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
+ free_page_8k((unsigned long)table);
}
pgd_t *get_pgd_slow(void)
{
- pgd_t *pgd = (pgd_t *) kmalloc(PTRS_PER_PGD * BYTES_PER_PTR, GFP_KERNEL);
+ pgd_t *pgd = (pgd_t *)alloc_table(PTRS_PER_PGD * BYTES_PER_PTR, GFP_KERNEL);
pgd_t *init;
-
+
if (pgd) {
init = pgd_offset(&init_mm, 0);
- memzero (pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR);
- memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ memzero(pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR);
+ memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * BYTES_PER_PTR);
}
return pgd;
@@ -65,17 +60,17 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
pte_t *pte;
- pte = (pte_t *) kmalloc (PTRS_PER_PTE * BYTES_PER_PTR, GFP_KERNEL);
+ pte = (pte_t *)alloc_table(PTRS_PER_PTE * BYTES_PER_PTR, GFP_KERNEL);
if (pmd_none(*pmd)) {
if (pte) {
- memzero (pte, PTRS_PER_PTE * BYTES_PER_PTR);
+ memzero(pte, PTRS_PER_PTE * BYTES_PER_PTR);
set_pmd(pmd, mk_pmd(pte));
return pte + offset;
}
set_pmd(pmd, mk_pmd(BAD_PAGETABLE));
return NULL;
}
- kfree (pte);
+ free_table((void *)pte);
if (pmd_bad(*pmd)) {
__bad_pmd(pmd);
return NULL;
@@ -83,126 +78,22 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
return (pte_t *) pmd_page(*pmd) + offset;
}
-extern void die_if_kernel(char *msg, struct pt_regs *regs, unsigned int err, unsigned int ret);
-
-static void kernel_page_fault (unsigned long addr, int mode, struct pt_regs *regs,
- struct task_struct *tsk, struct mm_struct *mm)
-{
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- pgd_t *pgd;
- if (addr < PAGE_SIZE)
- printk (KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- else
- printk (KERN_ALERT "Unable to handle kernel paging request");
- printk (" at virtual address %08lx\n", addr);
- printk (KERN_ALERT "current->tss.memmap = %08lX\n", tsk->tss.memmap);
- pgd = pgd_offset (mm, addr);
- printk (KERN_ALERT "*pgd = %08lx", pgd_val (*pgd));
- if (!pgd_none (*pgd)) {
- pmd_t *pmd;
- pmd = pmd_offset (pgd, addr);
- printk (", *pmd = %08lx", pmd_val (*pmd));
- if (!pmd_none (*pmd))
- printk (", *pte = %08lx", pte_val (*pte_offset (pmd, addr)));
- }
- printk ("\n");
- die_if_kernel ("Oops", regs, mode, SIGKILL);
- do_exit (SIGKILL);
-}
-
-static void
-handle_dataabort (unsigned long addr, int mode, struct pt_regs *regs)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long fixup;
-
- lock_kernel();
- tsk = current;
- mm = tsk->mm;
-
- down(&mm->mmap_sem);
- vma = find_vma (mm, addr);
- if (!vma)
- goto bad_area;
- if (addr >= vma->vm_start)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack (vma, addr))
- goto bad_area;
-
- /*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- if (!(mode & FAULT_CODE_WRITE)) { /* write? */
- if (!(vma->vm_flags & (VM_READ|VM_EXEC)))
- goto bad_area;
- } else {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- }
- handle_mm_fault (tsk, vma, addr, mode & (FAULT_CODE_WRITE|FAULT_CODE_FORCECOW));
- up(&mm->mmap_sem);
- goto out;
-
- /*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up(&mm->mmap_sem);
- if (mode & FAULT_CODE_USER) {
-//extern int console_loglevel;
-//cli();
- tsk->tss.error_code = mode;
- tsk->tss.trap_no = 14;
-//console_loglevel = 9;
- printk ("%s: memory violation at pc=0x%08lx, lr=0x%08lx (bad address=0x%08lx, code %d)\n",
- tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
-//#ifdef DEBUG
- show_regs (regs);
- c_backtrace (regs->ARM_fp, 0);
-//#endif
- force_sig(SIGSEGV, tsk);
-//while (1);
- goto out;
- }
-
- /* Are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
- printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
- tsk->comm, regs->ARM_pc, addr, fixup);
- regs->ARM_pc = fixup;
- goto out;
- }
-
-
- kernel_page_fault (addr, mode, regs, tsk, mm);
-out:
- unlock_kernel();
-}
-
/*
* Handle a data abort. Note that we have to handle a range of addresses
* on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force
- * a copy-on-write
+ * a copy-on-write. However, on the second page, we always force COW.
*/
asmlinkage void
-do_DataAbort (unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
+do_DataAbort(unsigned long min_addr, unsigned long max_addr, int mode, struct pt_regs *regs)
{
- handle_dataabort (min_addr, mode, regs);
+ do_page_fault(min_addr, mode, regs);
if ((min_addr ^ max_addr) >> PAGE_SHIFT)
- handle_dataabort (max_addr, mode | FAULT_CODE_FORCECOW, regs);
+ do_page_fault(max_addr, mode | FAULT_CODE_FORCECOW, regs);
}
asmlinkage int
-do_PrefetchAbort (unsigned long addr, int mode, struct pt_regs *regs)
+do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
#if 0
if (the memc mapping for this page exists - can check now...) {
@@ -210,6 +101,6 @@ do_PrefetchAbort (unsigned long addr, int mode, struct pt_regs *regs)
return 0;
}
#endif
- handle_dataabort (addr, mode, regs);
+ do_page_fault(addr, FAULT_CODE_USER|FAULT_CODE_PREFETCH, regs);
return 1;
}
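The (min_addr ^ max_addr) >> PAGE_SHIFT test above is a branch-free way of asking whether the two ends of an ldm transfer land in different pages: exclusive-or cancels every bit the addresses share, so any bit surviving above the in-page offset means the page numbers differ. A standalone check of the arithmetic (a PAGE_SHIFT of 12 is assumed here purely for illustration; the ARM2/3 MEMC page size differs):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* illustrative value only */

    static int crosses_page(unsigned long min_addr, unsigned long max_addr)
    {
            /* non-zero iff the addresses have different page numbers */
            return ((min_addr ^ max_addr) >> PAGE_SHIFT) != 0;
    }

    int main(void)
    {
            printf("%d\n", crosses_page(0x8ff8, 0x9004)); /* 1: straddles 0x9000 */
            printf("%d\n", crosses_page(0x9004, 0x9010)); /* 0: same page */
            return 0;
    }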
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index f090c5f2c..d57d4fb20 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -1,10 +1,11 @@
/*
- * linux/arch/arm/mm/fault.c
+ * linux/arch/arm/mm/fault-armv.c
*
* Copyright (C) 1995 Linus Torvalds
- * Modifications for ARM processor (c) 1995, 1996 Russell King
+ * Modifications for ARM processor (c) 1995-1999 Russell King
*/
+#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -14,43 +15,37 @@
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/unaligned.h>
#define FAULT_CODE_READ 0x02
#define FAULT_CODE_USER 0x01
-struct pgtable_cache_struct quicklists;
+#define DO_COW(m) (!((m) & FAULT_CODE_READ))
+#define READ_FAULT(m) ((m) & FAULT_CODE_READ)
-void __bad_pmd(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE));
-}
-
-void __bad_pmd_kernel(pmd_t *pmd)
-{
- printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
- set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE));
-}
+#include "fault-common.c"
pgd_t *get_pgd_slow(void)
{
/*
* need to get a 16k page for level 1
*/
- pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,2);
pgd_t *init;
-
+
if (pgd) {
init = pgd_offset(&init_mm, 0);
- memzero ((void *)pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR);
- memcpy (pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ memzero(pgd, USER_PTRS_PER_PGD * BYTES_PER_PTR);
+ memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * BYTES_PER_PTR);
+ clean_cache_area(pgd, PTRS_PER_PGD * BYTES_PER_PTR);
}
return pgd;
}
@@ -59,17 +54,19 @@ pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
pte_t *pte;
- pte = (pte_t *) get_small_page(GFP_KERNEL);
+ pte = (pte_t *)get_page_2k(GFP_KERNEL);
if (pmd_none(*pmd)) {
if (pte) {
- memzero (pte, PTRS_PER_PTE * BYTES_PER_PTR);
+ memzero(pte, 2 * PTRS_PER_PTE * BYTES_PER_PTR);
+ clean_cache_area(pte, PTRS_PER_PTE * BYTES_PER_PTR);
+ pte += PTRS_PER_PTE;
set_pmd(pmd, mk_user_pmd(pte));
return pte + offset;
}
set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE));
return NULL;
}
- free_small_page ((unsigned long) pte);
+ free_page_2k((unsigned long)pte);
if (pmd_bad(*pmd)) {
__bad_pmd(pmd);
return NULL;
@@ -81,17 +78,19 @@ pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
{
pte_t *pte;
- pte = (pte_t *) get_small_page(GFP_KERNEL);
+ pte = (pte_t *)get_page_2k(GFP_KERNEL);
if (pmd_none(*pmd)) {
if (pte) {
- memzero (pte, PTRS_PER_PTE * BYTES_PER_PTR);
+ memzero(pte, 2 * PTRS_PER_PTE * BYTES_PER_PTR);
+ clean_cache_area(pte, PTRS_PER_PTE * BYTES_PER_PTR);
+ pte += PTRS_PER_PTE;
set_pmd(pmd, mk_kernel_pmd(pte));
return pte + offset;
}
set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE));
return NULL;
}
- free_small_page ((unsigned long) pte);
+ free_page_2k((unsigned long)pte);
if (pmd_bad(*pmd)) {
__bad_pmd_kernel(pmd);
return NULL;
@@ -99,10 +98,8 @@ pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
return (pte_t *) pmd_page(*pmd) + offset;
}
-extern void die_if_kernel(char *msg, struct pt_regs *regs, unsigned int err, unsigned int ret);
-
#ifdef DEBUG
-static int sp_valid (unsigned long *sp)
+static int sp_valid(unsigned long *sp)
{
unsigned long addr = (unsigned long) sp;
@@ -114,187 +111,371 @@ static int sp_valid (unsigned long *sp)
}
#endif
-static void kernel_page_fault (unsigned long addr, int mode, struct pt_regs *regs,
- struct task_struct *tsk, struct mm_struct *mm)
+#ifdef CONFIG_ALIGNMENT_TRAP
+/*
+ * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
+ * /proc/sys/debug/alignment, modified and integrated into
+ * Linux 2.1 by Russell King
+ *
+ * NOTE!!! This is not portable onto the ARM6/ARM7 processors yet. Also,
+ * it seems to give a severe performance impact (1 abort/ms - NW runs at
+ * ARM6 speeds) with GCC 2.7.2.2 - needs checking with a later GCC/EGCS.
+ *
+ * IMHO, I don't think that the trap handler is advantageous on ARM6,7
+ * processors (they'll run like an ARM3). We'll see.
+ */
+#define CODING_BITS(i) (i & 0x0e000000)
+
+#define LDST_I_BIT(i) (i & (1 << 26)) /* Immediate constant */
+#define LDST_P_BIT(i) (i & (1 << 24)) /* Preindex */
+#define LDST_U_BIT(i) (i & (1 << 23)) /* Add offset */
+#define LDST_W_BIT(i) (i & (1 << 21)) /* Writeback */
+#define LDST_L_BIT(i) (i & (1 << 20)) /* Load */
+
+#define LDSTH_I_BIT(i) (i & (1 << 22)) /* half-word immed */
+#define LDM_S_BIT(i) (i & (1 << 22)) /* write CPSR from SPSR */
+
+#define RN_BITS(i) ((i >> 16) & 15) /* Rn */
+#define RD_BITS(i) ((i >> 12) & 15) /* Rd */
+#define RM_BITS(i) (i & 15) /* Rm */
+
+#define REGMASK_BITS(i) (i & 0xffff)
+#define OFFSET_BITS(i) (i & 0x0fff)
+
+#define IS_SHIFT(i) (i & 0x0ff0)
+#define SHIFT_BITS(i) ((i >> 7) & 0x1f)
+#define SHIFT_TYPE(i) (i & 0x60)
+#define SHIFT_LSL 0x00
+#define SHIFT_LSR 0x20
+#define SHIFT_ASR 0x40
+#define SHIFT_RORRRX 0x60
+
+static unsigned long ai_user;
+static unsigned long ai_sys;
+static unsigned long ai_skipped;
+static unsigned long ai_half;
+static unsigned long ai_word;
+static unsigned long ai_multi;
+
+static int proc_alignment_read(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
{
- /*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
- pgd_t *pgd;
- if (addr < PAGE_SIZE)
- printk (KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- else
- printk (KERN_ALERT "Unable to handle kernel paging request");
- printk (" at virtual address %08lx\n", addr);
- printk (KERN_ALERT "current->tss.memmap = %08lX\n", tsk->tss.memmap);
- pgd = pgd_offset (mm, addr);
- printk (KERN_ALERT "*pgd = %08lx", pgd_val (*pgd));
- if (!pgd_none (*pgd)) {
- pmd_t *pmd;
- pmd = pmd_offset (pgd, addr);
- printk (", *pmd = %08lx", pmd_val (*pmd));
- if (!pmd_none (*pmd))
- printk (", *pte = %08lx", pte_val (*pte_offset (pmd, addr)));
- }
- printk ("\n");
- die_if_kernel ("Oops", regs, mode, SIGKILL);
- do_exit (SIGKILL);
+ char *p = page;
+ int len;
+
+ p += sprintf(p, "User:\t\t%li\n", ai_user);
+ p += sprintf(p, "System:\t\t%li\n", ai_sys);
+ p += sprintf(p, "Skipped:\t%li\n", ai_skipped);
+ p += sprintf(p, "Half:\t\t%li\n", ai_half);
+ p += sprintf(p, "Word:\t\t%li\n", ai_word);
+ p += sprintf(p, "Multi:\t\t%li\n", ai_multi);
+
+ len = (p - page) - off;
+ if (len < 0)
+ len = 0;
+
+ *eof = (len <= count) ? 1 : 0;
+ *start = page + off;
+
+ return len;
}
-static void page_fault (unsigned long addr, int mode, struct pt_regs *regs)
+/*
+ * This needs to be done after sysctl_init, otherwise sys/
+ * will be overwritten.
+ */
+void __init alignment_init(void)
{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long fixup;
-
- lock_kernel();
- tsk = current;
- mm = tsk->mm;
-
- down(&mm->mmap_sem);
- vma = find_vma (mm, addr);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= addr)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack (vma, addr))
- goto bad_area;
+ struct proc_dir_entry *e;
- /*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- if (mode & FAULT_CODE_READ) { /* read? */
- if (!(vma->vm_flags & (VM_READ|VM_EXEC)))
- goto bad_area;
- } else {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
+ e = create_proc_entry("sys/debug/alignment", S_IFREG | S_IRUGO, NULL);
+
+ if (e)
+ e->read_proc = proc_alignment_read;
+}
+
+static int
+do_alignment_exception(struct pt_regs *regs)
+{
+ unsigned int instr, rd, rn, correction, nr_regs, regbits;
+ unsigned long eaddr;
+ union { unsigned long un; signed long sn; } offset;
+
+ if (user_mode(regs)) {
+ set_cr(cr_no_alignment);
+ ai_user += 1;
+ return 0;
}
- handle_mm_fault (tsk, vma, addr & PAGE_MASK, !(mode & FAULT_CODE_READ));
- up(&mm->mmap_sem);
- goto out;
- /*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up(&mm->mmap_sem);
- if (mode & FAULT_CODE_USER) {
- tsk->tss.error_code = mode;
- tsk->tss.trap_no = 14;
- printk ("%s: memory violation at pc=0x%08lx, lr=0x%08lx (bad address=0x%08lx, code %d)\n",
- tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
-#ifdef DEBUG
- {
- unsigned int i, j;
- unsigned long *sp = (unsigned long *) (regs->ARM_sp - 128);
- for (j = 0; j < 20 && sp_valid (sp); j++) {
- printk ("%p: ", sp);
- for (i = 0; i < 8 && sp_valid (sp); i += 1, sp++)
- printk ("%08lx ", *sp);
- printk ("\n");
+ ai_sys += 1;
+
+ instr = *(unsigned long *)instruction_pointer(regs);
+ correction = 4; /* sometimes 8 on ARMv3 */
+ regs->ARM_pc += correction + 4;
+
+ rd = RD_BITS(instr);
+ rn = RN_BITS(instr);
+ eaddr = regs->uregs[rn];
+
+ switch(CODING_BITS(instr)) {
+ case 0x00000000:
+ if ((instr & 0x0ff00ff0) == 0x01000090) {
+ ai_skipped += 1;
+ printk(KERN_ERR "Unaligned trap: not handling swp instruction\n");
+ return 1;
+ }
+
+ if (((instr & 0x0e000090) == 0x00000090) && (instr & 0x60) != 0) {
+ ai_half += 1;
+ if (LDSTH_I_BIT(instr))
+ offset.un = (instr & 0xf00) >> 4 | (instr & 15);
+ else
+ offset.un = regs->uregs[RM_BITS(instr)];
+
+ if (LDST_P_BIT(instr)) {
+ if (LDST_U_BIT(instr))
+ eaddr += offset.un;
+ else
+ eaddr -= offset.un;
}
+
+ if (LDST_L_BIT(instr))
+ regs->uregs[rd] = get_unaligned((unsigned short *)eaddr);
+ else
+ put_unaligned(regs->uregs[rd], (unsigned short *)eaddr);
+
+ /* signed half-word? */
+ if (instr & 0x40)
+ regs->uregs[rd] = (long)((short) regs->uregs[rd]);
+
+ if (!LDST_P_BIT(instr)) {
+ if (LDST_U_BIT(instr))
+ eaddr += offset.un;
+ else
+ eaddr -= offset.un;
+ regs->uregs[rn] = eaddr;
+ } else if (LDST_W_BIT(instr))
+ regs->uregs[rn] = eaddr;
+ break;
}
- show_regs (regs);
- c_backtrace (regs->ARM_fp, regs->ARM_cpsr);
-#endif
- force_sig(SIGSEGV, tsk);
- goto out;
- }
- /* Are we prepared to handle this kernel fault? */
- if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
- printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
- tsk->comm, regs->ARM_pc, addr, fixup);
- regs->ARM_pc = fixup;
- goto out;
+ default:
+ ai_skipped += 1;
+ panic("Alignment trap: not handling instruction %08X at %08lX",
+ instr, regs->ARM_pc - correction - 4);
+ break;
+
+ case 0x04000000:
+ offset.un = OFFSET_BITS(instr);
+ goto ldr_str;
+
+ case 0x06000000:
+ offset.un = regs->uregs[RM_BITS(instr)];
+
+ if (IS_SHIFT(instr)) {
+ unsigned int shiftval = SHIFT_BITS(instr);
+
+ switch(SHIFT_TYPE(instr)) {
+ case SHIFT_LSL:
+ offset.un <<= shiftval;
+ break;
+
+ case SHIFT_LSR:
+ offset.un >>= shiftval;
+ break;
+
+ case SHIFT_ASR:
+ offset.sn >>= shiftval;
+ break;
+
+ case SHIFT_RORRRX:
+ if (shiftval == 0) {
+ offset.un >>= 1;
+ if (regs->ARM_cpsr & CC_C_BIT)
+ offset.un |= 1 << 31;
+ } else
+ offset.un = offset.un >> shiftval |
+ offset.un << (32 - shiftval);
+ break;
+ }
+ }
+
+ ldr_str:
+ ai_word += 1;
+ if (LDST_P_BIT(instr)) {
+ if (LDST_U_BIT(instr))
+ eaddr += offset.un;
+ else
+ eaddr -= offset.un;
+ } else {
+ if (LDST_W_BIT(instr)) {
+ printk(KERN_ERR "Not handling ldrt/strt correctly\n");
+ return 1;
+ }
+ }
+
+ if (LDST_L_BIT(instr)) {
+ regs->uregs[rd] = get_unaligned((unsigned long *)eaddr);
+ if (rd == 15)
+ correction = 0;
+ } else
+ put_unaligned(regs->uregs[rd], (unsigned long *)eaddr);
+
+ if (!LDST_P_BIT(instr)) {
+ if (LDST_U_BIT(instr))
+ eaddr += offset.un;
+ else
+ eaddr -= offset.un;
+
+ regs->uregs[rn] = eaddr;
+ } else if (LDST_W_BIT(instr))
+ regs->uregs[rn] = eaddr;
+ break;
+
+ case 0x08000000:
+ if (LDM_S_BIT(instr))
+ panic("Alignment trap: not handling LDM with s-bit\n");
+ ai_multi += 1;
+
+ for (regbits = REGMASK_BITS(instr), nr_regs = 0; regbits; regbits >>= 1)
+ nr_regs += 4;
+
+ if (!LDST_U_BIT(instr))
+ eaddr -= nr_regs;
+
+ if ((LDST_U_BIT(instr) == 0 && LDST_P_BIT(instr) == 0) ||
+ (LDST_U_BIT(instr) && LDST_P_BIT(instr)))
+ eaddr += 4;
+
+ for (regbits = REGMASK_BITS(instr), rd = 0; regbits; regbits >>= 1, rd += 1)
+ if (regbits & 1) {
+ if (LDST_L_BIT(instr)) {
+ regs->uregs[rd] = get_unaligned((unsigned long *)eaddr);
+ if (rd == 15)
+ correction = 0;
+ } else
+ put_unaligned(regs->uregs[rd], (unsigned long *)eaddr);
+ eaddr += 4;
+ }
+
+ if (LDST_W_BIT(instr)) {
+ if (LDST_P_BIT(instr) && !LDST_U_BIT(instr))
+ eaddr -= nr_regs;
+ else if (LDST_P_BIT(instr))
+ eaddr -= 4;
+ else if (!LDST_U_BIT(instr))
+ eaddr -= 4 + nr_regs;
+ regs->uregs[rn] = eaddr;
+ }
+ break;
}
- kernel_page_fault (addr, mode, regs, tsk, mm);
-out:
- unlock_kernel();
+ regs->ARM_pc -= correction;
+
+ return 0;
}
-/*
- * Handle a data abort. Note that we have to handle a range of addresses
- * on ARM2/3 for ldm. If both pages are zero-mapped, then we have to force
- * a copy-on-write
- */
+#endif
+
asmlinkage void
-do_DataAbort (unsigned long addr, int fsr, int error_code, struct pt_regs *regs)
+do_DataAbort(unsigned long addr, int fsr, int error_code, struct pt_regs *regs)
{
if (user_mode(regs))
error_code |= FAULT_CODE_USER;
+
#define DIE(signr,nam)\
force_sig(signr, current);\
- die_if_kernel(nam, regs, fsr, signr);\
- break;
+ die(nam, regs, fsr);\
+ do_exit(signr);\
+ break
switch (fsr & 15) {
- case 2:
- DIE(SIGKILL, "Terminal exception")
+ /*
+ * 0 - vector exception
+ */
case 0:
- DIE(SIGSEGV, "Vector exception")
+ force_sig(SIGSEGV, current);
+ if (!user_mode(regs)) {
+ die("vector exception", regs, fsr);
+ do_exit(SIGSEGV);
+ }
+ break;
+
+ /*
+ * 15 - permission fault on page
+ * 5 - page-table entry descriptor fault
+ * 7 - first-level descriptor fault
+ */
+ case 15: case 5: case 7:
+ do_page_fault(addr, error_code, regs);
+ break;
+
+ /*
+ * 13 - permission fault on section
+ */
+ case 13:
+ force_sig(SIGSEGV, current);
+ if (!user_mode(regs)) {
+ die("section permission fault", regs, fsr);
+ do_exit(SIGSEGV);
+ } else {
+#ifdef CONFIG_DEBUG_USER
+ printk("%s: permission fault on section, "
+ "address=0x%08lx, code %d\n",
+ current->comm, addr, error_code);
+#ifdef DEBUG
+ {
+ unsigned int i, j;
+ unsigned long *sp;
+
+ sp = (unsigned long *) (regs->ARM_sp - 128);
+ for (j = 0; j < 20 && sp_valid(sp); j++) {
+ printk("%p: ", sp);
+ for (i = 0; i < 8 && sp_valid(sp); i += 1, sp++)
+ printk("%08lx ", *sp);
+ printk("\n");
+ }
+ show_regs(regs);
+ c_backtrace(regs->ARM_fp, regs->ARM_cpsr);
+ }
+#endif
+#endif
+ }
+ break;
+
case 1:
case 3:
- DIE(SIGBUS, "Alignment exception")
+#ifdef CONFIG_ALIGNMENT_TRAP
+ if (!do_alignment_exception(regs))
+ break;
+#endif
+ /*
+ * this should never happen
+ */
+ DIE(SIGBUS, "Alignment exception");
+ break;
+
+ case 2:
+ DIE(SIGKILL, "Terminal exception");
case 12:
case 14:
- DIE(SIGBUS, "External abort on translation")
+ DIE(SIGBUS, "External abort on translation");
case 9:
case 11:
- DIE(SIGSEGV, "Domain fault")
- case 13:/* permission fault on section */
-#ifdef DEBUG
- {
- unsigned int i, j;
- unsigned long *sp;
-
- printk ("%s: section permission fault (bad address=0x%08lx, code %d)\n",
- current->comm, addr, error_code);
- sp = (unsigned long *) (regs->ARM_sp - 128);
- for (j = 0; j < 20 && sp_valid (sp); j++) {
- printk ("%p: ", sp);
- for (i = 0; i < 8 && sp_valid (sp); i += 1, sp++)
- printk ("%08lx ", *sp);
- printk ("\n");
- }
- show_regs (regs);
- c_backtrace(regs->ARM_fp, regs->ARM_cpsr);
- }
-#endif
- DIE(SIGSEGV, "Permission fault")
+ DIE(SIGSEGV, "Domain fault");
- case 15:/* permission fault on page */
- case 5: /* page-table entry descriptor fault */
- case 7: /* first-level descriptor fault */
- page_fault (addr, error_code, regs);
- break;
case 4:
case 6:
- DIE(SIGBUS, "External abort on linefetch")
+ DIE(SIGBUS, "External abort on linefetch");
case 8:
case 10:
- DIE(SIGBUS, "External abort on non-linefetch")
+ DIE(SIGBUS, "External abort on non-linefetch");
}
}
asmlinkage int
-do_PrefetchAbort (unsigned long addr, struct pt_regs *regs)
+do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
{
-#if 0
- /* does this still apply ? */
- if (the memc mapping for this page exists - can check now...) {
- printk ("Page in, but got abort (undefined instruction?)\n");
- return 0;
- }
-#endif
- page_fault (addr, FAULT_CODE_USER|FAULT_CODE_READ, regs);
+ do_page_fault(addr, FAULT_CODE_USER|FAULT_CODE_READ, regs);
return 1;
}
-
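The LDST_*/RN_BITS/RD_BITS macros in the alignment handler above slice fields straight out of the 32-bit instruction image. As a concrete check of the decode path (macros re-parenthesized for standalone use, values worked out by hand rather than taken from the kernel), consider ldr r1, [pc, #4], which encodes as 0xE59F1004:

    #include <stdio.h>

    #define CODING_BITS(i)  ((i) & 0x0e000000)
    #define RN_BITS(i)      (((i) >> 16) & 15)
    #define RD_BITS(i)      (((i) >> 12) & 15)
    #define OFFSET_BITS(i)  ((i) & 0x0fff)
    #define LDST_U_BIT(i)   ((i) & (1 << 23))   /* add offset */
    #define LDST_L_BIT(i)   ((i) & (1 << 20))   /* load */

    int main(void)
    {
            unsigned int instr = 0xE59F1004;    /* ldr r1, [pc, #4] */

            printf("coding=%08x\n", CODING_BITS(instr)); /* 04000000: imm ldr/str */
            printf("rd=%u rn=%u off=%u up=%d load=%d\n",
                   RD_BITS(instr), RN_BITS(instr), OFFSET_BITS(instr),
                   !!LDST_U_BIT(instr), !!LDST_L_BIT(instr));
            /* rd=1 rn=15 off=4 up=1 load=1 */
            return 0;
    }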
diff --git a/arch/arm/mm/fault-common.c b/arch/arm/mm/fault-common.c
new file mode 100644
index 000000000..810dea699
--- /dev/null
+++ b/arch/arm/mm/fault-common.c
@@ -0,0 +1,188 @@
+/*
+ * linux/arch/arm/mm/fault-common.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Modifications for ARM processor (c) 1995-1999 Russell King
+ */
+#include <linux/config.h>
+
+extern void die(char *msg, struct pt_regs *regs, unsigned int err);
+
+void __bad_pmd(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+ set_pmd(pmd, mk_user_pmd(BAD_PAGETABLE));
+}
+
+void __bad_pmd_kernel(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc_kernel: %08lx\n", pmd_val(*pmd));
+#ifdef CONFIG_DEBUG_ERRORS
+ __backtrace();
+#endif
+ set_pmd(pmd, mk_kernel_pmd(BAD_PAGETABLE));
+}
+
+static void
+kernel_page_fault(unsigned long addr, int mode, struct pt_regs *regs,
+ struct task_struct *tsk, struct mm_struct *mm)
+{
+ char *reason;
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ pgd_t *pgd;
+
+ if (addr < PAGE_SIZE)
+ reason = "NULL pointer dereference";
+ else
+ reason = "paging request";
+
+ printk(KERN_ALERT "Unable to handle kernel %s at virtual address %08lx\n",
+ reason, addr);
+ printk(KERN_ALERT "memmap = %08lX, pgd = %p\n", tsk->tss.memmap, mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ printk(KERN_ALERT "*pgd = %08lx", pgd_val(*pgd));
+
+ do {
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ printk("(bad)\n");
+ break;
+ }
+
+ pmd = pmd_offset(pgd, addr);
+ printk(", *pmd = %08lx", pmd_val(*pmd));
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ printk("(bad)\n");
+ break;
+ }
+
+ pte = pte_offset(pmd, addr);
+ printk(", *pte = %08lx", pte_val(*pte));
+ printk(", *ppte = %08lx", pte_val(pte[-PTRS_PER_PTE]));
+ } while(0);
+
+ printk("\n");
+ die("Oops", regs, mode);
+
+ do_exit(SIGKILL);
+}
+
+static void do_page_fault(unsigned long addr, int mode, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+ unsigned long fixup;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || mm == &init_mm)
+ goto no_context;
+
+ down(&mm->mmap_sem);
+ vma = find_vma(mm, addr);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= addr)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, addr))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ if (READ_FAULT(mode)) { /* read? */
+ if (!(vma->vm_flags & (VM_READ|VM_EXEC)))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ if (!handle_mm_fault(tsk, vma, addr & PAGE_MASK, DO_COW(mode)))
+ goto do_sigbus;
+
+ up(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up(&mm->mmap_sem);
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (mode & FAULT_CODE_USER) {
+ tsk->tss.error_code = mode;
+ tsk->tss.trap_no = 14;
+#ifdef CONFIG_DEBUG_USER
+ printk("%s: memory violation at pc=0x%08lx, lr=0x%08lx (bad address=0x%08lx, code %d)\n",
+ tsk->comm, regs->ARM_pc, regs->ARM_lr, addr, mode);
+#endif
+ force_sig(SIGSEGV, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if ((fixup = search_exception_table(instruction_pointer(regs))) != 0) {
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: Exception at [<%lx>] addr=%lx (fixup: %lx)\n",
+ tsk->comm, regs->ARM_pc, addr, fixup);
+#endif
+ regs->ARM_pc = fixup;
+ return;
+ }
+
+ kernel_page_fault(addr, mode, regs, tsk, mm);
+ return;
+
+do_sigbus:
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+ up(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ tsk->tss.error_code = mode;
+ tsk->tss.trap_no = 14;
+ force_sig(SIGBUS, tsk);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(mode & FAULT_CODE_USER))
+ goto no_context;
+}
+
+
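The no_context path above hinges on search_exception_table(): every kernel instruction that may legitimately fault on a user address is registered together with a landing address, and the handler rewrites the PC to that fixup instead of oopsing. A toy model of the lookup, for orientation only (the real table is emitted by the linker and its exact layout is not shown in this patch):

    struct exception_table_entry {
            unsigned long insn;     /* address of the potentially faulting insn */
            unsigned long fixup;    /* where to resume if it faults */
    };

    /* linear scan for clarity; a sorted table would be bisected */
    static unsigned long search_table(const struct exception_table_entry *t,
                                      int n, unsigned long pc)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (t[i].insn == pc)
                            return t[i].fixup;
            return 0;               /* no fixup: genuine kernel fault */
    }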
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index b3b0ecf56..47a2cfde7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,9 @@
#include <asm/proc/mm-init.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD];
+#ifndef CONFIG_NO_PGT_CACHE
+struct pgtable_cache_struct quicklists;
+#endif
extern char _etext, _stext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
@@ -36,6 +39,7 @@ extern char __init_begin, __init_end;
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
+#ifndef CONFIG_NO_PGT_CACHE
if(pgtable_cache_size > high) {
do {
if(pgd_quicklist)
@@ -46,6 +50,7 @@ int do_check_pgt_cache(int low, int high)
free_pte_slow(get_pte_fast()), freed++;
} while(pgtable_cache_size > low);
}
+#endif
return freed;
}
@@ -63,17 +68,18 @@ int do_check_pgt_cache(int low, int high)
* data and COW.
*/
#if PTRS_PER_PTE != 1
-unsigned long *empty_bad_page_table;
+pte_t *empty_bad_page_table;
pte_t *__bad_pagetable(void)
{
- int i;
pte_t bad_page;
+ int i;
bad_page = BAD_PAGE;
for (i = 0; i < PTRS_PER_PTE; i++)
- empty_bad_page_table[i] = (unsigned long)pte_val(bad_page);
- return (pte_t *) empty_bad_page_table;
+ set_pte(empty_bad_page_table + i, bad_page);
+
+ return empty_bad_page_table;
}
#endif
@@ -128,8 +134,11 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
empty_bad_page = (unsigned long *)start_mem;
start_mem += PAGE_SIZE;
#if PTRS_PER_PTE != 1
- empty_bad_page_table = (unsigned long *)start_mem;
- start_mem += PTRS_PER_PTE * sizeof (void *);
+#ifdef CONFIG_CPU_32
+ start_mem += PTRS_PER_PTE * BYTES_PER_PTR;
+#endif
+ empty_bad_page_table = (pte_t *)start_mem;
+ start_mem += PTRS_PER_PTE * BYTES_PER_PTR;
#endif
memzero (empty_zero_page, PAGE_SIZE);
start_mem = setup_pagetables (start_mem, end_mem);
@@ -137,6 +146,9 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
flush_tlb_all();
update_memc_all();
+ end_mem &= PAGE_MASK;
+ high_memory = (void *)end_mem;
+
return free_area_init(start_mem, end_mem);
}
@@ -161,19 +173,18 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
/* mark usable pages in the mem_map[] */
mark_usable_memory_areas(&start_mem, end_mem);
+#define BETWEEN(w,min,max) ((w) >= (unsigned long)(min) && \
+ (w) < (unsigned long)(max))
+
for (tmp = PAGE_OFFSET; tmp < end_mem ; tmp += PAGE_SIZE) {
if (PageReserved(mem_map+MAP_NR(tmp))) {
- if (tmp >= KERNTOPHYS(_stext) &&
- tmp < KERNTOPHYS(_edata)) {
- if (tmp < KERNTOPHYS(_etext))
- codepages++;
- else
- datapages++;
- } else if (tmp >= KERNTOPHYS(__init_begin)
- && tmp < KERNTOPHYS(__init_end))
+ if (BETWEEN(tmp, &__init_begin, &__init_end))
initpages++;
- else if (tmp >= KERNTOPHYS(__bss_start)
- && tmp < (unsigned long) start_mem)
+ else if (BETWEEN(tmp, &_stext, &_etext))
+ codepages++;
+ else if (BETWEEN(tmp, &_etext, &_edata))
+ datapages++;
+ else if (BETWEEN(tmp, &__bss_start, start_mem))
datapages++;
else
reservedpages++;
@@ -181,13 +192,16 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
}
atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_start || (tmp < initrd_start || tmp >= initrd_end))
+ if (!initrd_start || !BETWEEN(tmp, initrd_start, initrd_end))
#endif
free_page(tmp);
}
- printk ("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n",
+
+#undef BETWEEN
+
+ printk ("Memory: %luk/%luM available (%dk code, %dk reserved, %dk data, %dk init)\n",
(unsigned long) nr_free_pages << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
+ max_mapnr >> (20 - PAGE_SHIFT),
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
@@ -203,17 +217,45 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
#endif
}
-void free_initmem (void)
+static void free_area(unsigned long addr, unsigned long end, char *s)
{
- unsigned long addr;
+ unsigned int size = (end - addr) >> 10;
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ for (; addr < end; addr += PAGE_SIZE) {
mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
atomic_set(&mem_map[MAP_NR(addr)].count, 1);
free_page(addr);
}
- printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+
+ if (size)
+ printk(" %dk %s", size, s);
+}
+
+void free_initmem (void)
+{
+ printk("Freeing unused kernel memory:");
+
+ free_area((unsigned long)(&__init_begin),
+ (unsigned long)(&__init_end),
+ "init");
+
+#ifdef CONFIG_FOOTBRIDGE
+ {
+ extern int __netwinder_begin, __netwinder_end, __ebsa285_begin, __ebsa285_end;
+
+ if (!machine_is_netwinder())
+ free_area((unsigned long)(&__netwinder_begin),
+ (unsigned long)(&__netwinder_end),
+ "netwinder");
+
+ if (!machine_is_ebsa285() && !machine_is_cats())
+ free_area((unsigned long)(&__ebsa285_begin),
+ (unsigned long)(&__ebsa285_end),
+ "ebsa285/cats");
+ }
+#endif
+
+ printk("\n");
}
void si_meminfo(struct sysinfo *val)
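Note the units change in the printk above: the available figure stays in kilobytes (pages << (PAGE_SHIFT-10)) while the total switches to megabytes via max_mapnr >> (20 - PAGE_SHIFT). The arithmetic, assuming 4 KB pages (PAGE_SHIFT == 12) purely for the worked number:

    /* pages -> MB: a page is 2^PAGE_SHIFT bytes, a MB is 2^20 bytes */
    #define PAGES_TO_MB(n)  ((n) >> (20 - PAGE_SHIFT))
    /* PAGE_SHIFT == 12: 8192 pages >> 8 == 32, i.e. 8192 * 4 KB == 32 MB */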
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
new file mode 100644
index 000000000..70d7c77b9
--- /dev/null
+++ b/arch/arm/mm/ioremap.c
@@ -0,0 +1,149 @@
+/*
+ * arch/arm/mm/ioremap.c
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ *
+ * Hacked for ARM by Phil Blundell <philb@gnu.org>
+ * Hacked to allow all architectures to build, and various cleanups
+ * by Russell King
+ */
+
+/*
+ * This allows a driver to remap an arbitrary region of bus memory into
+ * virtual space. One should *only* use readl, writel, memcpy_toio and
+ * so on with such remapped areas.
+ *
+ * Because the ARM only has a 32-bit address space we can't address the
+ * whole of the (physical) PCI space at once. PCI huge-mode addressing
+ * allows us to circumvent this restriction by splitting PCI space into
+ * two 2GB chunks and mapping only one at a time into processor memory.
+ * We use MMU protection domains to trap any attempt to access the bank
+ * that is not currently mapped. (This isn't fully implemented yet.)
+ *
+ * DC21285 currently has a bug in that the PCI address extension
+ * register affects the address of any writes waiting in the outbound
+ * FIFO. Unfortunately, it is not possible to tell the DC21285 to
+ * flush this - flushing the area causes the bus to lock.
+ */
+
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
+ unsigned long phys_addr, pgprot_t pgprot)
+{
+ unsigned long end;
+
+ address &= ~PMD_MASK;
+ end = address + size;
+ if (end > PMD_SIZE)
+ end = PMD_SIZE;
+ do {
+ if (!pte_none(*pte))
+ printk("remap_area_pte: page already exists\n");
+ set_pte(pte, mk_pte_phys(phys_addr, pgprot));
+ address += PAGE_SIZE;
+ phys_addr += PAGE_SIZE;
+ pte++;
+ } while (address < end);
+}
+
+static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+ pgprot_t pgprot;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+
+ phys_addr -= address;
+ pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
+ do {
+ pte_t * pte = pte_alloc_kernel(pmd, address);
+ if (!pte)
+ return -ENOMEM;
+ remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
+ address = (address + PMD_SIZE) & PMD_MASK;
+ pmd++;
+ } while (address < end);
+ return 0;
+}
+
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+ unsigned long size, unsigned long flags)
+{
+ pgd_t * dir;
+ unsigned long end = address + size;
+
+ phys_addr -= address;
+ dir = pgd_offset(&init_mm, address);
+ flush_cache_all();
+ while (address < end) {
+ pmd_t *pmd = pmd_alloc_kernel(dir, address);
+ if (!pmd)
+ return -ENOMEM;
+ if (remap_area_pmd(pmd, address, end - address,
+ phys_addr + address, flags))
+ return -ENOMEM;
+ set_pgdir(address, *dir);
+ address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ dir++;
+ }
+ flush_tlb_all();
+ return 0;
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ *
+ * 'flags' are the extra L_PTE_ flags that you want to specify for this
+ * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
+ */
+void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+{
+ void * addr;
+ struct vm_struct * area;
+ unsigned long offset;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+
+ /*
+ * Don't allow mappings that wrap..
+ */
+ if (!size || size > phys_addr + size)
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size);
+ if (!area)
+ return NULL;
+ addr = area->addr;
+ if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
+ vfree(addr);
+ return NULL;
+ }
+ return (void *) (offset + (char *)addr);
+}
+
+void iounmap(void *addr)
+{
+ return vfree((void *) (PAGE_MASK & (unsigned long) addr));
+}
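For orientation, the expected calling pattern for the new interface is the usual one: map once, touch the region only through the I/O accessors, unmap on teardown. A hypothetical driver fragment (the physical address, size, and register offset are made up; a flags value of 0 requests no extra L_PTE_ attributes):

    static void *regs;

    static int my_probe(void)
    {
            regs = __ioremap(0x40012000, 0x1000, 0);
            if (!regs)
                    return -ENOMEM;
            writel(1, regs + 0x10); /* readl/writel & friends only, per above */
            return 0;
    }

    static void my_remove(void)
    {
            iounmap(regs);
    }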
diff --git a/arch/arm/mm/mm-arc.c b/arch/arm/mm/mm-arc.c
deleted file mode 100644
index 6bb92f037..000000000
--- a/arch/arm/mm/mm-arc.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * arch/arm/mm/mm-arc.c
- *
- * Extra MM routines for the Archimedes architecture
- *
- * Copyright (C) 1998 Russell King
- */
-#include <linux/init.h>
-#include <asm/hardware.h>
-#include <asm/pgtable.h>
-
-unsigned long phys_screen_end;
-
-/*
- * This routine needs more work to make it dynamically release/allocate mem!
- */
-__initfunc(unsigned long map_screen_mem(unsigned long log_start, unsigned long kmem, int update))
-{
- static int updated = 0;
-
- if (updated)
- return 0;
-
- updated = update;
-
- if (update) {
- unsigned long address = log_start, offset;
- pgd_t *pgdp;
-
- kmem = (kmem + 3) & ~3;
-
- pgdp = pgd_offset (&init_mm, address); /* +31 */
- offset = SCREEN_START;
- while (address < SCREEN1_END) {
- unsigned long addr_pmd, end_pmd;
- pmd_t *pmdp;
-
- /* if (pgd_none (*pgdp)) alloc pmd */
- pmdp = pmd_offset (pgdp, address); /* +0 */
- addr_pmd = address & ~PGDIR_MASK; /* 088000 */
- end_pmd = addr_pmd + SCREEN1_END - address; /* 100000 */
- if (end_pmd > PGDIR_SIZE)
- end_pmd = PGDIR_SIZE;
-
- do {
- unsigned long addr_pte, end_pte;
- pte_t *ptep;
-
- if (pmd_none (*pmdp)) {
- pte_t *new_pte = (pte_t *)kmem;
- kmem += PTRS_PER_PTE * BYTES_PER_PTR;
- memzero (new_pte, PTRS_PER_PTE * BYTES_PER_PTR);
- set_pmd (pmdp, mk_pmd(new_pte));
- }
-
- ptep = pte_offset (pmdp, addr_pmd); /* +11 */
- addr_pte = addr_pmd & ~PMD_MASK; /* 088000 */
- end_pte = addr_pte + end_pmd - addr_pmd; /* 100000 */
- if (end_pte > PMD_SIZE)
- end_pte = PMD_SIZE;
-
- do {
- set_pte (ptep, mk_pte(offset, PAGE_KERNEL));
- addr_pte += PAGE_SIZE;
- offset += PAGE_SIZE;
- ptep++;
- } while (addr_pte < end_pte);
-
- pmdp++;
- addr_pmd = (addr_pmd + PMD_SIZE) & PMD_MASK;
- } while (addr_pmd < end_pmd);
-
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- pgdp ++;
- }
-
- phys_screen_end = offset;
- flush_tlb_all ();
- update_memc_all ();
- }
- return kmem;
-}
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 8a226526b..4481fc32b 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -37,7 +37,8 @@ __initfunc(unsigned long setup_io_pagetables(unsigned long start_mem))
virtual = mp->virtual;
physical = mp->physical;
length = mp->length;
- prot = (mp->prot_read ? PTE_AP_READ : 0) | (mp->prot_write ? PTE_AP_WRITE : 0);
+ prot = (mp->prot_read ? L_PTE_USER : 0) | (mp->prot_write ? L_PTE_WRITE : 0)
+ | L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY;
while ((virtual & 1048575 || physical & 1048575) && length >= PAGE_SIZE) {
alloc_init_page(&start_mem, virtual, physical, mp->domain, prot);
@@ -56,7 +57,8 @@ __initfunc(unsigned long setup_io_pagetables(unsigned long start_mem))
physical += 1048576;
}
- prot = (mp->prot_read ? PTE_AP_READ : 0) | (mp->prot_write ? PTE_AP_WRITE : 0);
+ prot = (mp->prot_read ? L_PTE_USER : 0) | (mp->prot_write ? L_PTE_WRITE : 0)
+ | L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY;
while (length >= PAGE_SIZE) {
alloc_init_page(&start_mem, virtual, physical, mp->domain, prot);
diff --git a/arch/arm/mm/mm-ebsa285.c b/arch/arm/mm/mm-ebsa285.c
deleted file mode 100644
index a5b17c6b9..000000000
--- a/arch/arm/mm/mm-ebsa285.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * arch/arm/mm/mm-ebsa285.c
- *
- * Extra MM routines for the EBSA285 architecture
- *
- * Copyright (C) 1998 Russell King, Dave Gilbert.
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/proc/mm-init.h>
-#include <asm/dec21285.h>
-
-/*
- * This is to allow us to fiddle with the EEPROM
- * This entry will go away in time, once the fmu
- * can mmap() the flash.
- *
- * These ones are so that we can fiddle
- * with the various cards (eg VGA)
- * until we're happy with them...
- */
-#define MAPPING \
- { 0xd8000000, DC21285_FLASH, 0x00400000, DOMAIN_USER, 1, 1 }, /* EEPROM */ \
- { 0xdc000000, 0x7c000000, 0x00100000, DOMAIN_USER, 1, 1 }, /* VGA */ \
- { 0xe0000000, DC21285_PCI_MEM, 0x18000000, DOMAIN_USER, 1, 1 }, /* VGA */ \
- { 0xf8000000, DC21285_PCI_TYPE_0_CONFIG, 0x01000000, DOMAIN_IO , 0, 1 }, /* Type 0 Config */ \
- { 0xf9000000, DC21285_PCI_TYPE_1_CONFIG, 0x01000000, DOMAIN_IO , 0, 1 }, /* Type 1 Config */ \
- { PCI_IACK, DC21285_PCI_IACK, 0x01000000, DOMAIN_IO , 0, 1 }, /* PCI IACK */ \
- { 0xfd000000, DC21285_OUTBOUND_WRITE_FLUSH, 0x01000000, DOMAIN_IO , 0, 1 }, /* Out wrflsh */ \
- { 0xfe000000, DC21285_ARMCSR_BASE, 0x01000000, DOMAIN_IO , 0, 1 }, /* CSR */ \
- { 0xffe00000, DC21285_PCI_IO, 0x00100000, DOMAIN_IO , 0, 1 }, /* PCI I/O */ \
- { 0xfff00000, 0x40000000, 0x00100000, DOMAIN_IO , 0, 1 }, /* X-Bus */
-
-#include "mm-armv.c"
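Both the deleted file above and the mm-footbridge.c that replaces it below use the same idiom: the per-machine file defines a MAPPING macro listing its static I/O windows, then #includes the generic mm-armv.c, which expands MAPPING into the boot-time mapping table it walks (the mp-> loop in the mm-armv.c hunk earlier). The shape of the trick, with invented names:

    /* mm-machine.c: describe the windows... */
    #define MAPPING \
            { 0xe0000000, 0x10000000, 0x01000000 }, \
            { 0xf8000000, 0x20000000, 0x00100000 },

    #include "mm-generic.c"

    /* mm-generic.c: ...and consume them */
    struct map_desc { unsigned long virt, phys, len; };
    static struct map_desc io_desc[] = { MAPPING };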
diff --git a/arch/arm/mm/mm-footbridge.c b/arch/arm/mm/mm-footbridge.c
new file mode 100644
index 000000000..ec7e64c90
--- /dev/null
+++ b/arch/arm/mm/mm-footbridge.c
@@ -0,0 +1,91 @@
+/*
+ * arch/arm/mm/mm-ebsa285.c
+ *
+ * Extra MM routines for the EBSA285 architecture
+ *
+ * Copyright (C) 1998 Russell King, Dave Gilbert.
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/proc/mm-init.h>
+#include <asm/dec21285.h>
+
+/*
+ * The first entry allows us to fiddle with the EEPROM from user-space.
+ * This entry will go away in time, once the fmu32 can mmap() the
+ * flash. It can't at the moment.
+ *
+ * If you want to fiddle with PCI VGA cards from user space, then
+ * change the '0, 1 }' for the PCI MEM and PCI IO to '1, 1 }'
+ * You can then access the PCI bus at 0xe0000000 and 0xffe00000.
+ */
+
+#ifdef CONFIG_HOST_FOOTBRIDGE
+
+/*
+ * The mapping when the footbridge is in host mode.
+ */
+#define MAPPING \
+ { FLASH_BASE, DC21285_FLASH, FLASH_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCIMEM_BASE, DC21285_PCI_MEM, PCIMEM_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCICFG0_BASE, DC21285_PCI_TYPE_0_CONFIG, PCICFG0_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCICFG1_BASE, DC21285_PCI_TYPE_1_CONFIG, PCICFG1_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCIIACK_BASE, DC21285_PCI_IACK, PCIIACK_SIZE, DOMAIN_IO, 0, 1 }, \
+ { WFLUSH_BASE, DC21285_OUTBOUND_WRITE_FLUSH, WFLUSH_SIZE, DOMAIN_IO, 0, 1 }, \
+ { ARMCSR_BASE, DC21285_ARMCSR_BASE, ARMCSR_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCIO_BASE, DC21285_PCI_IO, PCIO_SIZE, DOMAIN_IO, 0, 1 }, \
+ { XBUS_BASE, 0x40000000, XBUS_SIZE, DOMAIN_IO, 0, 1 }
+
+#else
+
+/*
+ * These two functions convert virtual addresses to PCI addresses
+ * and PCI addresses to virtual addresses. Note that it is only
+ * legal to use these on memory obtained via get_free_page or
+ * kmalloc.
+ */
+unsigned long __virt_to_bus(unsigned long res)
+{
+#ifdef CONFIG_DEBUG_ERRORS
+ if (res < PAGE_OFFSET || res >= (unsigned long)high_memory) {
+ printk("__virt_to_phys: invalid virtual address 0x%08lx\n", res);
+ __backtrace();
+ }
+#endif
+ return (res - PAGE_OFFSET) + (*CSR_PCISDRAMBASE & 0xfffffff0);
+}
+
+unsigned long __bus_to_virt(unsigned long res)
+{
+ res -= (*CSR_PCISDRAMBASE & 0xfffffff0);
+ res += PAGE_OFFSET;
+
+#ifdef CONFIG_DEBUG_ERRORS
+ if (res < PAGE_OFFSET || res >= (unsigned long)high_memory) {
+ printk("__phys_to_virt: invalid virtual address 0x%08lx\n", res);
+ __backtrace();
+ }
+#endif
+ return res;
+}
+
+/*
+ * The mapping when the footbridge is in add-in mode.
+ */
+#define MAPPING \
+ { PCIO_BASE, DC21285_PCI_IO, PCIO_SIZE, DOMAIN_IO, 0, 1 }, \
+ { XBUS_BASE, 0x40000000, XBUS_SIZE, DOMAIN_IO, 0, 1 }, \
+ { ARMCSR_BASE, DC21285_ARMCSR_BASE, ARMCSR_SIZE, DOMAIN_IO, 0, 1 }, \
+ { WFLUSH_BASE, DC21285_OUTBOUND_WRITE_FLUSH, WFLUSH_SIZE, DOMAIN_IO, 0, 1 }, \
+ { FLASH_BASE, DC21285_FLASH, FLASH_SIZE, DOMAIN_IO, 0, 1 }, \
+ { PCIMEM_BASE, DC21285_PCI_MEM, PCIMEM_SIZE, DOMAIN_IO, 0, 1 }
+
+#endif
+
+#include "mm-armv.c"
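The add-in-mode helpers above are the interesting part: __virt_to_bus() and __bus_to_virt() shift between the CPU's direct-mapped view (PAGE_OFFSET-based) and the PCI bus view anchored at *CSR_PCISDRAMBASE, and, as the comment warns, they are only meaningful for memory from get_free_page or kmalloc. A sketch of the intended round trip on the driver side (the device-register accessor is hypothetical):

    static void give_buffer_to_device(void)
    {
            char *buf = kmalloc(256, GFP_KERNEL);
            unsigned long bus;

            if (!buf)
                    return;
            bus = __virt_to_bus((unsigned long)buf);
            my_device_set_dma_addr(bus);            /* hypothetical accessor */
            /* later, translating a bus address the device hands back: */
            buf = (char *)__bus_to_virt(bus);
    }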
diff --git a/arch/arm/mm/mm-vnc.c b/arch/arm/mm/mm-vnc.c
deleted file mode 100644
index 94e037485..000000000
--- a/arch/arm/mm/mm-vnc.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * arch/arm/mm/mm-vnc.c
- *
- * Extra MM routines for the Corel VNC architecture
- *
- * Copyright (C) 1998 Russell King
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/proc/mm-init.h>
-#include <asm/dec21285.h>
-
-/* Table describing the MMU translation mapping
- * mainly used to set up the I/O mappings.
- */
-#define MAPPING \
- { 0xd0000000, DC21285_FLASH, 0x00800000, DOMAIN_IO , 0, 1 }, /* Flash */ \
- { 0xe0000000, DC21285_PCI_MEM, 0x18000000, DOMAIN_IO , 0, 1 }, /* PCI Mem */ \
- { 0xf8000000, DC21285_PCI_TYPE_0_CONFIG, 0x01000000, DOMAIN_IO , 0, 1 }, /* Type 0 Config */ \
- { 0xf9000000, DC21285_PCI_TYPE_1_CONFIG, 0x01000000, DOMAIN_IO , 0, 1 }, /* Type 1 Config */ \
- { PCI_IACK, DC21285_PCI_IACK, 0x01000000, DOMAIN_IO , 0, 1 }, /* PCI IACK */ \
- { 0xfd000000, DC21285_OUTBOUND_WRITE_FLUSH, 0x01000000, DOMAIN_IO , 0, 1 }, /* Out wrflsh */ \
- { 0xfe000000, DC21285_ARMCSR_BASE, 0x01000000, DOMAIN_IO , 0, 1 }, /* CSR */ \
- { 0xffe00000, DC21285_PCI_IO, 0x00100000, DOMAIN_IO , 0, 1 }, /* PCI I/O */ \
-
-#include "mm-armv.c"
diff --git a/arch/arm/mm/proc-arm2,3.S b/arch/arm/mm/proc-arm2,3.S
index 86cab564a..263d79708 100644
--- a/arch/arm/mm/proc-arm2,3.S
+++ b/arch/arm/mm/proc-arm2,3.S
@@ -193,7 +193,7 @@ _arm2_3_data_abort:
movs pc, lr
_arm2_3_check_bugs:
- movs pc, lr
+ bics pc, lr, #0x04000000 @ Clear FIQ disable bit
/*
* Processor specific - ARM2
@@ -206,6 +206,8 @@ LC0: .word SYMBOL_NAME(page_nr)
* Params : prev Old task structure
* : next New task structure for process to run
*
+ * Returns : prev
+ *
* Purpose : Perform a task switch, saving the old processes state, and restoring
* the new.
*
@@ -218,15 +220,15 @@ _arm2_switch_to:
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
mov r4, r1
- add r0, r1, #TSS_MEMCMAP @ Remap MEMC
+ add r7, r1, #TSS_MEMCMAP @ Remap MEMC
ldr r1, LC0
ldr r1, [r1]
-1: ldmia r0!, {r2, r3, r5, r6}
+1: ldmia r7!, {r2, r3, r5, r6}
strb r2, [r2]
strb r3, [r3]
strb r5, [r5]
strb r6, [r6]
- ldmia r0!, {r2, r3, r5, r6}
+ ldmia r7!, {r2, r3, r5, r6}
strb r2, [r2]
strb r3, [r3]
strb r5, [r5]
@@ -318,6 +320,8 @@ _arm2_proc_fin: movs pc, lr
* Params : prev Old task structure
* : next New task structure for process to run
*
+ * Returns : prev
+ *
* Purpose : Perform a task switch, saving the old processes state, and restoring
* the new.
*
@@ -330,22 +334,22 @@ _arm3_switch_to:
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
mov r4, r1
- add r0, r1, #TSS_MEMCMAP @ Remap MEMC
+ add r7, r1, #TSS_MEMCMAP @ Remap MEMC
ldr r1, LC0
ldr r1, [r1]
-1: ldmia r0!, {r2, r3, r5, r6}
+1: ldmia r7!, {r2, r3, r5, r6}
strb r2, [r2]
strb r3, [r3]
strb r5, [r5]
strb r6, [r6]
- ldmia r0!, {r2, r3, r5, r6}
+ ldmia r7!, {r2, r3, r5, r6}
strb r2, [r2]
strb r3, [r3]
strb r5, [r5]
strb r6, [r6]
subs r1, r1, #8
bhi 1b
- mcr p15, 0, r0, c1, c0, 0 @ flush cache
+ mcr p15, 0, r7, c1, c0, 0 @ flush cache
ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
/*
* Function: arm3_remap_memc (struct task_struct *tsk)
diff --git a/arch/arm/mm/proc-arm6,7.S b/arch/arm/mm/proc-arm6,7.S
index b7119a330..b817ae2b4 100644
--- a/arch/arm/mm/proc-arm6,7.S
+++ b/arch/arm/mm/proc-arm6,7.S
@@ -52,13 +52,14 @@ _arm6_7_flush_tlb_area:
blt 1b
mov pc, lr
-@LC0: .word _current
/*
* Function: arm6_7_switch_to (struct task_struct *prev, struct task_struct *next)
*
* Params : prev Old task structure
* : next New task structure for process to run
*
+ * Returns : prev
+ *
* Purpose : Perform a task switch, saving the old processes state, and restoring
* the new.
*
@@ -72,15 +73,15 @@ _arm6_7_switch_to:
stmfd sp!, {ip} @ Save cpsr_SVC
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
- ldr r0, [r1, #TSK_ADDR_LIMIT]
- teq r0, #0
- moveq r0, #DOM_KERNELDOMAIN
- movne r0, #DOM_USERDOMAIN
- mcr p15, 0, r0, c3, c0 @ Set domain reg
- ldr r0, [r1, #TSS_MEMMAP] @ Page table pointer
+ ldr r2, [r1, #TSK_ADDR_LIMIT]
+ teq r2, #0
+ moveq r2, #DOM_KERNELDOMAIN
+ movne r2, #DOM_USERDOMAIN
+ mcr p15, 0, r2, c3, c0 @ Set domain reg
+ ldr r2, [r1, #TSS_MEMMAP] @ Page table pointer
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ flush cache
- mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
+ mcr p15, 0, r2, c2, c0, 0 @ update page table ptr
mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
ldmfd sp!, {ip}
msr spsr, ip @ Save tasks CPSR into SPSR for this return
@@ -369,6 +370,35 @@ _arm7_set_pmd: tst r1, #3
mov pc, lr
/*
+ * Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
+ * Params : r0 = Address to set
+ * : r1 = value to set
+ * Purpose : Set a PTE and flush it out of any WB cache
+ */
+ .align 5
+_arm6_7_set_pte:
+ str r1, [r0], #-1024 @ linux version
+
+ bic r2, r1, #0xff0
+ bic r2, r2, #3
+ orr r2, r2, #HPTE_TYPE_SMALL
+
+ tst r1, #LPTE_USER | LPTE_EXEC
+ orrne r2, r2, #HPTE_AP_READ
+
+ tst r1, #LPTE_WRITE
+ tstne r1, #LPTE_DIRTY
+ orrne r2, r2, #HPTE_AP_WRITE
+
+ tst r1, #LPTE_PRESENT
+ tstne r1, #LPTE_YOUNG
+ moveq r2, #0
+
+ str r2, [r0] @ hardware version
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+ mov pc, lr
+
+/*
* Function: _arm6_7_reset
*
* Notes : This sets up everything for a reset
@@ -405,8 +435,12 @@ ENTRY(arm6_processor_functions)
.word _arm6_7_flush_tlb_all @ 44
.word _arm6_7_flush_tlb_area @ 48
.word _arm6_set_pmd @ 52
- .word _arm6_7_reset @ 54
- .word _arm6_7_flush_cache @ 58
+ .word _arm6_7_set_pte @ 56
+ .word _arm6_7_reset @ 60
+ .word _arm6_7_flush_cache @ 64
+
+ .word _arm6_7_flush_cache @ 68
+ .word _arm6_7_flush_cache @ 72
/*
* Purpose : Function pointers used to access above functions - all calls
@@ -431,8 +465,9 @@ ENTRY(arm7_processor_functions)
.word _arm6_7_flush_tlb_all @ 44
.word _arm6_7_flush_tlb_area @ 48
.word _arm7_set_pmd @ 52
- .word _arm6_7_reset @ 56
- .word _arm6_7_flush_cache @ 60
-
+ .word _arm6_7_set_pte @ 56
+ .word _arm6_7_reset @ 60
.word _arm6_7_flush_cache @ 64
+
.word _arm6_7_flush_cache @ 68
+ .word _arm6_7_flush_cache @ 72
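The @ 44 ... @ 72 annotations are byte offsets into the per-processor function vector; C callers reach this table through a struct of function pointers, so inserting the new set_pte slot at offset 56 pushes every later entry down, which is exactly what the renumbering of both tables shows. An illustrative C-side shape (member names and the elided leading entries are invented, so the literal offsets are for reading along, not for compiling against):

    struct processor_vec {
            /* ... entries at offsets 0-40 elided ... */
            void (*flush_tlb_all)(void);                        /* @ 44 */
            void (*flush_tlb_area)(unsigned long start,
                                   unsigned long end, int flags); /* @ 48 */
            void (*set_pmd)(void *pmdp, unsigned long pmd);     /* @ 52 */
            void (*set_pte)(void *ptep, unsigned long pte);     /* @ 56, new */
            void (*reset)(void);                                /* @ 60 */
            void (*flush_cache)(void);                          /* @ 64 */
    };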
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 221797862..ff55c8ffa 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/hardware.h>
#include "../lib/constants.h"
/* This is the maximum size of an area which will be flushed. If the area
@@ -21,7 +22,6 @@ Lclean_switch: .long 0
/*
* Function: sa110_flush_cache_all (void)
- *
* Purpose : Flush all cache lines
*/
.align 5
@@ -33,7 +33,7 @@ _sa110_flush_cache_all_r2:
ands r1, r1, #1
eor r1, r1, #1
str r1, [r3]
- ldr ip, =0xdf000000
+ ldr ip, =FLUSH_BASE
addne ip, ip, #32768
add r1, ip, #16384 @ only necessary for 16k
1: ldr r3, [ip], #32
@@ -47,11 +47,9 @@ _sa110_flush_cache_all_r2:
/*
* Function: sa110_flush_cache_area (unsigned long address, int end, int flags)
- *
* Params : address Area start address
* : end Area end address
* : flags b0 = I cache as well
- *
* Purpose : clean & flush all cache lines associated with this area of memory
*/
.align 5
@@ -74,10 +72,8 @@ _sa110_flush_cache_area:
/*
* Function: sa110_cache_wback_area(unsigned long address, unsigned long end)
- *
* Params : address Area start address
* : end Area end address
- *
* Purpose : ensure all dirty cachelines in the specified area have been
* written out to memory (for DMA)
*/
@@ -99,13 +95,10 @@ _sa110_cache_wback_area:
/*
* Function: sa110_cache_purge_area(unsigned long address, unsigned long end)
- *
* Params : address Area start address
* : end Area end address
- *
* Purpose : throw away all D-cached data in specified region without
- * an obligation to write it ack.
- *
+ * an obligation to write it back.
* Note : Must clean the D-cached entries around the boundaries if the
* start and/or end address are not cache aligned.
*/
@@ -124,9 +117,7 @@ _sa110_cache_purge_area:
/*
* Function: sa110_flush_cache_entry (unsigned long address)
- *
* Params : address Address of cache line to flush
- *
* Purpose : clean & flush an entry
*/
.align 5
@@ -138,24 +129,23 @@ _sa110_flush_cache_entry:
mov pc, lr
/*
- * Function: sa110_flush_cache_pte (unsigned long address)
- *
+ * Function: sa110_clean_cache_area(unsigned long start, unsigned long size)
* Params : address Address of cache line to clean
- *
* Purpose : Ensure that physical memory reflects cache at this location
* for page table purposes.
*/
-_sa110_flush_cache_pte:
- mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+_sa110_clean_cache_area:
+1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+ add r0, r0, #32
+ subs r1, r1, #32
+ bhi 1b
mov pc, lr
/*
* Function: sa110_flush_ram_page (unsigned long page)
- *
* Params : address Area start address
* : size size of area
* : flags b0 = I cache as well
- *
* Purpose : clean & flush all cache lines associated with this area of memory
*/
.align 5
@@ -176,7 +166,6 @@ _sa110_flush_ram_page:
/*
* Function: sa110_flush_tlb_all (void)
- *
* Purpose : flush all TLB entries in all caches
*/
.align 5
@@ -188,11 +177,9 @@ _sa110_flush_tlb_all:
/*
* Function: sa110_flush_tlb_area (unsigned long address, unsigned long end, int flags)
- *
* Params : address Area start address
* : end Area end address
* : flags b0 = I cache as well
- *
* Purpose : flush a TLB entry
*/
.align 5
@@ -212,22 +199,21 @@ _sa110_flush_tlb_area:
.align 5
_sa110_flush_icache_area:
- mov r3, #0
1: mcr p15, 0, r0, c7, c10, 1 @ Clean D entry
add r0, r0, #32
- cmp r0, r1
- blt 1b
+ subs r1, r1, #32
+ bhi 1b
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c7, c5, 0 @ flush I cache
mov pc, lr
/*
* Function: sa110_switch_to (struct task_struct *prev, struct task_struct *next)
- *
* Params : prev Old task structure
* : next New task structure for process to run
- *
+ * Returns : prev
* Purpose : Perform a task switch, saving the old process's state, and restoring
* the new.
- *
* Notes : We don't fiddle with the FP registers here - we postpone this until
* the new task actually uses FP. This way, we don't swap FP for tasks
* that do not require it.
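The reworked sa110_flush_icache_area above switches the loop from an end-address compare to a size countdown and, more importantly, drains the write buffer before invalidating the I-cache; without the drain, cleaned data could still be sitting in the write buffer when instruction fetch restarts. The canonical three-step sequence, sketched with the same hypothetical cp15 wrappers as before:

    extern void mcr_clean_dentry(unsigned long addr);   /* hypothetical       */
    extern void mcr_drain_write_buffer(void);           /* cp15 wrappers      */
    extern void mcr_flush_icache(void);

    static void sa110_flush_icache_area(unsigned long start, unsigned long size)
    {
        long remaining = (long)size;

        do {
            mcr_clean_dentry(start);     /* 1. push new code out of D-cache   */
            start += 32;
            remaining -= 32;
        } while (remaining > 0);

        mcr_drain_write_buffer();        /* 2. make the writes reach memory   */
        mcr_flush_icache();              /* 3. force refetch from memory      */
    }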
@@ -237,20 +223,30 @@ _sa110_switch_to:
stmfd sp!, {r4 - r9, fp, lr} @ Store most regs on stack
mrs ip, cpsr
stmfd sp!, {ip} @ Save cpsr_SVC
+ ldr r2, [r0, #TSS_MEMMAP] @ Get old page tables
str sp, [r0, #TSS_SAVE] @ Save sp_SVC
ldr sp, [r1, #TSS_SAVE] @ Get saved sp_SVC
- ldr r0, [r1, #TSK_ADDR_LIMIT]
- teq r0, #0
- moveq r0, #DOM_KERNELDOMAIN
- movne r0, #DOM_USERDOMAIN
- mcr p15, 0, r0, c3, c0 @ Set segment
- ldr r0, [r1, #TSS_MEMMAP] @ Page table pointer
+ ldr r4, [r1, #TSK_ADDR_LIMIT]
+ teq r4, #0
+ moveq r4, #DOM_KERNELDOMAIN
+ movne r4, #DOM_USERDOMAIN
+ mcr p15, 0, r4, c3, c0 @ Set segment
+ ldr r4, [r1, #TSS_MEMMAP] @ Page table pointer
+/*
+ * Flushing the cache is nightmarishly slow, so we take any excuse
+ * to get out of it. If the old page table is the same as the new,
+ * this is a CLONE_VM relative of the old task and there is no need
+ * to flush. The overhead of the tests isn't even on the radar
+ * compared to the cost of the flush itself.
+ */
+ teq r4, r2
+ beq 2f
ldr r3, =Lclean_switch
ldr r2, [r3]
ands r2, r2, #1
eor r2, r2, #1
str r2, [r3]
- ldr r2, =0xdf000000
+ ldr r2, =FLUSH_BASE
addne r2, r2, #32768
add r1, r2, #16384 @ only necessary for 16k
1: ldr r3, [r2], #32
@@ -259,19 +255,16 @@ _sa110_switch_to:
mov r1, #0
mcr p15, 0, r1, c7, c5, 0 @ flush I cache
mcr p15, 0, r1, c7, c10, 4 @ drain WB
- mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
+ mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
mcr p15, 0, r1, c8, c7, 0 @ flush TLBs
- ldmfd sp!, {ip}
+2: ldmfd sp!, {ip}
msr spsr, ip @ Save task's CPSR into SPSR for this return
ldmfd sp!, {r4 - r9, fp, pc}^ @ Load all regs saved previously
/*
* Function: sa110_data_abort ()
- *
* Params : r0 = address of aborted instruction
- *
* Purpose : obtain information about current aborted instruction
- *
* Returns : r0 = address of abort
* : r1 = FSR
* : r2 != 0 if writing
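The comment in the first switch_to hunk explains the early-out the second hunk lands on: the saved TSS_MEMMAP of the outgoing task is compared with the incoming one, and the whole flush-and-reload sequence is branched around when they match. The shape of it in C, with an illustrative task layout and stand-in helpers:

    struct task {
        unsigned long *memmap;           /* saved page-table pointer          */
    };

    extern void flush_cache_all(void);   /* stand-ins for the cp15 sequences  */
    extern void set_ttb(unsigned long *pgd);
    extern void flush_tlb_all(void);

    static void switch_address_space(struct task *prev, struct task *next)
    {
        if (next->memmap == prev->memmap)
            return;                      /* CLONE_VM relative: nothing to do  */

        flush_cache_all();               /* the expensive part                */
        set_ttb(next->memmap);           /* load page table pointer           */
        flush_tlb_all();                 /* old translations are now stale    */
    }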
@@ -288,12 +281,10 @@ _sa110_data_abort:
mov pc, lr
/*
- * Function: sa110_set_pmd ()
- *
+ * Function: sa110_set_pmd(pmd_t *pmdp, pmd_t pmd)
* Params : r0 = Address to set
* : r1 = value to set
- *
- * Purpose : Set a PMD and flush it out of any WB cache
+ * Purpose : Set a PMD and flush it out
*/
.align 5
_sa110_set_pmd: str r1, [r0]
@@ -301,23 +292,51 @@ _sa110_set_pmd: str r1, [r0]
mov pc, lr
/*
+ * Function: sa110_set_pte(pte_t *ptep, pte_t pte)
+ * Params : r0 = Address to set
+ * : r1 = value to set
+ * Purpose : Set a PTE and flush it out
+ */
+ .align 5
+_sa110_set_pte: str r1, [r0], #-1024 @ linux version
+
+ eor r1, r1, #LPTE_PRESENT | LPTE_YOUNG | LPTE_WRITE | LPTE_DIRTY
+
+ bic r2, r1, #0xff0
+ bic r2, r2, #3
+ orr r2, r2, #HPTE_TYPE_SMALL
+
+ tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
+ orrne r2, r2, #HPTE_AP_READ
+
+ tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
+ orreq r2, r2, #HPTE_AP_WRITE
+
+ tst r1, #LPTE_PRESENT | LPTE_YOUNG @ Present and Young?
+ movne r2, #0
+
+ str r2, [r0] @ hardware version
+ mov r0, r0
+ mcr p15, 0, r0, c7, c10, 1 @ clean D entry (drain is done by TLB fns)
+ mov pc, lr
+
+/*
* Function: sa110_check_bugs (void)
* : sa110_proc_init (void)
* : sa110_proc_fin (void)
- *
* Notes : This processor does not require these
*/
_sa110_check_bugs:
mrs ip, cpsr
bic ip, ip, #F_BIT
msr cpsr, ip
+
_sa110_proc_init:
_sa110_proc_fin:
mov pc, lr
/*
* Function: sa110_reset
- *
* Notes : This sets up everything for a reset
*/
_sa110_reset: mrs r1, cpsr
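The new sa110_set_pte above is the heart of this patch: Linux wants present/young/dirty semantics that the ARM hardware page tables cannot express, so each PTE is kept twice, the Linux view at the address written first and a hardware-format copy 1024 bytes below it (the post-indexed str). The eor merely inverts four flag bits so the later tst tests can use eq/ne uniformly; stripped of that trick, the translation looks roughly like this, with placeholder bit values standing in for the real pgtable definitions:

    #define LPTE_PRESENT (1 << 0)   /* placeholder bit positions - the real  */
    #define LPTE_YOUNG   (1 << 1)   /* values live in the ARM pgtable        */
    #define LPTE_USER    (1 << 4)   /* headers; all are assumed to fall      */
    #define LPTE_WRITE   (1 << 5)   /* inside the 0xff3 mask cleared below   */
    #define LPTE_DIRTY   (1 << 6)
    #define LPTE_EXEC    (1 << 9)

    #define HPTE_TYPE_SMALL 2       /* placeholder hardware descriptor bits; */
    #define HPTE_AP_READ  0x0aa0    /* illustrative values only              */
    #define HPTE_AP_WRITE 0x0550

    static unsigned long linux_pte_to_hw(unsigned long pte)
    {
        unsigned long hw = (pte & ~0xff3UL) | HPTE_TYPE_SMALL;

        if (pte & (LPTE_USER | LPTE_EXEC))
            hw |= HPTE_AP_READ;          /* user-visible mappings readable    */

        if ((pte & (LPTE_WRITE | LPTE_DIRTY)) == (LPTE_WRITE | LPTE_DIRTY))
            hw |= HPTE_AP_WRITE;         /* writable only once dirty          */

        if ((pte & (LPTE_PRESENT | LPTE_YOUNG)) != (LPTE_PRESENT | LPTE_YOUNG))
            hw = 0;                      /* fault so the kernel can mark the  */
                                         /* page present/young                */
        return hw;
    }

Making a clean page read-only until it is written, and a not-young page fault entirely, is what lets the fault handlers emulate the dirty and accessed bits in software.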
@@ -350,14 +369,15 @@ ENTRY(sa110_processor_functions)
.word _sa110_flush_cache_all @ 24
.word _sa110_flush_cache_area @ 28
.word _sa110_flush_cache_entry @ 32
- .word _sa110_flush_cache_pte @ 36
+ .word _sa110_clean_cache_area @ 36
.word _sa110_flush_ram_page @ 40
.word _sa110_flush_tlb_all @ 44
.word _sa110_flush_tlb_area @ 48
.word _sa110_set_pmd @ 52
- .word _sa110_reset @ 56
- .word _sa110_flush_icache_area @ 60
+ .word _sa110_set_pte @ 56
+ .word _sa110_reset @ 60
+ .word _sa110_flush_icache_area @ 64
- .word _sa110_cache_wback_area @ 64
- .word _sa110_cache_purge_area @ 68
+ .word _sa110_cache_wback_area @ 68
+ .word _sa110_cache_purge_area @ 72
diff --git a/arch/arm/mm/small_page.c b/arch/arm/mm/small_page.c
index 2f8cad9a3..6bdc6cfc7 100644
--- a/arch/arm/mm/small_page.c
+++ b/arch/arm/mm/small_page.c
@@ -5,6 +5,8 @@
*
* Changelog:
* 26/01/1996 RMK Cleaned up various areas to make little more generic
+ * 07/02/1999 RMK Support added for 16K and 32K page sizes
+ * containing 8K blocks
*/
#include <linux/signal.h>
@@ -19,21 +21,32 @@
#include <linux/swap.h>
#include <linux/smp.h>
-#define SMALL_ALLOC_SHIFT (10)
+#if PAGE_SIZE == 4096
+/* 2K blocks */
+#define SMALL_ALLOC_SHIFT (11)
+#define NAME(x) x##_2k
+#elif PAGE_SIZE == 32768 || PAGE_SIZE == 16384
+/* 8K blocks */
+#define SMALL_ALLOC_SHIFT (13)
+#define NAME(x) x##_8k
+#endif
+
#define SMALL_ALLOC_SIZE (1 << SMALL_ALLOC_SHIFT)
#define NR_BLOCKS (PAGE_SIZE / SMALL_ALLOC_SIZE)
+#define BLOCK_MASK ((1 << NR_BLOCKS) - 1)
-#if NR_BLOCKS != 4
-#error I only support 4 blocks per page!
-#endif
-
-#define USED(pg) ((atomic_read(&(pg)->count) >> 8) & 15)
+#define USED(pg) ((atomic_read(&(pg)->count) >> 8) & BLOCK_MASK)
#define SET_USED(pg,off) (atomic_read(&(pg)->count) |= 256 << off)
#define CLEAR_USED(pg,off) (atomic_read(&(pg)->count) &= ~(256 << off))
+#define ALL_USED BLOCK_MASK
#define IS_FREE(pg,off) (!(atomic_read(&(pg)->count) & (256 << off)))
-#define PAGE_PTR(page,block) ((struct free_small_page *)((page) + \
+#define SM_PAGE_PTR(page,block) ((struct free_small_page *)((page) + \
((block) << SMALL_ALLOC_SHIFT)))
+#if NR_BLOCKS != 2 && NR_BLOCKS != 4
+#error I only support 2 or 4 blocks per page
+#endif
+
struct free_small_page {
unsigned long next;
unsigned long prev;
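Block bookkeeping lives in the upper bits of page->count: bit (8 + n) marks block n in use, USED() extracts the whole mask, and ALL_USED now scales with NR_BLOCKS instead of being hard-wired to 15. A standalone userspace model of the macros, runnable as-is:

    #include <stdio.h>

    #define NR_BLOCKS  4                       /* e.g. 8K blocks, 32K pages  */
    #define BLOCK_MASK ((1 << NR_BLOCKS) - 1)

    static unsigned int count;                 /* stands in for page->count  */

    #define USED()          ((count >> 8) & BLOCK_MASK)
    #define SET_USED(off)   (count |= 256u << (off))
    #define CLEAR_USED(off) (count &= ~(256u << (off)))
    #define IS_FREE(off)    (!(count & (256u << (off))))

    int main(void)
    {
        SET_USED(0);
        SET_USED(2);
        printf("mask=%x block1 free=%d\n", USED(), IS_FREE(1)); /* 5, 1 */
        CLEAR_USED(2);
        printf("mask=%x\n", USED());                            /* 1    */
        return 0;
    }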
@@ -52,6 +65,7 @@ static unsigned char offsets[1<<NR_BLOCKS] = {
1, /* 0001 */
0, /* 0010 */
2, /* 0011 */
+#if NR_BLOCKS == 4
0, /* 0100 */
1, /* 0101 */
0, /* 0110 */
@@ -64,6 +78,7 @@ static unsigned char offsets[1<<NR_BLOCKS] = {
1, /* 1101 */
0, /* 1110 */
4 /* 1111 */
+#endif
};
static inline void clear_page_links(unsigned long page)
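The offsets[] table is a precomputed find-first-zero-bit over the usage mask, with the all-used entry holding NR_BLOCKS as a sentinel; the #if added here trims it to four entries when a page only holds two blocks. The equivalent computation:

    /* What each offsets[mask] entry encodes; cf. offsets[0x5] == 1 above:
     * with blocks 0 and 2 taken (binary 0101), block 1 is the first free. */
    static unsigned char first_free_block(unsigned int used_mask, int nr_blocks)
    {
        int i;

        for (i = 0; i < nr_blocks; i++)
            if (!(used_mask & (1u << i)))
                return i;
        return nr_blocks;        /* ALL_USED sentinel, cf. offsets[15] == 4 */
    }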
@@ -72,7 +87,7 @@ static inline void clear_page_links(unsigned long page)
int i;
for (i = 0; i < NR_BLOCKS; i++) {
- fsp = PAGE_PTR(page, i);
+ fsp = SM_PAGE_PTR(page, i);
fsp->next = fsp->prev = 0;
}
}
@@ -90,7 +105,7 @@ static inline void set_page_links_prev(unsigned long page, unsigned long prev)
for (i = 0; i < NR_BLOCKS; i++) {
if (mask & (1 << i))
continue;
- fsp = PAGE_PTR(page, i);
+ fsp = SM_PAGE_PTR(page, i);
fsp->prev = prev;
}
}
@@ -108,12 +123,12 @@ static inline void set_page_links_next(unsigned long page, unsigned long next)
for (i = 0; i < NR_BLOCKS; i++) {
if (mask & (1 << i))
continue;
- fsp = PAGE_PTR(page, i);
+ fsp = SM_PAGE_PTR(page, i);
fsp->next = next;
}
}
-unsigned long get_small_page(int priority)
+unsigned long NAME(get_page)(int priority)
{
struct free_small_page *fsp;
unsigned long new_page;
@@ -129,8 +144,8 @@ again:
page = mem_map + MAP_NR(small_page_ptr);
offset = offsets[USED(page)];
SET_USED(page, offset);
- new_page = (unsigned long)PAGE_PTR(small_page_ptr, offset);
- if (USED(page) == 15) {
+ new_page = (unsigned long)SM_PAGE_PTR(small_page_ptr, offset);
+ if (USED(page) == ALL_USED) {
fsp = (struct free_small_page *)new_page;
set_page_links_prev (fsp->next, 0);
small_page_ptr = fsp->next;
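The allocation fast path above picks the lowest free block via offsets[], marks it used, and unlinks the page from the free list once the mask hits ALL_USED. Schematically, reusing the macros from this file and a hypothetical unlink_page() standing in for the free-list edits:

    static unsigned long get_block(struct page *page, unsigned long page_addr)
    {
        int offset = offsets[USED(page)];        /* lowest free block         */

        SET_USED(page, offset);                  /* claim it                  */
        if (USED(page) == ALL_USED)              /* page full: take it off    */
            unlink_page(page_addr);              /* the free list             */

        return (unsigned long)SM_PAGE_PTR(page_addr, offset);
    }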
@@ -156,30 +171,31 @@ need_new_page:
goto again;
}
-void free_small_page(unsigned long spage)
+void NAME(free_page)(unsigned long spage)
{
struct free_small_page *ofsp, *cfsp;
unsigned long flags;
struct page *page;
int offset, oldoffset;
+ if (!spage)
+ goto none;
+
offset = (spage >> SMALL_ALLOC_SHIFT) & (NR_BLOCKS - 1);
spage -= offset << SMALL_ALLOC_SHIFT;
page = mem_map + MAP_NR(spage);
- if (!PageReserved(page) || !USED(page)) {
- printk ("Trying to free non-small page from %p\n", __builtin_return_address(0));
- return;
- }
- if (IS_FREE(page, offset)) {
- printk ("Trying to free free small page from %p\n", __builtin_return_address(0));
- return;
- }
+ if (!PageReserved(page) || !USED(page))
+ goto non_small;
+
+ if (IS_FREE(page, offset))
+ goto free;
+
save_flags_cli (flags);
oldoffset = offsets[USED(page)];
CLEAR_USED(page, offset);
- ofsp = PAGE_PTR(spage, oldoffset);
- cfsp = PAGE_PTR(spage, offset);
+ ofsp = SM_PAGE_PTR(spage, oldoffset);
+ cfsp = SM_PAGE_PTR(spage, offset);
if (oldoffset == NR_BLOCKS) { /* going from totally used to mostly used */
cfsp->prev = 0;
@@ -197,4 +213,13 @@ void free_small_page(unsigned long spage)
} else
*cfsp = *ofsp;
restore_flags(flags);
+ return;
+
+non_small:
+ printk ("Trying to free non-small page from %p\n", __builtin_return_address(0));
+ return;
+free:
+ printk ("Trying to free free small page from %p\n", __builtin_return_address(0));
+none:
+ return;
}
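The tail of the patch restructures the free path's error handling into the kernel's usual goto style: validations branch forward, the common path runs straight through to restore_flags() and return, and the printk()s sit out of line at the bottom. The control-flow skeleton, with hypothetical predicates standing in for the real checks:

    extern int looks_like_small_page(unsigned long addr);  /* hypothetical   */
    extern int already_free(unsigned long addr);           /* stand-ins      */
    extern void do_the_free(unsigned long addr);

    void free_block(unsigned long addr)
    {
        if (!addr)
            goto none;                   /* freeing 0 is silently ignored    */
        if (!looks_like_small_page(addr))
            goto non_small;
        if (already_free(addr))
            goto free;

        do_the_free(addr);               /* fast path stays branch-free      */
        return;

    non_small:
        printk("Trying to free non-small page from %p\n",
               __builtin_return_address(0));
        return;
    free:
        printk("Trying to free free small page from %p\n",
               __builtin_return_address(0));
    none:
        return;
    }

The new NULL check also makes freeing a zero address a quiet no-op, matching the behaviour of the generic page allocator.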