author     Ralf Baechle <ralf@linux-mips.org>    2001-03-09 20:33:35 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2001-03-09 20:33:35 +0000
commit     116674acc97ba75a720329996877077d988443a2 (patch)
tree       6a3f2ff0b612ae2ee8a3f3509370c9e6333a53b3 /arch/cris/mm
parent     71118c319fcae4a138f16e35b4f7e0a6d53ce2ca (diff)
Merge with Linux 2.4.2.
Diffstat (limited to 'arch/cris/mm')
-rw-r--r--   arch/cris/mm/Makefile     13
-rw-r--r--   arch/cris/mm/extable.c    55
-rw-r--r--   arch/cris/mm/fault.c     390
-rw-r--r--   arch/cris/mm/init.c      506
-rw-r--r--   arch/cris/mm/tlb.c       301
5 files changed, 1265 insertions, 0 deletions
diff --git a/arch/cris/mm/Makefile b/arch/cris/mm/Makefile
new file mode 100644
index 000000000..d1d21a7b7
--- /dev/null
+++ b/arch/cris/mm/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the linux cris-specific parts of the memory manager.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definition is now in the main makefile...
+
+O_TARGET := mm.o
+obj-y    := init.o fault.o tlb.o extable.o
+
+include $(TOPDIR)/Rules.make
diff --git a/arch/cris/mm/extable.c b/arch/cris/mm/extable.c
new file mode 100644
index 000000000..a4cf00f14
--- /dev/null
+++ b/arch/cris/mm/extable.c
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/cris/mm/extable.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+extern const struct exception_table_entry _start___ex_table[];
+extern const struct exception_table_entry _stop___ex_table[];
+
+static inline unsigned long
+search_one_table(const struct exception_table_entry *first,
+                 const struct exception_table_entry *last,
+                 unsigned long value)
+{
+        while (first <= last) {
+                const struct exception_table_entry *mid;
+                long diff;
+
+                mid = (last - first) / 2 + first;
+                diff = mid->insn - value;
+                if (diff == 0)
+                        return mid->fixup;
+                else if (diff < 0)
+                        first = mid+1;
+                else
+                        last = mid-1;
+        }
+        return 0;
+}
+
+unsigned long
+search_exception_table(unsigned long addr)
+{
+        unsigned long ret;
+
+#ifndef CONFIG_MODULES
+        /* There is only the kernel to search.  */
+        ret = search_one_table(_start___ex_table, _stop___ex_table-1, addr);
+        if (ret) return ret;
+#else
+        /* The kernel is the last "module" -- no need to treat it special.  */
+        struct module *mp;
+        for (mp = module_list; mp != NULL; mp = mp->next) {
+                if (mp->ex_table_start == NULL)
+                        continue;
+                ret = search_one_table(mp->ex_table_start,
+                                       mp->ex_table_end - 1, addr);
+                if (ret) return ret;
+        }
+#endif
+
+        return 0;
+}
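search_one_table() above is a textbook binary search over entries sorted by insn, returning the fixup address or 0 when no entry matches. For reference, the same idea as a standalone user-space sketch — struct and function names here are invented for the example, not kernel API:

#include <stdio.h>

/* mirrors struct exception_table_entry: the faulting instruction
 * address and the fixup address to jump to instead */
struct ex_entry { unsigned long insn; unsigned long fixup; };

/* binary search over a table sorted by insn; returns the fixup
 * address, or 0 when the address has no entry */
static unsigned long ex_search(const struct ex_entry *first,
                               const struct ex_entry *last,
                               unsigned long value)
{
        while (first <= last) {
                const struct ex_entry *mid = first + (last - first) / 2;
                if (mid->insn == value)
                        return mid->fixup;
                if (mid->insn < value)
                        first = mid + 1;
                else
                        last = mid - 1;
        }
        return 0;
}

int main(void)
{
        struct ex_entry table[] = {
                { 0x1000, 0x2000 }, { 0x1010, 0x2010 }, { 0x1020, 0x2020 },
        };
        /* note the 'last' argument is inclusive, as in the kernel code */
        printf("fixup: 0x%lx\n", ex_search(table, table + 2, 0x1010));
        return 0;   /* prints fixup: 0x2010 */
}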
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
new file mode 100644
index 000000000..a4bb237b4
--- /dev/null
+++ b/arch/cris/mm/fault.c
@@ -0,0 +1,390 @@
+/*
+ * linux/arch/cris/mm/fault.c
+ *
+ * Copyright (C) 2000 Axis Communications AB
+ *
+ * Authors: Bjorn Wesen
+ *
+ * $Log: fault.c,v $
+ * Revision 1.8  2000/11/22 14:45:31  bjornw
+ * * 2.4.0-test10 removed the set_pgdir instantaneous kernel global mapping
+ *   into all processes. Instead we fill in the missing PTE entries on demand.
+ *
+ * Revision 1.7  2000/11/21 16:39:09  bjornw
+ * fixup switches frametype
+ *
+ * Revision 1.6  2000/11/17 16:54:08  bjornw
+ * More detailed siginfo reporting
+ *
+ *
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/svinto.h>
+
+extern void die_if_kernel(const char *,struct pt_regs *,long);
+
+asmlinkage void do_invalid_op (struct pt_regs *, unsigned long);
+asmlinkage void do_page_fault(unsigned long address, struct pt_regs *regs,
+                              int error_code);
+
+/* debug of low-level TLB reload */
+#define D(x)
+/* debug of higher-level faults */
+#define DPG(x)
+
+/* fast TLB-fill fault handler */
+
+void
+handle_mmu_bus_fault(struct pt_regs *regs)
+{
+        int cause, select;
+        int index;
+        int page_id;
+        int miss, we, acc, inv;
+        struct mm_struct *mm = current->active_mm;
+        pmd_t *pmd;
+        pte_t pte;
+        int errcode = 0;
+        unsigned long address;
+
+        cause = *R_MMU_CAUSE;
+        select = *R_TLB_SELECT;
+
+        address = cause & PAGE_MASK; /* get faulting address */
+
+        page_id = IO_EXTRACT(R_MMU_CAUSE, page_id, cause);
+        miss = IO_EXTRACT(R_MMU_CAUSE, miss_excp, cause);
+        we = IO_EXTRACT(R_MMU_CAUSE, we_excp, cause);
+        acc = IO_EXTRACT(R_MMU_CAUSE, acc_excp, cause);
+        inv = IO_EXTRACT(R_MMU_CAUSE, inv_excp, cause);
+        index = IO_EXTRACT(R_TLB_SELECT, index, select);
+
+        D(printk("bus_fault from IRP 0x%x: addr 0x%x, miss %d, inv %d, we %d, acc %d, "
+                 "idx %d pid %d\n",
+                 regs->irp, address, miss, inv, we, acc, index, page_id));
+
+        /* for a miss, we need to reload the TLB entry */
+
+        if(miss) {
+
+                /* see if the pte exists at all */
+
+                pmd = (pmd_t *)pgd_offset(mm, address);
+                if(pmd_none(*pmd))
+                        goto dofault;
+                if(pmd_bad(*pmd)) {
+                        printk("bad pgdir entry 0x%x at 0x%x\n", *pmd, pmd);
+                        pmd_clear(pmd);
+                        return;
+                }
+                pte = *pte_offset(pmd, address);
+                if(!pte_present(pte))
+                        goto dofault;
+
+                D(printk(" found pte %x pg %x ", pte_val(pte), pte_page(pte)));
+                D(
+                {
+                        if(pte_val(pte) & _PAGE_SILENT_WRITE)
+                                printk("Silent-W ");
+                        if(pte_val(pte) & _PAGE_KERNEL)
+                                printk("Kernel ");
+                        if(pte_val(pte) & _PAGE_SILENT_READ)
+                                printk("Silent-R ");
+                        if(pte_val(pte) & _PAGE_GLOBAL)
+                                printk("Global ");
+                        if(pte_val(pte) & _PAGE_PRESENT)
+                                printk("Present ");
+                        if(pte_val(pte) & _PAGE_ACCESSED)
+                                printk("Accessed ");
+                        if(pte_val(pte) & _PAGE_MODIFIED)
+                                printk("Modified ");
+                        if(pte_val(pte) & _PAGE_READ)
+                                printk("Readable ");
+                        if(pte_val(pte) & _PAGE_WRITE)
+                                printk("Writeable ");
+                        printk("\n");
+                });
+
+                /* load up the chosen TLB entry
+                 * this assumes the pte format is the same as the TLB_LO layout.
+                 *
+                 * the write to R_TLB_LO also writes the vpn and page_id fields from
+                 * R_MMU_CAUSE, which we in this case obviously want to keep
+                 */
+
+                *R_TLB_LO = pte_val(pte);
+
+                return;
+        }
+
+        errcode = 0x01 | (we << 1);
+
+ dofault:
+        /* leave it to the MM system fault handler below */
+        D(printk("do_page_fault %p errcode %d\n", address, errcode));
+        do_page_fault(address, regs, errcode);
+}
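handle_mmu_bus_fault() is the software TLB-refill path: on a miss it walks the page table and, when a present PTE exists, writes it straight into R_TLB_LO; everything else falls through to do_page_fault(). Below is a hardware-free sketch of that walk, assuming a toy two-level table with 4 KB pages and a 10/10/12 address split — all names and the layout are illustrative, not the CRIS format:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 1024
#define PTE_PRESENT  0x1u

typedef uint32_t pte_entry;
typedef struct { pte_entry *ptes; } pgd_entry;  /* toy top-level slot */

/* walk the toy table; returns the pte (what would be loaded into the
 * TLB) or 0 when the lookup must fall back to the full fault path */
static pte_entry tlb_refill_lookup(pgd_entry *pgd, uint32_t addr)
{
        pgd_entry *dir = &pgd[addr >> (PAGE_SHIFT + 10)];  /* top 10 bits */
        pte_entry pte;

        if (dir->ptes == NULL)
                return 0;                 /* no pte table: take the slow fault */
        pte = dir->ptes[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
        if (!(pte & PTE_PRESENT))
                return 0;                 /* not present: take the slow fault */
        return pte;
}

int main(void)
{
        static pte_entry ptes[PTRS_PER_PTE];
        static pgd_entry pgd[1024];
        uint32_t addr = 0x00401000;       /* pgd slot 1, pte slot 1 */

        pgd[addr >> 22].ptes = ptes;
        ptes[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)] = 0xabcd0000u | PTE_PRESENT;
        printf("pte: 0x%x\n", tlb_refill_lookup(pgd, addr));
        return 0;
}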
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * Notice that the address we're given is aligned to the page the fault
+ * occurred in, since we only get the PFN in R_MMU_CAUSE not the complete
+ * address.
+ *
+ * error_code:
+ *      bit 0 == 0 means no page found, 1 means protection fault
+ *      bit 1 == 0 means read, 1 means write
+ *
+ * If this routine detects a bad access, it signals the offending
+ * process or oopses the kernel; it does not return an error code.
+ */
+
+asmlinkage void
+do_page_fault(unsigned long address, struct pt_regs *regs,
+              int error_code)
+{
+        struct task_struct *tsk;
+        struct mm_struct *mm;
+        struct vm_area_struct * vma;
+        int writeaccess;
+        int fault;
+        unsigned long fixup;
+        siginfo_t info;
+
+        tsk = current;
+
+        /*
+         * We fault-in kernel-space virtual memory on-demand. The
+         * 'reference' page table is init_mm.pgd.
+         *
+         * NOTE! We MUST NOT take any locks for this case. We may
+         * be in an interrupt or a critical region, and should
+         * only copy the information from the master page table,
+         * nothing more.
+         *
+         * NOTE2: This is done so that, when updating the vmalloc
+         * mappings we don't have to walk all processes' pgdirs and
+         * add the high mappings all at once. Instead we do it as they
+         * are used.
+         *
+         * TODO: On CRIS, we have a PTE Global bit which should be set in
+         * all the PTE's related to vmalloc in all processes - that means if
+         * we switch process and a vmalloc PTE is still in the TLB, it won't
+         * need to be reloaded. It's an optimization.
+         *
+         * Linux/CRIS's kernel is not page-mapped, so the comparison below
+         * should really be >= VMALLOC_START, however, kernel fixup errors
+         * will be handled more quickly by going through vmalloc_fault and then
+         * into bad_area_nosemaphore than falling through the find_vma user-mode
+         * tests.
+         */
+
+        if (address >= TASK_SIZE)
+                goto vmalloc_fault;
+
+        mm = tsk->mm;
+        writeaccess = error_code & 2;
+        info.si_code = SEGV_MAPERR;
+
+        /*
+         * If we're in an interrupt or have no user
+         * context, we must not take the fault..
+         */
+
+        if (in_interrupt() || !mm)
+                goto no_context;
+
+        down(&mm->mmap_sem);
+        vma = find_vma(mm, address);
+        if (!vma)
+                goto bad_area;
+        if (vma->vm_start <= address)
+                goto good_area;
+        if (!(vma->vm_flags & VM_GROWSDOWN))
+                goto bad_area;
+        if (user_mode(regs)) {
+                /*
+                 * accessing the stack below usp is always a bug.
+                 * we get page-aligned addresses so we can only check
+                 * if we're within a page from usp, but that might be
+                 * enough to catch brutal errors at least.
+                 */
+                if (address + PAGE_SIZE < rdusp())
+                        goto bad_area;
+        }
+        if (expand_stack(vma, address))
+                goto bad_area;
+
+        /*
+         * Ok, we have a good vm_area for this memory access, so
+         * we can handle it..
+         */
+
+ good_area:
+        info.si_code = SEGV_ACCERR;
+
+        /* first do some preliminary protection checks */
+
+        if (writeaccess) {
+                if (!(vma->vm_flags & VM_WRITE))
+                        goto bad_area;
+        } else {
+                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                        goto bad_area;
+        }
+
+        /*
+         * If for any reason at all we couldn't handle the fault,
+         * make sure we exit gracefully rather than endlessly redo
+         * the fault.
+         */
+
+        switch (handle_mm_fault(mm, vma, address, writeaccess)) {
+        case 1:
+                tsk->min_flt++;
+                break;
+        case 2:
+                tsk->maj_flt++;
+                break;
+        case 0:
+                goto do_sigbus;
+        default:
+                goto out_of_memory;
+        }
+
+        up(&mm->mmap_sem);
+        return;
+        /*
+         * Something tried to access memory that isn't in our memory map..
+         * Fix it, but check if it's kernel or user first..
+         */
+
+ bad_area:
+        up(&mm->mmap_sem);
+
+ bad_area_nosemaphore:
+        DPG(show_registers(regs));
+
+        /* User mode accesses just cause a SIGSEGV */
+
+        if (user_mode(regs)) {
+                info.si_signo = SIGSEGV;
+                info.si_errno = 0;
+                /* info.si_code has been set above */
+                info.si_addr = (void *)address;
+                force_sig_info(SIGSEGV, &info, tsk);
+                return;
+        }
+
+ no_context:
+
+        /* Are we prepared to handle this kernel fault?
+         *
+         * (The kernel has valid exception-points in the source
+         *  when it accesses user-memory. When it fails in one
+         *  of those points, we find it in a table and do a jump
+         *  to some fixup code that loads an appropriate error
+         *  code)
+         */
+
+        if ((fixup = search_exception_table(regs->irp)) != 0) {
+                regs->irp = fixup;
+                regs->frametype = CRIS_FRAME_FIXUP;
+                D(printk("doing fixup to 0x%x\n", fixup));
+                return;
+        }
+
+        /*
+         * Oops. The kernel tried to access some bad page. We'll have to
+         * terminate things with extreme prejudice.
+         */
+
+        if ((unsigned long) (address) < PAGE_SIZE)
+                printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+        else
+                printk(KERN_ALERT "Unable to handle kernel access");
+        printk(" at virtual address %08lx\n",address);
+
+        die_if_kernel("Oops", regs, error_code);
+
+        do_exit(SIGKILL);
+
+        /*
+         * We ran out of memory, or some other thing happened to us that made
+         * us unable to handle the page fault gracefully.
+         */
+
+ out_of_memory:
+        up(&mm->mmap_sem);
+        printk("VM: killing process %s\n", tsk->comm);
+        if(user_mode(regs))
+                do_exit(SIGKILL);
+        goto no_context;
+
+ do_sigbus:
+        up(&mm->mmap_sem);
+
+        /*
+         * Send a sigbus, regardless of whether we were in kernel
+         * or user mode.
+         */
+        info.si_signo = SIGBUS;
+        info.si_errno = 0;
+        info.si_code = BUS_ADRERR;
+        info.si_addr = (void *)address;
+        force_sig_info(SIGBUS, &info, tsk);
+
+        /* Kernel mode? Handle exceptions or die */
+        if (!user_mode(regs))
+                goto no_context;
+        return;
+
+vmalloc_fault:
+        {
+                /*
+                 * Synchronize this task's top level page-table
+                 * with the 'reference' page table.
+                 */
+                int offset = pgd_index(address);
+                pgd_t *pgd, *pgd_k;
+                pmd_t *pmd, *pmd_k;
+
+                pgd = tsk->active_mm->pgd + offset;
+                pgd_k = init_mm.pgd + offset;
+
+                if (!pgd_present(*pgd)) {
+                        if (!pgd_present(*pgd_k))
+                                goto bad_area_nosemaphore;
+                        set_pgd(pgd, *pgd_k);
+                        return;
+                }
+
+                pmd = pmd_offset(pgd, address);
+                pmd_k = pmd_offset(pgd_k, address);
+
+                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+                        goto bad_area_nosemaphore;
+                set_pmd(pmd, *pmd_k);
+                return;
+        }
+}
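The vmalloc_fault block is the lazy kernel-mapping trick described in the NOTE2 comment above: new vmalloc mappings land only in init_mm.pgd, and each process copies the missing top-level entry the first time it touches the area. A toy model of that copy-on-fault synchronization — types and names are invented for the example:

#include <stddef.h>
#include <stdio.h>

#define PTRS_PER_PGD 256

/* toy top-level directory: one pointer per pgd slot */
typedef struct { void *pgd[PTRS_PER_PGD]; } toy_mm;

toy_mm init_mm_model;   /* the 'reference' table, always up to date */

/* returns 0 on success, -1 if the reference table has no entry
 * either (a genuinely bad kernel access) */
static int sync_pgd_on_fault(toy_mm *task_mm, size_t pgd_index)
{
        if (task_mm->pgd[pgd_index] != NULL)
                return 0;                        /* already synced */
        if (init_mm_model.pgd[pgd_index] == NULL)
                return -1;                       /* bad area */
        task_mm->pgd[pgd_index] = init_mm_model.pgd[pgd_index];
        return 0;
}

int main(void)
{
        static int dummy_pte_table;
        toy_mm task = { { NULL } };

        init_mm_model.pgd[42] = &dummy_pte_table;  /* vmalloc mapping exists */
        printf("sync: %d\n", sync_pgd_on_fault(&task, 42));  /* prints 0 */
        return 0;
}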
diff --git a/arch/cris/mm/init.c b/arch/cris/mm/init.c
new file mode 100644
index 000000000..3d0ceeffb
--- /dev/null
+++ b/arch/cris/mm/init.c
@@ -0,0 +1,506 @@
+/*
+ * linux/arch/cris/mm/init.c
+ *
+ * Copyright (C) 1995  Linus Torvalds
+ * Copyright (C) 2000  Axis Communications AB
+ *
+ * Authors:  Bjorn Wesen (bjornw@axis.com)
+ *
+ * $Log: init.c,v $
+ * Revision 1.15  2001/01/10 21:12:10  bjornw
+ * loops_per_sec -> loops_per_jiffy
+ *
+ * Revision 1.14  2000/11/22 16:23:20  bjornw
+ * Initialize totalhigh counters to 0 to make /proc/meminfo look nice.
+ *
+ * Revision 1.13  2000/11/21 16:37:51  bjornw
+ * Temporarily disable initmem freeing
+ *
+ * Revision 1.12  2000/11/21 13:55:07  bjornw
+ * Use CONFIG_CRIS_LOW_MAP for the low VM map instead of explicit CPU type
+ *
+ * Revision 1.11  2000/10/06 12:38:22  bjornw
+ * Cast empty_bad_page correctly (should really be of * type from the start..)
+ *
+ * Revision 1.10  2000/10/04 16:53:57  bjornw
+ * Fix memory-map due to LX features
+ *
+ * Revision 1.9  2000/09/13 15:47:49  bjornw
+ * Wrong count in reserved-pages loop
+ *
+ * Revision 1.8  2000/09/13 14:35:10  bjornw
+ * 2.4.0-test8 added a new arg to free_area_init_node
+ *
+ * Revision 1.7  2000/08/17 15:35:55  bjornw
+ * 2.4.0-test6 removed MAP_NR and inserted virt_to_page
+ *
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/smp.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+#include <asm/svinto.h>
+
+static unsigned long totalram_pages;
+
+struct pgtable_cache_struct quicklists;  /* see asm/pgalloc.h */
+
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+
+extern void die_if_kernel(char *,struct pt_regs *,long);
+extern void show_net_buffers(void);
+extern void tlb_init(void);
+
+/*
+ * empty_bad_page is the page that is used for page faults when linux
+ * is out-of-memory. Older versions of linux just did a
+ * do_exit(), but using this instead means there is less risk
+ * for a process dying in kernel mode, possibly leaving an inode
+ * unused etc..
+ *
+ * the main point is that when a page table error occurs, we want to get
+ * out of the kernel safely before killing the process, so we need something
+ * to feed the MMU with when the fault occurs even if we don't have any
+ * real PTE's or page tables.
+ *
+ * empty_bad_page_table is the accompanying page-table: it is initialized
+ * to point to empty_bad_page writable-shared entries.
+ *
+ * empty_zero_page is a special page that is used for zero-initialized
+ * data and COW.
+ */
+
+unsigned long empty_bad_page_table;
+unsigned long empty_bad_page;
+unsigned long empty_zero_page;
+
+pte_t * __bad_pagetable(void)
+{
+        /* somehow it is enough to just clear it and not fill it with
+         * bad page PTE's...
+         */
+        memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
+
+        return (pte_t *) empty_bad_page_table;
+}
+
+pte_t __bad_page(void)
+{
+        /* clear the empty_bad_page page. this should perhaps be
+         * a more simple inlined loop like it is on the other
+         * architectures.
+         */
+        memset((void *)empty_bad_page, 0, PAGE_SIZE);
+
+        return pte_mkdirty(__mk_pte((void *)empty_bad_page, PAGE_SHARED));
+}
+static pte_t * get_bad_pte_table(void)
+{
+        pte_t *empty_bad_pte_table = (pte_t *)empty_bad_page_table;
+        pte_t v;
+        int i;
+
+        v = __bad_page();
+
+        for (i = 0; i < PAGE_SIZE/sizeof(pte_t); i++)
+                empty_bad_pte_table[i] = v;
+
+        return empty_bad_pte_table;
+}
+
+void __handle_bad_pmd(pmd_t *pmd)
+{
+        pmd_ERROR(*pmd);
+        pmd_set(pmd, get_bad_pte_table());
+}
+
+void __handle_bad_pmd_kernel(pmd_t *pmd)
+{
+        pmd_ERROR(*pmd);
+        pmd_set_kernel(pmd, get_bad_pte_table());
+}
+
+pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
+{
+        pte_t *pte;
+
+        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        if (pmd_none(*pmd)) {
+                if (pte) {
+                        clear_page(pte);
+                        pmd_set_kernel(pmd, pte);
+                        return pte + offset;
+                }
+                pmd_set_kernel(pmd, get_bad_pte_table());
+                return NULL;
+        }
+        free_page((unsigned long)pte);
+        if (pmd_bad(*pmd)) {
+                __handle_bad_pmd_kernel(pmd);
+                return NULL;
+        }
+        return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
+{
+        pte_t *pte;
+
+        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        if (pmd_none(*pmd)) {
+                if (pte) {
+                        clear_page(pte);
+                        pmd_set(pmd, pte);
+                        return pte + offset;
+                }
+                pmd_set(pmd, get_bad_pte_table());
+                return NULL;
+        }
+        free_page((unsigned long)pte);
+        if (pmd_bad(*pmd)) {
+                __handle_bad_pmd(pmd);
+                return NULL;
+        }
+        return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+#ifndef CONFIG_NO_PGT_CACHE
+struct pgtable_cache_struct quicklists;
+
+/* trim the page-table cache if necessary */
+
+int do_check_pgt_cache(int low, int high)
+{
+        int freed = 0;
+
+        if(pgtable_cache_size > high) {
+                do {
+                        if(pgd_quicklist) {
+                                free_pgd_slow(get_pgd_fast());
+                                freed++;
+                        }
+                        if(pmd_quicklist) {
+                                free_pmd_slow(get_pmd_fast());
+                                freed++;
+                        }
+                        if(pte_quicklist) {
+                                free_pte_slow(get_pte_fast());
+                                freed++;
+                        }
+                } while(pgtable_cache_size > low);
+        }
+        return freed;
+}
+#else
+int do_check_pgt_cache(int low, int high)
+{
+        return 0;
+}
+#endif
+
+void show_mem(void)
+{
+        int i,free = 0,total = 0,cached = 0, reserved = 0, nonshared = 0;
+        int shared = 0;
+
+        printk("\nMem-info:\n");
+        show_free_areas();
+        printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+        i = max_mapnr;
+        while (i-- > 0) {
+                total++;
+                if (PageReserved(mem_map+i))
+                        reserved++;
+                else if (PageSwapCache(mem_map+i))
+                        cached++;
+                else if (!page_count(mem_map+i))
+                        free++;
+                else if (page_count(mem_map+i) == 1)
+                        nonshared++;
+                else
+                        shared += page_count(mem_map+i) - 1;
+        }
+        printk("%d pages of RAM\n",total);
+        printk("%d free pages\n",free);
+        printk("%d reserved pages\n",reserved);
+        printk("%d pages nonshared\n",nonshared);
+        printk("%d pages shared\n",shared);
+        printk("%d pages swap cached\n",cached);
+        printk("%ld pages in page table cache\n",pgtable_cache_size);
+        show_buffers();
+}
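do_check_pgt_cache() trims the quicklists: simple LIFO free-lists that recycle page-table pages so most allocations skip the page allocator, trimmed back toward a low-water mark once they grow past the high-water mark. A generic user-space sketch of the mechanism — not the kernel's actual quicklist macros:

#include <stdio.h>
#include <stdlib.h>

/* a LIFO free list of page-sized blocks; the first word of each
 * cached block links to the next one */
static void **pte_quicklist;
static unsigned long pgtable_cache_sz;

static void *get_pte_fast(void)
{
        void **ret = pte_quicklist;
        if (ret) {
                pte_quicklist = (void **)*ret;   /* pop */
                *ret = NULL;
                pgtable_cache_sz--;
        }
        return ret;
}

static void free_pte_fast(void *pte)
{
        *(void **)pte = (void *)pte_quicklist;   /* push */
        pte_quicklist = (void **)pte;
        pgtable_cache_sz++;
}

/* trim toward 'low' once the cache has grown past 'high' */
static int check_pgt_cache(int low, int high)
{
        int freed = 0;
        if (pgtable_cache_sz > (unsigned long)high) {
                do {
                        void *p = get_pte_fast();
                        if (!p)
                                break;
                        free(p);   /* give the page back to the allocator */
                        freed++;
                } while (pgtable_cache_sz > (unsigned long)low);
        }
        return freed;
}

int main(void)
{
        free_pte_fast(malloc(4096));
        free_pte_fast(malloc(4096));
        printf("freed %d\n", check_pgt_cache(0, 1));  /* prints freed 2 */
        return 0;
}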
+ */ + +void __init +paging_init(void) +{ + int i; + unsigned long zones_size[MAX_NR_ZONES]; + + printk("Setting up paging and the MMU.\n"); + + /* clear out the init_mm.pgd that will contain the kernel's mappings */ + + for(i = 0; i < PTRS_PER_PGD; i++) + swapper_pg_dir[i] = __pgd(0); + + /* initialise the TLB (tlb.c) */ + + tlb_init(); + + /* see README.mm for details on the KSEG setup */ + +#ifdef CONFIG_CRIS_LOW_MAP + + /* Etrax-100 LX version 1 has a bug so that we cannot map anything + * across the 0x80000000 boundary, so we need to shrink the user-virtual + * area to 0x50000000 instead of 0xb0000000 and map things slightly + * different. The unused areas are marked as paged so that we can catch + * freak kernel accesses there. + * + * The Juliette chip is mapped at 0xa so we pass that segment straight + * through. We cannot vremap it because the vmalloc area is below 0x8 + * and Juliette needs an uncached area above 0x8. + */ + + *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, page ) | + IO_STATE(R_MMU_KSEG, seg_e, seg ) | /* uncached flash */ + IO_STATE(R_MMU_KSEG, seg_d, page ) | + IO_STATE(R_MMU_KSEG, seg_c, page ) | + IO_STATE(R_MMU_KSEG, seg_b, seg ) | /* kernel reg area */ + IO_STATE(R_MMU_KSEG, seg_a, seg ) | /* Juliette etc. */ + IO_STATE(R_MMU_KSEG, seg_9, page ) | + IO_STATE(R_MMU_KSEG, seg_8, page ) | + IO_STATE(R_MMU_KSEG, seg_7, page ) | /* kernel vmalloc area */ + IO_STATE(R_MMU_KSEG, seg_6, seg ) | /* kernel DRAM area */ + IO_STATE(R_MMU_KSEG, seg_5, seg ) | /* cached flash */ + IO_STATE(R_MMU_KSEG, seg_4, page ) | /* user area */ + IO_STATE(R_MMU_KSEG, seg_3, page ) | /* user area */ + IO_STATE(R_MMU_KSEG, seg_2, page ) | /* user area */ + IO_STATE(R_MMU_KSEG, seg_1, page ) | /* user area */ + IO_STATE(R_MMU_KSEG, seg_0, page ) ); /* user area */ + + *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) | + IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) | + IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) | + IO_FIELD(R_MMU_KBASE_HI, base_c, 0x0 ) | + IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) | + IO_FIELD(R_MMU_KBASE_HI, base_a, 0xa ) | + IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) | + IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) ); + + *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_6, 0x4 ) | + IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) | + IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) ); +#else + /* This code is for the hopefully corrected Etrax-100 LX version 2... 
+#else
+        /* This code is for the hopefully corrected Etrax-100 LX version 2... */
+
+        *R_MMU_KSEG = ( IO_STATE(R_MMU_KSEG, seg_f, seg  ) |  /* cached flash */
+                        IO_STATE(R_MMU_KSEG, seg_e, seg  ) |  /* uncached flash */
+                        IO_STATE(R_MMU_KSEG, seg_d, page ) |  /* vmalloc area */
+                        IO_STATE(R_MMU_KSEG, seg_c, seg  ) |  /* kernel area */
+                        IO_STATE(R_MMU_KSEG, seg_b, seg  ) |  /* kernel reg area */
+                        IO_STATE(R_MMU_KSEG, seg_a, page ) |  /* user area */
+                        IO_STATE(R_MMU_KSEG, seg_9, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_8, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_7, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_6, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_5, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_4, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_3, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_2, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_1, page ) |
+                        IO_STATE(R_MMU_KSEG, seg_0, page ) );
+
+        *R_MMU_KBASE_HI = ( IO_FIELD(R_MMU_KBASE_HI, base_f, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_e, 0x8 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_d, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_c, 0x4 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_b, 0xb ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_a, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_9, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_HI, base_8, 0x0 ) );
+
+        *R_MMU_KBASE_LO = ( IO_FIELD(R_MMU_KBASE_LO, base_7, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_6, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_5, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_4, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_3, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_2, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_1, 0x0 ) |
+                            IO_FIELD(R_MMU_KBASE_LO, base_0, 0x0 ) );
+#endif
+
+        *R_MMU_CONTEXT = ( IO_FIELD(R_MMU_CONTEXT, page_id, 0 ) );
+
+        /* The MMU has been enabled ever since head.S but just to make
+         * it totally obvious we do it here as well.
+         */
+
+        *R_MMU_CTRL = ( IO_STATE(R_MMU_CTRL, inv_excp, enable ) |
+                        IO_STATE(R_MMU_CTRL, acc_excp, enable ) |
+                        IO_STATE(R_MMU_CTRL, we_excp,  enable ) );
+
+        *R_MMU_ENABLE = IO_STATE(R_MMU_ENABLE, mmu_enable, enable);
+
+        /*
+         * initialize the bad page table and bad page to point
+         * to a couple of allocated pages
+         */
+
+        empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+        empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+        empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+        memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+        /* All pages are DMA'able in Etrax, so put all in the DMA'able zone */
+
+        zones_size[0] = ((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
+
+        for (i = 1; i < MAX_NR_ZONES; i++)
+                zones_size[i] = 0;
+
+        /* Use free_area_init_node instead of free_area_init, because the former
+         * is designed for systems where the DRAM starts at an address substantially
+         * higher than 0, like us (we start at PAGE_OFFSET). This saves space in the
+         * mem_map page array.
+         */
+
+        free_area_init_node(0, 0, 0, zones_size, PAGE_OFFSET, 0);
+}
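The R_MMU_KSEG/R_MMU_KBASE writes in paging_init() assemble one register value from per-segment fields with the IO_STATE/IO_FIELD macros from asm/svinto.h, which this diff does not show. The underlying pattern is ordinary mask-and-shift packing; a hypothetical reimplementation, assuming one bit per segment in KSEG and a 4-bit base nibble per segment in KBASE_HI (the real register layout may differ), could look like:

#include <stdint.h>
#include <stdio.h>

/* assumed layout: segment n's 'linearly mapped' flag lives in bit n */
static uint32_t kseg_state(int seg, int is_seg)
{
        return (uint32_t)(is_seg ? 1u : 0u) << seg;
}

/* assumed layout: KBASE_HI holds a 4-bit physical base nibble for
 * each of segments 8..15, lowest segment in the lowest nibble */
static uint32_t kbase_hi_field(int seg, unsigned base_nibble)
{
        return (uint32_t)(base_nibble & 0xfu) << ((seg - 8) * 4);
}

int main(void)
{
        /* e.g. map segment 0xc (kernel) at physical 0x4, 0xb at 0xb */
        uint32_t kseg = kseg_state(0xc, 1) | kseg_state(0xb, 1);
        uint32_t kbase_hi = kbase_hi_field(0xc, 0x4) | kbase_hi_field(0xb, 0xb);

        printf("KSEG=0x%08x KBASE_HI=0x%08x\n", kseg, kbase_hi);
        return 0;
}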
+extern unsigned long loops_per_jiffy;  /* init/main.c */
+unsigned long loops_per_usec;
+
+extern char _stext, _edata, _etext;
+extern char __init_begin, __init_end;
+
+void __init
+mem_init(void)
+{
+        int codesize, reservedpages, datasize, initsize;
+        unsigned long tmp;
+
+        if(!mem_map)
+                BUG();
+
+        /* max/min_low_pfn was set by setup.c
+         * now we just copy it to some other necessary places...
+         *
+         * high_memory was also set in setup.c
+         */
+
+        max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
+
+        /* this will put all memory onto the freelists */
+        totalram_pages = free_all_bootmem();
+
+        reservedpages = 0;
+        for (tmp = 0; tmp < max_mapnr; tmp++) {
+                /*
+                 * Only count reserved RAM pages
+                 */
+                if (PageReserved(mem_map + tmp))
+                        reservedpages++;
+        }
+
+        codesize = (unsigned long) &_etext - (unsigned long) &_stext;
+        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+        printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, "
+               "%dk init)\n" ,
+               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+               max_mapnr << (PAGE_SHIFT-10),
+               codesize >> 10,
+               reservedpages << (PAGE_SHIFT-10),
+               datasize >> 10,
+               initsize >> 10
+               );
+
+        /* HACK alert - calculate a loops_per_usec for asm/delay.h here
+         * since this is called just after calibrate_delay in init/main.c
+         * but before places which use udelay. cannot be in time.c since
+         * that is called _before_ calibrate_delay
+         */
+
+        loops_per_usec = (loops_per_jiffy * HZ) / 1000000;
+
+        return;
+}
+
+/* free the pages occupied by initialization code */
+
+void free_initmem(void)
+{
+#if 0
+        /* currently this is a bad idea since the cramfs image is catted onto
+         * the vmlinux image, and the end of that image is not page-padded so
+         * part of the cramfs image will be freed here
+         */
+        unsigned long addr;
+
+        addr = (unsigned long)(&__init_begin);
+        for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+                ClearPageReserved(virt_to_page(addr));
+                set_page_count(virt_to_page(addr), 1);
+                free_page(addr);
+                totalram_pages++;
+        }
+        printk ("Freeing unused kernel memory: %dk freed\n",
+                (&__init_end - &__init_begin) >> 10);
+#endif
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+        int i;
+
+        i = max_mapnr;
+        val->totalram = 0;
+        val->sharedram = 0;
+        val->freeram = nr_free_pages();
+        val->bufferram = atomic_read(&buffermem_pages);
+        while (i-- > 0) {
+                if (PageReserved(mem_map+i))
+                        continue;
+                val->totalram++;
+                if (!atomic_read(&mem_map[i].count))
+                        continue;
+                val->sharedram += atomic_read(&mem_map[i].count) - 1;
+        }
+        val->mem_unit = PAGE_SIZE;
+        val->totalhigh = 0;
+        val->freehigh = 0;
+}
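The loops_per_usec conversion at the end of mem_init() is plain unit arithmetic: loops_per_jiffy * HZ gives loops per second, and dividing by 10^6 gives loops per microsecond. A quick check with made-up numbers:

#include <stdio.h>

int main(void)
{
        unsigned long hz = 100;                 /* assumed tick rate */
        unsigned long loops_per_jiffy = 50000;  /* made-up calibration result */

        /* loops/jiffy * jiffies/second = loops/second; / 1e6 = loops/usec */
        unsigned long loops_per_usec = (loops_per_jiffy * hz) / 1000000;

        printf("%lu loops per usec\n", loops_per_usec);  /* prints 5 */
        return 0;
}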
diff --git a/arch/cris/mm/tlb.c b/arch/cris/mm/tlb.c
new file mode 100644
index 000000000..ed74305ae
--- /dev/null
+++ b/arch/cris/mm/tlb.c
@@ -0,0 +1,301 @@
+/*
+ * linux/arch/cris/mm/tlb.c
+ *
+ * Copyright (C) 2000 Axis Communications AB
+ *
+ * Authors: Bjorn Wesen (bjornw@axis.com)
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+
+#include <asm/system.h>
+#include <asm/segment.h>
+#include <asm/pgtable.h>
+#include <asm/svinto.h>
+
+#define D(x)
+
+/* CRIS in Etrax100LX TLB */
+
+#define NUM_TLB_ENTRIES 64
+#define NUM_PAGEID 64
+#define INVALID_PAGEID 63
+#define NO_CONTEXT -1
+
+/* The TLB can host up to 64 different mm contexts at the same time.
+ * The running context is R_MMU_CONTEXT, and each TLB entry contains a
+ * page_id that has to match to give a hit. In page_id_map, we keep track
+ * of which mm's we have assigned which page_id's, so that we know when
+ * to invalidate TLB entries.
+ *
+ * The last page_id is never running - it is used as an invalid page_id
+ * so we can make TLB entries that will never match.
+ */
+
+struct mm_struct *page_id_map[NUM_PAGEID];
+
+static int map_replace_ptr = 1;  /* which page_id_map entry to replace next */
+
+/* invalidate all TLB entries */
+
+void
+flush_tlb_all()
+{
+        int i;
+
+        /* the vpn of i & 0xf is so we don't write similar TLB entries
+         * in the same 4-way entry group. details..
+         */
+
+        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
+                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
+
+                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
+
+                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                              IO_STATE(R_TLB_LO, valid,  no ) |
+                              IO_STATE(R_TLB_LO, kernel, no ) |
+                              IO_STATE(R_TLB_LO, we,     no ) |
+                              IO_FIELD(R_TLB_LO, pfn,    0 ) );
+        }
+        D(printk("tlb: flushed all\n"));
+}
+
+/* invalidate the selected mm context only */
+
+void
+flush_tlb_mm(struct mm_struct *mm)
+{
+        int i;
+        int page_id = mm->context;
+
+        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));
+
+        if(page_id == NO_CONTEXT)
+                return;
+
+        /* mark the TLB entries that match the page_id as invalid.
+         * here we could also check the _PAGE_GLOBAL bit and NOT flush
+         * global pages. is it worth the extra I/O ?
+         */
+
+        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
+                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0 ) );
+                }
+        }
+}
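Both flush routines share one pattern: sweep all 64 slots and neutralize the matching ones by retagging them with INVALID_PAGEID and clearing the valid bit, so they can never hit again. The same logic against a small software model of the TLB (illustrative only, not the hardware interface):

#include <stdio.h>

#define N_ENTRIES      64
#define INVALID_PAGEID 63

/* a software model of one TLB slot: which context (page_id) owns it,
 * which virtual page it maps, and a valid bit */
struct tlb_slot { int page_id; unsigned long vpn; int valid; };

static struct tlb_slot tlb[N_ENTRIES];

/* invalidate every slot owned by one context, as flush_tlb_mm does:
 * retag with the never-matching page_id and clear the valid bit */
static void model_flush_context(int page_id)
{
        int i;
        for (i = 0; i < N_ENTRIES; i++) {
                if (tlb[i].page_id == page_id) {
                        tlb[i].page_id = INVALID_PAGEID;
                        tlb[i].valid = 0;
                }
        }
}

int main(void)
{
        tlb[5].page_id = 7; tlb[5].vpn = 0x1234; tlb[5].valid = 1;
        model_flush_context(7);
        printf("slot 5: pid=%d valid=%d\n", tlb[5].page_id, tlb[5].valid);
        return 0;   /* prints slot 5: pid=63 valid=0 */
}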
+/* invalidate a single page */
+
+void
+flush_tlb_page(struct vm_area_struct *vma,
+               unsigned long addr)
+{
+        struct mm_struct *mm = vma->vm_mm;
+        int page_id = mm->context;
+        int i;
+
+        D(printk("tlb: flush page %p in context %d (%p)\n", addr, page_id, mm));
+
+        if(page_id == NO_CONTEXT)
+                return;
+
+        addr &= PAGE_MASK; /* perhaps not necessary */
+
+        /* invalidate those TLB entries that match both the mm context
+         * and the virtual address requested
+         */
+
+        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
+                unsigned long tlb_hi;
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                tlb_hi = *R_TLB_HI;
+                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
+                    (tlb_hi & PAGE_MASK) == addr) {
+                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                    addr; /* same addr as before works. */
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0 ) );
+                }
+        }
+}
+
+/* invalidate a page range */
+
+void
+flush_tlb_range(struct mm_struct *mm,
+                unsigned long start,
+                unsigned long end)
+{
+        int page_id = mm->context;
+        int i;
+
+        D(printk("tlb: flush range %p<->%p in context %d (%p)\n",
+                 start, end, page_id, mm));
+
+        if(page_id == NO_CONTEXT)
+                return;
+
+        start &= PAGE_MASK;  /* probably not necessary */
+        end &= PAGE_MASK;    /* ditto */
+
+        /* invalidate those TLB entries that match both the mm context
+         * and the virtual address range
+         */
+
+        for(i = 0; i < NUM_TLB_ENTRIES; i++) {
+                unsigned long tlb_hi, vpn;
+                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
+                tlb_hi = *R_TLB_HI;
+                vpn = tlb_hi & PAGE_MASK;
+                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
+                    vpn >= start && vpn < end) {
+                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
+                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
+
+                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
+                                      IO_STATE(R_TLB_LO, valid,  no ) |
+                                      IO_STATE(R_TLB_LO, kernel, no ) |
+                                      IO_STATE(R_TLB_LO, we,     no ) |
+                                      IO_FIELD(R_TLB_LO, pfn,    0 ) );
+                }
+        }
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+
+int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+        mm->context = NO_CONTEXT;
+        return 0;
+}
+
+/* the following functions are similar to those used in the PPC port */
+
+static inline void
+alloc_context(struct mm_struct *mm)
+{
+        struct mm_struct *old_mm;
+
+        D(printk("tlb: alloc context %d (%p)\n", map_replace_ptr, mm));
+
+        /* did we replace an mm ? */
+
+        old_mm = page_id_map[map_replace_ptr];
+
+        if(old_mm) {
+                /* throw out any TLB entries belonging to the mm we replace
+                 * in the map
+                 */
+                flush_tlb_mm(old_mm);
+
+                old_mm->context = NO_CONTEXT;
+        }
+
+        /* insert it into the page_id_map */
+
+        mm->context = map_replace_ptr;
+        page_id_map[map_replace_ptr] = mm;
+
+        map_replace_ptr++;
+
+        if(map_replace_ptr == INVALID_PAGEID)
+                map_replace_ptr = 0; /* wrap around */
+}
+
+/*
+ * if needed, get a new MMU context for the mm. otherwise nothing is done.
+ */
+
+void
+get_mmu_context(struct mm_struct *mm)
+{
+        if(mm->context == NO_CONTEXT)
+                alloc_context(mm);
+}
+
+/* called in schedule() just before actually doing the switch_to */
+
+void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+          struct task_struct *tsk, int cpu)
+{
+        /* make sure we have a context */
+
+        get_mmu_context(next);
+
+        /* switch context in the MMU */
+
+        D(printk("switching mmu_context to %d (%p)\n", next->context, next));
+
+        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context);
+}
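alloc_context() recycles the page_ids round-robin: when the replacement pointer lands on a slot that still belongs to a live mm, that mm's TLB entries are flushed and it drops back to NO_CONTEXT, to be assigned a fresh slot the next time it is scheduled. A compact model of that eviction policy with the flush stubbed out — toy types, not kernel code:

#include <stdio.h>
#include <stddef.h>

#define NUM_PAGEID     64
#define INVALID_PAGEID 63   /* never assigned to an mm */
#define NO_CONTEXT     -1

struct toy_mm { int context; };

static struct toy_mm *pid_map[NUM_PAGEID];
static int replace_ptr = 1;

static void toy_flush_tlb_mm(struct toy_mm *mm) { (void)mm; /* stub */ }

static void toy_alloc_context(struct toy_mm *mm)
{
        struct toy_mm *old = pid_map[replace_ptr];

        if (old) {                       /* evict the previous owner */
                toy_flush_tlb_mm(old);
                old->context = NO_CONTEXT;
        }
        mm->context = replace_ptr;
        pid_map[replace_ptr] = mm;
        if (++replace_ptr == INVALID_PAGEID)
                replace_ptr = 0;         /* wrap around; 63 stays invalid */
}

int main(void)
{
        struct toy_mm a = { NO_CONTEXT }, b = { NO_CONTEXT };

        toy_alloc_context(&a);
        replace_ptr = a.context;         /* force a collision for the demo */
        toy_alloc_context(&b);           /* evicts a */
        printf("a=%d b=%d\n", a.context, b.context);  /* prints a=-1 b=1 */
        return 0;
}

Note that the kernel version wraps the pointer back to 0 rather than 1, so even the slot held by init_mm can eventually be recycled.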
+/* called by __exit_mm to destroy the used MMU context if any before
+ * destroying the mm itself. this is only called when the last user of the mm
+ * drops it.
+ *
+ * the only thing we really need to do here is mark the used PID slot
+ * as empty.
+ */
+
+void
+destroy_context(struct mm_struct *mm)
+{
+        if(mm->context != NO_CONTEXT) {
+                D(printk("destroy_context %d (%p)\n", mm->context, mm));
+                flush_tlb_mm(mm);  /* TODO this might be redundant ? */
+                page_id_map[mm->context] = NULL;
+                /* mm->context = NO_CONTEXT; redundant.. mm will be freed */
+        }
+}
+
+/* called once during VM initialization, from init.c */
+
+void __init
+tlb_init(void)
+{
+        int i;
+
+        /* clear the page_id map */
+
+        for(i = 0; i < 64; i++)
+                page_id_map[i] = NULL;
+
+        /* invalidate the entire TLB */
+
+        flush_tlb_all();
+
+        /* the init_mm has context 0 from the boot */
+
+        page_id_map[0] = &init_mm;
+}