| author | Ralf Baechle <ralf@linux-mips.org> | 1999-10-09 00:00:47 +0000 |
| --- | --- | --- |
| committer | Ralf Baechle <ralf@linux-mips.org> | 1999-10-09 00:00:47 +0000 |
| commit | d6434e1042f3b0a6dfe1b1f615af369486f9b1fa (patch) | |
| tree | e2be02f33984c48ec019c654051d27964e42c441 /arch/m68k/mm | |
| parent | 609d1e803baf519487233b765eb487f9ec227a18 (diff) | |
Merge with 2.3.19.
Diffstat (limited to 'arch/m68k/mm')
| -rw-r--r-- | arch/m68k/mm/Makefile | 9 |
| -rw-r--r-- | arch/m68k/mm/fault.c | 17 |
| -rw-r--r-- | arch/m68k/mm/init.c | 341 |
| -rw-r--r-- | arch/m68k/mm/memory.c | 121 |
| -rw-r--r-- | arch/m68k/mm/motorola.c | 299 |
| -rw-r--r-- | arch/m68k/mm/sun3mmu.c | 98 |
6 files changed, 519 insertions, 366 deletions
diff --git a/arch/m68k/mm/Makefile b/arch/m68k/mm/Makefile
index 54f0fae1a..fdc73c35d 100644
--- a/arch/m68k/mm/Makefile
+++ b/arch/m68k/mm/Makefile
@@ -8,6 +8,13 @@
 # Note 2! The CFLAGS definition is now in the main makefile...
 
 O_TARGET := mm.o
-O_OBJS := init.o fault.o memory.o kmap.o extable.o hwtest.o
+O_OBJS := init.o fault.o extable.o hwtest.o
+
+ifndef CONFIG_SUN3
+O_OBJS += kmap.o memory.o motorola.o
+else
+O_OBJS += sun3mmu.o
+endif
+
 include $(TOPDIR)/Rules.make
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index ef1b855bd..c66db0652 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -36,7 +36,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct * vma;
 	unsigned long fixup;
-	int write;
+	int write, fault;
 
 #ifdef DEBUG
 	printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
@@ -44,12 +44,11 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
 		current->mm->pgd);
 #endif
 
-
 	/*
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_interrupt() || mm == &init_mm)
+	if (in_interrupt() || !mm)
 		goto no_context;
 
 	down(&mm->mmap_sem);
@@ -100,7 +99,10 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	if (!handle_mm_fault(current, vma, address, write))
+	fault = handle_mm_fault(current, vma, address, write);
+	if (fault < 0)
+		goto out_of_memory;
+	if (!fault)
 		goto do_sigbus;
 
 	/* There seems to be a missing invalidate somewhere in do_no_page.
@@ -160,6 +162,13 @@ no_context:
 	 * We ran out of memory, or some other thing happened to us that made
 	 * us unable to handle the page fault gracefully.
 	 */
+out_of_memory:
+	up(&mm->mmap_sem);
+	printk("VM: killing process %s\n", current->comm);
+	if (error_code & 4)
+		do_exit(SIGKILL);
+	goto no_context;
+
 do_sigbus:
 	up(&mm->mmap_sem);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 91409cd28..c22dccfc5 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -2,6 +2,9 @@
  * linux/arch/m68k/mm/init.c
  *
  * Copyright (C) 1995 Hamish Macdonald
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
  */
 
 #include <linux/config.h>
@@ -28,9 +31,10 @@
 #include <asm/atari_stram.h>
 #endif
 
-#undef DEBUG
+#ifdef CONFIG_SUN3
+void mmu_emu_reserve_pages(unsigned long max_page);
+#endif
 
-extern void die_if_kernel(char *,struct pt_regs *,long);
 extern void show_net_buffers(void);
 
 int do_check_pgt_cache(int low, int high)
@@ -60,7 +64,7 @@ int do_check_pgt_cache(int low, int high)
  * ZERO_PAGE is a special page that is used for zero-initialized
  * data and COW.
 */
-static unsigned long empty_bad_page_table;
+unsigned long empty_bad_page_table;
 
 pte_t *__bad_pagetable(void)
 {
@@ -68,7 +72,7 @@ pte_t *__bad_pagetable(void)
 	return (pte_t *)empty_bad_page_table;
 }
 
-static unsigned long empty_bad_page;
+unsigned long empty_bad_page;
 
 pte_t __bad_page(void)
 {
@@ -94,12 +98,12 @@ void show_mem(void)
 			reserved++;
 		else if (PageSwapCache(mem_map+i))
 			cached++;
-		else if (!atomic_read(&mem_map[i].count))
+		else if (!page_count(mem_map+i))
 			free++;
-		else if (atomic_read(&mem_map[i].count) == 1)
+		else if (page_count(mem_map+i) == 1)
 			nonshared++;
 		else
-			shared += atomic_read(&mem_map[i].count) - 1;
+			shared += page_count(mem_map+i) - 1;
 	}
 	printk("%d pages of RAM\n",total);
 	printk("%d free pages\n",free);
@@ -113,167 +117,6 @@ void show_mem(void)
 #endif
 }
 
-#ifndef mm_cachebits
-/*
- * Bits to add to page descriptors for "normal" caching mode.
- * For 68020/030 this is 0.
- * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
- */
-unsigned long mm_cachebits = 0;
-#endif
-
-__initfunc(static pte_t * kernel_page_table(unsigned long *memavailp))
-{
-	pte_t *ptablep;
-
-	ptablep = (pte_t *)*memavailp;
-	*memavailp += PAGE_SIZE;
-
-	clear_page((unsigned long)ptablep);
-	flush_page_to_ram((unsigned long) ptablep);
-	flush_tlb_kernel_page((unsigned long) ptablep);
-	nocache_page ((unsigned long)ptablep);
-
-	return ptablep;
-}
-
-static pmd_t *last_pgtable __initdata = NULL;
-
-__initfunc(static pmd_t * kernel_ptr_table(unsigned long *memavailp))
-{
-	if (!last_pgtable) {
-		unsigned long pmd, last;
-		int i;
-
-		/* Find the last ptr table that was used in head.S and
-		 * reuse the remaining space in that page for further
-		 * ptr tables.
-		 */
-		last = (unsigned long)kernel_pg_dir;
-		for (i = 0; i < PTRS_PER_PGD; i++) {
-			if (!pgd_present(kernel_pg_dir[i]))
-				continue;
-			pmd = pgd_page(kernel_pg_dir[i]);
-			if (pmd > last)
-				last = pmd;
-		}
-
-		last_pgtable = (pmd_t *)last;
-#ifdef DEBUG
-		printk("kernel_ptr_init: %p\n", last_pgtable);
-#endif
-	}
-
-	if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
-		last_pgtable = (pmd_t *)*memavailp;
-		*memavailp += PAGE_SIZE;
-
-		clear_page((unsigned long)last_pgtable);
-		flush_page_to_ram((unsigned long)last_pgtable);
-		flush_tlb_kernel_page((unsigned long)last_pgtable);
-		nocache_page((unsigned long)last_pgtable);
-	} else
-		last_pgtable += PTRS_PER_PMD;
-
-	return last_pgtable;
-}
-
-__initfunc(static unsigned long
-map_chunk (unsigned long addr, long size, unsigned long *memavailp))
-{
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
-	static unsigned long virtaddr = 0;
-	unsigned long physaddr;
-	pgd_t *pgd_dir;
-	pmd_t *pmd_dir;
-	pte_t *pte_dir;
-
-	physaddr = (addr | m68k_supervisor_cachemode |
-		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
-	if (CPU_IS_040_OR_060)
-		physaddr |= _PAGE_GLOBAL040;
-
-	while (size > 0) {
-#ifdef DEBUG
-		if (!(virtaddr & (PTRTREESIZE-1)))
-			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
-				virtaddr);
-#endif
-		pgd_dir = pgd_offset_k(virtaddr);
-		if (virtaddr && CPU_IS_020_OR_030) {
-			if (!(virtaddr & (ROOTTREESIZE-1)) &&
-			    size >= ROOTTREESIZE) {
-#ifdef DEBUG
-				printk ("[very early term]");
-#endif
-				pgd_val(*pgd_dir) = physaddr;
-				size -= ROOTTREESIZE;
-				virtaddr += ROOTTREESIZE;
-				physaddr += ROOTTREESIZE;
-				continue;
-			}
-		}
-		if (!pgd_present(*pgd_dir)) {
-			pmd_dir = kernel_ptr_table(memavailp);
-#ifdef DEBUG
-			printk ("[new pointer %p]", pmd_dir);
-#endif
-			pgd_set(pgd_dir, pmd_dir);
-		} else
-			pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
-		if (CPU_IS_020_OR_030) {
-			if (virtaddr) {
-#ifdef DEBUG
-				printk ("[early term]");
-#endif
-				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
-				physaddr += PTRTREESIZE;
-			} else {
-				int i;
-#ifdef DEBUG
-				printk ("[zero map]");
-#endif
-				pte_dir = (pte_t *)kernel_ptr_table(memavailp);
-				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
-					_PAGE_TABLE | _PAGE_ACCESSED;
-				pte_val(*pte_dir++) = 0;
-				physaddr += PAGE_SIZE;
-				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
-					pte_val(*pte_dir++) = physaddr;
-			}
-			size -= PTRTREESIZE;
-			virtaddr += PTRTREESIZE;
-		} else {
-			if (!pmd_present(*pmd_dir)) {
-#ifdef DEBUG
-				printk ("[new table]");
-#endif
-				pte_dir = kernel_page_table(memavailp);
-				pmd_set(pmd_dir, pte_dir);
-			}
-			pte_dir = pte_offset(pmd_dir, virtaddr);
-
-			if (virtaddr) {
-				if (!pte_present(*pte_dir))
-					pte_val(*pte_dir) = physaddr;
-			} else
-				pte_val(*pte_dir) = 0;
-			size -= PAGE_SIZE;
-			virtaddr += PAGE_SIZE;
-			physaddr += PAGE_SIZE;
-		}
-
-	}
-#ifdef DEBUG
-	printk("\n");
-#endif
-
-	return virtaddr;
-}
-
-extern unsigned long free_area_init(unsigned long, unsigned long);
 extern void init_pointer_table(unsigned long ptable);
 
 /* References to section boundaries */
@@ -281,121 +124,9 @@ extern void init_pointer_table(unsigned long ptable);
 extern char _text, _etext, _edata, __bss_start, _end;
 extern char __init_begin, __init_end;
 
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- */
-__initfunc(unsigned long paging_init(unsigned long start_mem,
-				     unsigned long end_mem))
-{
-	int chunk;
-	unsigned long mem_avail = 0;
-
-#ifdef DEBUG
-	{
-		extern unsigned long availmem;
-		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
-			kernel_pg_dir, availmem, start_mem, end_mem);
-	}
-#endif
-
-	/* Fix the cache mode in the page descriptors for the 680[46]0. */
-	if (CPU_IS_040_OR_060) {
-		int i;
-#ifndef mm_cachebits
-		mm_cachebits = _PAGE_CACHE040;
-#endif
-		for (i = 0; i < 16; i++)
-			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
-	}
-	/* Fix the PAGE_NONE value. */
-	if (CPU_IS_040_OR_060) {
-		/* On the 680[46]0 we can use the _PAGE_SUPER bit. */
-		pgprot_val(protection_map[0]) |= _PAGE_SUPER;
-		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_SUPER;
-	} else {
-		/* Otherwise we must fake it. */
-		pgprot_val(protection_map[0]) &= ~_PAGE_PRESENT;
-		pgprot_val(protection_map[0]) |= _PAGE_FAKE_SUPER;
-		pgprot_val(protection_map[VM_SHARED]) &= ~_PAGE_PRESENT;
-		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_FAKE_SUPER;
-	}
-
-	/*
-	 * Map the physical memory available into the kernel virtual
-	 * address space. It may allocate some memory for page
-	 * tables and thus modify availmem.
-	 */
-
-	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
-		mem_avail = map_chunk (m68k_memory[chunk].addr,
-				       m68k_memory[chunk].size, &start_mem);
-
-	}
-
-	flush_tlb_all();
-#ifdef DEBUG
-	printk ("memory available is %ldKB\n", mem_avail >> 10);
-	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
-		start_mem, end_mem);
-#endif
-
-	/*
-	 * initialize the bad page table and bad page to point
-	 * to a couple of allocated pages
-	 */
-	empty_bad_page_table = start_mem;
-	start_mem += PAGE_SIZE;
-	empty_bad_page = start_mem;
-	start_mem += PAGE_SIZE;
-	empty_zero_page = start_mem;
-	start_mem += PAGE_SIZE;
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
-	/*
-	 * allocate the "swapper" page directory and
-	 * record in task 0 (swapper) tss
-	 */
-	init_mm.pgd = (pgd_t *)kernel_ptr_table(&start_mem);
-	memset (init_mm.pgd, 0, sizeof(pgd_t)*PTRS_PER_PGD);
-
-	/* setup CPU root pointer for swapper task */
-	task[0]->tss.crp[0] = 0x80000000 | _PAGE_TABLE;
-	task[0]->tss.crp[1] = virt_to_phys(init_mm.pgd);
+extern pmd_t *zero_pgtable;
 
-#ifdef DEBUG
-	printk ("task 0 pagedir at %p virt, %#lx phys\n",
-		swapper_pg_dir, task[0]->tss.crp[1]);
-#endif
-
-	if (CPU_IS_040_OR_060)
-		asm __volatile__ (".chip 68040\n\t"
-				  "movec %0,%%urp\n\t"
-				  ".chip 68k"
-				  : /* no outputs */
-				  : "r" (task[0]->tss.crp[1]));
-	else
-		asm __volatile__ (".chip 68030\n\t"
-				  "pmove %0,%%crp\n\t"
-				  ".chip 68k"
-				  : /* no outputs */
-				  : "m" (task[0]->tss.crp[0]));
-#ifdef DEBUG
-	printk ("set crp\n");
-#endif
-
-	/*
-	 * Set up SFC/DFC registers (user data space)
-	 */
-	set_fs (USER_DS);
-
-#ifdef DEBUG
-	printk ("before free_area_init\n");
-#endif
-	return PAGE_ALIGN(free_area_init(start_mem, end_mem));
-}
-
-__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+void __init mem_init(unsigned long start_mem, unsigned long end_mem)
 {
 	int codepages = 0;
 	int datapages = 0;
@@ -418,31 +149,36 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 	atari_stram_reserve_pages( start_mem );
 #endif
 
-	for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
+#ifdef CONFIG_SUN3
+	/* reserve rom pages */
+	mmu_emu_reserve_pages(max_mapnr);
+#endif
+
+	for (tmp = PAGE_OFFSET ; tmp < end_mem ; tmp += PAGE_SIZE) {
+#ifndef CONFIG_SUN3
 		if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
 			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+#endif
 		if (PageReserved(mem_map+MAP_NR(tmp))) {
 			if (tmp >= (unsigned long)&_text
-			    && tmp < (unsigned long)&_edata) {
-				if (tmp < (unsigned long) &_etext)
-					codepages++;
-				else
-					datapages++;
-			} else if (tmp >= (unsigned long) &__init_begin
-				   && tmp < (unsigned long) &__init_end)
+			    && tmp < (unsigned long)&_etext)
+				codepages++;
+			else if (tmp >= (unsigned long) &__init_begin
+				 && tmp < (unsigned long) &__init_end)
 				initpages++;
 			else
 				datapages++;
 			continue;
 		}
-		atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
+		set_page_count(mem_map+MAP_NR(tmp), 1);
#ifdef CONFIG_BLK_DEV_INITRD
 		if (!initrd_start ||
 		    (tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
 #endif
 			free_page(tmp);
 	}
-	
+
+#ifndef CONFIG_SUN3
 	/* insert pointer tables allocated so far into the tablelist */
 	init_pointer_table((unsigned long)kernel_pg_dir);
 	for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -450,6 +186,11 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 			init_pointer_table(pgd_page(kernel_pg_dir[i]));
 	}
 
+	/* insert also pointer table that we used to unmap the zero page */
+	if (zero_pgtable)
+		init_pointer_table((unsigned long)zero_pgtable);
+#endif
+
 	printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
 	       (unsigned long) nr_free_pages << (PAGE_SHIFT-10),
 	       max_mapnr << (PAGE_SHIFT-10),
@@ -458,18 +199,6 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 	       initpages << (PAGE_SHIFT-10));
 }
 
-void free_initmem(void)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)&__init_begin;
-	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
-		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
-		atomic_set(&mem_map[MAP_NR(addr)].count, 1);
-		free_page(addr);
-	}
-}
-
 void si_meminfo(struct sysinfo *val)
 {
 	unsigned long i;
@@ -483,9 +212,9 @@ void si_meminfo(struct sysinfo *val)
 		if (PageReserved(mem_map+i))
 			continue;
 		val->totalram++;
-		if (!atomic_read(&mem_map[i].count))
+		if (!page_count(mem_map+i))
 			continue;
-		val->sharedram += atomic_read(&mem_map[i].count) - 1;
+		val->sharedram += page_count(mem_map+i) - 1;
 	}
 	val->totalram <<= PAGE_SHIFT;
 	val->sharedram <<= PAGE_SHIFT;
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index a6d496571..0bf9691dd 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -98,7 +98,7 @@ static ptable_desc ptable_list = { &ptable_list, &ptable_list };
 
 #define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
 
-__initfunc(void init_pointer_table(unsigned long ptable))
+void __init init_pointer_table(unsigned long ptable)
 {
 	ptable_desc *dp;
 	unsigned long page = ptable & PAGE_MASK;
@@ -227,31 +227,33 @@ static unsigned long transp_transl_matches( unsigned long regval,
 	/* address match? */
 	base = regval & 0xff000000;
 	mask = ~(regval << 8) & 0xff000000;
-	return ((vaddr ^ base) & mask) == 0;
+	return (((unsigned long)vaddr ^ base) & mask) == 0;
 }
 
+#if DEBUG_INVALID_PTOV
+int mm_inv_cnt = 5;
+#endif
+
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 /*
  * The following two routines map from a physical address to a kernel
  * virtual address and vice versa.
 */
-unsigned long mm_vtop (unsigned long vaddr)
+unsigned long mm_vtop(unsigned long vaddr)
 {
 	int i=0;
-	unsigned long voff = vaddr;
-	unsigned long offset = 0;
+	unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET;
 
-	do{
-		if (voff < offset + m68k_memory[i].size) {
+	do {
+		if (voff < m68k_memory[i].size) {
 #ifdef DEBUGPV
-			printk ("VTOP(%lx)=%lx\n", vaddr,
-				m68k_memory[i].addr + voff - offset);
+			printk ("VTOP(%p)=%lx\n", vaddr,
+				m68k_memory[i].addr + voff);
 #endif
-			return m68k_memory[i].addr + voff - offset;
-		} else
-			offset += m68k_memory[i].size;
-		i++;
-	}while (i < m68k_num_memory);
+			return m68k_memory[i].addr + voff;
+		}
+		voff -= m68k_memory[i].size;
+	} while (++i < m68k_num_memory);
 
 	return mm_vtop_fallback(vaddr);
 }
@@ -259,7 +261,7 @@ unsigned long mm_vtop (unsigned long vaddr)
 
 /* Separate function to make the common case faster (needs to save less
  * registers) */
-unsigned long mm_vtop_fallback (unsigned long vaddr)
+unsigned long mm_vtop_fallback(unsigned long vaddr)
 {
 	/* not in one of the memory chunks; test for applying transparent
 	 * translation */
@@ -272,13 +274,13 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
 			      ".chip 68k"
 			      : : "a" (&ttreg) );
 		if (transp_transl_matches( ttreg, vaddr ))
-			return vaddr;
+			return (unsigned long)vaddr;
 		asm volatile( ".chip 68030\n\t"
 			      "pmove %/tt1,%0@\n\t"
 			      ".chip 68k"
 			      : : "a" (&ttreg) );
 		if (transp_transl_matches( ttreg, vaddr ))
-			return vaddr;
+			return (unsigned long)vaddr;
 	}
 	else if (CPU_IS_040_OR_060) {
 		unsigned long ttreg;
@@ -288,13 +290,13 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
 			      ".chip 68k"
 			      : "=d" (ttreg) );
 		if (transp_transl_matches( ttreg, vaddr ))
-			return vaddr;
+			return (unsigned long)vaddr;
 		asm volatile( ".chip 68040\n\t"
 			      "movec %%dtt1,%0\n\t"
 			      ".chip 68k"
 			      : "=d" (ttreg) );
 		if (transp_transl_matches( ttreg, vaddr ))
-			return vaddr;
+			return (unsigned long)vaddr;
 	}
 
 	/* no match, too, so get the actual physical address from the MMU. */
@@ -306,11 +308,21 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
 		set_fs (MAKE_MM_SEG(SUPER_DATA));
 
 		/* The PLPAR instruction causes an access error if the translation
-		 * is not possible. We don't catch that here, so a bad kernel trap
-		 * will be reported in this case. */
-		asm volatile (".chip 68060\n\t"
-			      "plpar (%0)\n\t"
-			      ".chip 68k"
+		 * is not possible. To catch this we use the same exception mechanism
+		 * as for user space accesses in <asm/uaccess.h>. */
+		asm volatile (".chip 68060\n"
+			      "1: plpar (%0)\n"
+			      ".chip 68k\n"
+			      "2:\n"
+			      ".section .fixup,\"ax\"\n"
+			      "   .even\n"
+			      "3: lea -1,%0\n"
+			      "   jra 2b\n"
+			      ".previous\n"
+			      ".section __ex_table,\"a\"\n"
+			      "   .align 4\n"
+			      "   .long 1b,3b\n"
+			      ".previous"
 			      : "=a" (paddr) : "0" (vaddr));
 		set_fs (fs);
 
@@ -332,12 +344,13 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
 		set_fs (fs);
 
 		if (mmusr & MMU_T_040) {
-			return (vaddr);	/* Transparent translation */
+			return (unsigned long)vaddr;	/* Transparent translation */
 		}
 		if (mmusr & MMU_R_040)
-			return (mmusr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
+			return (mmusr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
 
-		panic ("VTOP040: bad virtual address %08lx (%lx)", vaddr, mmusr);
+		printk("VTOP040: bad virtual address %lx (%lx)", vaddr, mmusr);
+		return -1;
 	} else {
 		volatile unsigned short temp;
 		unsigned short mmusr;
@@ -350,46 +363,51 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
 		mmusr = temp;
 
 		if (mmusr & (MMU_I|MMU_B|MMU_L))
-			panic ("VTOP030: bad virtual address %08lx (%x)", vaddr, mmusr);
+			printk("VTOP030: bad virtual address %lx (%x)\n", vaddr, mmusr);
 
 		descaddr = phys_to_virt((unsigned long)descaddr);
 
 		switch (mmusr & MMU_NUM) {
 		case 1:
-			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
+			return (*descaddr & 0xfe000000) | ((unsigned long)vaddr & 0x01ffffff);
 		case 2:
-			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
+			return (*descaddr & 0xfffc0000) | ((unsigned long)vaddr & 0x0003ffff);
 		case 3:
-			return (*descaddr & PAGE_MASK) | (vaddr & (PAGE_SIZE-1));
+			return (*descaddr & PAGE_MASK) | ((unsigned long)vaddr & (PAGE_SIZE-1));
 		default:
-			panic ("VTOP: bad levels (%u) for virtual address %08lx",
+			printk("VTOP: bad levels (%u) for virtual address %lx\n",
 			       mmusr & MMU_NUM, vaddr);
 		}
 	}
 
-	panic ("VTOP: bad virtual address %08lx", vaddr);
+	printk("VTOP: bad virtual address %lx\n", vaddr);
+	return -1;
 }
 
 #ifndef CONFIG_SINGLE_MEMORY_CHUNK
 unsigned long mm_ptov (unsigned long paddr)
 {
 	int i = 0;
-	unsigned long offset = 0;
+	unsigned long poff, voff = PAGE_OFFSET;
 
-	do{
-		if (paddr >= m68k_memory[i].addr &&
-		    paddr < (m68k_memory[i].addr
-			     + m68k_memory[i].size)) {
+	do {
+		poff = paddr - m68k_memory[i].addr;
+		if (poff < m68k_memory[i].size) {
 #ifdef DEBUGPV
-			printk ("PTOV(%lx)=%lx\n", paddr,
-				(paddr - m68k_memory[i].addr) + offset);
+			printk ("PTOV(%lx)=%lx\n", paddr, poff + voff);
+#endif
+			return poff + voff;
+		}
+		voff += m68k_memory[i].size;
+	} while (++i < m68k_num_memory);
+
+#if DEBUG_INVALID_PTOV
+	if (mm_inv_cnt > 0) {
+		mm_inv_cnt--;
+		printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n",
+		       paddr, __builtin_return_address(0));
+	}
 #endif
-			return (paddr - m68k_memory[i].addr) + offset;
-		} else
-			offset += m68k_memory[i].size;
-		i++;
-	}while (i < m68k_num_memory);
-
 	/*
 	 * assume that the kernel virtual address is the same as the
 	 * physical address.
@@ -413,7 +431,7 @@ unsigned long mm_ptov (unsigned long paddr)
 	if (MACH_IS_AMIGA && paddr < 16*1024*1024)
 		return ZTWO_VADDR(paddr);
 #endif
-	return paddr;
+	return -1;
 }
 
 #endif
@@ -473,14 +491,7 @@ unsigned long mm_ptov (unsigned long paddr)
 * this?). So we have to push first and then additionally to invalidate.
 */
 
-#ifdef CONFIG_M68K_L2_CACHE
-/*
- * Jes was worried about performance (urhh ???) so its optional
- */
-
-void (*mach_l2_flush)(int) = NULL;
-#endif
-
+
 /*
 * cache_clear() semantics: Clear any cache entries for the area in question,
 * without writing back dirty entries first. This is useful if the data will
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
new file mode 100644
index 000000000..7b463b895
--- /dev/null
+++ b/arch/m68k/mm/motorola.c
@@ -0,0 +1,299 @@
+/*
+ * linux/arch/m68k/motorola.c
+ *
+ * Routines specific to the Motorola MMU, originally from:
+ * linux/arch/m68k/init.c
+ * which are Copyright (C) 1995 Hamish Macdonald
+ *
+ * Moved 8/20/1999 Sam Creasey
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blk.h>
+#endif
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#ifdef CONFIG_ATARI
+#include <asm/atari_stram.h>
+#endif
+
+#undef DEBUG
+
+#ifndef mm_cachebits
+/*
+ * Bits to add to page descriptors for "normal" caching mode.
+ * For 68020/030 this is 0.
+ * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
+ */
+unsigned long mm_cachebits = 0;
+#endif
+
+static pte_t * __init kernel_page_table(unsigned long *memavailp)
+{
+	pte_t *ptablep;
+
+	ptablep = (pte_t *)*memavailp;
+	*memavailp += PAGE_SIZE;
+
+	clear_page((unsigned long)ptablep);
+	flush_page_to_ram((unsigned long) ptablep);
+	flush_tlb_kernel_page((unsigned long) ptablep);
+	nocache_page ((unsigned long)ptablep);
+
+	return ptablep;
+}
+
+static pmd_t *last_pgtable __initdata = NULL;
+pmd_t *zero_pgtable __initdata = NULL;
+
+static pmd_t * __init kernel_ptr_table(unsigned long *memavailp)
+{
+	if (!last_pgtable) {
+		unsigned long pmd, last;
+		int i;
+
+		/* Find the last ptr table that was used in head.S and
+		 * reuse the remaining space in that page for further
+		 * ptr tables.
+		 */
+		last = (unsigned long)kernel_pg_dir;
+		for (i = 0; i < PTRS_PER_PGD; i++) {
+			if (!pgd_present(kernel_pg_dir[i]))
+				continue;
+			pmd = pgd_page(kernel_pg_dir[i]);
+			if (pmd > last)
+				last = pmd;
+		}
+
+		last_pgtable = (pmd_t *)last;
+#ifdef DEBUG
+		printk("kernel_ptr_init: %p\n", last_pgtable);
+#endif
+	}
+
+	if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
+		last_pgtable = (pmd_t *)*memavailp;
+		*memavailp += PAGE_SIZE;
+
+		clear_page((unsigned long)last_pgtable);
+		flush_page_to_ram((unsigned long)last_pgtable);
+		flush_tlb_kernel_page((unsigned long)last_pgtable);
+		nocache_page((unsigned long)last_pgtable);
+	} else
+		last_pgtable += PTRS_PER_PMD;
+
+	return last_pgtable;
+}
+
+static unsigned long __init
+map_chunk (unsigned long addr, long size, unsigned long *memavailp)
+{
+#define PTRTREESIZE (256*1024)
+#define ROOTTREESIZE (32*1024*1024)
+	static unsigned long virtaddr = PAGE_OFFSET;
+	unsigned long physaddr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	physaddr = (addr | m68k_supervisor_cachemode |
+		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+	if (CPU_IS_040_OR_060)
+		physaddr |= _PAGE_GLOBAL040;
+
+	while (size > 0) {
+#ifdef DEBUG
+		if (!(virtaddr & (PTRTREESIZE-1)))
+			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
+				virtaddr);
+#endif
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (virtaddr && CPU_IS_020_OR_030) {
+			if (!(virtaddr & (ROOTTREESIZE-1)) &&
+			    size >= ROOTTREESIZE) {
+#ifdef DEBUG
+				printk ("[very early term]");
+#endif
+				pgd_val(*pgd_dir) = physaddr;
+				size -= ROOTTREESIZE;
+				virtaddr += ROOTTREESIZE;
+				physaddr += ROOTTREESIZE;
+				continue;
+			}
+		}
+		if (!pgd_present(*pgd_dir)) {
+			pmd_dir = kernel_ptr_table(memavailp);
+#ifdef DEBUG
+			printk ("[new pointer %p]", pmd_dir);
+#endif
+			pgd_set(pgd_dir, pmd_dir);
+		} else
+			pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			if (virtaddr) {
+#ifdef DEBUG
+				printk ("[early term]");
+#endif
+				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+				physaddr += PTRTREESIZE;
+			} else {
+				int i;
+#ifdef DEBUG
+				printk ("[zero map]");
+#endif
+				zero_pgtable = kernel_ptr_table(memavailp);
+				pte_dir = (pte_t *)zero_pgtable;
+				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
+					_PAGE_TABLE | _PAGE_ACCESSED;
+				pte_val(*pte_dir++) = 0;
+				physaddr += PAGE_SIZE;
+				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+					pte_val(*pte_dir++) = physaddr;
+			}
+			size -= PTRTREESIZE;
+			virtaddr += PTRTREESIZE;
+		} else {
+			if (!pmd_present(*pmd_dir)) {
+#ifdef DEBUG
+				printk ("[new table]");
+#endif
+				pte_dir = kernel_page_table(memavailp);
+				pmd_set(pmd_dir, pte_dir);
+			}
+			pte_dir = pte_offset(pmd_dir, virtaddr);
+
+			if (virtaddr) {
+				if (!pte_present(*pte_dir))
+					pte_val(*pte_dir) = physaddr;
+			} else
+				pte_val(*pte_dir) = 0;
+			size -= PAGE_SIZE;
+			virtaddr += PAGE_SIZE;
+			physaddr += PAGE_SIZE;
+		}
+
+	}
+#ifdef DEBUG
+	printk("\n");
+#endif
+
+	return virtaddr;
+}
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+extern unsigned long empty_bad_page_table;
+extern unsigned long empty_bad_page;
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ */
+unsigned long __init paging_init(unsigned long start_mem,
+				 unsigned long end_mem)
+{
+	int chunk;
+	unsigned long mem_avail = 0;
+
+#ifdef DEBUG
+	{
+		extern unsigned long availmem;
+		printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
+			kernel_pg_dir, availmem, start_mem, end_mem);
+	}
+#endif
+
+	/* Fix the cache mode in the page descriptors for the 680[46]0. */
+	if (CPU_IS_040_OR_060) {
+		int i;
+#ifndef mm_cachebits
+		mm_cachebits = _PAGE_CACHE040;
+#endif
+		for (i = 0; i < 16; i++)
+			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
+	}
+	/* Fix the PAGE_NONE value. */
+	if (CPU_IS_040_OR_060) {
+		/* On the 680[46]0 we can use the _PAGE_SUPER bit. */
+		pgprot_val(protection_map[0]) |= _PAGE_SUPER;
+		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_SUPER;
+	} else {
+		/* Otherwise we must fake it. */
+		pgprot_val(protection_map[0]) &= ~_PAGE_PRESENT;
+		pgprot_val(protection_map[0]) |= _PAGE_FAKE_SUPER;
+		pgprot_val(protection_map[VM_SHARED]) &= ~_PAGE_PRESENT;
+		pgprot_val(protection_map[VM_SHARED]) |= _PAGE_FAKE_SUPER;
+	}
+
+	/*
+	 * Map the physical memory available into the kernel virtual
+	 * address space. It may allocate some memory for page
+	 * tables and thus modify availmem.
+	 */
+
+	for (chunk = 0; chunk < m68k_num_memory; chunk++) {
+		mem_avail = map_chunk (m68k_memory[chunk].addr,
+				       m68k_memory[chunk].size, &start_mem);
+
+	}
+
+	flush_tlb_all();
+#ifdef DEBUG
+	printk ("memory available is %ldKB\n", mem_avail >> 10);
+	printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
+		start_mem, end_mem);
+#endif
+
+	/*
+	 * initialize the bad page table and bad page to point
+	 * to a couple of allocated pages
+	 */
+	empty_bad_page_table = start_mem;
+	start_mem += PAGE_SIZE;
+	empty_bad_page = start_mem;
+	start_mem += PAGE_SIZE;
+	empty_zero_page = start_mem;
+	start_mem += PAGE_SIZE;
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+	/*
+	 * Set up SFC/DFC registers (user data space)
+	 */
+	set_fs (USER_DS);
+
+#ifdef DEBUG
+	printk ("before free_area_init\n");
+#endif
+	return PAGE_ALIGN(free_area_init(start_mem, end_mem));
+}
+
+extern char __init_begin, __init_end;
+
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)&__init_begin;
+	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+		mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
+		set_page_count(mem_map+MAP_NR(addr), 1);
+		free_page(addr);
+	}
+}
+
+
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
new file mode 100644
index 000000000..0d3277de0
--- /dev/null
+++ b/arch/m68k/mm/sun3mmu.c
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/m68k/mm/sun3mmu.c
+ *
+ * Implementations of mm routines specific to the sun3 MMU.
+ *
+ * Moved here 8/20/1999 Sam Creasey
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#ifdef CONFIG_BLK_DEV_RAM
+#include <linux/blk.h>
+#endif
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+
+extern void mmu_emu_init (void);
+
+extern unsigned long free_area_init(unsigned long, unsigned long);
+
+const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
+
+extern unsigned long empty_bad_page_table;
+extern unsigned long empty_bad_page;
+
+void free_initmem(void)
+{
+}
+
+/* For the sun3 we try to follow the i386 paging_init() more closely */
+/* start_mem and end_mem have PAGE_OFFSET added already */
+/* now sets up tables using sun3 PTEs rather than i386 as before. --m */
+unsigned long __init paging_init(unsigned long start_mem,
+				 unsigned long end_mem)
+{
+	pgd_t * pg_dir;
+	pte_t * pg_table;
+	int i;
+	unsigned long address;
+
+#ifdef TEST_VERIFY_AREA
+	wp_works_ok = 0;
+#endif
+	start_mem = PAGE_ALIGN(start_mem);
+	empty_bad_page_table = start_mem;
+	start_mem += PAGE_SIZE;
+	empty_bad_page = start_mem;
+	start_mem += PAGE_SIZE;
+	empty_zero_page = start_mem;
+	start_mem += PAGE_SIZE;
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
+	address = PAGE_OFFSET;
+	pg_dir = swapper_pg_dir;
+	memset (swapper_pg_dir, 0, sizeof (swapper_pg_dir));
+	memset (kernel_pg_dir, 0, sizeof (kernel_pg_dir));
+
+	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
+	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+	while (address < end_mem) {
+		pg_table = (pte_t *) __pa (start_mem);
+		start_mem += PTRS_PER_PTE * sizeof (pte_t);
+		pgd_val(*pg_dir) = (unsigned long) pg_table;
+		pg_dir++;
+
+		/* now change pg_table to kernel virtual addresses */
+		pg_table = (pte_t *) __va ((unsigned long) pg_table);
+		for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) {
+			pte_t pte = mk_pte (address, PAGE_INIT);
+			if (address >= end_mem)
+				pte_val (pte) = 0;
+			set_pte (pg_table, pte);
+			address += PAGE_SIZE;
+		}
+	}
+
+	mmu_emu_init();
+
+	current->mm = NULL;
+
+	return PAGE_ALIGN(free_area_init(start_mem, end_mem));
+}
+