author     Ralf Baechle <ralf@linux-mips.org>    1999-02-15 02:15:32 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    1999-02-15 02:15:32 +0000
commit     86464aed71025541805e7b1515541aee89879e33 (patch)
tree       e01a457a4912a8553bc65524aa3125d51f29f810 /arch/m68k/mm
parent     88f99939ecc6a95a79614574cb7d95ffccfc3466 (diff)
Merge with Linux 2.2.1.
Diffstat (limited to 'arch/m68k/mm')
-rw-r--r--  arch/m68k/mm/fault.c   |  47
-rw-r--r--  arch/m68k/mm/init.c    | 287
-rw-r--r--  arch/m68k/mm/kmap.c    | 696
-rw-r--r--  arch/m68k/mm/memory.c  | 136
4 files changed, 435 insertions, 731 deletions
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 62129782b..ef1b855bd 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
+#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -32,8 +33,7 @@ extern const int frame_extra_sizes[]; /* in m68k/kernel/signal.c */
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
+ struct mm_struct *mm = current->mm;
struct vm_area_struct * vma;
unsigned long fixup;
int write;
@@ -41,9 +41,17 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
#ifdef DEBUG
printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld, %p\n",
regs->sr, regs->pc, address, error_code,
- tsk->mm->pgd);
+ current->mm->pgd);
#endif
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_interrupt() || mm == &init_mm)
+ goto no_context;
+
down(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -86,7 +94,14 @@ good_area:
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
- handle_mm_fault(current, vma, address, write);
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ if (!handle_mm_fault(current, vma, address, write))
+ goto do_sigbus;
/* There seems to be a missing invalidate somewhere in do_no_page.
* Until I found it, this one cures the problem and makes
@@ -106,10 +121,15 @@ bad_area:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
- force_sig (SIGSEGV, tsk);
+ siginfo_t info;
+ info.si_signo = SIGSEGV;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, current);
return 1;
}
+no_context:
/* Are we prepared to handle this kernel fault? */
if ((fixup = search_exception_table(regs->pc)) != 0) {
struct pt_regs *tregs;
@@ -136,5 +156,22 @@ bad_area:
die_if_kernel("Oops", regs, error_code);
do_exit(SIGKILL);
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+do_sigbus:
+ up(&mm->mmap_sem);
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ force_sig(SIGBUS, current);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ goto no_context;
+
return 1;
}
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 395fb41b6..8e520702f 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -28,8 +28,9 @@
#include <asm/atari_stram.h>
#endif
+#undef DEBUG
+
extern void die_if_kernel(char *,struct pt_regs *,long);
-extern void init_kpointer_table(void);
extern void show_net_buffers(void);
int do_check_pgt_cache(int low, int high)
@@ -122,17 +123,14 @@ void show_mem(void)
unsigned long mm_cachebits = 0;
#endif
-pte_t *kernel_page_table (unsigned long *memavailp)
+static pte_t *__init kernel_page_table(unsigned long *memavailp)
{
pte_t *ptablep;
- if (memavailp) {
- ptablep = (pte_t *)*memavailp;
- *memavailp += PAGE_SIZE;
- }
- else
- ptablep = (pte_t *)__get_free_page(GFP_KERNEL);
+ ptablep = (pte_t *)*memavailp;
+ *memavailp += PAGE_SIZE;
+ clear_page((unsigned long)ptablep);
flush_page_to_ram((unsigned long) ptablep);
flush_tlb_kernel_page((unsigned long) ptablep);
nocache_page ((unsigned long)ptablep);
@@ -140,199 +138,164 @@ pte_t *kernel_page_table (unsigned long *memavailp)
return ptablep;
}
-__initfunc(static unsigned long
-map_chunk (unsigned long addr, unsigned long size, unsigned long *memavailp))
-{
-#define ONEMEG (1024*1024)
-#define L3TREESIZE (256*1024)
+static pmd_t *last_pgtable __initdata = NULL;
- static unsigned long mem_mapped = 0;
- static unsigned long virtaddr = 0;
- static pte_t *ktablep = NULL;
- unsigned long *kpointerp;
- unsigned long physaddr;
- extern pte_t *kpt;
- int pindex; /* index into pointer table */
- pgd_t *page_dir = pgd_offset_k (virtaddr);
-
- if (!pgd_present (*page_dir)) {
- /* we need a new pointer table */
- kpointerp = (unsigned long *) get_kpointer_table ();
- pgd_set (page_dir, (pmd_t *) kpointerp);
- memset (kpointerp, 0, PTRS_PER_PMD * sizeof (pmd_t));
- }
- else
- kpointerp = (unsigned long *) pgd_page (*page_dir);
+static pmd_t *__init kernel_ptr_table(unsigned long *memavailp)
+{
+ if (!last_pgtable) {
+ unsigned long pmd, last;
+ int i;
- /*
- * pindex is the offset into the pointer table for the
- * descriptors for the current virtual address being mapped.
- */
- pindex = (virtaddr >> 18) & 0x7f;
+ last = (unsigned long)kernel_pg_dir;
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ if (!pgd_val(kernel_pg_dir[i]))
+ continue;
+ pmd = pgd_page(kernel_pg_dir[i]);
+ if (pmd > last)
+ last = pmd;
+ }
+ last_pgtable = (pmd_t *)last;
#ifdef DEBUG
- printk ("mm=%ld, kernel_pg_dir=%p, kpointerp=%p, pindex=%d\n",
- mem_mapped, kernel_pg_dir, kpointerp, pindex);
+ printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
+ }
- /*
- * if this is running on an '040, we already allocated a page
- * table for the first 4M. The address is stored in kpt by
- * arch/head.S
- *
- */
- if (CPU_IS_040_OR_060 && mem_mapped == 0)
- ktablep = kpt;
-
- for (physaddr = addr;
- physaddr < addr + size;
- mem_mapped += L3TREESIZE, virtaddr += L3TREESIZE) {
-
-#ifdef DEBUG
- printk ("pa=%#lx va=%#lx ", physaddr, virtaddr);
-#endif
+ if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
+ last_pgtable = (pmd_t *)*memavailp;
+ *memavailp += PAGE_SIZE;
- if (pindex > 127 && mem_mapped >= 32*ONEMEG) {
- /* we need a new pointer table every 32M */
-#ifdef DEBUG
- printk ("[new pointer]");
-#endif
+ clear_page((unsigned long)last_pgtable);
+ flush_page_to_ram((unsigned long)last_pgtable);
+ flush_tlb_kernel_page((unsigned long)last_pgtable);
+ nocache_page((unsigned long)last_pgtable);
+ } else
+ last_pgtable += PTRS_PER_PMD;
- kpointerp = (unsigned long *)get_kpointer_table ();
- pgd_set(pgd_offset_k(virtaddr), (pmd_t *)kpointerp);
- pindex = 0;
- }
+ return last_pgtable;
+}
- if (CPU_IS_040_OR_060) {
- int i;
- unsigned long ktable;
+static unsigned long __init
+map_chunk (unsigned long addr, long size, unsigned long *memavailp)
+{
+#define PTRTREESIZE (256*1024)
+#define ROOTTREESIZE (32*1024*1024)
+ static unsigned long virtaddr = 0;
+ unsigned long physaddr;
+ pgd_t *pgd_dir;
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
- /* Don't map the first 4 MB again. The pagetables
- * for this range have already been initialized
- * in boot/head.S. Otherwise the pages used for
- * tables would be reinitialized to copyback mode.
- */
+ physaddr = (addr | m68k_supervisor_cachemode |
+ _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ if (CPU_IS_040_OR_060)
+ physaddr |= _PAGE_GLOBAL040;
- if (mem_mapped < 4 * ONEMEG)
- {
+ while (size > 0) {
+#ifdef DEBUG
+ if (!(virtaddr & (PTRTREESIZE-1)))
+ printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
+ virtaddr);
+#endif
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (virtaddr && CPU_IS_020_OR_030) {
+ if (!(virtaddr & (ROOTTREESIZE-1)) &&
+ size >= ROOTTREESIZE) {
#ifdef DEBUG
- printk ("Already initialized\n");
+ printk ("[very early term]");
#endif
- physaddr += L3TREESIZE;
- pindex++;
+ pgd_val(*pgd_dir) = physaddr;
+ size -= ROOTTREESIZE;
+ virtaddr += ROOTTREESIZE;
+ physaddr += ROOTTREESIZE;
continue;
}
+ }
+ if (!pgd_present(*pgd_dir)) {
+ pmd_dir = kernel_ptr_table(memavailp);
#ifdef DEBUG
- printk ("[setup table]");
+ printk ("[new pointer %p]", pmd_dir);
#endif
+ pgd_set(pgd_dir, pmd_dir);
+ } else
+ pmd_dir = pmd_offset(pgd_dir, virtaddr);
- /*
- * 68040, use page tables pointed to by the
- * kernel pointer table.
- */
-
- if ((pindex & 15) == 0) {
- /* Need new page table every 4M on the '040 */
+ if (CPU_IS_020_OR_030) {
+ if (virtaddr) {
#ifdef DEBUG
- printk ("[new table]");
+ printk ("[early term]");
#endif
- ktablep = kernel_page_table (memavailp);
- }
-
- ktable = virt_to_phys(ktablep);
-
- /*
- * initialize section of the page table mapping
- * this 256K portion.
- */
- for (i = 0; i < 64; i++) {
- pte_val(ktablep[i]) = physaddr | _PAGE_PRESENT
- | m68k_supervisor_cachemode | _PAGE_GLOBAL040
- | _PAGE_ACCESSED;
+ pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+ physaddr += PTRTREESIZE;
+ } else {
+ int i;
+#ifdef DEBUG
+ printk ("[zero map]");
+#endif
+ pte_dir = (pte_t *)kernel_ptr_table(memavailp);
+ pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
+ _PAGE_TABLE | _PAGE_ACCESSED;
+ pte_val(*pte_dir++) = 0;
physaddr += PAGE_SIZE;
+ for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
+ pte_val(*pte_dir++) = physaddr;
}
- ktablep += 64;
-
- /*
- * make the kernel pointer table point to the
- * kernel page table. Each entries point to a
- * 64 entry section of the page table.
- */
-
- kpointerp[pindex++] = ktable | _PAGE_TABLE | _PAGE_ACCESSED;
+ size -= PTRTREESIZE;
+ virtaddr += PTRTREESIZE;
} else {
- /*
- * 68030, use early termination page descriptors.
- * Each one points to 64 pages (256K).
- */
-#ifdef DEBUG
- printk ("[early term] ");
-#endif
- if (virtaddr == 0UL) {
- /* map the first 256K using a 64 entry
- * 3rd level page table.
- * UNMAP the first entry to trap
- * zero page (NULL pointer) references
- */
- int i;
- unsigned long *tbl;
-
- tbl = (unsigned long *)get_kpointer_table();
-
- kpointerp[pindex++] = virt_to_phys(tbl) | _PAGE_TABLE |_PAGE_ACCESSED;
-
- for (i = 0; i < 64; i++, physaddr += PAGE_SIZE)
- tbl[i] = physaddr | _PAGE_PRESENT | _PAGE_ACCESSED;
-
- /* unmap the zero page */
- tbl[0] = 0;
- } else {
- /* not the first 256K */
- kpointerp[pindex++] = physaddr | _PAGE_PRESENT | _PAGE_ACCESSED;
+ if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
- printk ("%lx=%lx ", virt_to_phys(&kpointerp[pindex-1]),
- kpointerp[pindex-1]);
+ printk ("[new table]");
#endif
- physaddr += 64 * PAGE_SIZE;
+ pte_dir = kernel_page_table(memavailp);
+ pmd_set(pmd_dir, pte_dir);
}
+ pte_dir = pte_offset(pmd_dir, virtaddr);
+
+ if (virtaddr) {
+ if (!pte_present(*pte_dir))
+ pte_val(*pte_dir) = physaddr;
+ } else
+ pte_val(*pte_dir) = 0;
+ size -= PAGE_SIZE;
+ virtaddr += PAGE_SIZE;
+ physaddr += PAGE_SIZE;
}
+
+ }
#ifdef DEBUG
- printk ("\n");
+ printk("\n");
#endif
- }
- return mem_mapped;
+ return virtaddr;
}
extern unsigned long free_area_init(unsigned long, unsigned long);
+extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/head.S.
*/
-__initfunc(unsigned long paging_init(unsigned long start_mem,
- unsigned long end_mem))
+unsigned long __init paging_init(unsigned long start_mem,
+ unsigned long end_mem)
{
int chunk;
unsigned long mem_avail = 0;
#ifdef DEBUG
{
- extern pte_t *kpt;
- printk ("start of paging_init (%p, %p, %lx, %lx, %lx)\n",
- kernel_pg_dir, kpt, availmem, start_mem, end_mem);
+ extern unsigned long availmem;
+ printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
+ kernel_pg_dir, availmem, start_mem, end_mem);
}
#endif
- init_kpointer_table();
-
/* Fix the cache mode in the page descriptors for the 680[46]0. */
if (CPU_IS_040_OR_060) {
int i;
@@ -366,6 +329,7 @@ __initfunc(unsigned long paging_init(unsigned long start_mem,
m68k_memory[chunk].size, &start_mem);
}
+
flush_tlb_all();
#ifdef DEBUG
printk ("memory available is %ldKB\n", mem_avail >> 10);
@@ -385,21 +349,16 @@ __initfunc(unsigned long paging_init(unsigned long start_mem,
start_mem += PAGE_SIZE;
memset((void *)empty_zero_page, 0, PAGE_SIZE);
-#if 0
/*
* allocate the "swapper" page directory and
* record in task 0 (swapper) tss
*/
- swapper_pg_dir = (pgd_t *)get_kpointer_table();
-
- init_mm.pgd = swapper_pg_dir;
-#endif
-
- memset (swapper_pg_dir, 0, sizeof(pgd_t)*PTRS_PER_PGD);
+ init_mm.pgd = (pgd_t *)kernel_ptr_table(&start_mem);
+ memset (init_mm.pgd, 0, sizeof(pgd_t)*PTRS_PER_PGD);
/* setup CPU root pointer for swapper task */
task[0]->tss.crp[0] = 0x80000000 | _PAGE_TABLE;
- task[0]->tss.crp[1] = virt_to_phys (swapper_pg_dir);
+ task[0]->tss.crp[1] = virt_to_phys(init_mm.pgd);
#ifdef DEBUG
printk ("task 0 pagedir at %p virt, %#lx phys\n",
@@ -430,16 +389,16 @@ __initfunc(unsigned long paging_init(unsigned long start_mem,
#ifdef DEBUG
printk ("before free_area_init\n");
#endif
-
- return PAGE_ALIGN(free_area_init (start_mem, end_mem));
+ return PAGE_ALIGN(free_area_init(start_mem, end_mem));
}
-__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
int datapages = 0;
int initpages = 0;
unsigned long tmp;
+ int i;
end_mem &= PAGE_MASK;
high_memory = (void *) end_mem;
@@ -480,6 +439,14 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
#endif
free_page(tmp);
}
+
+ /* insert pointer tables allocated so far into the tablelist */
+ init_pointer_table((unsigned long)kernel_pg_dir);
+ for (i = 0; i < PTRS_PER_PGD; i++) {
+ if (pgd_val(kernel_pg_dir[i]))
+ init_pointer_table(pgd_page(kernel_pg_dir[i]));
+ }
+
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
(unsigned long) nr_free_pages << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 802771ab4..d2cd29011 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -2,6 +2,9 @@
* linux/arch/m68k/mm/kmap.c
*
* Copyright (C) 1997 Roman Hodek
+ *
+ * 10/01/99 cleaned up the code and changed to the same interface
+ * used by other architectures /Roman Zippel
*/
#include <linux/mm.h>
@@ -9,250 +12,88 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>
+#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/io.h>
#include <asm/system.h>
+#undef DEBUG
-extern pte_t *kernel_page_table (unsigned long *memavailp);
-
-/* Granularity of kernel_map() allocations */
-#define KMAP_STEP (256*1024)
-
-/* Size of pool of KMAP structures; that is needed, because kernel_map() can
- * be called at times where kmalloc() isn't initialized yet. */
-#define KMAP_POOL_SIZE 16
-
-/* structure for maintainance of kmap regions */
-typedef struct kmap {
- struct kmap *next, *prev; /* linking of list */
- unsigned long addr; /* start address of region */
- unsigned long mapaddr; /* address returned to user */
- unsigned long size; /* size of region */
- unsigned free : 1; /* flag whether free or allocated */
- unsigned kmalloced : 1; /* flag whether got this from kmalloc() */
- unsigned pool_alloc : 1; /* flag whether got this is alloced in pool */
-} KMAP;
-
-KMAP kmap_pool[KMAP_POOL_SIZE] = {
- { NULL, NULL, KMAP_START, KMAP_START, KMAP_END-KMAP_START, 1, 0, 1 },
- { NULL, NULL, 0, 0, 0, 0, 0, 0 },
-};
+#define PTRTREESIZE (256*1024)
/*
- * anchor of kmap region list
- *
- * The list is always ordered by addresses, and regions are always adjacent,
- * i.e. there must be no holes between them!
+ * For 040/060 we can use the virtual memory area like other architectures,
+ * but for 020/030 we want to use early termination page descriptors and we
+ * can't mix this with normal page descriptors, so we have to copy that code
+ * (mm/vmalloc.c) and return appropriately aligned addresses.
*/
-KMAP *kmap_regions = &kmap_pool[0];
-
-/* for protecting the kmap_regions list against races */
-static struct semaphore kmap_sem = MUTEX;
+#ifdef CPU_M68040_OR_M68060_ONLY
+#define IO_SIZE PAGE_SIZE
-/*
- * Low-level allocation and freeing of KMAP structures
- */
-static KMAP *alloc_kmap( int use_kmalloc )
+static inline struct vm_struct *get_io_area(unsigned long size)
{
- KMAP *p;
- int i;
-
- /* first try to get from the pool if possible */
- for( i = 0; i < KMAP_POOL_SIZE; ++i ) {
- if (!kmap_pool[i].pool_alloc) {
- kmap_pool[i].kmalloced = 0;
- kmap_pool[i].pool_alloc = 1;
- return( &kmap_pool[i] );
- }
- }
-
- if (use_kmalloc && (p = (KMAP *)kmalloc( sizeof(KMAP), GFP_KERNEL ))) {
- p->kmalloced = 1;
- return( p );
- }
-
- return( NULL );
-}
-
-static void free_kmap( KMAP *p )
-{
- if (p->kmalloced)
- kfree( p );
- else
- p->pool_alloc = 0;
+ return get_vm_area(size);
}
-/*
- * Get a free region from the kmap address range
- */
-static KMAP *kmap_get_region( unsigned long size, int use_kmalloc )
+static inline void free_io_area(void *addr)
{
- KMAP *p, *q;
-
- /* look for a suitable free region */
- for( p = kmap_regions; p; p = p->next )
- if (p->free && p->size >= size)
- break;
- if (!p) {
- printk( KERN_ERR "kernel_map: address space for "
- "allocations exhausted\n" );
- return( NULL );
- }
-
- if (p->size > size) {
- /* if free region is bigger than we need, split off the rear free part
- * into a new region */
- if (!(q = alloc_kmap( use_kmalloc ))) {
- printk( KERN_ERR "kernel_map: out of memory\n" );
- return( NULL );
- }
- q->addr = p->addr + size;
- q->size = p->size - size;
- p->size = size;
- q->free = 1;
-
- q->prev = p;
- q->next = p->next;
- p->next = q;
- if (q->next) q->next->prev = q;
- }
-
- p->free = 0;
- return( p );
+ return vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
+#else
-/*
- * Free a kernel_map region again
- */
-static void kmap_put_region( KMAP *p )
-{
- KMAP *q;
-
- p->free = 1;
+#define IO_SIZE (256*1024)
- /* merge with previous region if possible */
- q = p->prev;
- if (q && q->free) {
- if (q->addr + q->size != p->addr) {
- printk( KERN_ERR "kernel_malloc: allocation list destroyed\n" );
- return;
- }
- q->size += p->size;
- q->next = p->next;
- if (p->next) p->next->prev = q;
- free_kmap( p );
- p = q;
- }
-
- /* merge with following region if possible */
- q = p->next;
- if (q && q->free) {
- if (p->addr + p->size != q->addr) {
- printk( KERN_ERR "kernel_malloc: allocation list destroyed\n" );
- return;
- }
- p->size += q->size;
- p->next = q->next;
- if (q->next) q->next->prev = p;
- free_kmap( q );
- }
-}
+static struct vm_struct *iolist = NULL;
-
-/*
- * kernel_map() helpers
- */
-static inline pte_t *
-pte_alloc_kernel_map(pmd_t *pmd, unsigned long address,
- unsigned long *memavailp)
+static struct vm_struct *get_io_area(unsigned long size)
{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t *page = kernel_page_table(memavailp);
- if (pmd_none(*pmd)) {
- if (page) {
- pmd_set(pmd, page);
- memset( page, 0, PAGE_SIZE );
- return page + address;
- }
- pmd_set(pmd, BAD_PAGETABLE);
- return NULL;
- }
- if (memavailp)
- panic("kernel_map: slept during init?!?");
- cache_page((unsigned long) page);
- free_page((unsigned long) page);
- }
- if (pmd_bad(*pmd)) {
- printk( KERN_ERR "Bad pmd in pte_alloc_kernel_map: %08lx\n",
- pmd_val(*pmd));
- pmd_set(pmd, BAD_PAGETABLE);
+ unsigned long addr;
+ struct vm_struct **p, *tmp, *area;
+
+ area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL);
+ if (!area)
return NULL;
+ addr = KMAP_START;
+ for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
+ if (size + addr < (unsigned long)tmp->addr)
+ break;
+ if (addr > KMAP_END-size)
+ return NULL;
+ addr = tmp->size + (unsigned long)tmp->addr;
}
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
-static inline void
-kernel_map_pte(pte_t *pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- pte_val(*pte) = phys_addr + pgprot_val(prot);
- address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
- pte++;
- } while (address < end);
+ area->addr = (void *)addr;
+ area->size = size + IO_SIZE;
+ area->next = *p;
+ *p = area;
+ return area;
}
-static inline int
-kernel_map_pmd (pmd_t *pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, pgprot_t prot,
- unsigned long *memavailp)
+static inline void free_io_area(void *addr)
{
- unsigned long end;
+ struct vm_struct **p, *tmp;
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
-
- if (CPU_IS_040_OR_060) {
- do {
- pte_t *pte = pte_alloc_kernel_map(pmd, address, memavailp);
- if (!pte)
- return -ENOMEM;
- kernel_map_pte(pte, address, end - address,
- address + phys_addr, prot);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- } else {
- /* On the 68030 we use early termination page descriptors.
- Each one points to 64 pages (256K). */
- int i = (address >> (PMD_SHIFT-4)) & 15;
- do {
- (&pmd_val(*pmd))[i++] = (address + phys_addr) | pgprot_val(prot);
- address += PMD_SIZE / 16;
- } while (address < end);
+ if (!addr)
+ return;
+ addr = (void *)((unsigned long)addr & -IO_SIZE);
+ for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
+ if (tmp->addr == addr) {
+ *p = tmp->next;
+ __iounmap(tmp->addr, tmp->size);
+ kfree(tmp);
+ return;
+ }
}
- return 0;
}
+#endif
/*
* Map some physical address range into the kernel address space. The
@@ -260,304 +101,245 @@ kernel_map_pmd (pmd_t *pmd, unsigned long address, unsigned long size,
*/
/* Rewritten by Andreas Schwab to remove all races. */
-unsigned long kernel_map(unsigned long phys_addr, unsigned long size,
- int cacheflag, unsigned long *memavailp)
+void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
- unsigned long retaddr, from, end;
- pgd_t *dir;
- pgprot_t prot;
- KMAP *kmap;
-
- /* Round down 'phys_addr' to 256 KB and adjust size */
- retaddr = phys_addr & (KMAP_STEP-1);
- size += retaddr;
- phys_addr &= ~(KMAP_STEP-1);
- /* Round up the size to 256 KB. It doesn't hurt if too much is
- mapped... */
- size = (size + KMAP_STEP - 1) & ~(KMAP_STEP-1);
-
- down( &kmap_sem );
- kmap = kmap_get_region(size, memavailp == NULL);
- if (!kmap) {
- up(&kmap_sem);
- return 0;
- }
- from = kmap->addr;
- retaddr += from;
- kmap->mapaddr = retaddr;
- end = from + size;
- up( &kmap_sem );
+ struct vm_struct *area;
+ unsigned long virtaddr, retaddr;
+ long offset;
+ pgd_t *pgd_dir;
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
+ /*
+ * Don't allow mappings that wrap..
+ */
+ if (!size || size > physaddr + size)
+ return NULL;
+#ifdef DEBUG
+ printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
+#endif
+ /*
+ * Mappings have to be aligned
+ */
+ offset = physaddr & (IO_SIZE - 1);
+ physaddr &= -IO_SIZE;
+ size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_io_area(size);
+ if (!area)
+ return NULL;
+
+ virtaddr = (unsigned long)area->addr;
+ retaddr = virtaddr + offset;
+#ifdef DEBUG
+ printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
+#endif
+
+ /*
+ * add cache and table flags to physical address
+ */
if (CPU_IS_040_OR_060) {
- pgprot_val(prot) = (_PAGE_PRESENT | _PAGE_GLOBAL040 |
- _PAGE_ACCESSED | _PAGE_DIRTY);
+ physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
+ _PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
- case KERNELMAP_FULL_CACHING:
- pgprot_val(prot) |= _PAGE_CACHE040;
+ case IOMAP_FULL_CACHING:
+ physaddr |= _PAGE_CACHE040;
break;
- case KERNELMAP_NOCACHE_SER:
+ case IOMAP_NOCACHE_SER:
default:
- pgprot_val(prot) |= _PAGE_NOCACHE_S;
+ physaddr |= _PAGE_NOCACHE_S;
break;
- case KERNELMAP_NOCACHE_NONSER:
- pgprot_val(prot) |= _PAGE_NOCACHE;
+ case IOMAP_NOCACHE_NONSER:
+ physaddr |= _PAGE_NOCACHE;
break;
- case KERNELMAP_NO_COPYBACK:
- pgprot_val(prot) |= _PAGE_CACHE040W;
+ case IOMAP_WRITETHROUGH:
+ physaddr |= _PAGE_CACHE040W;
break;
}
- } else
- pgprot_val(prot) = (_PAGE_PRESENT | _PAGE_ACCESSED |
- _PAGE_DIRTY |
- ((cacheflag == KERNELMAP_FULL_CACHING ||
- cacheflag == KERNELMAP_NO_COPYBACK)
- ? 0 : _PAGE_NOCACHE030));
-
- phys_addr -= from;
- dir = pgd_offset_k(from);
- while (from < end) {
- pmd_t *pmd = pmd_alloc_kernel(dir, from);
-
- if (kernel_map_pmd(pmd, from, end - from, phys_addr + from,
- prot, memavailp)) {
- printk( KERN_ERR "kernel_map: out of memory\n" );
- return 0UL;
+ } else {
+ physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+ switch (cacheflag) {
+ case IOMAP_NOCACHE_SER:
+ case IOMAP_NOCACHE_NONSER:
+ default:
+ physaddr |= _PAGE_NOCACHE030;
+ break;
+ case IOMAP_FULL_CACHING:
+ case IOMAP_WRITETHROUGH:
+ break;
}
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
}
- return retaddr;
-}
-
+ while (size > 0) {
+#ifdef DEBUG
+ if (!(virtaddr & (PTRTREESIZE-1)))
+ printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
+#endif
+ pgd_dir = pgd_offset_k(virtaddr);
+ pmd_dir = pmd_alloc_kernel(pgd_dir, virtaddr);
+ if (!pmd_dir) {
+ printk("ioremap: no mem for pmd_dir\n");
+ return NULL;
+ }
-/*
- * kernel_unmap() helpers
- */
-static inline void pte_free_kernel_unmap( pmd_t *pmd )
-{
- unsigned long page = pmd_page(*pmd);
- mem_map_t *pagemap = &mem_map[MAP_NR(page)];
-
- pmd_clear(pmd);
- cache_page(page);
-
- if (PageReserved( pagemap )) {
- /* need to unreserve pages that were allocated with memavailp != NULL;
- * this works only if 'page' is page-aligned */
- if (page & ~PAGE_MASK)
- return;
- clear_bit( PG_reserved, &pagemap->flags );
- atomic_set( &pagemap->count, 1 );
- }
- free_page( page );
-}
+ if (CPU_IS_020_OR_030) {
+ pmd_dir->pmd[(virtaddr/PTRTREESIZE)&-16] = physaddr;
+ physaddr += PTRTREESIZE;
+ virtaddr += PTRTREESIZE;
+ size -= PTRTREESIZE;
+ } else {
+ pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
+ if (!pte_dir) {
+ printk("ioremap: no mem for pte_dir\n");
+ return NULL;
+ }
-/*
- * This not only unmaps the requested region, but also loops over the whole
- * pmd to determine whether the other pte's are clear (so that the page can be
- * freed.) If so, it returns 1, 0 otherwise.
- */
-static inline int
-kernel_unmap_pte_range(pmd_t * pmd, unsigned long address, unsigned long size)
-{
- pte_t *pte;
- unsigned long addr2, end, end2;
- int all_clear = 1;
-
- if (pmd_none(*pmd))
- return( 0 );
- if (pmd_bad(*pmd)) {
- printk( KERN_ERR "kernel_unmap_pte_range: bad pmd (%08lx)\n",
- pmd_val(*pmd) );
- pmd_clear(pmd);
- return( 0 );
- }
- address &= ~PMD_MASK;
- addr2 = 0;
- pte = pte_offset(pmd, addr2);
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- end2 = addr2 + PMD_SIZE;
- while( addr2 < end2 ) {
- if (!pte_none(*pte)) {
- if (address <= addr2 && addr2 < end)
- pte_clear(pte);
- else
- all_clear = 0;
+ pte_val(*pte_dir) = physaddr;
+ virtaddr += PAGE_SIZE;
+ physaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
- ++pte;
- addr2 += PAGE_SIZE;
}
- return( all_clear );
-}
-
-static inline void
-kernel_unmap_pmd_range(pgd_t * dir, unsigned long address, unsigned long size)
-{
- pmd_t * pmd;
- unsigned long end;
+#ifdef DEBUG
+ printk("\n");
+#endif
+ flush_tlb_all();
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- printk( KERN_ERR "kernel_unmap_pmd_range: bad pgd (%08lx)\n",
- pgd_val(*dir) );
- pgd_clear(dir);
- return;
- }
- pmd = pmd_offset(dir, address);
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
-
- if (CPU_IS_040_OR_060) {
- do {
- if (kernel_unmap_pte_range(pmd, address, end - address))
- pte_free_kernel_unmap( pmd );
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
- } else {
- /* On the 68030 clear the early termination descriptors */
- int i = (address >> (PMD_SHIFT-4)) & 15;
- do {
- (&pmd_val(*pmd))[i++] = 0;
- address += PMD_SIZE / 16;
- } while (address < end);
- }
+ return (void *)retaddr;
}
/*
- * Unmap a kernel_map()ed region again
+ * Unmap a ioremap()ed region again
*/
-void kernel_unmap( unsigned long addr )
+void iounmap(void *addr)
{
- unsigned long end;
- pgd_t *dir;
- KMAP *p;
-
- down( &kmap_sem );
-
- /* find region for 'addr' in list; must search for mapaddr! */
- for( p = kmap_regions; p; p = p->next )
- if (!p->free && p->mapaddr == addr)
- break;
- if (!p) {
- printk( KERN_ERR "kernel_unmap: trying to free invalid region\n" );
- return;
- }
- addr = p->addr;
- end = addr + p->size;
- kmap_put_region( p );
-
- dir = pgd_offset_k( addr );
- while( addr < end ) {
- kernel_unmap_pmd_range( dir, addr, end - addr );
- addr = (addr + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- }
-
- up( &kmap_sem );
- /* flushing for a range would do, but there's no such function for kernel
- * address space... */
- flush_tlb_all();
+ free_io_area(addr);
}
-
/*
- * kernel_set_cachemode() helpers
+ * __iounmap unmaps nearly everything, so be careful:
+ * it doesn't currently free pointer/page tables anymore, but that
+ * wasn't used anyway and might be added later.
*/
-static inline void set_cmode_pte( pmd_t *pmd, unsigned long address,
- unsigned long size, unsigned cmode )
-{ pte_t *pte;
- unsigned long end;
-
- if (pmd_none(*pmd))
- return;
-
- pte = pte_offset( pmd, address );
- address &= ~PMD_MASK;
- end = address + size;
- if (end >= PMD_SIZE)
- end = PMD_SIZE;
-
- for( ; address < end; pte++ ) {
- pte_val(*pte) = (pte_val(*pte) & ~_PAGE_NOCACHE) | cmode;
- address += PAGE_SIZE;
- }
-}
-
-
-static inline void set_cmode_pmd( pgd_t *dir, unsigned long address,
- unsigned long size, unsigned cmode )
+void __iounmap(void *addr, unsigned long size)
{
- pmd_t *pmd;
- unsigned long end;
+ unsigned long virtaddr = (unsigned long)addr;
+ pgd_t *pgd_dir;
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
+ while (size > 0) {
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (pgd_bad(*pgd_dir)) {
+ printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+ pgd_clear(pgd_dir);
+ return;
+ }
+ pmd_dir = pmd_offset(pgd_dir, virtaddr);
- if (pgd_none(*dir))
- return;
+ if (CPU_IS_020_OR_030) {
+ int pmd_off = (virtaddr/PTRTREESIZE) & -16;
- pmd = pmd_offset( dir, address );
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
+ if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+ pmd_dir->pmd[pmd_off] = 0;
+ virtaddr += PTRTREESIZE;
+ size -= PTRTREESIZE;
+ continue;
+ }
+ }
- if ((pmd_val(*pmd) & _DESCTYPE_MASK) == _PAGE_PRESENT) {
- /* 68030 early termination descriptor */
- pmd_val(*pmd) = (pmd_val(*pmd) & ~_PAGE_NOCACHE) | cmode;
- return;
- }
- else {
- /* "normal" tables */
- for( ; address < end; pmd++ ) {
- set_cmode_pte( pmd, address, end - address, cmode );
- address = (address + PMD_SIZE) & PMD_MASK;
+ if (pmd_bad(*pmd_dir)) {
+ printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+ pmd_clear(pmd_dir);
+ return;
}
+ pte_dir = pte_offset(pmd_dir, virtaddr);
+
+ pte_val(*pte_dir) = 0;
+ virtaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
-}
+ flush_tlb_all();
+}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
-void kernel_set_cachemode( unsigned long address, unsigned long size,
- unsigned cmode )
+void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
- pgd_t *dir = pgd_offset_k( address );
- unsigned long end = address + size;
-
+ unsigned long virtaddr = (unsigned long)addr;
+ pgd_t *pgd_dir;
+ pmd_t *pmd_dir;
+ pte_t *pte_dir;
+
if (CPU_IS_040_OR_060) {
- switch( cmode ) {
- case KERNELMAP_FULL_CACHING:
+ switch (cmode) {
+ case IOMAP_FULL_CACHING:
cmode = _PAGE_CACHE040;
break;
- case KERNELMAP_NOCACHE_SER:
- default:
+ case IOMAP_NOCACHE_SER:
+ default:
cmode = _PAGE_NOCACHE_S;
break;
- case KERNELMAP_NOCACHE_NONSER:
+ case IOMAP_NOCACHE_NONSER:
cmode = _PAGE_NOCACHE;
break;
- case KERNELMAP_NO_COPYBACK:
+ case IOMAP_WRITETHROUGH:
cmode = _PAGE_CACHE040W;
break;
}
- } else
- cmode = ((cmode == KERNELMAP_FULL_CACHING ||
- cmode == KERNELMAP_NO_COPYBACK) ?
- 0 : _PAGE_NOCACHE030);
-
- for( ; address < end; dir++ ) {
- set_cmode_pmd( dir, address, end - address, cmode );
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
+ } else {
+ switch (cmode) {
+ case IOMAP_NOCACHE_SER:
+ case IOMAP_NOCACHE_NONSER:
+ default:
+ cmode = _PAGE_NOCACHE030;
+ break;
+ case IOMAP_FULL_CACHING:
+ case IOMAP_WRITETHROUGH:
+ cmode = 0;
+ }
+ }
+
+ while (size > 0) {
+ pgd_dir = pgd_offset_k(virtaddr);
+ if (pgd_bad(*pgd_dir)) {
+ printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+ pgd_clear(pgd_dir);
+ return;
+ }
+ pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+ if (CPU_IS_020_OR_030) {
+ int pmd_off = (virtaddr/PTRTREESIZE) & -16;
+
+ if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+ pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
+ _CACHEMASK040) | cmode;
+ virtaddr += PTRTREESIZE;
+ size -= PTRTREESIZE;
+ continue;
+ }
+ }
+
+ if (pmd_bad(*pmd_dir)) {
+ printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+ pmd_clear(pmd_dir);
+ return;
+ }
+ pte_dir = pte_offset(pmd_dir, virtaddr);
+
+ pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
+ virtaddr += PAGE_SIZE;
+ size -= PAGE_SIZE;
}
- /* flushing for a range would do, but there's no such function for kernel
- * address space... */
+
flush_tlb_all();
}
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 39cc1d1a9..a97578ec2 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -10,6 +10,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/malloc.h>
+#include <linux/init.h>
#include <asm/setup.h>
#include <asm/segment.h>
@@ -97,6 +98,31 @@ static ptable_desc ptable_list = { &ptable_list, &ptable_list };
#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))
+void __init init_pointer_table(unsigned long ptable)
+{
+ ptable_desc *dp;
+ unsigned long page = ptable & PAGE_MASK;
+ unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);
+
+ dp = PAGE_PD(page);
+ if (!(PD_MARKBITS(dp) & mask)) {
+ PD_MARKBITS(dp) = 0xff;
+ (dp->prev = ptable_list.prev)->next = dp;
+ (dp->next = &ptable_list)->prev = dp;
+ }
+
+ PD_MARKBITS(dp) &= ~mask;
+#ifdef DEBUG
+ printk("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));
+#endif
+
+ /* unreserve the page so it's possible to free that page */
+ dp->flags &= ~(1 << PG_reserved);
+ atomic_set(&dp->count, 1);
+
+ return;
+}
+
pmd_t *get_pointer_table (void)
{
ptable_desc *dp = ptable_list.next;
@@ -176,103 +202,6 @@ int free_pointer_table (pmd_t *ptable)
return 0;
}
-/* maximum pages used for kpointer tables */
-#define KPTR_PAGES 4
-/* # of reserved slots */
-#define RESERVED_KPTR 4
-extern pmd_tablepage kernel_pmd_table; /* reserved in head.S */
-
-static struct kpointer_pages {
- pmd_tablepage *page[KPTR_PAGES];
- u_char alloced[KPTR_PAGES];
-} kptr_pages;
-
-void init_kpointer_table(void) {
- short i = KPTR_PAGES-1;
-
- /* first page is reserved in head.S */
- kptr_pages.page[i] = &kernel_pmd_table;
- kptr_pages.alloced[i] = ~(0xff>>RESERVED_KPTR);
- for (i--; i>=0; i--) {
- kptr_pages.page[i] = NULL;
- kptr_pages.alloced[i] = 0;
- }
-}
-
-pmd_t *get_kpointer_table (void)
-{
- /* For pointer tables for the kernel virtual address space,
- * use the page that is reserved in head.S that can hold up to
- * 8 pointer tables. 3 of these tables are always reserved
- * (kernel_pg_dir, swapper_pg_dir and kernel pointer table for
- * the first 16 MB of RAM). In addition, the 4th pointer table
- * in this page is reserved. On Amiga and Atari, it is used to
- * map in the hardware registers. It may be used for other
- * purposes on other 68k machines. This leaves 4 pointer tables
- * available for use by the kernel. 1 of them are usually used
- * for the vmalloc tables. This allows mapping of 3 * 32 = 96 MB
- * of physical memory. But these pointer tables are also used
- * for other purposes, like kernel_map(), so further pages can
- * now be allocated.
- */
- pmd_tablepage *page;
- pmd_table *table;
- long nr, offset = -8;
- short i;
-
- for (i=KPTR_PAGES-1; i>=0; i--) {
- asm volatile("bfffo %1{%2,#8},%0"
- : "=d" (nr)
- : "d" ((u_char)~kptr_pages.alloced[i]), "d" (offset));
- if (nr)
- break;
- }
- if (i < 0) {
- printk("No space for kernel pointer table!\n");
- return NULL;
- }
- if (!(page = kptr_pages.page[i])) {
- if (!(page = (pmd_tablepage *)get_free_page(GFP_KERNEL))) {
- printk("No space for kernel pointer table!\n");
- return NULL;
- }
- flush_tlb_kernel_page((unsigned long) page);
- nocache_page((u_long)(kptr_pages.page[i] = page));
- }
- asm volatile("bfset %0@{%1,#1}"
- : /* no output */
- : "a" (&kptr_pages.alloced[i]), "d" (nr-offset));
- table = &(*page)[nr-offset];
- memset(table, 0, sizeof(pmd_table));
- return ((pmd_t *)table);
-}
-
-void free_kpointer_table (pmd_t *pmdp)
-{
- pmd_table *table = (pmd_table *)pmdp;
- pmd_tablepage *page = (pmd_tablepage *)((u_long)table & PAGE_MASK);
- long nr;
- short i;
-
- for (i=KPTR_PAGES-1; i>=0; i--) {
- if (kptr_pages.page[i] == page)
- break;
- }
- nr = ((u_long)table - (u_long)page) / sizeof(pmd_table);
- if (!table || i < 0 || (i == KPTR_PAGES-1 && nr < RESERVED_KPTR)) {
- printk("Attempt to free invalid kernel pointer table: %p\n", table);
- return;
- }
- asm volatile("bfclr %0@{%1,#1}"
- : /* no output */
- : "a" (&kptr_pages.alloced[i]), "d" (nr));
- if (!kptr_pages.alloced[i]) {
- kptr_pages.page[i] = 0;
- cache_page ((u_long)page);
- free_page ((u_long)page);
- }
-}
-
static unsigned long transp_transl_matches( unsigned long regval,
unsigned long vaddr )
{
@@ -308,7 +237,6 @@ static unsigned long transp_transl_matches( unsigned long regval,
*/
unsigned long mm_vtop (unsigned long vaddr)
{
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int i=0;
unsigned long voff = vaddr;
unsigned long offset = 0;
@@ -324,10 +252,6 @@ unsigned long mm_vtop (unsigned long vaddr)
offset += m68k_memory[i].size;
i++;
}while (i < m68k_num_memory);
-#else
- if (vaddr < m68k_memory[0].size)
- return m68k_memory[0].addr + vaddr;
-#endif
return mm_vtop_fallback(vaddr);
}
@@ -449,7 +373,6 @@ unsigned long mm_vtop_fallback (unsigned long vaddr)
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
unsigned long mm_ptov (unsigned long paddr)
{
-#ifndef CONFIG_SINGLE_MEMORY_CHUNK
int i = 0;
unsigned long offset = 0;
@@ -466,11 +389,6 @@ unsigned long mm_ptov (unsigned long paddr)
offset += m68k_memory[i].size;
i++;
}while (i < m68k_num_memory);
-#else
- unsigned long base = m68k_memory[0].addr;
- if (paddr >= base && paddr < (base + m68k_memory[0].size))
- return (paddr - base);
-#endif
/*
* assume that the kernel virtual address is the same as the
@@ -560,7 +478,7 @@ unsigned long mm_ptov (unsigned long paddr)
* Jes was worried about performance (urhh ???) so its optional
*/
-extern void (*mach_l2_flush)(int) = NULL;
+void (*mach_l2_flush)(int) = NULL;
#endif
/*