summary | refs | log | tree | commit | diff | stats
path: root/arch/m68k/mm/init.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/m68k/mm/init.c')
-rw-r--r--  arch/m68k/mm/init.c  341
1 files changed, 35 insertions, 306 deletions
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 91409cd28..c22dccfc5 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -2,6 +2,9 @@
* linux/arch/m68k/mm/init.c
*
* Copyright (C) 1995 Hamish Macdonald
+ *
+ * Contains common initialization routines, specific init code moved
+ * to motorola.c and sun3mmu.c
*/
#include <linux/config.h>
@@ -28,9 +31,10 @@
#include <asm/atari_stram.h>
#endif
-#undef DEBUG
+#ifdef CONFIG_SUN3
+void mmu_emu_reserve_pages(unsigned long max_page);
+#endif
-extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
int do_check_pgt_cache(int low, int high)
@@ -60,7 +64,7 @@ int do_check_pgt_cache(int low, int high)
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
-static unsigned long empty_bad_page_table;
+unsigned long empty_bad_page_table;
pte_t *__bad_pagetable(void)
{
@@ -68,7 +72,7 @@ pte_t *__bad_pagetable(void)
return (pte_t *)empty_bad_page_table;
}
-static unsigned long empty_bad_page;
+unsigned long empty_bad_page;
pte_t __bad_page(void)
{
@@ -94,12 +98,12 @@ void show_mem(void)
reserved++;
else if (PageSwapCache(mem_map+i))
cached++;
- else if (!atomic_read(&mem_map[i].count))
+ else if (!page_count(mem_map+i))
free++;
- else if (atomic_read(&mem_map[i].count) == 1)
+ else if (page_count(mem_map+i) == 1)
nonshared++;
else
- shared += atomic_read(&mem_map[i].count) - 1;
+ shared += page_count(mem_map+i) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d free pages\n",free);
@@ -113,167 +117,6 @@ void show_mem(void)
#endif
}
-#ifndef mm_cachebits
-/*
- * Bits to add to page descriptors for "normal" caching mode.
- * For 68020/030 this is 0.
- * For 68040, this is _PAGE_CACHE040 (cachable, copyback)
- */
-unsigned long mm_cachebits = 0;
-#endif
-
-__initfunc(static pte_t * kernel_page_table(unsigned long *memavailp))
-{
- pte_t *ptablep;
-
- ptablep = (pte_t *)*memavailp;
- *memavailp += PAGE_SIZE;
-
- clear_page((unsigned long)ptablep);
- flush_page_to_ram((unsigned long) ptablep);
- flush_tlb_kernel_page((unsigned long) ptablep);
- nocache_page ((unsigned long)ptablep);
-
- return ptablep;
-}
-
-static pmd_t *last_pgtable __initdata = NULL;
-
-__initfunc(static pmd_t * kernel_ptr_table(unsigned long *memavailp))
-{
- if (!last_pgtable) {
- unsigned long pmd, last;
- int i;
-
- /* Find the last ptr table that was used in head.S and
- * reuse the remaining space in that page for further
- * ptr tables.
- */
- last = (unsigned long)kernel_pg_dir;
- for (i = 0; i < PTRS_PER_PGD; i++) {
- if (!pgd_present(kernel_pg_dir[i]))
- continue;
- pmd = pgd_page(kernel_pg_dir[i]);
- if (pmd > last)
- last = pmd;
- }
-
- last_pgtable = (pmd_t *)last;
-#ifdef DEBUG
- printk("kernel_ptr_init: %p\n", last_pgtable);
-#endif
- }
-
- if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
- last_pgtable = (pmd_t *)*memavailp;
- *memavailp += PAGE_SIZE;
-
- clear_page((unsigned long)last_pgtable);
- flush_page_to_ram((unsigned long)last_pgtable);
- flush_tlb_kernel_page((unsigned long)last_pgtable);
- nocache_page((unsigned long)last_pgtable);
- } else
- last_pgtable += PTRS_PER_PMD;
-
- return last_pgtable;
-}
-
-__initfunc(static unsigned long
-map_chunk (unsigned long addr, long size, unsigned long *memavailp))
-{
-#define PTRTREESIZE (256*1024)
-#define ROOTTREESIZE (32*1024*1024)
- static unsigned long virtaddr = 0;
- unsigned long physaddr;
- pgd_t *pgd_dir;
- pmd_t *pmd_dir;
- pte_t *pte_dir;
-
- physaddr = (addr | m68k_supervisor_cachemode |
- _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
- if (CPU_IS_040_OR_060)
- physaddr |= _PAGE_GLOBAL040;
-
- while (size > 0) {
-#ifdef DEBUG
- if (!(virtaddr & (PTRTREESIZE-1)))
- printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
- virtaddr);
-#endif
- pgd_dir = pgd_offset_k(virtaddr);
- if (virtaddr && CPU_IS_020_OR_030) {
- if (!(virtaddr & (ROOTTREESIZE-1)) &&
- size >= ROOTTREESIZE) {
-#ifdef DEBUG
- printk ("[very early term]");
-#endif
- pgd_val(*pgd_dir) = physaddr;
- size -= ROOTTREESIZE;
- virtaddr += ROOTTREESIZE;
- physaddr += ROOTTREESIZE;
- continue;
- }
- }
- if (!pgd_present(*pgd_dir)) {
- pmd_dir = kernel_ptr_table(memavailp);
-#ifdef DEBUG
- printk ("[new pointer %p]", pmd_dir);
-#endif
- pgd_set(pgd_dir, pmd_dir);
- } else
- pmd_dir = pmd_offset(pgd_dir, virtaddr);
-
- if (CPU_IS_020_OR_030) {
- if (virtaddr) {
-#ifdef DEBUG
- printk ("[early term]");
-#endif
- pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
- physaddr += PTRTREESIZE;
- } else {
- int i;
-#ifdef DEBUG
- printk ("[zero map]");
-#endif
- pte_dir = (pte_t *)kernel_ptr_table(memavailp);
- pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
- _PAGE_TABLE | _PAGE_ACCESSED;
- pte_val(*pte_dir++) = 0;
- physaddr += PAGE_SIZE;
- for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
- pte_val(*pte_dir++) = physaddr;
- }
- size -= PTRTREESIZE;
- virtaddr += PTRTREESIZE;
- } else {
- if (!pmd_present(*pmd_dir)) {
-#ifdef DEBUG
- printk ("[new table]");
-#endif
- pte_dir = kernel_page_table(memavailp);
- pmd_set(pmd_dir, pte_dir);
- }
- pte_dir = pte_offset(pmd_dir, virtaddr);
-
- if (virtaddr) {
- if (!pte_present(*pte_dir))
- pte_val(*pte_dir) = physaddr;
- } else
- pte_val(*pte_dir) = 0;
- size -= PAGE_SIZE;
- virtaddr += PAGE_SIZE;
- physaddr += PAGE_SIZE;
- }
-
- }
-#ifdef DEBUG
- printk("\n");
-#endif
-
- return virtaddr;
-}
-
-extern unsigned long free_area_init(unsigned long, unsigned long);
extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
@@ -281,121 +124,9 @@ extern void init_pointer_table(unsigned long ptable);
extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- */
-__initfunc(unsigned long paging_init(unsigned long start_mem,
- unsigned long end_mem))
-{
- int chunk;
- unsigned long mem_avail = 0;
-
-#ifdef DEBUG
- {
- extern unsigned long availmem;
- printk ("start of paging_init (%p, %lx, %lx, %lx)\n",
- kernel_pg_dir, availmem, start_mem, end_mem);
- }
-#endif
-
- /* Fix the cache mode in the page descriptors for the 680[46]0. */
- if (CPU_IS_040_OR_060) {
- int i;
-#ifndef mm_cachebits
- mm_cachebits = _PAGE_CACHE040;
-#endif
- for (i = 0; i < 16; i++)
- pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
- }
- /* Fix the PAGE_NONE value. */
- if (CPU_IS_040_OR_060) {
- /* On the 680[46]0 we can use the _PAGE_SUPER bit. */
- pgprot_val(protection_map[0]) |= _PAGE_SUPER;
- pgprot_val(protection_map[VM_SHARED]) |= _PAGE_SUPER;
- } else {
- /* Otherwise we must fake it. */
- pgprot_val(protection_map[0]) &= ~_PAGE_PRESENT;
- pgprot_val(protection_map[0]) |= _PAGE_FAKE_SUPER;
- pgprot_val(protection_map[VM_SHARED]) &= ~_PAGE_PRESENT;
- pgprot_val(protection_map[VM_SHARED]) |= _PAGE_FAKE_SUPER;
- }
-
- /*
- * Map the physical memory available into the kernel virtual
- * address space. It may allocate some memory for page
- * tables and thus modify availmem.
- */
-
- for (chunk = 0; chunk < m68k_num_memory; chunk++) {
- mem_avail = map_chunk (m68k_memory[chunk].addr,
- m68k_memory[chunk].size, &start_mem);
-
- }
-
- flush_tlb_all();
-#ifdef DEBUG
- printk ("memory available is %ldKB\n", mem_avail >> 10);
- printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
- start_mem, end_mem);
-#endif
-
- /*
- * initialize the bad page table and bad page to point
- * to a couple of allocated pages
- */
- empty_bad_page_table = start_mem;
- start_mem += PAGE_SIZE;
- empty_bad_page = start_mem;
- start_mem += PAGE_SIZE;
- empty_zero_page = start_mem;
- start_mem += PAGE_SIZE;
- memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
- /*
- * allocate the "swapper" page directory and
- * record in task 0 (swapper) tss
- */
- init_mm.pgd = (pgd_t *)kernel_ptr_table(&start_mem);
- memset (init_mm.pgd, 0, sizeof(pgd_t)*PTRS_PER_PGD);
-
- /* setup CPU root pointer for swapper task */
- task[0]->tss.crp[0] = 0x80000000 | _PAGE_TABLE;
- task[0]->tss.crp[1] = virt_to_phys(init_mm.pgd);
+extern pmd_t *zero_pgtable;
-#ifdef DEBUG
- printk ("task 0 pagedir at %p virt, %#lx phys\n",
- swapper_pg_dir, task[0]->tss.crp[1]);
-#endif
-
- if (CPU_IS_040_OR_060)
- asm __volatile__ (".chip 68040\n\t"
- "movec %0,%%urp\n\t"
- ".chip 68k"
- : /* no outputs */
- : "r" (task[0]->tss.crp[1]));
- else
- asm __volatile__ (".chip 68030\n\t"
- "pmove %0,%%crp\n\t"
- ".chip 68k"
- : /* no outputs */
- : "m" (task[0]->tss.crp[0]));
-#ifdef DEBUG
- printk ("set crp\n");
-#endif
-
- /*
- * Set up SFC/DFC registers (user data space)
- */
- set_fs (USER_DS);
-
-#ifdef DEBUG
- printk ("before free_area_init\n");
-#endif
- return PAGE_ALIGN(free_area_init(start_mem, end_mem));
-}
-
-__initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
+void __init mem_init(unsigned long start_mem, unsigned long end_mem)
{
int codepages = 0;
int datapages = 0;
@@ -418,31 +149,36 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
atari_stram_reserve_pages( start_mem );
#endif
- for (tmp = 0 ; tmp < end_mem ; tmp += PAGE_SIZE) {
+#ifdef CONFIG_SUN3
+ /* reserve rom pages */
+ mmu_emu_reserve_pages(max_mapnr);
+#endif
+
+ for (tmp = PAGE_OFFSET ; tmp < end_mem ; tmp += PAGE_SIZE) {
+#ifndef CONFIG_SUN3
if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
+#endif
if (PageReserved(mem_map+MAP_NR(tmp))) {
if (tmp >= (unsigned long)&_text
- && tmp < (unsigned long)&_edata) {
- if (tmp < (unsigned long) &_etext)
- codepages++;
- else
- datapages++;
- } else if (tmp >= (unsigned long) &__init_begin
- && tmp < (unsigned long) &__init_end)
+ && tmp < (unsigned long)&_etext)
+ codepages++;
+ else if (tmp >= (unsigned long) &__init_begin
+ && tmp < (unsigned long) &__init_end)
initpages++;
else
datapages++;
continue;
}
- atomic_set(&mem_map[MAP_NR(tmp)].count, 1);
+ set_page_count(mem_map+MAP_NR(tmp), 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
#endif
free_page(tmp);
}
-
+
+#ifndef CONFIG_SUN3
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -450,6 +186,11 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
init_pointer_table(pgd_page(kernel_pg_dir[i]));
}
+ /* insert also pointer table that we used to unmap the zero page */
+ if (zero_pgtable)
+ init_pointer_table((unsigned long)zero_pgtable);
+#endif
+
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
(unsigned long) nr_free_pages << (PAGE_SHIFT-10),
max_mapnr << (PAGE_SHIFT-10),
@@ -458,18 +199,6 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
initpages << (PAGE_SHIFT-10));
}
-void free_initmem(void)
-{
- unsigned long addr;
-
- addr = (unsigned long)&__init_begin;
- for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
- mem_map[MAP_NR(addr)].flags &= ~(1 << PG_reserved);
- atomic_set(&mem_map[MAP_NR(addr)].count, 1);
- free_page(addr);
- }
-}
-
void si_meminfo(struct sysinfo *val)
{
unsigned long i;
@@ -483,9 +212,9 @@ void si_meminfo(struct sysinfo *val)
if (PageReserved(mem_map+i))
continue;
val->totalram++;
- if (!atomic_read(&mem_map[i].count))
+ if (!page_count(mem_map+i))
continue;
- val->sharedram += atomic_read(&mem_map[i].count) - 1;
+ val->sharedram += page_count(mem_map+i) - 1;
}
val->totalram <<= PAGE_SHIFT;
val->sharedram <<= PAGE_SHIFT;