summary | refs | log | tree | commit | diff | stats
path: root/arch/i386/mm
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>1998-08-25 09:12:35 +0000
committerRalf Baechle <ralf@linux-mips.org>1998-08-25 09:12:35 +0000
commitc7fc24dc4420057f103afe8fc64524ebc25c5d37 (patch)
tree3682407a599b8f9f03fc096298134cafba1c9b2f /arch/i386/mm
parent1d793fade8b063fde3cf275bf1a5c2d381292cd9 (diff)
o Merge with Linux 2.1.116.
o New Newport console code.
o New G364 console code.
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--arch/i386/mm/fault.c25
-rw-r--r--arch/i386/mm/init.c186
-rw-r--r--arch/i386/mm/ioremap.c23
3 files changed, 158 insertions, 76 deletions
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 8fac7dc2b..c4955d724 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -16,13 +16,14 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
-extern void die_if_kernel(const char *,struct pt_regs *,long);
+extern void die(const char *,struct pt_regs *,long);
/*
* Ugly, ugly, but the goto's result in better assembly..
@@ -100,13 +101,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
- if (local_irq_count[smp_processor_id()])
- die_if_kernel("page fault from irq handler",regs,error_code);
- lock_kernel();
tsk = current;
mm = tsk->mm;
+ if (in_interrupt())
+ die("page fault from irq handler",regs,error_code);
down(&mm->mmap_sem);
+
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
@@ -151,7 +152,7 @@ good_area:
goto bad_area;
}
handle_mm_fault(tsk, vma, address, write);
- up(&mm->mmap_sem);
+
/*
* Did it hit the DOS screen memory VA from vm86 mode?
*/
@@ -160,7 +161,8 @@ good_area:
if (bit < 32)
tsk->tss.screen_bitmap |= 1 << bit;
}
- goto out;
+ up(&mm->mmap_sem);
+ return;
/*
* Something tried to access memory that isn't in our memory map..
@@ -175,7 +177,7 @@ bad_area:
tsk->tss.error_code = error_code;
tsk->tss.trap_no = 14;
force_sig(SIGSEGV, tsk);
- goto out;
+ return;
}
/*
@@ -187,7 +189,6 @@ bad_area:
nr = (address - (unsigned long) idt) >> 3;
if (nr == 6) {
- unlock_kernel();
do_invalid_op(regs, 0);
return;
}
@@ -196,7 +197,7 @@ bad_area:
/* Are we prepared to handle this kernel fault? */
if ((fixup = search_exception_table(regs->eip)) != 0) {
regs->eip = fixup;
- goto out;
+ return;
}
/*
@@ -215,7 +216,7 @@ bad_area:
* CPU state on certain buggy processors.
*/
printk("Ok");
- goto out;
+ return;
}
if (address < PAGE_SIZE)
@@ -234,8 +235,8 @@ bad_area:
page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
printk(KERN_ALERT "*pte = %08lx\n", page);
}
- die_if_kernel("Oops", regs, error_code);
+ lock_kernel();
+ die("Oops", regs, error_code);
do_exit(SIGKILL);
-out:
unlock_kernel();
}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index c33c53b9a..b4cba8730 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -22,15 +22,87 @@
#include <linux/blk.h>
#endif
+#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
+#include <asm/fixmap.h>
-const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
-
-extern void die_if_kernel(char *,struct pt_regs *,long);
extern void show_net_buffers(void);
+extern unsigned long init_smp_mappings(unsigned long);
+
+void __bad_pte_kernel(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+}
+
+void __bad_pte(pmd_t *pmd)
+{
+ printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+}
+
+pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long offset)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pmd_none(*pmd)) {
+ if (pte) {
+ clear_page((unsigned long)pte);
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(pte);
+ return pte + offset;
+ }
+ pmd_val(*pmd) = _KERNPG_TABLE + __pa(BAD_PAGETABLE);
+ return NULL;
+ }
+ free_page((unsigned long)pte);
+ if (pmd_bad(*pmd)) {
+ __bad_pte_kernel(pmd);
+ return NULL;
+ }
+ return (pte_t *) pmd_page(*pmd) + offset;
+}
+
+pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
+{
+ unsigned long pte;
+
+ pte = (unsigned long) __get_free_page(GFP_KERNEL);
+ if (pmd_none(*pmd)) {
+ if (pte) {
+ clear_page(pte);
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(pte);
+ return (pte_t *)(pte + offset);
+ }
+ pmd_val(*pmd) = _PAGE_TABLE + __pa(BAD_PAGETABLE);
+ return NULL;
+ }
+ free_page(pte);
+ if (pmd_bad(*pmd)) {
+ __bad_pte(pmd);
+ return NULL;
+ }
+ return (pte_t *) (pmd_page(*pmd) + offset);
+}
+
+int do_check_pgt_cache(int low, int high)
+{
+ int freed = 0;
+ if(pgtable_cache_size > high) {
+ do {
+ if(pgd_quicklist)
+ free_pgd_slow(get_pgd_fast()), freed++;
+ if(pmd_quicklist)
+ free_pmd_slow(get_pmd_fast()), freed++;
+ if(pte_quicklist)
+ free_pte_slow(get_pte_fast()), freed++;
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
+}
/*
* BAD_PAGE is the page that is used for page faults when linux
@@ -82,7 +154,7 @@ void show_mem(void)
total++;
if (PageReserved(mem_map+i))
reserved++;
- if (PageSwapCache(mem_map+i))
+ else if (PageSwapCache(mem_map+i))
cached++;
else if (!atomic_read(&mem_map[i].count))
free++;
@@ -93,6 +165,7 @@ void show_mem(void)
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
show_buffers();
#ifdef CONFIG_NET
show_net_buffers();
@@ -116,23 +189,6 @@ extern char __init_begin, __init_end;
#define X86_CR4_PGE 0x0080 /* enable global pages */
#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
-#define X86_FEATURE_FPU 0x0001 /* internal FPU */
-#define X86_FEATURE_VME 0x0002 /* vm86 extensions */
-#define X86_FEATURE_DE 0x0004 /* debugging extensions */
-#define X86_FEATURE_PSE 0x0008 /* Page size extensions */
-#define X86_FEATURE_TSC 0x0010 /* Time stamp counter */
-#define X86_FEATURE_MSR 0x0020 /* RDMSR/WRMSR */
-#define X86_FEATURE_PAE 0x0040 /* Physical address extension */
-#define X86_FEATURE_MCE 0x0080 /* Machine check exception */
-#define X86_FEATURE_CXS 0x0100 /* cmpxchg8 available */
-#define X86_FEATURE_APIC 0x0200 /* internal APIC */
-#define X86_FEATURE_10 0x0400
-#define X86_FEATURE_11 0x0800
-#define X86_FEATURE_MTRR 0x1000 /* memory type registers */
-#define X86_FEATURE_PGE 0x2000 /* Global page */
-#define X86_FEATURE_MCA 0x4000 /* Machine Check Architecture */
-#define X86_FEATURE_CMOV 0x8000 /* Cmov/fcomi */
-
/*
* Save the cr4 feature set we're using (ie
* Pentium 4MB enable and PPro Global page
@@ -152,6 +208,50 @@ static inline void set_in_cr4(unsigned long mask)
}
/*
+ * allocate page table(s) for compile-time fixed mappings
+ */
+static unsigned long __init fixmap_init(unsigned long start_mem)
+{
+ pgd_t * pg_dir;
+ unsigned int idx;
+ unsigned long address;
+
+ start_mem = PAGE_ALIGN(start_mem);
+
+ for (idx=1; idx <= __end_of_fixed_addresses; idx += PTRS_PER_PTE)
+ {
+ address = fix_to_virt(__end_of_fixed_addresses-idx);
+ pg_dir = swapper_pg_dir + (address >> PGDIR_SHIFT);
+ memset((void *)start_mem, 0, PAGE_SIZE);
+ pgd_val(*pg_dir) = _PAGE_TABLE | __pa(start_mem);
+ start_mem += PAGE_SIZE;
+ }
+
+ return start_mem;
+}
+
+static void set_pte_phys (unsigned long vaddr, unsigned long phys)
+{
+ pgprot_t prot;
+ pte_t * pte;
+
+ pte = pte_offset(pmd_offset(pgd_offset_k(vaddr), vaddr), vaddr);
+ prot = PAGE_KERNEL;
+ if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+ pgprot_val(prot) |= _PAGE_GLOBAL;
+ set_pte(pte, mk_pte_phys(phys, prot));
+
+ local_flush_tlb();
+}
+
+void set_fixmap (enum fixed_addresses idx, unsigned long phys)
+{
+ unsigned long address = fix_to_virt(idx);
+
+ set_pte_phys (address,phys);
+}
+
+/*
* paging_init() sets up the page tables - note that the first 4MB are
* already mapped by head.S.
*
@@ -254,49 +354,9 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
address += PAGE_SIZE;
}
}
+ start_mem = fixmap_init(start_mem);
#ifdef __SMP__
-{
- extern unsigned long mp_lapic_addr;
- pte_t pte;
- unsigned long apic_area = (unsigned long)APIC_BASE;
-
- pg_dir = swapper_pg_dir + ((apic_area) >> PGDIR_SHIFT);
- memset((void *)start_mem, 0, PAGE_SIZE);
- pgd_val(*pg_dir) = _PAGE_TABLE | __pa(start_mem);
- start_mem += PAGE_SIZE;
-
- if (smp_found_config) {
- /*
- * Map the local APIC to FEE00000. (it's only the default
- * value, thanks to Steve Hsieh for finding this out. We
- * now save the real local-APIC physical address in smp_scan(),
- * and use it here)
- */
- pg_table = pte_offset((pmd_t *)pg_dir, apic_area);
- pte = mk_pte(__va(mp_lapic_addr), PAGE_KERNEL);
- set_pte(pg_table, pte);
-
- /*
- * Map the IO-APIC to FEC00000.
- */
- apic_area = 0xFEC00000; /*(unsigned long)IO_APIC_BASE;*/
- pg_table = pte_offset((pmd_t *)pg_dir, apic_area);
- pte = mk_pte(__va(apic_area), PAGE_KERNEL);
- set_pte(pg_table, pte);
- } else {
- /*
- * No local APIC but we are compiled SMP ... set up a
- * fake all zeroes page to simulate the local APIC.
- */
- pg_table = pte_offset((pmd_t *)pg_dir, apic_area);
- pte = mk_pte(start_mem, PAGE_KERNEL);
- memset((void *)start_mem, 0, PAGE_SIZE);
- start_mem += PAGE_SIZE;
- set_pte(pg_table, pte);
- }
-
- local_flush_tlb();
-}
+ start_mem = init_smp_mappings(start_mem);
#endif
local_flush_tlb();
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index b3fd2bdc5..740f4551f 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -90,13 +90,34 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
void * addr;
struct vm_struct * area;
- if (phys_addr < virt_to_phys(high_memory))
+ /*
+ * Don't remap the low PCI/ISA area, it's always mapped..
+ */
+ if (phys_addr >= 0xA0000 && (phys_addr+size) <= 0x100000)
return phys_to_virt(phys_addr);
+
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+ if (phys_addr < virt_to_phys(high_memory))
+ return NULL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
if (phys_addr & ~PAGE_MASK)
return NULL;
size = PAGE_ALIGN(size);
+
+ /*
+ * Don't allow mappings that wrap..
+ */
if (!size || size > phys_addr + size)
return NULL;
+
+ /*
+ * Ok, go for it..
+ */
area = get_vm_area(size);
if (!area)
return NULL;