author     Ralf Baechle <ralf@linux-mips.org>    1998-03-17 22:05:47 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    1998-03-17 22:05:47 +0000
commit     27cfca1ec98e91261b1a5355d10a8996464b63af
tree       8e895a53e372fa682b4c0a585b9377d67ed70d0e /arch/i386/mm
parent     6a76fb7214c477ccf6582bd79c5b4ccc4f9c41b1
Look Ma' what I found on my harddisk ...
o New faster syscalls for 2.1.x, too
o Upgrade to 2.1.89.
Don't try to run this. It's flaky as hell. But feel free to debug ...
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--  arch/i386/mm/.cvsignore |  1
-rw-r--r--  arch/i386/mm/fault.c    | 26
-rw-r--r--  arch/i386/mm/init.c     | 60
3 files changed, 44 insertions, 43 deletions
diff --git a/arch/i386/mm/.cvsignore b/arch/i386/mm/.cvsignore
index 4671378ae..857dd22e9 100644
--- a/arch/i386/mm/.cvsignore
+++ b/arch/i386/mm/.cvsignore
@@ -1 +1,2 @@
 .depend
+.*.flags
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index beb9f91a4..8fac7dc2b 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -20,6 +20,7 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/hardirq.h>
 
 extern void die_if_kernel(const char *,struct pt_regs *,long);
@@ -76,8 +77,6 @@ bad_area:
 asmlinkage void do_invalid_op (struct pt_regs *, unsigned long);
 
-extern int pentium_f00f_bug;
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -101,6 +100,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	/* get the address */
 	__asm__("movl %%cr2,%0":"=r" (address));
 
+	if (local_irq_count[smp_processor_id()])
+		die_if_kernel("page fault from irq handler",regs,error_code);
 	lock_kernel();
 	tsk = current;
 	mm = tsk->mm;
@@ -180,7 +181,7 @@ bad_area:
 	/*
 	 * Pentium F0 0F C7 C8 bug workaround.
 	 */
-	if (pentium_f00f_bug) {
+	if (boot_cpu_data.f00f_bug) {
 		unsigned long nr;
 
 		nr = (address - (unsigned long) idt) >> 3;
@@ -194,11 +195,6 @@ bad_area:
 	/* Are we prepared to handle this kernel fault?  */
 	if ((fixup = search_exception_table(regs->eip)) != 0) {
-		printk(KERN_DEBUG "%s: Exception at [<%lx>] cr2=%lx (fixup: %lx)\n",
-			tsk->comm,
-			regs->eip,
-			address,
-			fixup);
 		regs->eip = fixup;
 		goto out;
 	}
@@ -209,10 +205,16 @@ bad_area:
 	 *
 	 * First we check if it was the bootup rw-test, though..
 	 */
-	if (wp_works_ok < 0 && address == TASK_SIZE && (error_code & 1)) {
-		wp_works_ok = 1;
-		pg0[0] = pte_val(mk_pte(TASK_SIZE, PAGE_SHARED));
-		flush_tlb();
+	if (boot_cpu_data.wp_works_ok < 0 &&
+	    address == PAGE_OFFSET && (error_code & 1)) {
+		boot_cpu_data.wp_works_ok = 1;
+		pg0[0] = pte_val(mk_pte(PAGE_OFFSET, PAGE_KERNEL));
+		local_flush_tlb();
+		/*
+		 * Beware: Black magic here. The printk is needed here to flush
+		 * CPU state on certain buggy processors.
+		 */
+		printk("Ok");
 		goto out;
 	}
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 6ed47e2ef..f9172bdae 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -72,7 +72,7 @@ pte_t __bad_page(void)
 void show_mem(void)
 {
 	int i,free = 0,total = 0,reserved = 0;
-	int shared = 0;
+	int shared = 0, cached = 0;
 
 	printk("Mem-info:\n");
 	show_free_areas();
@@ -82,6 +82,8 @@ void show_mem(void)
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
+		if (PageSwapCache(mem_map+i))
+			cached++;
 		else if (!atomic_read(&mem_map[i].count))
 			free++;
 		else
@@ -91,6 +93,7 @@ void show_mem(void)
 	printk("%d free pages\n",free);
 	printk("%d reserved pages\n",reserved);
 	printk("%d pages shared\n",shared);
+	printk("%d pages swap cached\n",cached);
 	show_buffers();
 #ifdef CONFIG_NET
 	show_net_buffers();
@@ -131,14 +134,6 @@ extern char __init_begin, __init_end;
 #define X86_FEATURE_MCA		0x4000		/* Machine Check Architecture */
 #define X86_FEATURE_CMOV	0x8000		/* Cmov/fcomi */
 
-#ifdef GAS_KNOWS_CR4
-#define read_cr4 "movl %%cr4,%%eax"
-#define write_cr4 "movl %%eax,%%cr4"
-#else
-#define read_cr4 ".byte 0x0f,0x20,0xe0"
-#define write_cr4 ".byte 0x0f,0x22,0xe0"
-#endif
-
 /*
  * Save the cr4 feature set we're using (ie
  * Pentium 4MB enable and PPro Global page
@@ -150,9 +145,9 @@ unsigned long mmu_cr4_features __initdata = 0;
 static inline void set_in_cr4(unsigned long mask)
 {
 	mmu_cr4_features |= mask;
-	__asm__(read_cr4 "\n\t"
+	__asm__("movl %%cr4,%%eax\n\t"
 		"orl %0,%%eax\n\t"
-		write_cr4
+		"movl %%eax,%%cr4\n"
 		: : "irg" (mask)
 		:"ax");
 }
@@ -178,9 +173,6 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
 	 * kernel.
 	 * It may also hold the MP configuration table when we are booting SMP.
 	 */
-#if 0
-	memset((void *) 0, 0, PAGE_SIZE);
-#endif
 #ifdef __SMP__
 	if (!smp_scan_config(0x0,0x400))	/* Scan the bottom 1K for a signature */
 	{
@@ -189,19 +181,23 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
 		 * the error...
 		 */
 		if (!smp_scan_config(639*0x400,0x400))	/* Scan the top 1K of base RAM */
-			smp_scan_config(0xF0000,0x10000);	/* Scan the 64K of bios */
+		{
+			if(!smp_scan_config(0xF0000,0x10000))	/* Scan the 64K of bios */
+			{
+				/*
+				 * If it is an SMP machine we should know now, unless the configuration
+				 * is in an EISA/MCA bus machine with an extended bios data area.
+				 */
+
+				address = *(unsigned short *)phys_to_virt(0x40E);	/* EBDA */
+				address<<=4;	/* Real mode segments to physical */
+				smp_scan_config(address, 0x1000);	/* Scan the EBDA */
+			}
+		}
 	}
-	/*
-	 * If it is an SMP machine we should know now, unless the configuration
-	 * is in an EISA/MCA bus machine with an extended bios data area. I don't
-	 * have such a machine so someone else can fill in the check of the EBDA
-	 * here.
-	 */
+	/* smp_alloc_memory(8192); */
 #endif
-#ifdef TEST_VERIFY_AREA
-	wp_works_ok = 0;
-#endif
 	start_mem = PAGE_ALIGN(start_mem);
 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;
@@ -219,14 +215,14 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
 		 * virtual memory boundary, but that's OK as we won't
 		 * use that memory anyway.
 		 */
-		if (x86_capability & X86_FEATURE_PSE) {
+		if (boot_cpu_data.x86_capability & X86_FEATURE_PSE) {
 			unsigned long __pe;
 
 			set_in_cr4(X86_CR4_PSE);
-			wp_works_ok = 1;
+			boot_cpu_data.wp_works_ok = 1;
 			__pe = _KERNPG_TABLE + _PAGE_4M + __pa(address);
 			/* Make it "global" too if supported */
-			if (x86_capability & X86_FEATURE_PGE) {
+			if (boot_cpu_data.x86_capability & X86_FEATURE_PGE) {
 				set_in_cr4(X86_CR4_PGE);
 				__pe += _PAGE_GLOBAL;
 			}
@@ -235,6 +231,7 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
 			address += 4*1024*1024;
 			continue;
 		}
+
 		/*
 		 * We're on a [34]86, use normal page tables.
 		 * pg_table is physical at this point
@@ -247,6 +244,7 @@ __initfunc(unsigned long paging_init(unsigned long start_mem, unsigned long end_
 			pgd_val(*pg_dir) = _PAGE_TABLE | (unsigned long) pg_table;
 
 		pg_dir++;
+
 		/* now change pg_table to kernel virtual addresses */
 		pg_table = (pte_t *) __va(pg_table);
 		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
@@ -288,14 +286,14 @@ __initfunc(void test_wp_bit(void))
 	pg0[0] = old;
 	local_flush_tlb();
 	current->mm->mmap->vm_start -= PAGE_SIZE;
-	if (wp_works_ok < 0) {
-		wp_works_ok = 0;
+	if (boot_cpu_data.wp_works_ok < 0) {
+		boot_cpu_data.wp_works_ok = 0;
 		printk("No.\n");
 #ifndef CONFIG_M386
 		panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
 #endif
 	} else
-		printk("Ok.\n");
+		printk(".\n");
 }
 
 __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
@@ -377,7 +375,7 @@ __initfunc(void mem_init(unsigned long start_mem, unsigned long end_mem))
 		datapages << (PAGE_SHIFT-10),
 		initpages << (PAGE_SHIFT-10));
 
-	if (wp_works_ok < 0)
+	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 }
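Note on the EBDA scan added to paging_init() above: the BIOS data area word at physical 0x40E holds the extended BIOS data area's real-mode segment, and shifting a real-mode segment left by four bits gives its physical base, which is why the new code does address <<= 4 before calling smp_scan_config() on it. A minimal user-space sketch of that conversion follows; the segment value 0x9FC0 is only an illustrative assumption, not something taken from this patch.

/* sketch: real-mode segment from the BDA word at 0x40E -> physical EBDA base */
#include <stdio.h>

int main(void)
{
	unsigned short ebda_segment = 0x9FC0;	/* hypothetical value read from 0x40E */
	unsigned long ebda_phys = (unsigned long) ebda_segment << 4;	/* segment * 16 */

	/* prints 0x9fc00, i.e. just below the 640K boundary */
	printf("EBDA would start at physical 0x%lx\n", ebda_phys);
	return 0;
}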