From 825423e4c4f18289df2393951cfd2a7a31fc0464 Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Wed, 31 Jan 2001 22:22:27 +0000
Subject: Merge with Linux 2.4.1.

---
 include/asm-i386/bugs.h    | 19 ++++---------------
 include/asm-i386/errno.h   |  1 +
 include/asm-i386/i387.h    | 10 ++++++----
 include/asm-i386/pgtable.h |  6 +++++-
 include/asm-i386/system.h  |  7 -------
 5 files changed, 16 insertions(+), 27 deletions(-)

(limited to 'include/asm-i386')

diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 4e77e5d8a..257f1f879 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -76,26 +76,23 @@ static void __init check_fpu(void)
 	}
 
 	/* Enable FXSR and company _before_ testing for FP problems. */
-#if defined(CONFIG_X86_FXSR) || defined(CONFIG_X86_RUNTIME_FXSR)
 	/*
 	 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
 	 */
-	if (offsetof(struct task_struct, thread.i387.fxsave) & 15)
-		panic("Kernel compiled for PII/PIII+ with FXSR, data not 16-byte aligned!");
-
+	if (offsetof(struct task_struct, thread.i387.fxsave) & 15) {
+		extern void __buggy_fxsr_alignment(void);
+		__buggy_fxsr_alignment();
+	}
 	if (cpu_has_fxsr) {
 		printk(KERN_INFO "Enabling fast FPU save and restore... ");
 		set_in_cr4(X86_CR4_OSFXSR);
 		printk("done.\n");
 	}
-#endif
-#ifdef CONFIG_X86_XMM
 	if (cpu_has_xmm) {
 		printk(KERN_INFO "Enabling unmasked SIMD FPU exception support... ");
 		set_in_cr4(X86_CR4_OSXMMEXCPT);
 		printk("done.\n");
 	}
-#endif
 
 	/* Test for the divl bug.. */
 	__asm__("fninit\n\t"
@@ -203,14 +200,6 @@ static void __init check_config(void)
 	    && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
 		panic("Kernel compiled for PMMX+, assumes a local APIC without the read-before-write bug!");
 #endif
-
-/*
- * If we configured ourselves for FXSR, we'd better have it.
- */
-#ifdef CONFIG_X86_FXSR
-	if (!cpu_has_fxsr)
-		panic("Kernel compiled for PII/PIII+, requires FXSR feature!");
-#endif
 }
 
 static void __init check_bugs(void)
diff --git a/include/asm-i386/errno.h b/include/asm-i386/errno.h
index 7cf599f4d..d22c4472b 100644
--- a/include/asm-i386/errno.h
+++ b/include/asm-i386/errno.h
@@ -128,5 +128,6 @@
 #define	ENOMEDIUM	123	/* No medium found */
 #define	EMEDIUMTYPE	124	/* Wrong medium type */
+#define	EHASHCOLLISION	125	/* Number of hash collisions exceeds maximum generation counter value. */
 
 #endif
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index 04ba635e5..1cf8dc2ab 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -23,6 +23,10 @@ extern void init_fpu(void);
 extern void save_init_fpu( struct task_struct *tsk );
 extern void restore_fpu( struct task_struct *tsk );
 
+extern void kernel_fpu_begin(void);
+#define kernel_fpu_end() stts()
+
+
 #define unlazy_fpu( tsk ) do { \
 	if ( tsk->flags & PF_USEDFPU ) \
 		save_init_fpu( tsk ); \
@@ -50,10 +54,8 @@ extern void set_fpu_twd( struct task_struct *tsk, unsigned short twd );
 extern void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr );
 
 #define load_mxcsr( val ) do { \
-	if ( cpu_has_xmm ) { \
-		unsigned long __mxcsr = ((unsigned long)(val) & 0xffff); \
-		asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
-	} \
+	unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
+	asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
 } while (0)
 
 /*
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index bf32a7449..a25f3bcfd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -140,7 +140,11 @@ extern unsigned long empty_zero_page[1024];
 #define VMALLOC_START	(((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
 						~(VMALLOC_OFFSET-1))
 #define VMALLOC_VMADDR(x) ((unsigned long)(x))
-#define VMALLOC_END	(FIXADDR_START)
+#if CONFIG_HIGHMEM
+# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
+#else
+# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
+#endif
 
 /*
  * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index d3b01ab8b..52e24682e 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -267,15 +267,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
  * I expect future Intel CPU's to have a weaker ordering,
  * but I'd also expect them to finally get their act together
  * and add some real memory barriers if so.
- *
- * The Pentium III does add a real memory barrier with the
- * sfence instruction, so we use that where appropriate.
  */
-#ifndef CONFIG_X86_XMM
 #define mb() 	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
-#else
-#define mb()	__asm__ __volatile__ ("sfence": : :"memory")
-#endif
 #define rmb()	mb()
 #define wmb()	__asm__ __volatile__ ("": : :"memory")
 
-- 
cgit v1.2.3