diff options
author | Ralf Baechle <ralf@linux-mips.org> | 2000-11-23 02:00:47 +0000 |
---|---|---|
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-11-23 02:00:47 +0000 |
commit | 06615f62b17d7de6e12d2f5ec6b88cf30af08413 (patch) | |
tree | 8766f208847d4876a6db619aebbf54d53b76eb44 /arch/i386 | |
parent | fa9bdb574f4febb751848a685d9a9017e04e1d53 (diff) |
Merge with Linux 2.4.0-test10.
Diffstat (limited to 'arch/i386')
-rw-r--r-- | arch/i386/Makefile | 29 | ||||
-rw-r--r-- | arch/i386/boot/setup.S | 9 | ||||
-rw-r--r-- | arch/i386/config.in | 26 | ||||
-rw-r--r-- | arch/i386/defconfig | 5 | ||||
-rw-r--r-- | arch/i386/kernel/acpi.c | 45 | ||||
-rw-r--r-- | arch/i386/kernel/apm.c | 2 | ||||
-rw-r--r-- | arch/i386/kernel/bluesmoke.c | 4 | ||||
-rw-r--r-- | arch/i386/kernel/entry.S | 16 | ||||
-rw-r--r-- | arch/i386/kernel/i387.c | 50 | ||||
-rw-r--r-- | arch/i386/kernel/i8259.c | 4 | ||||
-rw-r--r-- | arch/i386/kernel/microcode.c | 10 | ||||
-rw-r--r-- | arch/i386/kernel/mtrr.c | 2 | ||||
-rw-r--r-- | arch/i386/kernel/pci-irq.c | 126 | ||||
-rw-r--r-- | arch/i386/kernel/ptrace.c | 16 | ||||
-rw-r--r-- | arch/i386/kernel/semaphore.c | 6 | ||||
-rw-r--r-- | arch/i386/kernel/setup.c | 284 | ||||
-rw-r--r-- | arch/i386/kernel/smp.c | 2 | ||||
-rw-r--r-- | arch/i386/kernel/time.c | 8 | ||||
-rw-r--r-- | arch/i386/kernel/traps.c | 6 | ||||
-rw-r--r-- | arch/i386/mm/fault.c | 76 | ||||
-rw-r--r-- | arch/i386/mm/init.c | 55 | ||||
-rw-r--r-- | arch/i386/mm/ioremap.c | 1 |
22 files changed, 477 insertions, 305 deletions
diff --git a/arch/i386/Makefile b/arch/i386/Makefile index 7735399cb..96da8d33b 100644 --- a/arch/i386/Makefile +++ b/arch/i386/Makefile @@ -23,62 +23,59 @@ LINKFLAGS =-T $(TOPDIR)/arch/i386/vmlinux.lds $(LDFLAGS) CFLAGS += -pipe -# only work around strength reduction bug(s) on older gcc versions -CFLAGS += $(shell if ! $(CC) -march=i486 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-fno-strength-reduce"; fi) - # prevent gcc from keeping the stack 16 byte aligned CFLAGS += $(shell if $(CC) -mpreferred-stack-boundary=2 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-mpreferred-stack-boundary=2"; fi) ifdef CONFIG_M386 -CFLAGS += $(shell if $(CC) -march=i386 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i386"; else echo "-m386"; fi) +CFLAGS += -march=i386 endif ifdef CONFIG_M486 -CFLAGS += $(shell if $(CC) -march=i486 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i486"; else echo "-m486"; fi) +CFLAGS += -march=i486 endif ifdef CONFIG_M586 -CFLAGS += $(shell if $(CC) -march=i586 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i586"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_M586TSC -CFLAGS += $(shell if $(CC) -march=i586 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i586"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_M586MMX -CFLAGS += $(shell if $(CC) -march=i586 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i586"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_M686 -CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi) +CFLAGS += -march=i686 endif ifdef CONFIG_M686FXSR -CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi) +CFLAGS += -march=i686 endif ifdef CONFIG_MK6 -CFLAGS += $(shell if $(CC) -march=k6 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=k6"; fi) +CFLAGS += $(shell if $(CC) -march=k6 -S -o 
/dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=k6"; else echo "-march=i586"; fi) endif ifdef CONFIG_MK7 -CFLAGS += $(shell if $(CC) -march=athlon -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=athlon"; else if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686 -malign-functions=4"; fi fi) +CFLAGS += $(shell if $(CC) -march=athlon -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=athlon"; else echo "-march=i686 -malign-functions=4"; fi) endif ifdef CONFIG_MCRUSOE -CFLAGS += $(shell if $(CC) -march=i586 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i586"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_MWINCHIPC6 -CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_MWINCHIP2 -CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi) +CFLAGS += -march=i586 endif ifdef CONFIG_MWINCHIP3D -CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi) +CFLAGS += -march=i586 endif HEAD := arch/i386/kernel/head.o arch/i386/kernel/init_task.o diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S index c91d8ff00..0da4e866e 100644 --- a/arch/i386/boot/setup.S +++ b/arch/i386/boot/setup.S @@ -641,6 +641,15 @@ end_move_self: # now we are at the right place outb %al, $0x60 call empty_8042 +# +# You must preserve the other bits here. Otherwise embarrassing things +# like laptops powering off on boot happen. Corrected version by Kira +# Brown from Linux 2.2 +# + inb $0x92, %al # + orb $02, %al # "fast A20" version + outb %al, $0x92 # some chips have only this + # wait until a20 really *is* enabled; it can take a fair amount of # time on certain systems; Toshiba Tecras are known to have this # problem. 
The memory location used here (0x200) is the int 0x80 diff --git a/arch/i386/config.in b/arch/i386/config.in index 6d1279ed7..327aa736d 100644 --- a/arch/i386/config.in +++ b/arch/i386/config.in @@ -45,7 +45,7 @@ choice 'Processor family' \ # if [ "$CONFIG_M386" = "y" ]; then define_bool CONFIG_X86_CMPXCHG n - define_int CONFIG_X86_L1_CACHE_BYTES 16 + define_int CONFIG_X86_L1_CACHE_SHIFT 4 else define_bool CONFIG_X86_WP_WORKS_OK y define_bool CONFIG_X86_INVLPG y @@ -54,37 +54,37 @@ else define_bool CONFIG_X86_POPAD_OK y fi if [ "$CONFIG_M486" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 16 + define_int CONFIG_X86_L1_CACHE_SHIFT 4 define_bool CONFIG_X86_USE_STRING_486 y define_bool CONFIG_X86_ALIGNMENT_16 y fi if [ "$CONFIG_M586" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_USE_STRING_486 y define_bool CONFIG_X86_ALIGNMENT_16 y fi if [ "$CONFIG_M586TSC" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_USE_STRING_486 y define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_TSC y fi if [ "$CONFIG_M586MMX" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_USE_STRING_486 y define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_GOOD_APIC y fi if [ "$CONFIG_M686" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi if [ "$CONFIG_M686FXSR" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_PGE y @@ -93,13 +93,13 @@ if [ "$CONFIG_M686FXSR" = "y" ]; then define_bool CONFIG_X86_XMM y fi if [ "$CONFIG_MK6" 
= "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi if [ "$CONFIG_MK7" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 64 + define_int CONFIG_X86_L1_CACHE_SHIFT 6 define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_GOOD_APIC y define_bool CONFIG_X86_USE_3DNOW y @@ -107,22 +107,22 @@ if [ "$CONFIG_MK7" = "y" ]; then define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi if [ "$CONFIG_MCRUSOE" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_TSC y fi if [ "$CONFIG_MWINCHIPC6" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi if [ "$CONFIG_MWINCHIP2" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y fi if [ "$CONFIG_MWINCHIP3D" = "y" ]; then - define_int CONFIG_X86_L1_CACHE_BYTES 32 + define_int CONFIG_X86_L1_CACHE_SHIFT 5 define_bool CONFIG_X86_ALIGNMENT_16 y define_bool CONFIG_X86_TSC y define_bool CONFIG_X86_USE_PPRO_CHECKSUM y diff --git a/arch/i386/defconfig b/arch/i386/defconfig index 7dddf3057..b22c770db 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig @@ -39,7 +39,7 @@ CONFIG_X86_INVLPG=y CONFIG_X86_CMPXCHG=y CONFIG_X86_BSWAP=y CONFIG_X86_POPAD_OK=y -CONFIG_X86_L1_CACHE_BYTES=32 +CONFIG_X86_L1_CACHE_SHIFT=5 CONFIG_X86_TSC=y CONFIG_X86_GOOD_APIC=y CONFIG_X86_PGE=y @@ -230,7 +230,9 @@ CONFIG_IDEPCI_SHARE_IRQ=y # CONFIG_BLK_DEV_OPTI621 is not set # CONFIG_BLK_DEV_PDC202XX is not set # CONFIG_PDC202XX_BURST is not set +# CONFIG_BLK_DEV_OSB4 is not set # CONFIG_BLK_DEV_SIS5513 is not set +# CONFIG_BLK_DEV_SLC90E66 is not set # CONFIG_BLK_DEV_TRM290 is 
not set # CONFIG_BLK_DEV_VIA82CXXX is not set # CONFIG_IDE_CHIPSETS is not set @@ -352,6 +354,7 @@ CONFIG_NET_ETHERNET=y # CONFIG_NET_VENDOR_SMC is not set # CONFIG_NET_VENDOR_RACAL is not set # CONFIG_DEPCA is not set +# CONFIG_HP100 is not set # CONFIG_NET_ISA is not set CONFIG_NET_PCI=y # CONFIG_PCNET32 is not set diff --git a/arch/i386/kernel/acpi.c b/arch/i386/kernel/acpi.c index 211a6ce50..f9424d105 100644 --- a/arch/i386/kernel/acpi.c +++ b/arch/i386/kernel/acpi.c @@ -27,6 +27,8 @@ * - check copy*user return * - get rid of check_region * - get rid of verify_area + * Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/28 + * - do proper release on failure in acpi_claim_ioports and acpi_init */ #include <linux/config.h> @@ -81,6 +83,7 @@ static int acpi_do_sleep(ctl_table *ctl, struct file *file, void *buffer, size_t *len); +static void acpi_release(unsigned long start, unsigned long size); static struct ctl_table_header *acpi_sysctl = NULL; @@ -1300,15 +1303,28 @@ static int acpi_claim(unsigned long start, unsigned long size) static int acpi_claim_ioports(struct acpi_facp *facp) { // we don't get a guarantee of contiguity for any of the ACPI registers - if (acpi_claim(facp->pm1a_evt, facp->pm1_evt_len) - || acpi_claim(facp->pm1b_evt, facp->pm1_evt_len) - || acpi_claim(facp->pm1a_cnt, facp->pm1_cnt_len) - || acpi_claim(facp->pm1b_cnt, facp->pm1_cnt_len) - || acpi_claim(facp->pm_tmr, facp->pm_tm_len) - || acpi_claim(facp->gpe0, facp->gpe0_len) - || acpi_claim(facp->gpe1, facp->gpe1_len)) - return -EBUSY; + if (acpi_claim(facp->pm1a_evt, facp->pm1_evt_len)) + goto return_ebusy; + if (acpi_claim(facp->pm1b_evt, facp->pm1_evt_len)) + goto release_pm1a_evt; + if (acpi_claim(facp->pm1a_cnt, facp->pm1_cnt_len)) + goto release_pm1b_evt; + if (acpi_claim(facp->pm1b_cnt, facp->pm1_cnt_len)) + goto release_pm1a_cnt; + if (acpi_claim(facp->pm_tmr, facp->pm_tm_len)) + goto release_pm1b_cnt; + if (acpi_claim(facp->gpe0, facp->gpe0_len)) + goto release_pm_tmr; + if 
(acpi_claim(facp->gpe1, facp->gpe1_len)) + goto release_gpe0; return 0; +release_gpe0: acpi_release(facp->gpe0, facp->gpe0_len); +release_pm_tmr: acpi_release(facp->pm_tmr, facp->pm_tm_len); +release_pm1b_cnt: acpi_release(facp->pm1b_cnt, facp->pm1_cnt_len); +release_pm1a_cnt: acpi_release(facp->pm1a_cnt, facp->pm1_cnt_len); +release_pm1b_evt: acpi_release(facp->pm1b_evt, facp->pm1_evt_len); +release_pm1a_evt: acpi_release(facp->pm1a_evt, facp->pm1_evt_len); +return_ebusy: return -EBUSY; } /* @@ -1523,8 +1539,10 @@ static int acpi_do_table(ctl_table *ctl, error = -ENOMEM; } if (data) - if (copy_from_user(data, buffer, size)) + if (copy_from_user(data, buffer, size)) { + acpi_destroy_table(info); error = -EFAULT; + } write_unlock(&acpi_do_table_lock); } @@ -1838,7 +1856,7 @@ int __init acpi_init(void) &acpi_facp)) { printk(KERN_ERR "ACPI: SCI (IRQ%d) allocation failed\n", facp->sci_int); - goto err_out; + goto cleanup_ioports; } #ifndef CONFIG_ACPI_S1_SLEEP @@ -1846,6 +1864,8 @@ int __init acpi_init(void) #endif acpi_sysctl = register_sysctl_table(acpi_dir_table, 1); + if (!acpi_sysctl) + goto cleanup_irq; pm_power_off = acpi_power_off; @@ -1863,7 +1883,10 @@ int __init acpi_init(void) pm_idle = acpi_idle; return 0; - +cleanup_irq: + free_irq(facp->sci_int, &acpi_facp); +cleanup_ioports: + acpi_release_ioports(facp); err_out: if (pci_driver_registered) pci_unregister_driver(&acpi_driver); diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c index 9a4084292..b0aa2a7c3 100644 --- a/arch/i386/kernel/apm.c +++ b/arch/i386/kernel/apm.c @@ -1427,7 +1427,7 @@ static int apm(void *unused) atomic_inc(¤t->files->count); daemonize(); - strcpy(current->comm, "kapmd"); + strcpy(current->comm, "kapm-idled"); sigfillset(¤t->blocked); current->tty = NULL; /* get rid of controlling tty */ diff --git a/arch/i386/kernel/bluesmoke.c b/arch/i386/kernel/bluesmoke.c index 0fecf5851..ee6966d5f 100644 --- a/arch/i386/kernel/bluesmoke.c +++ b/arch/i386/kernel/bluesmoke.c @@ -8,7 +8,7 
@@ #include <asm/processor.h> #include <asm/msr.h> -static int banks = 0; +static int banks; void mcheck_fault(void) { @@ -71,7 +71,7 @@ void mcheck_init(void) u32 l, h; int i; struct cpuinfo_x86 *c; - static int done=0; + static int done; c=cpu_data+smp_processor_id(); diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index 57aada831..4ae0ed2ee 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -205,7 +205,7 @@ ENTRY(system_call) ENTRY(ret_from_sys_call) #ifdef CONFIG_SMP movl processor(%ebx),%eax - shll $5,%eax + shll $CONFIG_X86_L1_CACHE_SHIFT,%eax movl SYMBOL_NAME(irq_stat)(,%eax),%ecx # softirq_active testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx # softirq_mask #else @@ -261,7 +261,7 @@ ret_from_exception: #ifdef CONFIG_SMP GET_CURRENT(%ebx) movl processor(%ebx),%eax - shll $5,%eax + shll $CONFIG_X86_L1_CACHE_SHIFT,%eax movl SYMBOL_NAME(irq_stat)(,%eax),%ecx # softirq_active testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx # softirq_mask #else @@ -305,16 +305,18 @@ error_code: pushl %ebx cld movl %es,%ecx - xchgl %eax, ORIG_EAX(%esp) # orig_eax (get the error code. ) + movl ORIG_EAX(%esp), %esi # get the error code + movl ES(%esp), %edi # get the function address + movl %eax, ORIG_EAX(%esp) + movl %ecx, ES(%esp) movl %esp,%edx - xchgl %ecx, ES(%esp) # get the address and save es. - pushl %eax # push the error code - pushl %edx + pushl %esi # push the error code + pushl %edx # push the pt_regs pointer movl $(__KERNEL_DS),%edx movl %edx,%ds movl %edx,%es GET_CURRENT(%ebx) - call *%ecx + call *%edi addl $8,%esp jmp ret_from_exception diff --git a/arch/i386/kernel/i387.c b/arch/i386/kernel/i387.c index b264c54a3..c3d052e8a 100644 --- a/arch/i386/kernel/i387.c +++ b/arch/i386/kernel/i387.c @@ -33,22 +33,24 @@ #endif /* - * FPU lazy state save handling. 
+ * The _current_ task is using the FPU for the first time + * so initialize it and set the mxcsr to its default + * value at reset if we support FXSR and then + * remember the current task has used the FPU. */ - -void save_fpu( struct task_struct *tsk ) +void init_fpu(void) { - if ( HAVE_FXSR ) { - asm volatile( "fxsave %0 ; fwait" - : "=m" (tsk->thread.i387.fxsave) ); - } else { - asm volatile( "fnsave %0 ; fwait" - : "=m" (tsk->thread.i387.fsave) ); - } - tsk->flags &= ~PF_USEDFPU; - stts(); + __asm__("fninit"); + if ( HAVE_FXSR ) + load_mxcsr(0x1f80); + + current->used_math = 1; } +/* + * FPU lazy state save handling. + */ + void save_init_fpu( struct task_struct *tsk ) { if ( HAVE_FXSR ) { @@ -79,16 +81,16 @@ void restore_fpu( struct task_struct *tsk ) static inline unsigned short twd_i387_to_fxsr( unsigned short twd ) { - unsigned short ret = 0; - int i; - - for ( i = 0 ; i < 8 ; i++ ) { - if ( (twd & 0x3) != 0x3 ) { - ret |= (1 << i); - } - twd = twd >> 2; - } - return ret; + unsigned int tmp; /* to avoid 16 bit prefixes in the code */ + + /* Transform each pair of bits into 01 (valid) or 00 (empty) */ + tmp = ~twd; + tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */ + /* and move the valid bits to the lower byte. 
*/ + tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */ + tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */ + tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */ + return tmp; } static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave ) @@ -105,8 +107,8 @@ static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave if ( twd & 0x1 ) { st = (struct _fpxreg *) FPREG_ADDR( fxsave, i ); - switch ( st->exponent ) { - case 0xffff: + switch ( st->exponent & 0x7fff ) { + case 0x7fff: tag = 2; /* Special */ break; case 0x0000: diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c index 46e270774..df377c5b4 100644 --- a/arch/i386/kernel/i8259.c +++ b/arch/i386/kernel/i8259.c @@ -178,7 +178,7 @@ static unsigned int cached_irq_mask = 0xffff; * this 'mixed mode' IRQ handling costs nothing because it's only used * at IRQ setup time. */ -unsigned long io_apic_irqs = 0; +unsigned long io_apic_irqs; void disable_8259A_irq(unsigned int irq) { @@ -312,7 +312,7 @@ spurious_8259A_irq: goto handle_real_irq; { - static int spurious_irq_mask = 0; + static int spurious_irq_mask; /* * At this point we can be sure the IRQ is spurious, * lets ACK and report it. 
[once per IRQ] diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index 80536a1c4..5f9925495 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c @@ -177,7 +177,7 @@ static void do_update_one(void *unused) req->err = 1; /* assume the worst */ - if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6){ + if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6){ printk(KERN_ERR "microcode: CPU%d not an Intel P6\n", cpu_num); return; } @@ -198,11 +198,15 @@ static void do_update_one(void *unused) wrmsr(0x8B, 0, 0); __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx"); rdmsr(0x8B, val[0], rev); - if (microcode[i].rev <= rev) { + if (microcode[i].rev < rev) { printk(KERN_ERR "microcode: CPU%d not 'upgrading' to earlier revision" " %d (current=%d)\n", cpu_num, microcode[i].rev, rev); - } else { + } else if (microcode[i].rev == rev) { + printk(KERN_ERR + "microcode: CPU%d already up-to-date (revision %d)\n", + cpu_num, rev); + } else { int sum = 0; struct microcode *m = µcode[i]; unsigned int *sump = (unsigned int *)(m+1); diff --git a/arch/i386/kernel/mtrr.c b/arch/i386/kernel/mtrr.c index abe198315..045374d50 100644 --- a/arch/i386/kernel/mtrr.c +++ b/arch/i386/kernel/mtrr.c @@ -1581,7 +1581,7 @@ static struct proc_dir_entry *proc_root_mtrr; # endif /* CONFIG_PROC_FS */ -static devfs_handle_t devfs_handle = NULL; +static devfs_handle_t devfs_handle; static void compute_ascii (void) { diff --git a/arch/i386/kernel/pci-irq.c b/arch/i386/kernel/pci-irq.c index b98bf4748..9a5fe8958 100644 --- a/arch/i386/kernel/pci-irq.c +++ b/arch/i386/kernel/pci-irq.c @@ -125,81 +125,143 @@ static void eisa_set_level_irq(unsigned int irq) } } +/* + * Common IRQ routing practice: nybbles in config space, + * offset by some magic constant. + */ +static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr) +{ + u8 x; + unsigned reg = offset + (nr >> 1); + + pci_read_config_byte(router, reg, &x); + return (nr & 1) ? 
(x >> 4) : (x & 0xf); +} + +static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) +{ + u8 x; + unsigned reg = offset + (nr >> 1); + + pci_read_config_byte(router, reg, &x); + x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); + pci_write_config_byte(router, reg, x); +} + +/* + * ALI pirq entries are damn ugly, and completely undocumented. + * This has been figured out from pirq tables, and it's not a pretty + * picture. + */ static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { static unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; + + switch (pirq) { + case 0x00: + return 0; + default: + return irqmap[read_config_nybble(router, 0x48, pirq-1)]; + case 0xfe: + return irqmap[read_config_nybble(router, 0x44, 0)]; + case 0xff: + return irqmap[read_config_nybble(router, 0x75, 0)]; + } +} + +static void pirq_ali_ide_interrupt(struct pci_dev *router, unsigned reg, unsigned val, unsigned irq) +{ u8 x; - unsigned reg; - pirq--; - reg = 0x48 + (pirq >> 1); pci_read_config_byte(router, reg, &x); - return irqmap[(pirq & 1) ? (x >> 4) : (x & 0x0f)]; + x = (x & 0xe0) | val; /* clear the level->edge transform */ + pci_write_config_byte(router, reg, x); + eisa_set_level_irq(irq); } static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { static unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 }; unsigned int val = irqmap[irq]; - pirq--; + if (val) { - u8 x; - unsigned reg = 0x48 + (pirq >> 1); - pci_read_config_byte(router, reg, &x); - x = (pirq & 1) ? 
((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val); - pci_write_config_byte(router, reg, x); + switch (pirq) { + default: + write_config_nybble(router, 0x48, pirq-1, val); + break; + case 0xfe: + pirq_ali_ide_interrupt(router, 0x44, val, irq); + break; + case 0xff: + pirq_ali_ide_interrupt(router, 0x75, val, irq); + break; + } eisa_set_level_irq(irq); return 1; } return 0; } +/* + * The Intel PIIX4 pirq rules are fairly simple: "pirq" is + * just a pointer to the config space. However, something + * funny is going on with 0xfe/0xff, and apparently they + * should handle IDE irq routing. Ignore them for now. + */ static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { u8 x; - pci_read_config_byte(router, pirq, &x); - return (x < 16) ? x : 0; + + switch (pirq) { + case 0xfe: + case 0xff: + return 0; + default: + pci_read_config_byte(router, pirq, &x); + return (x < 16) ? x : 0; + } } static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { - pci_write_config_byte(router, pirq, irq); - return 1; + switch (pirq) { + case 0xfe: + case 0xff: + return 0; + default: + pci_write_config_byte(router, pirq, irq); + return 1; + } } +/* + * The VIA pirq rules are nibble-based, like ALI, + * but without the ugly irq number munging or the + * strange special cases.. + */ static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { - u8 x; - int reg = 0x55 + (pirq >> 1); - pci_read_config_byte(router, reg, &x); - return (pirq & 1) ? (x >> 4) : (x & 0x0f); + return read_config_nybble(router, 0x55, pirq); } static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { - u8 x; - int reg = 0x55 + (pirq >> 1); - pci_read_config_byte(router, reg, &x); - x = (pirq & 1) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | irq); - pci_write_config_byte(router, reg, x); + write_config_nybble(router, 0x55, pirq, irq); return 1; } +/* + * OPTI: high four bits are nibble pointer.. 
+ * I wonder what the low bits do? + */ static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq) { - u8 x; - int reg = 0xb8 + (pirq >> 5); - pci_read_config_byte(router, reg, &x); - return (pirq & 0x10) ? (x >> 4) : (x & 0x0f); + return read_config_nybble(router, 0xb8, pirq >> 4); } static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) { - u8 x; - int reg = 0xb8 + (pirq >> 5); - pci_read_config_byte(router, reg, &x); - x = (pirq & 0x10) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | irq); - pci_write_config_byte(router, reg, x); + write_config_nybble(router, 0xb8, pirq >> 4, irq); return 1; } diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index a9b11561d..b374f8e99 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c @@ -351,7 +351,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) ret = -EIO; if ((unsigned long) data > _NSIG) break; - child->ptrace &= ~(PT_PTRACED|PT_TRACESYS); + child->ptrace = 0; child->exit_code = data; write_lock_irq(&tasklist_lock); REMOVE_LINKS(child); @@ -451,6 +451,15 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data) break; } + case PTRACE_SETOPTIONS: { + if (data & PTRACE_O_TRACESYSGOOD) + child->ptrace |= PT_TRACESYSGOOD; + else + child->ptrace &= ~PT_TRACESYSGOOD; + ret = 0; + break; + } + default: ret = -EIO; break; @@ -467,7 +476,10 @@ asmlinkage void syscall_trace(void) if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) != (PT_PTRACED|PT_TRACESYS)) return; - current->exit_code = SIGTRAP; + /* the 0x80 provides a way for the tracing parent to distinguish + between a syscall stop and SIGTRAP delivery */ + current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 
0x80 : 0); current->state = TASK_STOPPED; notify_parent(current, SIGCHLD); schedule(); diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c index febc592ae..d70e9e569 100644 --- a/arch/i386/kernel/semaphore.c +++ b/arch/i386/kernel/semaphore.c @@ -20,8 +20,8 @@ /* * Semaphores are implemented using a two-way counter: * The "count" variable is decremented for each process - * that tries to aquire the semaphore, while the "sleeping" - * variable is a count of such aquires. + * that tries to acquire the semaphore, while the "sleeping" + * variable is a count of such acquires. * * Notably, the inline "up()" and "down()" functions can * efficiently test if they need to do any extra work (up @@ -373,7 +373,7 @@ struct rw_semaphore *down_write_failed(struct rw_semaphore *sem) while (atomic_read(&sem->count) < 0) { set_task_state(tsk, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); if (atomic_read(&sem->count) >= 0) - break; /* we must attempt to aquire or bias the lock */ + break; /* we must attempt to acquire or bias the lock */ schedule(); } diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index e765a0e94..7ac7fdb36 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -51,6 +51,10 @@ * Forward port AMD Duron errata T13 from 2.2.17pre * Dave Jones <davej@suse.de>, August 2000 * + * Forward port lots of fixes/improvements from 2.2.18pre + * Cyrix III, Pentium IV support. + * Dave Jones <davej@suse.de>, October 2000 + * */ /* @@ -137,6 +141,8 @@ extern int root_mountflags; extern char _text, _etext, _edata, _end; extern unsigned long cpu_khz; +static int disable_x86_serial_nr __initdata = 1; + /* * This is set up by the setup-routine at boot-time */ @@ -844,12 +850,6 @@ static int __init get_model_name(struct cpuinfo_x86 *c) { unsigned int n, dummy, *v; - /* - * Actually we must have cpuid or we could never have - * figured out that this was AMD/Cyrix/Transmeta - * from the vendor info :-). 
- */ - cpuid(0x80000000, &n, &dummy, &dummy, &dummy); if (n < 0x80000004) return 0; @@ -862,29 +862,45 @@ static int __init get_model_name(struct cpuinfo_x86 *c) return 1; } + +static void __init display_cacheinfo(struct cpuinfo_x86 *c) +{ + unsigned int n, dummy, ecx, edx; + + cpuid(0x80000000, &n, &dummy, &dummy, &dummy); + + if (n >= 0x80000005) { + cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); + printk("CPU: L1 I Cache: %dK L1 D Cache: %dK (%d bytes/line)\n", + edx>>24, ecx>>24, edx&0xFF); + c->x86_cache_size=(ecx>>24)+(edx>>24); + } + + if (n < 0x80000006) /* Cyrix just has large L1. */ + return; + + cpuid(0x80000006, &dummy, &dummy, &ecx, &edx); + c->x86_cache_size = ecx >>16; + + /* AMD errata T13 (order #21922) */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 3 && + boot_cpu_data.x86_mask == 0) + { + c->x86_cache_size = 64; + } + printk("CPU: L2 Cache: %dK\n", ecx>>16); +} + + static int __init amd_model(struct cpuinfo_x86 *c) { u32 l, h; unsigned long flags; - unsigned int n, dummy, ecx, edx; int mbytes = max_mapnr >> (20-PAGE_SHIFT); int r=get_model_name(c); - /* - * Set MTRR capability flag if appropriate - */ - if(boot_cpu_data.x86 == 5) { - if((boot_cpu_data.x86_model == 13) || - (boot_cpu_data.x86_model == 9) || - ((boot_cpu_data.x86_model == 8) && - (boot_cpu_data.x86_mask >= 8))) - c->x86_capability |= X86_FEATURE_MTRR; - } - - /* - * Now do the cache operations. 
- */ switch(c->x86) { case 5: @@ -923,6 +939,7 @@ static int __init amd_model(struct cpuinfo_x86 *c) if(mbytes>4092) mbytes=4092; + rdmsr(0xC0000082, l, h); if((l&0xFFFF0000)==0) { @@ -935,35 +952,23 @@ static int __init amd_model(struct cpuinfo_x86 *c) printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n", mbytes); } + + /* Set MTRR capability flag if appropriate */ + if((boot_cpu_data.x86_model == 13) || + (boot_cpu_data.x86_model == 9) || + ((boot_cpu_data.x86_model == 8) && + (boot_cpu_data.x86_mask >= 8))) + c->x86_capability |= X86_FEATURE_MTRR; break; } + break; + case 6: /* An Athlon/Duron. We can trust the BIOS probably */ break; } - cpuid(0x80000000, &n, &dummy, &dummy, &dummy); - if (n >= 0x80000005) { - cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); - printk("CPU: L1 I Cache: %dK L1 D Cache: %dK (%d bytes/line)\n", - edx>>24, ecx>>24, edx&0xFF); - c->x86_cache_size=(ecx>>24)+(edx>>24); - } - - /* AMD errata T13 (order #21922) */ - if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 3 && - boot_cpu_data.x86_mask == 0) - { - c->x86_cache_size = 64; - printk("CPU: L2 Cache: 64K\n"); - } else { - if (n >= 0x80000006) { - cpuid(0x80000006, &dummy, &dummy, &ecx, &edx); - printk("CPU: L2 Cache: %dK\n", ecx>>16); - c->x86_cache_size=(ecx>>16); - } - } - + display_cacheinfo(c); return r; } @@ -1097,9 +1102,8 @@ static void __init cyrix_model(struct cpuinfo_x86 *c) bug to do with 'hlt'. I've not seen any boards using VSA2 and X doesn't seem to support it either so who cares 8). VSA1 we work around however. 
- */ - + printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n"); isa_dma_bridge_buggy = 2; #endif @@ -1183,79 +1187,104 @@ static void __init centaur_model(struct cpuinfo_x86 *c) u32 lo,hi,newlo; u32 aa,bb,cc,dd; - switch(c->x86_model) { - case 4: - name="C6"; - fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; - fcr_clr=DPDC; - printk("Disabling bugged TSC.\n"); - c->x86_capability &= ~X86_FEATURE_TSC; - break; - case 8: - switch(c->x86_mask) { - default: - name="2"; - break; - case 7 ... 9: - name="2A"; - break; - case 10 ... 15: - name="2B"; + switch (c->x86) { + + case 5: + switch(c->x86_model) { + case 4: + name="C6"; + fcr_set=ECX8|DSMC|EDCTLB|EMMX|ERETSTK; + fcr_clr=DPDC; + printk("Disabling bugged TSC.\n"); + c->x86_capability &= ~X86_FEATURE_TSC; + break; + case 8: + switch(c->x86_mask) { + default: + name="2"; + break; + case 7 ... 9: + name="2A"; + break; + case 10 ... 15: + name="2B"; + break; + } + fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; + fcr_clr=DPDC; + break; + case 9: + name="3"; + fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; + fcr_clr=DPDC; + break; + case 10: + name="4"; + /* no info on the WC4 yet */ + break; + default: + name="??"; + } + + /* get FCR */ + rdmsr(0x107, lo, hi); + + newlo=(lo|fcr_set) & (~fcr_clr); + + if (newlo!=lo) { + printk("Centaur FCR was 0x%X now 0x%X\n", lo, newlo ); + wrmsr(0x107, newlo, hi ); + } else { + printk("Centaur FCR is 0x%X\n",lo); + } + /* Emulate MTRRs using Centaur's MCR. */ + c->x86_capability |= X86_FEATURE_MTRR; + /* Report CX8 */ + c->x86_capability |= X86_FEATURE_CX8; + /* Set 3DNow! on Winchip 2 and above. */ + if (c->x86_model >=8) + c->x86_capability |= X86_FEATURE_AMD3D; + /* See if we can find out some more. */ + cpuid(0x80000000,&aa,&bb,&cc,&dd); + if (aa>=0x80000005) { /* Yes, we can. */ + cpuid(0x80000005,&aa,&bb,&cc,&dd); + /* Add L1 data and code cache sizes. 
*/ + c->x86_cache_size = (cc>>24)+(dd>>24); + } + sprintf( c->x86_model_id, "WinChip %s", name ); break; - } - fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr=DPDC; - break; - case 9: - name="3"; - fcr_set=ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|E2MMX|EAMD3D; - fcr_clr=DPDC; - break; - case 10: - name="4"; - /* no info on the WC4 yet */ - break; - default: - name="??"; - } - /* get FCR */ - rdmsr(0x107, lo, hi); + case 6: + switch (c->x86_model) { + case 6: /* Cyrix III */ + rdmsr (0x1107, lo, hi); + lo |= (1<<1 | 1<<7); /* Report CX8 & enable PGE */ + wrmsr (0x1107, lo, hi); - newlo=(lo|fcr_set) & (~fcr_clr); + c->x86_capability |= X86_FEATURE_CX8; + rdmsr (0x80000001, lo, hi); + if (hi & (1<<31)) + c->x86_capability |= X86_FEATURE_AMD3D; - if (newlo!=lo) { - printk("Centaur FCR was 0x%X now 0x%X\n", lo, newlo ); - wrmsr(0x107, newlo, hi ); - } else { - printk("Centaur FCR is 0x%X\n",lo); + get_model_name(c); + display_cacheinfo(c); + break; + } + break; } - /* Emulate MTRRs using Centaur's MCR. */ - c->x86_capability |= X86_FEATURE_MTRR; - /* Report CX8 */ - c->x86_capability |= X86_FEATURE_CX8; - /* Set 3DNow! on Winchip 2 and above. */ - if (c->x86_model >=8) - c->x86_capability |= X86_FEATURE_AMD3D; - /* See if we can find out some more. */ - cpuid(0x80000000,&aa,&bb,&cc,&dd); - if (aa>=0x80000005) { /* Yes, we can. */ - cpuid(0x80000005,&aa,&bb,&cc,&dd); - /* Add L1 data and code cache sizes. 
*/ - c->x86_cache_size = (cc>>24)+(dd>>24); - } - sprintf( c->x86_model_id, "WinChip %s", name ); } + static void __init transmeta_model(struct cpuinfo_x86 *c) { - unsigned int cap_mask, uk, max, dummy, n, ecx, edx; + unsigned int cap_mask, uk, max, dummy; unsigned int cms_rev1, cms_rev2; unsigned int cpu_rev, cpu_freq, cpu_flags; char cpu_info[65]; get_model_name(c); /* Same as AMD/Cyrix */ + display_cacheinfo(c); /* Print CMS and CPU revision */ cpuid(0x80860000, &max, &dummy, &dummy, &dummy); @@ -1309,22 +1338,6 @@ static void __init transmeta_model(struct cpuinfo_x86 *c) wrmsr(0x80860004, ~0, uk); cpuid(0x00000001, &dummy, &dummy, &dummy, &c->x86_capability); wrmsr(0x80860004, cap_mask, uk); - - - /* L1/L2 cache */ - cpuid(0x80000000, &n, &dummy, &dummy, &dummy); - - if (n >= 0x80000005) { - cpuid(0x80000005, &dummy, &dummy, &ecx, &edx); - printk("CPU: L1 I Cache: %dK L1 D Cache: %dK\n", - ecx>>24, edx>>24); - c->x86_cache_size=(ecx>>24)+(edx>>24); - } - if (n >= 0x80000006) { - cpuid(0x80000006, &dummy, &dummy, &ecx, &edx); - printk("CPU: L2 Cache: %dK\n", ecx>>16); - c->x86_cache_size=(ecx>>16); - } } @@ -1407,7 +1420,7 @@ static struct cpu_model_info cpu_models[] __initdata = { * to have CPUID. 
(Thanks to Herbert Oppmann) */ -static int deep_magic_nexgen_probe(void) +static int __init deep_magic_nexgen_probe(void) { int ret; @@ -1424,9 +1437,9 @@ static int deep_magic_nexgen_probe(void) return ret; } -static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) +static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c) { - if(c->x86_capability&(1<<18)) { + if(c->x86_capability&(X86_FEATURE_PN) && disable_x86_serial_nr) { /* Disable processor serial number */ unsigned long lo,hi; rdmsr(0x119,lo,hi); @@ -1436,10 +1449,20 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) } } + +int __init x86_serial_nr_setup(char *s) +{ + disable_x86_serial_nr = 0; + return 1; +} +__setup("serialnumber", x86_serial_nr_setup); + + void __init identify_cpu(struct cpuinfo_x86 *c) { int i=0; char *p = NULL; + extern void mcheck_init(void); c->loops_per_sec = loops_per_sec; c->x86_cache_size = -1; @@ -1476,9 +1499,10 @@ void __init identify_cpu(struct cpuinfo_x86 *c) return; case X86_VENDOR_INTEL: - + squash_the_stupid_serial_number(c); - + mcheck_init(); + if (c->cpuid_level > 1) { /* supports eax=2 call */ int edx, dummy; @@ -1522,6 +1546,12 @@ void __init identify_cpu(struct cpuinfo_x86 *c) } } + /* Pentium IV. */ + if (c->x86 == 15) { + get_model_name(c); + return; + } + /* Names for the Pentium II/Celeron processors detectable only by also checking the cache size. Dixon is NOT a Celeron. */ @@ -1555,7 +1585,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) squash_the_stupid_serial_number(c); return; } - + /* may be changed in the switch so needs to be after */ if(c->x86_vendor == X86_VENDOR_NEXGEN) @@ -1658,12 +1688,12 @@ int get_cpuinfo(char * buffer) #endif p += sprintf(p,"processor\t: %d\n" "vendor_id\t: %s\n" - "cpu family\t: %c\n" + "cpu family\t: %d\n" "model\t\t: %d\n" "model name\t: %s\n", n, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", - c->x86 + '0', + c->x86, c->x86_model, c->x86_model_id[0] ? 
c->x86_model_id : "unknown"); diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 6ce9eb61e..f39e42522 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -422,7 +422,7 @@ struct call_data_struct { int wait; }; -static struct call_data_struct * call_data = NULL; +static struct call_data_struct * call_data; /* * this function sends a 'generic call function' IPI to all other CPUs diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 3a9df08e7..12ba21b92 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c @@ -76,7 +76,7 @@ static unsigned long last_tsc_low; /* lsb 32 bits of Time Stamp Counter */ * Equal to 2^32 * (1 / (clocks per usec) ). * Initialized in time_init. */ -unsigned long fast_gettimeoffset_quotient=0; +unsigned long fast_gettimeoffset_quotient; extern rwlock_t xtime_lock; extern unsigned long wall_jiffies; @@ -373,9 +373,9 @@ static int set_rtc_mmss(unsigned long nowtime) } /* last time the cmos clock got updated */ -static long last_rtc_update = 0; +static long last_rtc_update; -int timer_ack = 0; +int timer_ack; /* * timer_interrupt() needs to keep up the real-time clock, @@ -449,7 +449,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *reg #endif } -static int use_tsc = 0; +static int use_tsc; /* * This is the same as the above, except we _also_ save the current diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 953c23d55..ae87ded92 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -741,11 +741,7 @@ asmlinkage void math_state_restore(struct pt_regs regs) if (current->used_math) { restore_fpu(current); } else { - /* - * Our first FPU usage, clean the chip. 
- */ - __asm__("fninit"); - current->used_math = 1; + init_fpu(); } current->flags |= PF_USEDFPU; /* So we fnsave on switch_to() */ } diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index b88c4a422..946d1f40a 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c @@ -77,31 +77,6 @@ bad_area: return 0; } -static void __init handle_wp_test (void) -{ - const unsigned long vaddr = PAGE_OFFSET; - pgd_t *pgd; - pmd_t *pmd; - pte_t *pte; - - /* - * make it read/writable temporarily, so that the fault - * can be handled. - */ - pgd = swapper_pg_dir + __pgd_offset(vaddr); - pmd = pmd_offset(pgd, vaddr); - pte = pte_offset(pmd, vaddr); - *pte = mk_pte_phys(0, PAGE_KERNEL); - __flush_tlb_all(); - - boot_cpu_data.wp_works_ok = 1; - /* - * Beware: Black magic here. The printk is needed here to flush - * CPU state on certain buggy processors. - */ - printk("Ok"); -} - asmlinkage void do_invalid_op(struct pt_regs *, unsigned long); extern unsigned long idt; @@ -130,6 +105,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code) __asm__("movl %%cr2,%0":"=r" (address)); tsk = current; + + /* + * We fault-in kernel-space virtual memory on-demand. The + * 'reference' page table is init_mm.pgd. + * + * NOTE! We MUST NOT take any locks for this case. We may + * be in an interrupt or a critical region, and should + * only copy the information from the master page table, + * nothing more. + */ + if (address >= TASK_SIZE) + goto vmalloc_fault; + mm = tsk->mm; info.si_code = SEGV_MAPERR; @@ -223,6 +211,7 @@ good_area: bad_area: up(&mm->mmap_sem); +bad_area_nosemaphore: /* User mode accesses just cause a SIGSEGV */ if (error_code & 4) { tsk->thread.cr2 = address; @@ -260,14 +249,7 @@ no_context: /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. - * - * First we check if it was the bootup rw-test, though.. 
*/ - if (boot_cpu_data.wp_works_ok < 0 && - address == PAGE_OFFSET && (error_code & 1)) { - handle_wp_test(); - return; - } if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); @@ -318,4 +300,34 @@ do_sigbus: /* Kernel mode? Handle exceptions or die */ if (!(error_code & 4)) goto no_context; + return; + +vmalloc_fault: + { + /* + * Synchronize this task's top level page-table + * with the 'reference' page table. + */ + int offset = __pgd_offset(address); + pgd_t *pgd, *pgd_k; + pmd_t *pmd, *pmd_k; + + pgd = tsk->active_mm->pgd + offset; + pgd_k = init_mm.pgd + offset; + + if (!pgd_present(*pgd)) { + if (!pgd_present(*pgd_k)) + goto bad_area_nosemaphore; + set_pgd(pgd, *pgd_k); + return; + } + + pmd = pmd_offset(pgd, address); + pmd_k = pmd_offset(pgd_k, address); + + if (pmd_present(*pmd) || !pmd_present(*pmd_k)) + goto bad_area_nosemaphore; + set_pmd(pmd, *pmd_k); + return; + } } diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index 9ba2baa31..39a6ce0f8 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -37,8 +37,8 @@ #include <asm/apic.h> unsigned long highstart_pfn, highend_pfn; -static unsigned long totalram_pages = 0; -static unsigned long totalhigh_pages = 0; +static unsigned long totalram_pages; +static unsigned long totalhigh_pages; /* * BAD_PAGE is the page that is used for page faults when linux @@ -491,16 +491,21 @@ void __init paging_init(void) * before and after the test are here to work-around some nasty CPU bugs. */ +/* + * This function cannot be __init, since exceptions don't work in that + * section. + */ +static int do_test_wp_bit(unsigned long vaddr); + void __init test_wp_bit(void) { /* - * Ok, all PAE-capable CPUs are definitely handling the WP bit right. + * Ok, all PSE-capable CPUs are definitely handling the WP bit right. 
*/ const unsigned long vaddr = PAGE_OFFSET; pgd_t *pgd; pmd_t *pmd; pte_t *pte, old_pte; - char tmp_reg; printk("Checking if this processor honours the WP bit even in supervisor mode... "); @@ -511,27 +516,19 @@ void __init test_wp_bit(void) *pte = mk_pte_phys(0, PAGE_READONLY); local_flush_tlb(); - __asm__ __volatile__( - "jmp 1f; 1:\n" - "movb %0,%1\n" - "movb %1,%0\n" - "jmp 1f; 1:\n" - :"=m" (*(char *) vaddr), - "=q" (tmp_reg) - :/* no inputs */ - :"memory"); + boot_cpu_data.wp_works_ok = do_test_wp_bit(vaddr); *pte = old_pte; local_flush_tlb(); - if (boot_cpu_data.wp_works_ok < 0) { - boot_cpu_data.wp_works_ok = 0; + if (!boot_cpu_data.wp_works_ok) { printk("No.\n"); #ifdef CONFIG_X86_WP_WORKS_OK panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); #endif - } else - printk(".\n"); + } else { + printk("Ok.\n"); + } } static inline int page_is_ram (unsigned long pagenr) @@ -634,6 +631,30 @@ void __init mem_init(void) } +/* Put this after the callers, so that it cannot be inlined */ +static int do_test_wp_bit(unsigned long vaddr) +{ + char tmp_reg; + int flag; + + __asm__ __volatile__( + " movb %0,%1 \n" + "1: movb %1,%0 \n" + " xorl %2,%2 \n" + "2: \n" + ".section __ex_table,\"a\"\n" + " .align 4 \n" + " .long 1b,2b \n" + ".previous \n" + :"=m" (*(char *) vaddr), + "=q" (tmp_reg), + "=r" (flag) + :"2" (1) + :"memory"); + + return flag; +} + void free_initmem(void) { unsigned long addr; diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c index 66f846925..bb72da8c8 100644 --- a/arch/i386/mm/ioremap.c +++ b/arch/i386/mm/ioremap.c @@ -78,7 +78,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr, if (remap_area_pmd(pmd, address, end - address, phys_addr + address, flags)) return -ENOMEM; - set_pgdir(address, *dir); address = (address + PGDIR_SIZE) & PGDIR_MASK; dir++; } while (address && (address < end)); |