author    | Ralf Baechle <ralf@linux-mips.org> | 1998-09-19 19:15:08 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 1998-09-19 19:15:08 +0000
commit    | 03ba4131783cc9e872f8bb26a03f15bc11f27564
tree      | 88db8dba75ae06ba3bad08e42c5e52efc162535c /arch/i386
parent    | 257730f99381dd26e10b832fce4c94cae7ac1176
- Merge with Linux 2.1.121.
- Bugfixes.
Diffstat (limited to 'arch/i386')
-rw-r--r-- | arch/i386/boot/setup.S           |   2
-rw-r--r-- | arch/i386/defconfig              |   1
-rw-r--r-- | arch/i386/kernel/bios32.c        |  58
-rw-r--r-- | arch/i386/kernel/desc.h          |  61
-rw-r--r-- | arch/i386/kernel/entry.S         |  32
-rw-r--r-- | arch/i386/kernel/head.S          |   5
-rw-r--r-- | arch/i386/kernel/init_task.c     |  13
-rw-r--r-- | arch/i386/kernel/io_apic.c       |  81
-rw-r--r-- | arch/i386/kernel/irq.c           | 235
-rw-r--r-- | arch/i386/kernel/irq.h           |  37
-rw-r--r-- | arch/i386/kernel/ldt.c           |  11
-rw-r--r-- | arch/i386/kernel/mca.c           |   2
-rw-r--r-- | arch/i386/kernel/mtrr.c          |   1
-rw-r--r-- | arch/i386/kernel/process.c       |  65
-rw-r--r-- | arch/i386/kernel/ptrace.c        |   1
-rw-r--r-- | arch/i386/kernel/setup.c         |  83
-rw-r--r-- | arch/i386/kernel/signal.c        |  43
-rw-r--r-- | arch/i386/kernel/smp.c           | 144
-rw-r--r-- | arch/i386/kernel/traps.c         | 102
-rw-r--r-- | arch/i386/lib/usercopy.c         |   6
-rw-r--r-- | arch/i386/math-emu/get_address.c |   1
-rw-r--r-- | arch/i386/mm/fault.c             |   6
-rw-r--r-- | arch/i386/mm/init.c              |   9
-rw-r--r-- | arch/i386/vmlinux.lds            |   6
24 files changed, 607 insertions, 398 deletions
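
The bulk of this merge reworks the i386 interrupt layer around a single per-IRQ status word, replacing the old events/ipi fields. For orientation before the patch text, here is a condensed, hypothetical sketch of the new edge-triggered handling. The flag names follow the arch/i386/kernel/irq.h hunk below, but the descriptor type, locking and IO-APIC details are stripped away, so this is an illustration of the state machine rather than the kernel code itself.

/* Flag values mirror the IRQ_* definitions added in arch/i386/kernel/irq.h
 * below; the structure and handler hook are stand-ins for illustration. */
#define IRQ_INPROGRESS  1   /* handler running - do not re-enter        */
#define IRQ_DISABLED    2   /* disable_irq() in effect                  */
#define IRQ_PENDING     4   /* edge arrived while busy - replay it      */
#define IRQ_REPLAY      8   /* pending edge already replayed, not acked */
#define IRQ_AUTODETECT 16   /* line is being probed                     */

struct irq_desc_sketch {
	unsigned int status;            /* combination of IRQ_* flags        */
	void (*action)(int irq);        /* stand-in for the irqaction chain  */
};

static void do_edge_irq_sketch(struct irq_desc_sketch *desc, int irq)
{
	/* A new edge arrived: drop any stale replay mark, note it as pending. */
	unsigned int status = (desc->status & ~IRQ_REPLAY) | IRQ_PENDING;

	if (status & (IRQ_DISABLED | IRQ_INPROGRESS)) {
		/* Disabled or already running: just record the edge; the
		 * running instance (or a later enable) will replay it. */
		desc->status = status;
		return;
	}

	/* Claim the line and consume the pending edge we just recorded. */
	desc->status = (status & ~IRQ_PENDING) | IRQ_INPROGRESS;

	do {
		desc->action(irq);
		/* Edges that arrived while the handler ran show up as
		 * IRQ_PENDING again; keep looping until none are left. */
		status = desc->status;
		desc->status = status & ~IRQ_PENDING;
	} while (status & IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
}

The full patch text follows, as recorded in the commit.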
diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S index 0959487c1..22fca192c 100644 --- a/arch/i386/boot/setup.S +++ b/arch/i386/boot/setup.S @@ -18,7 +18,7 @@ ! March 1993/June 1994 (Christoph.Niemann@linux.org) ! ! add APM BIOS checking by Stephen Rothwell, May 1994 -! (Stephen.Rothwell@pd.necisa.oz.au) +! (Stephen.Rothwell@canb.auug.org.au) ! ! High load stuff, initrd support and position independency ! by Hans Lermen & Werner Almesberger, February 1996 diff --git a/arch/i386/defconfig b/arch/i386/defconfig index c1a21238a..eda0b2bcb 100644 --- a/arch/i386/defconfig +++ b/arch/i386/defconfig @@ -198,6 +198,7 @@ CONFIG_EEXPRESS_PRO100=y # CONFIG_SLIP is not set # CONFIG_NET_RADIO is not set # CONFIG_TR is not set +# CONFIG_HOSTESS_SV11 is not set # CONFIG_WAN_DRIVERS is not set # CONFIG_LAPBETHER is not set # CONFIG_X25_ASY is not set diff --git a/arch/i386/kernel/bios32.c b/arch/i386/kernel/bios32.c index 1752ff2c0..31049ec2a 100644 --- a/arch/i386/kernel/bios32.c +++ b/arch/i386/kernel/bios32.c @@ -1,7 +1,7 @@ /* * bios32.c - Low-Level PCI Access * - * $Id: bios32.c,v 1.44 1998/08/04 14:54:56 mj Exp $ + * $Id: bios32.c,v 1.45 1998/08/15 10:41:04 mj Exp $ * * Copyright 1993, 1994 Drew Eckhardt * Visionary Computing @@ -945,7 +945,8 @@ __initfunc(void pcibios_fixup_ghosts(struct pci_bus *b)) __initfunc(void pcibios_fixup_peer_bridges(void)) { struct pci_bus *b = &pci_root; - int i; + int i, cnt=-1; + struct pci_dev *d; #ifdef CONFIG_PCI_DIRECT /* @@ -956,23 +957,34 @@ __initfunc(void pcibios_fixup_peer_bridges(void)) if (access_pci == &pci_direct_conf2) return; #endif + for(d=b->devices; d; d=d->sibling) + if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST) + cnt++; do { int n = b->subordinate+1; + int found = 0; u16 l; for(i=0; i<256; i += 8) if (!pcibios_read_config_word(n, i, PCI_VENDOR_ID, &l) && l != 0x0000 && l != 0xffff) { DBG("Found device at %02x:%02x\n", n, i); - printk("PCI: Discovered primary peer bus %02x\n", n); - b = kmalloc(sizeof(*b), GFP_KERNEL); - memset(b, 0, sizeof(*b)); - b->next = pci_root.next; - pci_root.next = b; - b->number = b->secondary = n; - b->subordinate = 0xff; - b->subordinate = pci_scan_bus(b); - break; + found++; + if (!pcibios_read_config_word(n, i, PCI_CLASS_DEVICE, &l) && + l == PCI_CLASS_BRIDGE_HOST) + cnt++; } + if (found && cnt > 0) { + cnt--; + printk("PCI: Discovered primary peer bus %02x\n", n); + b = kmalloc(sizeof(*b), GFP_KERNEL); + memset(b, 0, sizeof(*b)); + b->next = pci_root.next; + pci_root.next = b; + b->number = b->secondary = n; + b->subordinate = 0xff; + b->subordinate = pci_scan_bus(b); + break; + } } while (i < 256); } @@ -1034,19 +1046,19 @@ __initfunc(void pcibios_fixup_devices(void)) * Recalculate IRQ numbers if we use the I/O APIC */ { - int irq; - unsigned char pin; - - pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); - if (pin) { - pin--; /* interrupt pins are numbered starting from 1 */ - irq = IO_APIC_get_PCI_irq_vector (dev->bus->number, PCI_SLOT(dev->devfn), pin); - if (irq >= 0) { - printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n", - dev->bus->number, PCI_SLOT(dev->devfn), pin, irq); - dev->irq = irq; + int irq; + unsigned char pin; + + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); + if (pin) { + pin--; /* interrupt pins are numbered starting from 1 */ + irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); + if (irq >= 0) { + printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n", + dev->bus->number, PCI_SLOT(dev->devfn), pin, irq); + dev->irq = irq; } - } + } } #endif 
/* diff --git a/arch/i386/kernel/desc.h b/arch/i386/kernel/desc.h new file mode 100644 index 000000000..e91580e04 --- /dev/null +++ b/arch/i386/kernel/desc.h @@ -0,0 +1,61 @@ +#ifndef __ARCH_DESC_H +#define __ARCH_DESC_H + +struct desc_struct { + unsigned long a,b; +}; + +extern struct desc_struct gdt_table[]; +extern struct desc_struct *idt, *gdt; + +struct Xgt_desc_struct { + unsigned short size; + unsigned long address __attribute__((packed)); +}; + +#define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2)) +#define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2)) + +/* + * Entry into gdt where to find first TSS. GDT layout: + * 0 - null + * 1 - not used + * 2 - kernel code segment + * 3 - kernel data segment + * 4 - user code segment + * 5 - user data segment + * 6 - not used + * 7 - not used + * 8 - APM BIOS support + * 9 - APM BIOS support + * 10 - APM BIOS support + * 11 - APM BIOS support + * 12 - TSS #0 + * 13 - LDT #0 + * 14 - TSS #1 + * 15 - LDT #1 + */ +#define FIRST_TSS_ENTRY 12 +#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1) +#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3)) +#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3)) +#define load_TR(n) __asm__ __volatile__("ltr %%ax": /* no output */ :"a" (_TSS(n))) +#define load_ldt(n) __asm__ __volatile__("lldt %%ax": /* no output */ :"a" (_LDT(n))) +#define store_TR(n) \ +__asm__("str %%ax\n\t" \ + "subl %2,%%eax\n\t" \ + "shrl $4,%%eax" \ + :"=a" (n) \ + :"0" (0),"i" (FIRST_TSS_ENTRY<<3)) + +extern void set_intr_gate(unsigned int irq, void * addr); +extern void set_ldt_desc(unsigned int n, void *addr, unsigned int size); +extern void set_tss_desc(unsigned int n, void *addr); + +/* + * This is the ldt that every process will get unless we need + * something other than this. + */ +extern struct desc_struct default_ldt; + +#endif diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S index aac4866c1..0261ae24b 100644 --- a/arch/i386/kernel/entry.S +++ b/arch/i386/kernel/entry.S @@ -105,23 +105,21 @@ ENOSYS = 38 popl %eax; \ 1: popl %ds; \ 2: popl %es; \ -3: addl $4,%esp; \ -4: iret; \ + addl $4,%esp; \ +3: iret; \ .section fixup,"ax"; \ -5: pushl $0; \ - popl %ds; \ +4: movl $0,(%esp); \ + jmp 1b; \ +5: movl $0,(%esp); \ jmp 2b; \ -6: pushl $0; \ - popl %es; \ - jmp 3b; \ -7: pushl $11; \ +6: pushl $11; \ call do_exit; \ .previous; \ .section __ex_table,"a";\ .align 4; \ - .long 1b,5b; \ - .long 2b,6b; \ - .long 4b,7b; \ + .long 1b,4b; \ + .long 2b,5b; \ + .long 3b,6b; \ .previous #define GET_CURRENT(reg) \ @@ -366,7 +364,7 @@ ENTRY(spurious_interrupt_bug) .data ENTRY(sys_call_table) - .long SYMBOL_NAME(sys_setup) /* 0 */ + .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/ .long SYMBOL_NAME(sys_exit) .long SYMBOL_NAME(sys_fork) .long SYMBOL_NAME(sys_read) @@ -556,7 +554,13 @@ ENTRY(sys_call_table) .long SYMBOL_NAME(sys_sendfile) .long SYMBOL_NAME(sys_ni_syscall) /* streams1 */ .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ - - .rept NR_syscalls-187 + + /* + * NOTE!! This doesn' thave to be exact - we just have + * to make sure we have _enough_ of the "sys_ni_syscall" + * entries. Don't panic if you notice that this hasn't + * been shrunk every time we add a new system call. 
+ */ + .rept NR_syscalls-189 .long SYMBOL_NAME(sys_ni_syscall) .endr diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index 0697306a7..7343cd64f 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -509,7 +509,6 @@ ENTRY(empty_bad_page_table) ENTRY(empty_zero_page) .org 0x6000 -ENTRY(this_must_match_init_task) /* * This starts the data section. Note that the above is all @@ -519,10 +518,6 @@ ENTRY(this_must_match_init_task) .data ALIGN -/* 256 quadwords - 2048 bytes of idt */ -ENTRY(idt_table) - .fill 256,8,0 # idt is uninitialized - /* * This contains up to 8192 quadwords depending on NR_TASKS - 64kB of * gdt entries. Ugh. diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c index c0571c769..4381719ae 100644 --- a/arch/i386/kernel/init_task.c +++ b/arch/i386/kernel/init_task.c @@ -4,6 +4,8 @@ #include <asm/uaccess.h> #include <asm/pgtable.h> +#include "desc.h" + static struct vm_area_struct init_mmap = INIT_MMAP; static struct fs_struct init_fs = INIT_FS; static struct file * init_fd_array[NR_OPEN] = { NULL, }; @@ -15,10 +17,9 @@ struct mm_struct init_mm = INIT_MM; * Initial task structure. * * We need to make sure that this is 8192-byte aligned due to the - * way process stacks are handled. This is done by making sure - * the linker maps this in the .text segment right after head.S, - * and making head.S ensure the proper alignment. - * - * The things we do for performance.. + * way process stacks are handled. This is done by having a special + * "init_task" linker map entry.. */ -union task_union init_task_union __attribute__((__section__(".text"))) = { INIT_TASK }; +union task_union init_task_union + __attribute__((__section__(".data.init_task"))) = { INIT_TASK }; + diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 4e1c6b563..e2e107c9e 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -514,6 +514,8 @@ static inline int IO_APIC_irq_trigger(int irq) return 0; } +int irq_vector[NR_IRQS] = { IRQ0_TRAP_VECTOR , 0 }; + static int __init assign_irq_vector(int irq) { static int current_vector = IRQ0_TRAP_VECTOR, offset = 0; @@ -921,9 +923,10 @@ static int __init timer_irq_works(void) static inline void self_IPI(unsigned int irq) { irq_desc_t *desc = irq_desc + irq; + unsigned int status = desc->status; - if (desc->events && !desc->ipi) { - desc->ipi = 1; + if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + desc->status = status | IRQ_REPLAY; send_IPI(APIC_DEST_SELF, IO_APIC_VECTOR(irq)); } } @@ -956,10 +959,11 @@ static void disable_level_ioapic_irq(unsigned int irq) mask_IO_APIC_irq(irq); } -static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs) +static void do_edge_ioapic_IRQ(unsigned int irq, struct pt_regs * regs) { irq_desc_t *desc = irq_desc + irq; struct irqaction * action; + unsigned int status; spin_lock(&irq_controller_lock); @@ -968,19 +972,19 @@ static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs) * and do not need to be masked. */ ack_APIC_irq(); - desc->ipi = 0; - desc->events = 1; + status = desc->status & ~IRQ_REPLAY; + status |= IRQ_PENDING; /* * If the IRQ is disabled for whatever reason, we cannot * use the action we have. 
*/ action = NULL; - if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { action = desc->action; - desc->status = IRQ_INPROGRESS; - desc->events = 0; + status &= ~IRQ_PENDING; } + desc->status = status | IRQ_INPROGRESS; spin_unlock(&irq_controller_lock); /* @@ -989,35 +993,28 @@ static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs) if (!action) return; - irq_enter(cpu, irq); - /* * Edge triggered interrupts need to remember * pending events. */ for (;;) { - int pending; - - handle_IRQ_event(irq, regs); + handle_IRQ_event(irq, regs, action); spin_lock(&irq_controller_lock); - pending = desc->events; - desc->events = 0; - if (!pending) + if (!(desc->status & IRQ_PENDING)) break; + desc->status &= ~IRQ_PENDING; spin_unlock(&irq_controller_lock); } - desc->status &= IRQ_DISABLED; + desc->status &= ~IRQ_INPROGRESS; spin_unlock(&irq_controller_lock); - - irq_exit(cpu, irq); } -static void do_level_ioapic_IRQ(unsigned int irq, int cpu, - struct pt_regs * regs) +static void do_level_ioapic_IRQ(unsigned int irq, struct pt_regs * regs) { irq_desc_t *desc = irq_desc + irq; struct irqaction * action; + unsigned int status; spin_lock(&irq_controller_lock); /* @@ -1029,18 +1026,17 @@ static void do_level_ioapic_IRQ(unsigned int irq, int cpu, * So this all has to be within the spinlock. */ mask_IO_APIC_irq(irq); - - desc->ipi = 0; + status = desc->status & ~IRQ_REPLAY; /* * If the IRQ is disabled for whatever reason, we must * not enter the IRQ action. */ action = NULL; - if (!(desc->status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { action = desc->action; - desc->status = IRQ_INPROGRESS; } + desc->status = status | IRQ_INPROGRESS; ack_APIC_irq(); spin_unlock(&irq_controller_lock); @@ -1049,17 +1045,13 @@ static void do_level_ioapic_IRQ(unsigned int irq, int cpu, if (!action) return; - irq_enter(cpu, irq); - - handle_IRQ_event(irq, regs); + handle_IRQ_event(irq, regs, action); spin_lock(&irq_controller_lock); desc->status &= ~IRQ_INPROGRESS; - if (!desc->status) + if (!(desc->status & IRQ_DISABLED)) unmask_IO_APIC_irq(irq); spin_unlock(&irq_controller_lock); - - irq_exit(cpu, irq); } /* @@ -1160,6 +1152,31 @@ static inline void check_timer(void) } } +/* + * + * IRQ's that are handled by the old PIC in all cases: + * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ. + * Linux doesn't really care, as it's not actually used + * for any interrupt handling anyway. + * - IRQ13 is the FPU error IRQ, and may be connected + * directly from the FPU to the old PIC. Linux doesn't + * really care, because Linux doesn't want to use IRQ13 + * anyway (exception 16 is the proper FPU error signal) + * - IRQ9 is broken on PIIX4 motherboards: + * + * "IRQ9 cannot be re-assigned" + * + * IRQ9 is not available to assign to + * ISA add-in cards because it is + * dedicated to the power + * management function of the PIIX4 + * controller on the motherboard. + * This is true for other motherboards + * which use the 82371AB PIIX4 + * component. 
+ */ +#define PIC_IRQS ((1<<2)|(1<<9)|(1<<13)) + void __init setup_IO_APIC(void) { init_sym_mode(); @@ -1177,7 +1194,7 @@ void __init setup_IO_APIC(void) pirqs_enabled) { printk("ENABLING IO-APIC IRQs\n"); - io_apic_irqs = ~((1<<2)|(1<<9)|(1<<13)); + io_apic_irqs = ~PIC_IRQS; } else { if (ioapic_blacklisted()) printk(" blacklisted board, DISABLING IO-APIC IRQs\n"); diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c index ed086f5be..1baf19aea 100644 --- a/arch/i386/kernel/irq.c +++ b/arch/i386/kernel/irq.c @@ -39,6 +39,7 @@ #include <asm/delay.h> #include "irq.h" +#include "desc.h" unsigned int local_bh_count[NR_CPUS]; unsigned int local_irq_count[NR_CPUS]; @@ -79,14 +80,14 @@ spinlock_t irq_controller_lock; */ unsigned long long io_apic_irqs = 0; -static void do_8259A_IRQ (unsigned int irq, int cpu, struct pt_regs * regs); +static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs); static void enable_8259A_irq(unsigned int irq); void disable_8259A_irq(unsigned int irq); /* * Dummy controller type for unused interrupts */ -static void do_none(unsigned int irq, int cpu, struct pt_regs * regs) { } +static void do_none(unsigned int irq, struct pt_regs * regs) { } static void enable_none(unsigned int irq) { } static void disable_none(unsigned int irq) { } @@ -105,12 +106,10 @@ static struct hw_interrupt_type i8259A_irq_type = { }; irq_desc_t irq_desc[NR_IRQS] = { - [0 ... 15] = { 0, 0, 0, &i8259A_irq_type, }, /* default to standard ISA IRQs */ - [16 ... 63] = { 0, 0, 0, &no_irq_type, }, /* 'high' PCI IRQs filled in on demand */ + [0 ... 15] = { 0, &i8259A_irq_type, }, /* default to standard ISA IRQs */ + [16 ... NR_IRQS-1] = { 0, &no_irq_type, }, /* 'high' PCI IRQs filled in on demand */ }; -int irq_vector[NR_IRQS] = { IRQ0_TRAP_VECTOR , 0 }; - /* * These have to be protected by the irq controller spinlock @@ -150,52 +149,29 @@ static void enable_8259A_irq(unsigned int irq) BUILD_COMMON_IRQ() /* - * ISA PIC or IO-APIC triggered (INTA-cycle or APIC) interrupts: + * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts: */ -BUILD_IRQ(0) BUILD_IRQ(1) BUILD_IRQ(2) BUILD_IRQ(3) -BUILD_IRQ(4) BUILD_IRQ(5) BUILD_IRQ(6) BUILD_IRQ(7) -BUILD_IRQ(8) BUILD_IRQ(9) BUILD_IRQ(10) BUILD_IRQ(11) +BUILD_IRQ(0) BUILD_IRQ(1) BUILD_IRQ(2) BUILD_IRQ(3) +BUILD_IRQ(4) BUILD_IRQ(5) BUILD_IRQ(6) BUILD_IRQ(7) +BUILD_IRQ(8) BUILD_IRQ(9) BUILD_IRQ(10) BUILD_IRQ(11) BUILD_IRQ(12) BUILD_IRQ(13) BUILD_IRQ(14) BUILD_IRQ(15) #ifdef __SMP__ /* - * The IO-APIC (present only in SMP boards) has 8 more hardware - * interrupt pins, for all of them we define an IRQ vector: - * - * raw PCI interrupts 0-3, basically these are the ones used - * heavily: + * The IO-APIC gives us many more interrupt sources.. */ BUILD_IRQ(16) BUILD_IRQ(17) BUILD_IRQ(18) BUILD_IRQ(19) - -/* - * [FIXME: anyone with 2 separate PCI buses and 2 IO-APICs, please - * speak up if problems and request experimental patches. - * --mingo ] - */ - -/* - * MIRQ (motherboard IRQ) interrupts 0-1: - */ -BUILD_IRQ(20) BUILD_IRQ(21) - -/* - * 'nondefined general purpose interrupt'. 
- */ -BUILD_IRQ(22) -/* - * optionally rerouted SMI interrupt: - */ -BUILD_IRQ(23) - -BUILD_IRQ(24) -BUILD_IRQ(25) BUILD_IRQ(26) BUILD_IRQ(27) BUILD_IRQ(28) BUILD_IRQ(29) -BUILD_IRQ(30) BUILD_IRQ(31) BUILD_IRQ(32) BUILD_IRQ(33) BUILD_IRQ(34) -BUILD_IRQ(35) BUILD_IRQ(36) BUILD_IRQ(37) BUILD_IRQ(38) BUILD_IRQ(39) -BUILD_IRQ(40) BUILD_IRQ(41) BUILD_IRQ(42) BUILD_IRQ(43) BUILD_IRQ(44) -BUILD_IRQ(45) BUILD_IRQ(46) BUILD_IRQ(47) BUILD_IRQ(48) BUILD_IRQ(49) -BUILD_IRQ(50) BUILD_IRQ(51) BUILD_IRQ(52) BUILD_IRQ(53) BUILD_IRQ(54) -BUILD_IRQ(55) BUILD_IRQ(56) BUILD_IRQ(57) BUILD_IRQ(58) BUILD_IRQ(59) +BUILD_IRQ(20) BUILD_IRQ(21) BUILD_IRQ(22) BUILD_IRQ(23) +BUILD_IRQ(24) BUILD_IRQ(25) BUILD_IRQ(26) BUILD_IRQ(27) +BUILD_IRQ(28) BUILD_IRQ(29) BUILD_IRQ(30) BUILD_IRQ(31) +BUILD_IRQ(32) BUILD_IRQ(33) BUILD_IRQ(34) BUILD_IRQ(35) +BUILD_IRQ(36) BUILD_IRQ(37) BUILD_IRQ(38) BUILD_IRQ(39) +BUILD_IRQ(40) BUILD_IRQ(41) BUILD_IRQ(42) BUILD_IRQ(43) +BUILD_IRQ(44) BUILD_IRQ(45) BUILD_IRQ(46) BUILD_IRQ(47) +BUILD_IRQ(48) BUILD_IRQ(49) BUILD_IRQ(50) BUILD_IRQ(51) +BUILD_IRQ(52) BUILD_IRQ(53) BUILD_IRQ(54) BUILD_IRQ(55) +BUILD_IRQ(56) BUILD_IRQ(57) BUILD_IRQ(58) BUILD_IRQ(59) BUILD_IRQ(60) BUILD_IRQ(61) BUILD_IRQ(62) BUILD_IRQ(63) /* @@ -450,12 +426,12 @@ static inline void wait_on_irq(int cpu) * no other CPU is executing any bottom half handler. * * Don't wait if we're already running in an interrupt - * context or are inside a bh handler. + * context or are inside a bh handler. */ void synchronize_bh(void) { if (atomic_read(&global_bh_count) && !in_interrupt()) - wait_on_bh(); + wait_on_bh(); } /* @@ -586,29 +562,35 @@ void __global_restore_flags(unsigned long flags) #endif -int handle_IRQ_event(unsigned int irq, struct pt_regs * regs) +/* + * This should really return information about whether + * we should do bottom half handling etc. Right now we + * end up _always_ checking the bottom half, which is a + * waste of time and is not what some drivers would + * prefer. 
+ */ +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action) { - struct irqaction * action; int status; + int cpu = smp_processor_id(); - status = 0; - action = irq_desc[irq].action; + irq_enter(cpu, irq); - if (action) { - status |= 1; + status = 1; /* Force the "do bottom halves" bit */ - if (!(action->flags & SA_INTERRUPT)) - __sti(); + if (!(action->flags & SA_INTERRUPT)) + __sti(); - do { - status |= action->flags; - action->handler(irq, action->dev_id, regs); - action = action->next; - } while (action); - if (status & SA_SAMPLE_RANDOM) - add_interrupt_randomness(irq); - __cli(); - } + do { + status |= action->flags; + action->handler(irq, action->dev_id, regs); + action = action->next; + } while (action); + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + __cli(); + + irq_exit(cpu, irq); return status; } @@ -625,9 +607,9 @@ int i8259A_irq_pending(unsigned int irq) void make_8259A_irq(unsigned int irq) { + disable_irq(irq); __long(0,io_apic_irqs) &= ~(1<<irq); irq_desc[irq].handler = &i8259A_irq_type; - disable_irq(irq); enable_irq(irq); } @@ -639,8 +621,6 @@ void make_8259A_irq(unsigned int irq) */ static inline void mask_and_ack_8259A(unsigned int irq) { - spin_lock(&irq_controller_lock); - irq_desc[irq].status |= IRQ_INPROGRESS; cached_irq_mask |= 1 << irq; if (irq & 8) { inb(0xA1); /* DUMMY */ @@ -652,23 +632,39 @@ static inline void mask_and_ack_8259A(unsigned int irq) outb(cached_21,0x21); outb(0x20,0x20); } - spin_unlock(&irq_controller_lock); } -static void do_8259A_IRQ(unsigned int irq, int cpu, struct pt_regs * regs) +static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs) { - mask_and_ack_8259A(irq); + struct irqaction * action; + irq_desc_t *desc = irq_desc + irq; - irq_enter(cpu, irq); + spin_lock(&irq_controller_lock); + { + unsigned int status; + mask_and_ack_8259A(irq); + status = desc->status & ~IRQ_REPLAY; + action = NULL; + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) + action = desc->action; + desc->status = status | IRQ_INPROGRESS; + } + spin_unlock(&irq_controller_lock); + + /* Exit early if we had no action or it was disabled */ + if (!action) + return; - if (handle_IRQ_event(irq, regs)) { - spin_lock(&irq_controller_lock); - if (!(irq_desc[irq].status &= IRQ_DISABLED)) + handle_IRQ_event(irq, regs, action); + + spin_lock(&irq_controller_lock); + { + unsigned int status = desc->status & ~IRQ_INPROGRESS; + desc->status = status; + if (!(status & IRQ_DISABLED)) enable_8259A_irq(irq); - spin_unlock(&irq_controller_lock); } - - irq_exit(cpu, irq); + spin_unlock(&irq_controller_lock); } @@ -683,10 +679,6 @@ void disable_irq(unsigned int irq) unsigned long flags; spin_lock_irqsave(&irq_controller_lock, flags); - /* - * At this point we may actually have a pending interrupt being active - * on another CPU. So don't touch the IRQ_INPROGRESS bit.. 
- */ irq_desc[irq].status |= IRQ_DISABLED; irq_desc[irq].handler->disable(irq); spin_unlock_irqrestore(&irq_controller_lock, flags); @@ -733,7 +725,7 @@ asmlinkage void do_IRQ(struct pt_regs regs) int cpu = smp_processor_id(); kstat.irqs[cpu][irq]++; - irq_desc[irq].handler->handle(irq, cpu, ®s); + irq_desc[irq].handler->handle(irq, ®s); /* * This should be conditional: we should really get @@ -793,6 +785,7 @@ int setup_x86_irq(unsigned int irq, struct irqaction * new) *p = new; if (!shared) { + irq_desc[irq].status = 0; #ifdef __SMP__ if (IO_APIC_IRQ(irq)) { /* @@ -803,11 +796,10 @@ int setup_x86_irq(unsigned int irq, struct irqaction * new) if (irq < 16) { disable_8259A_irq(irq); if (i8259A_irq_pending(irq)) - irq_desc[irq].events = 1; + irq_desc[irq].status = IRQ_PENDING; } } #endif - irq_desc[irq].status = 0; irq_desc[irq].handler->enable(irq); } spin_unlock_irqrestore(&irq_controller_lock,flags); @@ -863,8 +855,10 @@ void free_irq(unsigned int irq, void *dev_id) /* Found it - now free it */ *p = action->next; kfree(action); - if (!irq_desc[irq].action) + if (!irq_desc[irq].action) { + irq_desc[irq].status |= IRQ_DISABLED; irq_desc[irq].handler->disable(irq); + } goto out; } printk("Trying to free free IRQ%d\n",irq); @@ -880,9 +874,9 @@ out: * with "IRQ_INPROGRESS" asserted and the interrupt * disabled. */ -unsigned long probe_irq_on (void) +unsigned long probe_irq_on(void) { - unsigned int i, irqs = 0; + unsigned int i; unsigned long delay; /* @@ -891,51 +885,68 @@ unsigned long probe_irq_on (void) spin_lock_irq(&irq_controller_lock); for (i = NR_IRQS-1; i > 0; i--) { if (!irq_desc[i].action) { - irq_desc[i].status = 0; + unsigned int status = irq_desc[i].status | IRQ_AUTODETECT; + irq_desc[i].status = status & ~(IRQ_INPROGRESS | IRQ_PENDING); irq_desc[i].handler->enable(i); - irqs |= (1 << i); } } spin_unlock_irq(&irq_controller_lock); /* - * wait for spurious interrupts to increase counters + * Wait for spurious interrupts to trigger */ for (delay = jiffies + HZ/10; delay > jiffies; ) /* about 100ms delay */ synchronize_irq(); /* - * now filter out any obviously spurious interrupts + * Now filter out any obviously spurious interrupts */ spin_lock_irq(&irq_controller_lock); for (i=0; i<NR_IRQS; i++) { - if (irq_desc[i].status & IRQ_INPROGRESS) - irqs &= ~(1UL << i); + unsigned int status = irq_desc[i].status; + + if (!(status & IRQ_AUTODETECT)) + continue; + + /* It triggered already - consider it spurious. 
*/ + if (status & IRQ_INPROGRESS) { + irq_desc[i].status = status & ~IRQ_AUTODETECT; + irq_desc[i].handler->disable(i); + } } spin_unlock_irq(&irq_controller_lock); - return irqs; + return 0x12345678; } -int probe_irq_off (unsigned long irqs) +int probe_irq_off(unsigned long unused) { - int i, irq_found = -1; + int i, irq_found, nr_irqs; + if (unused != 0x12345678) + printk("Bad IRQ probe from %lx\n", (&unused)[-1]); + + nr_irqs = 0; + irq_found = 0; spin_lock_irq(&irq_controller_lock); for (i=0; i<NR_IRQS; i++) { - if ((irqs & 1) && (irq_desc[i].status & IRQ_INPROGRESS)) { - if (irq_found != -1) { - irq_found = -irq_found; - goto out; - } - irq_found = i; + unsigned int status = irq_desc[i].status; + + if (!(status & IRQ_AUTODETECT)) + continue; + + if (status & IRQ_INPROGRESS) { + if (!nr_irqs) + irq_found = i; + nr_irqs++; } - irqs >>= 1; + irq_desc[i].status = status & ~IRQ_AUTODETECT; + irq_desc[i].handler->disable(i); } - if (irq_found == -1) - irq_found = 0; -out: spin_unlock_irq(&irq_controller_lock); + + if (nr_irqs > 1) + irq_found = -irq_found; return irq_found; } @@ -948,10 +959,9 @@ __initfunc(void init_IRQ(void)) outb_p(LATCH & 0xff , 0x40); /* LSB */ outb(LATCH >> 8 , 0x40); /* MSB */ - for (i=0; i<NR_IRQS; i++) { - irq_desc[i].events = 0; - irq_desc[i].status = 0; - } + for (i=0; i<NR_IRQS; i++) + irq_desc[i].status = IRQ_DISABLED; + /* * 16 old-style INTA-cycle interrupt gates: */ @@ -971,9 +981,6 @@ __initfunc(void init_IRQ(void)) * while so far it was a kind of broadcasted timer interrupt, * in the future it should become a CPU-to-CPU rescheduling IPI, * driven by schedule() ? - * - * [ It has to be here .. it doesn't work if you put - * it down the bottom - assembler explodes 8) ] */ /* IPI for rescheduling */ @@ -998,9 +1005,9 @@ __initfunc(void init_IRQ(void)) request_region(0xa0,0x20,"pic2"); setup_x86_irq(2, &irq2); setup_x86_irq(13, &irq13); -} +} -#ifdef __SMP__ +#ifdef __SMP__ __initfunc(void init_IRQ_SMP(void)) { diff --git a/arch/i386/kernel/irq.h b/arch/i386/kernel/irq.h index fedfdbc97..d9f32ff94 100644 --- a/arch/i386/kernel/irq.h +++ b/arch/i386/kernel/irq.h @@ -9,19 +9,20 @@ */ struct hw_interrupt_type { const char * typename; - void (*handle)(unsigned int irq, int cpu, struct pt_regs * regs); + void (*handle)(unsigned int irq, struct pt_regs * regs); void (*enable)(unsigned int irq); void (*disable)(unsigned int irq); }; /* - * Status: reason for being disabled: somebody has - * done a "disable_irq()" or we must not re-enter the - * already executing irq.. + * IRQ line status. */ -#define IRQ_INPROGRESS 1 -#define IRQ_DISABLED 2 +#define IRQ_INPROGRESS 1 /* IRQ handler active - do not enter! */ +#define IRQ_DISABLED 2 /* IRQ disabled - do not enter! */ +#define IRQ_PENDING 4 /* IRQ pending - replay on enable */ +#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */ +#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */ /* * This is the "IRQ descriptor", which contains various information @@ -32,8 +33,6 @@ struct hw_interrupt_type { */ typedef struct { unsigned int status; /* IRQ status - IRQ_INPROGRESS, IRQ_DISABLED */ - unsigned int events; /* Do we have any pending events? */ - unsigned int ipi; /* Have we sent off the pending IPI? 
*/ struct hw_interrupt_type *handler; /* handle/enable/disable functions */ struct irqaction *action; /* IRQ action list */ unsigned int unused[3]; @@ -43,9 +42,10 @@ typedef struct { extern irq_desc_t irq_desc[NR_IRQS]; extern int irq_vector[NR_IRQS]; +#define IO_APIC_VECTOR(irq) irq_vector[irq] extern void init_IRQ_SMP(void); -extern int handle_IRQ_event(unsigned int, struct pt_regs *); +extern int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); /* * Various low-level irq details needed by irq.c, process.c, @@ -57,19 +57,17 @@ extern int handle_IRQ_event(unsigned int, struct pt_regs *); void mask_irq(unsigned int irq); void unmask_irq(unsigned int irq); void disable_8259A_irq(unsigned int irq); -int i8259A_irq_pending (unsigned int irq); -void ack_APIC_irq (void); -void setup_IO_APIC (void); -int IO_APIC_get_PCI_irq_vector (int bus, int slot, int fn); -void make_8259A_irq (unsigned int irq); -void send_IPI (int dest, int vector); -void init_pic_mode (void); -void print_IO_APIC (void); +int i8259A_irq_pending(unsigned int irq); +void ack_APIC_irq(void); +void setup_IO_APIC(void); +int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); +void make_8259A_irq(unsigned int irq); +void send_IPI(int dest, int vector); +void init_pic_mode(void); +void print_IO_APIC(void); extern unsigned long long io_apic_irqs; -#define IO_APIC_VECTOR(irq) irq_vector[irq] - #define MAX_IRQ_SOURCES 128 #define MAX_MP_BUSSES 32 enum mp_bustype { @@ -102,7 +100,6 @@ static inline void irq_enter(int cpu, unsigned int irq) static inline void irq_exit(int cpu, unsigned int irq) { hardirq_exit(cpu); - release_irqlock(cpu); } #define IO_APIC_IRQ(x) ((1<<x) & io_apic_irqs) diff --git a/arch/i386/kernel/ldt.c b/arch/i386/kernel/ldt.c index 40a700d0e..4f41b8a33 100644 --- a/arch/i386/kernel/ldt.c +++ b/arch/i386/kernel/ldt.c @@ -16,6 +16,8 @@ #include <asm/system.h> #include <asm/ldt.h> +#include "desc.h" + static int read_ldt(void * ptr, unsigned long bytecount) { void * address = current->mm->segments; @@ -23,11 +25,9 @@ static int read_ldt(void * ptr, unsigned long bytecount) if (!ptr) return -EINVAL; + if (!address) + return 0; size = LDT_ENTRIES*LDT_ENTRY_SIZE; - if (!address) { - address = &default_ldt; - size = sizeof(default_ldt); - } if (size > bytecount) size = bytecount; return copy_to_user(ptr, address, size) ? 
-EFAULT : size; @@ -81,7 +81,8 @@ static int write_ldt(void * ptr, unsigned long bytecount, int oldmode) if (!mm->segments) { int i = current->tarray_ptr - &task[0]; mm->segments = ldt; - set_ldt_desc(gdt+(i<<1)+FIRST_LDT_ENTRY, ldt, LDT_ENTRIES); + set_ldt_desc(i, ldt, LDT_ENTRIES); + current->tss.ldt = _LDT(i); load_ldt(i); if (atomic_read(&mm->count) > 1) printk(KERN_WARNING diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c index ae67822bc..de6efd1e2 100644 --- a/arch/i386/kernel/mca.c +++ b/arch/i386/kernel/mca.c @@ -68,7 +68,7 @@ static int mca_default_procfn( char* buf, int slot ); static ssize_t proc_mca_read( struct file*, char*, size_t, loff_t *); static struct file_operations proc_mca_operations = { NULL, proc_mca_read, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }; static struct inode_operations proc_mca_inode_operations = { &proc_mca_operations, diff --git a/arch/i386/kernel/mtrr.c b/arch/i386/kernel/mtrr.c index 02d5b61a0..f56bcdfa1 100644 --- a/arch/i386/kernel/mtrr.c +++ b/arch/i386/kernel/mtrr.c @@ -1086,6 +1086,7 @@ static struct file_operations mtrr_fops = mtrr_ioctl, /* IOctl */ NULL, /* MMAP */ mtrr_open, /* Open */ + NULL, /* Flush */ mtrr_close, /* Release */ NULL, /* Fsync */ NULL, /* Fasync */ diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 3bcd0f9bc..bd6113cbb 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -44,7 +44,9 @@ #ifdef CONFIG_MATH_EMULATION #include <asm/math_emu.h> #endif + #include "irq.h" +#include "desc.h" spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED; @@ -488,21 +490,19 @@ void free_task_struct(struct task_struct *p) void release_segments(struct mm_struct *mm) { - void * ldt = mm->segments; - int nr; - /* forget local segments */ - __asm__ __volatile__("movl %w0,%%fs ; movl %w0,%%gs ; lldt %w0" + __asm__ __volatile__("movl %w0,%%fs ; movl %w0,%%gs" : /* no outputs */ : "r" (0)); - current->tss.ldt = 0; - /* - * Set the GDT entry back to the default. - */ - nr = current->tarray_ptr - &task[0]; - set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, &default_ldt, 1); + if (mm->segments) { + void * ldt = mm->segments; + + /* + * Get the LDT entry from init_task. 
+ */ + current->tss.ldt = _LDT(0); + load_ldt(0); - if (ldt) { mm->segments = NULL; vfree(ldt); } @@ -555,25 +555,23 @@ void copy_segments(int nr, struct task_struct *p, struct mm_struct *new_mm) { struct mm_struct * old_mm = current->mm; void * old_ldt = old_mm->segments, * ldt = old_ldt; - int ldt_size = LDT_ENTRIES; - p->tss.ldt = _LDT(nr); + /* default LDT - use the one from init_task */ + p->tss.ldt = _LDT(0); if (old_ldt) { if (new_mm) { ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE); new_mm->segments = ldt; if (!ldt) { printk(KERN_WARNING "ldt allocation failed\n"); - goto no_ldt; + return; } memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE); } - } else { - no_ldt: - ldt = &default_ldt; - ldt_size = 1; + p->tss.ldt = _LDT(nr); + set_ldt_desc(nr, ldt, LDT_ENTRIES); + return; } - set_ldt_desc(gdt+(nr<<1)+FIRST_LDT_ENTRY, ldt, ldt_size); } /* @@ -598,7 +596,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, p->tss.ss0 = __KERNEL_DS; p->tss.tr = _TSS(nr); - set_tss_desc(gdt+(nr<<1)+FIRST_TSS_ENTRY,&(p->tss)); + set_tss_desc(nr,&(p->tss)); p->tss.eip = (unsigned long) ret_from_fork; savesegment(fs,p->tss.fs); @@ -727,6 +725,13 @@ void __switch_to(struct task_struct *prev, struct task_struct *next) gdt_table[next->tss.tr >> 3].b &= 0xfffffdff; asm volatile("ltr %0": :"g" (*(unsigned short *)&next->tss.tr)); + /* + * Save away %fs and %gs. No need to save %es and %ds, as + * those are always kernel segments while inside the kernel. + */ + asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs)); + asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs)); + /* Re-load LDT if necessary */ if (next->mm->segments != prev->mm->segments) asm volatile("lldt %0": :"g" (*(unsigned short *)&next->tss.ldt)); @@ -736,13 +741,8 @@ void __switch_to(struct task_struct *prev, struct task_struct *next) asm volatile("movl %0,%%cr3": :"r" (next->tss.cr3)); /* - * Save away %fs and %gs. No need to save %es and %ds, as - * those are always kernel segments while inside the kernel. - * Restore the new values. + * Restore %fs and %gs. 
*/ - asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs)); - asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs)); - loadsegment(fs,next->tss.fs); loadsegment(gs,next->tss.gs); @@ -761,28 +761,19 @@ void __switch_to(struct task_struct *prev, struct task_struct *next) asmlinkage int sys_fork(struct pt_regs regs) { - int ret; - - lock_kernel(); - ret = do_fork(SIGCHLD, regs.esp, ®s); - unlock_kernel(); - return ret; + return do_fork(SIGCHLD, regs.esp, ®s); } asmlinkage int sys_clone(struct pt_regs regs) { unsigned long clone_flags; unsigned long newsp; - int ret; - lock_kernel(); clone_flags = regs.ebx; newsp = regs.ecx; if (!newsp) newsp = regs.esp; - ret = do_fork(clone_flags, newsp, ®s); - unlock_kernel(); - return ret; + return do_fork(clone_flags, newsp, ®s); } /* diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c index 708376e84..dfa3b9848 100644 --- a/arch/i386/kernel/ptrace.c +++ b/arch/i386/kernel/ptrace.c @@ -3,7 +3,6 @@ /* edited by Linus Torvalds */ #include <linux/config.h> /* for CONFIG_MATH_EMULATION */ -#include <linux/head.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index b2bb69243..498820ec8 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c @@ -439,8 +439,8 @@ static struct cpu_model_info cpu_models[] __initdata = { NULL, NULL, NULL, NULL }}, { X86_VENDOR_INTEL, 6, { "Pentium Pro A-step", "Pentium Pro", NULL, "Pentium II (Klamath)", - NULL, "Pentium II (Deschutes)", NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL }}, + NULL, "Pentium II (Deschutes)", "Celeron (Mendocino)", NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }}, { X86_VENDOR_AMD, 4, { NULL, NULL, NULL, "486 DX/2", NULL, NULL, NULL, "486 DX/2-WB", "486 DX/4", "486 DX/4-WB", NULL, NULL, NULL, NULL, "Am5x86-WT", @@ -467,6 +467,7 @@ __initfunc(void identify_cpu(struct cpuinfo_x86 *c)) char *p = NULL; c->loops_per_sec = loops_per_sec; + c->x86_cache_size = -1; get_cpu_vendor(c); @@ -479,13 +480,64 @@ __initfunc(void identify_cpu(struct cpuinfo_x86 *c)) return; } - if (c->x86_model < 16) - for (i=0; i<sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) - if (cpu_models[i].vendor == c->x86_vendor && - cpu_models[i].x86 == c->x86) { + for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) { + if (c->cpuid_level > 1) { + /* supports eax=2 call */ + int edx, cache_size, dummy; + + cpuid(2, &dummy, &dummy, &dummy, &edx); + + /* We need only the LSB */ + edx &= 0xff; + + switch (edx) { + case 0x40: + cache_size = 0; + + case 0x41: + cache_size = 128; + break; + + case 0x42: + cache_size = 256; + break; + + case 0x43: + cache_size = 512; + break; + + case 0x44: + cache_size = 1024; + break; + + case 0x45: + cache_size = 2048; + break; + + default: + cache_size = 0; + break; + } + + c->x86_cache_size = cache_size; + } + + if (cpu_models[i].vendor == c->x86_vendor && + cpu_models[i].x86 == c->x86) { + if (c->x86_model <= 16) p = cpu_models[i].model_names[c->x86_model]; - break; + + /* Names for the Pentium II processors */ + if ((cpu_models[i].vendor == X86_VENDOR_INTEL) + && (cpu_models[i].x86 == 6) + && (c->x86_model == 5) + && (c->x86_cache_size == 0)) { + p = "Celeron"; } + } + + } + if (p) { strcpy(c->x86_model_id, p); return; @@ -548,14 +600,17 @@ int get_cpuinfo(char * buffer) if (!(cpu_present_map & (1<<n))) continue; #endif - p += sprintf(p, "processor\t: %d\n" + p += sprintf(p,"processor\t: %d\n" + "vendor_id\t: %s\n" "cpu family\t: %c\n" 
- "model\t\t: %s\n" - "vendor_id\t: %s\n", + "model\t\t: %d\n" + "model name\t: %s\n", n, + c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86 + '0', - c->x86_model_id[0] ? c->x86_model_id : "unknown", - c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown"); + c->x86_model, + c->x86_model_id[0] ? c->x86_model_id : "unknown"); + if (c->x86_mask) { if (c->x86_vendor == X86_VENDOR_CYRIX) p += sprintf(p, "stepping\t: %s\n", Cx86_step); @@ -564,6 +619,10 @@ int get_cpuinfo(char * buffer) } else p += sprintf(p, "stepping\t: unknown\n"); + /* Cache size */ + if (c->x86_cache_size >= 0) + p += sprintf(p, "cache size\t: %d KB\n", c->x86_cache_size); + /* Modify the capabilities according to chip type */ if (c->x86_mask) { if (c->x86_vendor == X86_VENDOR_CYRIX) { diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c index 05e53287a..231356d90 100644 --- a/arch/i386/kernel/signal.c +++ b/arch/i386/kernel/signal.c @@ -184,26 +184,17 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *peax) #define COPY_SEG(seg) \ { unsigned short tmp; \ - err |= __get_user(tmp, &sc->seg); \ - if ((tmp & 0xfffc) /* not a NULL selectors */ \ - && (tmp & 0x4) != 0x4 /* not a LDT selector */ \ - && (tmp & 3) != 3) /* not a RPL3 GDT selector */ \ - goto badframe; \ + err |= __get_user(tmp, &sc->seg); \ regs->x##seg = tmp; } #define COPY_SEG_STRICT(seg) \ { unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ - if ((tmp & 0xfffc) && (tmp & 3) != 3) goto badframe; \ - regs->x##seg = tmp; } + regs->x##seg = tmp|3; } #define GET_SEG(seg) \ { unsigned short tmp; \ err |= __get_user(tmp, &sc->seg); \ - if ((tmp & 0xfffc) /* not a NULL selectors */ \ - && (tmp & 0x4) != 0x4 /* not a LDT selector */ \ - && (tmp & 3) != 3) /* not a RPL3 GDT selector */ \ - goto badframe; \ loadsegment(seg,tmp); } GET_SEG(gs); @@ -459,15 +450,12 @@ static void setup_frame(int sig, struct k_sigaction *ka, /* Set up registers for signal handler */ regs->esp = (unsigned long) frame; regs->eip = (unsigned long) ka->sa.sa_handler; - { - unsigned long seg = __USER_DS; - __asm__("movl %w0,%%fs ; movl %w0,%%gs": "=r"(seg) : "0"(seg)); - set_fs(USER_DS); - regs->xds = seg; - regs->xes = seg; - regs->xss = seg; - regs->xcs = __USER_CS; - } + + set_fs(USER_DS); + regs->xds = __USER_DS; + regs->xes = __USER_DS; + regs->xss = __USER_DS; + regs->xcs = __USER_CS; regs->eflags &= ~TF_MASK; #if DEBUG_SIG @@ -533,15 +521,12 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* Set up registers for signal handler */ regs->esp = (unsigned long) frame; regs->eip = (unsigned long) ka->sa.sa_handler; - { - unsigned long seg = __USER_DS; - __asm__("movl %w0,%%fs ; movl %w0,%%gs": "=r"(seg) : "0"(seg)); - set_fs(USER_DS); - regs->xds = seg; - regs->xes = seg; - regs->xss = seg; - regs->xcs = __USER_CS; - } + + set_fs(USER_DS); + regs->xds = __USER_DS; + regs->xes = __USER_DS; + regs->xss = __USER_DS; + regs->xcs = __USER_CS; regs->eflags &= ~TF_MASK; #if DEBUG_SIG diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index d230574ed..ca9a34bdd 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c @@ -113,7 +113,7 @@ spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED; extern __inline int max(int a,int b) { - if(a>b) + if (a>b) return a; return b; } @@ -179,7 +179,7 @@ unsigned long mp_lapic_addr = 0; * SMP mode to <NUM>. 
*/ -__initfunc(void smp_setup(char *str, int *ints)) +void __init smp_setup(char *str, int *ints) { if (ints && ints[0] > 0) max_cpus = ints[1]; @@ -187,7 +187,7 @@ __initfunc(void smp_setup(char *str, int *ints)) max_cpus = 0; } -void ack_APIC_irq (void) +void ack_APIC_irq(void) { /* Clear the IPI */ @@ -225,13 +225,13 @@ static char *mpc_family(int family,int model) "Unknown","Unknown", "80486DX/4" }; - if(family==0x6) + if (family==0x6) return("Pentium(tm) Pro"); - if(family==0x5) + if (family==0x5) return("Pentium(tm)"); - if(family==0x0F && model==0x0F) + if (family==0x0F && model==0x0F) return("Special controller"); - if(family==0x04 && model<9) + if (family==0x04 && model<9) return model_defs[model]; sprintf(n,"Unknown CPU [%d:%d]",family, model); return n; @@ -241,14 +241,14 @@ static char *mpc_family(int family,int model) * Read the MPC */ -__initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) +static int __init smp_read_mpc(struct mp_config_table *mpc) { char str[16]; int count=sizeof(*mpc); - int apics=0; + int ioapics = 0; unsigned char *mpt=((unsigned char *)mpc)+count; - if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) + if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) { printk("Bad signature [%c%c%c%c].\n", mpc->mpc_signature[0], @@ -257,12 +257,12 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) mpc->mpc_signature[3]); return 1; } - if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) + if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) { printk("Checksum error.\n"); return 1; } - if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) + if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) { printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec); return 1; @@ -294,7 +294,7 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) { struct mpc_config_processor *m= (struct mpc_config_processor *)mpt; - if(m->mpc_cpuflag&CPU_ENABLED) + if (m->mpc_cpuflag&CPU_ENABLED) { printk("Processor #%d %s APIC version %d\n", m->mpc_apicid, @@ -304,16 +304,16 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) CPU_MODEL_MASK)>>4), m->mpc_apicver); #ifdef SMP_DEBUG - if(m->mpc_featureflag&(1<<0)) + if (m->mpc_featureflag&(1<<0)) printk(" Floating point unit present.\n"); - if(m->mpc_featureflag&(1<<7)) + if (m->mpc_featureflag&(1<<7)) printk(" Machine Exception supported.\n"); - if(m->mpc_featureflag&(1<<8)) + if (m->mpc_featureflag&(1<<8)) printk(" 64 bit compare & exchange supported.\n"); - if(m->mpc_featureflag&(1<<9)) + if (m->mpc_featureflag&(1<<9)) printk(" Internal APIC present.\n"); #endif - if(m->mpc_cpuflag&CPU_BOOTPROCESSOR) + if (m->mpc_cpuflag&CPU_BOOTPROCESSOR) { SMP_PRINTK((" Bootup CPU\n")); boot_cpu_id=m->mpc_apicid; @@ -321,7 +321,7 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) else /* Boot CPU already counted */ num_processors++; - if(m->mpc_apicid>NR_CPUS) + if (m->mpc_apicid>NR_CPUS) printk("Processor #%d unused. 
(Max %d processors).\n",m->mpc_apicid, NR_CPUS); else { @@ -362,13 +362,17 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) { struct mpc_config_ioapic *m= (struct mpc_config_ioapic *)mpt; - if(m->mpc_flags&MPC_APIC_USABLE) + if (m->mpc_flags&MPC_APIC_USABLE) { - apics++; + ioapics++; printk("I/O APIC #%d Version %d at 0x%lX.\n", m->mpc_apicid,m->mpc_apicver, m->mpc_apicaddr); - mp_ioapic_addr = m->mpc_apicaddr; + /* + * we use the first one only currently + */ + if (ioapics == 1) + mp_ioapic_addr = m->mpc_apicaddr; } mpt+=sizeof(*m); count+=sizeof(*m); @@ -400,8 +404,8 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) } } } - if(apics>1) - printk("Warning: Multiple APICs not supported.\n"); + if (ioapics > 1) + printk("Warning: Multiple IO-APICs not yet supported.\n"); return num_processors; } @@ -409,28 +413,28 @@ __initfunc(static int smp_read_mpc(struct mp_config_table *mpc)) * Scan the memory blocks for an SMP configuration block. */ -__initfunc(int smp_scan_config(unsigned long base, unsigned long length)) +int __init smp_scan_config(unsigned long base, unsigned long length) { unsigned long *bp=phys_to_virt(base); struct intel_mp_floating *mpf; SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n", bp,length)); - if(sizeof(*mpf)!=16) + if (sizeof(*mpf)!=16) printk("Error: MPF size\n"); - while(length>0) + while (length>0) { - if(*bp==SMP_MAGIC_IDENT) + if (*bp==SMP_MAGIC_IDENT) { mpf=(struct intel_mp_floating *)bp; - if(mpf->mpf_length==1 && + if (mpf->mpf_length==1 && !mpf_checksum((unsigned char *)bp,16) && (mpf->mpf_specification == 1 || mpf->mpf_specification == 4) ) { printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); - if(mpf->mpf_feature2&(1<<7)) + if (mpf->mpf_feature2&(1<<7)) printk(" IMCR and PIC compatibility mode.\n"); else printk(" Virtual Wire compatibility mode.\n"); @@ -438,7 +442,7 @@ __initfunc(int smp_scan_config(unsigned long base, unsigned long length)) /* * Now see if we need to read further. */ - if(mpf->mpf_feature1!=0) + if (mpf->mpf_feature1!=0) { unsigned long cfg; @@ -520,7 +524,7 @@ __initfunc(int smp_scan_config(unsigned long base, unsigned long length)) mpf->mpf_feature1); return 1; } - if(mpf->mpf_feature1>4) + if (mpf->mpf_feature1>4) { printk("Bus #1 is PCI\n"); @@ -539,7 +543,7 @@ __initfunc(int smp_scan_config(unsigned long base, unsigned long length)) * Anything here will override the * defaults. */ - if(mpf->mpf_physptr) + if (mpf->mpf_physptr) smp_read_mpc((void *)mpf->mpf_physptr); __cpu_logical_map[0] = boot_cpu_id; @@ -574,7 +578,7 @@ static unsigned char *trampoline_base; * has made sure it's suitably aligned. */ -__initfunc(static unsigned long setup_trampoline(void)) +static unsigned long __init setup_trampoline(void) { memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data); return virt_to_phys(trampoline_base); @@ -584,7 +588,7 @@ __initfunc(static unsigned long setup_trampoline(void)) * We are called very early to get the low memory for the * SMP bootup trampoline page. 
*/ -__initfunc(unsigned long smp_alloc_memory(unsigned long mem_base)) +unsigned long __init smp_alloc_memory(unsigned long mem_base) { if (virt_to_phys((void *)mem_base) >= 0x9F000) panic("smp_alloc_memory: Insufficient low memory for kernel trampoline 0x%lx.", mem_base); @@ -597,7 +601,7 @@ __initfunc(unsigned long smp_alloc_memory(unsigned long mem_base)) * a given CPU */ -__initfunc(void smp_store_cpu_info(int id)) +void __init smp_store_cpu_info(int id) { struct cpuinfo_x86 *c=&cpu_data[id]; @@ -626,7 +630,7 @@ __initfunc(void smp_store_cpu_info(int id)) * we use to track CPUs as they power up. */ -__initfunc(void smp_commence(void)) +void __init smp_commence(void) { /* * Lets the callins below out of their loop. @@ -635,7 +639,7 @@ __initfunc(void smp_commence(void)) smp_commenced=1; } -__initfunc(void enable_local_APIC(void)) +void __init enable_local_APIC(void) { unsigned long value; @@ -654,7 +658,7 @@ __initfunc(void enable_local_APIC(void)) udelay(100); } -__initfunc(unsigned long init_smp_mappings(unsigned long memory_start)) +unsigned long __init init_smp_mappings(unsigned long memory_start) { unsigned long apic_phys, ioapic_phys; @@ -684,7 +688,7 @@ __initfunc(unsigned long init_smp_mappings(unsigned long memory_start)) return memory_start; } -__initfunc(void smp_callin(void)) +void __init smp_callin(void) { extern void calibrate_delay(void); int cpuid=GET_APIC_ID(apic_read(APIC_ID)); @@ -726,7 +730,7 @@ extern int cpu_idle(void * unused); /* * Activate a secondary processor. */ -__initfunc(int start_secondary(void *unused)) +int __init start_secondary(void *unused) { #ifdef CONFIG_MTRR /* Must be done before calibration delay is computed */ @@ -743,7 +747,7 @@ __initfunc(int start_secondary(void *unused)) * CPUs - they just need to reload everything * from the task structure */ -__initfunc(void initialize_secondary(void)) +void __init initialize_secondary(void) { struct thread_struct * p = ¤t->tss; @@ -769,7 +773,7 @@ extern struct { unsigned short ss; } stack_start; -__initfunc(static void do_boot_cpu(int i)) +static void __init do_boot_cpu(int i) { unsigned long cfg; pgd_t maincfg; @@ -921,15 +925,15 @@ __initfunc(static void do_boot_cpu(int i)) if (accept_status) /* Send accept error */ printk("APIC delivery error (%lx).\n", accept_status); - if( !(send_status || accept_status) ) + if ( !(send_status || accept_status) ) { for(timeout=0;timeout<50000;timeout++) { - if(cpu_callin_map[0]&(1<<i)) + if (cpu_callin_map[0]&(1<<i)) break; /* It has booted */ udelay(100); /* Wait 5s total for a response */ } - if(cpu_callin_map[0]&(1<<i)) + if (cpu_callin_map[0]&(1<<i)) { /* number CPUs logically, starting from 1 (BSP is 0) */ #if 0 @@ -942,7 +946,7 @@ __initfunc(static void do_boot_cpu(int i)) } else { - if(*((volatile unsigned char *)phys_to_virt(8192))==0xA5) + if (*((volatile unsigned char *)phys_to_virt(8192))==0xA5) printk("Stuck ??\n"); else printk("Not responding.\n"); @@ -970,7 +974,7 @@ unsigned int prof_counter[NR_CPUS]; * Cycle through the processors sending APIC IPIs to boot each. 
*/ -__initfunc(void smp_boot_cpus(void)) +void __init smp_boot_cpus(void) { int i; unsigned long cfg; @@ -1131,7 +1135,7 @@ __initfunc(void smp_boot_cpus(void)) */ SMP_PRINTK(("Before bogomips.\n")); - if(cpucount==0) + if (cpucount==0) { printk(KERN_ERR "Error: only one processor found.\n"); cpu_present_map=(1<<hard_smp_processor_id()); @@ -1141,7 +1145,7 @@ __initfunc(void smp_boot_cpus(void)) unsigned long bogosum=0; for(i=0;i<32;i++) { - if(cpu_present_map&(1<<i)) + if (cpu_present_map&(1<<i)) bogosum+=cpu_data[i].loops_per_sec; } printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", @@ -1152,7 +1156,7 @@ __initfunc(void smp_boot_cpus(void)) smp_activated=1; smp_num_cpus=cpucount+1; } - if(smp_b_stepping) + if (smp_b_stepping) printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n"); SMP_PRINTK(("Boot done.\n")); @@ -1166,7 +1170,7 @@ smp_done: } -void send_IPI (int dest, int vector) +void send_IPI(int dest, int vector) { unsigned long cfg; unsigned long flags; @@ -1221,7 +1225,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) * During boot up send no messages */ - if(!smp_activated || !smp_commenced) + if (!smp_activated || !smp_commenced) return; @@ -1281,7 +1285,7 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) while (ct<1000) { cfg=apic_read(APIC_ICR); - if(!(cfg&(1<<12))) + if (!(cfg&(1<<12))) break; ct++; udelay(10); @@ -1291,20 +1295,20 @@ void smp_message_pass(int target, int msg, unsigned long data, int wait) * Just pray... there is nothing more we can do */ - if(ct==1000) + if (ct==1000) printk("CPU #%d: previous IPI still not cleared after 10mS\n", p); /* * Set the target requirement */ - if(target==MSG_ALL_BUT_SELF) + if (target==MSG_ALL_BUT_SELF) { dest=APIC_DEST_ALLBUT; target_map=cpu_present_map; cpu_callin_map[0]=(1<<p); } - else if(target==MSG_ALL) + else if (target==MSG_ALL) { dest=APIC_DEST_ALLINC; target_map=cpu_present_map; @@ -1440,7 +1444,7 @@ void smp_local_timer_interrupt(struct pt_regs * regs) * useful with a profiling multiplier != 1 */ if (!user_mode(regs)) - x86_do_profile (regs->eip); + x86_do_profile(regs->eip); if (!--prof_counter[cpu]) { int user=0,system=0; @@ -1533,7 +1537,7 @@ asmlinkage void smp_invalidate_interrupt(void) if (test_and_clear_bit(smp_processor_id(), &smp_invalidate_needed)) local_flush_tlb(); - ack_APIC_irq (); + ack_APIC_irq(); } /* @@ -1543,15 +1547,15 @@ asmlinkage void smp_stop_cpu_interrupt(void) { if (cpu_data[smp_processor_id()].hlt_works_ok) for(;;) __asm__("hlt"); - for (;;) ; + for (;;) ; } void (*mtrr_hook) (void) = NULL; asmlinkage void smp_mtrr_interrupt(void) { - ack_APIC_irq (); - if (mtrr_hook) (*mtrr_hook) (); + ack_APIC_irq(); + if (mtrr_hook) (*mtrr_hook)(); } /* @@ -1559,7 +1563,7 @@ asmlinkage void smp_mtrr_interrupt(void) */ asmlinkage void smp_spurious_interrupt(void) { - /* ack_APIC_irq (); see sw-dev-man vol 3, chapter 7.4.13.5 */ + /* ack_APIC_irq(); see sw-dev-man vol 3, chapter 7.4.13.5 */ printk("spurious APIC interrupt, ayiee, should never happen.\n"); } @@ -1581,7 +1585,7 @@ asmlinkage void smp_spurious_interrupt(void) * but we do not accept timer interrupts yet. We only allow the BP * to calibrate. 
*/ -__initfunc(static unsigned int get_8254_timer_count (void)) +static unsigned int __init get_8254_timer_count(void) { unsigned int count; @@ -1608,7 +1612,7 @@ __initfunc(static unsigned int get_8254_timer_count (void)) #define APIC_DIVISOR 16 -void setup_APIC_timer (unsigned int clocks) +void setup_APIC_timer(unsigned int clocks) { unsigned long lvtt1_value; unsigned int tmp_value; @@ -1636,7 +1640,7 @@ void setup_APIC_timer (unsigned int clocks) apic_write(APIC_TMICT, clocks/APIC_DIVISOR); } -__initfunc(void wait_8254_wraparound (void)) +void __init wait_8254_wraparound(void) { unsigned int curr_count, prev_count=~0; int delta; @@ -1670,7 +1674,7 @@ __initfunc(void wait_8254_wraparound (void)) * APIC irq that way. */ -__initfunc(int calibrate_APIC_clock (void)) +int __init calibrate_APIC_clock(void) { unsigned long long t1,t2; long tt1,tt2; @@ -1741,7 +1745,7 @@ __initfunc(int calibrate_APIC_clock (void)) static unsigned int calibration_result; -__initfunc(void setup_APIC_clock (void)) +void __init setup_APIC_clock(void) { unsigned long flags; @@ -1796,7 +1800,7 @@ __initfunc(void setup_APIC_clock (void)) * * usually you want to run this on all CPUs ;) */ -int setup_profiling_timer (unsigned int multiplier) +int setup_profiling_timer(unsigned int multiplier) { int cpu = smp_processor_id(); unsigned long flags; @@ -1811,7 +1815,7 @@ int setup_profiling_timer (unsigned int multiplier) save_flags(flags); cli(); - setup_APIC_timer (calibration_result/multiplier); + setup_APIC_timer(calibration_result/multiplier); prof_multiplier[cpu]=multiplier; restore_flags(flags); diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 1f632e570..6d60aeb40 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c @@ -9,7 +9,6 @@ * state in 'asm.s'. */ #include <linux/config.h> -#include <linux/head.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/string.h> @@ -29,10 +28,20 @@ #include <asm/atomic.h> #include <asm/debugreg.h> +#include "desc.h" + asmlinkage int system_call(void); asmlinkage void lcall7(void); + struct desc_struct default_ldt = { 0, 0 }; +/* + * The IDT has to be page-aligned to simplify the Pentium + * F0 0F bug workaround.. We have a special link segment + * for this. + */ +struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, }; + static inline void console_verbose(void) { extern int console_loglevel; @@ -467,12 +476,11 @@ __initfunc(void trap_init_f00f_bug(void)) * move the IDT into it and write protect this page. */ page = (unsigned long) vmalloc(PAGE_SIZE); - memcpy((void *) page, idt_table, 256*8); - pgd = pgd_offset(&init_mm, page); pmd = pmd_offset(pgd, page); pte = pte_offset(pmd, page); - *pte = pte_wrprotect(*pte); + free_page(pte_page(*pte)); + *pte = mk_pte(&idt_table, PAGE_KERNEL_RO); local_flush_tlb(); /* @@ -484,12 +492,77 @@ __initfunc(void trap_init_f00f_bug(void)) __asm__ __volatile__("lidt %0": "=m" (idt_descr)); } +#define _set_gate(gate_addr,type,dpl,addr) \ +__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \ + "movw %2,%%dx\n\t" \ + "movl %%eax,%0\n\t" \ + "movl %%edx,%1" \ + :"=m" (*((long *) (gate_addr))), \ + "=m" (*(1+(long *) (gate_addr))) \ + :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \ + "d" ((char *) (addr)),"a" (__KERNEL_CS << 16) \ + :"ax","dx") + +/* + * This needs to use 'idt_table' rather than 'idt', and + * thus use the _nonmapped_ version of the IDT, as the + * Pentium F0 0F bugfix can have resulted in the mapped + * IDT being write-protected. 
+ */ +void set_intr_gate(unsigned int n, void *addr) +{ + _set_gate(idt_table+n,14,0,addr); +} + +static void __init set_trap_gate(unsigned int n, void *addr) +{ + _set_gate(idt_table+n,15,0,addr); +} + +static void __init set_system_gate(unsigned int n, void *addr) +{ + _set_gate(idt_table+n,15,3,addr); +} + +static void __init set_call_gate(void *a, void *addr) +{ + _set_gate(a,12,3,addr); +} + +#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\ + *((gate_addr)+1) = ((base) & 0xff000000) | \ + (((base) & 0x00ff0000)>>16) | \ + ((limit) & 0xf0000) | \ + ((dpl)<<13) | \ + (0x00408000) | \ + ((type)<<8); \ + *(gate_addr) = (((base) & 0x0000ffff)<<16) | \ + ((limit) & 0x0ffff); } + +#define _set_tssldt_desc(n,addr,limit,type) \ +__asm__ __volatile__ ("movw %3,0(%2)\n\t" \ + "movw %%ax,2(%2)\n\t" \ + "rorl $16,%%eax\n\t" \ + "movb %%al,4(%2)\n\t" \ + "movb %4,5(%2)\n\t" \ + "movb $0,6(%2)\n\t" \ + "movb %%ah,7(%2)\n\t" \ + "rorl $16,%%eax" \ + : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type)) + +void set_tss_desc(unsigned int n, void *addr) +{ + _set_tssldt_desc(gdt_table+FIRST_TSS_ENTRY+(n<<1), (int)addr, 235, 0x89); +} +void set_ldt_desc(unsigned int n, void *addr, unsigned int size) +{ + _set_tssldt_desc(gdt_table+FIRST_LDT_ENTRY+(n<<1), (int)addr, ((size << 3) - 1), 0x82); +} void __init trap_init(void) { int i; - struct desc_struct * p; if (readl(0x0FFFD9) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24)) EISA_bus = 1; @@ -515,19 +588,12 @@ void __init trap_init(void) for (i=18;i<48;i++) set_trap_gate(i,&reserved); set_system_gate(0x80,&system_call); -/* set up GDT task & ldt entries */ - p = gdt+FIRST_TSS_ENTRY; - set_tss_desc(p, &init_task.tss); - p++; - set_ldt_desc(p, &default_ldt, 1); - p++; - for(i=1 ; i<NR_TASKS ; i++) { - p->a=p->b=0; - p++; - p->a=p->b=0; - p++; - } -/* Clear NT, so that we won't have troubles with that later on */ + + /* set up GDT task & ldt entries */ + set_tss_desc(0, &init_task.tss); + set_ldt_desc(0, &default_ldt, 1); + + /* Clear NT, so that we won't have troubles with that later on */ __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl"); load_TR(0); load_ldt(0); diff --git a/arch/i386/lib/usercopy.c b/arch/i386/lib/usercopy.c index 6b313d99c..d5b052c20 100644 --- a/arch/i386/lib/usercopy.c +++ b/arch/i386/lib/usercopy.c @@ -7,7 +7,7 @@ */ #include <asm/uaccess.h> -inline unsigned long +unsigned long __generic_copy_to_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) @@ -15,11 +15,11 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) return n; } -inline unsigned long +unsigned long __generic_copy_from_user(void *to, const void *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) - __copy_user(to,from,n); + __copy_user_zeroing(to,from,n); return n; } diff --git a/arch/i386/math-emu/get_address.c b/arch/i386/math-emu/get_address.c index a4b15ee7f..1e7009a23 100644 --- a/arch/i386/math-emu/get_address.c +++ b/arch/i386/math-emu/get_address.c @@ -19,7 +19,6 @@ #include <linux/stddef.h> -#include <linux/head.h> #include <asm/uaccess.h> diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c index c4955d724..358ff5033 100644 --- a/arch/i386/mm/fault.c +++ b/arch/i386/mm/fault.c @@ -6,7 +6,6 @@ #include <linux/signal.h> #include <linux/sched.h> -#include <linux/head.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> @@ -76,7 +75,8 @@ bad_area: return 0; } -asmlinkage void do_invalid_op (struct pt_regs *, unsigned long); +asmlinkage void do_invalid_op(struct 
pt_regs *, unsigned long); +extern unsigned long idt; /* * This routine handles page faults. It determines the address, @@ -186,7 +186,7 @@ bad_area: if (boot_cpu_data.f00f_bug) { unsigned long nr; - nr = (address - (unsigned long) idt) >> 3; + nr = (address - idt) >> 3; if (nr == 6) { do_invalid_op(regs, 0); diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index b4cba8730..aed7ecc55 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c @@ -7,7 +7,6 @@ #include <linux/config.h> #include <linux/signal.h> #include <linux/sched.h> -#include <linux/head.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> @@ -220,7 +219,7 @@ static unsigned long __init fixmap_init(unsigned long start_mem) for (idx=1; idx <= __end_of_fixed_addresses; idx += PTRS_PER_PTE) { - address = fix_to_virt(__end_of_fixed_addresses-idx); + address = __fix_to_virt(__end_of_fixed_addresses-idx); pg_dir = swapper_pg_dir + (address >> PGDIR_SHIFT); memset((void *)start_mem, 0, PAGE_SIZE); pgd_val(*pg_dir) = _PAGE_TABLE | __pa(start_mem); @@ -246,8 +245,12 @@ static void set_pte_phys (unsigned long vaddr, unsigned long phys) void set_fixmap (enum fixed_addresses idx, unsigned long phys) { - unsigned long address = fix_to_virt(idx); + unsigned long address = __fix_to_virt(idx); + if (idx >= __end_of_fixed_addresses) { + printk("Invalid set_fixmap\n"); + return; + } set_pte_phys (address,phys); } diff --git a/arch/i386/vmlinux.lds b/arch/i386/vmlinux.lds index 3812c81db..c23007bc8 100644 --- a/arch/i386/vmlinux.lds +++ b/arch/i386/vmlinux.lds @@ -35,6 +35,9 @@ SECTIONS _edata = .; /* End of data section */ + . = ALIGN(8192); /* init_task */ + .data.init_task : { *(.data.init_task) } + . = ALIGN(4096); /* Init code and data */ __init_begin = .; .text.init : { *(.text.init) } @@ -42,6 +45,9 @@ SECTIONS . = ALIGN(4096); __init_end = .; + . = ALIGN(4096); + .data.page_aligned : { *(.data.idt) } + __bss_start = .; /* BSS */ .bss : { *(.bss) |
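
The same status-word rework also rewires IRQ autoprobing around the new IRQ_AUTODETECT flag: probe_irq_on() now arms every unclaimed line and returns an opaque cookie, and probe_irq_off() reports which armed line actually fired. A minimal driver-side sketch of how that interface is meant to be used, assuming a hypothetical helper that makes the card raise its interrupt:

/* Prototypes match the reworked arch/i386/kernel/irq.c above; a real
 * driver would pick them up from the kernel headers instead. */
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long cookie);

/* Hypothetical device-specific helper: force the card to raise its IRQ line. */
extern void kick_card_interrupt(void);

static int autoprobe_card_irq(void)
{
	unsigned long cookie;
	int irq;

	cookie = probe_irq_on();        /* arm every unclaimed IRQ line        */
	kick_card_interrupt();          /* make the card generate one interrupt */
	/* ...small delay so the edge is actually delivered and latched...     */
	irq = probe_irq_off(cookie);    /* >0: IRQ found, 0: none, <0: several  */

	if (irq <= 0)
		return -1;              /* probing failed or was ambiguous      */
	return irq;
}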