From 89eba5eb77bbf92ffed6686c951cc35f4027e71f Mon Sep 17 00:00:00 2001
From: Ralf Baechle
Date: Thu, 27 Jul 2000 23:20:03 +0000
Subject: Merge with Linux 2.4.0-test5-pre5.

---
 arch/i386/kernel/apic.c     | 46 ++++++++--------------------------------------
 arch/i386/kernel/pci-i386.c |  8 --------
 arch/i386/kernel/smp.c      | 40 ++++++++++++++++++++++++++++++----------
 3 files changed, 38 insertions(+), 56 deletions(-)

(limited to 'arch/i386')

diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 0600d91a9..a839790f8 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -569,43 +569,6 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #undef APIC_DIVISOR
 
-#ifdef CONFIG_SMP
-static inline void handle_smp_time (int user, int cpu)
-{
-	int system = !user;
-	struct task_struct * p = current;
-	/*
-	 * After doing the above, we need to make like
-	 * a normal interrupt - otherwise timer interrupts
-	 * ignore the global interrupt lock, which is the
-	 * WrongThing (tm) to do.
-	 */
-
-	irq_enter(cpu, 0);
-	update_one_process(p, 1, user, system, cpu);
-	if (p->pid) {
-		p->counter -= 1;
-		if (p->counter <= 0) {
-			p->counter = 0;
-			p->need_resched = 1;
-		}
-		if (p->nice > 0) {
-			kstat.cpu_nice += user;
-			kstat.per_cpu_nice[cpu] += user;
-		} else {
-			kstat.cpu_user += user;
-			kstat.per_cpu_user[cpu] += user;
-		}
-		kstat.cpu_system += system;
-		kstat.per_cpu_system[cpu] += system;
-	} else if (local_bh_count(cpu) || local_irq_count(cpu) > 1) {
-		kstat.cpu_system += system;
-		kstat.per_cpu_system[cpu] += system;
-	}
-	irq_exit(cpu, 0);
-}
-#endif
-
 /*
  * Local timer interrupt handler. It does both profiling and
  * process statistics/rescheduling.
@@ -646,7 +609,14 @@ inline void smp_local_timer_interrupt(struct pt_regs * regs)
 	}
 
 #ifdef CONFIG_SMP
-	handle_smp_time(user, cpu);
+	/*
+	 * update_process_times() expects us to have done irq_enter().
+	 * Besides, if we don't timer interrupts ignore the global
+	 * interrupt lock, which is the WrongThing (tm) to do.
+	 */
+	irq_enter(cpu, 0);
+	update_process_times(user);
+	irq_exit(cpu, 0);
 #endif
 }
 
diff --git a/arch/i386/kernel/pci-i386.c b/arch/i386/kernel/pci-i386.c
index 0e48d44d3..ab9cdfdf3 100644
--- a/arch/i386/kernel/pci-i386.c
+++ b/arch/i386/kernel/pci-i386.c
@@ -138,17 +138,9 @@ pcibios_update_resource(struct pci_dev *dev, struct resource *root,
 void
 pcibios_align_resource(void *data, struct resource *res, unsigned long size)
 {
-	struct pci_dev *dev = data;
-
 	if (res->flags & IORESOURCE_IO) {
 		unsigned long start = res->start;
 
-		if (size > 0x100) {
-			printk(KERN_ERR "PCI: I/O Region %s/%d too large"
-			       " (%ld bytes)\n", dev->slot_name,
-			       dev->resource - res, size);
-		}
-
 		if (start & 0x300) {
 			start = (start + 0x3ff) & ~0x3ff;
 			res->start = start;
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index b11b629fe..4a118ab17 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -207,7 +207,7 @@ static inline void send_IPI_mask(int mask, int vector)
  *	These mean you can really definitely utterly forget about
  *	writing to user space from interrupts. (Its not allowed anyway).
  *
- *	Optimizations Manfred Spraul
+ *	Optimizations Manfred Spraul
  */
 
 static volatile unsigned long flush_cpumask;
@@ -216,23 +216,45 @@ static unsigned long flush_va;
 static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
 #define FLUSH_ALL	0xffffffff
 
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ */
 static void inline leave_mm (unsigned long cpu)
 {
 	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
 		BUG();
 	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
-	cpu_tlbstate[cpu].state = TLBSTATE_OLD;
 }
 
 /*
  *
  * The flush IPI assumes that a thread switch happens in this order:
- * 1) set_bit(cpu, &new_mm->cpu_vm_mask);
- * 2) update cpu_tlbstate
- *	[now the cpu can accept tlb flush request for the new mm]
- * 3) change cr3 (if required, or flush local tlb,...)
- * 4) clear_bit(cpu, &old_mm->cpu_vm_mask);
- * 5) switch %%esp, ie current
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ *	Stop ipi delivery for the old mm. This is not synchronized with
+ *	the other cpus, but smp_invalidate_interrupt ignore flush ipis
+ *	for the wrong mm, and in the worst case we perform a superflous
+ *	tlb flush.
+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
+ *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ *	was in lazy tlb mode.
+ * 1a3) update cpu_tlbstate[].active_mm
+ *	Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
+ *	Now the other cpus will send tlb flush ipis.
+ * 1a4) change cr3.
+ * 1b) thread switch without mm change
+ *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
+ *	flush ipis.
+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ *	Atomically set the bit [other cpus will start sending flush ipis],
+ *	and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
  *
  * The interrupt must handle 2 special cases:
  * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
@@ -249,8 +271,6 @@ static void inline leave_mm (unsigned long cpu)
  *
  * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
  * 2) Leave the mm if we are in the lazy tlb mode.
- *	We cannot call mmdrop() because we are in interrupt context,
- *	instead update cpu_tlbstate.
  */
 
 asmlinkage void smp_invalidate_interrupt (void)
-- 
cgit v1.2.3
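The switch ordering documented by the new smp.c comment is easier to follow as code. Below is a compressed C sketch of the switch_mm() side it describes; the numbered comments match the 1a and 1b steps in the comment. struct mm, the bit helpers and load_cr3() are stand-in declarations, so treat this as an illustration of the documented protocol, not the code this patch actually touches.

struct mm { unsigned long cpu_vm_mask; };	/* only the field the sketch needs */

extern struct { int state; struct mm *active_mm; } cpu_tlbstate[];
#define TLBSTATE_OK	1

extern void clear_bit(int nr, unsigned long *addr);
extern void set_bit(int nr, unsigned long *addr);
extern int  test_and_set_bit(int nr, unsigned long *addr);
extern void load_cr3(struct mm *mm);
extern void local_flush_tlb(void);

static void switch_mm_sketch(struct mm *prev, struct mm *next, int cpu)
{
	if (prev != next) {
		/* 1a) thread switch to a different mm */
		clear_bit(cpu, &prev->cpu_vm_mask);	/* 1a1: stop flush ipis for the old mm */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;	/* 1a2: ipi handler must not leave_mm() now */
		cpu_tlbstate[cpu].active_mm = next;	/* 1a3: accept tlb flushes for the new mm */
		set_bit(cpu, &next->cpu_vm_mask);	/* 1a4: other cpus start sending flush ipis */
		load_cr3(next);				/* 1a4: change cr3 */
	} else {
		/* 1b) thread switch without an mm change */
		cpu_tlbstate[cpu].state = TLBSTATE_OK;		/* 1b1 */
		if (!test_and_set_bit(cpu, &next->cpu_vm_mask))	/* 1b2 */
			local_flush_tlb();	/* 1b3: leave_mm() ran while we were lazy, tlb is stale */
	}
}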
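The receiving side follows the two cases listed above for smp_invalidate_interrupt(): flush if this cpu really uses the mm being flushed, otherwise drop out of lazy tlb mode. The sketch below mirrors that logic; flush_mm, flush_va, FLUSH_ALL, leave_mm() and __flush_tlb_one() follow the names used in this file, while the declarations are stubs repeated here so the fragment stands alone.

struct mm { unsigned long cpu_vm_mask; };

extern struct { int state; struct mm *active_mm; } cpu_tlbstate[];
#define TLBSTATE_OK	1
#define FLUSH_ALL	0xffffffff

extern struct mm *flush_mm;		/* mm being flushed, set by the requesting cpu */
extern unsigned long flush_va;		/* one page to flush, or FLUSH_ALL */
extern void leave_mm(unsigned long cpu);/* clears this cpu's bit in mm->cpu_vm_mask */
extern void local_flush_tlb(void);
extern void __flush_tlb_one(unsigned long va);

static void invalidate_interrupt_sketch(unsigned long cpu)
{
	if (flush_mm != cpu_tlbstate[cpu].active_mm)
		return;		/* ipi meant for an mm this cpu no longer runs */

	if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
		/* case 1: this cpu uses the mm, flush the requested entries */
		if (flush_va == FLUSH_ALL)
			local_flush_tlb();
		else
			__flush_tlb_one(flush_va);
	} else {
		/*
		 * case 2: lazy tlb mode, stop further flush ipis for this mm.
		 * mmdrop() is not allowed in interrupt context, so leave_mm()
		 * only clears our bit in mm->cpu_vm_mask.
		 */
		leave_mm(cpu);
	}
}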