Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/acpi.c        |  37
-rw-r--r--  arch/i386/kernel/apm.c         |  14
-rw-r--r--  arch/i386/kernel/entry.S       |   2
-rw-r--r--  arch/i386/kernel/i386_ksyms.c  |   4
-rw-r--r--  arch/i386/kernel/i8259.c       |  36
-rw-r--r--  arch/i386/kernel/io_apic.c     | 102
-rw-r--r--  arch/i386/kernel/irq.c         | 419
-rw-r--r--  arch/i386/kernel/microcode.c   |  46
-rw-r--r--  arch/i386/kernel/mpparse.c     |  29
-rw-r--r--  arch/i386/kernel/mtrr.c        |   9
-rw-r--r--  arch/i386/kernel/process.c     |   9
-rw-r--r--  arch/i386/kernel/semaphore.c   |   5
-rw-r--r--  arch/i386/kernel/setup.c       |  17
-rw-r--r--  arch/i386/kernel/traps.c       |   5
14 files changed, 512 insertions, 222 deletions
diff --git a/arch/i386/kernel/acpi.c b/arch/i386/kernel/acpi.c
index 9bdd111d1..6228805db 100644
--- a/arch/i386/kernel/acpi.c
+++ b/arch/i386/kernel/acpi.c
@@ -34,6 +34,7 @@
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -379,13 +380,14 @@ static struct acpi_table *__init acpi_map_table(u32 addr)
ioremap((unsigned long) addr, table_size);
}
- if (!table) {
- /* ioremap is a pain, it returns NULL if the
- * table starts within mapped physical memory.
- * Hopefully, no table straddles a mapped/unmapped
- * physical memory boundary, ugh
+ if (!table && addr < virt_to_phys(high_memory)) {
+ /* sometimes we see ACPI tables in low memory
+ * and not reserved by the memory map (E820) code,
+ * who is at fault for this? BIOS?
*/
- table = (struct acpi_table*) phys_to_virt(addr);
+ printk(KERN_ERR
+ "ACPI: unreserved table memory @ 0x%p!\n",
+ (void*) addr);
}
}
return table;
@@ -933,9 +935,9 @@ static int acpi_enter_dx(acpi_dstate_t state)
int status = 0;
if (state == ACPI_D0)
- status = pm_send_request(PM_RESUME, (void*) state);
+ status = pm_send_all(PM_RESUME, (void*) state);
else
- status = pm_send_request(PM_SUSPEND, (void*) state);
+ status = pm_send_all(PM_SUSPEND, (void*) state);
return status;
}
@@ -1333,10 +1335,7 @@ static int __init acpi_init(void)
if (acpi_claim_ioports(acpi_facp)) {
printk(KERN_ERR "ACPI: I/O port allocation failed\n");
- if (pci_driver_registered)
- pci_unregister_driver(&acpi_driver);
- acpi_destroy_tables();
- return -ENODEV;
+ goto err_out;
}
if (acpi_facp->sci_int
@@ -1347,12 +1346,7 @@ static int __init acpi_init(void)
acpi_facp)) {
printk(KERN_ERR "ACPI: SCI (IRQ%d) allocation failed\n",
acpi_facp->sci_int);
-
- if (pci_driver_registered)
- pci_unregister_driver(&acpi_driver);
- acpi_destroy_tables();
-
- return -ENODEV;
+ goto err_out;
}
acpi_sysctl = register_sysctl_table(acpi_dir_table, 1);
@@ -1379,6 +1373,13 @@ static int __init acpi_init(void)
pm_idle = acpi_idle;
return 0;
+
+err_out:
+ if (pci_driver_registered)
+ pci_unregister_driver(&acpi_driver);
+ acpi_destroy_tables();
+
+ return -ENODEV;
}
/*
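
A note on the acpi_init() change above: instead of duplicating the unwind sequence (unregister the PCI driver, destroy the tables) at every failure point, all error paths now jump to a single err_out label. This is the standard C cleanup idiom. A minimal user-space sketch of the same shape, with made-up resources for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    static int init_subsystem(void)
    {
        char *buf = NULL;
        FILE *f = NULL;

        buf = malloc(4096);
        if (!buf)
            goto err_out;

        f = fopen("/etc/hostname", "r");    /* any second acquisition */
        if (!f)
            goto err_out;

        /* ... later setup that may fail jumps to err_out too ... */
        return 0;   /* on success the resources stay live for later use */

    err_out:
        /* one unwind path covers every failure above */
        if (f)
            fclose(f);
        free(buf);  /* free(NULL) is a no-op */
        return -1;
    }

    int main(void)
    {
        return init_subsystem() ? 1 : 0;
    }
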
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index 4ec5e7993..3d403b93c 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -333,7 +333,7 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
static struct apm_user * user_list = NULL;
-static char driver_version[] = "1.12"; /* no spaces */
+static char driver_version[] = "1.13"; /* no spaces */
static char * apm_event_name[] = {
"system standby",
@@ -590,7 +590,11 @@ static void apm_cpu_idle(void)
continue;
if (hlt_counter)
continue;
- asm volatile("sti ; hlt" : : : "memory");
+ asm volatile("cli" : : : "memory");
+ if (!current->need_resched)
+ asm volatile("sti ; hlt" : : : "memory");
+ else
+ asm volatile("sti" : : : "memory");
continue;
}
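
The idle-loop change above (and the matching default_idle() change in process.c further down) closes a wake-up race: with a bare "sti ; hlt", an interrupt that sets current->need_resched can slip in after the need_resched test but before the hlt, leaving the CPU halted with runnable work until the next interrupt. The fix tests need_resched with interrupts disabled and relies on the x86 rule that sti takes effect only after the following instruction, so "sti ; hlt" is atomic with respect to interrupt delivery. A rough user-space analogue of the race and the fix, with signals standing in for interrupts (run it and send SIGUSR1 to wake it):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t work_pending;      /* ~ need_resched */

    static void on_sigusr1(int sig) { (void)sig; work_pending = 1; }

    int main(void)
    {
        struct sigaction sa = { .sa_handler = on_sigusr1 };
        sigset_t block, old;

        sigaction(SIGUSR1, &sa, NULL);
        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);

        /*
         * Racy version (~ unconditional "sti ; hlt"):
         *     if (!work_pending) pause();
         * a signal landing between the test and pause() is lost.
         */

        sigprocmask(SIG_BLOCK, &block, &old);       /* ~ "cli" */
        while (!work_pending)
            sigsuspend(&old);                       /* atomically ~ "sti ; hlt" */
        sigprocmask(SIG_SETMASK, &old, NULL);       /* ~ "sti" */

        puts("woke with work pending");
        return 0;
    }
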
@@ -635,7 +639,7 @@ static void apm_power_off(void)
*/
#ifdef CONFIG_SMP
/* Some bioses don't like being called from CPU != 0 */
- while (cpu_number_map[smp_processor_id()] != 0) {
+ while (cpu_number_map(smp_processor_id()) != 0) {
kernel_thread(apm_magic, NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND | SIGCHLD);
schedule();
@@ -916,7 +920,7 @@ static int send_event(apm_event_t event, struct apm_user *sender)
case APM_CRITICAL_SUSPEND:
case APM_USER_SUSPEND:
/* map all suspends to ACPI D3 */
- if (pm_send_request(PM_SUSPEND, (void *)3)) {
+ if (pm_send_all(PM_SUSPEND, (void *)3)) {
if (apm_bios_info.version > 0x100)
apm_set_power_state(APM_STATE_REJECT);
return 0;
@@ -925,7 +929,7 @@ static int send_event(apm_event_t event, struct apm_user *sender)
case APM_NORMAL_RESUME:
case APM_CRITICAL_RESUME:
/* map all resumes to ACPI D0 */
- (void) pm_send_request(PM_RESUME, (void *)0);
+ (void) pm_send_all(PM_RESUME, (void *)0);
break;
}
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index bcca244c1..0c3cae5d9 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -181,6 +181,8 @@ ret_from_fork:
call SYMBOL_NAME(schedule_tail)
addl $4, %esp
GET_CURRENT(%ebx)
+ testb $0x20,flags(%ebx) # PF_TRACESYS
+ jne tracesys_exit
jmp ret_from_sys_call
/*
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index cad6ceb17..a3389c5f0 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -144,6 +144,4 @@ EXPORT_SYMBOL(screen_info);
EXPORT_SYMBOL(get_wchan);
-
-EXPORT_SYMBOL(local_bh_count);
-EXPORT_SYMBOL(local_irq_count);
+EXPORT_SYMBOL(irq_stat);
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index d54f9b503..ec33f2269 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -127,11 +127,14 @@ void (*interrupt[NR_IRQS])(void) = {
* moves to arch independent land
*/
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
+static spinlock_t i8259A_lock = SPIN_LOCK_UNLOCKED;
+
+static void end_8259A_irq (unsigned int irq)
+{
+ if (!(irq_desc[irq].status & IRQ_DISABLED))
+ enable_8259A_irq(irq);
+}
-/* shutdown is same as "disable" */
-#define end_8259A_irq enable_8259A_irq
#define shutdown_8259A_irq disable_8259A_irq
void mask_and_ack_8259A(unsigned int);
@@ -149,7 +152,8 @@ static struct hw_interrupt_type i8259A_irq_type = {
enable_8259A_irq,
disable_8259A_irq,
mask_and_ack_8259A,
- end_8259A_irq
+ end_8259A_irq,
+ NULL
};
/*
@@ -183,30 +187,45 @@ unsigned long io_apic_irqs = 0;
void disable_8259A_irq(unsigned int irq)
{
unsigned int mask = 1 << irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask |= mask;
if (irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
+ spin_unlock_irqrestore(&i8259A_lock, flags);
}
void enable_8259A_irq(unsigned int irq)
{
unsigned int mask = ~(1 << irq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&i8259A_lock, flags);
cached_irq_mask &= mask;
if (irq & 8)
outb(cached_A1,0xA1);
else
outb(cached_21,0x21);
+ spin_unlock_irqrestore(&i8259A_lock, flags);
}
int i8259A_irq_pending(unsigned int irq)
{
unsigned int mask = 1<<irq;
+ unsigned long flags;
+ int ret;
+ spin_lock_irqsave(&i8259A_lock, flags);
if (irq < 8)
- return (inb(0x20) & mask);
- return (inb(0xA0) & (mask >> 8));
+ ret = inb(0x20) & mask;
+ else
+ ret = inb(0xA0) & (mask >> 8);
+ spin_unlock_irqrestore(&i8259A_lock, flags);
+
+ return ret;
}
void make_8259A_irq(unsigned int irq)
@@ -247,7 +266,9 @@ static inline int i8259A_irq_real(unsigned int irq)
void mask_and_ack_8259A(unsigned int irq)
{
unsigned int irqmask = 1 << irq;
+ unsigned long flags;
+ spin_lock_irqsave(&i8259A_lock, flags);
/*
* Lightweight spurious IRQ detection. We do not want
* to overdo spurious IRQ handling - it's usually a sign
@@ -278,6 +299,7 @@ handle_real_irq:
outb(cached_21,0x21);
outb(0x20,0x20); /* 'generic EOI' to master */
}
+ spin_unlock_irqrestore(&i8259A_lock, flags);
return;
spurious_8259A_irq:
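
With the global irq_controller_lock gone (see the irq.c diff below), i8259.c grows its own i8259A_lock. The point is that cached_irq_mask is a software shadow of two write-only PIC mask registers, so the read-modify-write of the shadow and the outb() that pushes it to hardware must form one atomic unit; irqsave is used because the PIC is also touched from interrupt context. A compilable user-space analogue of the shadow-register pattern, with a pthread spinlock standing in for spin_lock_irqsave:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t cached_mask = 0xffff;   /* ~ cached_irq_mask */
    static pthread_spinlock_t mask_lock;

    static void write_hw(uint16_t m)        /* ~ outb() to 0x21/0xA1 */
    {
        printf("outb 0x%04x\n", (unsigned) m);
    }

    static void disable_line(unsigned int line)
    {
        pthread_spin_lock(&mask_lock);
        cached_mask |= 1u << line;          /* update the shadow... */
        write_hw(cached_mask);              /* ...and hardware, atomically */
        pthread_spin_unlock(&mask_lock);
    }

    static void enable_line(unsigned int line)
    {
        pthread_spin_lock(&mask_lock);
        cached_mask &= ~(1u << line);
        write_hw(cached_mask);
        pthread_spin_unlock(&mask_lock);
    }

    int main(void)
    {
        pthread_spin_init(&mask_lock, PTHREAD_PROCESS_PRIVATE);
        disable_line(3);
        enable_line(3);
        pthread_spin_destroy(&mask_lock);
        return 0;
    }
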
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 75b2bfb9f..129a587f0 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -28,6 +28,8 @@
#include <asm/smp.h>
#include <asm/desc.h>
+static spinlock_t ioapic_lock = SPIN_LOCK_UNLOCKED;
+
/*
* # of IO-APICs and # of IRQ routing registers
*/
@@ -87,9 +89,8 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
entry->pin = pin;
}
-#define DO_ACTION(name,R,ACTION, FINAL) \
+#define __DO_ACTION(name,R,ACTION, FINAL) \
\
-static void name##_IO_APIC_irq(unsigned int irq) \
{ \
int pin; \
struct irq_pin_list *entry = irq_2_pin + irq; \
@@ -109,8 +110,31 @@ static void name##_IO_APIC_irq(unsigned int irq) \
FINAL; \
}
-DO_ACTION( mask, 0, |= 0x00010000, io_apic_sync(entry->apic))/* mask = 1 */
-DO_ACTION( unmask, 0, &= 0xfffeffff, ) /* mask = 0 */
+#define DO_ACTION(name,R,ACTION, FINAL) \
+ \
+static void name##_IO_APIC_irq(unsigned int irq) \
+__DO_ACTION(name,R,ACTION, FINAL)
+
+DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic))/* mask = 1 */
+DO_ACTION( __unmask, 0, &= 0xfffeffff, ) /* mask = 0 */
+
+static void mask_IO_APIC_irq (unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __mask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void unmask_IO_APIC_irq (unsigned int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __unmask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
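
The macro split above establishes a naming convention used through the rest of the file: __mask_IO_APIC_irq() and friends assume ioapic_lock is already held, while the un-prefixed wrappers take the lock themselves. That lets a composite path such as startup_edge_ioapic_irq() (further down) take ioapic_lock once and perform several steps under it. A compact sketch of the convention:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int reg;

    /* __set_reg: caller must already hold 'lock' */
    static void __set_reg(int v) { reg = v; }

    /* set_reg: public variant, takes the lock itself */
    static void set_reg(int v)
    {
        pthread_mutex_lock(&lock);
        __set_reg(v);
        pthread_mutex_unlock(&lock);
    }

    /* composite operation: one lock acquisition, several __ steps --
     * the shape of startup_edge_ioapic_irq() below */
    static int startup(void)
    {
        int was;

        pthread_mutex_lock(&lock);
        was = reg;
        __set_reg(1);
        pthread_mutex_unlock(&lock);
        return was;
    }

    int main(void)
    {
        set_reg(0);
        return startup();
    }
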
@@ -537,7 +561,7 @@ void __init setup_IO_APIC_irqs(void)
entry.delivery_mode = dest_LowestPrio;
entry.dest_mode = 1; /* logical delivery */
entry.mask = 0; /* enable IRQ */
- entry.dest.logical.logical_dest = APIC_ALL_CPUS; /* all CPUs */
+ entry.dest.logical.logical_dest = APIC_ALL_CPUS;
idx = find_irq_entry(apic,pin,mp_INT);
if (idx == -1) {
@@ -1026,16 +1050,16 @@ extern atomic_t nmi_counter[NR_CPUS];
static int __init nmi_irq_works(void)
{
- atomic_t tmp[NR_CPUS];
+ irq_cpustat_t tmp[NR_CPUS];
int j, cpu;
- memcpy(tmp, nmi_counter, sizeof(tmp));
+ memcpy(tmp, irq_stat, sizeof(tmp));
sti();
mdelay(50);
for (j = 0; j < smp_num_cpus; j++) {
cpu = cpu_logical_map(j);
- if (atomic_read(nmi_counter+cpu) - atomic_read(tmp+cpu) <= 3) {
+ if (atomic_read(&nmi_counter(cpu)) - atomic_read(&tmp[cpu].__nmi_counter) <= 3) {
printk("CPU#%d NMI appears to be stuck.\n", cpu);
return 0;
}
@@ -1055,14 +1079,9 @@ static int __init nmi_irq_works(void)
* that was delayed but this is now handled in the device
* independent code.
*/
-static void enable_edge_ioapic_irq(unsigned int irq)
-{
- unmask_IO_APIC_irq(irq);
-}
+#define enable_edge_ioapic_irq unmask_IO_APIC_irq
-static void disable_edge_ioapic_irq(unsigned int irq)
-{
-}
+static void disable_edge_ioapic_irq (unsigned int irq) { /* nothing */ }
/*
* Starting up a edge-triggered IO-APIC interrupt is
@@ -1077,12 +1096,17 @@ static void disable_edge_ioapic_irq(unsigned int irq)
static unsigned int startup_edge_ioapic_irq(unsigned int irq)
{
int was_pending = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
if (irq < 16) {
disable_8259A_irq(irq);
if (i8259A_irq_pending(irq))
was_pending = 1;
}
- enable_edge_ioapic_irq(irq);
+ __unmask_IO_APIC_irq(irq);
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+
return was_pending;
}
@@ -1093,14 +1117,15 @@ static unsigned int startup_edge_ioapic_irq(unsigned int irq)
* interrupt for real. This prevents IRQ storms from unhandled
* devices.
*/
-void static ack_edge_ioapic_irq(unsigned int irq)
+static void ack_edge_ioapic_irq(unsigned int irq)
{
if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
== (IRQ_PENDING | IRQ_DISABLED))
mask_IO_APIC_irq(irq);
ack_APIC_irq();
}
-void static end_edge_ioapic_irq(unsigned int i){}
+
+static void end_edge_ioapic_irq (unsigned int i) { /* nothing */ }
/*
@@ -1108,23 +1133,46 @@ void static end_edge_ioapic_irq(unsigned int i){}
* and shutting down and starting up the interrupt
* is the same as enabling and disabling them -- except
* with a startup need to return a "was pending" value.
+ *
+ * Level triggered interrupts are special because we
+ * do not touch any IO-APIC register while handling
+ * them. We ack the APIC in the end-IRQ handler, not
+ * in the start-IRQ-handler. Protection against reentrance
+ * from the same interrupt is still provided, both by the
+ * generic IRQ layer and by the fact that an unacked local
+ * APIC does not accept IRQs.
*/
-static unsigned int startup_level_ioapic_irq(unsigned int irq)
+static unsigned int startup_level_ioapic_irq (unsigned int irq)
{
unmask_IO_APIC_irq(irq);
+
return 0; /* don't check for pending */
}
#define shutdown_level_ioapic_irq mask_IO_APIC_irq
#define enable_level_ioapic_irq unmask_IO_APIC_irq
#define disable_level_ioapic_irq mask_IO_APIC_irq
-#define end_level_ioapic_irq unmask_IO_APIC_irq
-void static mask_and_ack_level_ioapic_irq(unsigned int i)
+
+static void end_level_ioapic_irq (unsigned int i)
{
- mask_IO_APIC_irq(i);
ack_APIC_irq();
}
+static void mask_and_ack_level_ioapic_irq (unsigned int i) { /* nothing */ }
+
+static void set_ioapic_affinity (unsigned int irq, unsigned int mask)
+{
+ unsigned long flags;
+ /*
+ * Only the first 8 bits are valid.
+ */
+ mask = mask << 24;
+
+ spin_lock_irqsave(&ioapic_lock, flags);
+ __DO_ACTION( target, 1, = mask, )
+ spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
/*
* Level and edge triggered IO-APIC interrupts need different handling,
* so we use two separate IRQ descriptors. Edge triggered IRQs can be
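
Two things worth noting in the hunk above. First, end_level_ioapic_irq() now only acks the local APIC; for level-triggered lines no IO-APIC register is touched at all during normal handling, as the new comment explains. Second, in set_ioapic_affinity() the "mask << 24" places the CPU mask in the logical-destination field: a redirection-table entry is a pair of 32-bit registers, and the destination occupies bits 24-31 of the high register (hence R = 1 in the __DO_ACTION invocation). A tiny sketch of that field layout:

    #include <stdint.h>
    #include <stdio.h>

    /* high dword of an IO-APIC redirection entry: the logical
     * destination sits in bits 24-31, which is why set_ioapic_affinity()
     * shifts the CPU mask left by 24 */
    static uint32_t redir_high(uint32_t cpu_mask)
    {
        return (cpu_mask & 0xffu) << 24;
    }

    int main(void)
    {
        printf("CPUs 0+1 -> high dword 0x%08x\n", redir_high(0x3));
        return 0;
    }
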
@@ -1141,7 +1189,8 @@ static struct hw_interrupt_type ioapic_edge_irq_type = {
enable_edge_ioapic_irq,
disable_edge_ioapic_irq,
ack_edge_ioapic_irq,
- end_edge_ioapic_irq
+ end_edge_ioapic_irq,
+ set_ioapic_affinity,
};
static struct hw_interrupt_type ioapic_level_irq_type = {
@@ -1151,7 +1200,8 @@ static struct hw_interrupt_type ioapic_level_irq_type = {
enable_level_ioapic_irq,
disable_level_ioapic_irq,
mask_and_ack_level_ioapic_irq,
- end_level_ioapic_irq
+ end_level_ioapic_irq,
+ set_ioapic_affinity,
};
static inline void init_IO_APIC_traps(void)
@@ -1185,12 +1235,12 @@ static inline void init_IO_APIC_traps(void)
}
}
-void static ack_lapic_irq (unsigned int irq)
+static void ack_lapic_irq (unsigned int irq)
{
ack_APIC_irq();
}
-void static end_lapic_irq (unsigned int i) { /* nothing */ }
+static void end_lapic_irq (unsigned int i) { /* nothing */ }
static struct hw_interrupt_type lapic_irq_type = {
"local-APIC-edge",
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 9d4a81041..7054249e6 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -31,21 +31,20 @@
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
+#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>
-unsigned int local_bh_count[NR_CPUS];
-unsigned int local_irq_count[NR_CPUS];
-
-extern atomic_t nmi_counter[NR_CPUS];
/*
* Linux has a controller-independent x86 interrupt architecture.
@@ -63,17 +62,15 @@ extern atomic_t nmi_counter[NR_CPUS];
* interrupt controllers, without having to do assembly magic.
*/
-/*
- * Micro-access to controllers is serialized over the whole
- * system. We never hold this lock when we call the actual
- * IRQ handler.
- */
-spinlock_t irq_controller_lock = SPIN_LOCK_UNLOCKED;
+irq_cpustat_t irq_stat [NR_CPUS];
+
/*
* Controller mappings for all interrupt sources:
*/
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
+ { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+
+static void register_irq_proc (unsigned int irq);
/*
* Special irq handlers.
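
irq_stat is the other half of the bookkeeping rework visible in i386_ksyms.c, io_apic.c and traps.c: the separate local_irq_count[], local_bh_count[] and nmi_counter[] arrays are folded into one per-CPU irq_cpustat_t, and the old array syntax becomes accessor macros (local_irq_count(cpu), nmi_counter(cpu), ...), grouping each CPU's counters together in memory. A standalone sketch of the idea — only __nmi_counter is visible in this diff (as an atomic_t); the other field names are assumptions inferred from the macro names:

    #include <stdio.h>

    #define NR_CPUS 2

    typedef struct {
        unsigned int __local_irq_count;   /* assumed field name */
        unsigned int __local_bh_count;    /* assumed field name */
        unsigned int __nmi_counter;       /* atomic_t in the real patch */
    } irq_cpustat_t;

    static irq_cpustat_t irq_stat[NR_CPUS];

    #define local_irq_count(cpu)  (irq_stat[(cpu)].__local_irq_count)
    #define local_bh_count(cpu)   (irq_stat[(cpu)].__local_bh_count)
    #define nmi_counter(cpu)      (irq_stat[(cpu)].__nmi_counter)

    int main(void)
    {
        nmi_counter(0)++;
        printf("cpu0: nmi=%u irq=%u bh=%u\n", nmi_counter(0),
               local_irq_count(0), local_bh_count(0));
        return 0;
    }
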
@@ -164,7 +161,7 @@ int get_irq_list(char *buf)
p += sprintf(p, "NMI: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
- atomic_read(nmi_counter+cpu_logical_map(j)));
+ atomic_read(&nmi_counter(cpu_logical_map(j))));
p += sprintf(p, "\n");
#if CONFIG_SMP
p += sprintf(p, "LOC: ");
@@ -186,7 +183,6 @@ int get_irq_list(char *buf)
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
-atomic_t global_irq_count;
static void show(char * str)
{
@@ -196,9 +192,9 @@ static void show(char * str)
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [%d %d]\n",
- atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
+ irqs_running(), local_irq_count(0), local_irq_count(1));
printk("bh: %d [%d %d]\n",
- spin_is_locked(&global_bh_lock) ? 1 : 0, local_bh_count[0], local_bh_count[1]);
+ spin_is_locked(&global_bh_lock) ? 1 : 0, local_bh_count(0), local_bh_count(1));
stack = (unsigned long *) &stack;
for (i = 40; i ; i--) {
unsigned long x = *++stack;
@@ -248,10 +244,9 @@ static inline void wait_on_irq(int cpu)
* for bottom half handlers unless we're
* already executing in one..
*/
- if (!atomic_read(&global_irq_count)) {
- if (local_bh_count[cpu] || !spin_is_locked(&global_bh_lock))
+ if (!irqs_running())
+ if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
break;
- }
/* Duh, we have to loop. Release the lock to avoid deadlocks */
clear_bit(0,&global_irq_lock);
@@ -264,11 +259,11 @@ static inline void wait_on_irq(int cpu)
__sti();
SYNC_OTHER_CORES(cpu);
__cli();
- if (atomic_read(&global_irq_count))
+ if (irqs_running())
continue;
if (global_irq_lock)
continue;
- if (!local_bh_count[cpu] && spin_is_locked(&global_bh_lock))
+ if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
@@ -285,7 +280,7 @@ static inline void wait_on_irq(int cpu)
*/
void synchronize_irq(void)
{
- if (atomic_read(&global_irq_count)) {
+ if (irqs_running()) {
/* Stupid approach */
cli();
sti();
@@ -338,7 +333,7 @@ void __global_cli(void)
if (flags & (1 << EFLAGS_IF_SHIFT)) {
int cpu = smp_processor_id();
__cli();
- if (!local_irq_count[cpu])
+ if (!local_irq_count(cpu))
get_irqlock(cpu);
}
}
@@ -347,7 +342,7 @@ void __global_sti(void)
{
int cpu = smp_processor_id();
- if (!local_irq_count[cpu])
+ if (!local_irq_count(cpu))
release_irqlock(cpu);
__sti();
}
@@ -364,6 +359,7 @@ unsigned long __global_save_flags(void)
int retval;
int local_enabled;
unsigned long flags;
+ int cpu = smp_processor_id();
__save_flags(flags);
local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
@@ -371,10 +367,10 @@ unsigned long __global_save_flags(void)
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
- if (!local_irq_count[smp_processor_id()]) {
+ if (!local_irq_count(cpu)) {
if (local_enabled)
retval = 1;
- if (global_irq_holder == (unsigned char) smp_processor_id())
+ if (global_irq_holder == cpu)
retval = 0;
}
return retval;
@@ -442,16 +438,17 @@ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction *
* hardware disable after having gotten the irq
* controller lock.
*/
-void disable_irq_nosync(unsigned int irq)
+void inline disable_irq_nosync(unsigned int irq)
{
+ irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
- spin_lock_irqsave(&irq_controller_lock, flags);
- if (!irq_desc[irq].depth++) {
- irq_desc[irq].status |= IRQ_DISABLED;
- irq_desc[irq].handler->disable(irq);
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
}
- spin_unlock_irqrestore(&irq_controller_lock, flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
}
/*
@@ -462,7 +459,7 @@ void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
- if (!local_irq_count[smp_processor_id()]) {
+ if (!local_irq_count(smp_processor_id())) {
do {
barrier();
} while (irq_desc[irq].status & IRQ_INPROGRESS);
@@ -471,28 +468,29 @@ void disable_irq(unsigned int irq)
void enable_irq(unsigned int irq)
{
+ irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
- spin_lock_irqsave(&irq_controller_lock, flags);
- switch (irq_desc[irq].depth) {
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
case 1: {
- unsigned int status = irq_desc[irq].status & ~IRQ_DISABLED;
- irq_desc[irq].status = status;
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
- irq_desc[irq].status = status | IRQ_REPLAY;
- hw_resend_irq(irq_desc[irq].handler,irq);
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
}
- irq_desc[irq].handler->enable(irq);
+ desc->handler->enable(irq);
/* fall-through */
}
default:
- irq_desc[irq].depth--;
+ desc->depth--;
break;
case 0:
printk("enable_irq() unbalanced from %p\n",
__builtin_return_address(0));
}
- spin_unlock_irqrestore(&irq_controller_lock, flags);
+ spin_unlock_irqrestore(&desc->lock, flags);
}
/*
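
disable_irq_nosync()/enable_irq() keep a per-descriptor nesting depth so independent callers can stack disables: only the first disable masks the line, only the matching last enable unmasks it, and an enable at depth 0 is reported as unbalanced. A standalone sketch of the depth logic, mirroring the switch with its fall-through:

    #include <stdio.h>

    static unsigned int depth;      /* ~ desc->depth */
    static int masked;

    static void my_disable(void)
    {
        if (!depth++)
            masked = 1;             /* only the first disable masks */
    }

    static void my_enable(void)
    {
        switch (depth) {
        case 1:
            masked = 0;             /* only the last enable unmasks */
            /* fall-through, as in enable_irq() above */
        default:
            depth--;
            break;
        case 0:
            fprintf(stderr, "unbalanced enable\n");
        }
    }

    int main(void)
    {
        my_disable();
        my_disable();
        my_enable();
        printf("masked=%d (still nested)\n", masked);   /* 1 */
        my_enable();
        printf("masked=%d\n", masked);                  /* 0 */
        return 0;
    }
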
@@ -514,13 +512,12 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
*/
int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
int cpu = smp_processor_id();
- irq_desc_t *desc;
+ irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
unsigned int status;
kstat.irqs[cpu][irq]++;
- desc = irq_desc + irq;
- spin_lock(&irq_controller_lock);
+ spin_lock(&desc->lock);
desc->handler->ack(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
@@ -540,7 +537,6 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;
- spin_unlock(&irq_controller_lock);
/*
* If there is no IRQ handler or it was disabled, exit early.
@@ -549,7 +545,7 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
will take care of it.
*/
if (!action)
- return 1;
+ goto out;
/*
* Edge triggered interrupts need to remember
@@ -562,20 +558,24 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
* SMP environment.
*/
for (;;) {
+ spin_unlock(&desc->lock);
handle_IRQ_event(irq, &regs, action);
- spin_lock(&irq_controller_lock);
+ spin_lock(&desc->lock);
if (!(desc->status & IRQ_PENDING))
break;
desc->status &= ~IRQ_PENDING;
- spin_unlock(&irq_controller_lock);
}
desc->status &= ~IRQ_INPROGRESS;
- if (!(desc->status & IRQ_DISABLED))
- desc->handler->end(irq);
- spin_unlock(&irq_controller_lock);
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
- if (softirq_state[cpu].active&softirq_state[cpu].mask)
+ if (softirq_state[cpu].active & softirq_state[cpu].mask)
do_softirq();
return 1;
}
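
This do_IRQ() hunk is the heart of the patch. The per-descriptor desc->lock is held while manipulating status but dropped across handle_IRQ_event(); if the same IRQ fires again on another CPU meanwhile, the (elided) IRQ_INPROGRESS path merely marks the descriptor IRQ_PENDING, and the loop here replays it — handlers never run concurrently for one IRQ, yet nothing is lost. Note also that ->end() is now called unconditionally, leaving disabled-while-running policy to each controller's end handler (compare end_8259A_irq() and end_level_ioapic_irq() above). A single-threaded sketch of the flag protocol, locks elided:

    #include <stdio.h>

    #define IRQ_INPROGRESS  1
    #define IRQ_PENDING     2

    static unsigned int status;
    static int events = 2;          /* pretend two back-to-back interrupts */

    static void handle_event(void)
    {
        /* in the real code desc->lock is dropped here, so a second
         * interrupt can arrive and set IRQ_PENDING on the descriptor */
        if (--events > 0)
            status |= IRQ_PENDING;
        puts("handled one event");
    }

    int main(void)
    {
        status |= IRQ_INPROGRESS;
        for (;;) {
            handle_event();
            if (!(status & IRQ_PENDING))
                break;
            status &= ~IRQ_PENDING; /* replay the interrupt we missed */
        }
        status &= ~IRQ_INPROGRESS;
        return 0;
    }
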
@@ -627,14 +627,16 @@ int request_irq(unsigned int irq,
void free_irq(unsigned int irq, void *dev_id)
{
+ irq_desc_t *desc;
struct irqaction **p;
unsigned long flags;
if (irq >= NR_IRQS)
return;
- spin_lock_irqsave(&irq_controller_lock,flags);
- p = &irq_desc[irq].action;
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
for (;;) {
struct irqaction * action = *p;
if (action) {
@@ -645,22 +647,22 @@ void free_irq(unsigned int irq, void *dev_id)
/* Found it - now remove it from the list of entries */
*pp = action->next;
- if (!irq_desc[irq].action) {
- irq_desc[irq].status |= IRQ_DISABLED;
- irq_desc[irq].handler->shutdown(irq);
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
}
- spin_unlock_irqrestore(&irq_controller_lock,flags);
+ spin_unlock_irqrestore(&desc->lock,flags);
#ifdef CONFIG_SMP
/* Wait to make sure it's not being used on another CPU */
- while (irq_desc[irq].status & IRQ_INPROGRESS)
+ while (desc->status & IRQ_INPROGRESS)
barrier();
#endif
kfree(action);
return;
}
printk("Trying to free free IRQ%d\n",irq);
- spin_unlock_irqrestore(&irq_controller_lock,flags);
+ spin_unlock_irqrestore(&desc->lock,flags);
return;
}
}
@@ -676,21 +678,43 @@ void free_irq(unsigned int irq, void *dev_id)
unsigned long probe_irq_on(void)
{
unsigned int i;
- unsigned long delay;
+ irq_desc_t *desc;
unsigned long val;
+ unsigned long delay;
+
+ /*
+ * something may have generated an irq long ago and we want to
+ * flush such a longstanding irq before considering it as spurious.
+ */
+ for (i = NR_IRQS-1; i > 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ /* about 20ms delay */ synchronize_irq();
/*
- * first, enable any unassigned irqs
+ * enable any unassigned irqs
+ * (we must startup again here because if a longstanding irq
+ * happened in the previous stage, it may have masked itself)
*/
- spin_lock_irq(&irq_controller_lock);
for (i = NR_IRQS-1; i > 0; i--) {
- if (!irq_desc[i].action) {
- irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
- if(irq_desc[i].handler->startup(i))
- irq_desc[i].status |= IRQ_PENDING;
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
}
+ spin_unlock_irq(&desc->lock);
}
- spin_unlock_irq(&irq_controller_lock);
/*
* Wait for spurious interrupts to trigger
@@ -702,24 +726,24 @@ unsigned long probe_irq_on(void)
* Now filter out any obviously spurious interrupts
*/
val = 0;
- spin_lock_irq(&irq_controller_lock);
- for (i=0; i<NR_IRQS; i++) {
- unsigned int status = irq_desc[i].status;
-
- if (!(status & IRQ_AUTODETECT))
- continue;
-
- /* It triggered already - consider it spurious. */
- if (!(status & IRQ_WAITING)) {
- irq_desc[i].status = status & ~IRQ_AUTODETECT;
- irq_desc[i].handler->shutdown(i);
- continue;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
}
-
- if (i < 32)
- val |= 1 << i;
+ spin_unlock_irq(&desc->lock);
}
- spin_unlock_irq(&irq_controller_lock);
return val;
}
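
probe_irq_on() now works in two passes: start all unassigned IRQs and wait about 20 ms so "longstanding" interrupts fire and are flushed, then start them again (a longstanding IRQ may have masked itself) and watch which IRQ_WAITING bits get cleared without a probe stimulus. The delay loop uses time_after(), the kernel's wraparound-safe jiffies comparison; the trick is a signed subtraction. A standalone demonstration with a 32-bit counter:

    #include <stdint.h>
    #include <stdio.h>

    /* wraparound-safe "a is later than b" -- the time_after() trick */
    static int time_after32(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    int main(void)
    {
        uint32_t jiffies = 0xfffffff0u;     /* counter about to wrap */
        uint32_t timeout = jiffies + 32;    /* wraps to 0x00000010 */

        while (!time_after32(jiffies, timeout))
            jiffies++;                      /* stand-in for the 20 ms wait */

        printf("expired at jiffies=%#x\n", jiffies);    /* 0x11 */
        return 0;
    }
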
@@ -734,20 +758,22 @@ unsigned int probe_irq_mask(unsigned long val)
unsigned int mask;
mask = 0;
- spin_lock_irq(&irq_controller_lock);
for (i = 0; i < 16; i++) {
- unsigned int status = irq_desc[i].status;
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
- if (!(status & IRQ_AUTODETECT))
- continue;
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
- if (!(status & IRQ_WAITING))
- mask |= 1 << i;
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING))
+ mask |= 1 << i;
- irq_desc[i].status = status & ~IRQ_AUTODETECT;
- irq_desc[i].handler->shutdown(i);
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
}
- spin_unlock_irq(&irq_controller_lock);
return mask & val;
}
@@ -762,22 +788,24 @@ int probe_irq_off(unsigned long val)
nr_irqs = 0;
irq_found = 0;
- spin_lock_irq(&irq_controller_lock);
- for (i=0; i<NR_IRQS; i++) {
- unsigned int status = irq_desc[i].status;
-
- if (!(status & IRQ_AUTODETECT))
- continue;
-
- if (!(status & IRQ_WAITING)) {
- if (!nr_irqs)
- irq_found = i;
- nr_irqs++;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
}
- irq_desc[i].status = status & ~IRQ_AUTODETECT;
- irq_desc[i].handler->shutdown(i);
+ spin_unlock_irq(&desc->lock);
}
- spin_unlock_irq(&irq_controller_lock);
if (nr_irqs > 1)
irq_found = -irq_found;
@@ -788,8 +816,9 @@ int probe_irq_off(unsigned long val)
int setup_irq(unsigned int irq, struct irqaction * new)
{
int shared = 0;
- struct irqaction *old, **p;
unsigned long flags;
+ struct irqaction *old, **p;
+ irq_desc_t *desc = irq_desc + irq;
/*
* Some drivers like serial.c use request_irq() heavily,
@@ -811,12 +840,12 @@ int setup_irq(unsigned int irq, struct irqaction * new)
/*
* The following block of code has to be executed atomically
*/
- spin_lock_irqsave(&irq_controller_lock,flags);
- p = &irq_desc[irq].action;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
if ((old = *p) != NULL) {
/* Can't share interrupts unless both agree to */
if (!(old->flags & new->flags & SA_SHIRQ)) {
- spin_unlock_irqrestore(&irq_controller_lock,flags);
+ spin_unlock_irqrestore(&desc->lock,flags);
return -EBUSY;
}
@@ -831,11 +860,171 @@ int setup_irq(unsigned int irq, struct irqaction * new)
*p = new;
if (!shared) {
- irq_desc[irq].depth = 0;
- irq_desc[irq].status &= ~IRQ_DISABLED;
- irq_desc[irq].handler->startup(irq);
+ desc->depth = 0;
+ desc->status &= ~IRQ_DISABLED;
+ desc->handler->startup(irq);
}
- spin_unlock_irqrestore(&irq_controller_lock,flags);
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ register_irq_proc(irq);
return 0;
}
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir [NR_IRQS];
+static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
+
+unsigned int irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0xffffffff};
+
+#define HEX_DIGITS 8
+
+static int irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08x\n", irq_affinity[(int)data]);
+}
+
+static unsigned int parse_hex_value (const char *buffer,
+ unsigned long count, unsigned long *ret)
+{
+ unsigned char hexnum [HEX_DIGITS];
+ unsigned long value;
+ int i;
+
+ if (!count)
+ return -EINVAL;
+ if (count > HEX_DIGITS)
+ count = HEX_DIGITS;
+ if (copy_from_user(hexnum, buffer, count))
+ return -EFAULT;
+
+ /*
+ * Parse the first 8 characters as a hex string, any non-hex char
+ * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
+ */
+ value = 0;
+
+ for (i = 0; i < count; i++) {
+ unsigned int c = hexnum[i];
+
+ switch (c) {
+ case '0' ... '9': c -= '0'; break;
+ case 'a' ... 'f': c -= 'a'-10; break;
+ case 'A' ... 'F': c -= 'A'-10; break;
+ default:
+ goto out;
+ }
+ value = (value << 4) | c;
+ }
+out:
+ *ret = value;
+ return 0;
+}
+
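
parse_hex_value() above takes at most 8 hex digits and treats the first non-hex character as end-of-string, so "00e1", "e1" and "E1\n" all parse to 0xe1. A user-space double of it (minus the copy_from_user step), using the same GCC case-range extension the kernel relies on:

    #include <stdio.h>

    static int parse_hex(const char *s, unsigned long *ret)
    {
        unsigned long value = 0;
        int i;

        if (!*s)
            return -1;
        for (i = 0; i < 8 && s[i]; i++) {
            unsigned int c = s[i];

            switch (c) {
            case '0' ... '9': c -= '0'; break;
            case 'a' ... 'f': c -= 'a' - 10; break;
            case 'A' ... 'F': c -= 'A' - 10; break;
            default:
                goto out;           /* first non-hex char ends the parse */
            }
            value = (value << 4) | c;
        }
    out:
        *ret = value;
        return 0;
    }

    int main(void)
    {
        unsigned long v;

        parse_hex("00e1\n", &v);
        printf("%#lx\n", v);        /* 0xe1; the '\n' is ignored */
        return 0;
    }
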
+static int irq_affinity_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (int) data, full_count = count, err;
+ unsigned long new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+ err = parse_hex_value(buffer, count, &new_value);
+
+#if CONFIG_SMP
+ /*
+ * Do not allow disabling IRQs completely - it's a too easy
+ * way to make the system unusable accidentally :-) At least
+ * one online CPU still has to be targeted.
+ */
+ if (!(new_value & cpu_online_map))
+ return -EINVAL;
+#endif
+
+ irq_affinity[irq] = new_value;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+
+ return full_count;
+}
+
+static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long *mask = (unsigned long *) data;
+ if (count < HEX_DIGITS+1)
+ return -EINVAL;
+ return sprintf (page, "%08lx\n", *mask);
+}
+
+static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ unsigned long *mask = (unsigned long *) data, full_count = count, err;
+ unsigned long new_value;
+
+ err = parse_hex_value(buffer, count, &new_value);
+ if (err)
+ return err;
+
+ *mask = new_value;
+ return full_count;
+}
+
+#define MAX_NAMELEN 10
+
+static void register_irq_proc (unsigned int irq)
+{
+ struct proc_dir_entry *entry;
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type))
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0700, irq_dir[irq]);
+
+ entry->nlink = 1;
+ entry->data = (void *)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+
+ smp_affinity_entry[irq] = entry;
+}
+
+unsigned long prof_cpu_mask = -1;
+
+void init_irq_proc (void)
+{
+ struct proc_dir_entry *entry;
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", 0);
+
+ /* create /proc/irq/prof_cpu_mask */
+ entry = create_proc_entry("prof_cpu_mask", 0700, root_irq_dir);
+
+ entry->nlink = 1;
+ entry->data = (void *)&prof_cpu_mask;
+ entry->read_proc = prof_cpu_mask_read_proc;
+ entry->write_proc = prof_cpu_mask_write_proc;
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < NR_IRQS; i++) {
+ if (irq_desc[i].handler == &no_irq_type)
+ continue;
+ register_irq_proc(i);
+ }
+}
+
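
Taken together, the proc code above creates /proc/irq/<n>/smp_affinity for every registered IRQ plus /proc/irq/prof_cpu_mask, each holding an 8-hex-digit CPU mask. A minimal user-space sketch of driving the new interface (the IRQ number is hypothetical; the entries are created mode 0700, so this needs root):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* bind hypothetical IRQ 9 to CPU 1 (mask 0x2); the string is
         * parsed by parse_hex_value() above */
        int fd = open("/proc/irq/9/smp_affinity", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "00000002", 8) != 8)
            perror("write");
        close(fd);
        return 0;
    }
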
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c
index 26b6525d8..84490b40b 100644
--- a/arch/i386/kernel/microcode.c
+++ b/arch/i386/kernel/microcode.c
@@ -20,6 +20,9 @@
* Initial release.
* 1.01 18 February 2000, Tigran Aivazian <tigran@sco.com>
* Added read() support + cleanups.
+ * 1.02 21 February 2000, Tigran Aivazian <tigran@sco.com>
+ * Added 'device trimming' support. open(O_WRONLY) zeroes
+ * and frees the saved copy of applied microcode.
*/
#include <linux/init.h>
@@ -33,7 +36,7 @@
#include <asm/uaccess.h>
#include <asm/processor.h>
-#define MICROCODE_VERSION "1.01"
+#define MICROCODE_VERSION "1.02"
MODULE_DESCRIPTION("CPU (P6) microcode update driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@ocston.org>");
@@ -53,7 +56,7 @@ static void do_update_one(void *);
/*
* Bits in microcode_status. (31 bits of room for future expansion)
*/
-#define MICROCODE_IS_OPEN 0 /* set if /dev/microcode is in use */
+#define MICROCODE_IS_OPEN 0 /* set if device is in use */
static unsigned long microcode_status = 0;
/* the actual array of microcode blocks, each 2048 bytes */
@@ -68,31 +71,16 @@ static struct file_operations microcode_fops = {
release: microcode_release,
};
-static struct inode_operations microcode_inops = {
- default_file_ops: &microcode_fops,
-};
-
static struct proc_dir_entry *proc_microcode;
static int __init microcode_init(void)
{
- int size;
-
proc_microcode = create_proc_entry("microcode", S_IWUSR|S_IRUSR, proc_root_driver);
if (!proc_microcode) {
printk(KERN_ERR "microcode: can't create /proc/driver/microcode\n");
return -ENOMEM;
}
- proc_microcode->ops = &microcode_inops;
- size = smp_num_cpus * sizeof(struct microcode);
- mc_applied = kmalloc(size, GFP_KERNEL);
- if (!mc_applied) {
- remove_proc_entry("microcode", proc_root_driver);
- printk(KERN_ERR "microcode: can't allocate memory for saved microcode\n");
- return -ENOMEM;
- }
- memset(mc_applied, 0, size); /* so that reading from offsets corresponding to failed
- update makes this obvious */
+ proc_microcode->proc_fops = &microcode_fops;
printk(KERN_INFO "P6 Microcode Update Driver v%s registered\n", MICROCODE_VERSION);
return 0;
}
@@ -100,7 +88,8 @@ static int __init microcode_init(void)
static void __exit microcode_exit(void)
{
remove_proc_entry("microcode", proc_root_driver);
- kfree(mc_applied);
+ if (mc_applied)
+ kfree(mc_applied);
printk(KERN_INFO "P6 Microcode Update Driver v%s unregistered\n", MICROCODE_VERSION);
}
@@ -119,6 +108,15 @@ static int microcode_open(struct inode *inode, struct file *file)
if (test_and_set_bit(MICROCODE_IS_OPEN, &microcode_status))
return -EBUSY;
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ proc_microcode->size = 0;
+ if (mc_applied) {
+ memset(mc_applied, 0, smp_num_cpus * sizeof(struct microcode));
+ kfree(mc_applied);
+ mc_applied = NULL;
+ }
+ }
+
MOD_INC_USE_COUNT;
return 0;
@@ -243,6 +241,16 @@ static ssize_t microcode_write(struct file *file, const char *buf, size_t len, l
sizeof(struct microcode));
return -EINVAL;
}
+ if (!mc_applied) {
+ int size = smp_num_cpus * sizeof(struct microcode);
+ mc_applied = kmalloc(size, GFP_KERNEL);
+ if (!mc_applied) {
+ printk(KERN_ERR "microcode: can't allocate memory for saved microcode\n");
+ return -ENOMEM;
+ }
+ memset(mc_applied, 0, size);
+ }
+
lock_kernel();
microcode_num = len/sizeof(struct microcode);
microcode = vmalloc(len);
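
The microcode changes implement the "device trimming" noted in the changelog: mc_applied is no longer allocated unconditionally at init time, but lazily on the first write, and open(O_WRONLY) frees it again (which is why microcode_exit() now checks for NULL before kfree()). The allocate-on-first-use shape in standalone form:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SAVED_SZ (4 * 2048)   /* ~ smp_num_cpus * sizeof(struct microcode) */

    static char *saved;           /* ~ mc_applied */

    static int save_data(const char *src, size_t len)
    {
        if (!saved) {             /* allocate lazily on first write */
            saved = malloc(SAVED_SZ);
            if (!saved)
                return -1;
            memset(saved, 0, SAVED_SZ);
        }
        if (len > SAVED_SZ)
            len = SAVED_SZ;
        memcpy(saved, src, len);
        return 0;
    }

    static void trim(void)        /* ~ the open(O_WRONLY) trimming */
    {
        free(saved);
        saved = NULL;
    }

    int main(void)
    {
        save_data("hello", 6);
        printf("%s\n", saved);
        trim();
        return 0;
    }
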
diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c
index 81685d2f5..030b31647 100644
--- a/arch/i386/kernel/mpparse.c
+++ b/arch/i386/kernel/mpparse.c
@@ -316,11 +316,14 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
return num_processors;
}
+static struct intel_mp_floating *mpf_found;
+
/*
* Scan the memory blocks for an SMP configuration block.
*/
-static int __init smp_get_mpf(struct intel_mp_floating *mpf)
+void __init get_smp_config (void)
{
+ struct intel_mp_floating *mpf = mpf_found;
printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
if (mpf->mpf_feature2 & (1<<7)) {
printk(" IMCR and PIC compatibility mode.\n");
@@ -329,7 +332,6 @@ static int __init smp_get_mpf(struct intel_mp_floating *mpf)
printk(" Virtual Wire compatibility mode.\n");
pic_mode = 0;
}
- smp_found_config = 1;
/*
* default CPU id - if it's different in the mptable
* then we change it before first using it.
@@ -388,7 +390,7 @@ static int __init smp_get_mpf(struct intel_mp_floating *mpf)
default:
printk("???\nUnknown standard configuration %d\n",
mpf->mpf_feature1);
- return 1;
+ return;
}
if (mpf->mpf_feature1 > 4) {
printk("Bus #1 is PCI\n");
@@ -412,10 +414,9 @@ static int __init smp_get_mpf(struct intel_mp_floating *mpf)
/*
* Only use the first configuration found.
*/
- return 1;
}
-static int __init smp_scan_config(unsigned long base, unsigned long length)
+static int __init smp_scan_config (unsigned long base, unsigned long length)
{
unsigned long *bp = phys_to_virt(base);
struct intel_mp_floating *mpf;
@@ -432,9 +433,13 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
((mpf->mpf_specification == 1)
|| (mpf->mpf_specification == 4)) ) {
- printk("found SMP MP-table at %08ld\n",
+ smp_found_config = 1;
+ printk("found SMP MP-table at %08lx\n",
virt_to_phys(mpf));
- smp_get_mpf(mpf);
+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+ if (mpf->mpf_physptr)
+ reserve_bootmem(mpf->mpf_physptr, PAGE_SIZE);
+ mpf_found = mpf;
return 1;
}
bp += 4;
@@ -443,7 +448,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
return 0;
}
-void __init init_intel_smp (void)
+void __init find_intel_smp (void)
{
unsigned int address;
@@ -488,7 +493,7 @@ void __init init_intel_smp (void)
* sense, but it doesnt have a BIOS(-configuration table).
* No problem for Linux.
*/
-void __init init_visws_smp(void)
+void __init find_visws_smp(void)
{
smp_found_config = 1;
@@ -505,13 +510,13 @@ void __init init_visws_smp(void)
* - Intel MP Configuration Table
* - or SGI Visual Workstation configuration
*/
-void __init init_smp_config (void)
+void __init find_smp_config (void)
{
#ifdef CONFIG_X86_IO_APIC
- init_intel_smp();
+ find_intel_smp();
#endif
#ifdef CONFIG_VISWS
- init_visws_smp();
+ find_visws_smp();
#endif
}
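
The init_*-to-find_* renames in mpparse.c are not cosmetic: SMP configuration discovery is split into two phases. find_smp_config() runs before paging_init() (see the setup.c hunk below), only locates the MP floating pointer and reserve_bootmem()s both it and the table it points to, so the boot allocator cannot hand that memory out; get_smp_config() does the actual parsing later, once paging is up. A trivial sketch of the find/get split:

    #include <stdio.h>

    static const char *found;   /* ~ mpf_found */
    static int found_config;    /* ~ smp_found_config */

    static void find_config(void)   /* early: locate and pin only */
    {
        found = "MP floating pointer";  /* reserve_bootmem() goes here */
        found_config = 1;
    }

    static void get_config(void)    /* late: full parse */
    {
        printf("parsing %s\n", found);
    }

    int main(void)
    {
        find_config();
        /* ... paging_init() runs in between ... */
        if (found_config)
            get_config();
        return 0;
    }
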
diff --git a/arch/i386/kernel/mtrr.c b/arch/i386/kernel/mtrr.c
index a0a4ab851..cc9c7eafe 100644
--- a/arch/i386/kernel/mtrr.c
+++ b/arch/i386/kernel/mtrr.c
@@ -1507,11 +1507,6 @@ static struct file_operations mtrr_fops =
# ifdef CONFIG_PROC_FS
-static struct inode_operations proc_mtrr_inode_operations =
-{
- &mtrr_fops, /* default property file-ops */
-};
-
static struct proc_dir_entry *proc_root_mtrr;
# endif /* CONFIG_PROC_FS */
@@ -1836,9 +1831,9 @@ int __init mtrr_init(void)
#ifdef CONFIG_PROC_FS
proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
- proc_root_mtrr->ops = &proc_mtrr_inode_operations;
+ proc_root_mtrr->proc_fops = &mtrr_fops;
#endif
-#ifdev CONFIG_DEVFS_FS
+#ifdef CONFIG_DEVFS_FS
devfs_handle = devfs_register (NULL, "cpu/mtrr", 0, DEVFS_FL_DEFAULT, 0, 0,
S_IFREG | S_IRUGO | S_IWUSR, 0, 0,
&mtrr_fops, NULL);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 0f61ca543..19f7022a4 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -74,8 +74,13 @@ void enable_hlt(void)
*/
static void default_idle(void)
{
- if (current_cpu_data.hlt_works_ok && !hlt_counter)
- asm volatile("sti ; hlt" : : : "memory");
+ if (current_cpu_data.hlt_works_ok && !hlt_counter) {
+ asm volatile("cli" : : : "memory");
+ if (!current->need_resched)
+ asm volatile("sti ; hlt" : : : "memory");
+ else
+ asm volatile("sti" : : : "memory");
+ }
}
/*
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index c530eece0..febc592ae 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -150,8 +150,9 @@ int __down_interruptible(struct semaphore * sem)
int __down_trylock(struct semaphore * sem)
{
int sleepers;
+ unsigned long flags;
- spin_lock_irq(&semaphore_lock);
+ spin_lock_irqsave(&semaphore_lock, flags);
sleepers = sem->sleepers + 1;
sem->sleepers = 0;
@@ -162,7 +163,7 @@ int __down_trylock(struct semaphore * sem)
if (!atomic_add_negative(sleepers, &sem->count))
wake_up(&sem->wait);
- spin_unlock_irq(&semaphore_lock);
+ spin_unlock_irqrestore(&semaphore_lock, flags);
return 1;
}
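
The semaphore fix matters because __down_trylock() may be called with interrupts already disabled; spin_unlock_irq() would have re-enabled them unconditionally, while the irqsave/irqrestore pair puts back exactly the state the caller had. A user-space analogue with signal masks playing the role of the interrupt flag:

    #include <signal.h>
    #include <stdio.h>

    static void critical_section(void)
    {
        sigset_t all, old;

        sigfillset(&all);
        sigprocmask(SIG_BLOCK, &all, &old);     /* ~ spin_lock_irqsave */
        /* ... touch shared state ... */
        sigprocmask(SIG_SETMASK, &old, NULL);   /* ~ spin_unlock_irqrestore:
                                                 * restores, never force-enables */
    }

    int main(void)
    {
        sigset_t set, cur;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL);     /* caller: "interrupts off" */

        critical_section();

        sigprocmask(SIG_SETMASK, NULL, &cur);
        printf("SIGUSR1 still blocked: %d\n",
               sigismember(&cur, SIGUSR1));     /* 1, as the caller expects */
        return 0;
    }
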
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index cd2a3d8af..b5602ebec 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -119,7 +119,7 @@ extern int rd_image_start; /* starting block # of image */
#endif
extern int root_mountflags;
-extern int _text, _etext, _edata, _end;
+extern char _text, _etext, _edata, _end;
extern unsigned long cpu_hz;
/*
@@ -709,9 +709,20 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_X86_IO_APIC
/*
- * Save possible boot-time SMP configuration:
+ * Find and reserve possible boot-time SMP configuration:
*/
- init_smp_config();
+ find_smp_config();
+#endif
+ paging_init();
+#ifdef CONFIG_X86_IO_APIC
+ /*
+ * get boot-time SMP configuration:
+ */
+ if (smp_found_config)
+ get_smp_config();
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ init_apic_mappings();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 07797e760..7400b628b 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -360,8 +360,6 @@ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
printk("Do you have a strange power saving mode enabled?\n");
}
-atomic_t nmi_counter[NR_CPUS];
-
#if CONFIG_X86_IO_APIC
int nmi_watchdog = 1;
@@ -437,7 +435,8 @@ asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
unsigned char reason = inb(0x61);
- atomic_inc(nmi_counter+smp_processor_id());
+
+ atomic_inc(&nmi_counter(smp_processor_id()));
if (!(reason & 0xc0)) {
#if CONFIG_X86_IO_APIC
/*