author    Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-02-23 00:40:54 +0000
commit    529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree      78f1c0b805f5656aa7b0417a043c5346f700a2cf /arch/i386/kernel
parent    0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I ignored all modifications to the qlogicisp.c
driver because of the Origin A64 hacks.
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/Makefile      |  18
-rw-r--r--  arch/i386/kernel/acpi.c        |  97
-rw-r--r--  arch/i386/kernel/apm.c         |  74
-rw-r--r--  arch/i386/kernel/entry.S       |  36
-rw-r--r--  arch/i386/kernel/head.S        |  11
-rw-r--r--  arch/i386/kernel/i386_ksyms.c  |  17
-rw-r--r--  arch/i386/kernel/io_apic.c     |  16
-rw-r--r--  arch/i386/kernel/irq.c         |  64
-rw-r--r--  arch/i386/kernel/mtrr.c        |  17
-rw-r--r--  arch/i386/kernel/pci-pc.c      |  19
-rw-r--r--  arch/i386/kernel/pm.c          | 104
-rw-r--r--  arch/i386/kernel/process.c     |  10
-rw-r--r--  arch/i386/kernel/setup.c       |   6
-rw-r--r--  arch/i386/kernel/smp.c         | 243
-rw-r--r--  arch/i386/kernel/trampoline.S  |   2
-rw-r--r--  arch/i386/kernel/traps.c       |  99
16 files changed, 376 insertions, 457 deletions
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 6b7302f99..96be4dff6 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -40,23 +40,19 @@ else
endif
endif
-ifdef CONFIG_ACPI
-OX_OBJS += pm.o
-else
-ifdef CONFIG_APM
-OX_OBJS += pm.o
-endif
-endif
-
ifeq ($(CONFIG_ACPI),y)
- O_OBJS += acpi.o
+O_OBJS += acpi.o
+else
+ ifeq ($(CONFIG_ACPI),m)
+ M_OBJS += acpi.o
+ endif
endif
ifeq ($(CONFIG_APM),y)
-OX_OBJS += apm.o
+O_OBJS += apm.o
else
ifeq ($(CONFIG_APM),m)
- MX_OBJS += apm.o
+ M_OBJS += apm.o
endif
endif
diff --git a/arch/i386/kernel/acpi.c b/arch/i386/kernel/acpi.c
index 5ad3106f8..8ae8bc299 100644
--- a/arch/i386/kernel/acpi.c
+++ b/arch/i386/kernel/acpi.c
@@ -105,7 +105,6 @@ static int acpi_p_lvl2_tested = 0;
static int acpi_p_lvl3_tested = 0;
static int acpi_disabled = 0;
-int acpi_active = 0;
// bits 8-15 are SLP_TYPa, bits 0-7 are SLP_TYPb
static unsigned long acpi_slp_typ[] =
@@ -564,7 +563,7 @@ static int __init acpi_init_piix4(struct pci_dev *dev)
/*
* Init VIA ACPI device and create a fake FACP
*/
-static int __init acpi_init_via686a(struct pci_dev *dev)
+static int __init acpi_init_via(struct pci_dev *dev)
{
u32 base;
u8 tmp, irq;
@@ -631,6 +630,7 @@ typedef enum
{
CH_UNKNOWN = 0,
CH_INTEL_PIIX4,
+ CH_VIA_586,
CH_VIA_686A,
} acpi_chip_t;
@@ -642,12 +642,13 @@ const static struct
{
{NULL,},
{acpi_init_piix4},
- {acpi_init_via686a},
+ {acpi_init_via},
};
const static struct pci_device_id acpi_pci_tbl[] =
{
{0x8086, 0x7113, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_INTEL_PIIX4},
+ {0x1106, 0x3040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_VIA_586},
{0x1106, 0x3057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_VIA_686A},
{0,}, /* terminate list */
};
@@ -792,7 +793,7 @@ static void wake_on_busmaster(struct acpi_facp *facp)
/*
* Idle loop (uniprocessor only)
*/
-static void acpi_idle_handler(void)
+static void acpi_idle(void)
{
static int sleep_level = 1;
struct acpi_facp *facp = acpi_facp;
@@ -1009,56 +1010,63 @@ static void acpi_enter_sx(acpi_sstate_t state)
/*
* Enter soft-off (S5)
*/
-static void acpi_power_off_handler(void)
+static void acpi_power_off(void)
{
acpi_enter_sx(ACPI_S5);
}
/*
+ * Claim I/O port if available
+ */
+static int acpi_claim(unsigned long start, unsigned long size)
+{
+ if (start && size) {
+ if (check_region(start, size))
+ return -EBUSY;
+ request_region(start, size, "acpi");
+ }
+ return 0;
+}
+
+/*
* Claim ACPI I/O ports
*/
static int acpi_claim_ioports(struct acpi_facp *facp)
{
// we don't get a guarantee of contiguity for any of the ACPI registers
- if (facp->pm1a_evt)
- request_region(facp->pm1a_evt, facp->pm1_evt_len, "acpi");
- if (facp->pm1b_evt)
- request_region(facp->pm1b_evt, facp->pm1_evt_len, "acpi");
- if (facp->pm1a_cnt)
- request_region(facp->pm1a_cnt, facp->pm1_cnt_len, "acpi");
- if (facp->pm1b_cnt)
- request_region(facp->pm1b_cnt, facp->pm1_cnt_len, "acpi");
- if (facp->pm_tmr)
- request_region(facp->pm_tmr, facp->pm_tm_len, "acpi");
- if (facp->gpe0)
- request_region(facp->gpe0, facp->gpe0_len, "acpi");
- if (facp->gpe1)
- request_region(facp->gpe1, facp->gpe1_len, "acpi");
-
+ if (acpi_claim(facp->pm1a_evt, facp->pm1_evt_len)
+ || acpi_claim(facp->pm1b_evt, facp->pm1_evt_len)
+ || acpi_claim(facp->pm1a_cnt, facp->pm1_cnt_len)
+ || acpi_claim(facp->pm1b_cnt, facp->pm1_cnt_len)
+ || acpi_claim(facp->pm_tmr, facp->pm_tm_len)
+ || acpi_claim(facp->gpe0, facp->gpe0_len)
+ || acpi_claim(facp->gpe1, facp->gpe1_len))
+ return -EBUSY;
return 0;
}
/*
+ * Release I/O port if claimed
+ */
+static void acpi_release(unsigned long start, unsigned long size)
+{
+ if (start && size)
+ release_region(start, size);
+}
+
+/*
* Free ACPI I/O ports
*/
static int acpi_release_ioports(struct acpi_facp *facp)
{
// we don't get a guarantee of contiguity for any of the ACPI registers
- if (facp->pm1a_evt)
- release_region(facp->pm1a_evt, facp->pm1_evt_len);
- if (facp->pm1b_evt)
- release_region(facp->pm1b_evt, facp->pm1_evt_len);
- if (facp->pm1a_cnt)
- release_region(facp->pm1a_cnt, facp->pm1_cnt_len);
- if (facp->pm1b_cnt)
- release_region(facp->pm1b_cnt, facp->pm1_cnt_len);
- if (facp->pm_tmr)
- release_region(facp->pm_tmr, facp->pm_tm_len);
- if (facp->gpe0)
- release_region(facp->gpe0, facp->gpe0_len);
- if (facp->gpe1)
- release_region(facp->gpe1, facp->gpe1_len);
-
+ acpi_release(facp->gpe1, facp->gpe1_len);
+ acpi_release(facp->gpe0, facp->gpe0_len);
+ acpi_release(facp->pm_tmr, facp->pm_tm_len);
+ acpi_release(facp->pm1b_cnt, facp->pm1_cnt_len);
+ acpi_release(facp->pm1a_cnt, facp->pm1_cnt_len);
+ acpi_release(facp->pm1b_evt, facp->pm1_evt_len);
+ acpi_release(facp->pm1a_evt, facp->pm1_evt_len);
return 0;
}
@@ -1322,6 +1330,14 @@ static int __init acpi_init(void)
= ACPI_uS_TO_TMR_TICKS(acpi_facp->p_lvl3_lat * 5);
}
+ if (acpi_claim_ioports(acpi_facp)) {
+ printk(KERN_ERR "ACPI: I/O port allocation failed\n");
+ if (pci_driver_registered)
+ pci_unregister_driver(&acpi_driver);
+ acpi_destroy_tables();
+ return -ENODEV;
+ }
+
if (acpi_facp->sci_int
&& request_irq(acpi_facp->sci_int,
acpi_irq,
@@ -1338,16 +1354,15 @@ static int __init acpi_init(void)
return -ENODEV;
}
- acpi_claim_ioports(acpi_facp);
acpi_sysctl = register_sysctl_table(acpi_dir_table, 1);
pid = kernel_thread(acpi_control_thread,
NULL,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
- acpi_power_off = acpi_power_off_handler;
+ pm_power_off = acpi_power_off;
- acpi_active = 1;
+ pm_active = 1;
/*
* Set up the ACPI idle function. Note that we can't really
@@ -1360,7 +1375,7 @@ static int __init acpi_init(void)
#endif
if (acpi_facp->pm_tmr)
- acpi_idle = acpi_idle_handler;
+ pm_idle = acpi_idle;
return 0;
}
@@ -1370,8 +1385,8 @@ static int __init acpi_init(void)
*/
static void __exit acpi_exit(void)
{
- acpi_idle = NULL;
- acpi_power_off = NULL;
+ pm_idle = NULL;
+ pm_power_off = NULL;
unregister_sysctl_table(acpi_sysctl);
acpi_disable(acpi_facp);
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index daa7226cd..81a813b05 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -150,13 +150,7 @@
#include <asm/uaccess.h>
#include <asm/desc.h>
-/*
- * Make APM look as much as just another ACPI module as possible..
- */
-#include <linux/acpi.h>
-
-EXPORT_SYMBOL(apm_register_callback);
-EXPORT_SYMBOL(apm_unregister_callback);
+#include <linux/pm.h>
extern unsigned long get_cmos_time(void);
extern void machine_real_restart(unsigned char *, int);
@@ -304,13 +298,6 @@ static char * apm_event_name[] = {
#define NR_APM_EVENT_NAME \
(sizeof(apm_event_name) / sizeof(apm_event_name[0]))
-typedef struct callback_list_t {
- int (* callback)(apm_event_t);
- struct callback_list_t * next;
-} callback_list_t;
-
-static callback_list_t * callback_list = NULL;
-
typedef struct lookup_t {
int key;
char * msg;
@@ -687,32 +674,6 @@ static int apm_console_blank(int blank)
}
#endif
-int apm_register_callback(int (*callback)(apm_event_t))
-{
- callback_list_t * new;
-
- new = kmalloc(sizeof(callback_list_t), GFP_KERNEL);
- if (new == NULL)
- return -ENOMEM;
- new->callback = callback;
- new->next = callback_list;
- callback_list = new;
- return 0;
-}
-
-void apm_unregister_callback(int (*callback)(apm_event_t))
-{
- callback_list_t ** ptr;
- callback_list_t * old;
-
- for (ptr = &callback_list; *ptr != NULL; ptr = &(*ptr)->next)
- if ((*ptr)->callback == callback)
- break;
- old = *ptr;
- *ptr = old->next;
- kfree_s(old, sizeof(callback_list_t));
-}
-
static int queue_empty(struct apm_bios_struct * as)
{
return as->event_head == as->event_tail;
@@ -848,17 +809,26 @@ static apm_event_t get_event(void)
static int send_event(apm_event_t event, apm_event_t undo,
struct apm_bios_struct *sender)
{
- callback_list_t * call;
- callback_list_t * fix;
-
- for (call = callback_list; call != NULL; call = call->next) {
- if (call->callback(event) && undo) {
- for (fix = callback_list; fix != call; fix = fix->next)
- fix->callback(undo);
+ switch (event) {
+ case APM_SYS_SUSPEND:
+ case APM_CRITICAL_SUSPEND:
+ case APM_USER_SUSPEND:
+ /* map all suspends to ACPI D3 */
+ if (pm_send_request(PM_SUSPEND, (void*) 3)) {
+ if (apm_bios_info.version > 0x100)
+ apm_set_power_state(APM_STATE_REJECT);
+ return 0;
+ }
+ break;
+ case APM_NORMAL_RESUME:
+ case APM_CRITICAL_RESUME:
+ /* map all resumes to ACPI D0 */
+ if (pm_send_request(PM_RESUME, 0)) {
if (apm_bios_info.version > 0x100)
apm_set_power_state(APM_STATE_REJECT);
return 0;
}
+ break;
}
queue_event(event, sender);
@@ -1373,7 +1343,7 @@ static int apm(void *unused)
/* Install our power off handler.. */
if (power_off_enabled)
- acpi_power_off = apm_power_off;
+ pm_power_off = apm_power_off;
#ifdef CONFIG_MAGIC_SYSRQ
sysrq_power_off = apm_power_off;
#endif
@@ -1381,6 +1351,8 @@ static int apm(void *unused)
console_blank_hook = apm_console_blank;
#endif
+ pm_active = 1;
+
apm_mainloop();
return 0;
}
@@ -1484,12 +1456,10 @@ static int __init apm_init(void)
APM_INIT_ERROR_RETURN;
}
-#ifdef CONFIG_ACPI
- if (acpi_active) {
+ if (PM_IS_ACTIVE()) {
printk(KERN_NOTICE "apm: overridden by ACPI.\n");
APM_INIT_ERROR_RETURN;
}
-#endif
/*
* Set up a segment that references the real mode segment 0x40
@@ -1551,4 +1521,4 @@ static int __init apm_init(void)
return 0;
}
-module_init(apm_init)
+__initcall(apm_init);
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index f93765754..bcca244c1 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -76,6 +76,7 @@ sigpending = 8
addr_limit = 12
exec_domain = 16
need_resched = 20
+processor = 56
ENOSYS = 38
@@ -203,9 +204,17 @@ ENTRY(system_call)
.globl ret_from_sys_call
.globl ret_from_intr
ret_from_sys_call:
- movl SYMBOL_NAME(bh_mask),%eax
- andl SYMBOL_NAME(bh_active),%eax
- jne handle_bottom_half
+#ifdef __SMP__
+ movl processor(%ebx),%eax
+ shll $5,%eax
+ movl SYMBOL_NAME(softirq_state)(,%eax),%ecx
+ testl SYMBOL_NAME(softirq_state)+4(,%eax),%ecx
+#else
+ movl SYMBOL_NAME(softirq_state),%ecx
+ testl SYMBOL_NAME(softirq_state)+4,%ecx
+#endif
+ jne handle_softirq
+
ret_with_reschedule:
cmpl $0,need_resched(%ebx)
jne reschedule
@@ -250,9 +259,18 @@ badsys:
ALIGN
ret_from_exception:
- movl SYMBOL_NAME(bh_mask),%eax
- andl SYMBOL_NAME(bh_active),%eax
- jne handle_bottom_half
+#ifdef __SMP__
+ GET_CURRENT(%ebx)
+ movl processor(%ebx),%eax
+ shll $5,%eax
+ movl SYMBOL_NAME(softirq_state)(,%eax),%ecx
+ testl SYMBOL_NAME(softirq_state)+4(,%eax),%ecx
+#else
+ movl SYMBOL_NAME(softirq_state),%ecx
+ testl SYMBOL_NAME(softirq_state)+4,%ecx
+#endif
+ jne handle_softirq
+
ALIGN
ret_from_intr:
GET_CURRENT(%ebx)
@@ -263,10 +281,10 @@ ret_from_intr:
jmp restore_all
ALIGN
-handle_bottom_half:
- call SYMBOL_NAME(do_bottom_half)
+handle_softirq:
+ call SYMBOL_NAME(do_softirq)
jmp ret_from_intr
-
+
ALIGN
reschedule:
call SYMBOL_NAME(schedule) # test
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 0c8250ad3..f7138faa3 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -212,17 +212,6 @@ is386: pushl %ecx # restore original EFLAGS
orl $2,%eax # set MP
2: movl %eax,%cr0
call check_x87
-#ifdef __SMP__
- movb ready,%al # First CPU if 0
- orb %al,%al
- jz 4f # First CPU skip this stuff
- movl %cr4,%eax # Turn on 4Mb pages
- orl $16,%eax
- movl %eax,%cr4
- movl %cr3,%eax # Intel specification clarification says
- movl %eax,%cr3 # to do this. Maybe it makes a difference.
- # Who knows ?
-#endif
4:
#ifdef __SMP__
incb ready
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index 456f4bab2..f58e7485f 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -8,7 +8,7 @@
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
-#include <linux/acpi.h>
+#include <linux/pm.h>
#include <linux/pci.h>
#include <asm/semaphore.h>
@@ -44,14 +44,13 @@ EXPORT_SYMBOL(dump_fpu);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__io_virt_debug);
-EXPORT_SYMBOL(local_bh_count);
-EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(acpi_idle);
-EXPORT_SYMBOL(acpi_power_off);
+EXPORT_SYMBOL(pm_idle);
+EXPORT_SYMBOL(pm_power_off);
EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
@@ -106,11 +105,7 @@ EXPORT_SYMBOL_NOVERS(__read_lock_failed);
/* Global SMP irq stuff */
EXPORT_SYMBOL(synchronize_irq);
-EXPORT_SYMBOL(synchronize_bh);
-EXPORT_SYMBOL(global_bh_count);
-EXPORT_SYMBOL(global_bh_lock);
EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(i386_bh_lock);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
@@ -141,3 +136,7 @@ EXPORT_SYMBOL(screen_info);
#endif
EXPORT_SYMBOL(get_wchan);
+
+
+EXPORT_SYMBOL(local_bh_count);
+EXPORT_SYMBOL(local_irq_count);
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index fdd4ecda9..0037bebdd 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -223,7 +223,7 @@ static int __init find_timer_pin(int type)
static int __init pin_2_irq(int idx, int apic, int pin);
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
{
- int apic, i;
+ int apic, i, best_guess = -1;
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
@@ -236,10 +236,18 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
(mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
!mp_irqs[i].mpc_irqtype &&
(bus == mp_bus_id_to_pci_bus[mp_irqs[i].mpc_srcbus]) &&
- (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f)) &&
- (pci_pin == (mp_irqs[i].mpc_srcbusirq & 3)))
+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
- return pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
+ if (pci_pin == (mp_irqs[i].mpc_srcbusirq & 3))
+ return irq;
+ /*
+ * Use the first all-but-pin matching entry as a
+ * best-guess fuzzy result for broken mptables.
+ */
+ if (best_guess < 0)
+ best_guess = irq;
+ }
}
return -1;
}
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 317d8a8d7..6112ac036 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -182,30 +182,12 @@ int get_irq_list(char *buf)
* Global interrupt locks for SMP. Allow interrupts to come in on any
* CPU, yet make cli/sti act globally to protect critical regions..
*/
-spinlock_t i386_bh_lock = SPIN_LOCK_UNLOCKED;
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile int global_irq_lock;
atomic_t global_irq_count;
-atomic_t global_bh_count;
-atomic_t global_bh_lock;
-
-/*
- * "global_cli()" is a special case, in that it can hold the
- * interrupts disabled for a longish time, and also because
- * we may be doing TLB invalidates when holding the global
- * IRQ lock for historical reasons. Thus we may need to check
- * SMP invalidate events specially by hand here (but not in
- * any normal spinlocks)
- */
-static inline void check_smp_invalidate(int cpu)
-{
- if (test_bit(cpu, &smp_invalidate_needed))
- do_flush_tlb_local();
-}
-
static void show(char * str)
{
int i;
@@ -216,7 +198,7 @@ static void show(char * str)
printk("irq: %d [%d %d]\n",
atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
+ spin_is_locked(&global_bh_lock) ? 1 : 0, local_bh_count[0], local_bh_count[1]);
stack = (unsigned long *) &stack;
for (i = 40; i ; i--) {
unsigned long x = *++stack;
@@ -228,18 +210,6 @@ static void show(char * str)
#define MAXCOUNT 100000000
-static inline void wait_on_bh(void)
-{
- int count = MAXCOUNT;
- do {
- if (!--count) {
- show("wait_on_bh");
- count = ~0;
- }
- /* nothing .. wait for the other bh's to go away */
- } while (atomic_read(&global_bh_count) != 0);
-}
-
/*
* I had a lockup scenario where a tight loop doing
* spin_unlock()/spin_lock() on CPU#1 was racing with
@@ -279,7 +249,7 @@ static inline void wait_on_irq(int cpu)
* already executing in one..
*/
if (!atomic_read(&global_irq_count)) {
- if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
+ if (local_bh_count[cpu] || !spin_is_locked(&global_bh_lock))
break;
}
@@ -294,12 +264,11 @@ static inline void wait_on_irq(int cpu)
__sti();
SYNC_OTHER_CORES(cpu);
__cli();
- check_smp_invalidate(cpu);
if (atomic_read(&global_irq_count))
continue;
if (global_irq_lock)
continue;
- if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
+ if (!local_bh_count[cpu] && spin_is_locked(&global_bh_lock))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
@@ -309,20 +278,6 @@ static inline void wait_on_irq(int cpu)
/*
* This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void synchronize_bh(void)
-{
- if (atomic_read(&global_bh_count) && !in_interrupt())
- wait_on_bh();
-}
-
-/*
- * This is called when we want to synchronize with
* interrupts. We may for example tell a device to
* stop sending interrupts: but to make sure there
* are no interrupts that are executing on another
@@ -346,7 +301,6 @@ static inline void get_irqlock(int cpu)
/* Uhhuh.. Somebody else got it. Wait.. */
do {
do {
- check_smp_invalidate(cpu);
} while (test_bit(0,&global_irq_lock));
} while (test_and_set_bit(0,&global_irq_lock));
}
@@ -621,16 +575,8 @@ asmlinkage unsigned int do_IRQ(struct pt_regs regs)
desc->handler->end(irq);
spin_unlock(&irq_controller_lock);
- /*
- * This should be conditional: we should really get
- * a return code from the irq handler to tell us
- * whether the handler wants us to do software bottom
- * half handling or not..
- */
- if (1) {
- if (bh_active & bh_mask)
- do_bottom_half();
- }
+ if (softirq_state[cpu].active&softirq_state[cpu].mask)
+ do_softirq();
return 1;
}
diff --git a/arch/i386/kernel/mtrr.c b/arch/i386/kernel/mtrr.c
index 0061bc14d..5caa4e477 100644
--- a/arch/i386/kernel/mtrr.c
+++ b/arch/i386/kernel/mtrr.c
@@ -1469,19 +1469,10 @@ static int mtrr_close (struct inode *ino, struct file *file)
static struct file_operations mtrr_fops =
{
- NULL, /* Seek */
- mtrr_read, /* Read */
- mtrr_write, /* Write */
- NULL, /* Readdir */
- NULL, /* Poll */
- mtrr_ioctl, /* IOctl */
- NULL, /* MMAP */
- NULL, /* Open */
- NULL, /* Flush */
- mtrr_close, /* Release */
- NULL, /* Fsync */
- NULL, /* Fasync */
- NULL, /* Lock */
+ read: mtrr_read,
+ write: mtrr_write,
+ ioctl: mtrr_ioctl,
+ release: mtrr_close,
};
static struct inode_operations proc_mtrr_inode_operations = {
diff --git a/arch/i386/kernel/pci-pc.c b/arch/i386/kernel/pci-pc.c
index 601ffd3bf..590e01fd5 100644
--- a/arch/i386/kernel/pci-pc.c
+++ b/arch/i386/kernel/pci-pc.c
@@ -939,12 +939,26 @@ static void __init pci_fixup_ide_bases(struct pci_dev *d)
}
}
+static void __init pci_fixup_ide_trash(struct pci_dev *d)
+{
+ int i;
+
+ /*
+ * There exist PCI IDE controllers which have utter garbage
+ * in first four base registers. Ignore that.
+ */
+ DBG("PCI: IDE base address trash cleared for %s\n", d->slot_name);
+ for(i=0; i<4; i++)
+ d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
+}
+
struct pci_fixup pcibios_fixups[] = {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_RCC, PCI_DEVICE_ID_RCC_HE, pci_fixup_rcc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_RCC, PCI_DEVICE_ID_RCC_LE, pci_fixup_rcc },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_6010, pci_fixup_compaq },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide },
+ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash },
{ PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases },
{ 0 }
};
@@ -1132,6 +1146,10 @@ static void __init pcibios_fixup_irqs(void)
if (pin) {
pin--; /* interrupt pins are numbered starting from 1 */
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
+/*
+ * Will be removed completely if things work out well with fuzzy parsing
+ */
+#if 0
if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
struct pci_dev * bridge = dev->bus->self;
@@ -1142,6 +1160,7 @@ static void __init pcibios_fixup_irqs(void)
printk(KERN_WARNING "PCI: using PPB(B%d,I%d,P%d) to get irq %d\n",
bridge->bus->number, PCI_SLOT(bridge->devfn), pin, irq);
}
+#endif
if (irq >= 0) {
printk("PCI->APIC IRQ transform: (B%d,I%d,P%d) -> %d\n",
dev->bus->number, PCI_SLOT(dev->devfn), pin, irq);
diff --git a/arch/i386/kernel/pm.c b/arch/i386/kernel/pm.c
deleted file mode 100644
index 35ec0f489..000000000
--- a/arch/i386/kernel/pm.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * pm.c - Power management interface
- *
- * Copyright (C) 2000 Andrew Henroid
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/pm.h>
-
-static spinlock_t pm_devs_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(pm_devs);
-
-/*
- * Register a device with power management
- */
-struct pm_dev *pm_register(pm_dev_t type,
- unsigned long id,
- pm_callback callback)
-{
- struct pm_dev *dev = kmalloc(sizeof(struct pm_dev), GFP_KERNEL);
- if (dev) {
- unsigned long flags;
-
- memset(dev, 0, sizeof(*dev));
- dev->type = type;
- dev->id = id;
- dev->callback = callback;
-
- spin_lock_irqsave(&pm_devs_lock, flags);
- list_add(&dev->entry, &pm_devs);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
- }
- return dev;
-}
-
-/*
- * Unregister a device with power management
- */
-void pm_unregister(struct pm_dev *dev)
-{
- if (dev) {
- unsigned long flags;
-
- spin_lock_irqsave(&pm_devs_lock, flags);
- list_del(&dev->entry);
- spin_unlock_irqrestore(&pm_devs_lock, flags);
-
- kfree(dev);
- }
-}
-
-/*
- * Send a request to all devices
- */
-int pm_send_request(pm_request_t rqst, void *data)
-{
- struct list_head *entry = pm_devs.next;
- while (entry != &pm_devs) {
- struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
- if (dev->callback) {
- int status = (*dev->callback)(dev, rqst, data);
- if (status)
- return status;
- }
- entry = entry->next;
- }
- return 0;
-}
-
-/*
- * Find a device
- */
-struct pm_dev *pm_find(pm_dev_t type, struct pm_dev *from)
-{
- struct list_head *entry = from ? from->entry.next:pm_devs.next;
- while (entry != &pm_devs) {
- struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
- if (type == PM_UNKNOWN_DEV || dev->type == type)
- return dev;
- entry = entry->next;
- }
- return 0;
-}
-
-EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
-EXPORT_SYMBOL(pm_send_request);
-EXPORT_SYMBOL(pm_find);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 703482425..c38e383e7 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -61,12 +61,12 @@ void enable_hlt(void)
/*
* Powermanagement idle function, if any..
*/
-void (*acpi_idle)(void) = NULL;
+void (*pm_idle)(void) = NULL;
/*
* Power off function, if any
*/
-void (*acpi_power_off)(void) = NULL;
+void (*pm_power_off)(void) = NULL;
/*
* We use this if we don't have any better
@@ -92,7 +92,7 @@ void cpu_idle(void)
current->counter = -100;
while (1) {
- void (*idle)(void) = acpi_idle;
+ void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
while (!current->need_resched)
@@ -328,8 +328,8 @@ void machine_halt(void)
void machine_power_off(void)
{
- if (acpi_power_off)
- acpi_power_off();
+ if (pm_power_off)
+ pm_power_off();
}
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index d308a1280..e72f95160 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -75,7 +75,7 @@
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
-
+#include <asm/mmu_context.h>
/*
* Machine setup..
*/
@@ -1543,6 +1543,10 @@ void cpu_init (void)
*/
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
+ if(current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current, nr);
+
t->esp0 = current->thread.esp0;
set_tss_desc(nr,t);
gdt_table[__TSS(nr)].b &= 0xfffffdff;
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 05e0d1d23..0b585513f 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -103,8 +103,7 @@
/* The 'big kernel lock' */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
-volatile unsigned long smp_invalidate_needed; /* immediate flush required */
-unsigned int cpu_tlbbad[NR_CPUS]; /* flush before returning to user space */
+struct tlb_state cpu_tlbstate[NR_CPUS];
/*
* the following functions deal with sending IPIs between CPUs.
@@ -186,15 +185,15 @@ static inline int __prepare_ICR (unsigned int shortcut, int vector)
return cfg;
}
-static inline int __prepare_ICR2 (unsigned int dest)
+static inline int __prepare_ICR2 (unsigned int mask)
{
unsigned int cfg;
cfg = __get_ICR2();
#if LOGICAL_DELIVERY
- cfg |= SET_APIC_DEST_FIELD((1<<dest));
+ cfg |= SET_APIC_DEST_FIELD(mask);
#else
- cfg |= SET_APIC_DEST_FIELD(dest);
+ cfg |= SET_APIC_DEST_FIELD(mask);
#endif
return cfg;
@@ -250,7 +249,7 @@ void send_IPI_self(int vector)
__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
-static inline void send_IPI_single(int dest, int vector)
+static inline void send_IPI_mask(int mask, int vector)
{
unsigned long cfg;
#if FORCE_READ_AROUND_WRITE
@@ -264,7 +263,7 @@ static inline void send_IPI_single(int dest, int vector)
* prepare target chip field
*/
- cfg = __prepare_ICR2(dest);
+ cfg = __prepare_ICR2(mask);
apic_write(APIC_ICR2, cfg);
/*
@@ -282,112 +281,173 @@ static inline void send_IPI_single(int dest, int vector)
}
/*
- * This is fraught with deadlocks. Probably the situation is not that
- * bad as in the early days of SMP, so we might ease some of the
- * paranoia here.
+ * Smarter SMP flushing macros.
+ * c/o Linus Torvalds.
+ *
+ * These mean you can really definitely utterly forget about
+ * writing to user space from interrupts. (Its not allowed anyway).
+ *
+ * Optimizations Manfred Spraul <manfreds@colorfullife.com>
*/
-static void flush_tlb_others(unsigned int cpumask)
+
+static volatile unsigned long flush_cpumask;
+static struct mm_struct * flush_mm;
+static unsigned long flush_va;
+static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
+#define FLUSH_ALL 0xffffffff
+
+static void inline leave_mm (unsigned long cpu)
{
- int cpu = smp_processor_id();
- int stuck;
- unsigned long flags;
+ if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
+ BUG();
+ clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
+ cpu_tlbstate[cpu].state = TLBSTATE_OLD;
+}
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * 1) set_bit(cpu, &new_mm->cpu_vm_mask);
+ * 2) update cpu_tlbstate
+ * [now the cpu can accept tlb flush request for the new mm]
+ * 3) change cr3 (if required, or flush local tlb,...)
+ * 4) clear_bit(cpu, &old_mm->cpu_vm_mask);
+ * 5) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ * runs in kernel space, the cpu could load tlb entries for user space
+ * pages.
+ *
+ * The good news is that cpu_tlbstate is local to each cpu, no
+ * write/read ordering problems.
+ */
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update cpu_tlbstate.
+ */
+
+asmlinkage void smp_invalidate_interrupt (void)
+{
+ unsigned long cpu = smp_processor_id();
+
+ if (!test_bit(cpu, &flush_cpumask))
+ BUG();
+ if (flush_mm == cpu_tlbstate[cpu].active_mm) {
+ if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
+ if (flush_va == FLUSH_ALL)
+ local_flush_tlb();
+ else
+ __flush_tlb_one(flush_va);
+ } else
+ leave_mm(cpu);
+ } else {
+ extern void show_stack (void *);
+ printk("hm #1: %p, %p.\n", flush_mm, cpu_tlbstate[cpu].active_mm);
+ show_stack(NULL);
+ }
+ __flush_tlb();
+ ack_APIC_irq();
+ clear_bit(cpu, &flush_cpumask);
+}
+static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
+ unsigned long va)
+{
/*
- * it's important that we do not generate any APIC traffic
- * until the AP CPUs have booted up!
+ * A couple of (to be removed) sanity checks:
+ *
+ * - we do not send IPIs to not-yet booted CPUs.
+ * - current CPU must not be in mask
+ * - mask must exist :)
*/
- cpumask &= cpu_online_map;
- if (cpumask) {
- atomic_set_mask(cpumask, &smp_invalidate_needed);
-
- /*
- * Processors spinning on some lock with IRQs disabled
- * will see this IRQ late. The smp_invalidate_needed
- * map will ensure they don't do a spurious flush tlb
- * or miss one.
- */
+ if (!cpumask)
+ BUG();
+ if ((cpumask & cpu_online_map) != cpumask)
+ BUG();
+ if (cpumask & (1 << smp_processor_id()))
+ BUG();
+ if (!mm)
+ BUG();
+
+ /*
+ * i'm not happy about this global shared spinlock in the
+ * MM hot path, but we'll see how contended it is.
+ * Temporarily this turns IRQs off, so that lockups are
+ * detected by the NMI watchdog.
+ */
+ spin_lock_irq(&tlbstate_lock);
- __save_flags(flags);
- __cli();
-
- send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
-
- /*
- * Spin waiting for completion
- */
-
- stuck = 50000000;
- while (smp_invalidate_needed) {
- /*
- * Take care of "crossing" invalidates
- */
- if (test_bit(cpu, &smp_invalidate_needed))
- do_flush_tlb_local();
-
- --stuck;
- if (!stuck) {
- printk("stuck on TLB IPI wait (CPU#%d)\n",cpu);
- break;
- }
- }
- __restore_flags(flags);
- }
-}
+ flush_mm = mm;
+ flush_va = va;
+ atomic_set_mask(cpumask, &flush_cpumask);
+ /*
+ * We have to send the IPI only to
+ * CPUs affected.
+ */
+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
-/*
- * Smarter SMP flushing macros.
- * c/o Linus Torvalds.
- *
- * These mean you can really definitely utterly forget about
- * writing to user space from interrupts. (Its not allowed anyway).
- */
+ while (flush_cpumask)
+ /* nothing. lockup detection does not belong here */;
+
+ flush_mm = NULL;
+ flush_va = 0;
+ spin_unlock_irq(&tlbstate_lock);
+}
+
void flush_tlb_current_task(void)
{
- unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = current->mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
- mm->cpu_vm_mask = vm_mask;
- flush_tlb_others(cpu_mask);
local_flush_tlb();
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
-void flush_tlb_mm(struct mm_struct * mm)
+void flush_tlb_mm (struct mm_struct * mm)
{
- unsigned long vm_mask = 1 << smp_processor_id();
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
- mm->cpu_vm_mask = 0;
if (current->active_mm == mm) {
- mm->cpu_vm_mask = vm_mask;
- local_flush_tlb();
+ if (current->mm)
+ local_flush_tlb();
+ else
+ leave_mm(smp_processor_id());
}
- flush_tlb_others(cpu_mask);
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
- unsigned long vm_mask = 1 << smp_processor_id();
struct mm_struct *mm = vma->vm_mm;
- unsigned long cpu_mask = mm->cpu_vm_mask & ~vm_mask;
+ unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
- mm->cpu_vm_mask = 0;
if (current->active_mm == mm) {
- __flush_tlb_one(va);
- mm->cpu_vm_mask = vm_mask;
+ if(current->mm)
+ __flush_tlb_one(va);
+ else
+ leave_mm(smp_processor_id());
}
- flush_tlb_others(cpu_mask);
+
+ if (cpu_mask)
+ flush_tlb_others(cpu_mask, mm, va);
}
static inline void do_flush_tlb_all_local(void)
{
- __flush_tlb_all();
- if (!current->mm && current->active_mm) {
- unsigned long cpu = smp_processor_id();
+ unsigned long cpu = smp_processor_id();
- clear_bit(cpu, &current->active_mm->cpu_vm_mask);
- cpu_tlbbad[cpu] = 1;
- }
+ __flush_tlb_all();
+ if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
+ leave_mm(cpu);
}
static void flush_tlb_all_ipi(void* info)
@@ -410,7 +470,7 @@ void flush_tlb_all(void)
void smp_send_reschedule(int cpu)
{
- send_IPI_single(cpu, RESCHEDULE_VECTOR);
+ send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
}
/*
@@ -514,23 +574,6 @@ asmlinkage void smp_reschedule_interrupt(void)
ack_APIC_irq();
}
-/*
- * Invalidate call-back.
- *
- * Mark the CPU as a VM user if there is a active
- * thread holding on to an mm at this time. This
- * allows us to optimize CPU cross-calls even in the
- * presense of lazy TLB handling.
- */
-asmlinkage void smp_invalidate_interrupt(void)
-{
- if (test_bit(smp_processor_id(), &smp_invalidate_needed))
- do_flush_tlb_local();
-
- ack_APIC_irq();
-
-}
-
asmlinkage void smp_call_function_interrupt(void)
{
void (*func) (void *info) = call_data->func;
diff --git a/arch/i386/kernel/trampoline.S b/arch/i386/kernel/trampoline.S
index 12c1dbe34..52e00d9be 100644
--- a/arch/i386/kernel/trampoline.S
+++ b/arch/i386/kernel/trampoline.S
@@ -55,7 +55,7 @@ r_base = .
jmp flush_instr
flush_instr:
ljmpl $__KERNEL_CS, $0x00100000
- # jump to startup_32
+ # jump to startup_32 in arch/i386/kernel/head.S
idt_48:
.word 0 # idt limit = 0
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 17cac5019..07797e760 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -124,19 +124,63 @@ int kstack_depth_to_print = 24;
/*
* These constants are for searching for possible module text
- * segments. VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
- * a guess of how much space is likely to be vmalloced.
+ * segments. MODULE_RANGE is a guess of how much space is likely
+ * to be vmalloced.
*/
-#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
+void show_stack(unsigned long * esp)
+{
+ unsigned long *stack, addr, module_start, module_end;
+ int i;
+
+ // debugging aid: "show_stack(NULL);" prints the
+ // back trace for this cpu.
+
+ if(esp==NULL)
+ esp=(unsigned long*)&esp;
+
+ stack = esp;
+ for(i=0; i < kstack_depth_to_print; i++) {
+ if (((long) stack & (THREAD_SIZE-1)) == 0)
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+
+ printk("\nCall Trace: ");
+ stack = esp;
+ i = 1;
+ module_start = VMALLOC_START;
+ module_end = VMALLOC_END;
+ while (((long) stack & (THREAD_SIZE-1)) != 0) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (((addr >= (unsigned long) &_stext) &&
+ (addr <= (unsigned long) &_etext)) ||
+ ((addr >= module_start) && (addr <= module_end))) {
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("[<%08lx>] ", addr);
+ i++;
+ }
+ }
+}
+
static void show_registers(struct pt_regs *regs)
{
int i;
int in_kernel = 1;
unsigned long esp;
unsigned short ss;
- unsigned long *stack, addr, module_start, module_end;
esp = (unsigned long) (&regs->esp);
ss = __KERNEL_DS;
@@ -160,43 +204,24 @@ static void show_registers(struct pt_regs *regs)
* time of the fault..
*/
if (in_kernel) {
+
printk("\nStack: ");
- stack = (unsigned long *) esp;
- for(i=0; i < kstack_depth_to_print; i++) {
- if (((long) stack & 4095) == 0)
+ show_stack((unsigned long*)esp);
+
+ printk("\nCode: ");
+ if(regs->eip < PAGE_OFFSET)
+ goto bad;
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
+ if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+bad:
+ printk(" Bad EIP value.");
break;
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("%08lx ", *stack++);
- }
- printk("\nCall Trace: ");
- stack = (unsigned long *) esp;
- i = 1;
- module_start = PAGE_OFFSET + (max_mapnr << PAGE_SHIFT);
- module_start = ((module_start + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
- module_end = module_start + MODULE_RANGE;
- while (((long) stack & 4095) != 0) {
- addr = *stack++;
- /*
- * If the address is either in the text segment of the
- * kernel, or in the region which contains vmalloc'ed
- * memory, it *may* be the address of a calling
- * routine; if so, print it so that someone tracing
- * down the cause of the crash will be able to figure
- * out the call path that was taken.
- */
- if (((addr >= (unsigned long) &_stext) &&
- (addr <= (unsigned long) &_etext)) ||
- ((addr >= module_start) && (addr <= module_end))) {
- if (i && ((i % 8) == 0))
- printk("\n ");
- printk("[<%08lx>] ", addr);
- i++;
}
+ printk("%02x ", c);
}
- printk("\nCode: ");
- for(i=0;i<20;i++)
- printk("%02x ", ((unsigned char *)regs->eip)[i]);
}
printk("\n");
}