author     Ralf Baechle <ralf@linux-mips.org>  2000-06-16 23:00:36 +0000
committer  Ralf Baechle <ralf@linux-mips.org>  2000-06-16 23:00:36 +0000
commit     14dd2ec093cfabda3ae7efeeaf0e23c66ebaccc0 (patch)
tree       9a9ce5cff6ef92faa6e07a82785b9a6d6838f7e4 /arch/i386
parent     847290510f811c572cc2aa80c1f02a04721410b1 (diff)
Merge with 2.4.0-test1.
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/Makefile          |   4
-rw-r--r--  arch/i386/config.in         |  34
-rw-r--r--  arch/i386/defconfig         |   5
-rw-r--r--  arch/i386/kernel/Makefile   |  16
-rw-r--r--  arch/i386/kernel/apic.c     |   5
-rw-r--r--  arch/i386/kernel/apm.c      |  42
-rw-r--r--  arch/i386/kernel/cpuid.c    | 168
-rw-r--r--  arch/i386/kernel/entry.S    |   5
-rw-r--r--  arch/i386/kernel/i8259.c    |   3
-rw-r--r--  arch/i386/kernel/msr.c      | 274
-rw-r--r--  arch/i386/kernel/process.c  |  90
-rw-r--r--  arch/i386/kernel/ptrace.c   |  13
-rw-r--r--  arch/i386/kernel/setup.c    | 142
-rw-r--r--  arch/i386/kernel/signal.c   |  43
-rw-r--r--  arch/i386/kernel/smp.c      |  10
-rw-r--r--  arch/i386/kernel/smpboot.c  |   5
-rw-r--r--  arch/i386/kernel/traps.c    | 126
-rw-r--r--  arch/i386/mm/fault.c        |  22
18 files changed, 926 insertions(+), 81 deletions(-)
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index 113a00fa6..755cc877a 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -49,6 +49,10 @@ ifdef CONFIG_M686
CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi)
endif
+ifdef CONFIG_M686FX
+CFLAGS += $(shell if $(CC) -march=i686 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=i686"; fi)
+endif
+
ifdef CONFIG_MK6
CFLAGS += $(shell if $(CC) -march=k6 -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-march=k6"; fi)
endif
diff --git a/arch/i386/config.in b/arch/i386/config.in
index 1208a6b82..a928efb33 100644
--- a/arch/i386/config.in
+++ b/arch/i386/config.in
@@ -18,13 +18,15 @@ endmenu
mainmenu_option next_comment
comment 'Processor type and features'
choice 'Processor family' \
- "386 CONFIG_M386 \
- 486/Cx486 CONFIG_M486 \
- 586/K5/5x86/6x86/6x86MX CONFIG_M586 \
- Pentium/TSC CONFIG_M586TSC \
- PPro/P-II/P-III CONFIG_M686 \
- K6/II/III CONFIG_MK6 \
- Athlon CONFIG_MK7" PPro
+ "386 CONFIG_M386 \
+ 486/Cx486 CONFIG_M486 \
+ 586/K5/5x86/6x86/6x86MX CONFIG_M586 \
+ Pentium/TSC CONFIG_M586TSC \
+ PPro/Pentium-II CONFIG_M686 \
+ Pentium-III CONFIG_M686FX \
+ K6/K6-II/K6-III CONFIG_MK6 \
+ Athlon CONFIG_MK7 \
+ Crusoe CONFIG_MCRUSOE" PPro
#
# Define implied options from the CPU selection here
#
@@ -60,6 +62,14 @@ if [ "$CONFIG_M686" = "y" ]; then
define_bool CONFIG_X86_PGE y
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
fi
+if [ "$CONFIG_M686FX" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_BYTES 32
+ define_bool CONFIG_X86_TSC y
+ define_bool CONFIG_X86_GOOD_APIC y
+ define_bool CONFIG_X86_PGE y
+ define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
+ define_bool CONFIG_X86_FX y
+fi
if [ "$CONFIG_MK6" = "y" ]; then
define_int CONFIG_X86_L1_CACHE_BYTES 32
define_bool CONFIG_X86_ALIGNMENT_16 y
@@ -74,8 +84,14 @@ if [ "$CONFIG_MK7" = "y" ]; then
define_bool CONFIG_X86_PGE y
define_bool CONFIG_X86_USE_PPRO_CHECKSUM y
fi
+if [ "$CONFIG_MCRUSOE" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_BYTES 32
+ define_bool CONFIG_X86_TSC y
+fi
tristate '/dev/cpu/microcode - Intel P6 CPU microcode support' CONFIG_MICROCODE
+tristate '/dev/cpu/*/msr - Model-specific register support' CONFIG_X86_MSR
+tristate '/dev/cpu/*/cpuid - CPU information support' CONFIG_X86_CPUID
choice 'High Memory Support' \
"off CONFIG_NOHIGHMEM \
@@ -89,7 +105,9 @@ if [ "$CONFIG_HIGHMEM64G" = "y" ]; then
define_bool CONFIG_X86_PAE y
fi
-bool 'Math emulation' CONFIG_MATH_EMULATION
+if [ "$CONFIG_X86_FX" != "y" ]; then
+ bool 'Math emulation' CONFIG_MATH_EMULATION
+fi
bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" != "y" ]; then
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index e00ac753a..281a82cea 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -19,8 +19,10 @@ CONFIG_UID16=y
# CONFIG_M586 is not set
# CONFIG_M586TSC is not set
CONFIG_M686=y
+# CONFIG_M686FX is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
+# CONFIG_MCRUSOE is not set
CONFIG_X86_WP_WORKS_OK=y
CONFIG_X86_INVLPG=y
CONFIG_X86_CMPXCHG=y
@@ -32,6 +34,8 @@ CONFIG_X86_GOOD_APIC=y
CONFIG_X86_PGE=y
CONFIG_X86_USE_PPRO_CHECKSUM=y
# CONFIG_MICROCODE is not set
+# CONFIG_X86_MSR is not set
+# CONFIG_X86_CPUID is not set
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
# CONFIG_HIGHMEM64G is not set
@@ -194,7 +198,6 @@ CONFIG_IDEPCI_SHARE_IRQ=y
# CONFIG_BLK_DEV_OFFBOARD is not set
# CONFIG_IDEDMA_PCI_AUTO is not set
# CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_IDEDMA_PCI_EXPERIMENTAL is not set
# CONFIG_IDEDMA_PCI_WIP is not set
# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
# CONFIG_BLK_DEV_AEC62XX is not set
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index 56db72ef8..9e9b76848 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -40,6 +40,22 @@ else
endif
endif
+ifeq ($(CONFIG_X86_MSR),y)
+OX_OBJS += msr.o
+else
+ ifeq ($(CONFIG_X86_MSR),m)
+ MX_OBJS += msr.o
+ endif
+endif
+
+ifeq ($(CONFIG_X86_CPUID),y)
+OX_OBJS += cpuid.o
+else
+ ifeq ($(CONFIG_X86_CPUID),m)
+ MX_OBJS += cpuid.o
+ endif
+endif
+
ifeq ($(CONFIG_MICROCODE),y)
OX_OBJS += microcode.o
else
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index eab365e26..0a19bb758 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -128,6 +128,11 @@ void disable_local_APIC(void)
void __init sync_Arb_IDs(void)
{
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
Dprintk("Synchronizing Arb IDs.\n");
apic_write_around(APIC_ICR, APIC_DEST_ALLINC | APIC_INT_LEVELTRIG
| APIC_DM_INIT);
diff --git a/arch/i386/kernel/apm.c b/arch/i386/kernel/apm.c
index b1e0e8b7c..0b4517137 100644
--- a/arch/i386/kernel/apm.c
+++ b/arch/i386/kernel/apm.c
@@ -922,6 +922,10 @@ static int send_event(apm_event_t event, struct apm_user *sender)
case APM_USER_SUSPEND:
/* map all suspends to ACPI D3 */
if (pm_send_all(PM_SUSPEND, (void *)3)) {
+ if (event == APM_CRITICAL_SUSPEND) {
+ printk(KERN_CRIT "apm: Critical suspend was vetoed, expect armagedon\n" );
+ return 0;
+ }
if (apm_bios_info.version > 0x100)
apm_set_power_state(APM_STATE_REJECT);
return 0;
@@ -934,7 +938,6 @@ static int send_event(apm_event_t event, struct apm_user *sender)
break;
}
- queue_event(event, sender);
return 1;
}
@@ -964,6 +967,7 @@ static void check_events(void)
case APM_SYS_STANDBY:
case APM_USER_STANDBY:
if (send_event(event, NULL)) {
+ queue_event(event, NULL);
if (standbys_pending <= 0)
standby();
}
@@ -980,17 +984,18 @@ static void check_events(void)
if (ignore_bounce)
break;
#endif
- /*
- * If we are already processing a SUSPEND,
- * then further SUSPEND events from the BIOS
- * will be ignored. We also return here to
- * cope with the fact that the Thinkpads keep
- * sending a SUSPEND event until something else
- * happens!
- */
+ /*
+ * If we are already processing a SUSPEND,
+ * then further SUSPEND events from the BIOS
+ * will be ignored. We also return here to
+ * cope with the fact that the Thinkpads keep
+ * sending a SUSPEND event until something else
+ * happens!
+ */
if (waiting_for_resume)
- return;
+ return;
if (send_event(event, NULL)) {
+ queue_event(event, NULL);
waiting_for_resume = 1;
if (suspends_pending <= 0)
(void) suspend();
@@ -1007,6 +1012,7 @@ static void check_events(void)
#endif
set_time();
send_event(event, NULL);
+ queue_event(event, NULL);
break;
case APM_CAPABILITY_CHANGE:
@@ -1020,6 +1026,7 @@ static void check_events(void)
break;
case APM_CRITICAL_SUSPEND:
+ send_event(event, NULL); /* We can only hope it worked; critical suspend may not fail */
(void) suspend();
break;
}
@@ -1056,6 +1063,7 @@ static void apm_event_handler(void)
static void apm_mainloop(void)
{
+ int timeout = HZ;
DECLARE_WAITQUEUE(wait, current);
if (smp_num_cpus > 1)
@@ -1065,7 +1073,10 @@ static void apm_mainloop(void)
current->state = TASK_INTERRUPTIBLE;
for (;;) {
/* Nothing to do, just sleep for the timeout */
- schedule_timeout(APM_CHECK_TIMEOUT);
+ timeout = 2*timeout;
+ if (timeout > APM_CHECK_TIMEOUT)
+ timeout = APM_CHECK_TIMEOUT;
+ schedule_timeout(timeout);
if (exit_kapmd)
break;
@@ -1080,13 +1091,16 @@ static void apm_mainloop(void)
continue;
if (apm_do_idle()) {
unsigned long start = jiffies;
- while (system_idle()) {
+ while ((!exit_kapmd) && system_idle()) {
apm_do_idle();
- if (jiffies - start > APM_CHECK_TIMEOUT)
- break;
+ if (jiffies - start > (5*APM_CHECK_TIMEOUT)) {
+ apm_event_handler();
+ start = jiffies;
+ }
}
apm_do_busy();
apm_event_handler();
+ timeout = 1;
}
#endif
}
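
The apm_mainloop() hunk above replaces the fixed APM_CHECK_TIMEOUT poll with a backoff: the sleep interval doubles on every idle pass up to the APM_CHECK_TIMEOUT ceiling and drops back to one tick whenever the idle path just did work. A minimal standalone sketch of that policy (the constant and function names here are illustrative, not kernel API):

#include <stdio.h>

#define CHECK_TIMEOUT 100                /* stand-in for APM_CHECK_TIMEOUT (ticks) */

/* Next poll interval: reset after work, otherwise double up to the ceiling. */
static int next_timeout(int timeout, int did_work)
{
    if (did_work)
        return 1;
    timeout *= 2;
    if (timeout > CHECK_TIMEOUT)
        timeout = CHECK_TIMEOUT;
    return timeout;
}

int main(void)
{
    int i, t = 1;

    for (i = 0; i < 10; i++) {
        printf("sleep %d ticks\n", t);
        t = next_timeout(t, 0);          /* prints 1, 2, 4, ..., then stays at 100 */
    }
    return 0;
}
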
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
new file mode 100644
index 000000000..34c34d818
--- /dev/null
+++ b/arch/i386/kernel/cpuid.c
@@ -0,0 +1,168 @@
+#ident "$Id$"
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2000 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+
+/*
+ * cpuid.c
+ *
+ * x86 CPUID access device
+ *
+ * This device is accessed by lseek() to the appropriate CPUID level
+ * and then read in chunks of 16 bytes. A larger size means multiple
+ * reads of consecutive levels.
+ *
+ * This driver uses /dev/cpu/%d/cpuid where %d is the minor number, and on
+ * an SMP box will direct the access to CPU %d.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/major.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+
+struct cpuid_command {
+ int cpu;
+ u32 reg;
+ u32 *data;
+};
+
+static void cpuid_smp_cpuid(void *cmd_block)
+{
+ struct cpuid_command *cmd = (struct cpuid_command *) cmd_block;
+
+ if ( cmd->cpu == smp_processor_id() )
+ cpuid(cmd->reg, &cmd->data[0], &cmd->data[1], &cmd->data[2], &cmd->data[3]);
+}
+
+extern inline void do_cpuid(int cpu, u32 reg, u32 *data)
+{
+ struct cpuid_command cmd;
+
+ if ( cpu == smp_processor_id() ) {
+ cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
+ } else {
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data = data;
+
+ smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
+ }
+}
+#else /* ! CONFIG_SMP */
+
+extern inline void do_cpuid(int cpu, u32 reg, u32 *data)
+{
+ cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
+}
+
+#endif /* ! CONFIG_SMP */
+
+static loff_t cpuid_seek(struct file *file, loff_t offset, int orig)
+{
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ return file->f_pos;
+ case 1:
+ file->f_pos += offset;
+ return file->f_pos;
+ default:
+ return -EINVAL; /* SEEK_END not supported */
+ }
+}
+
+static ssize_t cpuid_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ u32 *tmp = (u32 *)buf;
+ u32 data[4];
+ size_t rv;
+ u32 reg = *ppos;
+ int cpu = MINOR(file->f_dentry->d_inode->i_rdev);
+
+ if ( count % 16 )
+ return -EINVAL; /* Invalid chunk size */
+
+ for ( rv = 0 ; count ; count -= 16 ) {
+ do_cpuid(cpu, reg, data);
+ if ( copy_to_user(tmp,&data,16) )
+ return -EFAULT;
+ tmp += 4;
+ *ppos = reg++;
+ }
+
+ return ((char *)tmp) - buf;
+}
+
+static int cpuid_open(struct inode *inode, struct file *file)
+{
+ int cpu = MINOR(file->f_dentry->d_inode->i_rdev);
+
+ if ( !(cpu_online_map & (1UL << cpu)) )
+ return -ENXIO; /* No such CPU */
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int cpuid_release(struct inode *inode, struct file *file)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * File operations we support
+ */
+static struct file_operations cpuid_fops = {
+ llseek: cpuid_seek,
+ read: cpuid_read,
+ open: cpuid_open,
+ release: cpuid_release,
+};
+
+int __init cpuid_init(void)
+{
+ if (register_chrdev(CPUID_MAJOR, "cpu/cpuid", &cpuid_fops)) {
+ printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
+ CPUID_MAJOR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void __exit cpuid_exit(void)
+{
+}
+
+module_init(cpuid_init);
+module_exit(cpuid_exit)
+
+EXPORT_NO_SYMBOLS;
+
+MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
+MODULE_DESCRIPTION("x86 generic CPUID driver");
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 50887c15c..26f3d505d 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -413,6 +413,11 @@ ENTRY(spurious_interrupt_bug)
pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
jmp error_code
+ENTRY(xmm_fault)
+ pushl $0
+ pushl $ SYMBOL_NAME(do_xmm_fault)
+ jmp error_code
+
.data
ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index e88fa9022..fff171c24 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -390,10 +390,11 @@ void __init init_8259A(int auto_eoi)
static void math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
+ extern void math_error(void *);
outb(0,0xF0);
if (ignore_irq13 || !boot_cpu_data.hard_math)
return;
- math_error();
+ math_error((void *)regs->eip);
}
static struct irqaction irq13 = { math_error_irq, 0, 0, "fpu", NULL, NULL };
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
new file mode 100644
index 000000000..2ab1ecf33
--- /dev/null
+++ b/arch/i386/kernel/msr.c
@@ -0,0 +1,274 @@
+#ident "$Id$"
+/* ----------------------------------------------------------------------- *
+ *
+ * Copyright 2000 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * msr.c
+ *
+ * x86 MSR access device
+ *
+ * This device is accessed by lseek() to the appropriate register number
+ * and then read/write in chunks of 8 bytes. A larger size means multiple
+ * reads or writes of the same register.
+ *
+ * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on
+ * an SMP box will direct the access to CPU %d.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/smp.h>
+#include <linux/major.h>
+
+#include <asm/processor.h>
+#include <asm/msr.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+extern inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
+{
+ int err = 0;
+
+ asm volatile(
+ "1: wrmsr\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %4,%0\n"
+ " jmp 1b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
+ ".previous"
+ : "+r" (err)
+ : "a" (eax), "d" (edx), "c" (reg), "i" (-EIO));
+
+ return err;
+}
+
+extern inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
+{
+ int err = 0;
+
+ asm volatile(
+ "1: rdmsr\n"
+ "2:\n"
+ ".section .fixup,\"ax\"\n"
+ "3: movl %4,%0\n"
+ " jmp 1b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 1b,3b\n"
+ ".previous"
+ : "+r" (err), "=a" (*eax), "=d" (*eax)
+ : "c" (reg), "i" (-EIO));
+
+ return err;
+}
+
+#ifdef CONFIG_SMP
+
+struct msr_command {
+ int cpu;
+ int err;
+ u32 reg;
+ u32 data[2];
+};
+
+static void msr_smp_wrmsr(void *cmd_block)
+{
+ struct msr_command *cmd = (struct msr_command *) cmd_block;
+
+ if ( cmd->cpu == smp_processor_id() )
+ cmd->err = wrmsr_eio(cmd->reg, cmd->data[0], cmd->data[1]);
+}
+
+static void msr_smp_rdmsr(void *cmd_block)
+{
+ struct msr_command *cmd = (struct msr_command *) cmd_block;
+
+ if ( cmd->cpu == smp_processor_id() )
+ cmd->err = rdmsr_eio(cmd->reg, &cmd->data[0], &cmd->data[1]);
+}
+
+extern inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
+{
+ struct msr_command cmd;
+
+ if ( cpu == smp_processor_id() ) {
+ return wrmsr_eio(reg, eax, edx);
+ } else {
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+ cmd.data[0] = eax;
+ cmd.data[1] = edx;
+
+ smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
+ return cmd.err;
+ }
+}
+
+extern inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
+{
+ struct msr_command cmd;
+
+ if ( cpu == smp_processor_id() ) {
+ return rdmsr_eio(reg, eax, edx);
+ } else {
+ cmd.cpu = cpu;
+ cmd.reg = reg;
+
+ smp_call_function(msr_smp_rdmsr, &cmd, 1, 1);
+
+ *eax = cmd.data[0];
+ *edx = cmd.data[1];
+
+ return cmd.err;
+ }
+}
+
+#else /* ! CONFIG_SMP */
+
+extern inline int do_wrmsr(int cpu, u32 reg, u32 eax, u32 edx)
+{
+ return wrmsr_eio(reg, eax, edx);
+}
+
+extern inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
+{
+ return rdmsr_eio(reg, eax, edx);
+}
+
+#endif /* ! CONFIG_SMP */
+
+static loff_t msr_seek(struct file *file, loff_t offset, int orig)
+{
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ return file->f_pos;
+ case 1:
+ file->f_pos += offset;
+ return file->f_pos;
+ default:
+ return -EINVAL; /* SEEK_END not supported */
+ }
+}
+
+static ssize_t msr_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ u32 *tmp = (u32 *)buf;
+ u32 data[2];
+ size_t rv;
+ u32 reg = *ppos;
+ int cpu = MINOR(file->f_dentry->d_inode->i_rdev);
+ int err;
+
+ if ( count % 8 )
+ return -EINVAL; /* Invalid chunk size */
+
+ for ( rv = 0 ; count ; count -= 8 ) {
+ err = do_rdmsr(cpu, reg, &data[0], &data[1]);
+ if ( err )
+ return err;
+ if ( copy_to_user(tmp,&data,8) )
+ return -EFAULT;
+ tmp += 2;
+ }
+
+ return ((char *)tmp) - buf;
+}
+
+static ssize_t msr_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ const u32 *tmp = (const u32 *)buf;
+ u32 data[2];
+ size_t rv;
+ u32 reg = *ppos;
+ int cpu = MINOR(file->f_dentry->d_inode->i_rdev);
+ int err;
+
+ if ( count % 8 )
+ return -EINVAL; /* Invalid chunk size */
+
+ for ( rv = 0 ; count ; count -= 8 ) {
+ if ( copy_from_user(&data,tmp,8) )
+ return -EFAULT;
+ err = do_wrmsr(cpu, reg, data[0], data[1]);
+ if ( err )
+ return err;
+ tmp += 2;
+ }
+
+ return ((char *)tmp) - buf;
+}
+
+static int msr_open(struct inode *inode, struct file *file)
+{
+ int cpu = MINOR(file->f_dentry->d_inode->i_rdev);
+
+ if ( !(cpu_online_map & (1UL << cpu)) ||
+ !((cpu_data)[cpu].x86_capability & X86_FEATURE_MSR) )
+ return -ENXIO; /* No such CPU */
+
+ MOD_INC_USE_COUNT;
+ return 0;
+}
+
+static int msr_release(struct inode *inode, struct file *file)
+{
+ MOD_DEC_USE_COUNT;
+ return 0;
+}
+
+/*
+ * File operations we support
+ */
+static struct file_operations msr_fops = {
+ llseek: msr_seek,
+ read: msr_read,
+ write: msr_write,
+ open: msr_open,
+ release: msr_release,
+};
+
+int __init msr_init(void)
+{
+ if (register_chrdev(MSR_MAJOR, "cpu/msr", &msr_fops)) {
+ printk(KERN_ERR "msr: unable to get major %d for msr\n",
+ MSR_MAJOR);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void __exit msr_exit(void)
+{
+}
+
+module_init(msr_init);
+module_exit(msr_exit)
+
+EXPORT_NO_SYMBOLS;
+
+MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
+MODULE_DESCRIPTION("x86 generic MSR driver");
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 96eedb519..dddd807c8 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -2,6 +2,8 @@
* linux/arch/i386/kernel/process.c
*
* Copyright (C) 1995 Linus Torvalds
+ * Pentium III code by Ingo Molnar, with changes and OS exception
+ * support by Goutham Rao
*/
/*
@@ -469,6 +471,94 @@ void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
return;
}
+#ifdef CONFIG_X86_FX
+
+int i387_hard_to_user ( struct _fpstate * user,
+ struct i387_hard_struct * hard)
+{
+ int i, err = 0;
+ short *tmp, *tmp2;
+ long *ltmp1, *ltmp2;
+
+ err |= put_user(hard->cwd, &user->cw);
+ err |= put_user(hard->swd, &user->sw);
+ err |= put_user(fputag_KNIto387(hard->twd), &user->tag);
+ err |= put_user(hard->fip, &user->ipoff);
+ err |= put_user(hard->fcs, &user->cssel);
+ err |= put_user(hard->fdp, &user->dataoff);
+ err |= put_user(hard->fds, &user->datasel);
+ err |= put_user(hard->mxcsr, &user->mxcsr);
+
+ tmp = (short *)&user->_st;
+ tmp2 = (short *)&hard->st_space;
+
+ /*
+ * Transform the two layouts:
+ * (we do not mix 32-bit access with 16-bit access because
+ * that's suboptimal on PPros)
+ */
+ for (i = 0; i < 8; i++)
+ {
+ err |= put_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= put_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= put_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= put_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= put_user(*tmp2, tmp); tmp++; tmp2 += 3;
+ }
+
+ ltmp1 = (unsigned long *)&(user->_xmm[0]);
+ ltmp2 = (unsigned long *)&(hard->xmm_space[0]);
+ for(i = 0; i < 88; i++)
+ {
+ err |= put_user(*ltmp2, ltmp1);
+ ltmp1++; ltmp2++;
+ }
+
+ return err;
+}
+
+int i387_user_to_hard (struct i387_hard_struct * hard,
+ struct _fpstate * user)
+{
+ int i, err = 0;
+ short *tmp, *tmp2;
+ long *ltmp1, *ltmp2;
+
+ err |= get_user(hard->cwd, &user->cw);
+ err |= get_user(hard->swd, &user->sw);
+ err |= get_user(hard->twd, &user->tag);
+ hard->twd = fputag_387toKNI(hard->twd);
+ err |= get_user(hard->fip, &user->ipoff);
+ err |= get_user(hard->fcs, &user->cssel);
+ err |= get_user(hard->fdp, &user->dataoff);
+ err |= get_user(hard->fds, &user->datasel);
+ err |= get_user(hard->mxcsr, &user->mxcsr);
+
+ tmp2 = (short *)&hard->st_space;
+ tmp = (short *)&user->_st;
+
+ for (i = 0; i < 8; i++)
+ {
+ err |= get_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= get_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= get_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= get_user(*tmp2, tmp); tmp++; tmp2++;
+ err |= get_user(*tmp2, tmp); tmp++; tmp2 += 3;
+ }
+
+ ltmp1 = (unsigned long *)(&user->_xmm[0]);
+ ltmp2 = (unsigned long *)(&hard->xmm_space[0]);
+ for(i = 0; i < (88); i++)
+ {
+ err |= get_user(*ltmp2, ltmp1);
+ ltmp2++; ltmp1++;
+ }
+
+ return err;
+}
+
+#endif
+
/*
* Save a segment.
*/
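
The i387_hard_to_user()/i387_user_to_hard() helpers above exist because FXSAVE keeps each x87 register in a 16-byte slot, while the legacy user-visible _fpstate packs them as 10-byte entries; the loops copy five 16-bit words per register and then skip the six padding bytes. A standalone sketch of that repacking, with hypothetical buffer names and none of the user-space copy machinery:

#include <string.h>
#include <stdint.h>

/* fxsave_st[] and legacy_st[] are illustrative names, not kernel fields. */
void fxsave_to_legacy(const uint8_t fxsave_st[8][16], uint8_t legacy_st[8][10])
{
    int i;

    for (i = 0; i < 8; i++)
        memcpy(legacy_st[i], fxsave_st[i], 10);   /* keep 10 bytes, drop 6 of padding */
}

void legacy_to_fxsave(const uint8_t legacy_st[8][10], uint8_t fxsave_st[8][16])
{
    int i;

    for (i = 0; i < 8; i++) {
        memcpy(fxsave_st[i], legacy_st[i], 10);
        memset(&fxsave_st[i][10], 0, 6);          /* padding bytes are don't-care */
    }
}
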
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index de9656150..01edbc37a 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -1,5 +1,6 @@
/* ptrace.c */
/* By Ross Biro 1/23/92 */
+/* FXSAVE/FXRSTOR support by Ingo Molnar and modifications by Goutham Rao */
/* edited by Linus Torvalds */
#include <linux/config.h> /* for CONFIG_MATH_EMULATION */
@@ -398,14 +399,14 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = 0;
if ( !child->used_math ) {
/* Simulate an empty FPU. */
- child->thread.i387.hard.cwd = 0xffff037f;
- child->thread.i387.hard.swd = 0xffff0000;
- child->thread.i387.hard.twd = 0xffffffff;
- }
+ i387_set_cwd(child->thread.i387.hard, 0x037f);
+ i387_set_swd(child->thread.i387.hard, 0x0000);
+ i387_set_twd(child->thread.i387.hard, 0xffff);
+ }
#ifdef CONFIG_MATH_EMULATION
if ( boot_cpu_data.hard_math ) {
#endif
- __copy_to_user((void *)data, &child->thread.i387.hard, sizeof(struct user_i387_struct));
+ i387_hard_to_user((struct _fpstate *)data, &child->thread.i387.hard);
#ifdef CONFIG_MATH_EMULATION
} else {
save_i387_soft(&child->thread.i387.soft, (struct _fpstate *)data);
@@ -423,7 +424,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
#ifdef CONFIG_MATH_EMULATION
if ( boot_cpu_data.hard_math ) {
#endif
- __copy_from_user(&child->thread.i387.hard, (void *)data, sizeof(struct user_i387_struct));
+ i387_user_to_hard(&child->thread.i387.hard,(struct _fpstate *)data);
#ifdef CONFIG_MATH_EMULATION
} else {
restore_i387_soft(&child->thread.i387.soft, (struct _fpstate *)data);
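
With the ptrace.c changes above, PTRACE_GETFPREGS and PTRACE_SETFPREGS go through the same layout converters, so a tracer keeps seeing the traditional user_i387_struct layout even when the kernel stores FXSAVE state internally. A hedged tracer-side sketch using the standard ptrace interface (struct user_fpregs_struct is the glibc name for that layout on i386):

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>

int main(void)
{
    pid_t pid = fork();

    if (pid == 0) {                              /* child: stop under the tracer */
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        raise(SIGSTOP);
        _exit(0);
    }
    waitpid(pid, NULL, 0);                       /* wait for the SIGSTOP */

    struct user_fpregs_struct fp;
    if (ptrace(PTRACE_GETFPREGS, pid, NULL, &fp) == 0)
        printf("child cwd=%#lx swd=%#lx twd=%#lx\n",
               (unsigned long)fp.cwd, (unsigned long)fp.swd,
               (unsigned long)fp.twd);

    kill(pid, SIGKILL);
    return 0;
}
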
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 358d9f917..3ea101d69 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -39,7 +39,8 @@
* Detection for Celeron coppermine, identify_cpu() overhauled,
* and a few other clean ups.
* Dave Jones <dave@powertweak.com>, April 2000
- *
+ * Pentium-III code by Ingo Molnar and modifications by Goutham Rao
+ *
*/
/*
@@ -800,14 +801,30 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
+#ifdef CONFIG_X86_FX
+ if (boot_cpu_data.x86_capability & X86_FEATURE_FXSR)
+ {
+ printk("Enabling extended fast FPU save and restore ... ");
+ set_in_cr4(X86_CR4_OSFXSR);
+ printk("done.\n");
+ }
+ if (boot_cpu_data.x86_capability & X86_FEATURE_XMM)
+ {
+ printk("Enabling KNI unmasked exception support ... ");
+ set_in_cr4(X86_CR4_OSXMMEXCPT);
+ printk("done.\n");
+ }
+#endif
}
static int __init get_model_name(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, *v;
- /* Actually we must have cpuid or we could never have
- * figured out that this was AMD/Cyrix from the vendor info :-).
+ /*
+ * Actually we must have cpuid or we could never have
+ * figured out that this was AMD/Cyrix/Transmeta
+ * from the vendor info :-).
*/
cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
@@ -1196,6 +1213,86 @@ static void __init centaur_model(struct cpuinfo_x86 *c)
sprintf( c->x86_model_id, "WinChip %s", name );
}
+static void __init transmeta_model(struct cpuinfo_x86 *c)
+{
+ unsigned int cap_mask, uk, max, dummy, n, ecx, edx;
+ unsigned int cms_rev1, cms_rev2;
+ unsigned int cpu_rev, cpu_freq, cpu_flags;
+ char cpu_info[65];
+
+ get_model_name(c); /* Same as AMD/Cyrix */
+
+ /* Print CMS and CPU revision */
+ cpuid(0x80860000, &max, &dummy, &dummy, &dummy);
+ if ( max >= 0x80860001 ) {
+ cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
+ printk("CPU: Processor revision %u.%u.%u.%u, %u MHz%s%s\n",
+ (cpu_rev >> 24) & 0xff,
+ (cpu_rev >> 16) & 0xff,
+ (cpu_rev >> 8) & 0xff,
+ cpu_rev & 0xff,
+ cpu_freq,
+ (cpu_flags & 1) ? " [recovery]" : "",
+ (cpu_flags & 2) ? " [longrun]" : "");
+ }
+ if ( max >= 0x80860002 ) {
+ cpuid(0x80860002, &dummy, &cms_rev1, &cms_rev2, &dummy);
+ printk("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+ (cms_rev1 >> 24) & 0xff,
+ (cms_rev1 >> 16) & 0xff,
+ (cms_rev1 >> 8) & 0xff,
+ cms_rev1 & 0xff,
+ cms_rev2);
+ }
+ if ( max >= 0x80860006 ) {
+ cpuid(0x80860003,
+ (void *)&cpu_info[0],
+ (void *)&cpu_info[4],
+ (void *)&cpu_info[8],
+ (void *)&cpu_info[12]);
+ cpuid(0x80860004,
+ (void *)&cpu_info[16],
+ (void *)&cpu_info[20],
+ (void *)&cpu_info[24],
+ (void *)&cpu_info[28]);
+ cpuid(0x80860005,
+ (void *)&cpu_info[32],
+ (void *)&cpu_info[36],
+ (void *)&cpu_info[40],
+ (void *)&cpu_info[44]);
+ cpuid(0x80860006,
+ (void *)&cpu_info[48],
+ (void *)&cpu_info[52],
+ (void *)&cpu_info[56],
+ (void *)&cpu_info[60]);
+ cpu_info[64] = '\0';
+ printk("CPU: %s\n", cpu_info);
+ }
+
+ /* Unhide possibly hidden flags */
+ rdmsr(0x80860004, cap_mask, uk);
+ wrmsr(0x80860004, ~0, uk);
+ cpuid(0x00000001, &dummy, &dummy, &dummy, &c->x86_capability);
+ wrmsr(0x80860004, cap_mask, uk);
+
+
+ /* L1/L2 cache */
+ cpuid(0x80000000, &n, &dummy, &dummy, &dummy);
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ printk("CPU: L1 I Cache: %dK L1 D Cache: %dK\n",
+ ecx>>24, edx>>24);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+ if (n >= 0x80000006) {
+ cpuid(0x80000006, &dummy, &dummy, &ecx, &edx);
+ printk("CPU: L2 Cache: %dK\n", ecx>>16);
+ c->x86_cache_size=(ecx>>16);
+ }
+}
+
+
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
char *v = c->x86_vendor_id;
@@ -1214,6 +1311,8 @@ void __init get_cpu_vendor(struct cpuinfo_x86 *c)
c->x86_vendor = X86_VENDOR_NEXGEN;
else if (!strcmp(v, "RiseRiseRise"))
c->x86_vendor = X86_VENDOR_RISE;
+ else if (!strcmp(v, "GenuineTMx86"))
+ c->x86_vendor = X86_VENDOR_TRANSMETA;
else
c->x86_vendor = X86_VENDOR_UNKNOWN;
}
@@ -1263,6 +1362,9 @@ static struct cpu_model_info cpu_models[] __initdata = {
{ X86_VENDOR_RISE, 5,
{ "mP6", "mP6", NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
+ { X86_VENDOR_TRANSMETA, 5,
+ { NULL, NULL, NULL, "Crusoe", NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }},
};
void __init identify_cpu(struct cpuinfo_x86 *c)
@@ -1275,6 +1377,16 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
get_cpu_vendor(c);
+ /* It should be possible for the user to override this. */
+ if(c->x86_capability&(1<<18)) {
+ /* Disable processor serial number */
+ unsigned long lo,hi;
+ rdmsr(0x119,lo,hi);
+ lo |= 0x200000;
+ wrmsr(0x119,lo,hi);
+ printk(KERN_INFO "CPU serial number disabled.\n");
+ }
+
switch (c->x86_vendor) {
case X86_VENDOR_UNKNOWN:
@@ -1296,16 +1408,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
return;
case X86_VENDOR_INTEL:
- if(c->x86_capability&(1<<18)) {
- /* Disable processor serial number on Intel Pentium III
- from code by Phil Karn */
- unsigned long lo,hi;
- rdmsr(0x119,lo,hi);
- lo |= 0x200000;
- wrmsr(0x119,lo,hi);
- printk(KERN_INFO "Pentium-III serial number disabled.\n");
- }
-
if (c->cpuid_level > 1) {
/* supports eax=2 call */
int edx, dummy;
@@ -1374,6 +1476,10 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
goto name_decoded;
break;
+
+ case X86_VENDOR_TRANSMETA:
+ transmeta_model(c);
+ return;
}
@@ -1412,7 +1518,7 @@ void __init dodgy_tsc(void)
static char *cpu_vendor_names[] __initdata = {
- "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise" };
+ "Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur", "Rise", "Transmeta" };
void __init print_cpu_info(struct cpuinfo_x86 *c)
@@ -1424,7 +1530,7 @@ void __init print_cpu_info(struct cpuinfo_x86 *c)
else if (c->cpuid_level >= 0)
vendor = c->x86_vendor_id;
- if (vendor)
+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
printk("%s ", vendor);
if (!c->x86_model_id[0])
@@ -1434,6 +1540,8 @@ void __init print_cpu_info(struct cpuinfo_x86 *c)
if (c->x86_mask || c->cpuid_level>=0)
printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
}
/*
@@ -1513,7 +1621,9 @@ int get_cpuinfo(char * buffer)
case X86_VENDOR_INTEL:
x86_cap_flags[16] = "pat";
+ x86_cap_flags[18] = "pn";
x86_cap_flags[24] = "fxsr";
+ x86_cap_flags[25] = "xmm";
break;
case X86_VENDOR_CENTAUR:
@@ -1522,7 +1632,7 @@ int get_cpuinfo(char * buffer)
break;
default:
- /* Unknown CPU manufacturer. Transmeta ? :-) */
+ /* Unknown CPU manufacturer or no special handling needed */
break;
}
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 4e813fac7..c35f5bae8 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -4,6 +4,8 @@
* Copyright (C) 1991, 1992 Linus Torvalds
*
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ * Pentium III support by Ingo Molnar, modifications and OS Exception support
+ * by Goutham Rao
*/
#include <linux/config.h>
@@ -30,6 +32,41 @@ asmlinkage int sys_wait4(pid_t pid, unsigned long *stat_addr,
int options, unsigned long *ru);
asmlinkage int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
+int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
+{
+ if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
+ return -EFAULT;
+ if (from->si_code < 0)
+ return __copy_to_user(to, from, sizeof(siginfo_t));
+ else {
+ int err;
+
+ /* If you change siginfo_t structure, please be sure
+ this code is fixed accordingly.
+ It should never copy any pad contained in the structure
+ to avoid security leaks, but must copy the generic
+ 3 ints plus the relevant union member. */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ /* First 32bits of unions are always present. */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ switch (from->si_code >> 16) {
+ case __SI_FAULT >> 16:
+ break;
+ case __SI_CHLD >> 16:
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ err |= __put_user(from->si_status, &to->si_status);
+ default:
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ /* case __SI_RT: This is not generated by the kernel as of now. */
+ }
+ return err;
+ }
+}
+
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
@@ -155,7 +192,7 @@ static inline int restore_i387_hard(struct _fpstate *buf)
{
struct task_struct *tsk = current;
clear_fpu(tsk);
- return __copy_from_user(&tsk->thread.i387.hard, buf, sizeof(*buf));
+ return i387_user_to_hard(&tsk->thread.i387.hard, buf);
}
static inline int restore_i387(struct _fpstate *buf)
@@ -309,7 +346,7 @@ static inline int save_i387_hard(struct _fpstate * buf)
unlazy_fpu(tsk);
tsk->thread.i387.hard.status = tsk->thread.i387.hard.swd;
- if (__copy_to_user(buf, &tsk->thread.i387.hard, sizeof(*buf)))
+ if (i387_hard_to_user(buf, &tsk->thread.i387.hard))
return -1;
return 1;
}
@@ -491,7 +528,7 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
&frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
- err |= __copy_to_user(&frame->info, info, sizeof(*info));
+ err |= copy_siginfo_to_user(&frame->info, info);
if (err)
goto give_sigsegv;
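
Together with the DO_*_ERROR_INFO macros added to traps.c further down, copy_siginfo_to_user() means user programs now receive a meaningful si_code and si_addr with these faults. A hedged userspace sketch using the standard sigaction SA_SIGINFO interface; the divide by zero below should arrive as SIGFPE with si_code FPE_INTDIV and si_addr pointing at the faulting instruction:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

/* printf is not async-signal-safe; acceptable for a throwaway demo. */
static void fpe_handler(int sig, siginfo_t *si, void *ctx)
{
    (void)sig; (void)ctx;
    printf("SIGFPE: si_code=%d (FPE_INTDIV=%d), si_addr=%p\n",
           si->si_code, FPE_INTDIV, si->si_addr);
    exit(0);
}

int main(void)
{
    struct sigaction sa;
    volatile int zero = 0;

    sigemptyset(&sa.sa_mask);
    sa.sa_sigaction = fpe_handler;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGFPE, &sa, NULL);

    return 1 / zero;                 /* integer divide by zero -> trap 0 -> SIGFPE */
}
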
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 94ba5c49f..e08418fe0 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -133,6 +133,11 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
unsigned int cfg;
/*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ /*
* No need to touch the target chip field
*/
cfg = __prepare_ICR(shortcut, vector);
@@ -173,6 +178,11 @@ static inline void send_IPI_mask(int mask, int vector)
__cli();
/*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ /*
* prepare target chip field
*/
cfg = __prepare_ICR2(mask);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index ae84ff2b5..e0ae38b28 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -502,6 +502,11 @@ static inline void inquire_remote_apic(int apicid)
for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
printk("... APIC #%d %s: ", apicid, names[i]);
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 7fb5ebc61..0df54449f 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -2,6 +2,7 @@
* linux/arch/i386/traps.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
+ * FXSAVE/FXRSTOR support by Ingo Molnar, OS exception support by Goutham Rao
*/
/*
@@ -82,6 +83,20 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
force_sig(signr, tsk); \
}
+#define DO_ERROR_INFO(trapnr, signr, str, name, tsk, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ tsk->thread.error_code = error_code; \
+ tsk->thread.trap_no = trapnr; \
+ die_if_no_fixup(str,regs,error_code); \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void *)siaddr; \
+ force_sig_info(signr, &info, tsk); \
+}
+
#define DO_VM86_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
@@ -99,6 +114,28 @@ out: \
unlock_kernel(); \
}
+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, tsk, sicode, siaddr) \
+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
+{ \
+ siginfo_t info; \
+ lock_kernel(); \
+ if (regs->eflags & VM_MASK) { \
+ if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr)) \
+ goto out; \
+ /* else fall through */ \
+ } \
+ tsk->thread.error_code = error_code; \
+ tsk->thread.trap_no = trapnr; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void *)siaddr; \
+ force_sig_info(signr, &info, tsk); \
+ die_if_kernel(str,regs,error_code); \
+out: \
+ unlock_kernel(); \
+}
+
void page_exception(void);
asmlinkage void divide_error(void);
@@ -120,6 +157,7 @@ asmlinkage void coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
+asmlinkage void xmm_fault(void);
int kstack_depth_to_print = 24;
@@ -260,34 +298,29 @@ static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
}
}
-DO_VM86_ERROR( 0, SIGFPE, "divide error", divide_error, current)
+static inline unsigned long get_cr2(void)
+{
+ unsigned long address;
+
+ /* get the address */
+ __asm__("movl %%cr2,%0":"=r" (address));
+ return address;
+}
+
+DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, current, FPE_INTDIV, regs->eip)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3, current)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow, current)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds, current)
-DO_ERROR( 6, SIGILL, "invalid operand", invalid_op, current)
+DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, current, ILL_ILLOPN, regs->eip)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available, current)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun, current)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present, current)
DO_ERROR(12, SIGBUS, "stack segment", stack_segment, current)
-DO_ERROR(17, SIGSEGV, "alignment check", alignment_check, current)
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, current, BUS_ADRALN, get_cr2())
DO_ERROR(18, SIGSEGV, "reserved", reserved, current)
-/* I don't have documents for this but it does seem to cover the cache
- flush from user space exception some people get. */
-DO_ERROR(19, SIGSEGV, "cache flush denied", cache_flush_denied, current)
-
-asmlinkage void cache_flush_denied(struct pt_regs * regs, long error_code)
-{
- if (regs->eflags & VM_MASK) {
- handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
- return;
- }
- die_if_kernel("cache flush denied",regs,error_code);
- current->thread.error_code = error_code;
- current->thread.trap_no = 19;
- force_sig(SIGSEGV, current);
-}
+DO_VM86_ERROR(19, SIGFPE, "XMM fault", xmm_fault, current)
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
@@ -485,6 +518,7 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
unsigned int condition;
struct task_struct *tsk = current;
+ siginfo_t info;
__asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
@@ -519,7 +553,11 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
/* Ok, finally something we can handle */
tsk->thread.trap_no = 1;
tsk->thread.error_code = error_code;
- force_sig(SIGTRAP, tsk);
+ info.si_signo = SIGTRAP;
+ info.si_errno = 0;
+ info.si_code = TRAP_BRKPT;
+ info.si_addr = (void *)regs->eip;
+ force_sig_info(SIGTRAP, &info, tsk);
return;
debug_vm86:
@@ -544,9 +582,10 @@ clear_TF:
* the correct behaviour even in the presence of the asynchronous
* IRQ13 behaviour
*/
-void math_error(void)
+void math_error(void *eip)
{
struct task_struct * task;
+ siginfo_t info;
/*
* Save the info for the exception handler
@@ -556,13 +595,52 @@ void math_error(void)
save_fpu(task);
task->thread.trap_no = 16;
task->thread.error_code = 0;
- force_sig(SIGFPE, task);
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ switch(((~task->thread.i387.hard.cwd) &
+ task->thread.i387.hard.swd & 0x3f) |
+ (task->thread.i387.hard.swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x040: /* Stack Fault */
+ case 0x240: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
}
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
ignore_irq13 = 1;
- math_error();
+ math_error((void *)regs->eip);
}
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
@@ -586,7 +664,7 @@ asmlinkage void math_state_restore(struct pt_regs regs)
__asm__ __volatile__("clts"); /* Allow maths ops (or we recurse) */
if(current->used_math)
- __asm__("frstor %0": :"m" (current->thread.i387));
+ i387_restore_hard(current->thread.i387);
else
{
/*
@@ -829,6 +907,8 @@ void __init trap_init(void)
set_trap_gate(15,&spurious_interrupt_bug);
set_trap_gate(16,&coprocessor_error);
set_trap_gate(17,&alignment_check);
+ set_trap_gate(19,&xmm_fault);
+
set_system_gate(SYSCALL_VECTOR,&system_call);
/*
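
The switch added to math_error() above picks the siginfo code by masking the x87 status word with the inverted control word (so only unmasked exceptions count) and folding in the stack-fault and C1 bits. The same decode pulled out of the kernel structures into a plain function, for illustration only:

#include <signal.h>

/* Map x87 control/status words to an FPE_* code, mirroring math_error(). */
int fpu_si_code(unsigned short cwd, unsigned short swd)
{
    /* unmasked pending exceptions, plus stack-fault (0x040) and C1 (0x200) */
    switch (((~cwd & swd) & 0x3f) | (swd & 0x240)) {
    case 0x001:                       /* invalid operation */
    case 0x040:                       /* stack fault */
    case 0x240:                       /* stack fault with C1 set */
        return FPE_FLTINV;
    case 0x002:                       /* denormal operand */
    case 0x010:                       /* underflow */
        return FPE_FLTUND;
    case 0x004:                       /* divide by zero */
        return FPE_FLTDIV;
    case 0x008:                       /* overflow */
        return FPE_FLTOVF;
    case 0x020:                       /* precision (inexact) */
        return FPE_FLTRES;
    default:
        return 0;                     /* ambiguous or no single exception pending */
    }
}
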
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 697645988..b88c4a422 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -124,13 +124,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
unsigned long page;
unsigned long fixup;
int write;
- int si_code = SEGV_MAPERR;
+ siginfo_t info;
/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
tsk = current;
mm = tsk->mm;
+ info.si_code = SEGV_MAPERR;
/*
* If we're in an interrupt or have no user
@@ -165,9 +166,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
* we can handle it..
*/
good_area:
+ info.si_code = SEGV_ACCERR;
write = 0;
- si_code = SEGV_ACCERR;
-
switch (error_code & 3) {
default: /* 3: write, present */
#ifdef TEST_VERIFY_AREA
@@ -225,14 +225,14 @@ bad_area:
/* User mode accesses just cause a SIGSEGV */
if (error_code & 4) {
- struct siginfo si;
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 14;
- si.si_signo = SIGSEGV;
- si.si_code = si_code;
- si.si_addr = (void*) address;
- force_sig_info(SIGSEGV, &si, tsk);
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
return;
}
@@ -309,7 +309,11 @@ do_sigbus:
tsk->thread.cr2 = address;
tsk->thread.error_code = error_code;
tsk->thread.trap_no = 14;
- force_sig(SIGBUS, tsk);
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void *)address;
+ force_sig_info(SIGBUS, &info, tsk);
/* Kernel mode? Handle exceptions or die */
if (!(error_code & 4))