summaryrefslogtreecommitdiffstats
path: root/arch/i386/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--arch/i386/kernel/bios32.c135
-rw-r--r--arch/i386/kernel/entry.S2
-rw-r--r--arch/i386/kernel/i386_ksyms.c7
-rw-r--r--arch/i386/kernel/io_apic.c133
-rw-r--r--arch/i386/kernel/irq.c237
-rw-r--r--arch/i386/kernel/irq.h34
-rw-r--r--arch/i386/kernel/mca.c548
-rw-r--r--arch/i386/kernel/mtrr.c909
-rw-r--r--arch/i386/kernel/process.c6
-rw-r--r--arch/i386/kernel/ptrace.c11
-rw-r--r--arch/i386/kernel/setup.c135
-rw-r--r--arch/i386/kernel/smp.c166
-rw-r--r--arch/i386/kernel/time.c177
-rw-r--r--arch/i386/kernel/traps.c8
-rw-r--r--arch/i386/kernel/visws_apic.c8
15 files changed, 1689 insertions, 827 deletions
diff --git a/arch/i386/kernel/bios32.c b/arch/i386/kernel/bios32.c
index e7383e55b..91d338b2c 100644
--- a/arch/i386/kernel/bios32.c
+++ b/arch/i386/kernel/bios32.c
@@ -14,7 +14,7 @@
* Hannover, Germany
* hm@ix.de
*
- * Copyright 1997, 1998 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright 1997--1999 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*
* For more information, please consult the following manuals (look at
* http://www.pcisig.com/ for how to get them):
@@ -71,6 +71,10 @@
* a large gallery of common hardware bug workarounds (watch the comments)
* -- the PCI specs themselves are sane, but most implementors should be
* hit hard with \hammer scaled \magstep5. [mj]
+ *
+ * Jan 23, 1999 : More improvements to peer host bridge logic. i450NX fixup. [mj]
+ *
+ * Feb 8, 1999 : Added UM8886BF I/O address fixup. [mj]
*/
#include <linux/config.h>
@@ -171,6 +175,7 @@ PCI_STUB(write, dword, u32)
#define PCI_NO_SORT 0x100
#define PCI_BIOS_SORT 0x200
#define PCI_NO_CHECKS 0x400
+#define PCI_NO_PEER_FIXUP 0x800
static unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2;
@@ -521,6 +526,8 @@ static struct {
unsigned short segment;
} pci_indirect = { 0, __KERNEL_CS };
+static int pci_bios_present;
+
__initfunc(static int check_pcibios(void))
{
u32 signature, eax, ebx, ecx;
@@ -803,7 +810,7 @@ __initfunc(static struct pci_access *pci_find_bios(void))
* which used BIOS ordering, we are bound to do this...
*/
-__initfunc(void pcibios_sort(void))
+static void __init pcibios_sort(void)
{
struct pci_dev *dev = pci_devices;
struct pci_dev **last = &pci_devices;
@@ -856,7 +863,7 @@ __initfunc(void pcibios_sort(void))
static int pci_last_io_addr __initdata = 0x5800;
-__initfunc(void pcibios_fixup_io_addr(struct pci_dev *dev, int idx))
+static void __init pcibios_fixup_io_addr(struct pci_dev *dev, int idx)
{
unsigned short cmd;
unsigned int reg = PCI_BASE_ADDRESS_0 + 4*idx;
@@ -868,13 +875,16 @@ __initfunc(void pcibios_fixup_io_addr(struct pci_dev *dev, int idx))
printk("PCI: Unassigned I/O space for %02x:%02x\n", bus, devfn);
return;
}
- if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && idx < 4) {
+ if (((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && idx < 4) ||
+ (dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
/*
* In case the BIOS didn't assign an address 0--3 to an IDE
* controller, we don't try to fix it as it means "use default
* addresses" at least with several broken chips and the IDE
* driver needs the original settings to recognize which devices
* correspond to the primary controller.
+ *
+ * We don't assign VGA I/O ranges as well.
*/
return;
}
@@ -914,7 +924,7 @@ __initfunc(void pcibios_fixup_io_addr(struct pci_dev *dev, int idx))
* expected to be unique) and remove the ghost devices.
*/
-__initfunc(void pcibios_fixup_ghosts(struct pci_bus *b))
+static void __init pcibios_fixup_ghosts(struct pci_bus *b)
{
struct pci_dev *d, *e, **z;
int mirror = PCI_DEVFN(16,0);
@@ -954,12 +964,17 @@ __initfunc(void pcibios_fixup_ghosts(struct pci_bus *b))
* the reality doesn't pass this test and the bus number is usually
* set by BIOS to the first free value.
*/
-__initfunc(void pcibios_fixup_peer_bridges(void))
+static void __init pcibios_fixup_peer_bridges(void)
{
struct pci_bus *b = &pci_root;
int i, n, cnt=-1;
struct pci_dev *d;
+#ifdef CONFIG_VISWS
+ pci_scan_peer_bridge(1);
+ return;
+#endif
+
#ifdef CONFIG_PCI_DIRECT
/*
* Don't search for peer host bridges if we use config type 2
@@ -969,6 +984,7 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
if (access_pci == &pci_direct_conf2)
return;
#endif
+
for(d=b->devices; d; d=d->sibling)
if ((d->class >> 8) == PCI_CLASS_BRIDGE_HOST)
cnt++;
@@ -979,6 +995,20 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
for(i=0; i<256; i += 8)
if (!pcibios_read_config_word(n, i, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
+#ifdef CONFIG_PCI_BIOS
+ if (pci_bios_present) {
+ int err, idx = 0;
+ u8 bios_bus, bios_dfn;
+ u16 d;
+ pcibios_read_config_word(n, i, PCI_DEVICE_ID, &d);
+ DBG("BIOS test for %02x:%02x (%04x:%04x)\n", n, i, l, d);
+ while (!(err = pci_bios_find_device(l, d, idx, &bios_bus, &bios_dfn)) &&
+ (bios_bus != n || bios_dfn != i))
+ idx++;
+ if (err)
+ break;
+ }
+#endif
DBG("Found device at %02x:%02x\n", n, i);
found++;
if (!pcibios_read_config_word(n, i, PCI_CLASS_DEVICE, &l) &&
@@ -989,13 +1019,7 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
break;
if (found) {
printk("PCI: Discovered primary peer bus %02x\n", n);
- b = kmalloc(sizeof(*b), GFP_KERNEL);
- memset(b, 0, sizeof(*b));
- b->next = pci_root.next;
- pci_root.next = b;
- b->number = b->secondary = n;
- b->subordinate = 0xff;
- b->subordinate = pci_scan_bus(b);
+ b = pci_scan_peer_bridge(n);
n = b->subordinate;
}
n++;
@@ -1003,11 +1027,77 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
}
/*
+ * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
+ */
+
+static void __init pci_fixup_i450nx(struct pci_dev *d)
+{
+ /*
+ * i450NX -- Find and scan all secondary buses on all PXB's.
+ */
+ int pxb, reg;
+ u8 busno, suba, subb;
+ reg = 0xd0;
+ for(pxb=0; pxb<2; pxb++) {
+ pci_read_config_byte(d, reg++, &busno);
+ pci_read_config_byte(d, reg++, &suba);
+ pci_read_config_byte(d, reg++, &subb);
+ DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb);
+ if (busno)
+ pci_scan_peer_bridge(busno); /* Bus A */
+ if (suba < subb)
+ pci_scan_peer_bridge(suba+1); /* Bus B */
+ }
+ pci_probe |= PCI_NO_PEER_FIXUP;
+}
+
+static void __init pci_fixup_umc_ide(struct pci_dev *d)
+{
+ /*
+ * UM8886BF IDE controller sets region type bits incorrectly,
+ * therefore they look like memory despite of them being I/O.
+ */
+ int i;
+
+ for(i=0; i<4; i++)
+ d->base_address[i] |= PCI_BASE_ADDRESS_SPACE_IO;
+}
+
+struct dev_ex {
+ u16 vendor, device;
+ void (*handler)(struct pci_dev *);
+ char *comment;
+};
+
+static struct dev_ex __initdata dev_ex_table[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx, "Scanning peer host bridges" },
+ { PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide, "Working around UM8886BF bugs" }
+};
+
+static void __init pcibios_scan_buglist(struct pci_bus *b)
+{
+ struct pci_dev *d;
+ int i;
+
+ for(d=b->devices; d; d=d->sibling)
+ for(i=0; i<sizeof(dev_ex_table)/sizeof(dev_ex_table[0]); i++) {
+ struct dev_ex *e = &dev_ex_table[i];
+ if (e->vendor == d->vendor && e->device == d->device) {
+ printk("PCI: %02x:%02x [%04x/%04x]: %s\n",
+ b->number, d->devfn, d->vendor, d->device, e->comment);
+ e->handler(d);
+ }
+ }
+}
+
+/*
* Fix base addresses, I/O and memory enables and IRQ's (mostly work-arounds
* for buggy PCI BIOS'es :-[).
*/
-__initfunc(void pcibios_fixup_devices(void))
+extern int skip_ioapic_setup;
+
+static void __init pcibios_fixup_devices(void)
{
struct pci_dev *dev;
int i, has_io, has_mem;
@@ -1059,6 +1149,7 @@ __initfunc(void pcibios_fixup_devices(void))
/*
* Recalculate IRQ numbers if we use the I/O APIC
*/
+ if(!skip_ioapic_setup)
{
int irq;
unsigned char pin;
@@ -1099,7 +1190,8 @@ __initfunc(void pcibios_fixup_devices(void))
__initfunc(void pcibios_fixup(void))
{
- pcibios_fixup_peer_bridges();
+ if (!(pci_probe & PCI_NO_PEER_FIXUP))
+ pcibios_fixup_peer_bridges();
pcibios_fixup_devices();
#ifdef CONFIG_PCI_BIOS
@@ -1111,6 +1203,7 @@ __initfunc(void pcibios_fixup(void))
__initfunc(void pcibios_fixup_bus(struct pci_bus *b))
{
pcibios_fixup_ghosts(b);
+ pcibios_scan_buglist(b);
}
/*
@@ -1126,8 +1219,10 @@ __initfunc(void pcibios_init(void))
struct pci_access *dir = NULL;
#ifdef CONFIG_PCI_BIOS
- if ((pci_probe & PCI_PROBE_BIOS) && ((bios = pci_find_bios())))
+ if ((pci_probe & PCI_PROBE_BIOS) && ((bios = pci_find_bios()))) {
pci_probe |= PCI_BIOS_SORT;
+ pci_bios_present = 1;
+ }
#endif
#ifdef CONFIG_PCI_DIRECT
if (pci_probe & (PCI_PROBE_CONF1 | PCI_PROBE_CONF2))
@@ -1139,10 +1234,6 @@ __initfunc(void pcibios_init(void))
access_pci = bios;
}
-#if !defined(CONFIG_PCI_BIOS) && !defined(CONFIG_PCI_DIRECT)
-#error PCI configured with neither PCI BIOS or PCI direct access support.
-#endif
-
__initfunc(char *pcibios_setup(char *str))
{
if (!strcmp(str, "off")) {
@@ -1178,5 +1269,9 @@ __initfunc(char *pcibios_setup(char *str))
return NULL;
}
#endif
+ else if (!strcmp(str, "nopeer")) {
+ pci_probe |= PCI_NO_PEER_FIXUP;
+ return NULL;
+ }
return str;
}
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 0153c4b40..3a5fc93a1 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -154,7 +154,9 @@ ENTRY(lcall7)
.globl ret_from_fork
ret_from_fork:
#ifdef __SMP__
+ pushl %ebx
call SYMBOL_NAME(schedule_tail)
+ addl $4, %esp
#endif /* __SMP__ */
GET_CURRENT(%ebx)
jmp ret_from_sys_call
diff --git a/arch/i386/kernel/i386_ksyms.c b/arch/i386/kernel/i386_ksyms.c
index cd9074796..f0d5d3378 100644
--- a/arch/i386/kernel/i386_ksyms.c
+++ b/arch/i386/kernel/i386_ksyms.c
@@ -39,10 +39,12 @@ EXPORT_SYMBOL(local_bh_count);
EXPORT_SYMBOL(local_irq_count);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
+EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy);
@@ -74,8 +76,11 @@ EXPORT_SYMBOL(strlen_user);
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_invalidate_needed);
+EXPORT_SYMBOL(cpu_number_map);
EXPORT_SYMBOL(__cpu_logical_map);
EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(cpu_present_map);
+EXPORT_SYMBOL(cpu_online_map);
/* Global SMP irq stuff */
EXPORT_SYMBOL(synchronize_irq);
@@ -87,7 +92,7 @@ EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
-EXPORT_SYMBOL(mtrr_hook);
+EXPORT_SYMBOL(smp_call_function);
#endif
#ifdef CONFIG_MCA
diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c
index 232abf78d..42ebd9643 100644
--- a/arch/i386/kernel/io_apic.c
+++ b/arch/i386/kernel/io_apic.c
@@ -202,7 +202,7 @@ DO_ACTION( enable, 1, |= 0xff000000, ) /* destination = 0xff */
DO_ACTION( mask, 0, |= 0x00010000, io_apic_sync()) /* mask = 1 */
DO_ACTION( unmask, 0, &= 0xfffeffff, ) /* mask = 0 */
-static void __init clear_IO_APIC_pin(unsigned int pin)
+static void clear_IO_APIC_pin(unsigned int pin)
{
struct IO_APIC_route_entry entry;
@@ -215,6 +215,13 @@ static void __init clear_IO_APIC_pin(unsigned int pin)
io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
}
+static void clear_IO_APIC (void)
+{
+ int pin;
+
+ for (pin = 0; pin < nr_ioapic_registers; pin++)
+ clear_IO_APIC_pin(pin);
+}
/*
* support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
@@ -286,7 +293,8 @@ static int __init find_timer_pin(int type)
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
- if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA) &&
+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
+ mp_bus_id_to_type[lbus] == MP_BUS_EISA) &&
(mp_irqs[i].mpc_irqtype == type) &&
(mp_irqs[i].mpc_srcbusirq == 0x00))
@@ -319,20 +327,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
}
/*
- * Unclear documentation on what a "conforming ISA interrupt" means.
- *
- * Should we, or should we not, take the ELCR register into account?
- * It's part of the EISA specification, but maybe it should only be
- * used if the interrupt is actually marked as EISA?
- *
- * Oh, well. Don't do it until somebody tells us what the right thing
- * to do is..
- */
-#undef USE_ELCR_TRIGGER_LEVEL
-#ifdef USE_ELCR_TRIGGER_LEVEL
-
-/*
- * ISA Edge/Level control register, ELCR
+ * EISA Edge/Level control register, ELCR
*/
static int __init EISA_ELCR(unsigned int irq)
{
@@ -342,18 +337,22 @@ static int __init EISA_ELCR(unsigned int irq)
}
printk("Broken MPtable reports ISA irq %d\n", irq);
return 0;
-}
+}
-#define default_ISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_dstirq))
-#define default_ISA_polarity(idx) (0)
+/* EISA interrupts are always polarity zero and can be edge or level
+ * trigger depending on the ELCR value. If an interrupt is listed as
+ * EISA conforming in the MP table, that means its trigger type must
+ * be read in from the ELCR */
-#else
+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_dstirq))
+#define default_EISA_polarity(idx) (0)
+
+/* ISA interrupts are always polarity zero edge triggered, even when
+ * listed as conforming in the MP table. */
#define default_ISA_trigger(idx) (0)
#define default_ISA_polarity(idx) (0)
-#endif
-
static int __init MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
@@ -373,6 +372,11 @@ static int __init MPBIOS_polarity(int idx)
polarity = default_ISA_polarity(idx);
break;
}
+ case MP_BUS_EISA:
+ {
+ polarity = default_EISA_polarity(idx);
+ break;
+ }
case MP_BUS_PCI: /* PCI pin */
{
polarity = 1;
@@ -432,6 +436,11 @@ static int __init MPBIOS_trigger(int idx)
trigger = default_ISA_trigger(idx);
break;
}
+ case MP_BUS_EISA:
+ {
+ trigger = default_EISA_trigger(idx);
+ break;
+ }
case MP_BUS_PCI: /* PCI pin, level */
{
trigger = 1;
@@ -496,6 +505,7 @@ static int __init pin_2_irq(int idx, int pin)
switch (mp_bus_id_to_type[bus])
{
case MP_BUS_ISA: /* ISA pin */
+ case MP_BUS_EISA:
{
irq = mp_irqs[idx].mpc_srcbusirq;
break;
@@ -562,6 +572,9 @@ static int __init assign_irq_vector(int irq)
printk("WARNING: ASSIGN_IRQ_VECTOR wrapped back to %02X\n",
current_vector);
}
+ if (current_vector == SYSCALL_VECTOR)
+ panic("ran out of interrupt sources!");
+
IO_APIC_VECTOR(irq) = current_vector;
return current_vector;
}
@@ -625,7 +638,7 @@ void __init setup_IO_APIC_irqs(void)
/*
* Set up a certain pin as ExtINT delivered interrupt
*/
-void __init setup_ExtINT_pin(unsigned int pin)
+void __init setup_ExtINT_pin(unsigned int pin, int irq)
{
struct IO_APIC_route_entry entry;
@@ -635,11 +648,16 @@ void __init setup_ExtINT_pin(unsigned int pin)
memset(&entry,0,sizeof(entry));
entry.delivery_mode = dest_ExtINT;
- entry.dest_mode = 1; /* logical delivery */
+ entry.dest_mode = 0; /* physical delivery */
entry.mask = 0; /* unmask IRQ now */
- entry.dest.logical.logical_dest = 0x01; /* logical CPU #0 */
+ /*
+ * We use physical delivery to get the timer IRQ
+ * to the boot CPU. 'boot_cpu_id' is the physical
+ * APIC ID of the boot CPU.
+ */
+ entry.dest.physical.physical_dest = boot_cpu_id;
- entry.vector = 0; /* it's ignored */
+ entry.vector = assign_irq_vector(irq);
entry.polarity = 0;
entry.trigger = 0;
@@ -681,9 +699,11 @@ void __init print_IO_APIC(void)
printk(".... register #01: %08X\n", *(int *)&reg_01);
printk("....... : max redirection entries: %04X\n", reg_01.entries);
- if ( (reg_01.entries != 0x0f) && /* ISA-only Neptune boards */
- (reg_01.entries != 0x17) && /* ISA+PCI boards */
- (reg_01.entries != 0x3F) /* Xeon boards */
+ if ( (reg_01.entries != 0x0f) && /* older (Neptune) boards */
+ (reg_01.entries != 0x17) && /* typical ISA+PCI boards */
+ (reg_01.entries != 0x1b) && /* Compaq Proliant boards */
+ (reg_01.entries != 0x1f) && /* dual Xeon boards */
+ (reg_01.entries != 0x3F) /* bigger Xeon boards */
)
UNEXPECTED_IO_APIC();
if (reg_01.entries == 0x0f)
@@ -754,7 +774,7 @@ void __init print_IO_APIC(void)
static void __init init_sym_mode(void)
{
- int i, pin;
+ int i;
for (i = 0; i < PIN_MAP_SIZE; i++) {
irq_2_pin[i].pin = -1;
@@ -784,8 +804,7 @@ static void __init init_sym_mode(void)
/*
* Do not trust the IO-APIC being empty at bootup
*/
- for (pin = 0; pin < nr_ioapic_registers; pin++)
- clear_IO_APIC_pin(pin);
+ clear_IO_APIC();
}
/*
@@ -793,6 +812,15 @@ static void __init init_sym_mode(void)
*/
void init_pic_mode(void)
{
+ /*
+ * Clear the IO-APIC before rebooting:
+ */
+ clear_IO_APIC();
+
+ /*
+ * Put it back into PIC mode (has an effect only on
+ * certain boards)
+ */
printk("disabling symmetric IO mode... ");
outb_p(0x70, 0x22);
outb_p(0x00, 0x23);
@@ -885,6 +913,8 @@ static void __init setup_ioapic_id(void)
static void __init construct_default_ISA_mptable(void)
{
int i, pos = 0;
+ const int bus_type = (mpc_default_type == 2 || mpc_default_type == 3 ||
+ mpc_default_type == 6) ? MP_BUS_EISA : MP_BUS_ISA;
for (i = 0; i < 16; i++) {
if (!IO_APIC_IRQ(i))
@@ -892,14 +922,14 @@ static void __init construct_default_ISA_mptable(void)
mp_irqs[pos].mpc_irqtype = mp_INT;
mp_irqs[pos].mpc_irqflag = 0; /* default */
- mp_irqs[pos].mpc_srcbus = MP_BUS_ISA;
+ mp_irqs[pos].mpc_srcbus = 0;
mp_irqs[pos].mpc_srcbusirq = i;
mp_irqs[pos].mpc_dstapic = 0;
mp_irqs[pos].mpc_dstirq = i;
pos++;
}
mp_irq_entries = pos;
- mp_bus_id_to_type[0] = MP_BUS_ISA;
+ mp_bus_id_to_type[0] = bus_type;
/*
* MP specification 1.4 defines some extra rules for default
@@ -1019,7 +1049,7 @@ static void do_edge_ioapic_IRQ(unsigned int irq, struct pt_regs * regs)
* and do not need to be masked.
*/
ack_APIC_irq();
- status = desc->status & ~IRQ_REPLAY;
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING;
/*
@@ -1030,8 +1060,9 @@ static void do_edge_ioapic_IRQ(unsigned int irq, struct pt_regs * regs)
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
status &= ~IRQ_PENDING;
+ status |= IRQ_INPROGRESS;
}
- desc->status = status | IRQ_INPROGRESS;
+ desc->status = status;
spin_unlock(&irq_controller_lock);
/*
@@ -1073,7 +1104,7 @@ static void do_level_ioapic_IRQ(unsigned int irq, struct pt_regs * regs)
* So this all has to be within the spinlock.
*/
mask_IO_APIC_irq(irq);
- status = desc->status & ~IRQ_REPLAY;
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
/*
* If the IRQ is disabled for whatever reason, we must
@@ -1082,8 +1113,9 @@ static void do_level_ioapic_IRQ(unsigned int irq, struct pt_regs * regs)
action = NULL;
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
+ status |= IRQ_INPROGRESS;
}
- desc->status = status | IRQ_INPROGRESS;
+ desc->status = status;
ack_APIC_irq();
spin_unlock(&irq_controller_lock);
@@ -1143,7 +1175,7 @@ static inline void init_IO_APIC_traps(void)
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
for (i = 0; i < NR_IRQS ; i++) {
- if (IO_APIC_IRQ(i)) {
+ if (IO_APIC_VECTOR(i) > 0) {
if (IO_APIC_irq_trigger(i))
irq_desc[i].handler = &ioapic_level_irq_type;
else
@@ -1153,8 +1185,25 @@ static inline void init_IO_APIC_traps(void)
*/
if (i < 16)
disable_8259A_irq(i);
+ } else {
+ if (!IO_APIC_IRQ(i))
+ continue;
+
+ /*
+ * Hmm.. We don't have an entry for this,
+ * so default to an old-fashioned 8259
+ * interrupt if we can..
+ */
+ if (i < 16) {
+ make_8259A_irq(i);
+ continue;
+ }
+
+ /* Strange. Oh, well.. */
+ irq_desc[i].handler = &no_irq_type;
}
}
+ init_IRQ_SMP();
}
/*
@@ -1178,7 +1227,7 @@ static inline void check_timer(void)
if (pin2 != -1) {
printk(".. (found pin %d) ...", pin2);
- setup_ExtINT_pin(pin2);
+ setup_ExtINT_pin(pin2, 0);
make_8259A_irq(0);
}
@@ -1258,14 +1307,12 @@ void __init setup_IO_APIC(void)
construct_default_ISA_mptable();
}
- init_IO_APIC_traps();
-
/*
* Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
* mptable:
*/
setup_IO_APIC_irqs();
- init_IRQ_SMP();
+ init_IO_APIC_traps();
check_timer();
print_IO_APIC();
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 37878f59f..ea218fe45 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -70,11 +70,34 @@ atomic_t nmi_counter;
*/
spinlock_t irq_controller_lock;
-
/*
* Dummy controller type for unused interrupts
*/
-static void do_none(unsigned int irq, struct pt_regs * regs) { }
+static void do_none(unsigned int irq, struct pt_regs * regs)
+{
+ /*
+ * we are careful. While for ISA irqs it's common to happen
+ * outside of any driver (think autodetection), this is not
+ * at all nice for PCI interrupts. So we are stricter and
+ * print a warning when such spurious interrupts happen.
+ * Spurious interrupts can confuse other drivers if the PCI
+ * IRQ line is shared.
+ *
+ * Such spurious interrupts are either driver bugs, or
+ * sometimes hw (chipset) bugs.
+ */
+ printk("unexpected IRQ vector %d on CPU#%d!\n",irq, smp_processor_id());
+
+#ifdef __SMP__
+ /*
+ * [currently unexpected vectors happen only on SMP and APIC.
+ * if we want to have non-APIC and non-8259A controllers
+ * in the future with unexpected vectors, this ack should
+ * probably be made controller-specific.]
+ */
+ ack_APIC_irq();
+#endif
+}
static void enable_none(unsigned int irq) { }
static void disable_none(unsigned int irq) { }
@@ -82,7 +105,7 @@ static void disable_none(unsigned int irq) { }
#define startup_none enable_none
#define shutdown_none disable_none
-static struct hw_interrupt_type no_irq_type = {
+struct hw_interrupt_type no_irq_type = {
"none",
startup_none,
shutdown_none,
@@ -128,10 +151,7 @@ irq_desc_t irq_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = { 0, &no_irq_type, }};
*/
static unsigned int cached_irq_mask = 0xffff;
-#define __byte(x,y) (((unsigned char *)&(y))[x])
-#define __word(x,y) (((unsigned short *)&(y))[x])
-#define __long(x,y) (((unsigned int *)&(y))[x])
-
+#define __byte(x,y) (((unsigned char *)&(y))[x])
#define cached_21 (__byte(0,cached_irq_mask))
#define cached_A1 (__byte(1,cached_irq_mask))
@@ -141,10 +161,10 @@ static unsigned int cached_irq_mask = 0xffff;
* fed to the CPU IRQ line directly.
*
* Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
- * this 'mixed mode' IRQ handling costs us one more branch in do_IRQ,
- * but we have _much_ higher compatibility and robustness this way.
+ * this 'mixed mode' IRQ handling costs nothing because it's only used
+ * at IRQ setup time.
*/
-unsigned long long io_apic_irqs = 0;
+unsigned long io_apic_irqs = 0;
/*
* These have to be protected by the irq controller spinlock
@@ -183,8 +203,8 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
- disable_irq(irq);
- __long(0,io_apic_irqs) &= ~(1<<irq);
+ disable_irq_nosync(irq);
+ io_apic_irqs &= ~(1<<irq);
irq_desc[irq].handler = &i8259A_irq_type;
enable_irq(irq);
}
@@ -219,11 +239,13 @@ static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
{
unsigned int status;
mask_and_ack_8259A(irq);
- status = desc->status & ~IRQ_REPLAY;
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
action = NULL;
- if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
- desc->status = status | IRQ_INPROGRESS;
+ status |= IRQ_INPROGRESS;
+ }
+ desc->status = status;
}
spin_unlock(&irq_controller_lock);
@@ -254,32 +276,43 @@ static void do_8259A_IRQ(unsigned int irq, struct pt_regs * regs)
BUILD_COMMON_IRQ()
+
+#define BI(x,y) \
+ BUILD_IRQ(##x##y)
+
+#define BUILD_16_IRQS(x) \
+ BI(x,0) BI(x,1) BI(x,2) BI(x,3) \
+ BI(x,4) BI(x,5) BI(x,6) BI(x,7) \
+ BI(x,8) BI(x,9) BI(x,a) BI(x,b) \
+ BI(x,c) BI(x,d) BI(x,e) BI(x,f)
+
/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
+ * (these are usually mapped to vectors 0x20-0x30)
*/
-BUILD_IRQ(0) BUILD_IRQ(1) BUILD_IRQ(2) BUILD_IRQ(3)
-BUILD_IRQ(4) BUILD_IRQ(5) BUILD_IRQ(6) BUILD_IRQ(7)
-BUILD_IRQ(8) BUILD_IRQ(9) BUILD_IRQ(10) BUILD_IRQ(11)
-BUILD_IRQ(12) BUILD_IRQ(13) BUILD_IRQ(14) BUILD_IRQ(15)
+BUILD_16_IRQS(0x0)
#ifdef CONFIG_X86_IO_APIC
/*
- * The IO-APIC gives us many more interrupt sources..
+ * The IO-APIC gives us many more interrupt sources. Most of these
+ * are unused but an SMP system is supposed to have enough memory ...
+ * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
+ * across the spectrum, so we really want to be prepared to get all
+ * of these. Plus, more powerful systems might have more than 64
+ * IO-APIC registers.
+ *
+ * (these are usually mapped into the 0x30-0xff vector range)
*/
-BUILD_IRQ(16) BUILD_IRQ(17) BUILD_IRQ(18) BUILD_IRQ(19)
-BUILD_IRQ(20) BUILD_IRQ(21) BUILD_IRQ(22) BUILD_IRQ(23)
-BUILD_IRQ(24) BUILD_IRQ(25) BUILD_IRQ(26) BUILD_IRQ(27)
-BUILD_IRQ(28) BUILD_IRQ(29) BUILD_IRQ(30) BUILD_IRQ(31)
-BUILD_IRQ(32) BUILD_IRQ(33) BUILD_IRQ(34) BUILD_IRQ(35)
-BUILD_IRQ(36) BUILD_IRQ(37) BUILD_IRQ(38) BUILD_IRQ(39)
-BUILD_IRQ(40) BUILD_IRQ(41) BUILD_IRQ(42) BUILD_IRQ(43)
-BUILD_IRQ(44) BUILD_IRQ(45) BUILD_IRQ(46) BUILD_IRQ(47)
-BUILD_IRQ(48) BUILD_IRQ(49) BUILD_IRQ(50) BUILD_IRQ(51)
-BUILD_IRQ(52) BUILD_IRQ(53) BUILD_IRQ(54) BUILD_IRQ(55)
-BUILD_IRQ(56) BUILD_IRQ(57) BUILD_IRQ(58) BUILD_IRQ(59)
-BUILD_IRQ(60) BUILD_IRQ(61) BUILD_IRQ(62) BUILD_IRQ(63)
+ BUILD_16_IRQS(0x1) BUILD_16_IRQS(0x2) BUILD_16_IRQS(0x3)
+BUILD_16_IRQS(0x4) BUILD_16_IRQS(0x5) BUILD_16_IRQS(0x6) BUILD_16_IRQS(0x7)
+BUILD_16_IRQS(0x8) BUILD_16_IRQS(0x9) BUILD_16_IRQS(0xa) BUILD_16_IRQS(0xb)
+BUILD_16_IRQS(0xc) BUILD_16_IRQS(0xd)
#endif
+#undef BUILD_16_IRQS
+#undef BI
+
+
#ifdef __SMP__
/*
* The following vectors are part of the Linux architecture, there
@@ -289,7 +322,7 @@ BUILD_IRQ(60) BUILD_IRQ(61) BUILD_IRQ(62) BUILD_IRQ(63)
BUILD_SMP_INTERRUPT(reschedule_interrupt)
BUILD_SMP_INTERRUPT(invalidate_interrupt)
BUILD_SMP_INTERRUPT(stop_cpu_interrupt)
-BUILD_SMP_INTERRUPT(mtrr_interrupt)
+BUILD_SMP_INTERRUPT(call_function_interrupt)
BUILD_SMP_INTERRUPT(spurious_interrupt)
/*
@@ -303,37 +336,35 @@ BUILD_SMP_TIMER_INTERRUPT(apic_timer_interrupt)
#endif
+#define IRQ(x,y) \
+ IRQ##x##y##_interrupt
+
+#define IRQLIST_16(x) \
+ IRQ(x,0), IRQ(x,1), IRQ(x,2), IRQ(x,3), \
+ IRQ(x,4), IRQ(x,5), IRQ(x,6), IRQ(x,7), \
+ IRQ(x,8), IRQ(x,9), IRQ(x,a), IRQ(x,b), \
+ IRQ(x,c), IRQ(x,d), IRQ(x,e), IRQ(x,f)
+
static void (*interrupt[NR_IRQS])(void) = {
- IRQ0_interrupt, IRQ1_interrupt, IRQ2_interrupt, IRQ3_interrupt,
- IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
- IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
- IRQ12_interrupt, IRQ13_interrupt, IRQ14_interrupt, IRQ15_interrupt
+ IRQLIST_16(0x0),
+
#ifdef CONFIG_X86_IO_APIC
- ,IRQ16_interrupt, IRQ17_interrupt, IRQ18_interrupt, IRQ19_interrupt,
- IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt,
- IRQ24_interrupt, IRQ25_interrupt, IRQ26_interrupt, IRQ27_interrupt,
- IRQ28_interrupt, IRQ29_interrupt,
- IRQ30_interrupt, IRQ31_interrupt, IRQ32_interrupt, IRQ33_interrupt,
- IRQ34_interrupt, IRQ35_interrupt, IRQ36_interrupt, IRQ37_interrupt,
- IRQ38_interrupt, IRQ39_interrupt,
- IRQ40_interrupt, IRQ41_interrupt, IRQ42_interrupt, IRQ43_interrupt,
- IRQ44_interrupt, IRQ45_interrupt, IRQ46_interrupt, IRQ47_interrupt,
- IRQ48_interrupt, IRQ49_interrupt,
- IRQ50_interrupt, IRQ51_interrupt, IRQ52_interrupt, IRQ53_interrupt,
- IRQ54_interrupt, IRQ55_interrupt, IRQ56_interrupt, IRQ57_interrupt,
- IRQ58_interrupt, IRQ59_interrupt,
- IRQ60_interrupt, IRQ61_interrupt, IRQ62_interrupt, IRQ63_interrupt
+ IRQLIST_16(0x1), IRQLIST_16(0x2), IRQLIST_16(0x3),
+ IRQLIST_16(0x4), IRQLIST_16(0x5), IRQLIST_16(0x6), IRQLIST_16(0x7),
+ IRQLIST_16(0x8), IRQLIST_16(0x9), IRQLIST_16(0xa), IRQLIST_16(0xb),
+ IRQLIST_16(0xc), IRQLIST_16(0xd)
#endif
};
+#undef IRQ
+#undef IRQLIST_16
+
/*
- * Initial irq handlers.
+ * Special irq handlers.
*/
-void no_action(int cpl, void *dev_id, struct pt_regs *regs)
-{
-}
+void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
#ifndef CONFIG_VISWS
/*
@@ -718,7 +749,7 @@ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction *
* hardware disable after having gotten the irq
* controller lock.
*/
-void disable_irq(unsigned int irq)
+void disable_irq_nosync(unsigned int irq)
{
unsigned long flags;
@@ -728,9 +759,21 @@ void disable_irq(unsigned int irq)
irq_desc[irq].handler->disable(irq);
}
spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
- if (irq_desc[irq].status & IRQ_INPROGRESS)
- synchronize_irq();
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
+void disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+
+ if (!local_irq_count[smp_processor_id()]) {
+ do {
+ barrier();
+ } while (irq_desc[irq].status & IRQ_INPROGRESS);
+ }
}
void enable_irq(unsigned int irq)
@@ -740,7 +783,7 @@ void enable_irq(unsigned int irq)
spin_lock_irqsave(&irq_controller_lock, flags);
switch (irq_desc[irq].depth) {
case 1:
- irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
+ irq_desc[irq].status &= ~IRQ_DISABLED;
irq_desc[irq].handler->enable(irq);
/* fall throught */
default:
@@ -770,7 +813,7 @@ asmlinkage void do_IRQ(struct pt_regs regs)
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
- unsigned int irq = regs.orig_eax & 0xff;
+ int irq = regs.orig_eax & 0xff; /* subtle, see irq.h */
int cpu = smp_processor_id();
kstat.irqs[cpu][irq]++;
@@ -835,7 +878,7 @@ int setup_x86_irq(unsigned int irq, struct irqaction * new)
if (!shared) {
irq_desc[irq].depth = 0;
- irq_desc[irq].status &= ~(IRQ_DISABLED | IRQ_INPROGRESS);
+ irq_desc[irq].status &= ~IRQ_DISABLED;
irq_desc[irq].handler->startup(irq);
}
spin_unlock_irqrestore(&irq_controller_lock,flags);
@@ -907,7 +950,7 @@ out:
*
* This depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck
- * with "IRQ_INPROGRESS" asserted and the interrupt
+ * with "IRQ_WAITING" cleared and the interrupt
* disabled.
*/
unsigned long probe_irq_on(void)
@@ -921,8 +964,7 @@ unsigned long probe_irq_on(void)
spin_lock_irq(&irq_controller_lock);
for (i = NR_IRQS-1; i > 0; i--) {
if (!irq_desc[i].action) {
- unsigned int status = irq_desc[i].status | IRQ_AUTODETECT;
- irq_desc[i].status = status & ~IRQ_INPROGRESS;
+ irq_desc[i].status |= IRQ_AUTODETECT | IRQ_WAITING;
irq_desc[i].handler->startup(i);
}
}
@@ -945,7 +987,7 @@ unsigned long probe_irq_on(void)
continue;
/* It triggered already - consider it spurious. */
- if (status & IRQ_INPROGRESS) {
+ if (!(status & IRQ_WAITING)) {
irq_desc[i].status = status & ~IRQ_AUTODETECT;
irq_desc[i].handler->shutdown(i);
}
@@ -971,7 +1013,7 @@ int probe_irq_off(unsigned long unused)
if (!(status & IRQ_AUTODETECT))
continue;
- if (status & IRQ_INPROGRESS) {
+ if (!(status & IRQ_WAITING)) {
if (!nr_irqs)
irq_found = i;
nr_irqs++;
@@ -986,42 +1028,6 @@ int probe_irq_off(unsigned long unused)
return irq_found;
}
-/*
- * Silly, horrible hack
- */
-static char uglybuffer[10*256];
-
-__asm__("\n" __ALIGN_STR"\n"
- "common_unexpected:\n\t"
- SAVE_ALL
- "pushl $ret_from_intr\n\t"
- "jmp strange_interrupt");
-
-void strange_interrupt(int irqnum)
-{
- printk("Unexpected interrupt %d\n", irqnum & 255);
- for (;;);
-}
-
-extern int common_unexpected;
-__initfunc(void init_unexpected_irq(void))
-{
- int i;
- for (i = 0; i < 256; i++) {
- char *code = uglybuffer + 10*i;
- unsigned long jumpto = (unsigned long) &common_unexpected;
-
- jumpto -= (unsigned long)(code+10);
- code[0] = 0x68; /* pushl */
- *(int *)(code+1) = i - 512;
- code[5] = 0xe9; /* jmp */
- *(int *)(code+6) = jumpto;
-
- set_intr_gate(i,code);
- }
-}
-
-
void init_ISA_irqs (void)
{
int i;
@@ -1033,7 +1039,7 @@ void init_ISA_irqs (void)
if (i < 16) {
/*
- * 16 old-style INTA-cycle interrupt gates:
+ * 16 old-style INTA-cycle interrupts:
*/
irq_desc[i].handler = &i8259A_irq_type;
} else {
@@ -1054,9 +1060,16 @@ __initfunc(void init_IRQ(void))
#else
init_VISWS_APIC_irqs();
#endif
-
- for (i = 0; i < 16; i++)
- set_intr_gate(0x20+i,interrupt[i]);
+ /*
+ * Cover the whole vector space, no vector can escape
+ * us. (some of these will be overridden and become
+ * 'special' SMP interrupts)
+ */
+ for (i = 0; i < NR_IRQS; i++) {
+ int vector = FIRST_EXTERNAL_VECTOR + i;
+ if (vector != SYSCALL_VECTOR)
+ set_intr_gate(vector, interrupt[i]);
+ }
#ifdef __SMP__
@@ -1067,13 +1080,9 @@ __initfunc(void init_IRQ(void))
set_intr_gate(IRQ0_TRAP_VECTOR, interrupt[0]);
/*
- * The reschedule interrupt slowly changes it's functionality,
- * while so far it was a kind of broadcasted timer interrupt,
- * in the future it should become a CPU-to-CPU rescheduling IPI,
- * driven by schedule() ?
+ * The reschedule interrupt is a CPU-to-CPU reschedule-helper
+ * IPI, driven by wakeup.
*/
-
- /* IPI for rescheduling */
set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
/* IPI for invalidation */
@@ -1085,8 +1094,8 @@ __initfunc(void init_IRQ(void))
/* self generated IPI for local APIC timer */
set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
- /* IPI for MTRR control */
- set_intr_gate(MTRR_CHANGE_VECTOR, mtrr_interrupt);
+ /* IPI for generic function call */
+ set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
/* IPI vector for APIC spurious interrupts */
set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
diff --git a/arch/i386/kernel/irq.h b/arch/i386/kernel/irq.h
index 982ab101e..6a19d9884 100644
--- a/arch/i386/kernel/irq.h
+++ b/arch/i386/kernel/irq.h
@@ -16,6 +16,7 @@ struct hw_interrupt_type {
void (*disable)(unsigned int irq);
};
+extern struct hw_interrupt_type no_irq_type;
/*
* IRQ line status.
@@ -25,6 +26,7 @@ struct hw_interrupt_type {
#define IRQ_PENDING 4 /* IRQ pending - replay on enable */
#define IRQ_REPLAY 8 /* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT 16 /* IRQ is being autodetected */
+#define IRQ_WAITING 32 /* IRQ not yet seen - for autodetection */
/*
* This is the "IRQ descriptor", which contains various information
@@ -41,6 +43,18 @@ typedef struct {
} irq_desc_t;
/*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+#define FIRST_EXTERNAL_VECTOR 0x20
+
+#define SYSCALL_VECTOR 0x80
+
+/*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
+/*
* Special IRQ vectors used by the SMP architecture:
*
* (some of the following vectors are 'rare', they might be merged
@@ -51,10 +65,10 @@ typedef struct {
#define INVALIDATE_TLB_VECTOR 0x31
#define STOP_CPU_VECTOR 0x40
#define LOCAL_TIMER_VECTOR 0x41
-#define MTRR_CHANGE_VECTOR 0x50
+#define CALL_FUNCTION_VECTOR 0x50
/*
- * First vector available to drivers: (vectors 0x51-0xfe)
+ * First APIC vector available to drivers: (vectors 0x51-0xfe)
*/
#define IRQ0_TRAP_VECTOR 0x51
@@ -85,7 +99,6 @@ extern void disable_8259A_irq(unsigned int irq);
extern int i8259A_irq_pending(unsigned int irq);
extern void ack_APIC_irq(void);
extern void FASTCALL(send_IPI_self(int vector));
-extern void smp_send_mtrr(void);
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
@@ -94,12 +107,15 @@ extern void send_IPI(int dest, int vector);
extern void init_pic_mode(void);
extern void print_IO_APIC(void);
-extern unsigned long long io_apic_irqs;
+extern unsigned long io_apic_irqs;
+
+extern char _stext, _etext;
#define MAX_IRQ_SOURCES 128
#define MAX_MP_BUSSES 32
enum mp_bustype {
MP_BUS_ISA,
+ MP_BUS_EISA,
MP_BUS_PCI
};
extern int mp_bus_id_to_type [MAX_MP_BUSSES];
@@ -126,7 +142,7 @@ static inline void irq_exit(int cpu, unsigned int irq)
hardirq_exit(cpu);
}
-#define IO_APIC_IRQ(x) ((1<<x) & io_apic_irqs)
+#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
#else
@@ -201,6 +217,13 @@ __asm__( \
"pushl $ret_from_intr\n\t" \
"jmp "SYMBOL_NAME_STR(do_IRQ));
+/*
+ * subtle. orig_eax is used by the signal code to distinguish between
+ * system calls and interrupted 'random user-space'. Thus we have
+ * to put a negative value into orig_eax here. (the problem is that
+ * both system calls and IRQs want to have small integer numbers in
+ * orig_eax, and the syscall code has won the optimization conflict ;)
+ */
#define BUILD_IRQ(nr) \
asmlinkage void IRQ_NAME(nr); \
__asm__( \
@@ -216,7 +239,6 @@ SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
static inline void x86_do_profile (unsigned long eip)
{
if (prof_buffer && current->pid) {
- extern int _stext;
eip -= (unsigned long) &_stext;
eip >>= prof_shift;
/*
diff --git a/arch/i386/kernel/mca.c b/arch/i386/kernel/mca.c
index de5a47e72..7c5ad2712 100644
--- a/arch/i386/kernel/mca.c
+++ b/arch/i386/kernel/mca.c
@@ -26,6 +26,9 @@
* - Added the 'driver_loaded' flag in MCA_adapter
* - Added an alternative implemention of ZP Gu's mca_find_unused_adapter
*
+ * David Weinehall March 24th, 1999
+ * - Fixed the output of 'Driver Installed' in /proc/mca/pos
+ * - Made the Integrated Video & SCSI show up even if they have id 0000
*/
#include <linux/types.h>
@@ -49,12 +52,12 @@
* Other miscellaneous information follows.
*/
-typedef enum {
- MCA_ADAPTER_NORMAL = 0,
- MCA_ADAPTER_NONE = 1,
- MCA_ADAPTER_DISABLED = 2,
- MCA_ADAPTER_ERROR = 3
-} MCA_AdapterStatus;
+typedef enum {
+ MCA_ADAPTER_NORMAL = 0,
+ MCA_ADAPTER_NONE = 1,
+ MCA_ADAPTER_DISABLED = 2,
+ MCA_ADAPTER_ERROR = 3
+} MCA_AdapterStatus;
struct MCA_adapter {
MCA_AdapterStatus status; /* is there a valid adapter? */
@@ -69,16 +72,17 @@ struct MCA_adapter {
};
struct MCA_info {
-/* one for each of the 8 possible slots, plus one for integrated SCSI
- and one for integrated video. */
+ /* one for each of the 8 possible slots, plus one for integrated SCSI
+ * and one for integrated video.
+ */
struct MCA_adapter slot[MCA_NUMADAPTERS];
-/* two potential addresses for integrated SCSI adapter - this will
- * track which one we think it is
- */
+ /* two potential addresses for integrated SCSI adapter - this will
+ * track which one we think it is.
+ */
- unsigned char which_scsi;
+ unsigned char which_scsi;
};
/* The mca_info structure pointer. If MCA bus is present, the function
@@ -88,7 +92,7 @@ struct MCA_info {
* is set to zero.
*/
-static struct MCA_info* mca_info = 0;
+static struct MCA_info* mca_info = NULL;
/* MCA registers */
@@ -102,10 +106,10 @@ static struct MCA_info* mca_info = 0;
#ifdef CONFIG_PROC_FS
-static void mca_do_proc_init( void );
-static int mca_default_procfn( char* buf, int slot );
+static void mca_do_proc_init(void);
+static int mca_default_procfn(char* buf, int slot);
-static ssize_t proc_mca_read( struct file*, char*, size_t, loff_t *);
+static ssize_t proc_mca_read(struct file*, char*, size_t, loff_t *);
static struct file_operations proc_mca_operations = {
NULL, /* array_lseek */
@@ -146,23 +150,26 @@ static struct inode_operations proc_mca_inode_operations = {
/* Build the status info for the adapter */
-static void mca_configure_adapter_status( int slot ) {
+static void mca_configure_adapter_status(int slot) {
mca_info->slot[slot].status = MCA_ADAPTER_NONE;
mca_info->slot[slot].id = mca_info->slot[slot].pos[0]
+ (mca_info->slot[slot].pos[1] << 8);
- if( !mca_info->slot[slot].id ) {
+ if(!mca_info->slot[slot].id && slot < MCA_MAX_SLOT_NR) {
/* id = 0x0000 usually indicates hardware failure,
* however, ZP Gu (zpg@castle.net> reports that his 9556
- * has 0x0000 as id and everything still works.
+ * has 0x0000 as id and everything still works. There
+ * also seems to be an adapter with id = 0x0000; the
+ * NCR Parallel Bus Memory Card. Until this is confirmed,
+ * however, this code will stay.
*/
mca_info->slot[slot].status = MCA_ADAPTER_ERROR;
return;
- } else if( mca_info->slot[slot].id != 0xffff ) {
+ } else if(mca_info->slot[slot].id != 0xffff) {
/* 0xffff usually indicates that there's no adapter,
* however, some integrated adapters may have 0xffff as
@@ -174,21 +181,21 @@ static void mca_configure_adapter_status( int slot ) {
mca_info->slot[slot].status = MCA_ADAPTER_NORMAL;
}
- if( (mca_info->slot[slot].id == 0xffff ||
- mca_info->slot[slot].id == 0x0000) && slot >= MCA_MAX_SLOT_NR ) {
+ if((mca_info->slot[slot].id == 0xffff ||
+ mca_info->slot[slot].id == 0x0000) && slot >= MCA_MAX_SLOT_NR) {
int j;
- for( j = 2; j < 8; j++ ) {
- if( mca_info->slot[slot].pos[j] != 0xff ) {
+ for(j = 2; j < 8; j++) {
+ if(mca_info->slot[slot].pos[j] != 0xff) {
mca_info->slot[slot].status = MCA_ADAPTER_NORMAL;
break;
}
}
}
- if( !(mca_info->slot[slot].pos[2] & MCA_ENABLED) ) {
+ if(!(mca_info->slot[slot].pos[2] & MCA_ENABLED)) {
- /* enabled bit is in pos 2 */
+ /* enabled bit is in POS 2 */
mca_info->slot[slot].status = MCA_ADAPTER_DISABLED;
}
@@ -198,94 +205,101 @@ static void mca_configure_adapter_status( int slot ) {
__initfunc(void mca_init(void))
{
- unsigned int i, j;
+ unsigned int i, j;
unsigned long flags;
/* WARNING: Be careful when making changes here. Putting an adapter
- * and the motherboard simultaneously into setup mode may result in
- * damage to chips (according to The Indispensible PC Hardware Book
- * by Hans-Peter Messmer). Also, we disable system interrupts (so
+ * and the motherboard simultaneously into setup mode may result in
+ * damage to chips (according to The Indispensable PC Hardware Book
+ * by Hans-Peter Messmer). Also, we disable system interrupts (so
* that we are not disturbed in the middle of this).
*/
/* Make sure the MCA bus is present */
- if (!MCA_bus)
+ if(!MCA_bus)
return;
- printk( "Micro Channel bus detected.\n" );
- save_flags( flags );
+ printk("Micro Channel bus detected.\n");
+ save_flags(flags);
cli();
/* Allocate MCA_info structure (at address divisible by 8) */
- mca_info = kmalloc(sizeof(struct MCA_info), GFP_ATOMIC);
+ mca_info = kmalloc(sizeof(struct MCA_info), GFP_KERNEL);
+
+ if(mca_info == NULL) {
+ printk("Failed to allocate memory for mca_info!");
+ restore_flags(flags);
+ return;
+ }
/* Make sure adapter setup is off */
outb_p(0, MCA_ADAPTER_SETUP_REG);
/* Put motherboard into video setup mode, read integrated video
- * pos registers, and turn motherboard setup off.
+ * POS registers, and turn motherboard setup off.
*/
outb_p(0xdf, MCA_MOTHERBOARD_SETUP_REG);
mca_info->slot[MCA_INTEGVIDEO].name[0] = 0;
- for (j=0; j<8; j++) {
- mca_info->slot[MCA_INTEGVIDEO].pos[j] = inb_p(MCA_POS_REG(j));
+ for(j=0; j<8; j++) {
+ mca_info->slot[MCA_INTEGVIDEO].pos[j] = inb_p(MCA_POS_REG(j));
}
mca_configure_adapter_status(MCA_INTEGVIDEO);
/* Put motherboard into scsi setup mode, read integrated scsi
- * pos registers, and turn motherboard setup off.
+ * POS registers, and turn motherboard setup off.
*
- * It seems there are two possible SCSI registers. Martin says that
+ * It seems there are two possible SCSI registers. Martin says that
* for the 56,57, 0xf7 is the one, but fails on the 76.
* Alfredo (apena@vnet.ibm.com) says
- * 0xfd works on his machine. We'll try both of them. I figure it's
- * a good bet that only one could be valid at a time. This could
+ * 0xfd works on his machine. We'll try both of them. I figure it's
+ * a good bet that only one could be valid at a time. This could
* screw up though if one is used for something else on the other
* machine.
*/
outb_p(0xf7, MCA_MOTHERBOARD_SETUP_REG);
mca_info->slot[MCA_INTEGSCSI].name[0] = 0;
- for (j=0; j<8; j++) {
- if( (mca_info->slot[MCA_INTEGSCSI].pos[j] = inb_p(MCA_POS_REG(j))) != 0xff )
+ for(j=0; j<8; j++) {
+ if((mca_info->slot[MCA_INTEGSCSI].pos[j] = inb_p(MCA_POS_REG(j))) != 0xff)
{
- /* 0xff all across means no device. 0x00 means something's
- * broken, but a device is probably there. However, if you get
- * 0x00 from a motherboard register it won't matter what we
- * find. For the record, on the 57SLC, the integrated SCSI
- * adapter has 0xffff for the adapter ID, but nonzero for
- * other registers.
+ /* 0xff all across means no device. 0x00 means
+ * something's broken, but a device is probably there.
+ * However, if you get 0x00 from a motherboard
+ * register it won't matter what we find. For the
+ * record, on the 57SLC, the integrated SCSI
+ * adapter has 0xffff for the adapter ID, but
+ * nonzero for other registers.
*/
mca_info->which_scsi = 0xf7;
}
}
- if( !mca_info->which_scsi ) {
+ if(!mca_info->which_scsi) {
/* Didn't find it at 0xf7, try somewhere else... */
mca_info->which_scsi = 0xfd;
outb_p(0xfd, MCA_MOTHERBOARD_SETUP_REG);
- for (j=0; j<8; j++)
- mca_info->slot[MCA_INTEGSCSI].pos[j] = inb_p(MCA_POS_REG(j));
+ for(j=0; j<8; j++)
+ mca_info->slot[MCA_INTEGSCSI].pos[j] = inb_p(MCA_POS_REG(j));
}
mca_configure_adapter_status(MCA_INTEGSCSI);
- /* turn off motherboard setup */
+ /* Turn off motherboard setup */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
/* Now loop over MCA slots: put each adapter into setup mode, and
- * read its pos registers. Then put adapter setup off.
+ * read its POS registers. Then put adapter setup off.
*/
- for (i=0; i<MCA_MAX_SLOT_NR; i++) {
+ for(i=0; i<MCA_MAX_SLOT_NR; i++) {
outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG);
- for (j=0; j<8; j++) {
- mca_info->slot[i].pos[j]=inb_p(MCA_POS_REG(j));
+ for(j=0; j<8; j++) {
+ mca_info->slot[i].pos[j]=inb_p(MCA_POS_REG(j));
}
mca_info->slot[i].name[0] = 0;
mca_info->slot[i].driver_loaded = 0;
@@ -295,7 +309,7 @@ __initfunc(void mca_init(void))
/* Enable interrupts and return memory start */
- restore_flags( flags );
+ restore_flags(flags);
request_region(0x60,0x01,"system control port B (MCA)");
request_region(0x90,0x01,"arbitration (MCA)");
@@ -312,89 +326,90 @@ __initfunc(void mca_init(void))
/*--------------------------------------------------------------------*/
-static void mca_handle_nmi_slot( int slot, int check_flag )
+static void mca_handle_nmi_slot(int slot, int check_flag)
{
- if( slot < MCA_MAX_SLOT_NR ) {
- printk( "NMI: caused by MCA adapter in slot %d (%s)\n", slot+1,
- mca_info->slot[slot].name );
- } else if( slot == MCA_INTEGSCSI ) {
- printk( "NMI: caused by MCA integrated SCSI adapter (%s)\n",
- mca_info->slot[slot].name );
- } else if( slot == MCA_INTEGVIDEO ) {
- printk( "NMI: caused by MCA integrated video adapter (%s)\n",
- mca_info->slot[slot].name );
- }
-
- /* more info available in pos 6 and 7? */
-
- if( check_flag ) {
- unsigned char pos6, pos7;
-
- pos6 = mca_read_pos( slot, 6 );
- pos7 = mca_read_pos( slot, 7 );
-
- printk( "NMI: POS 6 = 0x%x, POS 7 = 0x%x\n", pos6, pos7 );
- }
-
-} /* mca_handle_nmi_slot */
-
-/*--------------------------------------------------------------------*/
-
-void mca_handle_nmi( void )
+ if(slot < MCA_MAX_SLOT_NR) {
+ printk("NMI: caused by MCA adapter in slot %d (%s)\n", slot+1,
+ mca_info->slot[slot].name);
+ } else if(slot == MCA_INTEGSCSI) {
+ printk("NMI: caused by MCA integrated SCSI adapter (%s)\n",
+ mca_info->slot[slot].name);
+ } else if(slot == MCA_INTEGVIDEO) {
+ printk("NMI: caused by MCA integrated video adapter (%s)\n",
+ mca_info->slot[slot].name);
+ }
+
+ /* More info available in POS 6 and 7? */
+
+ if(check_flag) {
+ unsigned char pos6, pos7;
+
+ pos6 = mca_read_pos(slot, 6);
+ pos7 = mca_read_pos(slot, 7);
+
+ printk("NMI: POS 6 = 0x%x, POS 7 = 0x%x\n", pos6, pos7);
+ }
+
+} /* mca_handle_nmi_slot */
+
+/*--------------------------------------------------------------------*/
+
+void mca_handle_nmi(void)
{
int i;
- unsigned char pos5;
-
- /* First try - scan the various adapters and see if a specific
- * adapter was responsible for the error
- */
-
- for( i = 0; i < MCA_NUMADAPTERS; i += 1 ) {
-
- /* bit 7 of POS 5 is reset when this adapter has a hardware
- * error. bit 7 it reset if there's error information
- * available in pos 6 and 7. */
-
- pos5 = mca_read_pos( i, 5 );
-
- if( !(pos5 & 0x80) ) {
- mca_handle_nmi_slot( i, !(pos5 & 0x40) );
- return;
- }
- }
-
- /* if I recall correctly, there's a whole bunch of other things that
- * we can do to check for NMI problems, but that's all I know about
+ unsigned char pos5;
+
+ /* First try - scan the various adapters and see if a specific
+ * adapter was responsible for the error.
+ */
+
+ for(i = 0; i < MCA_NUMADAPTERS; i++) {
+
+ /* Bit 7 of POS 5 is reset when this adapter has a hardware
+ * error. Bit 7 is reset if there's error information
+ * available in POS 6 and 7.
+ */
+
+ pos5 = mca_read_pos(i, 5);
+
+ if(!(pos5 & 0x80)) {
+ mca_handle_nmi_slot(i, !(pos5 & 0x40));
+ return;
+ }
+ }
+
+ /* If I recall correctly, there's a whole bunch of other things that
+ * we can do to check for NMI problems, but that's all I know about
* at the moment.
- */
+ */
- printk( "NMI generated from unknown source!\n" );
-} /* mca_handle_nmi */
+ printk("NMI generated from unknown source!\n");
+} /* mca_handle_nmi */
/*--------------------------------------------------------------------*/
-int mca_find_adapter( int id, int start )
+int mca_find_adapter(int id, int start)
{
- if( mca_info == 0 || id == 0 || id == 0xffff ) {
+ if(mca_info == NULL || id == 0xffff) {
return MCA_NOTFOUND;
}
- for( ; start >= 0 && start < MCA_NUMADAPTERS; start += 1 ) {
+ for(; start >= 0 && start < MCA_NUMADAPTERS; start++) {
- /* not sure about this. There's no point in returning
+ /* Not sure about this. There's no point in returning
* adapters that aren't enabled, since they can't actually
- * be used. However, they might be needed for statistical
+ * be used. However, they might be needed for statistical
* purposes or something... But if that is the case, the
* user is free to write a routine that manually iterates
* through the adapters.
*/
- if( mca_info->slot[start].status == MCA_ADAPTER_DISABLED ) {
+ if(mca_info->slot[start].status == MCA_ADAPTER_DISABLED) {
continue;
}
- if( id == mca_info->slot[start].id ) {
+ if(id == mca_info->slot[start].id) {
return start;
}
}
@@ -404,28 +419,28 @@ int mca_find_adapter( int id, int start )
/*--------------------------------------------------------------------*/
-int mca_find_unused_adapter( int id, int start )
+int mca_find_unused_adapter(int id, int start)
{
- if( mca_info == 0 || id == 0 || id == 0xffff ) {
+ if(mca_info == NULL || id == 0xffff) {
return MCA_NOTFOUND;
}
- for( ; start >= 0 && start < MCA_NUMADAPTERS; start += 1 ) {
+ for(; start >= 0 && start < MCA_NUMADAPTERS; start++) {
- /* not sure about this. There's no point in returning
+ /* not sure about this. There's no point in returning
* adapters that aren't enabled, since they can't actually
- * be used. However, they might be needed for statistical
+ * be used. However, they might be needed for statistical
* purposes or something... But if that is the case, the
* user is free to write a routine that manually iterates
* through the adapters.
*/
- if( mca_info->slot[start].status == MCA_ADAPTER_DISABLED ||
- mca_info->slot[start].driver_loaded ) {
+ if(mca_info->slot[start].status == MCA_ADAPTER_DISABLED ||
+ mca_info->slot[start].driver_loaded) {
continue;
}
- if( id == mca_info->slot[start].id ) {
+ if(id == mca_info->slot[start].id) {
return start;
}
}
@@ -435,68 +450,68 @@ int mca_find_unused_adapter( int id, int start )
/*--------------------------------------------------------------------*/
-unsigned char mca_read_stored_pos( int slot, int reg )
+unsigned char mca_read_stored_pos(int slot, int reg)
{
- if( slot < 0 || slot >= MCA_NUMADAPTERS || mca_info == 0 ) return 0;
- if( reg < 0 || reg >= 8 ) return 0;
+ if(slot < 0 || slot >= MCA_NUMADAPTERS || mca_info == NULL) return 0;
+ if(reg < 0 || reg >= 8) return 0;
return mca_info->slot[slot].pos[reg];
} /* mca_read_stored_pos() */
/*--------------------------------------------------------------------*/
-unsigned char mca_read_pos( int slot, int reg )
+unsigned char mca_read_pos(int slot, int reg)
{
unsigned int byte = 0;
unsigned long flags;
- if( slot < 0 || slot >= MCA_NUMADAPTERS || mca_info == 0 ) return 0;
- if( reg < 0 || reg >= 8 ) return 0;
+ if(slot < 0 || slot >= MCA_NUMADAPTERS || mca_info == NULL) return 0;
+ if(reg < 0 || reg >= 8) return 0;
- save_flags( flags );
+ save_flags(flags);
cli();
- /* make sure motherboard setup is off */
+ /* Make sure motherboard setup is off */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
- /* read in the appropriate register */
+ /* Read in the appropriate register */
- if( slot == MCA_INTEGSCSI && mca_info->which_scsi ) {
+ if(slot == MCA_INTEGSCSI && mca_info->which_scsi) {
- /* disable adapter setup, enable motherboard setup */
+ /* Disable adapter setup, enable motherboard setup */
outb_p(0, MCA_ADAPTER_SETUP_REG);
outb_p(mca_info->which_scsi, MCA_MOTHERBOARD_SETUP_REG);
byte = inb_p(MCA_POS_REG(reg));
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
- } else if( slot == MCA_INTEGVIDEO ) {
+ } else if(slot == MCA_INTEGVIDEO) {
- /* disable adapter setup, enable motherboard setup */
+ /* Disable adapter setup, enable motherboard setup */
outb_p(0, MCA_ADAPTER_SETUP_REG);
outb_p(0xdf, MCA_MOTHERBOARD_SETUP_REG);
byte = inb_p(MCA_POS_REG(reg));
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
- } else if( slot < MCA_MAX_SLOT_NR ) {
+ } else if(slot < MCA_MAX_SLOT_NR) {
- /* make sure motherboard setup is off */
+ /* Make sure motherboard setup is off */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
- /* read the appropriate register */
+ /* Read the appropriate register */
outb_p(0x8|(slot&0xf), MCA_ADAPTER_SETUP_REG);
byte = inb_p(MCA_POS_REG(reg));
outb_p(0, MCA_ADAPTER_SETUP_REG);
}
- /* make sure the stored values are consistent, while we're here */
+ /* Make sure the stored values are consistent, while we're here */
mca_info->slot[slot].pos[reg] = byte;
- restore_flags( flags );
+ restore_flags(flags);
return byte;
} /* mca_read_pos() */
@@ -513,44 +528,47 @@ unsigned char mca_read_pos( int slot, int reg )
* screws up.
*/
-void mca_write_pos( int slot, int reg, unsigned char byte )
+void mca_write_pos(int slot, int reg, unsigned char byte)
{
unsigned long flags;
- if( slot < 0 || slot >= MCA_MAX_SLOT_NR ) return;
- if( reg < 0 || reg >= 8 ) return;
- if (mca_info == 0 ) return;
+ if(slot < 0 || slot >= MCA_MAX_SLOT_NR)
+ return;
+ if(reg < 0 || reg >= 8)
+ return;
+ if(mca_info == NULL)
+ return;
- save_flags( flags );
+ save_flags(flags);
cli();
- /* make sure motherboard setup is off */
+ /* Make sure motherboard setup is off */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
- /* read in the appropriate register */
+ /* Read in the appropriate register */
outb_p(0x8|(slot&0xf), MCA_ADAPTER_SETUP_REG);
- outb_p( byte, MCA_POS_REG(reg) );
+ outb_p(byte, MCA_POS_REG(reg));
outb_p(0, MCA_ADAPTER_SETUP_REG);
- restore_flags( flags );
+ restore_flags(flags);
- /* update the global register list, while we have the byte */
+ /* Update the global register list, while we have the byte */
mca_info->slot[slot].pos[reg] = byte;
} /* mca_write_pos() */
/*--------------------------------------------------------------------*/
-void mca_set_adapter_name( int slot, char* name )
+void mca_set_adapter_name(int slot, char* name)
{
- if( mca_info == 0 ) return;
+ if(mca_info == NULL) return;
- if( slot >= 0 && slot < MCA_NUMADAPTERS ) {
- if( name != NULL ) {
- strncpy( mca_info->slot[slot].name, name,
- sizeof(mca_info->slot[slot].name)-1 );
+ if(slot >= 0 && slot < MCA_NUMADAPTERS) {
+ if(name != NULL) {
+ strncpy(mca_info->slot[slot].name, name,
+ sizeof(mca_info->slot[slot].name)-1);
mca_info->slot[slot].name[
sizeof(mca_info->slot[slot].name)-1] = 0;
} else {
@@ -559,61 +577,61 @@ void mca_set_adapter_name( int slot, char* name )
}
}
-void mca_set_adapter_procfn( int slot, MCA_ProcFn procfn, void* dev)
+void mca_set_adapter_procfn(int slot, MCA_ProcFn procfn, void* dev)
{
- if( mca_info == 0 ) return;
+ if(mca_info == NULL) return;
- if( slot >= 0 && slot < MCA_NUMADAPTERS ) {
+ if(slot >= 0 && slot < MCA_NUMADAPTERS) {
mca_info->slot[slot].procfn = procfn;
mca_info->slot[slot].dev = dev;
}
}
-int mca_is_adapter_used( int slot )
+int mca_is_adapter_used(int slot)
{
return mca_info->slot[slot].driver_loaded;
}
-int mca_mark_as_used( int slot )
+int mca_mark_as_used(int slot)
{
if(mca_info->slot[slot].driver_loaded) return 1;
mca_info->slot[slot].driver_loaded = 1;
return 0;
}
-void mca_mark_as_unused( int slot )
+void mca_mark_as_unused(int slot)
{
mca_info->slot[slot].driver_loaded = 0;
}
-char *mca_get_adapter_name( int slot )
+char *mca_get_adapter_name(int slot)
{
- if( mca_info == 0 ) return 0;
+ if(mca_info == NULL) return 0;
- if( slot >= 0 && slot < MCA_NUMADAPTERS ) {
+ if(slot >= 0 && slot < MCA_NUMADAPTERS) {
return mca_info->slot[slot].name;
}
return 0;
}
-int mca_isadapter( int slot )
+int mca_isadapter(int slot)
{
- if( mca_info == 0 ) return 0;
+ if(mca_info == NULL) return 0;
- if( slot >= 0 && slot < MCA_NUMADAPTERS ) {
- return (( mca_info->slot[slot].status == MCA_ADAPTER_NORMAL )
- || (mca_info->slot[slot].status == MCA_ADAPTER_DISABLED ) );
+ if(slot >= 0 && slot < MCA_NUMADAPTERS) {
+ return ((mca_info->slot[slot].status == MCA_ADAPTER_NORMAL)
+ || (mca_info->slot[slot].status == MCA_ADAPTER_DISABLED));
}
return 0;
}
-int mca_isenabled( int slot )
+int mca_isenabled(int slot)
{
- if( mca_info == 0 ) return 0;
+ if(mca_info == NULL) return 0;
- if( slot >= 0 && slot < MCA_NUMADAPTERS ) {
+ if(slot >= 0 && slot < MCA_NUMADAPTERS) {
return (mca_info->slot[slot].status == MCA_ADAPTER_NORMAL);
}
@@ -624,39 +642,37 @@ int mca_isenabled( int slot )
#ifdef CONFIG_PROC_FS
-int get_mca_info(char *buf)
+int get_mca_info(char *buf)
{
- int i, j, len = 0;
+ int i, j, len = 0;
- if( MCA_bus && mca_info != 0 )
+ if(MCA_bus && mca_info != NULL)
{
- /* Format pos registers of eight MCA slots */
+ /* Format POS registers of eight MCA slots */
- for (i=0; i<MCA_MAX_SLOT_NR; i++)
+ for(i=0; i<MCA_MAX_SLOT_NR; i++)
{
len += sprintf(buf+len, "Slot %d: ", i+1);
- for (j=0; j<8; j++)
+ for(j=0; j<8; j++)
len += sprintf(buf+len, "%02x ", mca_info->slot[i].pos[j]);
- len += sprintf( buf+len, " %s\n", mca_info->slot[i].name );
- }
+ len += sprintf(buf+len, " %s\n", mca_info->slot[i].name);
+ }
- /* Format pos registers of integrated video subsystem */
+ /* Format POS registers of integrated video subsystem */
len += sprintf(buf+len, "Video : ");
- for (j=0; j<8; j++)
+ for(j=0; j<8; j++)
len += sprintf(buf+len, "%02x ", mca_info->slot[MCA_INTEGVIDEO].pos[j]);
- len += sprintf( buf+len, " %s\n", mca_info->slot[MCA_INTEGVIDEO].name );
+ len += sprintf(buf+len, " %s\n", mca_info->slot[MCA_INTEGVIDEO].name);
- /* Format pos registers of integrated SCSI subsystem */
+ /* Format POS registers of integrated SCSI subsystem */
len += sprintf(buf+len, "SCSI : ");
- for (j=0; j<8; j++)
+ for(j=0; j<8; j++)
len += sprintf(buf+len, "%02x ", mca_info->slot[MCA_INTEGSCSI].pos[j]);
- len += sprintf( buf+len, " %s\n", mca_info->slot[MCA_INTEGSCSI].name );
- }
- else
- {
- /* Leave it empty if MCA not detected - this should *never*
+ len += sprintf(buf+len, " %s\n", mca_info->slot[MCA_INTEGSCSI].name);
+ } else {
+ /* Leave it empty if MCA not detected - this should *never*
* happen!
*/
}
@@ -667,119 +683,123 @@ int get_mca_info(char *buf)
/*--------------------------------------------------------------------*/
-__initfunc(void mca_do_proc_init( void ))
+__initfunc(void mca_do_proc_init(void))
{
- int i = 0;
- struct proc_dir_entry* node = 0;
+ int i;
+ struct proc_dir_entry* node = NULL;
- if( mca_info == 0 ) return; /* should never happen */
+ if(mca_info == NULL) return; /* Should never happen */
- proc_register( &proc_mca, &(struct proc_dir_entry) {
+ proc_register(&proc_mca, &(struct proc_dir_entry) {
PROC_MCA_REGISTERS, 3, "pos", S_IFREG|S_IRUGO,
- 1, 0, 0, 0, &proc_mca_inode_operations,} );
+ 1, 0, 0, 0, &proc_mca_inode_operations,});
- proc_register( &proc_mca, &(struct proc_dir_entry) {
+ proc_register(&proc_mca, &(struct proc_dir_entry) {
PROC_MCA_MACHINE, 7, "machine", S_IFREG|S_IRUGO,
- 1, 0, 0, 0, &proc_mca_inode_operations,} );
+ 1, 0, 0, 0, &proc_mca_inode_operations,});
- /* initialize /proc/mca entries for existing adapters */
+ /* Initialize /proc/mca entries for existing adapters */
- for( i = 0; i < MCA_NUMADAPTERS; i += 1 ) {
+ for(i = 0; i < MCA_NUMADAPTERS; i++) {
mca_info->slot[i].procfn = 0;
mca_info->slot[i].dev = 0;
- if( ! mca_isadapter( i ) ) continue;
- node = kmalloc(sizeof(struct proc_dir_entry), GFP_ATOMIC);
+ if(!mca_isadapter(i)) continue;
+ node = kmalloc(sizeof(struct proc_dir_entry), GFP_KERNEL);
- if( i < MCA_MAX_SLOT_NR ) {
+ if(node == NULL) {
+ printk("Failed to allocate memory for MCA proc-entries!");
+ return;
+ }
+ if(i < MCA_MAX_SLOT_NR) {
node->low_ino = PROC_MCA_SLOT + i;
- node->namelen = sprintf( mca_info->slot[i].procname,
- "slot%d", i+1 );
- } else if( i == MCA_INTEGVIDEO ) {
+ node->namelen = sprintf(mca_info->slot[i].procname,
+ "slot%d", i+1);
+ } else if(i == MCA_INTEGVIDEO) {
node->low_ino = PROC_MCA_VIDEO;
- node->namelen = sprintf( mca_info->slot[i].procname,
- "video" );
- } else if( i == MCA_INTEGSCSI ) {
+ node->namelen = sprintf(mca_info->slot[i].procname,
+ "video");
+ } else if(i == MCA_INTEGSCSI) {
node->low_ino = PROC_MCA_SCSI;
- node->namelen = sprintf( mca_info->slot[i].procname,
- "scsi" );
+ node->namelen = sprintf(mca_info->slot[i].procname,
+ "scsi");
}
node->name = mca_info->slot[i].procname;
node->mode = S_IFREG | S_IRUGO;
node->ops = &proc_mca_inode_operations;
- proc_register( &proc_mca, node );
+ proc_register(&proc_mca, node);
}
} /* mca_do_proc_init() */
/*--------------------------------------------------------------------*/
-int mca_default_procfn( char* buf, int slot )
+int mca_default_procfn(char* buf, int slot)
{
int len = 0, i;
- /* this really shouldn't happen... */
+ /* This really shouldn't happen... */
- if( mca_info == 0 ) {
+ if(mca_info == NULL) {
*buf = 0;
return 0;
}
- /* print out the basic information */
+ /* Print out the basic information */
- if( slot < MCA_MAX_SLOT_NR ) {
- len += sprintf( buf+len, "Slot: %d\n", slot+1 );
- } else if( slot == MCA_INTEGSCSI ) {
- len += sprintf( buf+len, "Integrated SCSI Adapter\n" );
- } else if( slot == MCA_INTEGVIDEO ) {
- len += sprintf( buf+len, "Integrated Video Adapter\n" );
+ if(slot < MCA_MAX_SLOT_NR) {
+ len += sprintf(buf+len, "Slot: %d\n", slot+1);
+ } else if(slot == MCA_INTEGSCSI) {
+ len += sprintf(buf+len, "Integrated SCSI Adapter\n");
+ } else if(slot == MCA_INTEGVIDEO) {
+ len += sprintf(buf+len, "Integrated Video Adapter\n");
}
- if( mca_info->slot[slot].name[0] ) {
+ if(mca_info->slot[slot].name[0]) {
- /* drivers might register a name without /proc handler... */
+ /* Drivers might register a name without /proc handler... */
- len += sprintf( buf+len, "Adapter Name: %s\n",
- mca_info->slot[slot].name );
+ len += sprintf(buf+len, "Adapter Name: %s\n",
+ mca_info->slot[slot].name);
} else {
- len += sprintf( buf+len, "Adapter Name: Unknown\n" );
+ len += sprintf(buf+len, "Adapter Name: Unknown\n");
}
- len += sprintf( buf+len, "Id: %02x%02x\n",
- mca_info->slot[slot].pos[1], mca_info->slot[slot].pos[0] );
- len += sprintf( buf+len, "Enabled: %s\nPOS: ",
- mca_isenabled(slot) ? "Yes" : "No" );
- len += sprintf( buf+len, "Driver Installed: %s\n",
- mca_is_adapter_used(slot) ? "Yes" : "No" );
- for (i=0; i<8; i++) {
+ len += sprintf(buf+len, "Id: %02x%02x\n",
+ mca_info->slot[slot].pos[1], mca_info->slot[slot].pos[0]);
+ len += sprintf(buf+len, "Enabled: %s\nPOS: ",
+ mca_isenabled(slot) ? "Yes" : "No");
+ for(i=0; i<8; i++) {
len += sprintf(buf+len, "%02x ", mca_info->slot[slot].pos[i]);
}
+ len += sprintf(buf+len, "\nDriver Installed: %s",
+ mca_is_adapter_used(slot) ? "Yes" : "No");
buf[len++] = '\n';
buf[len] = 0;
return len;
} /* mca_default_procfn() */
-static int get_mca_machine_info( char* buf )
+static int get_mca_machine_info(char* buf)
{
int len = 0;
- len += sprintf( buf+len, "Model Id: 0x%x\n", machine_id );
- len += sprintf( buf+len, "Submodel Id: 0x%x\n", machine_submodel_id );
- len += sprintf( buf+len, "BIOS Revision: 0x%x\n", BIOS_revision );
+ len += sprintf(buf+len, "Model Id: 0x%x\n", machine_id);
+ len += sprintf(buf+len, "Submodel Id: 0x%x\n", machine_submodel_id);
+ len += sprintf(buf+len, "BIOS Revision: 0x%x\n", BIOS_revision);
return len;
}
-static int mca_fill( char* page, int pid, int type, char** start,
+static int mca_fill(char* page, int pid, int type, char** start,
loff_t *offset, int length)
{
int len = 0;
int slot = 0;
- switch( type ) {
+ switch(type) {
case PROC_MCA_REGISTERS:
- return get_mca_info( page );
+ return get_mca_info(page);
case PROC_MCA_MACHINE:
- return get_mca_machine_info( page );
+ return get_mca_machine_info(page);
case PROC_MCA_VIDEO:
slot = MCA_INTEGVIDEO;
break;
@@ -787,24 +807,24 @@ static int mca_fill( char* page, int pid, int type, char** start,
slot = MCA_INTEGSCSI;
break;
default:
- if( type < PROC_MCA_SLOT || type >= PROC_MCA_LAST ) {
+ if(type < PROC_MCA_SLOT || type >= PROC_MCA_LAST) {
return -EBADF;
}
slot = type - PROC_MCA_SLOT;
break;
}
- /* if we made it here, we better have a valid slot */
+ /* If we made it here, we better have a valid slot */
- /* get the standard info */
+ /* Get the standard info */
- len = mca_default_procfn( page, slot );
+ len = mca_default_procfn(page, slot);
- /* do any device-specific processing, if there is any */
+ /* Do any device-specific processing, if there is any */
- if( mca_info->slot[slot].procfn ) {
- len += mca_info->slot[slot].procfn( page+len, slot,
- mca_info->slot[slot].dev );
+ if(mca_info->slot[slot].procfn) {
+ len += mca_info->slot[slot].procfn(page+len, slot,
+ mca_info->slot[slot].dev);
}
return len;
@@ -814,7 +834,7 @@ static int mca_fill( char* page, int pid, int type, char** start,
#define PROC_BLOCK_SIZE (3*1024)
-static ssize_t proc_mca_read( struct file* file,
+static ssize_t proc_mca_read(struct file* file,
char* buf, size_t count, loff_t *ppos)
{
unsigned long page;
@@ -825,11 +845,11 @@ static ssize_t proc_mca_read( struct file* file,
struct proc_dir_entry *dp;
struct inode *inode = file->f_dentry->d_inode;
- if (count < 0)
+ if(count < 0)
return -EINVAL;
- if (count > PROC_BLOCK_SIZE)
+ if(count > PROC_BLOCK_SIZE)
count = PROC_BLOCK_SIZE;
- if (!(page = __get_free_page(GFP_KERNEL)))
+ if(!(page = __get_free_page(GFP_KERNEL)))
return -ENOMEM;
type = inode->i_ino;
pid = type >> 16;
@@ -837,12 +857,12 @@ static ssize_t proc_mca_read( struct file* file,
start = 0;
dp = (struct proc_dir_entry *) inode->u.generic_ip;
length = mca_fill((char *) page, pid, type,
- &start, ppos, count);
- if (length < 0) {
+ &start, ppos, count);
+ if(length < 0) {
free_page(page);
return length;
}
- if (start != 0) {
+ if(start != 0) {
/* We have had block-adjusting processing! */
copy_to_user(buf, start, length);
@@ -851,11 +871,11 @@ static ssize_t proc_mca_read( struct file* file,
} else {
/* Static 4kB (or whatever) block capacity */
- if (*ppos >= length) {
+ if(*ppos >= length) {
free_page(page);
return 0;
}
- if (count + *ppos > length)
+ if(count + *ppos > length)
count = length - *ppos;
end = count + *ppos;
copy_to_user(buf, (char *) page + *ppos, count);
diff --git a/arch/i386/kernel/mtrr.c b/arch/i386/kernel/mtrr.c
index 16c767b4a..0d71d8bb5 100644
--- a/arch/i386/kernel/mtrr.c
+++ b/arch/i386/kernel/mtrr.c
@@ -132,6 +132,70 @@
Fixed harmless compiler warning in include/asm-i386/mtrr.h
Fixed version numbering and history for v1.23 -> v1.24.
v1.26
+ 19990118 Richard Gooch <rgooch@atnf.csiro.au>
+ PLACEHOLDER.
+ v1.27
+ 19990123 Richard Gooch <rgooch@atnf.csiro.au>
+ Changed locking to spin with reschedule.
+ Made use of new <smp_call_function>.
+ v1.28
+ 19990201 Zoltan Boszormenyi <zboszor@mol.hu>
+ Extended the driver to be able to use Cyrix style ARRs.
+ 19990204 Richard Gooch <rgooch@atnf.csiro.au>
+ Restructured Cyrix support.
+ v1.29
+ 19990204 Zoltan Boszormenyi <zboszor@mol.hu>
+ Refined ARR support: enable MAPEN in set_mtrr_prepare()
+ and disable MAPEN in set_mtrr_done().
+ 19990205 Richard Gooch <rgooch@atnf.csiro.au>
+ Minor cleanups.
+ v1.30
+ 19990208 Zoltan Boszormenyi <zboszor@mol.hu>
+ Protect plain 6x86s (and other processors without the
+ Page Global Enable feature) against accessing CR4 in
+ set_mtrr_prepare() and set_mtrr_done().
+ 19990210 Richard Gooch <rgooch@atnf.csiro.au>
+ Turned <set_mtrr_up> and <get_mtrr> into function pointers.
+ v1.31
+ 19990212 Zoltan Boszormenyi <zboszor@mol.hu>
+ Major rewrite of cyrix_arr_init(): do not touch ARRs,
+ leave them as the BIOS have set them up.
+ Enable usage of all 8 ARRs.
+ Avoid multiplications by 3 everywhere and other
+ code clean ups/speed ups.
+ 19990213 Zoltan Boszormenyi <zboszor@mol.hu>
+ Set up other Cyrix processors identical to the boot cpu.
+ Since Cyrix don't support Intel APIC, this is l'art pour l'art.
+ Weigh ARRs by size:
+ If size <= 32M is given, set up ARR# we were given.
+ If size > 32M is given, set up ARR7 only if it is free,
+ fail otherwise.
+ 19990214 Zoltan Boszormenyi <zboszor@mol.hu>
+ Also check for size >= 256K if we are to set up ARR7,
+ mtrr_add() returns the value it gets from set_mtrr()
+ 19990218 Zoltan Boszormenyi <zboszor@mol.hu>
+ Remove Cyrix "coma bug" workaround from here.
+ Moved to linux/arch/i386/kernel/setup.c and
+ linux/include/asm-i386/bugs.h
+ 19990228 Richard Gooch <rgooch@atnf.csiro.au>
+ Added #ifdef CONFIG_DEVFS_FS
+ Added MTRRIOC_KILL_ENTRY ioctl(2)
+ Trap for counter underflow in <mtrr_file_del>.
+ Trap for 4 MiB aligned regions for PPro, stepping <= 7.
+ 19990301 Richard Gooch <rgooch@atnf.csiro.au>
+ Created <get_free_region> hook.
+ 19990305 Richard Gooch <rgooch@atnf.csiro.au>
+ Temporarily disable AMD support now MTRR capability flag is set.
+ v1.32
+ 19990308 Zoltan Boszormenyi <zboszor@mol.hu>
+ Adjust my changes (19990212-19990218) to Richard Gooch's
+ latest changes. (19990228-19990305)
+ v1.33
+ 19990309 Richard Gooch <rgooch@atnf.csiro.au>
+ Fixed typo in <printk> message.
+ 19990310 Richard Gooch <rgooch@atnf.csiro.au>
+ Support K6-II/III based on Alan Cox's <alan@redhat.com> patches.
+ v1.34
*/
#include <linux/types.h>
#include <linux/errno.h>
@@ -163,11 +227,12 @@
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
+#include <asm/msr.h>
#include <asm/hardirq.h>
#include "irq.h"
-#define MTRR_VERSION "1.26 (19981001)"
+#define MTRR_VERSION "1.34 (19990310)"
#define TRUE 1
#define FALSE 0
@@ -197,7 +262,7 @@
# define MTRR_CHANGE_MASK_DEFTYPE 0x04
#endif
-/* In the processor's MTRR interface, the MTRR type is always held in
+/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
typedef u8 mtrr_type;
@@ -207,9 +272,12 @@ typedef u8 mtrr_type;
#ifdef __SMP__
# define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
-# define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type,TRUE)
+# define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
+ TRUE)
#endif
+#define spin_lock_reschedule(lock) while (!spin_trylock(lock)) schedule ();
+
#ifndef CONFIG_PROC_FS
# define compute_ascii() while (0)
#endif
@@ -233,49 +301,30 @@ struct set_mtrr_context
unsigned long deftype_lo;
unsigned long deftype_hi;
unsigned long cr4val;
+ unsigned long ccr3;
};
-/*
- * Access to machine-specific registers (available on 586 and better only)
- * Note: the rd* operations modify the parameters directly (without using
- * pointer indirection), this allows gcc to optimize better
- */
-#define rdmsr(msr,val1,val2) \
- __asm__ __volatile__("rdmsr" \
- : "=a" (val1), "=d" (val2) \
- : "c" (msr))
-
-#define wrmsr(msr,val1,val2) \
- __asm__ __volatile__("wrmsr" \
- : /* no outputs */ \
- : "c" (msr), "a" (val1), "d" (val2))
-#define rdtsc(low,high) \
- __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
-
-#define rdpmc(counter,low,high) \
- __asm__ __volatile__("rdpmc" \
- : "=a" (low), "=d" (high) \
- : "c" (counter))
-
-
-/* Put the processor into a state where MTRRs can be safely set. */
-static void set_mtrr_prepare(struct set_mtrr_context *ctxt)
+/* Put the processor into a state where MTRRs can be safely set */
+static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
{
unsigned long tmp;
- /* disable interrupts locally */
+ /* Disable interrupts locally */
__save_flags (ctxt->flags); __cli ();
- /* save value of CR4 and clear Page Global Enable (bit 7) */
- asm volatile ("movl %%cr4, %0\n\t"
- "movl %0, %1\n\t"
- "andb $0x7f, %b1\n\t"
- "movl %1, %%cr4\n\t"
- : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) return;
+
+ /* Save value of CR4 and clear Page Global Enable (bit 7) */
+ if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+ asm volatile ("movl %%cr4, %0\n\t"
+ "movl %0, %1\n\t"
+ "andb $0x7f, %b1\n\t"
+ "movl %1, %%cr4\n\t"
+ : "=r" (ctxt->cr4val), "=q" (tmp) : : "memory");
- /* disable and flush caches. Note that wbinvd flushes the TLBs as
- a side-effect. */
+ /* Disable and flush caches. Note that wbinvd flushes the TLBs as
+ a side-effect */
asm volatile ("movl %%cr0, %0\n\t"
"orl $0x40000000, %0\n\t"
"wbinvd\n\t"
@@ -283,64 +332,108 @@ static void set_mtrr_prepare(struct set_mtrr_context *ctxt)
"wbinvd\n\t"
: "=r" (tmp) : : "memory");
- /* disable MTRRs, and set the default type to uncached. */
- rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
- wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ /* Disable MTRRs, and set the default type to uncached */
+ rdmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
+ wrmsr (MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
+ break;
+ case X86_VENDOR_CYRIX:
+ tmp = getCx86 (CX86_CCR3);
+ setCx86 (CX86_CCR3, (tmp & 0x0f) | 0x10);
+ ctxt->ccr3 = tmp;
+ break;
+ }
} /* End Function set_mtrr_prepare */
-
-/* Restore the processor after a set_mtrr_prepare */
-static void set_mtrr_done(struct set_mtrr_context *ctxt)
+/* Restore the processor after a set_mtrr_prepare */
+static void set_mtrr_done (struct set_mtrr_context *ctxt)
{
unsigned long tmp;
- /* flush caches and TLBs */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ {
+ __restore_flags (ctxt->flags);
+ return;
+ }
+
+ /* Flush caches and TLBs */
asm volatile ("wbinvd" : : : "memory" );
- /* restore MTRRdefType */
- wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
+ /* Restore MTRRdefType */
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ wrmsr (MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
+ break;
+ case X86_VENDOR_CYRIX:
+ setCx86 (CX86_CCR3, ctxt->ccr3);
+ break;
+ }
- /* enable caches */
+ /* Enable caches */
asm volatile ("movl %%cr0, %0\n\t"
"andl $0xbfffffff, %0\n\t"
"movl %0, %%cr0\n\t"
: "=r" (tmp) : : "memory");
- /* restore value of CR4 */
- asm volatile ("movl %0, %%cr4"
- : : "r" (ctxt->cr4val) : "memory");
+ /* Restore value of CR4 */
+ if (boot_cpu_data.x86_capability & X86_FEATURE_PGE)
+ asm volatile ("movl %0, %%cr4"
+ : : "r" (ctxt->cr4val) : "memory");
- /* re-enable interrupts locally (if enabled previously) */
+ /* Re-enable interrupts locally (if enabled previously) */
__restore_flags (ctxt->flags);
} /* End Function set_mtrr_done */
-
-/* this function returns the number of variable MTRRs */
+/* This function returns the number of variable MTRRs */
static unsigned int get_num_var_ranges (void)
{
unsigned long config, dummy;
- rdmsr(MTRRcap_MSR, config, dummy);
- return (config & 0xff);
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ rdmsr (MTRRcap_MSR, config, dummy);
+ return (config & 0xff);
+ /*break;*/
+ case X86_VENDOR_CYRIX:
+ /* Cyrix have 8 ARRs */
+ return 8;
+ /*break;*/
+ case X86_VENDOR_AMD:
+ return 2;
+ /*break;*/
+ }
+ return 0;
} /* End Function get_num_var_ranges */
-
-/* non-zero if we have the write-combining memory type. */
+/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb (void)
{
unsigned long config, dummy;
- rdmsr(MTRRcap_MSR, config, dummy);
- return (config & (1<<10));
-}
-
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ rdmsr (MTRRcap_MSR, config, dummy);
+ return (config & (1<<10));
+ /*break;*/
+ case X86_VENDOR_CYRIX:
+ case X86_VENDOR_AMD:
+ return 1;
+ /*break;*/
+ }
+ return 0;
+} /* End Function have_wrcomb */
-static void get_mtrr (unsigned int reg, unsigned long *base,
- unsigned long *size, mtrr_type *type)
+static void intel_get_mtrr (unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type)
{
unsigned long dummy, mask_lo, base_lo;
- rdmsr(MTRRphysMask_MSR(reg), mask_lo, dummy);
+ rdmsr (MTRRphysMask_MSR(reg), mask_lo, dummy);
if ((mask_lo & 0x800) == 0) {
/* Invalid (i.e. free) range. */
*base = 0;
@@ -364,11 +457,104 @@ static void get_mtrr (unsigned int reg, unsigned long *base,
*base = (base_lo & 0xfffff000UL);
*type = (base_lo & 0xff);
-} /* End Function get_mtrr */
+} /* End Function intel_get_mtrr */
+
+static void cyrix_get_arr (unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type)
+{
+ unsigned long flags;
+ unsigned char arr, ccr3, rcr, shift;
+
+ arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
+
+ /* Save flags and disable interrupts */
+ __save_flags (flags); __cli ();
+ ccr3 = getCx86 (CX86_CCR3);
+ setCx86 (CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
+ ((unsigned char *) base)[3] = getCx86 (arr);
+ ((unsigned char *) base)[2] = getCx86 (arr+1);
+ ((unsigned char *) base)[1] = getCx86 (arr+2);
+ rcr = getCx86(CX86_RCR_BASE + reg);
+ setCx86 (CX86_CCR3, ccr3); /* disable MAPEN */
+
+ /* Enable interrupts if it was enabled previously */
+ __restore_flags (flags);
+
+ shift = ((unsigned char *) base)[1] & 0x0f;
+ *base &= 0xfffff000UL;
+
+ /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
+ * Note: shift==0xf means 4G, this is unsupported.
+ */
+ if (shift)
+ *size = (reg < 7 ? 0x800UL : 0x20000UL) << shift;
+ else
+ *size = 0;
+
+ /* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
+ if (reg < 7) {
+ switch (rcr) {
+ case 1: *type = MTRR_TYPE_UNCACHABLE; break;
+ case 8: *type = MTRR_TYPE_WRBACK; break;
+ case 9: *type = MTRR_TYPE_WRCOMB; break;
+ case 24:
+ default: *type = MTRR_TYPE_WRTHROUGH; break;
+ }
+ } else {
+ switch (rcr) {
+ case 0: *type = MTRR_TYPE_UNCACHABLE; break;
+ case 8: *type = MTRR_TYPE_WRCOMB; break;
+ case 9: *type = MTRR_TYPE_WRBACK; break;
+ case 25:
+ default: *type = MTRR_TYPE_WRTHROUGH; break;
+ }
+ }
+} /* End Function cyrix_get_arr */
-static void set_mtrr_up (unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type, int do_safe)
+static void amd_get_mtrr (unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type)
+{
+ unsigned long low, high;
+
+ rdmsr (0xC0000085, low, high);
+ /* Upper dword is region 1, lower is region 0 */
+ if (reg == 1) low = high;
+ /* The base masks off on the right alignment */
+ *base = low & 0xFFFE0000;
+ *type = 0;
+ if (low & 1) *type = MTRR_TYPE_UNCACHABLE;
+ if (low & 2) *type = MTRR_TYPE_WRCOMB;
+ if ( !(low & 3) )
+ {
+ *size = 0;
+ return;
+ }
+ /*
+ * This needs a little explaining. The size is stored as an
+ * inverted mask of bits of 128K granularity 15 bits long offset
+ * 2 bits
+ *
+ * So to get a size we do invert the mask and add 1 to the lowest
+ * mask bit (4 as its 2 bits in). This gives us a size we then shift
+ * to turn into 128K blocks
+ *
+ * eg 111 1111 1111 1100 is 512K
+ *
+ * invert 000 0000 0000 0011
+ * +1 000 0000 0000 0100
+ * *128K ...
+ */
+ low = (~low) & 0x1FFFC;
+ *size = (low + 4) << 15;
+ return;
+} /* End Function amd_get_mtrr */
+
+static void (*get_mtrr) (unsigned int reg, unsigned long *base,
+ unsigned long *size, mtrr_type *type) = NULL;
+
+static void intel_set_mtrr_up (unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type, int do_safe)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
@@ -376,6 +562,7 @@ static void set_mtrr_up (unsigned int reg, unsigned long base,
<type> The type of the region.
<do_safe> If TRUE, do the change safely. If FALSE, safety measures should
be done externally.
+ [RETURNS] Nothing.
*/
{
struct set_mtrr_context ctxt;
@@ -393,8 +580,92 @@ static void set_mtrr_up (unsigned int reg, unsigned long base,
wrmsr (MTRRphysMask_MSR (reg), ~(size - 1) | 0x800, 0);
}
if (do_safe) set_mtrr_done (&ctxt);
-} /* End Function set_mtrr_up */
+} /* End Function intel_set_mtrr_up */
+
+static void cyrix_set_arr_up (unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type, int do_safe)
+{
+ struct set_mtrr_context ctxt;
+ unsigned char arr, arr_type, arr_size;
+
+ arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
+
+ /* count down from 32M (ARR0-ARR6) or from 2G (ARR7) */
+ size >>= (reg < 7 ? 12 : 18);
+ size &= 0x7fff; /* make sure arr_size <= 14 */
+ for(arr_size = 0; size; arr_size++, size >>= 1);
+
+ if (reg<7) {
+ switch (type) {
+ case MTRR_TYPE_UNCACHABLE: arr_type = 1; break;
+ case MTRR_TYPE_WRCOMB: arr_type = 9; break;
+ case MTRR_TYPE_WRTHROUGH: arr_type = 24; break;
+ default: arr_type = 8; break;
+ }
+ } else {
+ switch (type) {
+ case MTRR_TYPE_UNCACHABLE: arr_type = 0; break;
+ case MTRR_TYPE_WRCOMB: arr_type = 8; break;
+ case MTRR_TYPE_WRTHROUGH: arr_type = 25; break;
+ default: arr_type = 9; break;
+ }
+ }
+
+ if (do_safe) set_mtrr_prepare (&ctxt);
+ setCx86(arr, ((unsigned char *) &base)[3]);
+ setCx86(arr+1, ((unsigned char *) &base)[2]);
+ setCx86(arr+2, (((unsigned char *) &base)[1]) | arr_size);
+ setCx86(CX86_RCR_BASE + reg, arr_type);
+ if (do_safe) set_mtrr_done (&ctxt);
+} /* End Function cyrix_set_arr_up */
+
+static void amd_set_mtrr_up (unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type, int do_safe)
+/* [SUMMARY] Set variable MTRR register on the local CPU.
+ <reg> The register to set.
+ <base> The base address of the region.
+ <size> The size of the region. If this is 0 the region is disabled.
+ <type> The type of the region.
+ <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
+ be done externally.
+ [RETURNS] Nothing.
+*/
+{
+ u32 low, high;
+ struct set_mtrr_context ctxt;
+
+ if (do_safe) set_mtrr_prepare (&ctxt);
+ /*
+ * Low is MTRR0 , High MTRR 1
+ */
+ rdmsr (0xC0000085, low, high);
+ /*
+ * Blank to disable
+ */
+ if (size == 0)
+ *(reg ? &high : &low) = 0;
+ else
+ /* Set the register to the base (already shifted for us), the
+ type (off by one) and an inverted bitmask of the size
+
+ The size is the only odd bit. We are fed say 512K
+ We invert this and we get 111 1111 1111 1011 but
+ if you subtract one and invert you get the desired
+ 111 1111 1111 1100 mask
+ */
+ *(reg ? &high : &low)=(((~(size-1))>>15)&0x0001FFFC)|base|(type+1);
+ /*
+ * The writeback rule is quite specific. See the manual. Its
+ * disable local interrupts, write back the cache, set the mtrr
+ */
+ __asm__ __volatile__ ("wbinvd" : : : "memory");
+ wrmsr (0xC0000085, low, high);
+ if (do_safe) set_mtrr_done (&ctxt);
+} /* End Function amd_set_mtrr_up */
+static void (*set_mtrr_up) (unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type,
+ int do_safe) = NULL;
#ifdef __SMP__
@@ -407,7 +678,7 @@ struct mtrr_var_range
};
-/* Get the MSR pair relating to a var range. */
+/* Get the MSR pair relating to a var range */
__initfunc(static void get_mtrr_var_range (unsigned int index,
struct mtrr_var_range *vr))
{
@@ -416,8 +687,8 @@ __initfunc(static void get_mtrr_var_range (unsigned int index,
} /* End Function get_mtrr_var_range */
-/* Set the MSR pair relating to a var range. Returns TRUE if
- changes are made. */
+/* Set the MSR pair relating to a var range. Returns TRUE if
+ changes are made */
__initfunc(static int set_mtrr_var_range_testing (unsigned int index,
struct mtrr_var_range *vr))
{
@@ -441,8 +712,7 @@ __initfunc(static int set_mtrr_var_range_testing (unsigned int index,
}
return changed;
-}
-
+} /* End Function set_mtrr_var_range_testing */
__initfunc(static void get_fixed_ranges(mtrr_type *frs))
{
@@ -456,8 +726,7 @@ __initfunc(static void get_fixed_ranges(mtrr_type *frs))
for (i = 0; i < 8; i++)
rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i*2], p[7 + i*2]);
-}
-
+} /* End Function get_fixed_ranges */
__initfunc(static int set_fixed_ranges_testing(mtrr_type *frs))
{
@@ -487,10 +756,8 @@ __initfunc(static int set_fixed_ranges_testing(mtrr_type *frs))
changed = TRUE;
}
}
-
return changed;
-}
-
+} /* End Function set_fixed_ranges_testing */
struct mtrr_state
{
@@ -502,7 +769,7 @@ struct mtrr_state
};
-/* Grab all of the MTRR state for this CPU into *state. */
+/* Grab all of the MTRR state for this CPU into *state */
__initfunc(static void get_mtrr_state(struct mtrr_state *state))
{
unsigned int nvrs, i;
@@ -511,22 +778,22 @@ __initfunc(static void get_mtrr_state(struct mtrr_state *state))
nvrs = state->num_var_ranges = get_num_var_ranges();
vrs = state->var_ranges
- = kmalloc(nvrs * sizeof(struct mtrr_var_range), GFP_KERNEL);
+ = kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
if (vrs == NULL)
nvrs = state->num_var_ranges = 0;
for (i = 0; i < nvrs; i++)
- get_mtrr_var_range(i, &vrs[i]);
+ get_mtrr_var_range (i, &vrs[i]);
- get_fixed_ranges(state->fixed_ranges);
+ get_fixed_ranges (state->fixed_ranges);
- rdmsr(MTRRdefType_MSR, lo, dummy);
+ rdmsr (MTRRdefType_MSR, lo, dummy);
state->def_type = (lo & 0xff);
state->enabled = (lo & 0xc00) >> 10;
} /* End Function get_mtrr_state */
-/* Free resources associated with a struct mtrr_state */
+/* Free resources associated with a struct mtrr_state */
__initfunc(static void finalize_mtrr_state(struct mtrr_state *state))
{
if (state->var_ranges) kfree (state->var_ranges);
@@ -546,14 +813,14 @@ __initfunc(static unsigned long set_mtrr_state (struct mtrr_state *state,
unsigned long change_mask = 0;
for (i = 0; i < state->num_var_ranges; i++)
- if (set_mtrr_var_range_testing(i, &state->var_ranges[i]))
+ if ( set_mtrr_var_range_testing (i, &state->var_ranges[i]) )
change_mask |= MTRR_CHANGE_MASK_VARIABLE;
- if (set_fixed_ranges_testing(state->fixed_ranges))
+ if ( set_fixed_ranges_testing(state->fixed_ranges) )
change_mask |= MTRR_CHANGE_MASK_FIXED;
- /* set_mtrr_restore restores the old value of MTRRdefType,
- so to set it we fiddle with the saved value. */
+ /* Set_mtrr_restore restores the old value of MTRRdefType,
+ so to set it we fiddle with the saved value */
if ((ctxt->deftype_lo & 0xff) != state->def_type
|| ((ctxt->deftype_lo & 0xc00) >> 10) != state->enabled)
{
@@ -566,76 +833,63 @@ __initfunc(static unsigned long set_mtrr_state (struct mtrr_state *state,
static atomic_t undone_count;
-static void (*handler_func) (struct set_mtrr_context *ctxt, void *info);
-static void *handler_info;
static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;
-static void sync_handler (void)
+struct set_mtrr_data
+{
+ unsigned long smp_base;
+ unsigned long smp_size;
+ unsigned int smp_reg;
+ mtrr_type smp_type;
+};
+
+static void ipi_handler (void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
{
+ struct set_mtrr_data *data = info;
struct set_mtrr_context ctxt;
set_mtrr_prepare (&ctxt);
- /* Notify master CPU that I'm at the barrier and then wait */
+ /* Notify master that I've flushed and disabled my cache */
atomic_dec (&undone_count);
while (wait_barrier_execute) barrier ();
/* The master has cleared me to execute */
- (*handler_func) (&ctxt, handler_info);
+ (*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
+ data->smp_type, FALSE);
/* Notify master CPU that I've executed the function */
atomic_dec (&undone_count);
/* Wait for master to clear me to enable cache and return */
while (wait_barrier_cache_enable) barrier ();
set_mtrr_done (&ctxt);
-} /* End Function sync_handler */
-
-static void do_all_cpus (void (*handler) (struct set_mtrr_context *ctxt,
- void *info),
- void *info, int local)
-/* [SUMMARY] Execute a function on all CPUs, with caches flushed and disabled.
- [PURPOSE] This function will synchronise all CPUs, flush and disable caches
- on all CPUs, then call a specified function. When the specified function
- finishes on all CPUs, caches are enabled on all CPUs.
- <handler> The function to execute.
- <info> An arbitrary information pointer which is passed to <<handler>>.
- <local> If TRUE <<handler>> is executed locally.
- [RETURNS] Nothing.
-*/
+} /* End Function ipi_handler */
+
+static void set_mtrr_smp (unsigned int reg, unsigned long base,
+ unsigned long size, mtrr_type type)
{
- unsigned long timeout;
+ struct set_mtrr_data data;
struct set_mtrr_context ctxt;
- mtrr_hook = sync_handler;
- handler_func = handler;
- handler_info = info;
+ data.smp_reg = reg;
+ data.smp_base = base;
+ data.smp_size = size;
+ data.smp_type = type;
wait_barrier_execute = TRUE;
wait_barrier_cache_enable = TRUE;
- /* Send a message to all other CPUs and wait for them to enter the
- barrier */
atomic_set (&undone_count, smp_num_cpus - 1);
- smp_send_mtrr();
- /* Wait for it to be done */
- timeout = jiffies + JIFFIE_TIMEOUT;
- while ( (atomic_read (&undone_count) > 0) &&
- time_before(jiffies, timeout) )
- barrier ();
- if (atomic_read (&undone_count) > 0)
- {
+ /* Flush and disable the local CPU's cache and start the ball rolling on
+ other CPUs */
+ set_mtrr_prepare (&ctxt);
+ if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
panic ("mtrr: timed out waiting for other CPUs\n");
- }
- mtrr_hook = NULL;
- /* All other CPUs should be waiting for the barrier, with their caches
- already flushed and disabled. Prepare for function completion
- notification */
+ /* Wait for all other CPUs to flush and disable their caches */
+ while (atomic_read (&undone_count) > 0) barrier ();
+ /* Set up for completion wait and then release other CPUs to change MTRRs*/
atomic_set (&undone_count, smp_num_cpus - 1);
- /* Flush and disable the local CPU's cache and release the barier, which
- should cause the other CPUs to execute the function. Also execute it
- locally if required */
- set_mtrr_prepare (&ctxt);
wait_barrier_execute = FALSE;
- if (local) (*handler) (&ctxt, info);
+ (*set_mtrr_up) (reg, base, size, type, FALSE);
/* Now wait for other CPUs to complete the function */
while (atomic_read (&undone_count) > 0) barrier ();
/* Now all CPUs should have finished the function. Release the barrier to
@@ -643,41 +897,10 @@ static void do_all_cpus (void (*handler) (struct set_mtrr_context *ctxt,
then enable the local cache and return */
wait_barrier_cache_enable = FALSE;
set_mtrr_done (&ctxt);
- handler_func = NULL;
- handler_info = NULL;
-} /* End Function do_all_cpus */
-
-
-struct set_mtrr_data
-{
- unsigned long smp_base;
- unsigned long smp_size;
- unsigned int smp_reg;
- mtrr_type smp_type;
-};
-
-static void set_mtrr_handler (struct set_mtrr_context *ctxt, void *info)
-{
- struct set_mtrr_data *data = info;
-
- set_mtrr_up (data->smp_reg, data->smp_base, data->smp_size, data->smp_type,
- FALSE);
-} /* End Function set_mtrr_handler */
-
-static void set_mtrr_smp (unsigned int reg, unsigned long base,
- unsigned long size, mtrr_type type)
-{
- struct set_mtrr_data data;
-
- data.smp_reg = reg;
- data.smp_base = base;
- data.smp_size = size;
- data.smp_type = type;
- do_all_cpus (set_mtrr_handler, &data, TRUE);
} /* End Function set_mtrr_smp */
-/* Some BIOS's are fucked and don't set all MTRRs the same! */
+/* Some BIOS's are fucked and don't set all MTRRs the same! */
__initfunc(static void mtrr_state_warn (unsigned long mask))
{
if (!mask) return;
@@ -720,6 +943,58 @@ static void init_table (void)
#endif
} /* End Function init_table */
+static int generic_get_free_region (unsigned long base, unsigned long size)
+/* [SUMMARY] Get a free MTRR.
+ <base> The starting (base) address of the region.
+ <size> The size (in bytes) of the region.
+ [RETURNS] The index of the region on success, else -1 on error.
+*/
+{
+ int i, max;
+ mtrr_type ltype;
+ unsigned long lbase, lsize;
+
+ max = get_num_var_ranges ();
+ for (i = 0; i < max; ++i)
+ {
+ (*get_mtrr) (i, &lbase, &lsize, &ltype);
+ if (lsize < 1) return i;
+ }
+ return -ENOSPC;
+} /* End Function generic_get_free_region */
+
+static int cyrix_get_free_region (unsigned long base, unsigned long size)
+/* [SUMMARY] Get a free ARR.
+ <base> The starting (base) address of the region.
+ <size> The size (in bytes) of the region.
+ [RETURNS] The index of the region on success, else -1 on error.
+*/
+{
+ int i;
+ mtrr_type ltype;
+ unsigned long lbase, lsize;
+
+ /* If we are to set up a region >32M then look at ARR7 immediately */
+ if (size > 0x2000000UL) {
+ cyrix_get_arr (7, &lbase, &lsize, &ltype);
+ if (lsize < 1) return 7;
+ /* else try ARR0-ARR6 first */
+ } else {
+ for (i = 0; i < 7; i++)
+ {
+ cyrix_get_arr (i, &lbase, &lsize, &ltype);
+ if (lsize < 1) return i;
+ }
+ /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */
+ cyrix_get_arr (i, &lbase, &lsize, &ltype);
+ if ((lsize < 1) && (size >= 0x40000)) return i;
+ }
+ return -ENOSPC;
+} /* End Function cyrix_get_free_region */
+
+static int (*get_free_region) (unsigned long base,
+ unsigned long size) = generic_get_free_region;
+
int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
char increment)
/* [SUMMARY] Add an MTRR entry.
@@ -738,28 +1013,57 @@ int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
unsigned long lbase, lsize, last;
if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
- if ( (base & 0xfff) || (size & 0xfff) )
+ switch (boot_cpu_data.x86_vendor)
{
- printk ("mtrr: size and base must be multiples of 4kB\n");
- printk ("mtrr: size: %lx base: %lx\n", size, base);
- return -EINVAL;
- }
- if (base + size < 0x100000)
- {
- printk ("mtrr: cannot set region below 1 MByte (0x%lx,0x%lx)\n",
- base, size);
- return -EINVAL;
- }
- /* Check upper bits of base and last are equal and lower bits are 0 for
- base and 1 for last */
- last = base + size - 1;
- for (lbase = base; !(lbase & 1) && (last & 1);
- lbase = lbase >> 1, last = last >> 1);
- if (lbase != last)
- {
- printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
- base, size);
+ case X86_VENDOR_INTEL:
+ /* For Intel PPro stepping <= 7, must be 4 MiB aligned */
+ if ( (boot_cpu_data.x86 == 6) && (boot_cpu_data.x86_model == 1) &&
+ (boot_cpu_data.x86_mask <= 7) && ( base & ( (1 << 22) - 1 ) ) )
+ {
+ printk ("mtrr: base(0x%lx) is not 4 MiB aligned\n", base);
+ return -EINVAL;
+ }
+ /* Fall through */
+ case X86_VENDOR_CYRIX:
+ if ( (base & 0xfff) || (size & 0xfff) )
+ {
+ printk ("mtrr: size and base must be multiples of 4 kiB\n");
+ printk ("mtrr: size: %lx base: %lx\n", size, base);
+ return -EINVAL;
+ }
+ if (base + size < 0x100000)
+ {
+ printk ("mtrr: cannot set region below 1 MiB (0x%lx,0x%lx)\n",
+ base, size);
+ return -EINVAL;
+ }
+ /* Check upper bits of base and last are equal and lower bits are 0
+ for base and 1 for last */
+ last = base + size - 1;
+ for (lbase = base; !(lbase & 1) && (last & 1);
+ lbase = lbase >> 1, last = last >> 1);
+ if (lbase != last)
+ {
+ printk ("mtrr: base(0x%lx) is not aligned on a size(0x%lx) boundary\n",
+ base, size);
+ return -EINVAL;
+ }
+ break;
+ case X86_VENDOR_AMD:
+ /* Apply the K6 block alignment and size rules
+ In order
+ o Uncached or gathering only
+ o 128K or bigger block
+ o Power of 2 block
+ o base suitably aligned to the power
+ */
+ if (type > MTRR_TYPE_WRCOMB || size < (1 << 17) ||
+ (size & ~(size-1))-size || (base & (size-1)))
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
+ /*break;*/
}
if (type >= MTRR_NUM_TYPES)
{
@@ -775,10 +1079,10 @@ int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
increment = increment ? 1 : 0;
max = get_num_var_ranges ();
/* Search for existing MTRR */
- spin_lock (&main_lock);
+ spin_lock_reschedule (&main_lock);
for (i = 0; i < max; ++i)
{
- get_mtrr (i, &lbase, &lsize, &ltype);
+ (*get_mtrr) (i, &lbase, &lsize, &ltype);
if (base >= lbase + lsize) continue;
if ( (base < lbase) && (base + size <= lbase) ) continue;
/* At this point we know there is some kind of overlap/enclosure */
@@ -804,19 +1108,18 @@ int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
return i;
}
/* Search for an empty MTRR */
- for (i = 0; i < max; ++i)
+ i = (*get_free_region) (base, size);
+ if (i < 0)
{
- get_mtrr (i, &lbase, &lsize, &ltype);
- if (lsize > 0) continue;
- set_mtrr (i, base, size, type);
- usage_table[i] = 1;
- compute_ascii ();
spin_unlock (&main_lock);
+ printk ("mtrr: no more MTRRs available\n");
return i;
}
+ set_mtrr (i, base, size, type);
+ usage_table[i] = 1;
+ compute_ascii ();
spin_unlock (&main_lock);
- printk ("mtrr: no more MTRRs available\n");
- return -ENOSPC;
+ return i;
} /* End Function mtrr_add */
int mtrr_del (int reg, unsigned long base, unsigned long size)
@@ -836,13 +1139,13 @@ int mtrr_del (int reg, unsigned long base, unsigned long size)
if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return -ENODEV;
max = get_num_var_ranges ();
- spin_lock (&main_lock);
+ spin_lock_reschedule (&main_lock);
if (reg < 0)
{
/* Search for existing MTRR */
for (i = 0; i < max; ++i)
{
- get_mtrr (i, &lbase, &lsize, &ltype);
+ (*get_mtrr) (i, &lbase, &lsize, &ltype);
if ( (lbase == base) && (lsize == size) )
{
reg = i;
@@ -862,7 +1165,7 @@ int mtrr_del (int reg, unsigned long base, unsigned long size)
printk ("mtrr: register: %d too big\n", reg);
return -EINVAL;
}
- get_mtrr (reg, &lbase, &lsize, &ltype);
+ (*get_mtrr) (reg, &lbase, &lsize, &ltype);
if (lsize < 1)
{
spin_unlock (&main_lock);
@@ -913,7 +1216,9 @@ static int mtrr_file_del (unsigned long base, unsigned long size,
reg = mtrr_del (-1, base, size);
if (reg < 0) return reg;
- if (fcount != NULL) --fcount[reg];
+ if (fcount == NULL) return reg;
+ if (fcount[reg] < 1) return -EINVAL;
+ --fcount[reg];
return reg;
} /* End Function mtrr_file_del */
@@ -1019,11 +1324,18 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
err = mtrr_file_del (sentry.base, sentry.size, file);
if (err < 0) return err;
break;
+ case MTRRIOC_KILL_ENTRY:
+ if ( !suser () ) return -EPERM;
+ if ( copy_from_user (&sentry, (void *) arg, sizeof sentry) )
+ return -EFAULT;
+ err = mtrr_del (-1, sentry.base, sentry.size);
+ if (err < 0) return err;
+ break;
case MTRRIOC_GET_ENTRY:
if ( copy_from_user (&gentry, (void *) arg, sizeof gentry) )
return -EFAULT;
if ( gentry.regnum >= get_num_var_ranges () ) return -EINVAL;
- get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type);
+ (*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
gentry.type = type;
if ( copy_to_user ( (void *) arg, &gentry, sizeof gentry) )
return -EFAULT;
@@ -1115,7 +1427,7 @@ static void compute_ascii (void)
max = get_num_var_ranges ();
for (i = 0; i < max; i++)
{
- get_mtrr (i, &base, &size, &type);
+ (*get_mtrr) (i, &base, &size, &type);
if (size < 1) usage_table[i] = 0;
else
{
@@ -1148,23 +1460,165 @@ EXPORT_SYMBOL(mtrr_del);
#ifdef __SMP__
+typedef struct {
+ unsigned long base;
+ unsigned long size;
+ mtrr_type type;
+} arr_state_t;
+
+arr_state_t arr_state[8] __initdata = {
+ {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL},
+ {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}, {0UL,0UL,0UL}
+};
+
+unsigned char ccr_state[7] __initdata = { 0, 0, 0, 0, 0, 0, 0 };
+
+__initfunc(static void cyrix_arr_init_secondary(void))
+{
+ struct set_mtrr_context ctxt;
+ int i;
+
+ set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */
+
+ /* the CCRs are not contiguous */
+ for(i=0; i<4; i++) setCx86(CX86_CCR0 + i, ccr_state[i]);
+ for( ; i<7; i++) setCx86(CX86_CCR4 + i, ccr_state[i]);
+ for(i=0; i<8; i++)
+ cyrix_set_arr_up(i,
+ arr_state[i].base, arr_state[i].size, arr_state[i].type, FALSE);
+
+ set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
+} /* End Function cyrix_arr_init_secondary */
+
+#endif
+
+/*
+ * On Cyrix 6x86(MX) and M II the ARR3 is special: it has connection
+ * with the SMM (System Management Mode) mode. So we need the following:
+ * Check whether SMI_LOCK (CCR3 bit 0) is set
+ * if it is set, write a warning message: ARR3 cannot be changed!
+ * (it cannot be changed until the next processor reset)
+ * if it is reset, then we can change it, set all the needed bits:
+ * - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
+ * - disable access to SMM memory (CCR1 bit 2 reset)
+ * - disable SMM mode (CCR1 bit 1 reset)
+ * - disable write protection of ARR3 (CCR6 bit 1 reset)
+ * - (maybe) disable ARR3
+ * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
+ */
+__initfunc(static void cyrix_arr_init(void))
+{
+ struct set_mtrr_context ctxt;
+ unsigned char ccr[7];
+ int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
+#ifdef __SMP__
+ int i;
+#endif
+
+ set_mtrr_prepare (&ctxt); /* flush cache and enable MAPEN */
+
+ /* Save all CCRs locally */
+ ccr[0] = getCx86 (CX86_CCR0);
+ ccr[1] = getCx86 (CX86_CCR1);
+ ccr[2] = getCx86 (CX86_CCR2);
+ ccr[3] = ctxt.ccr3;
+ ccr[4] = getCx86 (CX86_CCR4);
+ ccr[5] = getCx86 (CX86_CCR5);
+ ccr[6] = getCx86 (CX86_CCR6);
+
+ if (ccr[3] & 1)
+ ccrc[3] = 1;
+ else {
+ /* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
+ * access to SMM memory through ARR3 (bit 7).
+ */
+/*
+ if (ccr[1] & 0x80) { ccr[1] &= 0x7f; ccrc[1] |= 0x80; }
+ if (ccr[1] & 0x04) { ccr[1] &= 0xfb; ccrc[1] |= 0x04; }
+ if (ccr[1] & 0x02) { ccr[1] &= 0xfd; ccrc[1] |= 0x02; }
+*/
+ if (ccr[6] & 0x02) {
+ ccr[6] &= 0xfd; ccrc[6] = 1; /* Disable write protection of ARR3. */
+ setCx86 (CX86_CCR6, ccr[6]);
+ }
+ /* Disable ARR3. */
+ /* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
+ }
+ /* If we changed CCR1 in memory, change it in the processor, too. */
+ if (ccrc[1]) setCx86 (CX86_CCR1, ccr[1]);
+
+ /* Enable ARR usage by the processor */
+ if (!(ccr[5] & 0x20)) {
+ ccr[5] |= 0x20; ccrc[5] = 1;
+ setCx86 (CX86_CCR5, ccr[5]);
+ }
+
+#ifdef __SMP__
+ for(i=0; i<7; i++) ccr_state[i] = ccr[i];
+ for(i=0; i<8; i++)
+ cyrix_get_arr(i,
+ &arr_state[i].base, &arr_state[i].size, &arr_state[i].type);
+#endif
+
+ set_mtrr_done (&ctxt); /* flush cache and disable MAPEN */
+
+ if ( ccrc[5] ) printk ("mtrr: ARR usage was not enabled, enabled manually\n");
+ if ( ccrc[3] ) printk ("mtrr: ARR3 cannot be changed\n");
+/*
+ if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
+ if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
+ if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
+*/
+ if ( ccrc[6] ) printk ("mtrr: ARR3 was write protected, unprotected\n");
+} /* End Function cyrix_arr_init */
+
+__initfunc(static void mtrr_setup (void))
+{
+ printk ("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ get_mtrr = intel_get_mtrr;
+ set_mtrr_up = intel_set_mtrr_up;
+ break;
+ case X86_VENDOR_CYRIX:
+ printk ("mtrr: Using Cyrix style ARRs\n");
+ get_mtrr = cyrix_get_arr;
+ set_mtrr_up = cyrix_set_arr_up;
+ get_free_region = cyrix_get_free_region;
+ break;
+ case X86_VENDOR_AMD:
+ get_mtrr = amd_get_mtrr;
+ set_mtrr_up = amd_set_mtrr_up;
+ break;
+ }
+} /* End Function mtrr_setup */
+
+#ifdef __SMP__
+
static volatile unsigned long smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = {0, 0};
__initfunc(void mtrr_init_boot_cpu (void))
{
if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
- printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
-
- get_mtrr_state (&smp_mtrr_state);
+ mtrr_setup ();
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ get_mtrr_state (&smp_mtrr_state);
+ break;
+ case X86_VENDOR_CYRIX:
+ cyrix_arr_init ();
+ break;
+ }
} /* End Function mtrr_init_boot_cpu */
-__initfunc(void mtrr_init_secondary_cpu (void))
+__initfunc(static void intel_mtrr_init_secondary_cpu (void))
{
unsigned long mask, count;
struct set_mtrr_context ctxt;
- if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
@@ -1177,21 +1631,52 @@ __initfunc(void mtrr_init_secondary_cpu (void))
if (mask & 0x01) set_bit (count, &smp_changes_mask);
mask >>= 1;
}
-} /* End Function mtrr_init_secondary_cpu */
+} /* End Function intel_mtrr_init_secondary_cpu */
+__initfunc(void mtrr_init_secondary_cpu (void))
+{
+ if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return;
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ intel_mtrr_init_secondary_cpu ();
+ break;
+ case X86_VENDOR_CYRIX:
+ /* This is _completely theoretical_!
+ * I assume here that one day Cyrix will support Intel APIC.
+ * In reality on non-Intel CPUs we won't even get to this routine.
+ * Hopefully no one will plug two Cyrix processors in a dual P5 board.
+ * :-)
+ */
+ cyrix_arr_init_secondary ();
+ break;
+ default:
+ printk ("mtrr: SMP support incomplete for this vendor\n");
+ break;
+ }
+} /* End Function mtrr_init_secondary_cpu */
#endif /* __SMP__ */
__initfunc(int mtrr_init(void))
{
if ( !(boot_cpu_data.x86_capability & X86_FEATURE_MTRR) ) return 0;
-# ifndef __SMP__
- printk("mtrr: v%s Richard Gooch (rgooch@atnf.csiro.au)\n", MTRR_VERSION);
-# endif
-
# ifdef __SMP__
- finalize_mtrr_state (&smp_mtrr_state);
- mtrr_state_warn (smp_changes_mask);
-# endif /* __SMP__ */
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_INTEL:
+ finalize_mtrr_state (&smp_mtrr_state);
+ mtrr_state_warn (smp_changes_mask);
+ break;
+ }
+# else /* __SMP__ */
+ mtrr_setup ();
+ switch (boot_cpu_data.x86_vendor)
+ {
+ case X86_VENDOR_CYRIX:
+ cyrix_arr_init ();
+ break;
+ }
+# endif /* !__SMP__ */
# ifdef CONFIG_PROC_FS
proc_register (&proc_root, &proc_root_mtrr);
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 00f7e0ba2..ad745f58a 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -111,6 +111,8 @@ static int cpu_idle(void *unused)
/* endless idle loop with no priority at all */
current->priority = 0;
current->counter = -100;
+ init_idle();
+
for (;;) {
if (work)
start_idle = jiffies;
@@ -139,6 +141,8 @@ int cpu_idle(void *unused)
/* endless idle loop with no priority at all */
current->priority = 0;
current->counter = -100;
+ init_idle();
+
while(1) {
if (current_cpu_data.hlt_works_ok && !hlt_counter &&
!current->need_resched)
@@ -316,7 +320,7 @@ void machine_restart(char * __unused)
/* Make sure the first page is mapped to the start of physical memory.
It is normally not mapped, to trap kernel NULL pointer dereferences. */
- pg0[0] = 7;
+ pg0[0] = _PAGE_RW | _PAGE_PRESENT;
/*
* Use `swapper_pg_dir' as our page directory. We bother with
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index b0eca4345..9f5ce58f1 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -354,6 +354,7 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
struct user * dummy = NULL;
+ unsigned long flags;
int i, ret;
lock_kernel();
@@ -385,21 +386,22 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
(current->uid != child->uid) ||
(current->gid != child->egid) ||
(current->gid != child->sgid) ||
+ (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
(current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
goto out;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
goto out;
child->flags |= PF_PTRACED;
- if (child->p_pptr != current) {
- unsigned long flags;
- write_lock_irqsave(&tasklist_lock, flags);
+ write_lock_irqsave(&tasklist_lock, flags);
+ if (child->p_pptr != current) {
REMOVE_LINKS(child);
child->p_pptr = current;
SET_LINKS(child);
- write_unlock_irqrestore(&tasklist_lock, flags);
}
+ write_unlock_irqrestore(&tasklist_lock, flags);
+
send_sig(SIGSTOP, child, 1);
ret = 0;
goto out;
@@ -559,7 +561,6 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
}
case PTRACE_DETACH: { /* detach a process that was attached. */
- unsigned long flags;
long tmp;
ret = -EIO;
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index c3f34270a..af6df1065 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -5,6 +5,10 @@
*
* Enhanced CPU type detection by Mike Jagdis, Patrick St. Jean
* and Martin Mares, November 1997.
+ *
+ * Force Cyrix 6x86(MX) and M II processors to report MTRR capability
+ * and fix against Cyrix "coma bug" by
+ * Zoltan Boszormenyi <zboszor@mol.hu> February 1999.
*/
/*
@@ -39,6 +43,7 @@
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cobalt.h>
+#include <asm/msr.h>
/*
* Machine setup..
@@ -57,6 +62,7 @@ int MCA_bus = 0;
unsigned int machine_id = 0;
unsigned int machine_submodel_id = 0;
unsigned int BIOS_revision = 0;
+unsigned int mca_pentium_flag = 0;
/*
* Setup options
@@ -244,11 +250,6 @@ __initfunc(void setup_arch(char **cmdline_p,
unsigned long memory_start, memory_end;
char c = ' ', *to = command_line, *from = COMMAND_LINE;
int len = 0;
- static unsigned char smptrap=0;
-
- if (smptrap)
- return;
- smptrap=1;
#ifdef CONFIG_VISWS
visws_get_board_type_and_rev();
@@ -381,7 +382,7 @@ __initfunc(void setup_arch(char **cmdline_p,
}
-__initfunc(static int amd_model(struct cpuinfo_x86 *c))
+__initfunc(static int get_model_name(struct cpuinfo_x86 *c))
{
unsigned int n, dummy, *v;
@@ -398,9 +399,87 @@ __initfunc(static int amd_model(struct cpuinfo_x86 *c))
cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
c->x86_model_id[48] = 0;
+ /* Set MTRR capability flag if appropriate */
+ if(boot_cpu_data.x86 !=5)
+ return 1;
+ if((boot_cpu_data.x86_model == 9) ||
+ ((boot_cpu_data.x86_model == 8) &&
+ (boot_cpu_data.x86_mask >= 8)))
+ c->x86_capability |= X86_FEATURE_MTRR;
+
return 1;
}
+__initfunc(static int amd_model(struct cpuinfo_x86 *c))
+{
+ u32 l, h;
+ unsigned long flags;
+ int mbytes = max_mapnr >> (20-PAGE_SHIFT);
+
+ int r=get_model_name(c);
+
+ /*
+ * Now do the cache operations.
+ */
+
+ switch(c->x86)
+ {
+ case 5:
+ if( c->x86_model < 6 )
+ {
+ /* Anyone with a K5 want to fill this in */
+ break;
+ }
+
+ /* K6 with old style WHCR */
+ if( c->x86_model < 8 ||
+ (c->x86_model== 8 && c->x86_mask < 8))
+ {
+ /* We can only write allocate on the low 508Mb */
+ if(mbytes>508)
+ mbytes=508;
+
+ rdmsr(0xC0000082, l, h);
+ if((l&0x0000FFFF)==0)
+ {
+ l=(1<<0)|(mbytes/4);
+ save_flags(flags);
+ __cli();
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+ wrmsr(0xC0000082, l, h);
+ restore_flags(flags);
+ printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+ mbytes);
+
+ }
+ break;
+ }
+ if (c->x86_model == 8 || c->x86_model == 9)
+ {
+ /* The more serious chips .. */
+
+ if(mbytes>4092)
+ mbytes=4092;
+ rdmsr(0xC0000082, l, h);
+ if((l&0xFFFF0000)==0)
+ {
+ l=((mbytes>>2)<<22)|(1<<16);
+ save_flags(flags);
+ __cli();
+ __asm__ __volatile__ ("wbinvd": : :"memory");
+ wrmsr(0xC0000082, l, h);
+ restore_flags(flags);
+ printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+ mbytes);
+ }
+ break;
+ }
+ break;
+ }
+ return r;
+}
+
+
/*
* Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
*/
@@ -507,6 +586,10 @@ __initfunc(static void cyrix_model(struct cpuinfo_x86 *c))
(c->x86_model)++;
} else /* 686 */
p = Cx86_cb+1;
+ /* Emulate MTRRs using Cyrix's ARRs. */
+ c->x86_capability |= X86_FEATURE_MTRR;
+ /* 6x86's contain this bug */
+ c->coma_bug = 1;
break;
case 4: /* MediaGX/GXm */
@@ -517,7 +600,7 @@ __initfunc(static void cyrix_model(struct cpuinfo_x86 *c))
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
- amd_model(c); /* get CPU marketing name */
+ get_model_name(c); /* get CPU marketing name */
c->x86_capability&=~X86_FEATURE_TSC;
return;
}
@@ -531,11 +614,14 @@ __initfunc(static void cyrix_model(struct cpuinfo_x86 *c))
case 5: /* 6x86MX/M II */
if (dir1 > 7) dir0_msn++; /* M II */
+ else c->coma_bug = 1; /* 6x86MX, it has the bug. */
tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
p = Cx86_cb+tmp;
if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
(c->x86_model)++;
+ /* Emulate MTRRs using Cyrix's ARRs. */
+ c->x86_capability |= X86_FEATURE_MTRR;
break;
case 0xf: /* Cyrix 486 without DEVID registers */
@@ -642,6 +728,20 @@ __initfunc(void identify_cpu(struct cpuinfo_x86 *c))
if (c->x86_vendor == X86_VENDOR_AMD && amd_model(c))
return;
+
+ if (c->cpuid_level > 0 && c->x86_vendor == X86_VENDOR_INTEL)
+ {
+ if(c->x86_capability&(1<<18))
+ {
+ /* Disable processor serial number on Intel Pentium III
+ from code by Phil Karn */
+ unsigned long lo,hi;
+ rdmsr(0x119,lo,hi);
+ lo |= 0x200000;
+ wrmsr(0x119,lo,hi);
+ printk(KERN_INFO "Pentium-III serial number disabled.\n");
+ }
+ }
for (i = 0; i < sizeof(cpu_models)/sizeof(struct cpu_model_info); i++) {
if (c->cpuid_level > 1) {
@@ -726,15 +826,6 @@ __initfunc(void dodgy_tsc(void))
}
-#define rdmsr(msr,val1,val2) \
- __asm__ __volatile__("rdmsr" \
- : "=a" (val1), "=d" (val2) \
- : "c" (msr))
-
-#define wrmsr(msr,val1,val2) \
- __asm__ __volatile__("wrmsr" \
- : /* no outputs */ \
- : "c" (msr), "a" (val1), "d" (val2))
static char *cpu_vendor_names[] __initdata = {
"Intel", "Cyrix", "AMD", "UMC", "NexGen", "Centaur" };
@@ -784,9 +875,9 @@ int get_cpuinfo(char * buffer)
int sep_bug;
static char *x86_cap_flags[] = {
"fpu", "vme", "de", "pse", "tsc", "msr", "6", "mce",
- "cx8", "9", "10", "sep", "12", "pge", "14", "cmov",
- "16", "17", "18", "19", "20", "21", "22", "mmx",
- "24", "25", "26", "27", "28", "29", "30", "31"
+ "cx8", "9", "10", "sep", "mtrr", "pge", "14", "cmov",
+ "16", "17", "psn", "19", "20", "21", "22", "mmx",
+ "24", "kni", "26", "27", "28", "29", "30", "31"
};
struct cpuinfo_x86 *c = cpu_data;
int i, n;
@@ -807,7 +898,7 @@ int get_cpuinfo(char * buffer)
c->x86_model,
c->x86_model_id[0] ? c->x86_model_id : "unknown");
- if (c->x86_mask)
+ if (c->x86_mask || c->cpuid_level >= 0)
p += sprintf(p, "stepping\t: %d\n", c->x86_mask);
else
p += sprintf(p, "stepping\t: unknown\n");
@@ -832,10 +923,10 @@ int get_cpuinfo(char * buffer)
} else if (c->x86_vendor == X86_VENDOR_INTEL) {
x86_cap_flags[6] = "pae";
x86_cap_flags[9] = "apic";
- x86_cap_flags[12] = "mtrr";
x86_cap_flags[14] = "mca";
x86_cap_flags[16] = "pat";
x86_cap_flags[17] = "pse36";
+ x86_cap_flags[18] = "psn";
x86_cap_flags[24] = "osfxsr";
}
@@ -850,6 +941,7 @@ int get_cpuinfo(char * buffer)
"hlt_bug\t\t: %s\n"
"sep_bug\t\t: %s\n"
"f00f_bug\t: %s\n"
+ "coma_bug\t: %s\n"
"fpu\t\t: %s\n"
"fpu_exception\t: %s\n"
"cpuid level\t: %d\n"
@@ -859,6 +951,7 @@ int get_cpuinfo(char * buffer)
c->hlt_works_ok ? "no" : "yes",
sep_bug ? "yes" : "no",
c->f00f_bug ? "yes" : "no",
+ c->coma_bug ? "yes" : "no",
c->hard_math ? "yes" : "no",
(c->hard_math && ignore_irq13) ? "yes" : "no",
c->cpuid_level,
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 2960d521c..4d13635ae 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -29,6 +29,7 @@
* from Jose Renau
* Alan Cox : Added EBDA scanning
* Ingo Molnar : various cleanups and rewrites
+ * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug.
*/
#include <linux/config.h>
@@ -39,10 +40,12 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <asm/mtrr.h>
+#include <asm/msr.h>
#include "irq.h"
-extern unsigned long start_kernel, _etext;
+#define JIFFIE_TIMEOUT 100
+
extern void update_one_process( struct task_struct *p,
unsigned long ticks, unsigned long user,
unsigned long system, int cpu);
@@ -146,6 +149,8 @@ int skip_ioapic_setup = 0; /* 1 if "noapic" boot option passed */
*/
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
+#define CLEAR_TSC wrmsr(0x10, 0x00001000, 0x00001000)
+
/*
* Setup routine for controlling SMP activation
*
@@ -308,8 +313,17 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
else
{
+ int ver = m->mpc_apicver;
+
cpu_present_map|=(1<<m->mpc_apicid);
- apic_version[m->mpc_apicid]=m->mpc_apicver;
+ /*
+ * Validate version
+ */
+ if (ver == 0x0) {
+ printk("BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
+ ver = 0x10;
+ }
+ apic_version[m->mpc_apicid] = ver;
}
}
mpt+=sizeof(*m);
@@ -325,11 +339,13 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
SMP_PRINTK(("Bus #%d is %s\n",
m->mpc_busid,
str));
- if ((strncmp(m->mpc_bustype,"ISA",3) == 0) ||
- (strncmp(m->mpc_bustype,"EISA",4) == 0))
+ if (strncmp(m->mpc_bustype,"ISA",3) == 0)
mp_bus_id_to_type[m->mpc_busid] =
MP_BUS_ISA;
else
+ if (strncmp(m->mpc_bustype,"EISA",4) == 0)
+ mp_bus_id_to_type[m->mpc_busid] =
+ MP_BUS_EISA;
if (strncmp(m->mpc_bustype,"PCI",3) == 0) {
mp_bus_id_to_type[m->mpc_busid] =
MP_BUS_PCI;
@@ -454,7 +470,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
*/
cfg=pg0[0];
- pg0[0] = (mp_lapic_addr | 7);
+ pg0[0] = (mp_lapic_addr | _PAGE_RW | _PAGE_PRESENT);
local_flush_tlb();
boot_cpu_id = GET_APIC_ID(*((volatile unsigned long *) APIC_ID));
@@ -710,24 +726,19 @@ void __init enable_local_APIC(void)
value |= 0xff; /* Set spurious IRQ vector to 0xff */
apic_write(APIC_SPIV,value);
- value = apic_read(APIC_TASKPRI);
- value &= ~APIC_TPRI_MASK; /* Set Task Priority to 'accept all' */
- apic_write(APIC_TASKPRI,value);
-
/*
- * Set arbitrarion priority to 0
+ * Set Task Priority to 'accept all'
*/
- value = apic_read(APIC_ARBPRI);
- value &= ~APIC_ARBPRI_MASK;
- apic_write(APIC_ARBPRI, value);
+ value = apic_read(APIC_TASKPRI);
+ value &= ~APIC_TPRI_MASK;
+ apic_write(APIC_TASKPRI,value);
/*
- * Set the logical destination ID to 'all', just to be safe.
+ * Clear the logical destination ID, just to be safe.
* also, put the APIC into flat delivery mode.
*/
value = apic_read(APIC_LDR);
value &= ~APIC_LDR_MASK;
- value |= SET_APIC_LOGICAL_ID(0xff);
apic_write(APIC_LDR,value);
value = apic_read(APIC_DFR);
@@ -735,8 +746,6 @@ void __init enable_local_APIC(void)
apic_write(APIC_DFR, value);
udelay(100); /* B safe */
- ack_APIC_irq();
- udelay(100);
}
unsigned long __init init_smp_mappings(unsigned long memory_start)
@@ -883,6 +892,7 @@ int __init start_secondary(void *unused)
* Everything has been set up for the secondary
* CPUs - they just need to reload everything
* from the task structure
+ * This function must not return.
*/
void __init initialize_secondary(void)
{
@@ -924,7 +934,6 @@ static void __init do_boot_cpu(int i)
/*
* We need an idle process for each processor.
*/
-
kernel_thread(start_secondary, NULL, CLONE_PID);
cpucount++;
@@ -935,6 +944,8 @@ static void __init do_boot_cpu(int i)
idle->processor = i;
__cpu_logical_map[cpucount] = i;
cpu_number_map[i] = cpucount;
+ idle->has_cpu = 1; /* we schedule the first task manually */
+ idle->tss.eip = (unsigned long) start_secondary;
/* start_eip had better be page-aligned! */
start_eip = setup_trampoline();
@@ -1167,6 +1178,7 @@ void __init smp_boot_cpus(void)
/* Must be done before other processors booted */
mtrr_init_boot_cpu ();
#endif
+ init_idle();
/*
* Initialize the logical to physical CPU number mapping
* and the per-CPU profiling counter/multiplier
@@ -1316,7 +1328,7 @@ void __init smp_boot_cpus(void)
* Install writable page 0 entry.
*/
cfg = pg0[0];
- pg0[0] = 3; /* writeable, present, addr 0 */
+ pg0[0] = _PAGE_RW | _PAGE_PRESENT; /* writeable, present, addr 0 */
local_flush_tlb();
/*
@@ -1641,15 +1653,84 @@ void smp_send_stop(void)
send_IPI_allbutself(STOP_CPU_VECTOR);
}
+/* Structure and data for smp_call_function(). This is designed to minimise
+ * static memory requirements. It also looks cleaner.
+ */
+struct smp_call_function_struct {
+ void (*func) (void *info);
+ void *info;
+ atomic_t unstarted_count;
+ atomic_t unfinished_count;
+ int wait;
+};
+static volatile struct smp_call_function_struct *smp_call_function_data = NULL;
+
/*
- * this function sends an 'reload MTRR state' IPI to all other CPUs
- * in the system. it goes straight through, completion processing
- * is done on the mttr.c level.
+ * this function sends a 'generic call function' IPI to all other CPUs
+ * in the system.
*/
-void smp_send_mtrr(void)
+int smp_call_function (void (*func) (void *info), void *info, int retry,
+ int wait)
+/* [SUMMARY] Run a function on all other CPUs.
+ <func> The function to run. This must be fast and non-blocking.
+ <info> An arbitrary pointer to pass to the function.
+ <retry> If true, keep retrying until ready.
+ <wait> If true, wait until function has completed on other CPUs.
+ [RETURNS] 0 on success, else a negative status code. Does not return until
+ remote CPUs are nearly ready to execute <<func>> or are or have executed.
+*/
{
- send_IPI_allbutself(MTRR_CHANGE_VECTOR);
+ unsigned long timeout;
+ struct smp_call_function_struct data;
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+
+ if (retry) {
+ while (1) {
+ if (smp_call_function_data) {
+ schedule (); /* Give a mate a go */
+ continue;
+ }
+ spin_lock (&lock);
+ if (smp_call_function_data) {
+ spin_unlock (&lock); /* Bad luck */
+ continue;
+ }
+ /* Mine, all mine! */
+ break;
+ }
+ }
+ else {
+ if (smp_call_function_data) return -EBUSY;
+ spin_lock (&lock);
+ if (smp_call_function_data) {
+ spin_unlock (&lock);
+ return -EBUSY;
+ }
+ }
+ smp_call_function_data = &data;
+ spin_unlock (&lock);
+ data.func = func;
+ data.info = info;
+ atomic_set (&data.unstarted_count, smp_num_cpus - 1);
+ data.wait = wait;
+ if (wait) atomic_set (&data.unfinished_count, smp_num_cpus - 1);
+ /* Send a message to all other CPUs and wait for them to respond */
+ send_IPI_allbutself (CALL_FUNCTION_VECTOR);
+ /* Wait for response */
+ timeout = jiffies + JIFFIE_TIMEOUT;
+ while ( (atomic_read (&data.unstarted_count) > 0) &&
+ time_before (jiffies, timeout) )
+ barrier ();
+ if (atomic_read (&data.unstarted_count) > 0) {
+ smp_call_function_data = NULL;
+ return -ETIMEDOUT;
+ }
+ if (wait)
+ while (atomic_read (&data.unfinished_count) > 0)
+ barrier ();
+ smp_call_function_data = NULL;
+ return 0;
}
/*
@@ -1692,9 +1773,8 @@ void smp_local_timer_interrupt(struct pt_regs * regs)
system=1;
irq_enter(cpu, 0);
+ update_one_process(p, 1, user, system, cpu);
if (p->pid) {
- update_one_process(p, 1, user, system, cpu);
-
p->counter -= 1;
if (p->counter < 0) {
p->counter = 0;
@@ -1707,7 +1787,6 @@ void smp_local_timer_interrupt(struct pt_regs * regs)
kstat.cpu_user += user;
kstat.per_cpu_user[cpu] += user;
}
-
kstat.cpu_system += system;
kstat.per_cpu_system[cpu] += system;
@@ -1767,6 +1846,7 @@ asmlinkage void smp_invalidate_interrupt(void)
local_flush_tlb();
ack_APIC_irq();
+
}
static void stop_this_cpu (void)
@@ -1789,12 +1869,19 @@ asmlinkage void smp_stop_cpu_interrupt(void)
stop_this_cpu();
}
-void (*mtrr_hook) (void) = NULL;
-
-asmlinkage void smp_mtrr_interrupt(void)
+asmlinkage void smp_call_function_interrupt(void)
{
- ack_APIC_irq();
- if (mtrr_hook) (*mtrr_hook)();
+ void (*func) (void *info) = smp_call_function_data->func;
+ void *info = smp_call_function_data->info;
+ int wait = smp_call_function_data->wait;
+
+ ack_APIC_irq ();
+ /* Notify initiating CPU that I've grabbed the data and am about to
+ execute the function */
+ atomic_dec (&smp_call_function_data->unstarted_count);
+ /* At this point the structure may be out of scope unless wait==1 */
+ (*func) (info);
+ if (wait) atomic_dec (&smp_call_function_data->unfinished_count);
}
/*
@@ -1802,8 +1889,10 @@ asmlinkage void smp_mtrr_interrupt(void)
*/
asmlinkage void smp_spurious_interrupt(void)
{
- /* ack_APIC_irq(); see sw-dev-man vol 3, chapter 7.4.13.5 */
- printk("spurious APIC interrupt, ayiee, should never happen.\n");
+ ack_APIC_irq();
+ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
+ printk("spurious APIC interrupt on CPU#%d, should never happen.\n",
+ smp_processor_id());
}
/*
@@ -1815,10 +1904,6 @@ asmlinkage void smp_spurious_interrupt(void)
* closely follows bus clocks.
*/
-#define RDTSC(x) __asm__ __volatile__ ( "rdtsc" \
- :"=a" (((unsigned long*)&x)[0]), \
- "=d" (((unsigned long*)&x)[1]))
-
/*
* The timer chip is already set up at HZ interrupts per second here,
* but we do not accept timer interrupts yet. We only allow the BP
@@ -1937,7 +2022,7 @@ int __init calibrate_APIC_clock(void)
/*
* We wrapped around just now. Let's start:
*/
- RDTSC(t1);
+ rdtscll(t1);
tt1=apic_read(APIC_TMCCT);
#define LOOPS (HZ/10)
@@ -1948,7 +2033,7 @@ int __init calibrate_APIC_clock(void)
wait_8254_wraparound ();
tt2=apic_read(APIC_TMCCT);
- RDTSC(t2);
+ rdtscll(t2);
/*
* The APIC bus clock counter is 32 bits only, it
@@ -2058,3 +2143,4 @@ int setup_profiling_timer(unsigned int multiplier)
}
#undef APIC_DIVISOR
+
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index ec2ea5d60..2ab29d479 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -30,19 +30,6 @@
* serialize accesses to xtime/lost_ticks).
*/
-/* What about the "updated NTP code" stuff in 2.0 time.c? It's not in
- * 2.1, perhaps it should be ported, too.
- *
- * What about the BUGGY_NEPTUN_TIMER stuff in do_slow_gettimeoffset()?
- * Whatever it fixes, is it also fixed in the new code from the Jumbo
- * patch, so that that code can be used instead?
- *
- * The CPU Hz should probably be displayed in check_bugs() together
- * with the CPU vendor and type. Perhaps even only in MHz, though that
- * takes away some of the fun of the new code :)
- *
- * - Michael Krause */
-
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -60,6 +47,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
+#include <asm/msr.h>
#include <linux/mc146818rtc.h>
#include <linux/timex.h>
@@ -96,8 +84,8 @@ static inline unsigned long do_fast_gettimeoffset(void)
register unsigned long edx asm("dx");
/* Read the Time Stamp Counter */
- __asm__("rdtsc"
- :"=a" (eax), "=d" (edx));
+
+ rdtsc(eax,edx);
/* .. relative to previous jiffy (32 bits is enough) */
eax -= last_tsc_low; /* tsc_low delta */
@@ -292,7 +280,6 @@ void do_settimeofday(struct timeval *tv)
xtime = *tv;
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
- time_state = TIME_ERROR; /* p. 24, (a) */
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
write_unlock_irq(&xtime_lock);
@@ -457,7 +444,8 @@ static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
/* read Pentium cycle counter */
- __asm__("rdtsc" : "=a" (last_tsc_low) : : "edx");
+
+ rdtscl(last_tsc_low);
outb_p(0x00, 0x43); /* latch the count ASAP */
@@ -556,70 +544,72 @@ static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, 0, "timer", NUL
* device.
*/
+#define CALIBRATE_LATCH (5 * LATCH)
+#define CALIBRATE_TIME (5 * 1000020/HZ)
+
__initfunc(static unsigned long calibrate_tsc(void))
{
- unsigned long retval;
+ /* Set the Gate high, disable speaker */
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
- __asm__( /* set the Gate high, program CTC channel 2 for mode 0
- * (interrupt on terminal count mode), binary count,
- * load 5 * LATCH count, (LSB and MSB)
- * to begin countdown, read the TSC and busy wait.
- * BTW LATCH is calculated in timex.h from the HZ value
- */
+ /*
+ * Now let's take care of CTC channel 2
+ *
+ * Set the Gate high, program CTC channel 2 for mode 0,
+ * (interrupt on terminal count mode), binary count,
+ * load 5 * LATCH count, (LSB and MSB) to begin countdown.
+ */
+ outb(0xb0, 0x43); /* binary, mode 0, LSB/MSB, Ch 2 */
+ outb(CALIBRATE_LATCH & 0xff, 0x42); /* LSB of count */
+ outb(CALIBRATE_LATCH >> 8, 0x42); /* MSB of count */
+
+ {
+ unsigned long startlow, starthigh;
+ unsigned long endlow, endhigh;
+ unsigned long count;
+
+ rdtsc(startlow,starthigh);
+ count = 0;
+ do {
+ count++;
+ } while ((inb(0x61) & 0x20) == 0);
+ rdtsc(endlow,endhigh);
+
+ last_tsc_low = endlow;
+
+ /* Error: ECTCNEVERSET */
+ if (count <= 1)
+ goto bad_ctc;
+
+ /* 64-bit subtract - gcc just messes up with long longs */
+ __asm__("subl %2,%0\n\t"
+ "sbbl %3,%1"
+ :"=a" (endlow), "=d" (endhigh)
+ :"g" (startlow), "g" (starthigh),
+ "0" (endlow), "1" (endhigh));
+
+ /* Error: ECPUTOOFAST */
+ if (endhigh)
+ goto bad_ctc;
+
+ /* Error: ECPUTOOSLOW */
+ if (endlow <= CALIBRATE_TIME)
+ goto bad_ctc;
+
+ __asm__("divl %2"
+ :"=a" (endlow), "=d" (endhigh)
+ :"r" (endlow), "0" (0), "1" (CALIBRATE_TIME));
+
+ return endlow;
+ }
- /* Set the Gate high, disable speaker */
- "inb $0x61, %%al\n\t" /* Read port */
- "andb $0xfd, %%al\n\t" /* Turn off speaker Data */
- "orb $0x01, %%al\n\t" /* Set Gate high */
- "outb %%al, $0x61\n\t" /* Write port */
-
- /* Now let's take care of CTC channel 2 */
- "movb $0xb0, %%al\n\t" /* binary, mode 0, LSB/MSB, ch 2*/
- "outb %%al, $0x43\n\t" /* Write to CTC command port */
- "movl %1, %%eax\n\t"
- "outb %%al, $0x42\n\t" /* LSB of count */
- "shrl $8, %%eax\n\t"
- "outb %%al, $0x42\n\t" /* MSB of count */
-
- /* Read the TSC; counting has just started */
- "rdtsc\n\t"
- /* Move the value for safe-keeping. */
- "movl %%eax, %%ebx\n\t"
- "movl %%edx, %%ecx\n\t"
-
- /* Busy wait. Only 50 ms wasted at boot time. */
- "0: inb $0x61, %%al\n\t" /* Read Speaker Output Port */
- "testb $0x20, %%al\n\t" /* Check CTC channel 2 output (bit 5) */
- "jz 0b\n\t"
-
- /* And read the TSC. 5 jiffies (50.00077ms) have elapsed. */
- "rdtsc\n\t"
-
- /* Great. So far so good. Store last TSC reading in
- * last_tsc_low (only 32 lsb bits needed) */
- "movl %%eax, last_tsc_low\n\t"
- /* And now calculate the difference between the readings. */
- "subl %%ebx, %%eax\n\t"
- "sbbl %%ecx, %%edx\n\t" /* 64-bit subtract */
- /* but probably edx = 0 at this point (see below). */
- /* Now we have 5 * (TSC counts per jiffy) in eax. We want
- * to calculate TSC->microsecond conversion factor. */
-
- /* Note that edx (high 32-bits of difference) will now be
- * zero iff CPU clock speed is less than 85 GHz. Moore's
- * law says that this is likely to be true for the next
- * 12 years or so. You will have to change this code to
- * do a real 64-by-64 divide before that time's up. */
- "movl %%eax, %%ecx\n\t"
- "xorl %%eax, %%eax\n\t"
- "movl %2, %%edx\n\t"
- "divl %%ecx\n\t" /* eax= 2^32 / (1 * TSC counts per microsecond) */
- /* Return eax for the use of fast_gettimeoffset */
- "movl %%eax, %0\n\t"
- : "=r" (retval)
- : "r" (5 * LATCH), "r" (5 * 1000020/HZ)
- : /* we clobber: */ "ax", "bx", "cx", "dx", "cc", "memory");
- return retval;
+ /*
+ * The CTC wasn't reliable: we got a hit on the very first read,
+ * or the CPU was so fast/slow that the quotient wouldn't fit in
+ * 32 bits..
+ */
+bad_ctc:
+ return 0;
}
__initfunc(void time_init(void))
@@ -655,23 +645,26 @@ __initfunc(void time_init(void))
dodgy_tsc();
if (boot_cpu_data.x86_capability & X86_FEATURE_TSC) {
+ unsigned long tsc_quotient = calibrate_tsc();
+ if (tsc_quotient) {
+ fast_gettimeoffset_quotient = tsc_quotient;
+ use_tsc = 1;
#ifndef do_gettimeoffset
- do_gettimeoffset = do_fast_gettimeoffset;
+ do_gettimeoffset = do_fast_gettimeoffset;
#endif
- do_get_fast_time = do_gettimeofday;
- use_tsc = 1;
- fast_gettimeoffset_quotient = calibrate_tsc();
-
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000000;
- __asm__("divl %2"
- :"=a" (cpu_hz), "=d" (edx)
- :"r" (fast_gettimeoffset_quotient),
- "0" (eax), "1" (edx));
- printk("Detected %ld Hz processor.\n", cpu_hz);
+ do_get_fast_time = do_gettimeofday;
+
+ /* report CPU clock rate in Hz.
+ * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
+ * clock/second. Our precision is about 100 ppm.
+ */
+ { unsigned long eax=0, edx=1000000;
+ __asm__("divl %2"
+ :"=a" (cpu_hz), "=d" (edx)
+ :"r" (tsc_quotient),
+ "0" (eax), "1" (edx));
+ printk("Detected %ld Hz processor.\n", cpu_hz);
+ }
}
}
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index f0dc06092..cce35ac80 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -42,6 +42,8 @@
#include <asm/lithium.h>
#endif
+#include "irq.h"
+
asmlinkage int system_call(void);
asmlinkage void lcall7(void);
@@ -125,7 +127,6 @@ static void show_registers(struct pt_regs *regs)
unsigned long esp;
unsigned short ss;
unsigned long *stack, addr, module_start, module_end;
- extern char _stext, _etext;
esp = (unsigned long) (1+regs);
ss = __KERNEL_DS;
@@ -669,9 +670,6 @@ cobalt_init(void)
#endif
void __init trap_init(void)
{
- /* Initially up all of the IDT to jump to unexpected */
- init_unexpected_irq();
-
if (readl(0x0FFFD9) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
EISA_bus = 1;
set_call_gate(&default_ldt,lcall7);
@@ -693,7 +691,7 @@ void __init trap_init(void)
set_trap_gate(15,&spurious_interrupt_bug);
set_trap_gate(16,&coprocessor_error);
set_trap_gate(17,&alignment_check);
- set_system_gate(0x80,&system_call);
+ set_system_gate(SYSCALL_VECTOR,&system_call);
/* set up GDT task & ldt entries */
set_tss_desc(0, &init_task.tss);
diff --git a/arch/i386/kernel/visws_apic.c b/arch/i386/kernel/visws_apic.c
index f7dabc15d..c12054689 100644
--- a/arch/i386/kernel/visws_apic.c
+++ b/arch/i386/kernel/visws_apic.c
@@ -201,11 +201,13 @@ static void do_cobalt_IRQ(unsigned int irq, struct pt_regs * regs)
{
unsigned int status;
/* XXX APIC EOI? */
- status = desc->status & ~IRQ_REPLAY;
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
action = NULL;
- if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
- desc->status = status | IRQ_INPROGRESS;
+ status |= IRQ_INPROGRESS;
+ }
+ desc->status = status;
}
spin_unlock(&irq_controller_lock);