author    Ralf Baechle <ralf@linux-mips.org>    2000-04-28 01:09:25 +0000
committer Ralf Baechle <ralf@linux-mips.org>    2000-04-28 01:09:25 +0000
commit    b9ba7aeb165cffecdffb60aec8c3fa8d590d9ca9 (patch)
tree      42d07b0c7246ae2536a702e7c5de9e2732341116 /arch/ia64
parent    7406b0a326f2d70ade2671c37d1beef62249db97 (diff)
Merge with 2.3.99-pre6.
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Makefile                   20
-rw-r--r--  arch/ia64/config.in                   9
-rw-r--r--  arch/ia64/defconfig                   4
-rw-r--r--  arch/ia64/dig/Makefile                2
-rw-r--r--  arch/ia64/dig/iosapic.c             453
-rw-r--r--  arch/ia64/dig/setup.c                12
-rw-r--r--  arch/ia64/hp/Makefile                 2
-rw-r--r--  arch/ia64/ia32/Makefile               2
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c        13
-rw-r--r--  arch/ia64/ia32/ia32_entry.S          63
-rw-r--r--  arch/ia64/ia32/ia32_signal.c         27
-rw-r--r--  arch/ia64/ia32/ia32_traps.c          47
-rw-r--r--  arch/ia64/ia32/sys_ia32.c          1096
-rw-r--r--  arch/ia64/kdb/Makefile               21
-rw-r--r--  arch/ia64/kdb/kdb_bt.c              104
-rw-r--r--  arch/ia64/kdb/kdb_io.c              350
-rw-r--r--  arch/ia64/kdb/kdb_traps.c            55
-rw-r--r--  arch/ia64/kdb/kdbsupport.c         1329
-rw-r--r--  arch/ia64/kdb/pc_keyb.h             127
-rw-r--r--  arch/ia64/kernel/Makefile            12
-rw-r--r--  arch/ia64/kernel/acpi.c              55
-rw-r--r--  arch/ia64/kernel/efi.c               22
-rw-r--r--  arch/ia64/kernel/entry.S            155
-rw-r--r--  arch/ia64/kernel/gate.S              36
-rw-r--r--  arch/ia64/kernel/irq.c               63
-rw-r--r--  arch/ia64/kernel/irq_ia64.c          93
-rw-r--r--  arch/ia64/kernel/irq_internal.c      36
-rw-r--r--  arch/ia64/kernel/irq_lock.c         287
-rw-r--r--  arch/ia64/kernel/irq_sapic.c         38
-rw-r--r--  arch/ia64/kernel/ivt.S              373
-rw-r--r--  arch/ia64/kernel/mca.c              251
-rw-r--r--  arch/ia64/kernel/mca_asm.S          176
-rw-r--r--  arch/ia64/kernel/minstate.h         205
-rw-r--r--  arch/ia64/kernel/ptrace.c           214
-rw-r--r--  arch/ia64/kernel/sal.c               32
-rw-r--r--  arch/ia64/kernel/sal_stub.S          24
-rw-r--r--  arch/ia64/kernel/semaphore.c          2
-rw-r--r--  arch/ia64/kernel/setup.c             40
-rw-r--r--  arch/ia64/kernel/signal.c            19
-rw-r--r--  arch/ia64/kernel/smp.c              176
-rw-r--r--  arch/ia64/kernel/smpboot.c            2
-rw-r--r--  arch/ia64/kernel/time.c              24
-rw-r--r--  arch/ia64/kernel/traps.c             37
-rw-r--r--  arch/ia64/kernel/unaligned.c        105
-rw-r--r--  arch/ia64/lib/clear_user.S            1
-rw-r--r--  arch/ia64/lib/strlen.S                1
-rw-r--r--  arch/ia64/mm/fault.c                  2
-rw-r--r--  arch/ia64/mm/init.c                   1
-rw-r--r--  arch/ia64/mm/tlb.c                    2
-rw-r--r--  arch/ia64/tools/Makefile              2
-rw-r--r--  arch/ia64/tools/print_offsets.c       7
-rw-r--r--  arch/ia64/vmlinux.lds.S              17
52 files changed, 2397 insertions(+), 3849 deletions(-)
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 3ace288ef..897bca8e7 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -9,6 +9,7 @@
#
NM := $(CROSS_COMPILE)nm -B
+AWK := awk
LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds
# next line is for HP compiler backend:
@@ -16,10 +17,10 @@ LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds
# The next line is needed when compiling with the July snapshot of the Cygnus compiler:
#EXTRA = -D__GCC_DOESNT_KNOW_IN_REGS__
# next two lines are for the September snapshot of the Cygnus compiler:
-AFLAGS += -D__GCC_MULTIREG_RETVALS__
+AFLAGS += -D__GCC_MULTIREG_RETVALS__ -Wa,-x
EXTRA = -D__GCC_MULTIREG_RETVALS__
-CFLAGS := $(CFLAGS) -pipe $(EXTRA) -ffixed-r13 -mfixed-range=f10-f15,f32-f127
+CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127
ifdef CONFIG_IA64_GENERIC
CORE_FILES := arch/$(ARCH)/hp/hp.a \
@@ -34,14 +35,14 @@ ifdef CONFIG_IA64_GENERIC
else # !GENERIC
-ifeq ($(CONFIG_IA64_HP_SIM),y)
+ifdef CONFIG_IA64_HP_SIM
SUBDIRS := arch/$(ARCH)/hp \
$(SUBDIRS)
CORE_FILES := arch/$(ARCH)/hp/hp.a \
$(CORE_FILES)
endif
-ifeq ($(CONFIG_IA64_SGI_SN1_SIM),y)
+ifdef CONFIG_IA64_SGI_SN1_SIM
SUBDIRS := arch/$(ARCH)/sn/sn1 \
arch/$(ARCH)/sn \
$(SUBDIRS)
@@ -49,14 +50,14 @@ ifeq ($(CONFIG_IA64_SGI_SN1_SIM),y)
$(CORE_FILES)
endif
-ifeq ($(CONFIG_IA64_SOFTSDV),y)
+ifdef CONFIG_IA64_SOFTSDV
SUBDIRS := arch/$(ARCH)/dig \
$(SUBDIRS)
CORE_FILES := arch/$(ARCH)/dig/dig.a \
$(CORE_FILES)
endif
-ifeq ($(CONFIG_IA64_DIG),y)
+ifdef CONFIG_IA64_DIG
SUBDIRS := arch/$(ARCH)/dig \
$(SUBDIRS)
CORE_FILES := arch/$(ARCH)/dig/dig.a \
@@ -65,16 +66,11 @@ endif
endif # !GENERIC
-ifeq ($(CONFIG_IA32_SUPPORT),y)
+ifdef CONFIG_IA32_SUPPORT
SUBDIRS := arch/$(ARCH)/ia32 $(SUBDIRS)
CORE_FILES := arch/$(ARCH)/ia32/ia32.o $(CORE_FILES)
endif
-ifdef CONFIG_KDB
- LIBS := $(LIBS) $(TOPDIR)/arch/$(ARCH)/kdb/kdb.a
- SUBDIRS := $(SUBDIRS) arch/$(ARCH)/kdb
-endif
-
HEAD := arch/$(ARCH)/kernel/head.o arch/ia64/kernel/init_task.o
SUBDIRS := arch/$(ARCH)/tools arch/$(ARCH)/kernel arch/$(ARCH)/mm arch/$(ARCH)/lib $(SUBDIRS)
diff --git a/arch/ia64/config.in b/arch/ia64/config.in
index b7cce3d73..2d1a11980 100644
--- a/arch/ia64/config.in
+++ b/arch/ia64/config.in
@@ -4,6 +4,8 @@ mainmenu_option next_comment
comment 'General setup'
define_bool CONFIG_IA64 y
+define_bool CONFIG_ITANIUM y # easy choice for now... ;-)
+
define_bool CONFIG_ISA n
define_bool CONFIG_SBUS n
@@ -25,7 +27,7 @@ if [ "$CONFIG_IA64_DIG" = "y" ]; then
bool ' Enable BigSur hacks' CONFIG_IA64_BIGSUR_HACKS
bool ' Enable Lion hacks' CONFIG_IA64_LION_HACKS
bool ' Emulate PAL/SAL/EFI firmware' CONFIG_IA64_FW_EMU
- bool ' Get PCI IRQ routing from firmware/ACPI' CONFIG_IA64_IRQ_ACPI
+ bool ' Enable IA64 Machine Check Abort' CONFIG_IA64_MCA
fi
if [ "$CONFIG_IA64_GENERIC" = "y" ]; then
@@ -185,10 +187,5 @@ bool 'Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK
bool 'Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
bool 'Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ
bool 'Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS
-bool 'Built-in Kernel Debugger support' CONFIG_KDB
-if [ "$CONFIG_KDB" = "y" ]; then
- bool 'Compile the kernel with frame pointers' CONFIG_KDB_FRAMEPTR
- int 'KDB Kernel Symbol Table size?' CONFIG_KDB_STBSIZE 10000
-fi
endmenu
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 12854f121..00a0e05b7 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -115,8 +115,8 @@ CONFIG_BLK_DEV_IDEDMA=y
CONFIG_IDEDMA_PCI_EXPERIMENTAL=y
# CONFIG_IDEDMA_PCI_WIP is not set
# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
-# CONFIG_BLK_DEV_AEC6210 is not set
-# CONFIG_AEC6210_TUNING is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_AEC62XX_TUNING is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_WDC_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD7409 is not set
diff --git a/arch/ia64/dig/Makefile b/arch/ia64/dig/Makefile
index 8d0544ee5..f067606ee 100644
--- a/arch/ia64/dig/Makefile
+++ b/arch/ia64/dig/Makefile
@@ -15,7 +15,7 @@ all: dig.a
O_TARGET = dig.a
O_OBJS = iosapic.o setup.o
-ifeq ($(CONFIG_IA64_GENERIC),y)
+ifdef CONFIG_IA64_GENERIC
O_OBJS += machvec.o
endif
diff --git a/arch/ia64/dig/iosapic.c b/arch/ia64/dig/iosapic.c
index 4861aa2d9..9fd01063e 100644
--- a/arch/ia64/dig/iosapic.c
+++ b/arch/ia64/dig/iosapic.c
@@ -7,16 +7,20 @@
* Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999,2000 Walt Drummond <drummond@valinux.com>
+ *
+ * 00/04/19 D. Mosberger Rewritten to mirror more closely the x86 I/O APIC code.
+ * In particular, we now have separate handlers for edge
+ * and level triggered interrupts.
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/string.h>
+#include <linux/irq.h>
#include <asm/io.h>
#include <asm/iosapic.h>
@@ -27,172 +31,19 @@
#undef DEBUG_IRQ_ROUTING
-/*
- * IRQ vectors 0..15 are treated as the legacy interrupts of the PC-AT
- * platform. No new drivers should ever ask for specific irqs, but we
- * provide compatibility here in case there is an old driver that does
- * ask for specific irqs (serial, keyboard, stuff like that). Since
- * IA-64 doesn't allow irq 0..15 to be used for external interrupts
- * anyhow, this in no way prevents us from doing the Right Thing
- * with new drivers.
- */
+static spinlock_t iosapic_lock = SPIN_LOCK_UNLOCKED;
+
struct iosapic_vector iosapic_vector[NR_IRQS] = {
[0 ... NR_IRQS-1] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }
};
-#ifndef CONFIG_IA64_IRQ_ACPI
-/*
- * Defines the default interrupt routing information for the LION platform
- * XXX - this information should be obtained from the ACPI and hardcoded since
- * we do not have ACPI AML support.
- */
-
-struct intr_routing_entry intr_routing[] = {
- {0,0,0,2,0,0,0,0},
- {0,0,1,1,0,0,0,0},
- {0,0,2,0xff,0,0,0,0},
- {0,0,3,3,0,0,0,0},
- {0,0,4,4,0,0,0,0},
- {0,0,5,5,0,0,0,0},
- {0,0,6,6,0,0,0,0},
- {0,0,7,7,0,0,0,0},
- {0,0,8,8,0,0,0,0},
- {0,0,9,9,0,0,0,0},
- {0,0,10,10,0,0,0,0},
- {0,0,11,11,0,0,0,0},
- {0,0,12,12,0,0,0,0},
- {0,0,13,13,0,0,0,0},
- {0,0,14,14,0,0,0,0},
- {0,0,15,15,0,0,0,0},
-#ifdef CONFIG_IA64_LION_HACKS
- {1, 0, 0x04, 16, 0, 0, 1, 1}, /* bus 0, device id 1, INTA */
- {1, 0, 0x05, 26, 0, 0, 1, 1}, /* bus 0, device id 1, INTB */
- {1, 0, 0x06, 36, 0, 0, 1, 1}, /* bus 0, device id 1, INTC */
- {1, 0, 0x07, 42, 0, 0, 1, 1}, /* bus 0, device id 1, INTD */
-
- {1, 0, 0x08, 17, 0, 0, 1, 1}, /* bus 0, device id 2, INTA */
- {1, 0, 0x09, 27, 0, 0, 1, 1}, /* bus 0, device id 2, INTB */
- {1, 0, 0x0a, 37, 0, 0, 1, 1}, /* bus 0, device id 2, INTC */
- {1, 0, 0x0b, 42, 0, 0, 1, 1}, /* bus 0, device id 2, INTD */
-
- {1, 0, 0x0f, 50, 0, 0, 1, 1}, /* bus 0, device id 3, INTD */
-
- {1, 0, 0x14, 51, 0, 0, 1, 1}, /* bus 0, device id 5, INTA */
-
- {1, 0, 0x18, 49, 0, 0, 1, 1}, /* bus 0, device id 6, INTA */
-
- {1, 1, 0x04, 18, 0, 0, 1, 1}, /* bus 1, device id 1, INTA */
- {1, 1, 0x05, 28, 0, 0, 1, 1}, /* bus 1, device id 1, INTB */
- {1, 1, 0x06, 38, 0, 0, 1, 1}, /* bus 1, device id 1, INTC */
- {1, 1, 0x07, 43, 0, 0, 1, 1}, /* bus 1, device id 1, INTD */
-
- {1, 1, 0x08, 48, 0, 0, 1, 1}, /* bus 1, device id 2, INTA */
-
- {1, 1, 0x0c, 19, 0, 0, 1, 1}, /* bus 1, device id 3, INTA */
- {1, 1, 0x0d, 29, 0, 0, 1, 1}, /* bus 1, device id 3, INTB */
- {1, 1, 0x0e, 38, 0, 0, 1, 1}, /* bus 1, device id 3, INTC */
- {1, 1, 0x0f, 44, 0, 0, 1, 1}, /* bus 1, device id 3, INTD */
-
- {1, 1, 0x10, 20, 0, 0, 1, 1}, /* bus 1, device id 4, INTA */
- {1, 1, 0x11, 30, 0, 0, 1, 1}, /* bus 1, device id 4, INTB */
- {1, 1, 0x12, 39, 0, 0, 1, 1}, /* bus 1, device id 4, INTC */
- {1, 1, 0x13, 45, 0, 0, 1, 1}, /* bus 1, device id 4, INTD */
-
- {1, 2, 0x04, 21, 0, 0, 1, 1}, /* bus 2, device id 1, INTA */
- {1, 2, 0x05, 31, 0, 0, 1, 1}, /* bus 2, device id 1, INTB */
- {1, 2, 0x06, 39, 0, 0, 1, 1}, /* bus 2, device id 1, INTC */
- {1, 2, 0x07, 45, 0, 0, 1, 1}, /* bus 2, device id 1, INTD */
-
- {1, 2, 0x08, 22, 0, 0, 1, 1}, /* bus 2, device id 2, INTA */
- {1, 2, 0x09, 32, 0, 0, 1, 1}, /* bus 2, device id 2, INTB */
- {1, 2, 0x0a, 40, 0, 0, 1, 1}, /* bus 2, device id 2, INTC */
- {1, 2, 0x0b, 46, 0, 0, 1, 1}, /* bus 2, device id 2, INTD */
-
- {1, 2, 0x0c, 23, 0, 0, 1, 1}, /* bus 2, device id 3, INTA */
- {1, 2, 0x0d, 33, 0, 0, 1, 1}, /* bus 2, device id 3, INTB */
- {1, 2, 0x0e, 40, 0, 0, 1, 1}, /* bus 2, device id 3, INTC */
- {1, 2, 0x0f, 46, 0, 0, 1, 1}, /* bus 2, device id 3, INTD */
-
- {1, 3, 0x04, 24, 0, 0, 1, 1}, /* bus 3, device id 1, INTA */
- {1, 3, 0x05, 34, 0, 0, 1, 1}, /* bus 3, device id 1, INTB */
- {1, 3, 0x06, 41, 0, 0, 1, 1}, /* bus 3, device id 1, INTC */
- {1, 3, 0x07, 47, 0, 0, 1, 1}, /* bus 3, device id 1, INTD */
-
- {1, 3, 0x08, 25, 0, 0, 1, 1}, /* bus 3, device id 2, INTA */
- {1, 3, 0x09, 35, 0, 0, 1, 1}, /* bus 3, device id 2, INTB */
- {1, 3, 0x0a, 41, 0, 0, 1, 1}, /* bus 3, device id 2, INTC */
- {1, 3, 0x0b, 47, 0, 0, 1, 1}, /* bus 3, device id 2, INTD */
-#else
- /*
- * BigSur platform, bus 0, device 1,2,4 and bus 1 device 0-3
- */
- {1,1,0x0,19,0,0,1,1}, /* bus 1, device id 0, INTA */
- {1,1,0x1,18,0,0,1,1}, /* bus 1, device id 0, INTB */
- {1,1,0x2,17,0,0,1,1}, /* bus 1, device id 0, INTC */
- {1,1,0x3,16,0,0,1,1}, /* bus 1, device id 0, INTD */
-
- {1,1,0x4,23,0,0,1,1}, /* bus 1, device id 1, INTA */
- {1,1,0x5,22,0,0,1,1}, /* bus 1, device id 1, INTB */
- {1,1,0x6,21,0,0,1,1}, /* bus 1, device id 1, INTC */
- {1,1,0x7,20,0,0,1,1}, /* bus 1, device id 1, INTD */
-
- {1,1,0x8,27,0,0,1,1}, /* bus 1, device id 2, INTA */
- {1,1,0x9,26,0,0,1,1}, /* bus 1, device id 2, INTB */
- {1,1,0xa,25,0,0,1,1}, /* bus 1, device id 2, INTC */
- {1,1,0xb,24,0,0,1,1}, /* bus 1, device id 2, INTD */
-
- {1,1,0xc,31,0,0,1,1}, /* bus 1, device id 3, INTA */
- {1,1,0xd,30,0,0,1,1}, /* bus 1, device id 3, INTB */
- {1,1,0xe,29,0,0,1,1}, /* bus 1, device id 3, INTC */
- {1,1,0xf,28,0,0,1,1}, /* bus 1, device id 3, INTD */
-
- {1,0,0x4,35,0,0,1,1}, /* bus 0, device id 1, INTA */
- {1,0,0x5,34,0,0,1,1}, /* bus 0, device id 1, INTB */
- {1,0,0x6,33,0,0,1,1}, /* bus 0, device id 1, INTC */
- {1,0,0x7,32,0,0,1,1}, /* bus 0, device id 1, INTD */
-
- {1,0,0x8,39,0,0,1,1}, /* bus 0, device id 2, INTA */
- {1,0,0x9,38,0,0,1,1}, /* bus 0, device id 2, INTB */
- {1,0,0xa,37,0,0,1,1}, /* bus 0, device id 2, INTC */
- {1,0,0xb,36,0,0,1,1}, /* bus 0, device id 2, INTD */
-
- {1,0,0x10,43,0,0,1,1}, /* bus 0, device id 4, INTA */
- {1,0,0x11,42,0,0,1,1}, /* bus 0, device id 4, INTB */
- {1,0,0x12,41,0,0,1,1}, /* bus 0, device id 4, INTC */
- {1,0,0x13,40,0,0,1,1}, /* bus 0, device id 4, INTD */
-
- {1,0,0x14,17,0,0,1,1}, /* bus 0, device id 5, INTA */
- {1,0,0x18,18,0,0,1,1}, /* bus 0, device id 6, INTA */
- {1,0,0x1c,19,0,0,1,1}, /* bus 0, device id 7, INTA */
-#endif
- {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff},
-};
-
-int
-iosapic_get_PCI_irq_vector(int bus, int slot, int pci_pin)
-{
- int i = -1;
-
- while (intr_routing[++i].srcbus != 0xff) {
- if (intr_routing[i].srcbus == BUS_PCI) {
- if ((intr_routing[i].srcbusirq == ((slot << 2) | pci_pin))
- && (intr_routing[i].srcbusno == bus)) {
- return(intr_routing[i].iosapic_pin);
- }
- }
- }
- return -1;
-}
-
-#else /* CONFIG_IA64_IRQ_ACPI */
-
/*
* find the IRQ in the IOSAPIC map for the PCI device on bus/slot/pin
*/
int
-iosapic_get_PCI_irq_vector(int bus, int slot, int pci_pin)
+iosapic_get_PCI_irq_vector (int bus, int slot, int pci_pin)
{
- int i;
+ int i;
for (i = 0; i < NR_IRQS; i++) {
if ((iosapic_bustype(i) == BUS_PCI) &&
@@ -201,17 +52,15 @@ iosapic_get_PCI_irq_vector(int bus, int slot, int pci_pin)
return i;
}
}
-
return -1;
}
-#endif /* !CONFIG_IA64_IRQ_ACPI */
static void
set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delivery,
long dest, int vector)
{
- int low32;
- int high32;
+ u32 low32;
+ u32 high32;
low32 = ((pol << IO_SAPIC_POLARITY_SHIFT) |
(trigger << IO_SAPIC_TRIGGER_SHIFT) |
@@ -221,81 +70,137 @@ set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delive
/* dest contains both id and eid */
high32 = (dest << IO_SAPIC_DEST_SHIFT);
- /*
- * program the rte
- */
writel(IO_SAPIC_RTE_HIGH(entry), iosapic_addr + IO_SAPIC_REG_SELECT);
writel(high32, iosapic_addr + IO_SAPIC_WINDOW);
writel(IO_SAPIC_RTE_LOW(entry), iosapic_addr + IO_SAPIC_REG_SELECT);
writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
}
+static void
+nop (unsigned int irq)
+{
+ /* do nothing... */
+}
static void
-enable_pin (unsigned int pin, unsigned long iosapic_addr)
+mask_irq (unsigned int irq)
{
- int low32;
+ unsigned long flags, iosapic_addr = iosapic_addr(irq);
+ u32 low32;
- writel(IO_SAPIC_RTE_LOW(pin), iosapic_addr + IO_SAPIC_REG_SELECT);
- low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
+ spin_lock_irqsave(&iosapic_lock, flags);
+ {
+ writel(IO_SAPIC_RTE_LOW(iosapic_pin(irq)), iosapic_addr + IO_SAPIC_REG_SELECT);
+ low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
- low32 &= ~(1 << IO_SAPIC_MASK_SHIFT); /* Zero only the mask bit */
- writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
+ low32 |= (1 << IO_SAPIC_MASK_SHIFT); /* Zero only the mask bit */
+ writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
+ }
+ spin_unlock_irqrestore(&iosapic_lock, flags);
}
-
static void
-disable_pin (unsigned int pin, unsigned long iosapic_addr)
+unmask_irq (unsigned int irq)
{
- int low32;
+ unsigned long flags, iosapic_addr = iosapic_addr(irq);
+ u32 low32;
- writel(IO_SAPIC_RTE_LOW(pin), iosapic_addr + IO_SAPIC_REG_SELECT);
- low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
+ spin_lock_irqsave(&iosapic_lock, flags);
+ {
+ writel(IO_SAPIC_RTE_LOW(iosapic_pin(irq)), iosapic_addr + IO_SAPIC_REG_SELECT);
+ low32 = readl(iosapic_addr + IO_SAPIC_WINDOW);
- low32 |= (1 << IO_SAPIC_MASK_SHIFT); /* Set only the mask bit */
- writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
+ low32 &= ~(1 << IO_SAPIC_MASK_SHIFT); /* Zero only the mask bit */
+ writel(low32, iosapic_addr + IO_SAPIC_WINDOW);
+ }
+ spin_unlock_irqrestore(&iosapic_lock, flags);
+}
+
+
+static void
+iosapic_set_affinity (unsigned int irq, unsigned long mask)
+{
+ printk("iosapic_set_affinity: not implemented yet\n");
}
-#define iosapic_shutdown_irq iosapic_disable_irq
+/*
+ * Handlers for level-triggered interrupts.
+ */
static unsigned int
-iosapic_startup_irq (unsigned int irq)
+iosapic_startup_level_irq (unsigned int irq)
{
- int pin;
-
- pin = iosapic_pin(irq);
- if (pin < 0)
- /* happens during irq auto probing... */
- return 0;
- set_rte(iosapic_addr(irq), pin, iosapic_polarity(irq), iosapic_trigger(irq),
- iosapic_dmode(irq), (ia64_get_lid() >> 16) & 0xffff, irq);
- enable_pin(pin, iosapic_addr(irq));
+ unmask_irq(irq);
return 0;
}
static void
-iosapic_enable_irq (unsigned int irq)
+iosapic_end_level_irq (unsigned int irq)
{
- int pin = iosapic_pin(irq);
+ writel(irq, iosapic_addr(irq) + IO_SAPIC_EOI);
+}
- if (pin < 0)
- /* happens during irq auto probing... */
- return;
- enable_pin(pin, iosapic_addr(irq));
+#define iosapic_shutdown_level_irq mask_irq
+#define iosapic_enable_level_irq unmask_irq
+#define iosapic_disable_level_irq mask_irq
+#define iosapic_ack_level_irq nop
+
+struct hw_interrupt_type irq_type_iosapic_level = {
+ typename: "IO-SAPIC-level",
+ startup: iosapic_startup_level_irq,
+ shutdown: iosapic_shutdown_level_irq,
+ enable: iosapic_enable_level_irq,
+ disable: iosapic_disable_level_irq,
+ ack: iosapic_ack_level_irq,
+ end: iosapic_end_level_irq,
+ set_affinity: iosapic_set_affinity
+};
+
+/*
+ * Handlers for edge-triggered interrupts.
+ */
+
+static unsigned int
+iosapic_startup_edge_irq (unsigned int irq)
+{
+ unmask_irq(irq);
+ /*
+ * IOSAPIC simply drops interrupts pended while the
+ * corresponding pin was masked, so we can't know if an
+ * interrupt is pending already. Let's hope not...
+ */
+ return 0;
}
static void
-iosapic_disable_irq (unsigned int irq)
+iosapic_ack_edge_irq (unsigned int irq)
{
- int pin = iosapic_pin(irq);
-
- if (pin < 0)
- return;
- disable_pin(pin, iosapic_addr(irq));
+ /*
+ * Once we have recorded IRQ_PENDING already, we can mask the
+ * interrupt for real. This prevents IRQ storms from unhandled
+ * devices.
+ */
+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED)) == (IRQ_PENDING | IRQ_DISABLED))
+ mask_irq(irq);
}
+#define iosapic_enable_edge_irq unmask_irq
+#define iosapic_disable_edge_irq nop
+#define iosapic_end_edge_irq nop
+
+struct hw_interrupt_type irq_type_iosapic_edge = {
+ typename: "IO-SAPIC-edge",
+ startup: iosapic_startup_edge_irq,
+ shutdown: iosapic_disable_edge_irq,
+ enable: iosapic_enable_edge_irq,
+ disable: iosapic_disable_edge_irq,
+ ack: iosapic_ack_edge_irq,
+ end: iosapic_end_edge_irq,
+ set_affinity: iosapic_set_affinity
+};
+
unsigned int
-iosapic_version(unsigned long base_addr)
+iosapic_version (unsigned long base_addr)
{
/*
* IOSAPIC Version Register return 32 bit structure like:
@@ -310,99 +215,19 @@ iosapic_version(unsigned long base_addr)
return readl(IO_SAPIC_WINDOW + base_addr);
}
-static void
-iosapic_ack_irq (unsigned int irq)
-{
-}
-
-static void
-iosapic_end_irq (unsigned int irq)
-{
- if (iosapic_trigger(irq) == IO_SAPIC_LEVEL) /* ACK Level trigger interrupts */
- writel(irq, iosapic_addr(irq) + IO_SAPIC_EOI);
-}
-
-static void
-iosapic_set_affinity (unsigned int irq, unsigned long mask)
-{
- printk("iosapic_set_affinity: not implemented yet\n");
-}
-
void
iosapic_init (unsigned long address)
{
- int i;
-#ifdef CONFIG_IA64_IRQ_ACPI
+ struct hw_interrupt_type *irq_type;
struct pci_vector_struct *vectors;
- int irq;
-#else
- int vector;
-#endif
+ int i, irq;
- /*
- * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
- * enabled.
- */
- outb(0xff, 0xA1);
- outb(0xff, 0x21);
-
-#if defined(CONFIG_IA64_SOFTSDV_HACKS)
- memset(iosapic_vector, 0x0, sizeof(iosapic_vector));
- for (i = 0; i < NR_IRQS; i++) {
- iosapic_pin(i) = 0xff;
- iosapic_addr(i) = (unsigned long) ioremap(address, 0);
- }
- /* XXX this should come from systab or some such: */
-# if 0
- /* this doesn't look right --davidm 00/03/07 */
- iosapic_pin(TIMER_IRQ) = 5; /* System Clock Interrupt */
-# endif
- iosapic_pin(0x40) = 3; /* Keyboard */
- iosapic_pin(0x92) = 9; /* COM1 Serial Port */
- iosapic_pin(0x80) = 4; /* Periodic Interrupt */
- iosapic_pin(0xc0) = 2; /* Mouse */
- iosapic_pin(0xe0) = 1; /* IDE Disk */
- iosapic_pin(0xf0) = 6; /* E-IDE CDROM */
- iosapic_pin(0xa0) = 10; /* Real PCI Interrupt */
-#elif !defined(CONFIG_IA64_IRQ_ACPI)
- /*
- * For systems where the routing info in ACPI is
- * unavailable/wrong, use the intr_routing information to
- * initialize the iosapic array
- */
- i = -1;
- while (intr_routing[++i].srcbus != 0xff) {
- if (intr_routing[i].srcbus == BUS_ISA) {
- vector = isa_irq_to_vector(intr_routing[i].srcbusirq);
- } else if (intr_routing[i].srcbus == BUS_PCI) {
- vector = intr_routing[i].iosapic_pin;
- } else {
- printk("unknown bus type %d for intr_routing[%d]\n",
- intr_routing[i].srcbus, i);
- continue;
- }
- iosapic_pin(vector) = intr_routing[i].iosapic_pin;
- iosapic_dmode(vector) = intr_routing[i].mode;
- iosapic_polarity(vector) = intr_routing[i].polarity;
- iosapic_trigger(vector) = intr_routing[i].trigger;
-# ifdef DEBUG_IRQ_ROUTING
- printk("irq[0x%x(0x%x)]:0x%x, %d, %d, %d\n", vector, intr_routing[i].srcbusirq,
- iosapic_pin(vector), iosapic_dmode(vector), iosapic_polarity(vector),
- iosapic_trigger(vector));
-# endif
- }
-#else /* !defined(CONFIG_IA64_SOFTSDV_HACKS) && defined(CONFIG_IA64_IRQ_ACPI) */
/*
- * Map the legacy ISA devices into the IOAPIC data; We'll override these
- * later with data from the ACPI Interrupt Source Override table.
- *
- * Huh, the Lion w/ FPSWA firmware has entries for _all_ of the legacy IRQs,
- * including those that are not different from PC/AT standard. I don't know
- * if this is a bug in the other firmware or not. I'm going to leave this code
- * here, so that this works on BigSur but will go ask Intel. --wfd 2000-Jan-19
- *
+ * Map the legacy ISA devices into the IOSAPIC data. Some of
+ * these may get reprogrammed later on with data from the ACPI
+ * Interrupt Source Override table.
*/
- for (i =0 ; i < 16; i++) {
+ for (i = 0; i < 16; i++) {
irq = isa_irq_to_vector(i);
iosapic_pin(irq) = i;
iosapic_bus(irq) = BUS_ISA;
@@ -445,41 +270,37 @@ iosapic_init (unsigned long address)
irq, iosapic_pin(irq));
#endif
}
-#endif /* !CONFIG_IA64_IRQ_ACPI */
-}
-struct hw_interrupt_type irq_type_iosapic = {
- typename: "IOSAPIC",
- startup: iosapic_startup_irq,
- shutdown: iosapic_shutdown_irq,
- enable: iosapic_enable_irq,
- disable: iosapic_disable_irq,
- ack: iosapic_ack_irq,
- end: iosapic_end_irq,
- set_affinity: iosapic_set_affinity
-};
+ for (i = 0; i < NR_IRQS; ++i) {
+ if (iosapic_pin(i) != -1) {
+ if (iosapic_trigger(i) == IO_SAPIC_LEVEL)
+ irq_type = &irq_type_iosapic_level;
+ else
+ irq_type = &irq_type_iosapic_edge;
+ if (irq_desc[i].handler != &no_irq_type)
+ printk("dig_irq_init: warning: changing vector %d from %s to %s\n",
+ i, irq_desc[i].handler->typename,
+ irq_type->typename);
+ irq_desc[i].handler = irq_type;
+
+ /* program the IOSAPIC routing table: */
+ set_rte(iosapic_addr(i), iosapic_pin(i), iosapic_polarity(i),
+ iosapic_trigger(i), iosapic_dmode(i),
+ (ia64_get_lid() >> 16) & 0xffff, i);
+ }
+ }
+}
void
dig_irq_init (void)
{
- int i;
-
/*
- * Claim all non-legacy irq vectors as ours unless they're
- * claimed by someone else already (e.g., timer or IPI are
- * handled internally).
+ * Disable the compatibility mode interrupts (8259 style), needs IN/OUT support
+ * enabled.
*/
-#if 0
- for (i = IA64_MIN_VECTORED_IRQ; i <= IA64_MAX_VECTORED_IRQ; ++i) {
- if (irq_desc[i].handler == &no_irq_type)
- irq_desc[i].handler = &irq_type_iosapic;
- }
-#else
- for (i = 0; i <= IA64_MAX_VECTORED_IRQ; ++i) {
- if (irq_desc[i].handler == &no_irq_type)
- irq_desc[i].handler = &irq_type_iosapic;
- }
-#endif
+ outb(0xff, 0xA1);
+ outb(0xff, 0x21);
+
#ifndef CONFIG_IA64_DIG
iosapic_init(IO_SAPIC_DEFAULT_ADDR);
#endif
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index 6ae40319d..133f817a1 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -47,17 +47,11 @@ dig_setup (char **cmdline_p)
unsigned int orig_x, orig_y, num_cols, num_rows, font_height;
/*
- * This assumes that the EFI partition is physical disk 1
- * partition 1 and the Linux root disk is physical disk 1
- * partition 2.
+ * Default to /dev/sda2. This assumes that the EFI partition
+ * is physical disk 1 partition 1 and the Linux root disk is
+ * physical disk 1 partition 2.
*/
-#ifdef CONFIG_IA64_LION_HACKS
- /* default to /dev/sda2 on Lion... */
ROOT_DEV = to_kdev_t(0x0802); /* default to second partition on first drive */
-#else
- /* default to /dev/dha2 on BigSur... */
- ROOT_DEV = to_kdev_t(0x0302); /* default to second partition on first drive */
-#endif
#ifdef CONFIG_SMP
init_smp_config();
diff --git a/arch/ia64/hp/Makefile b/arch/ia64/hp/Makefile
index 64899f4be..458269e0a 100644
--- a/arch/ia64/hp/Makefile
+++ b/arch/ia64/hp/Makefile
@@ -10,7 +10,7 @@ all: hp.a
O_TARGET = hp.a
O_OBJS = hpsim_console.o hpsim_irq.o hpsim_setup.o
-ifeq ($(CONFIG_IA64_GENERIC),y)
+ifdef CONFIG_IA64_GENERIC
O_OBJS += hpsim_machvec.o
endif
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
index 82017941c..1b18cca58 100644
--- a/arch/ia64/ia32/Makefile
+++ b/arch/ia64/ia32/Makefile
@@ -10,7 +10,7 @@
all: ia32.o
O_TARGET := ia32.o
-O_OBJS := ia32_entry.o ia32_signal.o sys_ia32.o ia32_support.o binfmt_elf32.o
+O_OBJS := ia32_entry.o sys_ia32.o ia32_signal.o ia32_support.o ia32_traps.o binfmt_elf32.o
clean::
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index a99983681..6f702df14 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -134,6 +134,19 @@ void ia64_elf32_init(struct pt_regs *regs)
regs->cr_ipsr &= ~IA64_PSR_AC;
regs->loadrs = 0;
+ /*
+ * According to the ABI %edx points to an `atexit' handler.
+ * Since we don't have one we'll set it to 0 and initialize
+ * all the other registers just to make things more deterministic,
+ * ala the i386 implementation.
+ */
+ regs->r8 = 0; /* %eax */
+ regs->r11 = 0; /* %ebx */
+ regs->r9 = 0; /* %ecx */
+ regs->r10 = 0; /* %edx */
+ regs->r13 = 0; /* %ebp */
+ regs->r14 = 0; /* %esi */
+ regs->r15 = 0; /* %edi */
}
#undef STACK_TOP
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index bd7b0517b..ff27a02ce 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -1,14 +1,55 @@
#include <asm/offsets.h>
#include <asm/signal.h>
+ //
+ // Get possibly unaligned sigmask argument into an aligned
+ // kernel buffer
+ .text
+ .proc ia32_rt_sigsuspend
+ .global ia32_rt_sigsuspend
+ia32_rt_sigsuspend:
+
+ // We'll cheat and not do an alloc here since we are ultimately
+ // going to do a simple branch to the IA64 sys_rt_sigsuspend.
+ // r32 is still the first argument which is the signal mask.
+ // We copy this 4-byte aligned value to an 8-byte aligned buffer
+ // in the task structure and then jump to the IA64 code.
+
+ mov r8=r0 // no memory access errors yet
+ add r10=4,r32
+ ;;
+1:
+ ld4 r2=[r32] // get first half of sigmask
+ ld4 r3=[r10] // get second half of sigmask
+2:
+ cmp.lt p6,p0=r8,r0 // check memory access
+ ;;
+(p6) br.ret.sptk.many rp // it failed
+
+ adds r32=IA64_TASK_THREAD_SIGMASK_OFFSET,r13
+ adds r10=IA64_TASK_THREAD_SIGMASK_OFFSET+4,r13
+ ;;
+ st4 [r32]=r2
+ st4 [r10]=r3
+ br.cond.sptk.many sys_rt_sigsuspend
+
+ .section __ex_table,"a"
+ data4 @gprel(1b)
+ data4 (2b-1b)|1
+ .previous
+
+
+ .endp ia32_rt_sigsuspend
+
.global ia32_ret_from_syscall
- .proc ia64_ret_from_syscall
+ .proc ia32_ret_from_syscall
ia32_ret_from_syscall:
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
st8 [r2]=r8 // store return value in slot for r8
br.cond.sptk.few ia64_leave_kernel
+ .endp ia32_ret_from_syscall
//
// Invoke a system call, but do some tracing before and after the call.
@@ -35,10 +76,21 @@ ia32_trace_syscall:
.endp ia32_trace_syscall
.align 16
+ .global sys32_vfork
+ .proc sys32_vfork
+sys32_vfork:
+ alloc r16=ar.pfs,2,2,3,0;;
+ mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags
+ br.cond.sptk.few .fork1 // do the work
+ .endp sys32_vfork
+
+ .align 16
.global sys32_fork
.proc sys32_fork
sys32_fork:
alloc r16=ar.pfs,2,2,3,0;;
+ mov out0=SIGCHLD // out0 = clone_flags
+.fork1:
movl r28=1f
mov loc1=rp
br.cond.sptk.many save_switch_stack
@@ -46,7 +98,6 @@ sys32_fork:
mov loc0=r16 // save ar.pfs across do_fork
adds out2=IA64_SWITCH_STACK_SIZE+16,sp
adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp
- mov out0=SIGCHLD // out0 = clone_flags
;;
ld8 out1=[r2] // fetch usp from pt_regs.r12
br.call.sptk.few rp=do_fork
@@ -88,7 +139,7 @@ ia32_syscall_table:
data8 sys_setuid
data8 sys_getuid
data8 sys_ni_syscall /* sys_stime is not supported on IA64 */ /* 25 */
- data8 sys_ptrace
+ data8 sys32_ptrace
data8 sys32_alarm
data8 sys_ni_syscall
data8 sys_ni_syscall
@@ -105,7 +156,7 @@ ia32_syscall_table:
data8 sys_rmdir /* 40 */
data8 sys_dup
data8 sys32_pipe
- data8 sys_times
+ data8 sys32_times
data8 sys_ni_syscall /* old prof syscall holder */
data8 sys_brk /* 45 */
data8 sys_setgid
@@ -139,7 +190,7 @@ ia32_syscall_table:
data8 sys_sethostname
data8 sys32_setrlimit /* 75 */
data8 sys32_getrlimit
- data8 sys_getrusage
+ data8 sys32_getrusage
data8 sys32_gettimeofday
data8 sys32_settimeofday
data8 sys_getgroups /* 80 */
@@ -241,7 +292,7 @@ ia32_syscall_table:
data8 sys_rt_sigpending
data8 sys_rt_sigtimedwait
data8 sys_rt_sigqueueinfo
- data8 sys_rt_sigsuspend
+ data8 ia32_rt_sigsuspend
data8 sys_pread /* 180 */
data8 sys_pwrite
data8 sys_chown
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index 2c2c53741..83e6c9e61 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -94,7 +94,9 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate,
err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
/* non-iBCS2 extensions.. */
+#endif
err |= __put_user(mask, &sc->oldmask);
+#if 0
err |= __put_user(current->tss.cr2, &sc->cr2);
#endif
@@ -196,7 +198,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
return (void *)((esp - frame_size) & -8ul);
}
-static void
+static int
setup_frame_ia32(int sig, struct k_sigaction *ka, sigset_t *set,
struct pt_regs * regs)
{
@@ -247,20 +249,21 @@ setup_frame_ia32(int sig, struct k_sigaction *ka, sigset_t *set,
regs->eflags &= ~TF_MASK;
#endif
-#if 1
- printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
- current->comm, current->pid, frame, regs->cr_iip, frame->pretcode);
+#if 0
+ printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n",
+ current->comm, current->pid, sig, frame, regs->cr_iip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current);
+ return 0;
}
-static void
+static int
setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
@@ -316,29 +319,29 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->eflags &= ~TF_MASK;
#endif
-#if 1
+#if 0
printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
current->comm, current->pid, frame, regs->cr_iip, frame->pretcode);
#endif
- return;
+ return 1;
give_sigsegv:
if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current);
+ return 0;
}
-long
+int
ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
/* Set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
- setup_rt_frame_ia32(sig, ka, info, set, regs);
+ return(setup_rt_frame_ia32(sig, ka, info, set, regs));
else
- setup_frame_ia32(sig, ka, set, regs);
-
+ return(setup_frame_ia32(sig, ka, set, regs));
}
asmlinkage int
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
new file mode 100644
index 000000000..de99a65b3
--- /dev/null
+++ b/arch/ia64/ia32/ia32_traps.c
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#include <asm/ia32.h>
+#include <asm/ptrace.h>
+
+int
+ia32_exception (struct pt_regs *regs, unsigned long isr)
+{
+ struct siginfo siginfo;
+
+ switch ((isr >> 16) & 0xff) {
+ case 1:
+ case 2:
+ if (isr == 0)
+ siginfo.si_code = TRAP_TRACE;
+ else if (isr & 0x4)
+ siginfo.si_code = TRAP_BRANCH;
+ else
+ siginfo.si_code = TRAP_BRKPT;
+ break;
+
+ case 3:
+ siginfo.si_code = TRAP_BRKPT;
+ break;
+
+ case 0: /* Divide fault */
+ case 4: /* Overflow */
+ case 5: /* Bounds fault */
+ case 6: /* Invalid Op-code */
+ case 7: /* FP DNA */
+ case 8: /* Double Fault */
+ case 9: /* Invalid TSS */
+ case 11: /* Segment not present */
+ case 12: /* Stack fault */
+ case 13: /* General Protection Fault */
+ case 16: /* Pending FP error */
+ case 17: /* Alignment check */
+ case 19: /* SSE Numeric error */
+ default:
+ return -1;
+ }
+ siginfo.si_signo = SIGTRAP;
+ siginfo.si_errno = 0;
+ send_sig_info(SIGTRAP, &siginfo, current);
+ return 0;
+}
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 00eca716d..2077abe93 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -58,32 +58,6 @@
#define A(__x) ((unsigned long)(__x))
#define AA(__x) ((unsigned long)(__x))
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys32_vfork(
-int dummy0,
-int dummy1,
-int dummy2,
-int dummy3,
-int dummy4,
-int dummy5,
-int dummy6,
-int dummy7,
-int stack)
-{
- struct pt_regs *regs = (struct pt_regs *)&stack;
-
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->r12, regs);
-}
-
static int
nargs(unsigned int arg, char **ap)
{
@@ -842,82 +816,6 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
return sys32_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
-struct rusage32 {
- struct timeval32 ru_utime;
- struct timeval32 ru_stime;
- int ru_maxrss;
- int ru_ixrss;
- int ru_idrss;
- int ru_isrss;
- int ru_minflt;
- int ru_majflt;
- int ru_nswap;
- int ru_inblock;
- int ru_oublock;
- int ru_msgsnd;
- int ru_msgrcv;
- int ru_nsignals;
- int ru_nvcsw;
- int ru_nivcsw;
-};
-
-static int
-put_rusage (struct rusage32 *ru, struct rusage *r)
-{
- int err;
-
- err = put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
- err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
- err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
- err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
- err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
- err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
- err |= __put_user (r->ru_idrss, &ru->ru_idrss);
- err |= __put_user (r->ru_isrss, &ru->ru_isrss);
- err |= __put_user (r->ru_minflt, &ru->ru_minflt);
- err |= __put_user (r->ru_majflt, &ru->ru_majflt);
- err |= __put_user (r->ru_nswap, &ru->ru_nswap);
- err |= __put_user (r->ru_inblock, &ru->ru_inblock);
- err |= __put_user (r->ru_oublock, &ru->ru_oublock);
- err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
- err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
- err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
- err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
- err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
- return err;
-}
-
-extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr,
- int options, struct rusage * ru);
-
-asmlinkage int
-sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options,
- struct rusage32 *ru)
-{
- if (!ru)
- return sys_wait4(pid, stat_addr, options, NULL);
- else {
- struct rusage r;
- int ret;
- unsigned int status;
- mm_segment_t old_fs = get_fs();
-
- set_fs (KERNEL_DS);
- ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
- set_fs (old_fs);
- if (put_rusage (ru, &r)) return -EFAULT;
- if (stat_addr && put_user (status, stat_addr))
- return -EFAULT;
- return ret;
- }
-}
-
-asmlinkage int
-sys32_waitpid(__kernel_pid_t32 pid, unsigned int *stat_addr, int options)
-{
- return sys32_wait4(pid, stat_addr, options, NULL);
-}
-
struct timespec32 {
int tv_sec;
int tv_nsec;
@@ -1586,65 +1484,63 @@ do_sys32_semctl(int first, int second, int third, void *uptr)
{
union semun fourth;
u32 pad;
- int err = -EINVAL;
+ int err, err2;
+ struct semid64_ds s;
+ struct semid_ds32 *usp;
+ mm_segment_t old_fs;
if (!uptr)
- goto out;
+ return -EINVAL;
err = -EFAULT;
if (get_user (pad, (u32 *)uptr))
- goto out;
+ return err;
if(third == SETVAL)
fourth.val = (int)pad;
else
fourth.__pad = (void *)A(pad);
- if (IPCOP_MASK (third) &
- (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (SEM_INFO) |
- IPCOP_MASK (GETVAL) | IPCOP_MASK (GETPID) |
- IPCOP_MASK (GETNCNT) | IPCOP_MASK (GETZCNT) |
- IPCOP_MASK (GETALL) | IPCOP_MASK (SETALL) |
- IPCOP_MASK (IPC_RMID))) {
+ switch (third) {
+
+ case IPC_INFO:
+ case IPC_RMID:
+ case IPC_SET:
+ case SEM_INFO:
+ case GETVAL:
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETALL:
+ case SETVAL:
+ case SETALL:
err = sys_semctl (first, second, third, fourth);
- } else {
- struct semid_ds s;
- struct semid_ds32 *usp = (struct semid_ds32 *)A(pad);
- mm_segment_t old_fs;
- int need_back_translation;
-
- if (third == IPC_SET) {
- err = get_user (s.sem_perm.uid, &usp->sem_perm.uid);
- err |= __get_user(s.sem_perm.gid, &usp->sem_perm.gid);
- err |= __get_user(s.sem_perm.mode, &usp->sem_perm.mode);
- if (err)
- goto out;
- fourth.__pad = &s;
- }
- need_back_translation =
- (IPCOP_MASK (third) &
- (IPCOP_MASK (SEM_STAT) | IPCOP_MASK (IPC_STAT))) != 0;
- if (need_back_translation)
- fourth.__pad = &s;
+ break;
+
+ case IPC_STAT:
+ case SEM_STAT:
+ usp = (struct semid_ds32 *)A(pad);
+ fourth.__pad = &s;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_semctl (first, second, third, fourth);
set_fs (old_fs);
- if (need_back_translation) {
- int err2 = put_user(s.sem_perm.key, &usp->sem_perm.key);
- err2 |= __put_user(s.sem_perm.uid, &usp->sem_perm.uid);
- err2 |= __put_user(s.sem_perm.gid, &usp->sem_perm.gid);
- err2 |= __put_user(s.sem_perm.cuid,
- &usp->sem_perm.cuid);
- err2 |= __put_user (s.sem_perm.cgid,
- &usp->sem_perm.cgid);
- err2 |= __put_user (s.sem_perm.mode,
- &usp->sem_perm.mode);
- err2 |= __put_user (s.sem_perm.seq, &usp->sem_perm.seq);
- err2 |= __put_user (s.sem_otime, &usp->sem_otime);
- err2 |= __put_user (s.sem_ctime, &usp->sem_ctime);
- err2 |= __put_user (s.sem_nsems, &usp->sem_nsems);
- if (err2) err = -EFAULT;
- }
+ err2 = put_user(s.sem_perm.key, &usp->sem_perm.key);
+ err2 |= __put_user(s.sem_perm.uid, &usp->sem_perm.uid);
+ err2 |= __put_user(s.sem_perm.gid, &usp->sem_perm.gid);
+ err2 |= __put_user(s.sem_perm.cuid,
+ &usp->sem_perm.cuid);
+ err2 |= __put_user (s.sem_perm.cgid,
+ &usp->sem_perm.cgid);
+ err2 |= __put_user (s.sem_perm.mode,
+ &usp->sem_perm.mode);
+ err2 |= __put_user (s.sem_perm.seq, &usp->sem_perm.seq);
+ err2 |= __put_user (s.sem_otime, &usp->sem_otime);
+ err2 |= __put_user (s.sem_ctime, &usp->sem_ctime);
+ err2 |= __put_user (s.sem_nsems, &usp->sem_nsems);
+ if (err2)
+ err = -EFAULT;
+ break;
+
}
-out:
+
return err;
}
@@ -1717,52 +1613,60 @@ out:
static int
do_sys32_msgctl (int first, int second, void *uptr)
{
- int err;
+ int err, err2;
+ struct msqid_ds m;
+ struct msqid64_ds m64;
+ struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
+ mm_segment_t old_fs;
+
+ switch (second) {
- if (IPCOP_MASK (second) &
- (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (MSG_INFO) |
- IPCOP_MASK (IPC_RMID))) {
+ case IPC_INFO:
+ case IPC_RMID:
+ case MSG_INFO:
err = sys_msgctl (first, second, (struct msqid_ds *)uptr);
- } else {
- struct msqid_ds m;
- struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
- mm_segment_t old_fs;
-
- if (second == IPC_SET) {
- err = get_user (m.msg_perm.uid, &up->msg_perm.uid);
- err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid);
- err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode);
- err |= __get_user (m.msg_qbytes, &up->msg_qbytes);
- if (err)
- goto out;
- }
+ break;
+
+ case IPC_SET:
+ err = get_user (m.msg_perm.uid, &up->msg_perm.uid);
+ err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid);
+ err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode);
+ err |= __get_user (m.msg_qbytes, &up->msg_qbytes);
+ if (err)
+ break;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_msgctl (first, second, &m);
set_fs (old_fs);
- if (IPCOP_MASK (second) &
- (IPCOP_MASK (MSG_STAT) | IPCOP_MASK (IPC_STAT))) {
- int err2 = put_user (m.msg_perm.key, &up->msg_perm.key);
- err2 |= __put_user(m.msg_perm.uid, &up->msg_perm.uid);
- err2 |= __put_user(m.msg_perm.gid, &up->msg_perm.gid);
- err2 |= __put_user(m.msg_perm.cuid, &up->msg_perm.cuid);
- err2 |= __put_user(m.msg_perm.cgid, &up->msg_perm.cgid);
- err2 |= __put_user(m.msg_perm.mode, &up->msg_perm.mode);
- err2 |= __put_user(m.msg_perm.seq, &up->msg_perm.seq);
- err2 |= __put_user(m.msg_stime, &up->msg_stime);
- err2 |= __put_user(m.msg_rtime, &up->msg_rtime);
- err2 |= __put_user(m.msg_ctime, &up->msg_ctime);
- err2 |= __put_user(m.msg_cbytes, &up->msg_cbytes);
- err2 |= __put_user(m.msg_qnum, &up->msg_qnum);
- err2 |= __put_user(m.msg_qbytes, &up->msg_qbytes);
- err2 |= __put_user(m.msg_lspid, &up->msg_lspid);
- err2 |= __put_user(m.msg_lrpid, &up->msg_lrpid);
- if (err2)
- err = -EFAULT;
- }
+ break;
+
+ case IPC_STAT:
+ case MSG_STAT:
+ old_fs = get_fs ();
+ set_fs (KERNEL_DS);
+ err = sys_msgctl (first, second, &m64);
+ set_fs (old_fs);
+ err2 = put_user (m64.msg_perm.key, &up->msg_perm.key);
+ err2 |= __put_user(m64.msg_perm.uid, &up->msg_perm.uid);
+ err2 |= __put_user(m64.msg_perm.gid, &up->msg_perm.gid);
+ err2 |= __put_user(m64.msg_perm.cuid, &up->msg_perm.cuid);
+ err2 |= __put_user(m64.msg_perm.cgid, &up->msg_perm.cgid);
+ err2 |= __put_user(m64.msg_perm.mode, &up->msg_perm.mode);
+ err2 |= __put_user(m64.msg_perm.seq, &up->msg_perm.seq);
+ err2 |= __put_user(m64.msg_stime, &up->msg_stime);
+ err2 |= __put_user(m64.msg_rtime, &up->msg_rtime);
+ err2 |= __put_user(m64.msg_ctime, &up->msg_ctime);
+ err2 |= __put_user(m64.msg_cbytes, &up->msg_cbytes);
+ err2 |= __put_user(m64.msg_qnum, &up->msg_qnum);
+ err2 |= __put_user(m64.msg_qbytes, &up->msg_qbytes);
+ err2 |= __put_user(m64.msg_lspid, &up->msg_lspid);
+ err2 |= __put_user(m64.msg_lrpid, &up->msg_lrpid);
+ if (err2)
+ err = -EFAULT;
+ break;
+
}
-out:
return err;
}
@@ -1774,85 +1678,98 @@ do_sys32_shmat (int first, int second, int third, int version, void *uptr)
int err = -EINVAL;
if (version == 1)
- goto out;
+ return err;
err = sys_shmat (first, uptr, second, &raddr);
if (err)
- goto out;
+ return err;
err = put_user (raddr, uaddr);
-out:
return err;
}
static int
do_sys32_shmctl (int first, int second, void *uptr)
{
- int err;
-
- if (IPCOP_MASK (second) &
- (IPCOP_MASK (IPC_INFO) | IPCOP_MASK (SHM_LOCK)
- | IPCOP_MASK (SHM_UNLOCK) | IPCOP_MASK (IPC_RMID))) {
+ int err = -EFAULT, err2;
+ struct shmid_ds s;
+ struct shmid64_ds s64;
+ struct shmid_ds32 *up = (struct shmid_ds32 *)uptr;
+ mm_segment_t old_fs;
+ struct shm_info32 {
+ int used_ids;
+ u32 shm_tot, shm_rss, shm_swp;
+ u32 swap_attempts, swap_successes;
+ } *uip = (struct shm_info32 *)uptr;
+ struct shm_info si;
+
+ switch (second) {
+
+ case IPC_INFO:
+ case IPC_RMID:
+ case SHM_LOCK:
+ case SHM_UNLOCK:
err = sys_shmctl (first, second, (struct shmid_ds *)uptr);
- } else {
- struct shmid_ds s;
- struct shmid_ds32 *up = (struct shmid_ds32 *)uptr;
- mm_segment_t old_fs;
-
- if (second == IPC_SET) {
- err = get_user (s.shm_perm.uid, &up->shm_perm.uid);
- err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid);
- err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode);
- if (err)
- goto out;
- }
+ break;
+ case IPC_SET:
+ err = get_user (s.shm_perm.uid, &up->shm_perm.uid);
+ err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid);
+ err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode);
+ if (err)
+ break;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_shmctl (first, second, &s);
set_fs (old_fs);
+ break;
+
+ case IPC_STAT:
+ case SHM_STAT:
+ old_fs = get_fs ();
+ set_fs (KERNEL_DS);
+ err = sys_shmctl (first, second, &s64);
+ set_fs (old_fs);
if (err < 0)
- goto out;
+ break;
+ err2 = put_user (s64.shm_perm.key, &up->shm_perm.key);
+ err2 |= __put_user (s64.shm_perm.uid, &up->shm_perm.uid);
+ err2 |= __put_user (s64.shm_perm.gid, &up->shm_perm.gid);
+ err2 |= __put_user (s64.shm_perm.cuid,
+ &up->shm_perm.cuid);
+ err2 |= __put_user (s64.shm_perm.cgid,
+ &up->shm_perm.cgid);
+ err2 |= __put_user (s64.shm_perm.mode,
+ &up->shm_perm.mode);
+ err2 |= __put_user (s64.shm_perm.seq, &up->shm_perm.seq);
+ err2 |= __put_user (s64.shm_atime, &up->shm_atime);
+ err2 |= __put_user (s64.shm_dtime, &up->shm_dtime);
+ err2 |= __put_user (s64.shm_ctime, &up->shm_ctime);
+ err2 |= __put_user (s64.shm_segsz, &up->shm_segsz);
+ err2 |= __put_user (s64.shm_nattch, &up->shm_nattch);
+ err2 |= __put_user (s64.shm_cpid, &up->shm_cpid);
+ err2 |= __put_user (s64.shm_lpid, &up->shm_lpid);
+ if (err2)
+ err = -EFAULT;
+ break;
+
+ case SHM_INFO:
+ old_fs = get_fs ();
+ set_fs (KERNEL_DS);
+ err = sys_shmctl (first, second, &si);
+ set_fs (old_fs);
+ if (err < 0)
+ break;
+ err2 = put_user (si.used_ids, &uip->used_ids);
+ err2 |= __put_user (si.shm_tot, &uip->shm_tot);
+ err2 |= __put_user (si.shm_rss, &uip->shm_rss);
+ err2 |= __put_user (si.shm_swp, &uip->shm_swp);
+ err2 |= __put_user (si.swap_attempts,
+ &uip->swap_attempts);
+ err2 |= __put_user (si.swap_successes,
+ &uip->swap_successes);
+ if (err2)
+ err = -EFAULT;
+ break;
- /* Mask it even in this case so it becomes a CSE. */
- if (second == SHM_INFO) {
- struct shm_info32 {
- int used_ids;
- u32 shm_tot, shm_rss, shm_swp;
- u32 swap_attempts, swap_successes;
- } *uip = (struct shm_info32 *)uptr;
- struct shm_info *kp = (struct shm_info *)&s;
- int err2 = put_user (kp->used_ids, &uip->used_ids);
- err2 |= __put_user (kp->shm_tot, &uip->shm_tot);
- err2 |= __put_user (kp->shm_rss, &uip->shm_rss);
- err2 |= __put_user (kp->shm_swp, &uip->shm_swp);
- err2 |= __put_user (kp->swap_attempts,
- &uip->swap_attempts);
- err2 |= __put_user (kp->swap_successes,
- &uip->swap_successes);
- if (err2)
- err = -EFAULT;
- } else if (IPCOP_MASK (second) &
- (IPCOP_MASK (SHM_STAT) | IPCOP_MASK (IPC_STAT))) {
- int err2 = put_user (s.shm_perm.key, &up->shm_perm.key);
- err2 |= __put_user (s.shm_perm.uid, &up->shm_perm.uid);
- err2 |= __put_user (s.shm_perm.gid, &up->shm_perm.gid);
- err2 |= __put_user (s.shm_perm.cuid,
- &up->shm_perm.cuid);
- err2 |= __put_user (s.shm_perm.cgid,
- &up->shm_perm.cgid);
- err2 |= __put_user (s.shm_perm.mode,
- &up->shm_perm.mode);
- err2 |= __put_user (s.shm_perm.seq, &up->shm_perm.seq);
- err2 |= __put_user (s.shm_atime, &up->shm_atime);
- err2 |= __put_user (s.shm_dtime, &up->shm_dtime);
- err2 |= __put_user (s.shm_ctime, &up->shm_ctime);
- err2 |= __put_user (s.shm_segsz, &up->shm_segsz);
- err2 |= __put_user (s.shm_nattch, &up->shm_nattch);
- err2 |= __put_user (s.shm_cpid, &up->shm_cpid);
- err2 |= __put_user (s.shm_lpid, &up->shm_lpid);
- if (err2)
- err = -EFAULT;
- }
}
-out:
return err;
}
@@ -1865,67 +1782,54 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
- if (call <= SEMCTL)
- switch (call) {
- case SEMOP:
- /* struct sembuf is the same on 32 and 64bit :)) */
- err = sys_semop (first, (struct sembuf *)AA(ptr),
- second);
- goto out;
- case SEMGET:
- err = sys_semget (first, second, third);
- goto out;
- case SEMCTL:
- err = do_sys32_semctl (first, second, third,
- (void *)AA(ptr));
- goto out;
- default:
- err = -EINVAL;
- goto out;
- };
- if (call <= MSGCTL)
- switch (call) {
- case MSGSND:
- err = do_sys32_msgsnd (first, second, third,
- (void *)AA(ptr));
- goto out;
- case MSGRCV:
- err = do_sys32_msgrcv (first, second, fifth, third,
- version, (void *)AA(ptr));
- goto out;
- case MSGGET:
- err = sys_msgget ((key_t) first, second);
- goto out;
- case MSGCTL:
- err = do_sys32_msgctl (first, second, (void *)AA(ptr));
- goto out;
- default:
- err = -EINVAL;
- goto out;
- }
- if (call <= SHMCTL)
- switch (call) {
- case SHMAT:
- err = do_sys32_shmat (first, second, third,
- version, (void *)AA(ptr));
- goto out;
- case SHMDT:
- err = sys_shmdt ((char *)AA(ptr));
- goto out;
- case SHMGET:
- err = sys_shmget (first, second, third);
- goto out;
- case SHMCTL:
- err = do_sys32_shmctl (first, second, (void *)AA(ptr));
- goto out;
- default:
- err = -EINVAL;
- goto out;
- }
+ switch (call) {
- err = -EINVAL;
+ case SEMOP:
+ /* struct sembuf is the same on 32 and 64bit :)) */
+ err = sys_semop (first, (struct sembuf *)AA(ptr),
+ second);
+ break;
+ case SEMGET:
+ err = sys_semget (first, second, third);
+ break;
+ case SEMCTL:
+ err = do_sys32_semctl (first, second, third,
+ (void *)AA(ptr));
+ break;
+
+ case MSGSND:
+ err = do_sys32_msgsnd (first, second, third,
+ (void *)AA(ptr));
+ break;
+ case MSGRCV:
+ err = do_sys32_msgrcv (first, second, fifth, third,
+ version, (void *)AA(ptr));
+ break;
+ case MSGGET:
+ err = sys_msgget ((key_t) first, second);
+ break;
+ case MSGCTL:
+ err = do_sys32_msgctl (first, second, (void *)AA(ptr));
+ break;
+
+ case SHMAT:
+ err = do_sys32_shmat (first, second, third,
+ version, (void *)AA(ptr));
+ break;
+ case SHMDT:
+ err = sys_shmdt ((char *)AA(ptr));
+ break;
+ case SHMGET:
+ err = sys_shmget (first, second, third);
+ break;
+ case SHMCTL:
+ err = do_sys32_shmctl (first, second, (void *)AA(ptr));
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
-out:
unlock_kernel();
return err;
}
@@ -1949,6 +1853,537 @@ asmlinkage long sys32_time(int * tloc)
return i;
}
+struct rusage32 {
+ struct timeval32 ru_utime;
+ struct timeval32 ru_stime;
+ int ru_maxrss;
+ int ru_ixrss;
+ int ru_idrss;
+ int ru_isrss;
+ int ru_minflt;
+ int ru_majflt;
+ int ru_nswap;
+ int ru_inblock;
+ int ru_oublock;
+ int ru_msgsnd;
+ int ru_msgrcv;
+ int ru_nsignals;
+ int ru_nvcsw;
+ int ru_nivcsw;
+};
+
+static int
+put_rusage (struct rusage32 *ru, struct rusage *r)
+{
+ int err;
+
+ err = put_user (r->ru_utime.tv_sec, &ru->ru_utime.tv_sec);
+ err |= __put_user (r->ru_utime.tv_usec, &ru->ru_utime.tv_usec);
+ err |= __put_user (r->ru_stime.tv_sec, &ru->ru_stime.tv_sec);
+ err |= __put_user (r->ru_stime.tv_usec, &ru->ru_stime.tv_usec);
+ err |= __put_user (r->ru_maxrss, &ru->ru_maxrss);
+ err |= __put_user (r->ru_ixrss, &ru->ru_ixrss);
+ err |= __put_user (r->ru_idrss, &ru->ru_idrss);
+ err |= __put_user (r->ru_isrss, &ru->ru_isrss);
+ err |= __put_user (r->ru_minflt, &ru->ru_minflt);
+ err |= __put_user (r->ru_majflt, &ru->ru_majflt);
+ err |= __put_user (r->ru_nswap, &ru->ru_nswap);
+ err |= __put_user (r->ru_inblock, &ru->ru_inblock);
+ err |= __put_user (r->ru_oublock, &ru->ru_oublock);
+ err |= __put_user (r->ru_msgsnd, &ru->ru_msgsnd);
+ err |= __put_user (r->ru_msgrcv, &ru->ru_msgrcv);
+ err |= __put_user (r->ru_nsignals, &ru->ru_nsignals);
+ err |= __put_user (r->ru_nvcsw, &ru->ru_nvcsw);
+ err |= __put_user (r->ru_nivcsw, &ru->ru_nivcsw);
+ return err;
+}
+
+extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr,
+ int options, struct rusage * ru);
+
+asmlinkage int
+sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options,
+ struct rusage32 *ru)
+{
+ if (!ru)
+ return sys_wait4(pid, stat_addr, options, NULL);
+ else {
+ struct rusage r;
+ int ret;
+ unsigned int status;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_wait4(pid, stat_addr ? &status : NULL, options, &r);
+ set_fs (old_fs);
+ if (put_rusage (ru, &r)) return -EFAULT;
+ if (stat_addr && put_user (status, stat_addr))
+ return -EFAULT;
+ return ret;
+ }
+}
+
+asmlinkage int
+sys32_waitpid(__kernel_pid_t32 pid, unsigned int *stat_addr, int options)
+{
+ return sys32_wait4(pid, stat_addr, options, NULL);
+}
+
+
+extern asmlinkage int
+sys_getrusage(int who, struct rusage *ru);
+
+asmlinkage int
+sys32_getrusage(int who, struct rusage32 *ru)
+{
+ struct rusage r;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_getrusage(who, &r);
+ set_fs (old_fs);
+ if (put_rusage (ru, &r)) return -EFAULT;
+ return ret;
+}
+
+struct tms32 {
+ __kernel_clock_t32 tms_utime;
+ __kernel_clock_t32 tms_stime;
+ __kernel_clock_t32 tms_cutime;
+ __kernel_clock_t32 tms_cstime;
+};
+
+extern asmlinkage long sys_times(struct tms * tbuf);
+
+asmlinkage long
+sys32_times(struct tms32 *tbuf)
+{
+ struct tms t;
+ long ret;
+ mm_segment_t old_fs = get_fs ();
+ int err;
+
+ set_fs (KERNEL_DS);
+ ret = sys_times(tbuf ? &t : NULL);
+ set_fs (old_fs);
+ if (tbuf) {
+ err = put_user (t.tms_utime, &tbuf->tms_utime);
+ err |= __put_user (t.tms_stime, &tbuf->tms_stime);
+ err |= __put_user (t.tms_cutime, &tbuf->tms_cutime);
+ err |= __put_user (t.tms_cstime, &tbuf->tms_cstime);
+ if (err)
+ ret = -EFAULT;
+ }
+ return ret;
+}
+
+unsigned int
+ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
+{
+ size_t copied;
+ unsigned int ret;
+
+ copied = access_process_vm(child, addr, val, sizeof(*val), 0);
+ return(copied != sizeof(ret) ? -EIO : 0);
+}
+
+unsigned int
+ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
+{
+
+ if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
+ return -EIO;
+ return 0;
+}
+
+/*
+ * The order in which registers are stored in the ptrace regs structure
+ */
+#define PT_EBX 0
+#define PT_ECX 1
+#define PT_EDX 2
+#define PT_ESI 3
+#define PT_EDI 4
+#define PT_EBP 5
+#define PT_EAX 6
+#define PT_DS 7
+#define PT_ES 8
+#define PT_FS 9
+#define PT_GS 10
+#define PT_ORIG_EAX 11
+#define PT_EIP 12
+#define PT_CS 13
+#define PT_EFL 14
+#define PT_UESP 15
+#define PT_SS 16
+
+unsigned int
+getreg(struct task_struct *child, int regno)
+{
+ struct pt_regs *child_regs;
+
+ child_regs = ia64_task_regs(child);
+ switch (regno / sizeof(int)) {
+
+ case PT_EBX:
+ return(child_regs->r11);
+ case PT_ECX:
+ return(child_regs->r9);
+ case PT_EDX:
+ return(child_regs->r10);
+ case PT_ESI:
+ return(child_regs->r14);
+ case PT_EDI:
+ return(child_regs->r15);
+ case PT_EBP:
+ return(child_regs->r13);
+ case PT_EAX:
+ case PT_ORIG_EAX:
+ return(child_regs->r8);
+ case PT_EIP:
+ return(child_regs->cr_iip);
+ case PT_UESP:
+ return(child_regs->r12);
+ case PT_EFL:
+ return(child->thread.eflag);
+ case PT_DS:
+ case PT_ES:
+ case PT_FS:
+ case PT_GS:
+ case PT_SS:
+ return((unsigned int)__USER_DS);
+ case PT_CS:
+ return((unsigned int)__USER_CS);
+ default:
+ printk("getregs:unknown register %d\n", regno);
+ break;
+
+ }
+ return(0);
+}
+
+void
+putreg(struct task_struct *child, int regno, unsigned int value)
+{
+ struct pt_regs *child_regs;
+
+ child_regs = ia64_task_regs(child);
+ switch (regno / sizeof(int)) {
+
+ case PT_EBX:
+ child_regs->r11 = value;
+ break;
+ case PT_ECX:
+ child_regs->r9 = value;
+ break;
+ case PT_EDX:
+ child_regs->r10 = value;
+ break;
+ case PT_ESI:
+ child_regs->r14 = value;
+ break;
+ case PT_EDI:
+ child_regs->r15 = value;
+ break;
+ case PT_EBP:
+ child_regs->r13 = value;
+ break;
+ case PT_EAX:
+ case PT_ORIG_EAX:
+ child_regs->r8 = value;
+ break;
+ case PT_EIP:
+ child_regs->cr_iip = value;
+ break;
+ case PT_UESP:
+ child_regs->r12 = value;
+ break;
+ case PT_EFL:
+ child->thread.eflag = value;
+ break;
+ case PT_DS:
+ case PT_ES:
+ case PT_FS:
+ case PT_GS:
+ case PT_SS:
+ if (value != __USER_DS)
+ printk("setregs:try to set invalid segment register %d = %x\n", regno, value);
+ break;
+ case PT_CS:
+ if (value != __USER_CS)
+ printk("setregs:try to set invalid segment register %d = %x\n", regno, value);
+ break;
+ default:
+ printk("getregs:unknown register %d\n", regno);
+ break;
+
+ }
+}
+
+static inline void
+ia32f2ia64f(void *dst, void *src)
+{
+
+ __asm__ ("ldfe f6=[%1] ;;\n\t"
+ "stf.spill [%0]=f6"
+ :
+ : "r"(dst), "r"(src));
+ return;
+}
+
+static inline void
+ia64f2ia32f(void *dst, void *src)
+{
+
+ __asm__ ("ldf.fill f6=[%1] ;;\n\t"
+ "stfe [%0]=f6"
+ :
+ : "r"(dst), "r"(src));
+ return;
+}
+
+void
+put_fpreg(int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp, int tos)
+{
+ struct _fpreg_ia32 *f;
+ char buf[32];
+
+ f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
+ if ((regno += tos) >= 8)
+ regno -= 8;
+ switch (regno) {
+
+ case 0:
+ ia64f2ia32f(f, &ptp->f8);
+ break;
+ case 1:
+ ia64f2ia32f(f, &ptp->f9);
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ ia64f2ia32f(f, &swp->f10 + (regno - 2));
+ break;
+
+ }
+ __copy_to_user(reg, f, sizeof(*reg));
+ return;
+}
+
+void
+get_fpreg(int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp, int tos)
+{
+
+ if ((regno += tos) >= 8)
+ regno -= 8;
+ switch (regno) {
+
+ case 0:
+ __copy_from_user(&ptp->f8, reg, sizeof(*reg));
+ break;
+ case 1:
+ __copy_from_user(&ptp->f9, reg, sizeof(*reg));
+ break;
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ __copy_from_user(&swp->f10 + (regno - 2), reg, sizeof(*reg));
+ break;
+
+ }
+ return;
+}
+
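+/*
+ * save_ia32_fpstate()/restore_ia32_fpstate() copy an IA-32 _fpstate
+ * image to/from user space, translating between it and the IA-64
+ * fcr/fsr/fir/fdr registers plus the eight fp stack entries.
+ */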
+int
+save_ia32_fpstate(struct task_struct *tsk, struct _fpstate_ia32 *save)
+{
+ struct switch_stack *swp;
+ struct pt_regs *ptp;
+ int i, tos;
+
+ if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
+ return(-EIO);
+ __put_user(tsk->thread.fcr, &save->cw);
+ __put_user(tsk->thread.fsr, &save->sw);
+ __put_user(tsk->thread.fsr >> 32, &save->tag);
+ __put_user(tsk->thread.fir, &save->ipoff);
+ __put_user(__USER_CS, &save->cssel);
+ __put_user(tsk->thread.fdr, &save->dataoff);
+ __put_user(__USER_DS, &save->datasel);
+ /*
+	 * Stack frames start with 16 bytes of temp space
+ */
+ swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+ ptp = ia64_task_regs(tsk);
+ tos = (tsk->thread.fsr >> 11) & 3;
+ for (i = 0; i < 8; i++)
+ put_fpreg(i, &save->_st[i], ptp, swp, tos);
+ return(0);
+}
+
+int
+restore_ia32_fpstate(struct task_struct *tsk, struct _fpstate_ia32 *save)
+{
+ struct switch_stack *swp;
+ struct pt_regs *ptp;
+ int i, tos;
+ int fsrlo, fsrhi;
+
+ if (!access_ok(VERIFY_READ, save, sizeof(*save)))
+ return(-EIO);
+ __get_user(tsk->thread.fcr, (unsigned int *)&save->cw);
+ __get_user(fsrlo, (unsigned int *)&save->sw);
+ __get_user(fsrhi, (unsigned int *)&save->tag);
+	tsk->thread.fsr = ((long)fsrhi << 32) | (unsigned int)fsrlo;
+ __get_user(tsk->thread.fir, (unsigned int *)&save->ipoff);
+ __get_user(tsk->thread.fdr, (unsigned int *)&save->dataoff);
+ /*
+	 * Stack frames start with 16 bytes of temp space
+ */
+ swp = (struct switch_stack *)(tsk->thread.ksp + 16);
+ ptp = ia64_task_regs(tsk);
+ tos = (tsk->thread.fsr >> 11) & 3;
+ for (i = 0; i < 8; i++)
+ get_fpreg(i, &save->_st[i], ptp, swp, tos);
+ return(0);
+}
+
+asmlinkage long sys_ptrace(long, pid_t, unsigned long, unsigned long, long, long, long, long, long);
+
+/*
+ * Note that the IA32 version of `ptrace' calls the IA64 routine for
+ * many of the requests. This will only work for requests that do
+ * not need access to the calling process's `pt_regs', which is located
+ * at the address of `stack'.  Once the IA64 `sys_ptrace' has been
+ * called, the address of `stack' is no longer the address of the
+ * `pt_regs'.
+ */
+asmlinkage long
+sys32_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
+ long arg4, long arg5, long arg6, long arg7, long stack)
+{
+ struct pt_regs *regs = (struct pt_regs *) &stack;
+ struct task_struct *child;
+ long i, ret;
+ unsigned int value;
+
+ lock_kernel();
+ if (request == PTRACE_TRACEME) {
+ ret = sys_ptrace(request, pid, addr, data,
+ arg4, arg5, arg6, arg7, stack);
+ goto out;
+ }
+
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+ ret = -EPERM;
+ if (pid == 1) /* no messing around with init! */
+ goto out;
+
+ if (request == PTRACE_ATTACH) {
+ ret = sys_ptrace(request, pid, addr, data,
+ arg4, arg5, arg6, arg7, stack);
+ goto out;
+ }
+ ret = -ESRCH;
+ if (!(child->flags & PF_PTRACED))
+ goto out;
+ if (child->state != TASK_STOPPED) {
+ if (request != PTRACE_KILL)
+ goto out;
+ }
+ if (child->p_pptr != current)
+ goto out;
+
+ switch (request) {
+ case PTRACE_PEEKTEXT:
+ case PTRACE_PEEKDATA: /* read word at location addr */
+ ret = ia32_peek(regs, child, addr, &value);
+ if (ret == 0)
+ ret = put_user(value, (unsigned int *)data);
+ else
+ ret = -EIO;
+ goto out;
+
+ case PTRACE_POKETEXT:
+ case PTRACE_POKEDATA: /* write the word at location addr */
+ ret = ia32_poke(regs, child, addr, (unsigned int)data);
+ goto out;
+
+ case PTRACE_PEEKUSR: /* read word at addr in USER area */
+ ret = 0;
+ break;
+
+ case PTRACE_POKEUSR: /* write word at addr in USER area */
+ ret = 0;
+ break;
+
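+	/*
+	 * GETREGS/SETREGS transfer all 17 IA-32 registers one word at
+	 * a time through getreg()/putreg().
+	 */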
+ case IA32_PTRACE_GETREGS:
+ if (!access_ok(VERIFY_WRITE, (int *)data, 17*sizeof(int))) {
+ ret = -EIO;
+ break;
+ }
+ for ( i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+ __put_user(getreg(child, i),(unsigned int *) data);
+ data += sizeof(int);
+ }
+ ret = 0;
+ break;
+
+ case IA32_PTRACE_SETREGS:
+ {
+ unsigned int tmp;
+ if (!access_ok(VERIFY_READ, (int *)data, 17*sizeof(int))) {
+ ret = -EIO;
+ break;
+ }
+ for ( i = 0; i < 17*sizeof(int); i += sizeof(int) ) {
+ __get_user(tmp, (unsigned int *) data);
+ putreg(child, i, tmp);
+ data += sizeof(int);
+ }
+ ret = 0;
+ break;
+ }
+
+ case IA32_PTRACE_GETFPREGS:
+ ret = save_ia32_fpstate(child, (struct _fpstate_ia32 *)data);
+ break;
+
+ case IA32_PTRACE_SETFPREGS:
+ ret = restore_ia32_fpstate(child, (struct _fpstate_ia32 *)data);
+ break;
+
+ case PTRACE_SYSCALL: /* continue, stop after next syscall */
+ case PTRACE_CONT: /* restart after signal. */
+ case PTRACE_KILL:
+	case PTRACE_SINGLESTEP:	/* execute child for one instruction */
+ case PTRACE_DETACH: /* detach a process */
+ unlock_kernel();
+ ret = sys_ptrace(request, pid, addr, data,
+ arg4, arg5, arg6, arg7, stack);
+ return(ret);
+
+ default:
+ ret = -EIO;
+ break;
+
+ }
+ out:
+ unlock_kernel();
+ return ret;
+}
+
#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
/* In order to reduce some races, while at the same time doing additional
@@ -2719,37 +3154,6 @@ sys32_getresgid(__kernel_gid_t32 *rgid, __kernel_gid_t32 *egid,
return ret;
}
-struct tms32 {
- __kernel_clock_t32 tms_utime;
- __kernel_clock_t32 tms_stime;
- __kernel_clock_t32 tms_cutime;
- __kernel_clock_t32 tms_cstime;
-};
-
-extern asmlinkage long sys_times(struct tms * tbuf);
-
-asmlinkage long
-sys32_times(struct tms32 *tbuf)
-{
- struct tms t;
- long ret;
- mm_segment_t old_fs = get_fs ();
- int err;
-
- set_fs (KERNEL_DS);
- ret = sys_times(tbuf ? &t : NULL);
- set_fs (old_fs);
- if (tbuf) {
- err = put_user (t.tms_utime, &tbuf->tms_utime);
- err |= __put_user (t.tms_stime, &tbuf->tms_stime);
- err |= __put_user (t.tms_cutime, &tbuf->tms_cutime);
- err |= __put_user (t.tms_cstime, &tbuf->tms_cstime);
- if (err)
- ret = -EFAULT;
- }
- return ret;
-}
-
extern asmlinkage int sys_getgroups(int gidsetsize, gid_t *grouplist);
asmlinkage int
@@ -2789,23 +3193,6 @@ sys32_setgroups(int gidsetsize, __kernel_gid_t32 *grouplist)
return ret;
}
-extern asmlinkage int
-sys_getrusage(int who, struct rusage *ru);
-
-asmlinkage int
-sys32_getrusage(int who, struct rusage32 *ru)
-{
- struct rusage r;
- int ret;
- mm_segment_t old_fs = get_fs();
-
- set_fs (KERNEL_DS);
- ret = sys_getrusage(who, &r);
- set_fs (old_fs);
- if (put_rusage (ru, &r)) return -EFAULT;
- return ret;
-}
-
/* XXX These as well... */
extern __inline__ struct socket *
@@ -4355,4 +4742,3 @@ sys32_adjtimex(struct timex32 *utp)
return ret;
}
#endif // NOTYET
-
diff --git a/arch/ia64/kdb/Makefile b/arch/ia64/kdb/Makefile
deleted file mode 100644
index 2e8db3fc4..000000000
--- a/arch/ia64/kdb/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Makefile for ia64-specific kdb files..
-#
-# Copyright 1999, Silicon Graphics Inc.
-#
-# Written March 1999 by Scott Lurndal at Silicon Graphics, Inc.
-# Code for IA64 written by Goutham Rao <goutham.rao@intel.com> and
-# Sreenivas Subramoney <sreenivas.subramoney@intel.com>
-#
-
-SUB_DIRS :=
-MOD_SUB_DIRS := $(SUB_DIRS)
-ALL_SUB_DIRS := $(SUB_DIRS)
-
-.S.o:
- $(CC) $(AFLAGS) -traditional -c $< -o $*.o
-
-L_TARGET = kdb.a
-L_OBJS = kdbsupport.o kdb_io.o kdb_bt.o kdb_traps.o
-
-include $(TOPDIR)/Rules.make
diff --git a/arch/ia64/kdb/kdb_bt.c b/arch/ia64/kdb/kdb_bt.c
deleted file mode 100644
index dbcb7a575..000000000
--- a/arch/ia64/kdb/kdb_bt.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Minimalist Kernel Debugger
- * Machine dependent stack traceback code for IA-64.
- *
- * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com>
- * Copyright (C) 1999 Sreenivas Subramoney <sreenivas.subramoney@intel.com>
- * Intel Corporation, August 1999.
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 99/12/03 D. Mosberger Reimplemented based on <asm-ia64/unwind.h> API.
- * 99/12/06 D. Mosberger Added support for backtracing other processes.
- */
-
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kdb.h>
-#include <asm/system.h>
-#include <asm/current.h>
-#include <asm/kdbsupport.h>
-
-/*
- * Minimal stack back trace functionality.
- */
-int
-kdb_bt (int argc, const char **argv, const char **envp, struct pt_regs *regs)
-{
- struct task_struct *task = current;
- struct ia64_frame_info info;
- char *name;
- int diag;
-
- if (strcmp(argv[0], "btp") == 0) {
- unsigned long pid;
-
- diag = kdbgetularg(argv[1], &pid);
- if (diag)
- return diag;
-
- task = find_task_by_pid(pid);
- if (!task) {
- kdb_printf("No process with pid == %d found\n", pid);
- return 0;
- }
- regs = ia64_task_regs(task);
- } else if (argc) {
- kdb_printf("bt <address> is unsupported for IA-64\n");
- return 0;
- }
-
- if (task == current) {
- /*
- * Upon entering kdb, the stack frame looks like this:
- *
- * +---------------------+
- * | struct pt_regs |
- * +---------------------+
- * | |
- * | kernel stack |
- * | |
- * +=====================+ <--- top of stack upon entering kdb
- * | struct pt_regs |
- * +---------------------+
- * | struct switch_stack |
- * +---------------------+
- */
- if (user_mode(regs)) {
- /* We are not implementing stack backtrace from user mode code */
- kdb_printf ("Not in Kernel\n");
- return 0;
- }
- ia64_unwind_init_from_current(&info, regs);
- } else {
- /*
- * For a blocked task, the stack frame looks like this:
- *
- * +---------------------+
- * | struct pt_regs |
- * +---------------------+
- * | |
- * | kernel stack |
- * | |
- * +---------------------+
- * | struct switch_stack |
- * +=====================+ <--- task->thread.ksp
- */
- ia64_unwind_init_from_blocked_task(&info, task);
- }
-
- kdb_printf("Ret Address Reg Stack base Name\n\n") ;
- do {
- unsigned long ip = ia64_unwind_get_ip(&info);
-
- name = kdbnearsym(ip);
- if (!name) {
- kdb_printf("Interrupt\n");
- return 0;
- }
- kdb_printf("0x%016lx: [0x%016lx] %s\n", ip, ia64_unwind_get_bsp(&info), name);
- } while (ia64_unwind_to_previous_frame(&info) >= 0);
- return 0;
-}
diff --git a/arch/ia64/kdb/kdb_io.c b/arch/ia64/kdb/kdb_io.c
deleted file mode 100644
index 0b5c6fd44..000000000
--- a/arch/ia64/kdb/kdb_io.c
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Kernel Debugger Console I/O handler
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Scott Lurndal (slurn@engr.sgi.com)
- * Copyright (C) Scott Foehner (sfoehner@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- *
- * Written March 1999 by Scott Lurndal at Silicon Graphics, Inc.
- *
- * Modifications from:
- * Chuck Fleckenstein 1999/07/20
- * Move kdb_info struct declaration to this file
- * for cases where serial support is not compiled into
- * the kernel.
- *
- * Masahiro Adegawa 1999/07/20
- * Handle some peculiarities of japanese 86/106
- * keyboards.
- *
- * marc@mucom.co.il 1999/07/20
- * Catch buffer overflow for serial input.
- *
- * Scott Foehner
- * Port to ia64
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/console.h>
-#include <linux/serial_reg.h>
-#include <linux/spinlock.h>
-
-#include <asm/io.h>
-
-#include "pc_keyb.h"
-
-int kdb_port = 0;
-
-/*
- * This module contains code to read characters from the keyboard or a serial
- * port.
- *
- * It is used by the kernel debugger, and is polled, not interrupt driven.
- *
- */
-
-/*
- * send: Send a byte to the keyboard controller. Used primarily to
- * alter LED settings.
- */
-
-static void
-kdb_kbdsend(unsigned char byte)
-{
- while (inb(KBD_STATUS_REG) & KBD_STAT_IBF)
- ;
- outb(KBD_DATA_REG, byte);
-}
-
-static void
-kdb_kbdsetled(int leds)
-{
- kdb_kbdsend(KBD_CMD_SET_LEDS);
- kdb_kbdsend((unsigned char)leds);
-}
-
-static void
-console_read (char *buffer, size_t bufsize)
-{
- struct console *in;
- struct console *out;
- char *cp, ch;
-
- for (in = console_drivers; in; in = in->next) {
- if ((in->flags & CON_ENABLED) && (in->read || in->wait_key))
- break;
- }
- for (out = console_drivers; out; out = out->next) {
- if ((out->flags & CON_ENABLED) && out->write)
- break;
- }
-
- if ((!in->read && !in->wait_key) || !out->write) {
- panic("kdb_io: can't do console i/o!");
- }
-
- if (in->read) {
- /* this is untested... */
- (*in->read)(in, buffer, bufsize);
- return;
- }
-
- bufsize -= 2; /* leave room for CR & NUL terminator */
- cp = buffer;
- while (1) {
- ch = (*in->wait_key)(in);
- switch (ch) {
- case '\b':
- if (cp > buffer) {
- --cp, ++bufsize;
- (*out->write)(out, "\b \b", 3);
- }
- break;
-
- case '\025':
- while (cp > buffer) {
- --cp, ++bufsize;
- (*out->write)(out, "\b \b", 3);
- }
- break;
-
- case '\r':
- case '\n':
- (*out->write)(out, "\r\n", 2);
- *cp++ = '\n';
- *cp++ = '\0';
- return;
-
- default:
- if (bufsize > 0) {
- (*out->write)(out, &ch, 1);
- --bufsize;
- *cp++ = ch;
- }
- break;
- }
- }
-}
-
-char *
-kdb_getscancode(char *buffer, size_t bufsize)
-{
- /*
- * XXX Shouldn't kdb _always_ use console based I/O? That's what the console
- * abstraction is for, after all... ---davidm
- */
-#ifdef CONFIG_IA64_HP_SIM
- extern spinlock_t console_lock;
- unsigned long flags;
-
- spin_lock_irqsave(&console_lock, flags);
- console_read(buffer, bufsize);
- spin_unlock_irqrestore(&console_lock, flags);
- return buffer;
-#else /* !CONFIG_IA64_HP_SIM */
- char *cp = buffer;
- int scancode, scanstatus;
- static int shift_lock = 0; /* CAPS LOCK state (0-off, 1-on) */
- static int shift_key = 0; /* Shift next keypress */
- static int ctrl_key = 0;
- static int leds = 2; /* Num lock */
- u_short keychar;
- extern u_short plain_map[], shift_map[], ctrl_map[];
-
- bufsize -= 2; /* Reserve space for newline and null byte */
-
- /*
- * If we came in via a serial console, we allow that to
- * be the input window for kdb.
- */
- if (kdb_port != 0) {
- char ch;
- int status;
-#define serial_inp(info, offset) inb((info) + (offset))
-#define serial_out(info, offset, v) outb((v), (info) + (offset))
-
- while(1) {
- while ((status = serial_inp(kdb_port, UART_LSR))
- & UART_LSR_DR) {
-readchar:
- ch = serial_inp(kdb_port, UART_RX);
- if (ch == 8) { /* BS */
- if (cp > buffer) {
- --cp, bufsize++;
- printk("%c %c", 0x08, 0x08);
- }
- continue;
- }
- serial_out(kdb_port, UART_TX, ch);
- if (ch == 13) { /* CR */
- *cp++ = '\n';
- *cp++ = '\0';
- serial_out(kdb_port, UART_TX, 10);
- return(buffer);
- }
- /*
- * Discard excess characters
- */
- if (bufsize > 0) {
- *cp++ = ch;
- bufsize--;
- }
- }
- while (((status = serial_inp(kdb_port, UART_LSR))
- & UART_LSR_DR) == 0);
- }
- }
-
- while (1) {
-
- /*
- * Wait for a valid scancode
- */
-
- while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
- ;
-
- /*
- * Fetch the scancode
- */
- scancode = inb(KBD_DATA_REG);
- scanstatus = inb(KBD_STATUS_REG);
-
- /*
- * Ignore mouse events.
- */
- if (scanstatus & KBD_STAT_MOUSE_OBF)
- continue;
-
- /*
- * Ignore release, trigger on make
- * (except for shift keys, where we want to
- * keep the shift state so long as the key is
- * held down).
- */
-
- if (((scancode&0x7f) == 0x2a)
- || ((scancode&0x7f) == 0x36)) {
- /*
- * Next key may use shift table
- */
- if ((scancode & 0x80) == 0) {
- shift_key=1;
- } else {
- shift_key=0;
- }
- continue;
- }
-
- if ((scancode&0x7f) == 0x1d) {
- /*
- * Left ctrl key
- */
- if ((scancode & 0x80) == 0) {
- ctrl_key = 1;
- } else {
- ctrl_key = 0;
- }
- continue;
- }
-
- if ((scancode & 0x80) != 0)
- continue;
-
- scancode &= 0x7f;
-
- /*
- * Translate scancode
- */
-
- if (scancode == 0x3a) {
- /*
- * Toggle caps lock
- */
- shift_lock ^= 1;
- leds ^= 0x4; /* toggle caps lock led */
-
- kdb_kbdsetled(leds);
- continue;
- }
-
- if (scancode == 0x0e) {
- /*
- * Backspace
- */
- if (cp > buffer) {
- --cp, bufsize++;
-
- /*
- * XXX - erase character on screen
- */
- printk("%c %c", 0x08, 0x08);
- }
- continue;
- }
-
- if (scancode == 0xe0) {
- continue;
- }
-
- /*
- * For Japanese 86/106 keyboards
- * See comment in drivers/char/pc_keyb.c.
- * - Masahiro Adegawa
- */
- if (scancode == 0x73) {
- scancode = 0x59;
- } else if (scancode == 0x7d) {
- scancode = 0x7c;
- }
-
- if (!shift_lock && !shift_key) {
- keychar = plain_map[scancode];
- } else if (shift_lock || shift_key) {
- keychar = shift_map[scancode];
- } else if (ctrl_key) {
- keychar = ctrl_map[scancode];
- } else {
- keychar = 0x0020;
- printk("Unknown state/scancode (%d)\n", scancode);
- }
-
- if ((scancode & 0x7f) == 0x1c) {
- /*
- * enter key. All done.
- */
- printk("\n");
- break;
- }
-
- /*
- * echo the character.
- */
- printk("%c", keychar&0xff);
-
- if (bufsize) {
- --bufsize;
- *cp++ = keychar&0xff;
- } else {
- printk("buffer overflow\n");
- break;
- }
-
- }
-
- *cp++ = '\n'; /* White space for parser */
- *cp++ = '\0'; /* String termination */
-
-#if defined(NOTNOW)
- cp = buffer;
- while (*cp) {
- printk("char 0x%x\n", *cp++);
- }
-#endif
-
- return buffer;
-#endif /* !CONFIG_IA64_HP_SIM */
-}
-
diff --git a/arch/ia64/kdb/kdb_traps.c b/arch/ia64/kdb/kdb_traps.c
deleted file mode 100644
index 6358f7a30..000000000
--- a/arch/ia64/kdb/kdb_traps.c
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/kdb.h>
-
-static struct kdb_bp_support {
- unsigned long addr ;
- int slot ;
-} kdb_bp_info[NR_CPUS] ;
-
-
-extern void kdb_bp_install (void);
-
-/*
- * This gets invoked right before a call to ia64_fault().
- * Returns zero the normal fault handler should be invoked.
- */
-long
-ia64_kdb_fault_handler (unsigned long vector, unsigned long isr, unsigned long ifa,
- unsigned long iim, unsigned long itir, unsigned long arg5,
- unsigned long arg6, unsigned long arg7, unsigned long stack)
-{
- struct switch_stack *sw = (struct switch_stack *) &stack;
- struct pt_regs *regs = (struct pt_regs *) (sw + 1);
- int bundle_slot;
-
- /*
- * TBD
- * If KDB is configured, enter KDB for any fault.
- */
- if ((vector == 29) || (vector == 35) || (vector == 36)) {
- if (!user_mode(regs)) {
- bundle_slot = ia64_psr(regs)->ri;
- if (vector == 29) {
- if (bundle_slot == 0) {
- kdb_bp_info[0].addr = regs->cr_iip;
- kdb_bp_info[0].slot = bundle_slot;
- kdb(KDB_REASON_FLTDBG, 0, regs);
- } else {
- if ((bundle_slot < 3) &&
- (kdb_bp_info[0].addr == regs->cr_iip))
- {
- ia64_psr(regs)->id = 1;
- ia64_psr(regs)->db = 1;
- kdb_bp_install() ;
- } else /* some error ?? */
- kdb(KDB_REASON_FLTDBG, 0, regs);
- }
- } else /* single step or taken branch */
- kdb(KDB_REASON_DEBUG, 0, regs);
- return 1;
- }
- }
- return 0;
-}
diff --git a/arch/ia64/kdb/kdbsupport.c b/arch/ia64/kdb/kdbsupport.c
deleted file mode 100644
index d074a01a3..000000000
--- a/arch/ia64/kdb/kdbsupport.c
+++ /dev/null
@@ -1,1329 +0,0 @@
-/*
- * Minimalist Kernel Debugger
- *
- * Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Scott Lurndal (slurn@engr.sgi.com)
- * Copyright (C) Scott Foehner (sfoehner@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Written March 1999 by Scott Lurndal at Silicon Graphics, Inc.
- *
- * Modifications from:
- * Richard Bass 1999/07/20
- * Many bug fixes and enhancements.
- * Scott Foehner
- * Port to ia64
- * Srinivasa Thirumalachar
- * RSE support for ia64
- */
-
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kdb.h>
-#include <linux/stddef.h>
-#include <linux/vmalloc.h>
-
-#include <asm/delay.h>
-#include <asm/kdbsupport.h>
-#include <asm/rse.h>
-#include <asm/uaccess.h>
-
-extern kdb_state_t kdb_state ;
-k_machreg_t dbregs[KDB_DBREGS];
-
-static int __init
-kdb_setup (char *str)
-{
- kdb_flags |= KDB_FLAG_EARLYKDB;
- return 1;
-}
-
-__setup("kdb", kdb_setup);
-
-static int
-kdb_ia64_itm (int argc, const char **argv, const char **envp, struct pt_regs *regs)
-{
- int diag;
- unsigned long val;
-
- diag = kdbgetularg(argv[1], &val);
- if (diag)
- return diag;
- kdb_printf("new itm=%0xlx\n", val);
-
- ia64_set_itm(val);
- return 0;
-}
-
-static int
-kdb_ia64_sir (int argc, const char **argv, const char **envp, struct pt_regs *regs)
-{
- u64 lid, tpr, lrr0, lrr1, itv, pmv, cmcv;
-
- asm ("mov %0=cr.lid" : "=r"(lid));
- asm ("mov %0=cr.tpr" : "=r"(tpr));
- asm ("mov %0=cr.lrr0" : "=r"(lrr0));
- asm ("mov %0=cr.lrr1" : "=r"(lrr1));
- printk("lid=0x%lx, tpr=0x%lx, lrr0=0x%lx, llr1=0x%lx\n", lid, tpr, lrr0, lrr1);
-
- asm ("mov %0=cr.itv" : "=r"(itv));
- asm ("mov %0=cr.pmv" : "=r"(pmv));
- asm ("mov %0=cr.cmcv" : "=r"(cmcv));
- printk("itv=0x%lx, pmv=0x%lx, cmcv=0x%lx\n", itv, pmv, cmcv);
-
- printk("irr=0x%016lx,0x%016lx,0x%016lx,0x%016lx\n",
- ia64_get_irr0(), ia64_get_irr1(), ia64_get_irr2(), ia64_get_irr3());
-
- printk("itc=0x%016lx, itm=0x%016lx\n", ia64_get_itc(), ia64_get_itm());
- return 0;
-}
-
-void __init
-kdb_init (void)
-{
- extern void kdb_inittab(void);
- unsigned long reg;
-
- kdb_inittab();
- kdb_initbptab();
-#if 0
- kdb_disinit();
-#endif
- kdb_printf("kdb version %d.%d by Scott Lurndal. "\
- "Copyright SGI, All Rights Reserved\n",
- KDB_MAJOR_VERSION, KDB_MINOR_VERSION);
-
- /* Enable debug registers */
- __asm__ ("mov %0=psr":"=r"(reg));
- reg |= IA64_PSR_DB;
- __asm__ ("mov psr.l=%0"::"r"(reg));
- ia64_srlz_d();
-
- /* Init kdb state */
- kdb_state.bkpt_handling_state = BKPTSTATE_NOT_HANDLED ;
-
- kdb_register("irr", kdb_ia64_sir, "", "Show interrupt registers", 0);
- kdb_register("itm", kdb_ia64_itm, "", "Set new ITM value", 0);
-}
-
-/*
- * kdbprintf
- * kdbgetword
- * kdb_getstr
- */
-
-char *
-kbd_getstr(char *buffer, size_t bufsize, char *prompt)
-{
- extern char* kdb_getscancode(char *, size_t);
-
-#if defined(CONFIG_SMP)
- kdb_printf(prompt, smp_processor_id());
-#else
- kdb_printf("%s", prompt);
-#endif
-
- return kdb_getscancode(buffer, bufsize);
-
-}
-
-int
-kdb_printf(const char *fmt, ...)
-{
- char buffer[256];
- va_list ap;
- int diag;
- int linecount;
-
- diag = kdbgetintenv("LINES", &linecount);
- if (diag)
- linecount = 22;
-
- va_start(ap, fmt);
- vsprintf(buffer, fmt, ap);
- va_end(ap);
-
- printk("%s", buffer);
-#if 0
- if (strchr(buffer, '\n') != NULL) {
- kdb_nextline++;
- }
-
- if (kdb_nextline == linecount) {
- char buf1[16];
- char buf2[32];
- extern char* kdb_getscancode(char *, size_t);
- char *moreprompt;
-
- /*
- * Pause until cr.
- */
- moreprompt = kdbgetenv("MOREPROMPT");
- if (moreprompt == NULL) {
- moreprompt = "more> ";
- }
-
-#if defined(CONFIG_SMP)
- if (strchr(moreprompt, '%')) {
- sprintf(buf2, moreprompt, smp_processor_id());
- moreprompt = buf2;
- }
-#endif
-
- printk(moreprompt);
- (void) kdb_getscancode(buf1, sizeof(buf1));
-
- kdb_nextline = 1;
-
- if ((buf1[0] == 'q')
- || (buf1[0] == 'Q')) {
- kdb_longjmp(&kdbjmpbuf, 1);
- }
- }
-#endif
- return 0;
-}
-
-unsigned long
-kdbgetword(unsigned long addr, int width)
-{
- /*
- * This function checks the address for validity. Any address
- * in the range PAGE_OFFSET to high_memory is legal, any address
- * which maps to a vmalloc region is legal, and any address which
- * is a user address, we use get_user() to verify validity.
- */
-
- if (addr < PAGE_OFFSET) {
- /*
- * Usermode address.
- */
- unsigned long diag;
- unsigned long ulval;
-
- switch (width) {
- case 8:
- { unsigned long *lp;
-
- lp = (unsigned long *) addr;
- diag = get_user(ulval, lp);
- break;
- }
- case 4:
- { unsigned int *ip;
-
- ip = (unsigned int *) addr;
- diag = get_user(ulval, ip);
- break;
- }
- case 2:
- { unsigned short *sp;
-
- sp = (unsigned short *) addr;
- diag = get_user(ulval, sp);
- break;
- }
- case 1:
- { unsigned char *cp;
-
- cp = (unsigned char *) addr;
- diag = get_user(ulval, cp);
- break;
- }
- default:
- printk("kdbgetword: Bad width\n");
- return 0L;
- }
-
- if (diag) {
- if ((kdb_flags & KDB_FLAG_SUPRESS) == 0) {
- printk("kdb: Bad user address 0x%lx\n", addr);
- kdb_flags |= KDB_FLAG_SUPRESS;
- }
- return 0L;
- }
- kdb_flags &= ~KDB_FLAG_SUPRESS;
- return ulval;
- }
-
- if (addr > (unsigned long)high_memory) {
- extern int kdb_vmlist_check(unsigned long, unsigned long);
-
- if (!kdb_vmlist_check(addr, addr+width)) {
- /*
- * Would appear to be an illegal kernel address;
- * Print a message once, and don't print again until
- * a legal address is used.
- */
- if ((kdb_flags & KDB_FLAG_SUPRESS) == 0) {
- printk("kdb: Bad kernel address 0x%lx\n", addr);
- kdb_flags |= KDB_FLAG_SUPRESS;
- }
- return 0L;
- }
- }
-
- /*
- * A good address. Reset error flag.
- */
- kdb_flags &= ~KDB_FLAG_SUPRESS;
-
- switch (width) {
- case 8:
- { unsigned long *lp;
-
- lp = (unsigned long *)(addr);
- return *lp;
- }
- case 4:
- { unsigned int *ip;
-
- ip = (unsigned int *)(addr);
- return *ip;
- }
- case 2:
- { unsigned short *sp;
-
- sp = (unsigned short *)(addr);
- return *sp;
- }
- case 1:
- { unsigned char *cp;
-
- cp = (unsigned char *)(addr);
- return *cp;
- }
- }
-
- printk("kdbgetword: Bad width\n");
- return 0L;
-}
-
-/*
- * Start of breakpoint management routines
- */
-
-/*
- * Arg: bp structure
- */
-
-int
-kdb_allocdbreg(kdb_bp_t *bp)
-{
- int i=0;
-
- /* For inst bkpt, just return. No hw reg alloc to be done. */
-
- if (bp->bp_mode == BKPTMODE_INST) {
- return i;
- } else if (bp->bp_mode == BKPTMODE_DATAW) {
- for(i=0; i<KDB_DBREGS; i++) {
- if (dbregs[i] == 0xffffffff) {
- dbregs[i] = 0;
- return i;
- }
- }
- }
-
- return -1;
-}
-
-void
-kdb_freedbreg(kdb_bp_t *bp)
-{
- if (bp->bp_mode == BKPTMODE_DATAW)
- dbregs[bp->bp_reg] = 0xffffffff;
-}
-
-void
-kdb_initdbregs(void)
-{
- int i;
-
- for(i=0; i<KDB_DBREGS; i++) {
- dbregs[i] = 0xffffffff;
- }
-}
-int
-kdbinstalltrap(int type, handler_t newh, handler_t *oldh)
-{
- /*
- * Usurp INTn. XXX - TBD.
- */
-
- return 0;
-}
-
-int
-install_instbkpt(kdb_bp_t *bp)
-{
- unsigned long *addr = (unsigned long *)bp->bp_addr ;
- bundle_t *bundle = (bundle_t *)bp->bp_longinst;
-
- /* save current bundle */
- *bundle = *(bundle_t *)addr ;
-
- /* Set the break point! */
- ((bundle_t *)addr)->lform.low8 = (
- (((bundle_t *)addr)->lform.low8 & ~INST_SLOT0_MASK) |
- BREAK_INSTR);
-
- /* set flag */
- bp->bp_instvalid = 1 ;
-
- /* flush icache as it is stale now */
- ia64_flush_icache_page((unsigned long)addr) ;
-
-#ifdef KDB_DEBUG
- kdb_printf ("[0x%016lx]: install 0x%016lx with 0x%016lx\n",
- addr, bundle->lform.low8, addr[0]) ;
-#endif
- return 0 ;
-}
-
-int
-install_databkpt(kdb_bp_t *bp)
-{
- unsigned long dbreg_addr = bp->bp_reg * 2;
- unsigned long dbreg_cond = dbreg_addr + 1;
- unsigned long value = 0x8fffffffffffffff;
- unsigned long addr = (unsigned long)bp->bp_addr;
- __asm__ ("mov dbr[%0]=%1"::"r"(dbreg_cond),"r"(value));
-// __asm__ ("movl %0,%%db0\n\t"::"r"(contents));
- __asm__ ("mov dbr[%0]=%1"::"r"(dbreg_addr),"r"(addr));
- ia64_insn_group_barrier();
- ia64_srlz_i();
- ia64_insn_group_barrier();
-
-#ifdef KDB_DEBUG
- kdb_printf("installed dbkpt at 0x%016lx\n", addr) ;
-#endif
- return 0;
-}
-
-int
-kdbinstalldbreg(kdb_bp_t *bp)
-{
- if (bp->bp_mode == BKPTMODE_INST) {
- return install_instbkpt(bp) ;
- } else if (bp->bp_mode == BKPTMODE_DATAW) {
- return install_databkpt(bp) ;
- }
- return 0;
-}
-
-void
-remove_instbkpt(kdb_bp_t *bp)
-{
- unsigned long *addr = (unsigned long *)bp->bp_addr ;
- bundle_t *bundle = (bundle_t *)bp->bp_longinst;
-
- if (!bp->bp_instvalid)
- /* Nothing to remove. If we just alloced the bkpt
- * but never resumed, the bp_inst will not be valid. */
- return ;
-
-#ifdef KDB_DEBUG
- kdb_printf ("[0x%016lx]: remove 0x%016lx with 0x%016lx\n",
- addr, addr[0], bundle->lform.low8) ;
-#endif
-
- /* restore current bundle */
- *(bundle_t *)addr = *bundle ;
- /* reset the flag */
- bp->bp_instvalid = 0 ;
- ia64_flush_icache_page((unsigned long)addr) ;
-}
-
-void
-remove_databkpt(kdb_bp_t *bp)
-{
- int regnum = bp->bp_reg ;
- unsigned long dbreg_addr = regnum * 2;
- unsigned long dbreg_cond = dbreg_addr + 1;
- unsigned long value = 0x0fffffffffffffff;
- __asm__ ("mov dbr[%0]=%1"::"r"(dbreg_cond),"r"(value));
-// __asm__ ("movl %0,%%db0\n\t"::"r"(contents));
- ia64_insn_group_barrier();
- ia64_srlz_i();
- ia64_insn_group_barrier();
-
-#ifdef KDB_DEBUG
- kdb_printf("removed dbkpt at 0x%016lx\n", bp->bp_addr) ;
-#endif
-}
-
-void
-kdbremovedbreg(kdb_bp_t *bp)
-{
- if (bp->bp_mode == BKPTMODE_INST) {
- remove_instbkpt(bp) ;
- } else if (bp->bp_mode == BKPTMODE_DATAW) {
- remove_databkpt(bp) ;
- }
-}
-
-k_machreg_t
-kdb_getdr6(void)
-{
- return kdb_getdr(6);
-}
-
-k_machreg_t
-kdb_getdr7(void)
-{
- return kdb_getdr(7);
-}
-
-k_machreg_t
-kdb_getdr(int regnum)
-{
- k_machreg_t contents = 0;
- unsigned long reg = (unsigned long)regnum;
-
- __asm__ ("mov %0=ibr[%1]"::"r"(contents),"r"(reg));
-// __asm__ ("mov ibr[%0]=%1"::"r"(dbreg_cond),"r"(value));
-
- return contents;
-}
-
-
-k_machreg_t
-kdb_getcr(int regnum)
-{
- k_machreg_t contents = 0;
- return contents;
-}
-
-void
-kdb_putdr6(k_machreg_t contents)
-{
- kdb_putdr(6, contents);
-}
-
-void
-kdb_putdr7(k_machreg_t contents)
-{
- kdb_putdr(7, contents);
-}
-
-void
-kdb_putdr(int regnum, k_machreg_t contents)
-{
-}
-
-void
-get_fault_regs(fault_regs_t *fr)
-{
- fr->ifa = 0 ;
- fr->isr = 0 ;
-
- __asm__ ("rsm psr.ic;;") ;
- ia64_srlz_d();
- __asm__ ("mov %0=cr.ifa" : "=r"(fr->ifa));
- __asm__ ("mov %0=cr.isr" : "=r"(fr->isr));
- __asm__ ("ssm psr.ic;;") ;
- ia64_srlz_d();
-}
-
-/*
- * kdb_db_trap
- *
- * Perform breakpoint processing upon entry to the
- * processor debugger fault. Determine and print
- * the active breakpoint.
- *
- * Parameters:
- * ef Exception frame containing machine register state
- * reason Why did we enter kdb - fault or break
- * Outputs:
- * None.
- * Returns:
- * 0 Standard instruction or data breakpoint encountered
- * 1 Single Step fault ('ss' command)
- * 2 Single Step fault, caller should continue ('ssb' command)
- * Locking:
- * None.
- * Remarks:
- * Yup, there be goto's here.
- */
-
-int
-kdb_db_trap(struct pt_regs *ef, int reason)
-{
- int i, rv=0;
-
- /* Trying very hard to not change the interface to kdb.
- * So, eventhough we have these values in the fault function
- * it is not passed in but read again.
- */
- fault_regs_t faultregs ;
-
- if (reason == KDB_REASON_FLTDBG)
- get_fault_regs(&faultregs) ;
-
- /* NOTE : XXX: This has to be done only for data bkpts */
- /* Prevent it from continuously faulting */
- ef->cr_ipsr |= 0x0000002000000000;
-
- if (ef->cr_ipsr & 0x0000010000000000) {
- /* single step */
- ef->cr_ipsr &= 0xfffffeffffffffff;
- if ((kdb_state.bkpt_handling_state == BKPTSTATE_HANDLED)
- && (kdb_state.cmd_given == CMDGIVEN_GO))
- ;
- else
- kdb_printf("SS trap at 0x%lx\n", ef->cr_iip + ia64_psr(ef)->ri);
- rv = 1;
- kdb_state.reason_for_entry = ENTRYREASON_SSTEP ;
- goto handled;
- } else
- kdb_state.reason_for_entry = ENTRYREASON_GO ;
-
- /*
- * Determine which breakpoint was encountered.
- */
- for(i=0; i<KDB_MAXBPT; i++) {
- if ((breakpoints[i].bp_enabled)
- && ((breakpoints[i].bp_addr == ef->cr_iip) ||
- ((faultregs.ifa) &&
- (breakpoints[i].bp_addr == faultregs.ifa)))) {
- /*
- * Hit this breakpoint. Remove it while we are
- * handling hit to avoid recursion. XXX ??
- */
- if (breakpoints[i].bp_addr == faultregs.ifa)
- kdb_printf("Data breakpoint #%d for 0x%lx at 0x%lx\n",
- i, breakpoints[i].bp_addr, ef->cr_iip + ia64_psr(ef)->ri);
- else
- kdb_printf("%s breakpoint #%d at 0x%lx\n",
- rwtypes[0],
- i, breakpoints[i].bp_addr);
-
- /*
- * For an instruction breakpoint, disassemble
- * the current instruction.
- */
-#if 0
- if (rw == 0) {
- kdb_id1(ef->eip);
- }
-#endif
-
- goto handled;
- }
- }
-
-#if 0
-unknown:
-#endif
- kdb_printf("Unknown breakpoint. Should forward. \n");
- /* Need a flag for this. The skip should be done XXX
- * when a go or single step command is done for this session.
- * For now it is here.
- */
- ia64_increment_ip(ef) ;
- return rv ;
-
-handled:
-
- /* We are here after handling a break inst/data bkpt */
- if (kdb_state.bkpt_handling_state == BKPTSTATE_NOT_HANDLED) {
- kdb_state.bkpt_handling_state = BKPTSTATE_HANDLED ;
- if (kdb_state.reason_for_entry == ENTRYREASON_GO) {
- kdb_setsinglestep(ef) ;
- kdb_state.kdb_action = ACTION_NOBPINSTALL;
- /* We dont want bp install just this once */
- kdb_state.cmd_given = CMDGIVEN_UNKNOWN ;
- }
- } else if (kdb_state.bkpt_handling_state == BKPTSTATE_HANDLED) {
- kdb_state.bkpt_handling_state = BKPTSTATE_NOT_HANDLED ;
- if (kdb_state.reason_for_entry == ENTRYREASON_SSTEP) {
- if (kdb_state.cmd_given == CMDGIVEN_GO)
- kdb_state.kdb_action = ACTION_NOPROMPT ;
- kdb_state.cmd_given = CMDGIVEN_UNKNOWN ;
- }
- } else
- kdb_printf("Unknown value of bkpt state\n") ;
-
- return rv;
-
-}
-
-void
-kdb_setsinglestep(struct pt_regs *regs)
-{
- regs->cr_ipsr |= 0x0000010000000000;
-#if 0
- regs->eflags |= EF_TF;
-#endif
-}
-
-/*
- * Symbol table functions.
- */
-
-/*
- * kdbgetsym
- *
- * Return the symbol table entry for the given symbol
- *
- * Parameters:
- * symname Character string containing symbol name
- * Outputs:
- * Returns:
- * NULL Symbol doesn't exist
- * ksp Pointer to symbol table entry
- * Locking:
- * None.
- * Remarks:
- */
-
-__ksymtab_t *
-kdbgetsym(const char *symname)
-{
- __ksymtab_t *ksp = __kdbsymtab;
- int i;
-
- if (symname == NULL)
- return NULL;
-
- for (i=0; i<__kdbsymtabsize; i++, ksp++) {
- if (ksp->name && (strcmp(ksp->name, symname)==0)) {
- return ksp;
- }
- }
-
- return NULL;
-}
-
-/*
- * kdbgetsymval
- *
- * Return the address of the given symbol.
- *
- * Parameters:
- * symname Character string containing symbol name
- * Outputs:
- * Returns:
- * 0 Symbol name is NULL
- * addr Address corresponding to symname
- * Locking:
- * None.
- * Remarks:
- */
-
-unsigned long
-kdbgetsymval(const char *symname)
-{
- __ksymtab_t *ksp = kdbgetsym(symname);
-
- return (ksp?ksp->value:0);
-}
-
-/*
- * kdbaddmodsym
- *
- * Add a symbol to the kernel debugger symbol table. Called when
- * a new module is loaded into the kernel.
- *
- * Parameters:
- * symname Character string containing symbol name
- * value Value of symbol
- * Outputs:
- * Returns:
- * 0 Successfully added to table.
- * 1 Duplicate symbol
- * 2 Symbol table full
- * Locking:
- * None.
- * Remarks:
- */
-
-int
-kdbaddmodsym(char *symname, unsigned long value)
-{
-
- /*
- * Check for duplicate symbols.
- */
- if (kdbgetsym(symname)) {
- printk("kdb: Attempt to register duplicate symbol '%s' @ 0x%lx\n",
- symname, value);
- return 1;
- }
-
- if (__kdbsymtabsize < __kdbmaxsymtabsize) {
- __ksymtab_t *ksp = &__kdbsymtab[__kdbsymtabsize++];
-
- ksp->name = symname;
- ksp->value = value;
- return 0;
- }
-
- /*
- * No room left in kernel symbol table.
- */
- {
- static int __kdbwarn = 0;
-
- if (__kdbwarn == 0) {
- __kdbwarn++;
- printk("kdb: Exceeded symbol table size. Increase CONFIG_KDB_SYMTAB_SIZE in kernel configuration\n");
- }
- }
-
- return 2;
-}
-
-/*
- * kdbdelmodsym
- *
- * Add a symbol to the kernel debugger symbol table. Called when
- * a new module is loaded into the kernel.
- *
- * Parameters:
- * symname Character string containing symbol name
- * value Value of symbol
- * Outputs:
- * Returns:
- * 0 Successfully added to table.
- * 1 Symbol not found
- * Locking:
- * None.
- * Remarks:
- */
-
-int
-kdbdelmodsym(const char *symname)
-{
- __ksymtab_t *ksp, *endksp;
-
- if (symname == NULL)
- return 1;
-
- /*
- * Search for the symbol. If found, move
- * all successive symbols down one position
- * in the symbol table to avoid leaving holes.
- */
- endksp = &__kdbsymtab[__kdbsymtabsize];
- for (ksp = __kdbsymtab; ksp < endksp; ksp++) {
- if (ksp->name && (strcmp(ksp->name, symname) == 0)) {
- endksp--;
- for ( ; ksp < endksp; ksp++) {
- *ksp = *(ksp + 1);
- }
- __kdbsymtabsize--;
- return 0;
- }
- }
-
- return 1;
-}
-
-/*
- * kdbnearsym
- *
- * Return the name of the symbol with the nearest address
- * less than 'addr'.
- *
- * Parameters:
- * addr Address to check for symbol near
- * Outputs:
- * Returns:
- * NULL No symbol with address less than 'addr'
- * symbol Returns the actual name of the symbol.
- * Locking:
- * None.
- * Remarks:
- */
-
-char *
-kdbnearsym(unsigned long addr)
-{
- __ksymtab_t *ksp = __kdbsymtab;
- __ksymtab_t *kpp = NULL;
- int i;
-
- for(i=0; i<__kdbsymtabsize; i++, ksp++) {
- if (!ksp->name)
- continue;
-
- if (addr == ksp->value) {
- kpp = ksp;
- break;
- }
- if (addr > ksp->value) {
- if ((kpp == NULL)
- || (ksp->value > kpp->value)) {
- kpp = ksp;
- }
- }
- }
-
- /*
- * If more than 128k away, don't bother.
- */
- if ((kpp == NULL)
- || ((addr - kpp->value) > 0x20000)) {
- return NULL;
- }
-
- return kpp->name;
-}
-
-/*
- * kdbgetregcontents
- *
- * Return the contents of the register specified by the
- * input string argument. Return an error if the string
- * does not match a machine register.
- *
- * The following pseudo register names are supported:
- * &regs - Prints address of exception frame
- * kesp - Prints kernel stack pointer at time of fault
- * sstk - Prints switch stack for ia64
- * %<regname> - Uses the value of the registers at the
- * last time the user process entered kernel
- * mode, instead of the registers at the time
- * kdb was entered.
- *
- * Parameters:
- * regname Pointer to string naming register
- * regs Pointer to structure containing registers.
- * Outputs:
- * *contents Pointer to unsigned long to recieve register contents
- * Returns:
- * 0 Success
- * KDB_BADREG Invalid register name
- * Locking:
- * None.
- * Remarks:
- *
- * Note that this function is really machine independent. The kdb
- * register list is not, however.
- */
-
-static struct kdbregs {
- char *reg_name;
- size_t reg_offset;
-} kdbreglist[] = {
- { " psr", offsetof(struct pt_regs, cr_ipsr) },
- { " ifs", offsetof(struct pt_regs, cr_ifs) },
- { " ip", offsetof(struct pt_regs, cr_iip) },
-
- { "unat", offsetof(struct pt_regs, ar_unat) },
- { " pfs", offsetof(struct pt_regs, ar_pfs) },
- { " rsc", offsetof(struct pt_regs, ar_rsc) },
-
- { "rnat", offsetof(struct pt_regs, ar_rnat) },
- { "bsps", offsetof(struct pt_regs, ar_bspstore) },
- { " pr", offsetof(struct pt_regs, pr) },
-
- { "ldrs", offsetof(struct pt_regs, loadrs) },
- { " ccv", offsetof(struct pt_regs, ar_ccv) },
- { "fpsr", offsetof(struct pt_regs, ar_fpsr) },
-
- { " b0", offsetof(struct pt_regs, b0) },
- { " b6", offsetof(struct pt_regs, b6) },
- { " b7", offsetof(struct pt_regs, b7) },
-
- { " r1",offsetof(struct pt_regs, r1) },
- { " r2",offsetof(struct pt_regs, r2) },
- { " r3",offsetof(struct pt_regs, r3) },
-
- { " r8",offsetof(struct pt_regs, r8) },
- { " r9",offsetof(struct pt_regs, r9) },
- { " r10",offsetof(struct pt_regs, r10) },
-
- { " r11",offsetof(struct pt_regs, r11) },
- { " r12",offsetof(struct pt_regs, r12) },
- { " r13",offsetof(struct pt_regs, r13) },
-
- { " r14",offsetof(struct pt_regs, r14) },
- { " r15",offsetof(struct pt_regs, r15) },
- { " r16",offsetof(struct pt_regs, r16) },
-
- { " r17",offsetof(struct pt_regs, r17) },
- { " r18",offsetof(struct pt_regs, r18) },
- { " r19",offsetof(struct pt_regs, r19) },
-
- { " r20",offsetof(struct pt_regs, r20) },
- { " r21",offsetof(struct pt_regs, r21) },
- { " r22",offsetof(struct pt_regs, r22) },
-
- { " r23",offsetof(struct pt_regs, r23) },
- { " r24",offsetof(struct pt_regs, r24) },
- { " r25",offsetof(struct pt_regs, r25) },
-
- { " r26",offsetof(struct pt_regs, r26) },
- { " r27",offsetof(struct pt_regs, r27) },
- { " r28",offsetof(struct pt_regs, r28) },
-
- { " r29",offsetof(struct pt_regs, r29) },
- { " r30",offsetof(struct pt_regs, r30) },
- { " r31",offsetof(struct pt_regs, r31) },
-
-};
-
-static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs);
-
-int
-kdbgetregcontents(const char *regname,
- struct pt_regs *regs,
- unsigned long *contents)
-{
- int i;
-
- if (strcmp(regname, "&regs") == 0) {
- *contents = (unsigned long)regs;
- return 0;
- }
-
- if (strcmp(regname, "sstk") == 0) {
- *contents = (unsigned long)getprsregs(regs) ;
- return 0;
- }
-
- if (strcmp(regname, "isr") == 0) {
- fault_regs_t fr ;
- get_fault_regs(&fr) ;
- *contents = fr.isr ;
- return 0 ;
- }
-
-#if 0
- /* XXX need to verify this */
- if (strcmp(regname, "kesp") == 0) {
- *contents = (unsigned long)regs + sizeof(struct pt_regs);
- return 0;
- }
-
- if (regname[0] == '%') {
- /* User registers: %%e[a-c]x, etc */
- regname++;
- regs = (struct pt_regs *)
- (current->thread.ksp - sizeof(struct pt_regs));
- }
-#endif
-
- for (i=0; i<nkdbreglist; i++) {
- if (strstr(kdbreglist[i].reg_name, regname))
- break;
- }
-
- if (i == nkdbreglist) {
- /* Lets check the rse maybe */
- if (regname[0] == 'r')
- if (show_cur_stack_frame(regs, simple_strtoul(regname+1, 0, 0) - 31,
- contents))
- return 0 ;
- return KDB_BADREG;
- }
-
- *contents = *(unsigned long *)((unsigned long)regs +
- kdbreglist[i].reg_offset);
-
- return 0;
-}
-
-/*
- * kdbsetregcontents
- *
- * Set the contents of the register specified by the
- * input string argument. Return an error if the string
- * does not match a machine register.
- *
- * Supports modification of user-mode registers via
- * %<register-name>
- *
- * Parameters:
- * regname Pointer to string naming register
- * regs Pointer to structure containing registers.
- * contents Unsigned long containing new register contents
- * Outputs:
- * Returns:
- * 0 Success
- * KDB_BADREG Invalid register name
- * Locking:
- * None.
- * Remarks:
- */
-
-int
-kdbsetregcontents(const char *regname,
- struct pt_regs *regs,
- unsigned long contents)
-{
- int i;
-
- if (regname[0] == '%') {
- regname++;
- regs = (struct pt_regs *)
- (current->thread.ksp - sizeof(struct pt_regs));
- }
-
- for (i=0; i<nkdbreglist; i++) {
- if (strnicmp(kdbreglist[i].reg_name,
- regname,
- strlen(regname)) == 0)
- break;
- }
-
- if ((i == nkdbreglist)
- || (strlen(kdbreglist[i].reg_name) != strlen(regname))) {
- return KDB_BADREG;
- }
-
- *(unsigned long *)((unsigned long)regs + kdbreglist[i].reg_offset) =
- contents;
-
- return 0;
-}
-
-/*
- * kdbdumpregs
- *
- * Dump the specified register set to the display.
- *
- * Parameters:
- * regs Pointer to structure containing registers.
- * type Character string identifying register set to dump
- * extra string further identifying register (optional)
- * Outputs:
- * Returns:
- * 0 Success
- * Locking:
- * None.
- * Remarks:
- * This function will dump the general register set if the type
- * argument is NULL (struct pt_regs). The alternate register
- * set types supported by this function:
- *
- * d Debug registers
- * c Control registers
- * u User registers at most recent entry to kernel
- * Following not yet implemented:
- * m Model Specific Registers (extra defines register #)
- * r Memory Type Range Registers (extra defines register)
- *
- * For now, all registers are covered as follows:
- *
- * rd - dumps all regs
- * rd %isr - current interrupt status reg, read freshly
- * rd s - valid stacked regs
- * rd %sstk - gets switch stack addr. dump memory and search
- * rd d - debug regs, may not be too useful
- *
- * ARs TB Done
- * Interrupt regs TB Done ??
- * OTHERS TB Decided ??
- *
- * Intel wish list
- * These will be implemented later - Srinivasa
- *
- * type action
- * ---- ------
- * g dump all General static registers
- * s dump all general Stacked registers
- * f dump all Floating Point registers
- * p dump all Predicate registers
- * b dump all Branch registers
- * a dump all Application registers
- * c dump all Control registers
- *
- */
-
-int
-kdbdumpregs(struct pt_regs *regs,
- const char *type,
- const char *extra)
-
-{
- int i;
- int count = 0;
-
- if (type
- && (type[0] == 'u')) {
- type = NULL;
- regs = (struct pt_regs *)
- (current->thread.ksp - sizeof(struct pt_regs));
- }
-
- if (type == NULL) {
- for (i=0; i<nkdbreglist; i++) {
- kdb_printf("%s: 0x%16.16lx ",
- kdbreglist[i].reg_name,
- *(unsigned long *)((unsigned long)regs +
- kdbreglist[i].reg_offset));
-
- if ((++count % 3) == 0)
- kdb_printf("\n");
- }
-
- kdb_printf("&regs = 0x%16.16lx\n", regs);
-
- return 0;
- }
-
- switch (type[0]) {
- case 'd':
- {
- for(i=0; i<8; i+=2) {
- kdb_printf("idr%d: 0x%16.16lx idr%d: 0x%16.16lx\n", i,
- kdb_getdr(i), i+1, kdb_getdr(i+1));
-
- }
- return 0;
- }
-#if 0
- case 'c':
- {
- unsigned long cr[5];
-
- for (i=0; i<5; i++) {
- cr[i] = kdb_getcr(i);
- }
- kdb_printf("cr0 = 0x%8.8x cr1 = 0x%8.8x cr2 = 0x%8.8x cr3 = 0x%8.8x\ncr4 = 0x%8.8x\n",
- cr[0], cr[1], cr[2], cr[3], cr[4]);
- return 0;
- }
-#endif
- case 'm':
- break;
- case 'r':
- break;
-
- case 's':
- {
- show_cur_stack_frame(regs, 0, NULL) ;
-
- return 0 ;
- }
-
- case '%':
- {
- unsigned long contents ;
-
- if (!kdbgetregcontents(type+1, regs, &contents))
- kdb_printf("%s = 0x%16.16lx\n", type+1, contents) ;
- else
- kdb_printf("diag: Invalid register %s\n", type+1) ;
-
- return 0 ;
- }
-
- default:
- return KDB_BADREG;
- }
-
- /* NOTREACHED */
- return 0;
-}
-
-k_machreg_t
-kdb_getpc(struct pt_regs *regs)
-{
- return regs->cr_iip + ia64_psr(regs)->ri;
-}
-
-int
-kdb_setpc(struct pt_regs *regs, k_machreg_t newpc)
-{
- regs->cr_iip = newpc & ~0xf;
- ia64_psr(regs)->ri = newpc & 0x3;
- return 0;
-}
-
-void
-kdb_disableint(kdbintstate_t *state)
-{
- int *fp = (int *)state;
- int flags;
-
- __save_flags(flags);
- __cli();
-
- *fp = flags;
-}
-
-void
-kdb_restoreint(kdbintstate_t *state)
-{
- int flags = *(int *)state;
- __restore_flags(flags);
-}
-
-int
-kdb_putword(unsigned long addr, unsigned long contents)
-{
- *(unsigned long *)addr = contents;
- return 0;
-}
-
-int
-kdb_getcurrentframe(struct pt_regs *regs)
-{
-#if 0
- regs->xcs = 0;
-#if defined(CONFIG_KDB_FRAMEPTR)
- asm volatile("movl %%ebp,%0":"=m" (*(int *)&regs->ebp));
-#endif
- asm volatile("movl %%esp,%0":"=m" (*(int *)&regs->esp));
-#endif
- return 0;
-}
-
-unsigned long
-show_cur_stack_frame(struct pt_regs *regs, int regno, unsigned long *contents)
-{
- long sof = regs->cr_ifs & ((1<<7)-1) ; /* size of frame */
- unsigned long i ;
- int j;
- struct switch_stack *prs_regs = getprsregs(regs) ;
- unsigned long *sofptr = (prs_regs? ia64_rse_skip_regs(
- (unsigned long *)prs_regs->ar_bspstore, -sof) : NULL) ;
-
- if (!sofptr) {
- printk("Unable to display Current Stack Frame\n") ;
- return 0 ;
- }
-
- if (regno < 0)
- return 0 ;
-
- for (i=sof, j=0;i;i--,j++) {
- /* remember to skip the nat collection dword */
- if ((((unsigned long)sofptr>>3) & (((1<<6)-1)))
- == ((1<<6)-1))
- sofptr++ ;
-
- /* return the value in the reg if regno is non zero */
-
- if (regno) {
- if ((j+1) == regno) {
- if (contents)
- *contents = *sofptr ;
- return -1;
- }
- sofptr++ ;
- } else {
- printk(" r%d: %016lx ", 32+j, *sofptr++) ;
- if (!((j+1)%3)) printk("\n") ;
- }
- }
-
- if (regno) {
- if (!i) /* bogus rse number */
- return 0 ;
- } else
- printk("\n") ;
-
- return 0 ;
-}
diff --git a/arch/ia64/kdb/pc_keyb.h b/arch/ia64/kdb/pc_keyb.h
deleted file mode 100644
index 3d4831a80..000000000
--- a/arch/ia64/kdb/pc_keyb.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * linux/drivers/char/pc_keyb.h
- *
- * PC Keyboard And Keyboard Controller
- *
- * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
- */
-
-/*
- * Configuration Switches
- */
-
-#undef KBD_REPORT_ERR /* Report keyboard errors */
-#define KBD_REPORT_UNKN /* Report unknown scan codes */
-#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */
-#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged FOCUS-9000 keyboard */
-#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse needs initialization. */
-
-
-
-#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */
-#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */
-#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */
-
-/*
- * Internal variables of the driver
- */
-
-extern unsigned char pckbd_read_mask;
-extern unsigned char aux_device_present;
-
-/*
- * Keyboard Controller Registers
- */
-
-#define KBD_STATUS_REG 0x64 /* Status register (R) */
-#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
-#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
-
-/*
- * Keyboard Controller Commands
- */
-
-#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */
-#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
-#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
-#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
-#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
-#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
-#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
-#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
-#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
-#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
-#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if
- initiated by the auxiliary device */
-#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */
-
-/*
- * Keyboard Commands
- */
-
-#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */
-#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */
-#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
-#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
-#define KBD_CMD_RESET 0xFF /* Reset */
-
-/*
- * Keyboard Replies
- */
-
-#define KBD_REPLY_POR 0xAA /* Power on reset */
-#define KBD_REPLY_ACK 0xFA /* Command ACK */
-#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
-
-/*
- * Status Register Bits
- */
-
-#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
-#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
-#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
-#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */
-#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
-#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
-#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
-#define KBD_STAT_PERR 0x80 /* Parity error */
-
-#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF)
-
-/*
- * Controller Mode Register Bits
- */
-
-#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
-#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */
-#define KBD_MODE_SYS 0x04 /* The system flag (?) */
-#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */
-#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
-#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
-#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
-#define KBD_MODE_RFU 0x80
-
-/*
- * Mouse Commands
- */
-
-#define AUX_SET_RES 0xE8 /* Set resolution */
-#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */
-#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */
-#define AUX_GET_SCALE 0xE9 /* Get scaling factor */
-#define AUX_SET_STREAM 0xEA /* Set stream mode */
-#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */
-#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
-#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
-#define AUX_RESET 0xFF /* Reset aux device */
-
-#define AUX_BUF_SIZE 2048
-
-struct aux_queue {
- unsigned long head;
- unsigned long tail;
- struct wait_queue *proc_list;
- struct fasync_struct *fasync;
- unsigned char buf[AUX_BUF_SIZE];
-};
-
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6631d33c3..225cbec5d 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -15,13 +15,13 @@
all: kernel.o head.o init_task.o
O_TARGET := kernel.o
-O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_internal.o ivt.o \
- pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o signal.o \
- sys_ia64.o traps.o time.o unaligned.o unwind.o
+O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \
+ pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o \
+ signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
#O_OBJS := fpreg.o
#OX_OBJS := ia64_ksyms.o
-ifeq ($(CONFIG_IA64_GENERIC),y)
+ifdef CONFIG_IA64_GENERIC
O_OBJS += machvec.o
endif
@@ -30,10 +30,10 @@ O_OBJS += pci.o
endif
ifdef CONFIG_SMP
-O_OBJS += smp.o irq_lock.o
+O_OBJS += smp.o
endif
-ifeq ($(CONFIG_MCA),y)
+ifdef CONFIG_IA64_MCA
O_OBJS += mca.o mca_asm.o
endif
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 078d908c8..72e10a683 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -11,12 +11,12 @@
#include <linux/config.h>
#include <linux/init.h>
-#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/irq.h>
#include <asm/acpi-ext.h>
#include <asm/page.h>
@@ -27,13 +27,12 @@
#undef ACPI_DEBUG /* Guess what this does? */
#ifdef CONFIG_SMP
-extern unsigned long ipi_base_addr;
+extern struct smp_boot_data smp;
#endif
/* These are ugly but will be reclaimed by the kernel */
-int __initdata acpi_cpus = 0;
-int __initdata acpi_apic_map[32];
-int __initdata cpu_cnt = 0;
+int __initdata available_cpus = 0;
+int __initdata total_cpus = 0;
void (*pm_idle) (void);
@@ -50,7 +49,7 @@ acpi_lsapic(char *p)
if ((lsapic->flags & LSAPIC_PRESENT) == 0)
return;
- printk(" CPU %d (%.04x:%.04x): ", cpu_cnt, lsapic->eid, lsapic->id);
+ printk(" CPU %d (%.04x:%.04x): ", total_cpus, lsapic->eid, lsapic->id);
if ((lsapic->flags & LSAPIC_ENABLED) == 0) {
printk("Disabled.\n");
@@ -62,11 +61,17 @@ acpi_lsapic(char *p)
if (add) {
printk("Available.\n");
- acpi_cpus++;
- acpi_apic_map[cpu_cnt] = (lsapic->id << 8) | lsapic->eid;
+ available_cpus++;
+#ifdef CONFIG_SMP
+# if LARGE_CPU_ID_OK
+ smp.cpu_map[total_cpus] = (lsapic->id << 8) | lsapic->eid;
+# else
+ smp.cpu_map[total_cpus] = lsapic->id;
+# endif
+#endif
}
- cpu_cnt++;
+ total_cpus++;
}
/*
@@ -174,7 +179,7 @@ acpi_legacy_irq(char *p)
break;
}
-#ifdef ACPI_DEBUG
+#if 1/*def ACPI_DEBUG*/
printk("Legacy ISA IRQ %x -> IA64 Vector %x IOSAPIC Pin %x Active %s %s Trigger\n",
legacy->isa_irq, vector, iosapic_pin(vector),
((iosapic_polarity(vector) == IO_SAPIC_POL_LOW) ? "Low" : "High"),
@@ -204,11 +209,11 @@ acpi_parse_msapic(acpi_sapic_t *msapic)
{
char *p, *end;
- memset(&acpi_apic_map, -1, sizeof(acpi_apic_map));
+ /* Base address of IPI Message Block */
+ ipi_base_addr = (unsigned long) ioremap(msapic->interrupt_block, 0);
#ifdef CONFIG_SMP
- /* Base address of IPI Message Block */
- ipi_base_addr = ioremap(msapic->interrupt_block, 0);
+ memset(&smp, -1, sizeof(smp));
#endif
p = (char *) (msapic + 1);
@@ -238,11 +243,22 @@ acpi_parse_msapic(acpi_sapic_t *msapic)
}
/* Move to next table entry. */
- p += *(p + 1);
+#define BAD_ACPI_TABLE
+#ifdef BAD_ACPI_TABLE
+ /*
+ * Some prototype Lion's have a bad ACPI table
+		 * Some prototype Lions have a bad ACPI table
+ * machines crash during bootup.
+ */
+ if (p[1] == 0)
+ p = end;
+ else
+#endif
+ p += p[1];
}
/* Make bootup pretty */
- printk(" %d CPUs available, %d CPUs total\n", acpi_cpus, cpu_cnt);
+ printk(" %d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
}
int __init
@@ -281,12 +297,15 @@ acpi_parse(acpi_rsdp_t *rsdp)
continue;
acpi_parse_msapic((acpi_sapic_t *) hdrp);
- } /* while() */
+ }
- if (acpi_cpus == 0) {
+#ifdef CONFIG_SMP
+ if (available_cpus == 0) {
printk("ACPI: Found 0 CPUS; assuming 1\n");
- acpi_cpus = 1; /* We've got at least one of these, no? */
+ available_cpus = 1; /* We've got at least one of these, no? */
}
+ smp.cpu_count = available_cpus;
+#endif
return 1;
}
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index fc2d50558..0ce1db504 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -24,7 +24,7 @@
#include <asm/io.h>
#include <asm/processor.h>
-#define EFI_DEBUG
+#define EFI_DEBUG 0
extern efi_status_t efi_call_phys (void *, ...);
@@ -210,9 +210,8 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
void __init
efi_init (void)
{
- void *efi_map_start, *efi_map_end, *p;
+ void *efi_map_start, *efi_map_end;
efi_config_table_t *config_tables;
- efi_memory_desc_t *md;
efi_char16_t *c16;
u64 efi_desc_size;
char vendor[100] = "unknown";
@@ -278,13 +277,18 @@ efi_init (void)
efi_map_end = efi_map_start + ia64_boot_param.efi_memmap_size;
efi_desc_size = ia64_boot_param.efi_memdesc_size;
-#ifdef EFI_DEBUG
+#if EFI_DEBUG
/* print EFI memory map: */
- for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
- md = p;
- printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
- i, md->type, md->attribute,
- md->phys_addr, md->phys_addr + (md->num_pages<<12) - 1, md->num_pages >> 8);
+ {
+		void *p;
+		efi_memory_desc_t *md;
+
+ for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
+ md = p;
+ printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+ i, md->type, md->attribute, md->phys_addr,
+ md->phys_addr + (md->num_pages<<12) - 1, md->num_pages >> 8);
+ }
}
#endif
}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index eb575a39c..755e3a0c1 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -25,6 +25,7 @@
#include <linux/config.h>
+#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/offsets.h>
#include <asm/processor.h>
@@ -228,11 +229,11 @@ save_switch_stack:
stf.spill [r2]=f30,32
stf.spill [r3]=f31,24
;;
- st8.spill [r2]=r4,16
- st8.spill [r3]=r5,16
+.mem.offset 0,0; st8.spill [r2]=r4,16
+.mem.offset 8,0; st8.spill [r3]=r5,16
;;
- st8.spill [r2]=r6,16
- st8.spill [r3]=r7,16
+.mem.offset 0,0; st8.spill [r2]=r6,16
+.mem.offset 8,0; st8.spill [r3]=r7,16
;;
st8 [r2]=r21,16 // save b0
st8 [r3]=r22,16 // save b1
@@ -437,8 +438,8 @@ strace_check_retval:
(p6) br.cond.sptk.few strace_error // syscall failed ->
;; // avoid RAW on r10
strace_save_retval:
- st8.spill [r2]=r8 // store return value in slot for r8
- st8.spill [r3]=r10 // clear error indication in slot for r10
+.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
+.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value
.ret6: br.cond.sptk.many ia64_leave_kernel
@@ -491,7 +492,9 @@ ia64_ret_from_syscall:
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
adds r3=IA64_PT_REGS_R8_OFFSET+32,sp // r3 = &pt_regs.r10
;;
+ .mem.offset 0,0
(p6) st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
+ .mem.offset 8,0
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt.few handle_syscall_error // handle potential syscall failure
@@ -504,7 +507,9 @@ ia64_leave_kernel:
;;
ld4 r2=[r2]
;;
- shladd r3=r2,3,r3
+ shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here...
+ ;;
+ add r3=r2,r3
#else
movl r3=softirq_state
#endif
@@ -550,6 +555,16 @@ back_from_resched:
2:
// check & deliver pending signals:
(p2) br.call.spnt.few rp=handle_signal_delivery
+#if defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+ // Check for lost ticks
+ mov r2 = ar.itc
+ mov r3 = cr.itm
+ ;;
+ sub r2 = r2, r3
+ ;;
+ cmp.ge p6,p7 = r2, r0
+(p6) br.call.spnt.few rp=invoke_ia64_reset_itm
+#endif
restore_all:
// start restoring the state saved on the kernel stack (struct pt_regs):
@@ -735,8 +750,8 @@ handle_syscall_error:
(p6) mov r9=r8
(p6) mov r10=0
;;
- st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
- st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
+.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
+.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
br.cond.sptk.many ia64_leave_kernel
.endp handle_syscall_error
@@ -757,6 +772,19 @@ invoke_schedule_tail:
mov rp=loc1
br.ret.sptk.many rp
.endp invoke_schedule_tail
+
+ .proc invoke_ia64_reset_itm
+invoke_ia64_reset_itm:
+ alloc loc0=ar.pfs,8,2,0,0
+ mov loc1=rp
+ ;;
+ br.call.sptk.many rp=ia64_reset_itm
+ ;;
+ mov ar.pfs=loc0
+ mov rp=loc1
+ br.ret.sptk.many rp
+ .endp invoke_ia64_reset_itm
+
#endif /* CONFIG_SMP */
/*
@@ -855,26 +883,22 @@ setup_switch_stack:
.global sys_rt_sigsuspend
sys_rt_sigsuspend:
alloc loc0=ar.pfs,2,2,3,0
- mov r9=ar.unat
// If the process is being ptraced, the signal may not actually be delivered to
// the process. Instead, SIGCHLD will be sent to the parent. We need to
// setup a switch_stack so ptrace can inspect the processes state if necessary.
- adds r2=IA64_TASK_FLAGS_OFFSET,r13
- ;;
- ld8 r2=[r2]
+	// Also, the process might not be ptraced until stopped in sigsuspend, so this
+ // isn't something that we can do conditionally based upon the value of
+ // PF_PTRACED_BIT.
mov out0=in0 // mask
mov out1=in1 // sigsetsize
;;
adds out2=16,sp // out1=&pt_regs
- tbit.nz p16,p17=r2,PF_PTRACED_BIT
-(p16) br.cond.spnt.many sigsuspend_setup_switch_stack
+ movl r28=back_from_sigsuspend_setup_switch_stack
+ mov r16=loc0
+ br.cond.sptk.many save_switch_stack
;;
back_from_sigsuspend_setup_switch_stack:
- adds r3=-IA64_SWITCH_STACK_SIZE+IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
-(p17) adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for (dummy) switch_stack
- ;;
-(p17) st8 [r3]=r9 // save ar.unat in sw->caller_unat
mov loc1=rp // save return address
br.call.sptk.many rp=ia64_rt_sigsuspend
.ret12:
@@ -883,32 +907,22 @@ back_from_sigsuspend_setup_switch_stack:
ld8 r9=[r3] // load new unat from sw->caller_unat
mov rp=loc1
;;
-(p17) adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch_stack
-(p17) mov ar.unat=r9
-(p17) mov ar.pfs=loc0
-(p17) br.ret.sptk.many rp
// restore the switch stack (ptrace may have modified it):
movl r28=1f
br.cond.sptk.many load_switch_stack
1: br.ret.sptk.many rp
// NOT REACHED
-
-sigsuspend_setup_switch_stack:
- movl r28=back_from_sigsuspend_setup_switch_stack
- mov r16=loc0
- br.cond.sptk.many save_switch_stack
- // NOT REACHED
-
.endp sys_rt_sigsuspend
.align 16
.proc sys_rt_sigreturn
sys_rt_sigreturn:
- alloc loc0=ar.pfs,8,1,1,0 // preserve all eight input regs in case of syscall restart!
+ .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler()
adds out0=16,sp // out0 = &pt_regs
- ;;
adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for unat and padding
+ ;;
+ cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall...
br.call.sptk.few rp=ia64_rt_sigreturn
.ret13:
adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
@@ -918,8 +932,7 @@ sys_rt_sigreturn:
;;
adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch-stack frame
mov ar.unat=r9
- mov ar.pfs=loc0
- br.ret.sptk.many rp
+ br rp
.endp sys_rt_sigreturn
.align 16
@@ -940,76 +953,6 @@ ia64_prepare_handle_unaligned:
2: br.cond.sptk.many rp // goes to ia64_leave_kernel
.endp ia64_prepare_handle_unaligned
-#ifdef CONFIG_KDB
- //
- // This gets called from ivt.S with:
- // SAVE MIN with cover done
- // SAVE REST done
- // no parameters
- // r15 has return value = ia64_leave_kernel
- //
- .align 16
- .global ia64_invoke_kdb
- .proc ia64_invoke_kdb
-ia64_invoke_kdb:
- alloc r16=ar.pfs,0,0,4,0
- movl r28=1f // save_switch_stack protocol
- ;; // avoid WAW on CFM
- br.cond.sptk.many save_switch_stack // to flushrs
-1: mov out0=4 // kdb entry reason
- mov out1=0 // err number
- adds out2=IA64_SWITCH_STACK_SIZE+16,sp // pt_regs
- add out3=16,sp // switch_stack
- br.call.sptk.few rp=kdb
-.ret15:
- movl r28=1f // load_switch_stack proto
- br.cond.sptk.many load_switch_stack
-1: br.ret.sptk.many rp
- .endp ia64_invoke_kdb
-
- //
- // When KDB is compiled in, we intercept each fault and give
- // kdb a chance to run before calling the normal fault handler.
- //
- .align 16
- .global ia64_invoke_kdb_fault_handler
- .proc ia64_invoke_kdb_fault_handler
-ia64_invoke_kdb_fault_handler:
- alloc r16=ar.pfs,5,1,5,0
- movl r28=1f
- mov loc0=rp // save this
- br.cond.sptk.many save_switch_stack // to flushrs
- ;; // avoid WAW on CFM
-1: mov out0=in0 // vector number
- mov out1=in1 // cr.isr
- mov out2=in2 // cr.ifa
- mov out3=in3 // cr.iim
- mov out4=in4 // cr.itir
- br.call.sptk.few rp=ia64_kdb_fault_handler
-.ret16:
-
- movl r28=1f
- br.cond.sptk.many load_switch_stack
-1: cmp.ne p6,p0=r8,r0 // did ia64_kdb_fault_handler return 0?
- mov rp=loc0
-(p6) br.ret.spnt.many rp // no, we're done
- ;; // avoid WAW on rp
- mov out0=in0 // vector number
- mov out1=in1 // cr.isr
- mov out2=in2 // cr.ifa
- mov out3=in3 // cr.iim
- mov out4=in4 // cr.itir
- mov in0=ar.pfs // preserve ar.pfs returned by load_switch_stack
- br.call.sptk.few rp=ia64_fault // yup -> we need to invoke normal fault handler now
-.ret17:
- mov ar.pfs=in0
- mov rp=loc0
- br.ret.sptk.many rp
-
- .endp ia64_invoke_kdb_fault_handler
-
-#endif /* CONFIG_KDB */
-
.rodata
.align 8
.globl sys_call_table
@@ -1198,8 +1141,8 @@ sys_call_table:
data8 sys_sendmsg // 1205
data8 sys_recvmsg
data8 sys_pivot_root
- data8 ia64_ni_syscall
- data8 ia64_ni_syscall
+ data8 sys_mincore
+ data8 sys_madvise
data8 ia64_ni_syscall // 1210
data8 ia64_ni_syscall
data8 ia64_ni_syscall
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index a710870c0..8eabe53d1 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -80,8 +80,6 @@
ia64_sigtramp:
ld8 r10=[r3],8 // get signal handler entry point
br.call.sptk.many rp=invoke_sighandler
-.ret0: mov r15=__NR_rt_sigreturn
- break __BREAK_SYSCALL
.endp ia64_sigtramp
.proc invoke_sighandler
@@ -90,10 +88,9 @@ invoke_sighandler:
mov b6=r10
cover // push args in interrupted frame onto backing store
;;
- alloc r8=ar.pfs,0,1,3,0 // get CFM0, EC0, and CPL0 into r8
- mov r17=ar.bsp // fetch ar.bsp
- mov loc0=rp // save return pointer
+ alloc r8=ar.pfs,0,0,3,0 // get CFM0, EC0, and CPL0 into r8
;;
+ mov r17=ar.bsp // fetch ar.bsp
cmp.ne p8,p0=r15,r0 // do we need to switch the rbs?
mov out0=r2 // signal number
(p8) br.cond.spnt.few setup_rbs // yup -> (clobbers r14 and r16)
@@ -101,10 +98,11 @@ back_from_setup_rbs:
adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
;;
st8 [base0]=r17,(CFM_OFF-BSP_OFF) // save sc_ar_bsp
+ dep r8=0,r8,38,26 // clear EC0, CPL0 and reserved bits
adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
;;
- st8 [base0]=r8 // save CFM0, EC0, and CPL0
+ st8 [base0]=r8 // save CFM0
adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
;;
stf.spill [base0]=f6,32
@@ -124,7 +122,8 @@ back_from_setup_rbs:
stf.spill [base0]=f14,32
stf.spill [base1]=f15,32
br.call.sptk.few rp=b6 // call the signal handler
-.ret2: adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
+.ret2:
+ adds base0=(BSP_OFF+SIGCONTEXT_OFF),sp
;;
ld8 r15=[base0],(CFM_OFF-BSP_OFF) // fetch sc_ar_bsp and advance to CFM_OFF
mov r14=ar.bsp
@@ -134,23 +133,11 @@ back_from_setup_rbs:
(p8) br.cond.spnt.few restore_rbs // yup -> (clobbers r14 and r16)
;;
back_from_restore_rbs:
- {
- and r9=0x7f,r8 // r9 <- CFM0.sof
- extr.u r10=r8,7,7 // r10 <- CFM0.sol
- mov r11=ip
- }
- ;;
adds base0=(FR6_OFF+SIGCONTEXT_OFF),sp
- adds r11=(cont-back_from_restore_rbs),r11
- sub r9=r9,r10 // r9 <- CFM0.sof - CFM0.sol == CFM0.nout
- ;;
adds base1=(FR6_OFF+16+SIGCONTEXT_OFF),sp
- dep r9=r9,r9,7,7 // r9.sol = r9.sof
- mov b6=r11
;;
ldf.fill f6=[base0],32
ldf.fill f7=[base1],32
- mov rp=loc0 // copy return pointer out of stacked register
;;
ldf.fill f8=[base0],32
ldf.fill f9=[base1],32
@@ -160,26 +147,23 @@ back_from_restore_rbs:
;;
ldf.fill f12=[base0],32
ldf.fill f13=[base1],32
- mov ar.pfs=r9
;;
ldf.fill f14=[base0],32
ldf.fill f15=[base1],32
- br.ret.sptk.few b6
-cont: mov ar.pfs=r8 // ar.pfs = CFM0
- br.ret.sptk.few rp // re-establish CFM0
+ mov r15=__NR_rt_sigreturn
+ break __BREAK_SYSCALL
.endp invoke_sighandler
.proc setup_rbs
setup_rbs:
flushrs // must be first in insn
- ;;
mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
- mov r14=ar.rnat // get rnat as updated by flushrs
;;
+ mov r14=ar.rnat // get rnat as updated by flushrs
mov ar.bspstore=r15 // set new register backing store area
- st8 [r16]=r14 // save sc_ar_rnat
;;
+ st8 [r16]=r14 // save sc_ar_rnat
mov ar.rsc=0xf // set RSE into eager mode, pl 3
invala // invalidate ALAT
br.cond.sptk.many back_from_setup_rbs
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 0ddfe3f05..a01432a60 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -66,7 +66,7 @@ irq_cpustat_t irq_stat [NR_CPUS];
* Controller mappings for all interrupt sources:
*/
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
- { [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
+ { [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
static void register_irq_proc (unsigned int irq);
@@ -164,7 +164,7 @@ int get_irq_list(char *buf)
p += sprintf(p, "%10u ",
atomic_read(&nmi_counter(cpu_logical_map(j))));
p += sprintf(p, "\n");
-#if CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(__i386__)
p += sprintf(p, "LOC: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
@@ -182,8 +182,8 @@ int get_irq_list(char *buf)
*/
#ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile int global_irq_lock;
+unsigned int global_irq_holder = NO_PROC_ID;
+volatile unsigned int global_irq_lock;
extern void show_stack(unsigned long* esp);
@@ -201,6 +201,10 @@ static void show(char * str)
printk(" %d",local_bh_count(i));
printk(" ]\nStack dumps:");
+#ifdef __ia64__
+ printk(" ]\nStack dumps: <unimplemented on IA-64---please fix me>");
+ /* for now we don't have stack dumping support... */
+#elif __i386__
for(i=0;i< smp_num_cpus;i++) {
unsigned long esp;
if(i==cpu)
@@ -219,8 +223,13 @@ static void show(char * str)
esp += sizeof(struct task_struct);
show_stack((void*)esp);
}
+#else
+ You lose...
+#endif
printk("\nCPU %d:",cpu);
+#ifdef __i386__
show_stack(NULL);
+#endif
printk("\n");
}
@@ -250,7 +259,11 @@ static void show(char * str)
/*
* We have to allow irqs to arrive between __sti and __cli
*/
-# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
+# ifdef __ia64__
+# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
+# else
+# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
+# endif
#endif
static inline void wait_on_irq(int cpu)
@@ -311,7 +324,7 @@ static inline void get_irqlock(int cpu)
{
if (test_and_set_bit(0,&global_irq_lock)) {
/* do we already hold the lock? */
- if ((unsigned char) cpu == global_irq_holder)
+ if (cpu == global_irq_holder)
return;
/* Uhhuh.. Somebody else got it. Wait.. */
do {
@@ -349,6 +362,15 @@ void __global_cli(void)
{
unsigned int flags;
+#ifdef __ia64__
+ __save_flags(flags);
+ if (flags & IA64_PSR_I) {
+ int cpu = smp_processor_id();
+ __cli();
+ if (!local_irq_count(cpu))
+ get_irqlock(cpu);
+ }
+#else
__save_flags(flags);
if (flags & (1 << EFLAGS_IF_SHIFT)) {
int cpu = smp_processor_id();
@@ -356,6 +378,7 @@ void __global_cli(void)
if (!local_irq_count(cpu))
get_irqlock(cpu);
}
+#endif
}
void __global_sti(void)
@@ -382,7 +405,11 @@ unsigned long __global_save_flags(void)
int cpu = smp_processor_id();
__save_flags(flags);
+#ifdef __ia64__
+ local_enabled = (flags & IA64_PSR_I) != 0;
+#else
local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
+#endif
/* default to local */
retval = 2 + local_enabled;
@@ -479,11 +506,13 @@ void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
+#ifdef CONFIG_SMP
if (!local_irq_count(smp_processor_id())) {
do {
barrier();
} while (irq_desc[irq].status & IRQ_INPROGRESS);
}
+#endif
}
void enable_irq(unsigned int irq)
@@ -559,15 +588,12 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
/*
* If there is no IRQ handler or it was disabled, exit early.
- Since we set PENDING, if another processor is handling
- a different instance of this same irq, the other processor
- will take care of it.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
*/
if (!action)
-{
- desc->status = status & ~IRQ_INPROGRESS;
goto out;
-}
/*
* Edge triggered interrupts need to remember
@@ -597,15 +623,6 @@ out:
desc->handler->end(irq);
spin_unlock(&desc->lock);
-#if 0
- /*
- * let kernel exit path take care of this; we want to do the
- * CPU EOI before doing softirq() so a new interrupt can come
- * through
- */
- if (softirq_state[cpu].active & softirq_state[cpu].mask)
- do_softirq();
-#endif
return 1;
}
@@ -1019,7 +1036,7 @@ static void register_irq_proc (unsigned int irq)
irq_dir[irq] = proc_mkdir(name, root_irq_dir);
/* create /proc/irq/1234/smp_affinity */
- entry = create_proc_entry("smp_affinity", 0700, irq_dir[irq]);
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
entry->nlink = 1;
entry->data = (void *)(long)irq;
@@ -1040,7 +1057,7 @@ void init_irq_proc (void)
root_irq_dir = proc_mkdir("irq", 0);
/* create /proc/irq/prof_cpu_mask */
- entry = create_proc_entry("prof_cpu_mask", 0700, root_irq_dir);
+ entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
entry->nlink = 1;
entry->data = (void *)&prof_cpu_mask;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index a2c493ba5..1a8398f85 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -25,10 +25,6 @@
#include <linux/smp_lock.h>
#include <linux/threads.h>
-#ifdef CONFIG_KDB
-# include <linux/kdb.h>
-#endif
-
#include <asm/bitops.h>
#include <asm/delay.h>
#include <asm/io.h>
@@ -41,13 +37,15 @@
spinlock_t ivr_read_lock;
#endif
+unsigned long ipi_base_addr = IPI_DEFAULT_BASE_ADDR; /* default base addr of IPI table */
+
/*
* Legacy IRQ to IA-64 vector translation table. Any vector not in
* this table maps to itself (ie: irq 0x30 => IA64 vector 0x30)
*/
-__u8 isa_irq_to_vector_map[IA64_MIN_VECTORED_IRQ] = {
+__u8 isa_irq_to_vector_map[16] = {
/* 8259 IRQ translation, first 16 entries */
- 0x60, 0x50, 0x0f, 0x51, 0x52, 0x53, 0x43, 0x54,
+ 0x60, 0x50, 0x10, 0x51, 0x52, 0x53, 0x43, 0x54,
0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41
};
@@ -80,8 +78,8 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
# ifndef CONFIG_SMP
static unsigned int max_prio = 0;
-# endif
unsigned int prev_prio;
+# endif
unsigned long eoi_ptr;
# ifdef CONFIG_USB
@@ -95,21 +93,25 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
* Stop IPIs by getting the ivr_read_lock
*/
spin_lock(&ivr_read_lock);
+ {
+ unsigned int tmp;
- /*
- * Disable PCI writes
- */
- outl(0x80ff81c0, 0xcf8);
- outl(0x73002188, 0xcfc);
- eoi_ptr = inl(0xcfc);
+ /*
+ * Disable PCI writes
+ */
+ outl(0x80ff81c0, 0xcf8);
+ tmp = inl(0xcfc);
+ outl(tmp | 0x400, 0xcfc);
- vector = ia64_get_ivr();
+ eoi_ptr = inl(0xcfc);
- /*
- * Enable PCI writes
- */
- outl(0x73182188, 0xcfc);
+ vector = ia64_get_ivr();
+ /*
+ * Enable PCI writes
+ */
+ outl(tmp, 0xcfc);
+ }
spin_unlock(&ivr_read_lock);
# ifdef CONFIG_USB
@@ -152,9 +154,6 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
printk("ia64_handle_irq: DANGER: less than 1KB of free stack space!!\n"
"(bsp=0x%lx, sp=%lx)\n", bsp, sp);
}
-#ifdef CONFIG_KDB
- kdb(KDB_REASON_PANIC, 0, regs);
-#endif
}
/*
@@ -175,9 +174,6 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
if (!pEOI) {
printk("Yikes: ia64_handle_irq() without pEOI!!\n");
asm volatile ("cmp.eq p1,p0=r0,r0" : "=r"(pEOI));
-# ifdef CONFIG_KDB
- kdb(KDB_REASON_PANIC, 0, regs);
-# endif
}
}
@@ -195,13 +191,13 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
#ifdef CONFIG_SMP
-void __init
-init_IRQ_SMP (void)
-{
- if (request_irq(IPI_IRQ, handle_IPI, 0, "IPI", NULL))
- panic("Could not allocate IPI Interrupt Handler!");
-}
+extern void handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
+static struct irqaction ipi_irqaction = {
+ handler: handle_IPI,
+ flags: SA_INTERRUPT,
+ name: "IPI"
+};
#endif
void __init
@@ -214,13 +210,14 @@ init_IRQ (void)
ia64_set_lrr0(0, 1);
ia64_set_lrr1(0, 1);
- irq_desc[TIMER_IRQ].handler = &irq_type_ia64_internal;
+ irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic;
+ irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic;
#ifdef CONFIG_SMP
/*
* Configure the IPI vector and handler
*/
- irq_desc[IPI_IRQ].handler = &irq_type_ia64_internal;
- init_IRQ_SMP();
+ irq_desc[IPI_IRQ].handler = &irq_type_ia64_sapic;
+ setup_irq(IPI_IRQ, &ipi_irqaction);
#endif
ia64_set_pmv(1 << 16);
@@ -232,16 +229,26 @@ init_IRQ (void)
ia64_set_tpr(0);
}
-/* TBD:
- * Certain IA64 platforms can have inter-processor interrupt support.
- * This interface is supposed to default to the IA64 IPI block-based
- * mechanism if the platform doesn't provide a separate mechanism
- * for IPIs.
- * Choices : (1) Extend hw_interrupt_type interfaces
- * (2) Use machine vector mechanism
- * For now defining the following interface as a place holder.
- */
void
-ipi_send (int cpu, int vector, int delivery_mode)
+ipi_send (int cpu, int vector, int delivery_mode, int redirect)
{
+ unsigned long ipi_addr;
+ unsigned long ipi_data;
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+ unsigned long flags;
+#endif
+# define EID 0
+
+ ipi_data = (delivery_mode << 8) | (vector & 0xff);
+ ipi_addr = ipi_base_addr | ((cpu << 8 | EID) << 4) | ((redirect & 1) << 3);
+
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+ spin_lock_irqsave(&ivr_read_lock, flags);
+#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+
+ writeq(ipi_data, ipi_addr);
+
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+ spin_unlock_irqrestore(&ivr_read_lock, flags);
+#endif
}
diff --git a/arch/ia64/kernel/irq_internal.c b/arch/ia64/kernel/irq_internal.c
deleted file mode 100644
index 2b768cec1..000000000
--- a/arch/ia64/kernel/irq_internal.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Internal Interrupt Vectors
- *
- * This takes care of interrupts that are generated by the CPU
- * internally, such as the ITC and IPI interrupts.
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 2000 Hewlett-Packard Co
- * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#include <linux/irq.h>
-
-static unsigned int
-internal_noop_startup (unsigned int irq)
-{
- return 0;
-}
-
-static void
-internal_noop (unsigned int irq)
-{
- /* nuthing to do... */
-}
-
-struct hw_interrupt_type irq_type_ia64_internal = {
- typename: "IA64-internal",
- startup: internal_noop_startup,
- shutdown: internal_noop,
- enable: internal_noop,
- disable: internal_noop,
- ack: internal_noop,
- end: internal_noop,
- set_affinity: (void (*)(unsigned int, unsigned long)) internal_noop
-};
diff --git a/arch/ia64/kernel/irq_lock.c b/arch/ia64/kernel/irq_lock.c
deleted file mode 100644
index 43afeac60..000000000
--- a/arch/ia64/kernel/irq_lock.c
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * SMP IRQ Lock support
- *
- * Global interrupt locks for SMP. Allow interrupts to come in on any
- * CPU, yet make cli/sti act globally to protect critical regions..
- * These function usually appear in irq.c, but I think it's cleaner this way.
- *
- * Copyright (C) 1999 VA Linux Systems
- * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- */
-
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/smp.h>
-#include <linux/threads.h>
-#include <linux/init.h>
-
-#include <asm/system.h>
-#include <asm/processor.h>
-#include <asm/irq.h>
-#include <asm/bitops.h>
-#include <asm/pgtable.h>
-#include <asm/delay.h>
-
-int global_irq_holder = NO_PROC_ID;
-spinlock_t global_irq_lock;
-atomic_t global_irq_count;
-atomic_t global_bh_count;
-spinlock_t global_bh_lock;
-
-#define INIT_STUCK (1<<26)
-
-void
-irq_enter(int cpu, int irq)
-{
- int stuck = INIT_STUCK;
-
- hardirq_enter(cpu, irq);
- barrier();
- while (global_irq_lock.lock) {
- if (cpu == global_irq_holder) {
- break;
- }
-
- if (!--stuck) {
- printk("irq_enter stuck (irq=%d, cpu=%d, global=%d)\n",
- irq, cpu,global_irq_holder);
- stuck = INIT_STUCK;
- }
- barrier();
- }
-}
-
-void
-irq_exit(int cpu, int irq)
-{
- hardirq_exit(cpu, irq);
- release_irqlock(cpu);
-}
-
-static void
-show(char * str)
-{
- int i;
- unsigned long *stack;
- int cpu = smp_processor_id();
-
- printk("\n%s, CPU %d:\n", str, cpu);
- printk("irq: %d [%d %d]\n",
- atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
- printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
-
- stack = (unsigned long *) &stack;
- for (i = 40; i ; i--) {
- unsigned long x = *++stack;
- if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
- printk("<[%08lx]> ", x);
- }
- }
-}
-
-#define MAXCOUNT 100000000
-
-static inline void
-wait_on_bh(void)
-{
- int count = MAXCOUNT;
- do {
- if (!--count) {
- show("wait_on_bh");
- count = ~0;
- }
- /* nothing .. wait for the other bh's to go away */
- } while (atomic_read(&global_bh_count) != 0);
-}
-
-static inline void
-wait_on_irq(int cpu)
-{
- int count = MAXCOUNT;
-
- for (;;) {
-
- /*
- * Wait until all interrupts are gone. Wait
- * for bottom half handlers unless we're
- * already executing in one..
- */
- if (!atomic_read(&global_irq_count)) {
- if (local_bh_count[cpu] || !atomic_read(&global_bh_count))
- break;
- }
-
- /* Duh, we have to loop. Release the lock to avoid deadlocks */
- spin_unlock(&global_irq_lock);
- mb();
-
- for (;;) {
- if (!--count) {
- show("wait_on_irq");
- count = ~0;
- }
- __sti();
- udelay(cpu + 1);
- __cli();
- if (atomic_read(&global_irq_count))
- continue;
- if (global_irq_lock.lock)
- continue;
- if (!local_bh_count[cpu] && atomic_read(&global_bh_count))
- continue;
- if (spin_trylock(&global_irq_lock))
- break;
- }
- }
-}
-
-/*
- * This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void
-synchronize_bh(void)
-{
- if (atomic_read(&global_bh_count)) {
- int cpu = smp_processor_id();
- if (!local_irq_count[cpu] && !local_bh_count[cpu]) {
- wait_on_bh();
- }
- }
-}
-
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void
-synchronize_irq(void)
-{
- int cpu = smp_processor_id();
- int local_count;
- int global_count;
-
- mb();
- do {
- local_count = local_irq_count[cpu];
- global_count = atomic_read(&global_irq_count);
- } while (global_count != local_count);
-}
-
-static inline void
-get_irqlock(int cpu)
-{
- if (!spin_trylock(&global_irq_lock)) {
- /* do we already hold the lock? */
- if ((unsigned char) cpu == global_irq_holder)
- return;
- /* Uhhuh.. Somebody else got it. Wait.. */
- spin_lock(&global_irq_lock);
- }
- /*
- * We also to make sure that nobody else is running
- * in an interrupt context.
- */
- wait_on_irq(cpu);
-
- /*
- * Ok, finally..
- */
- global_irq_holder = cpu;
-}
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void
-__global_cli(void)
-{
- unsigned long flags;
-
- __save_flags(flags);
- if (flags & IA64_PSR_I) {
- int cpu = smp_processor_id();
- __cli();
- if (!local_irq_count[cpu])
- get_irqlock(cpu);
- }
-}
-
-void
-__global_sti(void)
-{
- int cpu = smp_processor_id();
-
- if (!local_irq_count[cpu])
- release_irqlock(cpu);
- __sti();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long
-__global_save_flags(void)
-{
- int retval;
- int local_enabled;
- unsigned long flags;
-
- __save_flags(flags);
- local_enabled = flags & IA64_PSR_I;
- /* default to local */
- retval = 2 + local_enabled;
-
- /* check for global flags if we're not in an interrupt */
- if (!local_irq_count[smp_processor_id()]) {
- if (local_enabled)
- retval = 1;
- if (global_irq_holder == (unsigned char) smp_processor_id())
- retval = 0;
- }
- return retval;
-}
-
-void
-__global_restore_flags(unsigned long flags)
-{
- switch (flags) {
- case 0:
- __global_cli();
- break;
- case 1:
- __global_sti();
- break;
- case 2:
- __cli();
- break;
- case 3:
- __sti();
- break;
- default:
- printk("global_restore_flags: %08lx (%08lx) from %p\n",
- flags, (&flags)[-1], __builtin_return_address(0));
- }
-}
diff --git a/arch/ia64/kernel/irq_sapic.c b/arch/ia64/kernel/irq_sapic.c
new file mode 100644
index 000000000..a431275a8
--- /dev/null
+++ b/arch/ia64/kernel/irq_sapic.c
@@ -0,0 +1,38 @@
+/*
+ * SAPIC Interrupt Controller
+ *
+ * This takes care of interrupts that are generated by the CPU's
+ * internal Streamlined Advanced Programmable Interrupt Controller
+ * (SAPIC), such as the ITC and IPI interrupts.
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+static unsigned int
+sapic_noop_startup (unsigned int irq)
+{
+ return 0;
+}
+
+static void
+sapic_noop (unsigned int irq)
+{
+	/* nothing to do... */
+}
+
+struct hw_interrupt_type irq_type_ia64_sapic = {
+ typename: "SAPIC",
+ startup: sapic_noop_startup,
+ shutdown: sapic_noop,
+ enable: sapic_noop,
+ disable: sapic_noop,
+ ack: sapic_noop,
+ end: sapic_noop,
+ set_affinity: (void (*)(unsigned int, unsigned long)) sapic_noop
+};
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index b4592999f..56dd2a333 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -5,213 +5,6 @@
* Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1998-2000 David Mosberger <davidm@hpl.hp.com>
*/
-
-#include <linux/config.h>
-
-#include <asm/break.h>
-#include <asm/offsets.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/system.h>
-#include <asm/unistd.h>
-
-#include "entry.h"
-
-/*
- * A couple of convenience macros that make writing and reading
- * SAVE_MIN and SAVE_REST easier.
- */
-#define rARPR r31
-#define rCRIFS r30
-#define rCRIPSR r29
-#define rCRIIP r28
-#define rARRSC r27
-#define rARPFS r26
-#define rARUNAT r25
-#define rARRNAT r24
-#define rARBSPSTORE r23
-#define rKRBS r22
-#define rB6 r21
-#define rR1 r20
-
-/*
- * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
- * the minimum state necessary that allows us to turn psr.ic back
- * on.
- *
- * Assumed state upon entry:
- * psr.ic: off
- * psr.dt: off
- * r31: contains saved predicates (pr)
- *
- * Upon exit, the state is as follows:
- * psr.ic: off
- * psr.dt: off
- * r2 = points to &pt_regs.r16
- * r12 = kernel sp (kernel virtual address)
- * r13 = points to current task_struct (kernel virtual address)
- * p15 = TRUE if psr.i is set in cr.ipsr
- * predicate registers (other than p6, p7, and p15), b6, r3, r8, r9, r10, r11, r14, r15:
- * preserved
- *
- * Note that psr.ic is NOT turned on by this macro. This is so that
- * we can pass interruption state as arguments to a handler.
- */
-#define DO_SAVE_MIN(COVER,EXTRA) \
- mov rARRSC=ar.rsc; \
- mov rARPFS=ar.pfs; \
- mov rR1=r1; \
- mov rARUNAT=ar.unat; \
- mov rCRIPSR=cr.ipsr; \
- mov rB6=b6; /* rB6 = branch reg 6 */ \
- mov rCRIIP=cr.iip; \
- mov r1=ar.k6; /* r1 = current */ \
- ;; \
- invala; \
- extr.u r16=rCRIPSR,32,2; /* extract psr.cpl */ \
- ;; \
- cmp.eq pKern,p7=r0,r16; /* are we in kernel mode already? (psr.cpl==0) */ \
- /* switch from user to kernel RBS: */ \
- COVER; \
- ;; \
-(p7) mov ar.rsc=r0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
-(p7) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
- ;; \
-(p7) mov rARRNAT=ar.rnat; \
-(pKern) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
-(p7) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
-(p7) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
-(p7) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */ \
- ;; \
-(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
-(p7) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
- ;; \
-(p7) mov r18=ar.bsp; \
-(p7) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
- \
- mov r16=r1; /* initialize first base pointer */ \
- adds r17=8,r1; /* initialize second base pointer */ \
- ;; \
- st8 [r16]=rCRIPSR,16; /* save cr.ipsr */ \
- st8 [r17]=rCRIIP,16; /* save cr.iip */ \
-(pKern) mov r18=r0; /* make sure r18 isn't NaT */ \
- ;; \
- st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
- st8 [r17]=rARUNAT,16; /* save ar.unat */ \
-(p7) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
- ;; \
- st8 [r16]=rARPFS,16; /* save ar.pfs */ \
- st8 [r17]=rARRSC,16; /* save ar.rsc */ \
- tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
- ;; /* avoid RAW on r16 & r17 */ \
-(pKern) adds r16=16,r16; /* skip over ar_rnat field */ \
-(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ \
-(p7) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
-(p7) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
- ;; \
- st8 [r16]=rARPR,16; /* save predicates */ \
- st8 [r17]=rB6,16; /* save b6 */ \
- shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
- ;; \
- st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
- st8.spill [r17]=rR1,16; /* save original r1 */ \
- cmp.ne pEOI,p0=r0,r0 /* clear pEOI by default */ \
- ;; \
- st8.spill [r16]=r2,16; \
- st8.spill [r17]=r3,16; \
- adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
- ;; \
- st8.spill [r16]=r12,16; \
- st8.spill [r17]=r13,16; \
- cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
- ;; \
- st8.spill [r16]=r14,16; \
- st8.spill [r17]=r15,16; \
- dep r14=-1,r0,61,3; \
- ;; \
- st8.spill [r16]=r8,16; \
- st8.spill [r17]=r9,16; \
- adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
- ;; \
- st8.spill [r16]=r10,16; \
- st8.spill [r17]=r11,16; \
- mov r13=ar.k6; /* establish `current' */ \
- ;; \
- or r2=r2,r14; /* make first base a kernel virtual address */ \
- EXTRA; \
- movl r1=__gp; /* establish kernel global pointer */ \
- ;; \
- or r12=r12,r14; /* make sp a kernel virtual address */ \
- or r13=r13,r14; /* make `current' a kernel virtual address */ \
- bsw.1;; /* switch back to bank 1 (must be last in insn group) */
-
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
-# define STOPS nop.i 0x0;; nop.i 0x0;; nop.i 0x0;;
-#else
-# define STOPS
-#endif
-
-#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs,) STOPS
-#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs, mov r15=r19) STOPS
-#define SAVE_MIN DO_SAVE_MIN(mov rCRIFS=r0,) STOPS
-
-/*
- * SAVE_REST saves the remainder of pt_regs (with psr.ic on). This
- * macro guarantees to preserve all predicate registers, r8, r9, r10,
- * r11, r14, and r15.
- *
- * Assumed state upon entry:
- * psr.ic: on
- * psr.dt: on
- * r2: points to &pt_regs.r16
- * r3: points to &pt_regs.r17
- */
-#define SAVE_REST \
- st8.spill [r2]=r16,16; \
- st8.spill [r3]=r17,16; \
- ;; \
- st8.spill [r2]=r18,16; \
- st8.spill [r3]=r19,16; \
- ;; \
- mov r16=ar.ccv; /* M-unit */ \
- movl r18=FPSR_DEFAULT /* L-unit */ \
- ;; \
- mov r17=ar.fpsr; /* M-unit */ \
- mov ar.fpsr=r18; /* M-unit */ \
- ;; \
- st8.spill [r2]=r20,16; \
- st8.spill [r3]=r21,16; \
- mov r18=b0; \
- ;; \
- st8.spill [r2]=r22,16; \
- st8.spill [r3]=r23,16; \
- mov r19=b7; \
- ;; \
- st8.spill [r2]=r24,16; \
- st8.spill [r3]=r25,16; \
- ;; \
- st8.spill [r2]=r26,16; \
- st8.spill [r3]=r27,16; \
- ;; \
- st8.spill [r2]=r28,16; \
- st8.spill [r3]=r29,16; \
- ;; \
- st8.spill [r2]=r30,16; \
- st8.spill [r3]=r31,16; \
- ;; \
- st8 [r2]=r16,16; /* ar.ccv */ \
- st8 [r3]=r17,16; /* ar.fpsr */ \
- ;; \
- st8 [r2]=r18,16; /* b0 */ \
- st8 [r3]=r19,16+8; /* b7 */ \
- ;; \
- stf.spill [r2]=f6,32; \
- stf.spill [r3]=f7,32; \
- ;; \
- stf.spill [r2]=f8,32; \
- stf.spill [r3]=f9,32
-
/*
* This file defines the interrupt vector table used by the CPU.
* It does not include one entry per possible cause of interruption.
@@ -236,9 +29,29 @@
* The table is 32KB in size and must be aligned on 32KB boundary.
* (The CPU ignores the 15 lower bits of the address)
*
- * Table is based upon EAS2.4 (June 1998)
+ * Table is based upon EAS2.6 (Oct 1999)
*/
+#include <linux/config.h>
+
+#include <asm/break.h>
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/system.h>
+#include <asm/unistd.h>
+
+#define MINSTATE_START_SAVE_MIN /* no special action needed */
+#define MINSTATE_END_SAVE_MIN \
+ or r2=r2,r14; /* make first base a kernel virtual address */ \
+ or r12=r12,r14; /* make sp a kernel virtual address */ \
+ or r13=r13,r14; /* make `current' a kernel virtual address */ \
+ bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
+ ;;
+
+#include "minstate.h"
+
#define FAULT(n) \
rsm psr.dt; /* avoid nested faults due to TLB misses... */ \
;; \
@@ -336,8 +149,8 @@ ia64_ivt:
(p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
dep r17=0,r17,0,PAGE_SHIFT // clear low bits to get page address
;;
-(p10) itc.i r18;; // insert the instruction TLB entry (EAS2.6: must be last in insn group!)
-(p11) itc.d r18;; // insert the data TLB entry (EAS2.6: must be last in insn group!)
+(p10) itc.i r18 // insert the instruction TLB entry
+(p11) itc.d r18 // insert the data TLB entry
(p6) br.spnt.few page_fault // handle bad address/page not present (page fault)
mov cr.ifa=r21
@@ -346,9 +159,9 @@ ia64_ivt:
// the exception deferral bit.
adds r16=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r17
;;
-(p7) itc.d r16;; // EAS2.6: must be last in insn group!
+(p7) itc.d r16
mov pr=r31,-1 // restore predicate registers
- rfi;; // must be last insn in an insn group
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -395,11 +208,11 @@ ia64_ivt:
;;
(p7) tbit.z p6,p7=r18,0 // page present bit cleared?
;;
-(p7) itc.i r18;; // insert the instruction TLB entry (EAS2.6: must be last in insn group!)
+(p7) itc.i r18 // insert the instruction TLB entry
(p6) br.spnt.few page_fault // handle bad address/page not present (page fault)
;;
mov pr=r31,-1 // restore predicate registers
- rfi;; // must be last insn in an insn group
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -446,11 +259,11 @@ ia64_ivt:
;;
(p7) tbit.z p6,p7=r18,0 // page present bit cleared?
;;
-(p7) itc.d r18;; // insert the instruction TLB entry (EAS2.6: must be last in insn group!)
+(p7) itc.d r18 // insert the instruction TLB entry
(p6) br.spnt.few page_fault // handle bad address/page not present (page fault)
;;
mov pr=r31,-1 // restore predicate registers
- rfi;; // must be last insn in an insn group
+ rfi
//-----------------------------------------------------------------------------------
// call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address)
@@ -468,10 +281,9 @@ page_fault:
;;
ssm psr.ic | psr.dt
;;
- srlz.d // guarantee that interrupt collection is enabled
-(p15) ssm psr.i // restore psr.i
+ srlz.i // guarantee that interrupt collection is enabled
;;
- srlz.i // must precede "alloc"! (srlz.i implies srlz.d)
+(p15) ssm psr.i // restore psr.i
movl r14=ia64_leave_kernel
;;
alloc r15=ar.pfs,0,0,3,0 // must be first in insn group
@@ -491,15 +303,15 @@ page_fault:
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX
;;
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,52,12 // clear top 12 bits of address
+ dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
;;
or r16=r16,r18 // set bit 4 (uncached) if the access was to region 6
;;
- itc.i r16;; // insert the TLB entry(EAS2.6: must be last in insn group!)
- rfi;; // must be last insn in an insn group
+ itc.i r16 // insert the TLB entry
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -508,15 +320,15 @@ page_fault:
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW
;;
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,52,12 // clear top 12 bits of address
+ dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
;;
or r16=r16,r18 // set bit 4 (uncached) if the access was to region 6
;;
- itc.d r16;; // insert the TLB entry (EAS2.6: must be last in insn group!)
- rfi;; // must be last insn in an insn group
+ itc.d r16 // insert the TLB entry
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -609,27 +421,31 @@ page_fault:
mov b0=r29 // restore b0
;;
st8 [r17]=r18 // store back updated PTE
- itc.d r18;; // install updated PTE (EAS2.6: must be last in insn group!)
- rfi;; // must be last insn in an insn group
+ itc.d r18 // install updated PTE
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
// Like Entry 8, except for instruction access
mov r16=cr.ifa // get the address that caused the fault
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM
+ /*
+ * Erratum 10 (IFA may contain incorrect address) now has
+ * "NoFix" status. There are no plans for fixing this.
+ */
+ mov r17=cr.ipsr
mov r31=pr // save predicates
- mov r30=cr.ipsr
;;
- extr.u r17=r30,IA64_PSR_IS_BIT,1 // get instruction arch. indicator
+ mov r18=cr.iip
+ tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
;;
- cmp.eq p6,p0 = r17,r0 // check if IA64 instruction set
+(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
+#if 0
;;
-(p6) mov r16=cr.iip // get real faulting address
- ;;
-(p6) mov cr.ifa=r16 // reset IFA
+#endif
mov pr=r31,-1
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+#endif /* CONFIG_ITANIUM */
movl r30=1f // load continuation point in case of nested fault
;;
thash r17=r16 // compute virtual address of L3 PTE
@@ -641,8 +457,8 @@ page_fault:
mov b0=r29 // restore b0
;;
st8 [r17]=r18 // store back updated PTE
- itc.i r18;; // install updated PTE (EAS2.6: must be last in insn group!)
- rfi;; // must be last insn in an insn group
+ itc.i r18 // install updated PTE
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -660,8 +476,8 @@ page_fault:
mov b0=r29 // restore b0
;;
st8 [r17]=r18 // store back updated PTE
- itc.d r18;; // install updated PTE (EAS2.6: must be last in insn group!)
- rfi;; // must be last insn in an insn group
+ itc.d r18 // install updated PTE
+ rfi
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
@@ -689,12 +505,11 @@ page_fault:
// turn interrupt collection and data translation back on:
ssm psr.ic | psr.dt
- srlz.d // guarantee that interrupt collection is enabled
+ ;;
+ srlz.i // guarantee that interrupt collection is enabled
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
;;
(p15) ssm psr.i // restore psr.i
- ;;
- srlz.i // ensure everybody knows psr.ic and psr.dt are back on
adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
;;
stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
@@ -813,11 +628,10 @@ dispatch_to_ia32_handler:
;;
mov r14=cr.isr
ssm psr.ic | psr.dt
- srlz.d // guarantee that interrupt collection is enabled
;;
-(p15) ssm psr.i
+ srlz.i // guarantee that interrupt collection is enabled
;;
- srlz.d
+(p15) ssm psr.i
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
@@ -858,12 +672,13 @@ dispatch_to_ia32_handler:
ld8 r16=[r16]
tbit.z p8,p0=r2,5 // (current->flags & PF_TRACESYS) == 0?
;;
- movl r15=ia32_ret_from_syscall
mov b6=r16
+ movl r15=ia32_ret_from_syscall
;;
mov rp=r15
-(p8) br.call.sptk.few b6=b6
- br.call.sptk.few rp=ia32_trace_syscall // rp will be overwritten (ignored)
+(p8) br.call.sptk.many b6=b6
+ ;;
+ br.call.sptk.many rp=ia32_trace_syscall // rp will be overwritten (ignored)
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
@@ -885,13 +700,6 @@ non_ia32_syscall:
FAULT(17)
non_syscall:
-
-#ifdef CONFIG_KDB
- mov r17=__IA64_BREAK_KDB
- ;;
- cmp.eq p8,p0=r16,r17 // is this a kernel breakpoint?
-#endif
-
SAVE_MIN_WITH_COVER
// There is no particular reason for this code to be here, other than that
@@ -904,11 +712,10 @@ non_syscall:
// turn interrupt collection and data translation back on:
ssm psr.ic | psr.dt
- srlz.d // guarantee that interrupt collection is enabled
;;
-(p15) ssm psr.i // restore psr.i
+ srlz.i // guarantee that interrupt collection is enabled
;;
- srlz.i // ensure everybody knows psr.ic and psr.dt are back on
+(p15) ssm psr.i // restore psr.i
movl r15=ia64_leave_kernel
;;
alloc r14=ar.pfs,0,0,2,0
@@ -918,9 +725,6 @@ non_syscall:
SAVE_REST
mov rp=r15
;;
-#ifdef CONFIG_KDB
-(p8) br.call.sptk.few b6=ia64_invoke_kdb
-#endif
br.call.sptk.few b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
.align 1024
@@ -945,11 +749,10 @@ dispatch_unaligned_handler:
//
mov r15=cr.ifa
ssm psr.ic | psr.dt
- srlz.d // guarantee that interrupt collection is enabled
;;
-(p15) ssm psr.i // restore psr.i
+ srlz.i // guarantee that interrupt collection is enabled
;;
- srlz.i
+(p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer
;;
SAVE_REST
@@ -994,13 +797,12 @@ dispatch_to_fault_handler:
mov r11=cr.itir
;;
ssm psr.ic | psr.dt
- srlz.d // guarantee that interrupt collection is enabled
+ ;;
+ srlz.i // guarantee that interrupt collection is enabled
;;
(p15) ssm psr.i // restore psr.i
adds r3=8,r2 // set up second base pointer for SAVE_REST
;;
- srlz.i // must precede "alloc"!
- ;;
alloc r14=ar.pfs,0,0,5,0 // must be first in insn group
mov out0=r15
mov out1=r8
@@ -1012,11 +814,7 @@ dispatch_to_fault_handler:
movl r14=ia64_leave_kernel
;;
mov rp=r14
-#ifdef CONFIG_KDB
- br.call.sptk.few b6=ia64_invoke_kdb_fault_handler
-#else
br.call.sptk.few b6=ia64_fault
-#endif
//
// --- End of long entries, Beginning of short entries
//
@@ -1121,7 +919,7 @@ dispatch_to_fault_handler:
mov cr.ipsr=r16
;;
- rfi;; // and go back (must be last insn in group)
+ rfi // and go back
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1142,11 +940,7 @@ dispatch_to_fault_handler:
;;
srlz.d // ensure everyone knows psr.dt is off
mov r19=30 // error vector for fault_handler (when kernel)
- extr.u r16=r16,32,2 // extract psr.cpl
- ;;
- cmp.eq p6,p7=r0,r16 // if kernel cpl then fault else emulate
-(p7) br.cond.sptk.many dispatch_unaligned_handler
-(p6) br.cond.sptk.many dispatch_to_fault_handler
+ br.cond.sptk.many dispatch_unaligned_handler
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
@@ -1226,6 +1020,31 @@ dispatch_to_fault_handler:
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+#ifdef CONFIG_IA32_SUPPORT
+ rsm psr.dt
+ ;;
+ srlz.d
+ mov r31=pr
+ mov r16=cr.isr
+ ;;
+ extr.u r17=r16,16,8 // get ISR.code
+ mov r18=ar.eflag
+ mov r19=cr.iim // old eflag value
+ ;;
+ cmp.ne p2,p0=2,r17
+(p2) br.cond.spnt 1f // not a system flag fault
+ xor r16=r18,r19
+ ;;
+ extr.u r17=r16,18,1 // get the eflags.ac bit
+ ;;
+ cmp.eq p2,p0=0,r17
+(p2) br.cond.spnt 1f // eflags.ac bit didn't change
+ ;;
+ mov pr=r31,-1 // restore predicate registers
+ rfi
+
+1:
+#endif // CONFIG_IA32_SUPPORT
FAULT(46)
.align 256
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 320c56ebc..150feac03 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2,21 +2,37 @@
* File: mca.c
* Purpose: Generic MCA handling layer
*
+ * Updated for latest kernel
+ * Copyright (C) 2000 Intel
+ * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
+ *
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Vijay Chander(vijay@engr.sgi.com)
+ *
+ * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
+ * added min save state dump, added INIT handler.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/smp_lock.h>
+#include <linux/config.h>
+
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
-#include <asm/spinlock.h>
+
#include <asm/irq.h>
#include <asm/machvec.h>
+
+typedef struct ia64_fptr {
+ unsigned long fp;
+ unsigned long gp;
+} ia64_fptr_t;
ia64_mc_info_t ia64_mc_info;
ia64_mca_sal_to_os_state_t ia64_sal_to_os_handoff_state;
@@ -25,6 +41,11 @@ u64 ia64_mca_proc_state_dump[256];
u64 ia64_mca_stack[1024];
u64 ia64_mca_stackframe[32];
u64 ia64_mca_bspstore[1024];
+u64 ia64_init_stack[INIT_TASK_SIZE] __attribute__((aligned(16)));
+
+#if defined(SAL_MPINIT_WORKAROUND) && !defined(CONFIG_SMP)
+int bootstrap_processor = -1;
+#endif
static void ia64_mca_cmc_vector_setup(int enable,
int_vector_t cmc_vector);
@@ -34,7 +55,98 @@ static void ia64_mca_wakeup_all(void);
static void ia64_log_init(int,int);
static void ia64_log_get(int,int, prfunc_t);
static void ia64_log_clear(int,int,int, prfunc_t);
+extern void ia64_monarch_init_handler (void);
+extern void ia64_slave_init_handler (void);
+
+/*
+ * hack for now, add platform dependent handlers
+ * here
+ */
+#ifndef PLATFORM_MCA_HANDLERS
+void
+mca_handler_platform (void)
+{
+
+}
+
+void
+cmci_handler_platform (int cmc_irq, void *arg, struct pt_regs *ptregs)
+{
+
+}
+/*
+ * This routine will be used to deal with platform specific handling
+ * of the init, i.e. drop into the kernel debugger on server machine,
+ * or if the processor is part of some parallel machine without a
+ * console, then we would call the appropriate debug hooks here.
+ */
+void
+init_handler_platform (struct pt_regs *regs)
+{
+ /* if a kernel debugger is available call it here else just dump the registers */
+ show_regs(regs); /* dump the state info */
+}
+
+void
+log_print_platform ( void *cur_buff_ptr, prfunc_t prfunc)
+{
+}
+
+void
+ia64_mca_init_platform (void)
+{
+}
+
+#endif /* PLATFORM_MCA_HANDLERS */
+
+static char *min_state_labels[] = {
+ "nat",
+ "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8",
+ "r9", "r10","r11", "r12","r13","r14", "r15",
+ "b0r16","b0r17", "b0r18", "b0r19", "b0r20",
+ "b0r21", "b0r22","b0r23", "b0r24", "b0r25",
+ "b0r26", "b0r27", "b0r28","b0r29", "b0r30", "b0r31",
+ "r16", "r17", "r18","r19", "r20", "r21","r22",
+ "r23", "r24","r25", "r26", "r27","r28", "r29", "r30","r31",
+ "preds", "br0", "rsc",
+ "iip", "ipsr", "ifs",
+ "xip", "xpsr", "xfs"
+};
+
+int ia64_pmss_dump_bank0=0; /* dump bank 0 ? */
+
+/*
+ * routine to process and prepare to dump min_state_save
+ * information for debugging purposes.
+ *
+ */
+void
+ia64_process_min_state_save (pal_min_state_area_t *pmss, struct pt_regs *ptregs)
+{
+ int i, max=57;
+ u64 *tpmss_ptr=(u64 *)pmss;
+
+ /* dump out the min_state_area information */
+ for (i=0;i<max;i++) {
+
+ if(!ia64_pmss_dump_bank0) {
+ if(strncmp("B0",min_state_labels[i],2)==0) {
+ tpmss_ptr++; /* skip to next entry */
+ continue;
+ }
+ }
+
+ printk("%5s=0x%16.16lx ",min_state_labels[i],*tpmss_ptr++);
+
+ if (((i+1)%3)==0 || ((!strcmp("GR16",min_state_labels[i]))
+ && !ia64_pmss_dump_bank0))
+ printk("\n");
+ }
+ /* hang city for now, until we include debugger or copy to ptregs to show: */
+ while (1);
+}
+
/*
* ia64_mca_cmc_vector_setup
* Setup the correctable machine check vector register in the processor
@@ -83,7 +195,7 @@ mca_test(void)
#endif /* #if defined(MCA_TEST) */
/*
- * mca_init
+ * ia64_mca_init
* Do all the mca specific initialization on a per-processor basis.
*
* 1. Register spinloop and wakeup request interrupt vectors
@@ -93,7 +205,7 @@ mca_test(void)
* 3. Register OS_INIT handler entry point
*
* 4. Initialize CMCV register to enable/disable CMC interrupt on the
- * processor and hook a handler in the platform-specific mca_init.
+ * processor and hook a handler in the platform-specific ia64_mca_init.
*
* 5. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
*
@@ -103,11 +215,20 @@ mca_test(void)
* None
*/
void __init
-mca_init(void)
+ia64_mca_init(void)
{
- int i;
+ ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
+ ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
+ int i;
+
+ IA64_MCA_DEBUG("ia64_mca_init : begin\n");
+
+#if defined(SAL_MPINIT_WORKAROUND) && !defined(CONFIG_SMP)
+ /* XXX -- workaround for SAL bug for running on MP system, but UP kernel */
+
+ bootstrap_processor = hard_smp_processor_id();
+#endif
- MCA_DEBUG("mca_init : begin\n");
/* Clear the Rendez checkin flag for all cpus */
for(i = 0 ; i < IA64_MAXCPUS; i++)
ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -134,14 +255,14 @@ mca_init(void)
0))
return;
- MCA_DEBUG("mca_init : registered mca rendezvous spinloop and wakeup mech.\n");
+ IA64_MCA_DEBUG("ia64_mca_init : registered mca rendezvous spinloop and wakeup mech.\n");
/*
* Setup the correctable machine check vector
*/
ia64_mca_cmc_vector_setup(IA64_CMC_INT_ENABLE,
IA64_MCA_CMC_INT_VECTOR);
- MCA_DEBUG("mca_init : correctable mca vector setup done\n");
+ IA64_MCA_DEBUG("ia64_mca_init : correctable mca vector setup done\n");
ia64_mc_info.imi_mca_handler = __pa(ia64_os_mca_dispatch);
ia64_mc_info.imi_mca_handler_size =
@@ -155,12 +276,15 @@ mca_init(void)
return;
- MCA_DEBUG("mca_init : registered os mca handler with SAL\n");
+ IA64_MCA_DEBUG("ia64_mca_init : registered os mca handler with SAL\n");
- ia64_mc_info.imi_monarch_init_handler = __pa(ia64_monarch_init_handler);
+ ia64_mc_info.imi_monarch_init_handler = __pa(mon_init_ptr->fp);
ia64_mc_info.imi_monarch_init_handler_size = IA64_INIT_HANDLER_SIZE;
- ia64_mc_info.imi_slave_init_handler = __pa(ia64_slave_init_handler);
+ ia64_mc_info.imi_slave_init_handler = __pa(slave_init_ptr->fp);
ia64_mc_info.imi_slave_init_handler_size = IA64_INIT_HANDLER_SIZE;
+
+ IA64_MCA_DEBUG("ia64_mca_init : os init handler at %lx\n",ia64_mc_info.imi_monarch_init_handler);
+
/* Register the os init handler with SAL */
if (ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
ia64_mc_info.imi_monarch_init_handler,
@@ -173,7 +297,7 @@ mca_init(void)
return;
- MCA_DEBUG("mca_init : registered os init handler with SAL\n");
+ IA64_MCA_DEBUG("ia64_mca_init : registered os init handler with SAL\n");
/* Initialize the areas set aside by the OS to buffer the
* platform/processor error states for MCA/INIT/CMC
@@ -186,9 +310,9 @@ mca_init(void)
ia64_log_init(SAL_INFO_TYPE_CMC, SAL_SUB_INFO_TYPE_PROCESSOR);
ia64_log_init(SAL_INFO_TYPE_CMC, SAL_SUB_INFO_TYPE_PLATFORM);
- mca_init_platform();
+ ia64_mca_init_platform();
- MCA_DEBUG("mca_init : platform-specific mca handling setup done\n");
+ IA64_MCA_DEBUG("ia64_mca_init : platform-specific mca handling setup done\n");
#if defined(MCA_TEST)
mca_test();
@@ -244,7 +368,7 @@ ia64_mca_wakeup_ipi_wait(void)
void
ia64_mca_wakeup(int cpu)
{
- ipi_send(cpu, IA64_MCA_WAKEUP_INT_VECTOR, IA64_IPI_DM_INT);
+ ipi_send(cpu, IA64_MCA_WAKEUP_INT_VECTOR, IA64_IPI_DM_INT, 0);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}
@@ -396,25 +520,6 @@ ia64_mca_ucmc_handler(void)
ia64_return_to_sal_check();
}
-/*
- * SAL to OS entry point for INIT on the monarch processor
- * This has been defined for registration purposes with SAL
- * as a part of mca_init.
- */
-void
-ia64_monarch_init_handler()
-{
-}
-/*
- * SAL to OS entry point for INIT on the slave processor
- * This has been defined for registration purposes with SAL
- * as a part of mca_init.
- */
-
-void
-ia64_slave_init_handler()
-{
-}
/*
* ia64_mca_cmc_int_handler
* This is correctable machine check interrupt handler.
@@ -450,10 +555,9 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
#define IA64_MAX_LOG_SUBTYPES 2 /* Processor, Platform */
typedef struct ia64_state_log_s {
- spinlock_t isl_lock;
- int isl_index;
- sal_log_header_t isl_log[IA64_MAX_LOGS];
-
+ spinlock_t isl_lock;
+ int isl_index;
+ ia64_psilog_t isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;
static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES][IA64_MAX_LOG_SUBTYPES];
@@ -472,6 +576,53 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES][IA64_MAX_LOG_SUBTYPES
#define IA64_LOG_CURR_BUFFER(it, sit) (void *)(&(ia64_state_log[it][sit].isl_log[IA64_LOG_CURR_INDEX(it,sit)]))
/*
+ * C portion of the OS INIT handler
+ *
+ * Called from ia64_<monarch/slave>_init_handler
+ *
+ * Inputs: pointer to pt_regs where processor info was saved.
+ *
+ * Returns:
+ * 0 if SAL must warm boot the System
+ *	1 if SAL must return to interrupted context using PAL_MC_RESUME
+ *
+ */
+
+void
+ia64_init_handler (struct pt_regs *regs)
+{
+ sal_log_processor_info_t *proc_ptr;
+ ia64_psilog_t *plog_ptr;
+
+ printk("Entered OS INIT handler\n");
+
+ /* Get the INIT processor log */
+ ia64_log_get(SAL_INFO_TYPE_INIT, SAL_SUB_INFO_TYPE_PROCESSOR, (prfunc_t)printk);
+ /* Get the INIT platform log */
+ ia64_log_get(SAL_INFO_TYPE_INIT, SAL_SUB_INFO_TYPE_PLATFORM, (prfunc_t)printk);
+
+#ifdef IA64_DUMP_ALL_PROC_INFO
+ ia64_log_print(SAL_INFO_TYPE_INIT, SAL_SUB_INFO_TYPE_PROCESSOR, (prfunc_t)printk);
+#endif
+
+ /*
+ * get pointer to min state save area
+ *
+ */
+ plog_ptr=(ia64_psilog_t *)IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_INIT,
+ SAL_SUB_INFO_TYPE_PROCESSOR);
+ proc_ptr = &plog_ptr->devlog.proclog;
+
+ ia64_process_min_state_save(&proc_ptr->slpi_min_state_area,regs);
+
+ init_handler_platform(regs); /* call platform specific routines */
+
+ /* Clear the INIT SAL logs now that they have been saved in the OS buffer */
+ ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT, SAL_SUB_INFO_TYPE_PROCESSOR);
+ ia64_sal_clear_state_info(SAL_INFO_TYPE_INIT, SAL_SUB_INFO_TYPE_PLATFORM);
+}
+
+/*
* ia64_log_init
* Reset the OS ia64 log buffer
* Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC})
@@ -484,7 +635,7 @@ ia64_log_init(int sal_info_type, int sal_sub_info_type)
IA64_LOG_LOCK_INIT(sal_info_type, sal_sub_info_type);
IA64_LOG_NEXT_INDEX(sal_info_type, sal_sub_info_type) = 0;
memset(IA64_LOG_NEXT_BUFFER(sal_info_type, sal_sub_info_type), 0,
- sizeof(sal_log_header_t) * IA64_MAX_LOGS);
+ sizeof(ia64_psilog_t) * IA64_MAX_LOGS);
}
/*
@@ -499,7 +650,7 @@ void
ia64_log_get(int sal_info_type, int sal_sub_info_type, prfunc_t prfunc)
{
sal_log_header_t *log_buffer;
- int s;
+ int s,total_len=0;
IA64_LOG_LOCK(sal_info_type, sal_sub_info_type);
@@ -507,9 +658,11 @@ ia64_log_get(int sal_info_type, int sal_sub_info_type, prfunc_t prfunc)
/* Get the process state information */
log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type, sal_sub_info_type);
- if (ia64_sal_get_state_info(sal_info_type, sal_sub_info_type ,(u64 *)log_buffer))
+ if (!(total_len=ia64_sal_get_state_info(sal_info_type, sal_sub_info_type ,(u64 *)log_buffer)))
prfunc("ia64_mca_log_get : Getting processor log failed\n");
+ IA64_MCA_DEBUG("ia64_log_get: retrieved %d bytes of error information\n",total_len);
+
IA64_LOG_INDEX_INC(sal_info_type, sal_sub_info_type);
IA64_LOG_UNLOCK(sal_info_type, sal_sub_info_type);
@@ -542,7 +695,7 @@ ia64_log_clear(int sal_info_type, int sal_sub_info_type, int clear_os_buffer, pr
/* Get the process state information */
log_buffer = IA64_LOG_CURR_BUFFER(sal_info_type, sal_sub_info_type);
- memset(log_buffer, 0, sizeof(sal_log_header_t));
+ memset(log_buffer, 0, sizeof(ia64_psilog_t));
IA64_LOG_INDEX_DEC(sal_info_type, sal_sub_info_type);
@@ -731,11 +884,7 @@ ia64_log_processor_info_print(sal_log_header_t *lh, prfunc_t prfunc)
if (lh->slh_log_type != SAL_SUB_INFO_TYPE_PROCESSOR)
return;
-#if defined(MCA_TEST)
- slpi = &slpi_buf;
-#else
- slpi = (sal_log_processor_info_t *)lh->slh_log_dev_spec_info;
-#endif /#if defined(MCA_TEST) */
+ slpi = (sal_log_processor_info_t *)((char *)lh+sizeof(sal_log_header_t)); /* point to proc info */
if (!slpi) {
prfunc("No Processor Error Log found\n");
@@ -763,14 +912,6 @@ ia64_log_processor_info_print(sal_log_header_t *lh, prfunc_t prfunc)
ia64_log_processor_regs_print(slpi->slpi_fr, 128, "Floating-point", "fr",
prfunc);
- /* Print bank1-gr NAT register contents if valid */
- ia64_log_processor_regs_print(&slpi->slpi_bank1_nat_bits, 1, "NAT", "nat", prfunc);
-
- /* Print bank 1 register contents if valid */
- if (slpi->slpi_valid.slpi_bank1_gr)
- ia64_log_processor_regs_print(slpi->slpi_bank1_gr, 16, "Bank1-General", "gr",
- prfunc);
-
/* Print the cache check information if any*/
for (i = 0 ; i < MAX_CACHE_ERRORS; i++)
ia64_log_cache_check_info_print(i,
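
Illustrative sketch (not part of the patch): the IA64_LOG_NEXT/CURR macros
above manage a two-slot ring of ia64_psilog_t buffers per (info type,
sub-type) pair -- ia64_log_get() reads the SAL record into the "next" slot
and then advances the index so that slot becomes "current".  A hypothetical
C model of that indexing (the EX_* names and the record size are made up):

	#define EX_MAX_LOGS	2		/* mirrors IA64_MAX_LOGS */

	struct ex_state_log {
		int idx;			/* slot most recently filled */
		char buf[EX_MAX_LOGS][1024];	/* placeholder record size */
	};

	#define EX_LOG_CURR(l)		((l)->buf[(l)->idx])
	#define EX_LOG_NEXT(l)		((l)->buf[((l)->idx + 1) % EX_MAX_LOGS])
	#define EX_LOG_ADVANCE(l)	((l)->idx = ((l)->idx + 1) % EX_MAX_LOGS)
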
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 3d49ac06e..81966bb99 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -1,7 +1,31 @@
+//
+// assembly portion of the IA64 MCA handling
+//
+// Mods by cfleck to integrate into kernel build
+// 00/03/15 davidm Added various stop bits to get a clean compile
+// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack,
+// switch modes, jump to C INIT handler
+//
+#include <asm/offsets.h>
+#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/mcaasm.h>
-#include <asm/page.h>
+#include <asm/mca_asm.h>
#include <asm/mca.h>
+
+/*
+ * When we get a machine check, the kernel stack pointer is no longer
+ * valid, so we need to set a new stack pointer.
+ */
+#define MINSTATE_START_SAVE_MIN \
+(pKern) movl sp=ia64_init_stack+IA64_STK_OFFSET-IA64_PT_REGS_SIZE; \
+ ;;
+
+#define MINSTATE_END_SAVE_MIN \
+ or r12=r12,r14; /* make sp a kernel virtual address */ \
+ or r13=r13,r14; /* make `current' a kernel virtual address */ \
+ ;;
+
+#include "minstate.h"
.psr abi64
.psr lsb
@@ -54,7 +78,9 @@
100: (p) mov temp=ip; \
;; \
(p) adds temp=to_label-100b,temp;\
+ ;; \
(p) adds temp=adjust,temp; \
+ ;; \
(p) mov b1=temp ; \
(p) br b1
@@ -68,6 +94,7 @@
.global ia64_mca_stack
.global ia64_mca_stackframe
.global ia64_mca_bspstore
+ .global ia64_init_stack
.text
.align 16
@@ -341,6 +368,7 @@ cSaveARs:
mov r3=ar16 // ar.rsc
mov ar16=r0 // put RSE in enforced lazy mode
mov r5=ar17 // ar.bsp
+ ;;
mov r7=ar18;; // ar.bspstore
st8 [r2]=r3,3*8
st8 [r4]=r5,3*8
@@ -575,6 +603,7 @@ restore_ARs:
// mov ar16=r3 // ar.rsc
// mov ar17=r5 // ar.bsp is read only
mov ar16=r0 // make sure that RSE is in enforced lazy mode
+ ;;
mov ar18=r7;; // ar.bspstore
ld8 r9=[r2],8*13;;
@@ -619,3 +648,146 @@ end_os_mca_restore:
BRANCH(ia64_os_mca_done_restore, r2, p0, -0x20)
;;
//EndStub//////////////////////////////////////////////////////////////////////
+
+// ok, the issue here is that we need to save state information so
+// it can be usable by the kernel debugger and show regs routines.
+// In order to do this, our best bet is to save the current state (plus
+// the state information obtained from the MIN_STATE_AREA) into a pt_regs
+// format. This way we can pass it on in a usable format.
+//
+
+//
+// SAL to OS entry point for INIT on the monarch processor
+// This has been defined for registration purposes with SAL
+// as a part of ia64_mca_init.
+//
+// When we get here, the following registers have been
+// set by the SAL for our use
+//
+// 1. GR1 = OS INIT GP
+// 2. GR8 = PAL_PROC physical address
+// 3. GR9 = SAL_PROC physical address
+// 4. GR10 = SAL GP (physical)
+// 5. GR11 = Init Reason
+// 0 = Received INIT for event other than crash dump switch
+// 1 = Received wakeup at the end of an OS_MCA corrected machine check
+// 2 = Received INIT due to CrashDump switch assertion
+//
+// 6. GR12 = Return address to location within SAL_INIT procedure
+
+
+ .text
+ .align 16
+.global ia64_monarch_init_handler
+.proc ia64_monarch_init_handler
+ia64_monarch_init_handler:
+
+#if defined(SAL_MPINIT_WORKAROUND)
+ //
+ // work around SAL bug that sends all processors to monarch entry
+ //
+ .global bootstrap_processor
+
+ movl r21=24
+ movl r20=16
+ mov r17=cr.lid
+ movl r18=bootstrap_processor
+ ;;
+ dep r18=0,r18,61,3 // convert bsp to physical address
+ ;;
+ shr r19=r17,r20
+ shr r22=r17,r21
+ ld4 r18=[r18] // get the BSP ID
+ ;;
+ and r19=0xf, r19
+ and r22=0xf, r22
+ ;;
+ shl r19=r19,8 // get them in the right order
+ ;;
+ or r22=r22,r19 // combine EID and LID
+ ;;
+ cmp.eq p6,p7=r22,r18 // Am I the BSP ?
+(p7) br.cond.spnt slave_init_spin_me
+ ;;
+#endif
+
+
+//
+// ok, the first thing we do is stash the information
+// the SAL passed to os
+//
+_tmp = r2
+ movl _tmp=ia64_sal_to_os_handoff_state
+ ;;
+ dep _tmp=0,_tmp, 61, 3 // get physical address
+ ;;
+ st8 [_tmp]=r1,0x08;;
+ st8 [_tmp]=r8,0x08;;
+ st8 [_tmp]=r9,0x08;;
+ st8 [_tmp]=r10,0x08;;
+ st8 [_tmp]=r11,0x08;;
+ st8 [_tmp]=r12,0x08;;
+
+// now we want to save information so we can dump registers
+ SAVE_MIN_WITH_COVER
+ ;;
+ mov r8=cr.ifa
+ mov r9=cr.isr
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ SAVE_REST
+
+// ok, enough should be saved at this point to be dangerous, and supply
+// information for a dump
+// We need to switch to Virtual mode before hitting the C functions.
+//
+//
+//
+ movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
+ mov r3=psr // get the current psr, minimum enabled at this point
+ ;;
+ or r2=r2,r3
+ ;;
+ movl r3=IVirtual_Switch
+ ;;
+ mov cr.iip=r3 // short return to set the appropriate bits
+ mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
+ ;;
+ rfi
+ ;;
+IVirtual_Switch:
+ //
+ // We should now be running virtual
+ //
+ // Let's call the C handler to get the rest of the state info
+ //
+ alloc r14=ar.pfs,0,0,1,0 // now it's safe (must be first in insn group!)
+ ;; //
+ adds out0=16,sp // out0 = pointer to pt_regs
+ ;;
+
+ br.call.sptk.few rp=ia64_init_handler
+ ;;
+
+return_from_init:
+ br.sptk return_from_init
+
+ .endp
+
+//
+// SAL to OS entry point for INIT on the slave processor
+// This has been defined for registration purposes with SAL
+// as a part of ia64_mca_init.
+//
+
+ .text
+ .align 16
+.global ia64_slave_init_handler
+.proc ia64_slave_init_handler
+ia64_slave_init_handler:
+
+
+slave_init_spin_me:
+ br.sptk slave_init_spin_me
+ ;;
+ .endp
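
Illustrative only (not part of the patch): a rough C rendering of the
SAL_MPINIT_WORKAROUND check in ia64_monarch_init_handler above.  It pulls
two 4-bit fields out of cr.lid, recombines them, and compares the result
against the recorded bootstrap_processor id; the helper name is made up.

	static int example_is_bsp(unsigned long lid, unsigned int bsp_id)
	{
		unsigned int hi = (lid >> 16) & 0xf;	/* shr r19=r17,r20; and r19=0xf,r19 */
		unsigned int lo = (lid >> 24) & 0xf;	/* shr r22=r17,r21; and r22=0xf,r22 */

		return ((hi << 8) | lo) == bsp_id;	/* shl, or, cmp.eq */
	}
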
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
new file mode 100644
index 000000000..bcfe1659c
--- /dev/null
+++ b/arch/ia64/kernel/minstate.h
@@ -0,0 +1,205 @@
+#include <linux/config.h>
+
+#include "entry.h"
+
+/*
+ * A couple of convenience macros that make writing and reading
+ * SAVE_MIN and SAVE_REST easier.
+ */
+#define rARPR r31
+#define rCRIFS r30
+#define rCRIPSR r29
+#define rCRIIP r28
+#define rARRSC r27
+#define rARPFS r26
+#define rARUNAT r25
+#define rARRNAT r24
+#define rARBSPSTORE r23
+#define rKRBS r22
+#define rB6 r21
+#define rR1 r20
+
+/*
+ * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ * psr.ic: off
+ * psr.dt: off
+ * r31: contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ * psr.ic: off
+ * psr.dt: off
+ * r2 = points to &pt_regs.r16
+ * r12 = kernel sp (kernel virtual address)
+ * r13 = points to current task_struct (kernel virtual address)
+ * p15 = TRUE if psr.i is set in cr.ipsr
+ * predicate registers (other than p6, p7, and p15), b6, r3, r8, r9, r10, r11, r14, r15:
+ * preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro. This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+#define DO_SAVE_MIN(COVER,EXTRA) \
+ mov rARRSC=ar.rsc; \
+ mov rARPFS=ar.pfs; \
+ mov rR1=r1; \
+ mov rARUNAT=ar.unat; \
+ mov rCRIPSR=cr.ipsr; \
+ mov rB6=b6; /* rB6 = branch reg 6 */ \
+ mov rCRIIP=cr.iip; \
+ mov r1=ar.k6; /* r1 = current */ \
+ ;; \
+ invala; \
+ extr.u r16=rCRIPSR,32,2; /* extract psr.cpl */ \
+ ;; \
+ cmp.eq pKern,p7=r0,r16; /* are we in kernel mode already? (psr.cpl==0) */ \
+ /* switch from user to kernel RBS: */ \
+ COVER; \
+ ;; \
+ MINSTATE_START_SAVE_MIN \
+(p7) mov ar.rsc=r0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
+(p7) addl rKRBS=IA64_RBS_OFFSET,r1; /* compute base of register backing store */ \
+ ;; \
+(p7) mov rARRNAT=ar.rnat; \
+(pKern) dep r1=0,sp,61,3; /* compute physical addr of sp */ \
+(p7) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
+(p7) mov rARBSPSTORE=ar.bspstore; /* save ar.bspstore */ \
+(p7) dep rKRBS=-1,rKRBS,61,3; /* compute kernel virtual addr of RBS */ \
+ ;; \
+(pKern) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
+(p7) mov ar.bspstore=rKRBS; /* switch to kernel RBS */ \
+ ;; \
+(p7) mov r18=ar.bsp; \
+(p7) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
+ \
+ mov r16=r1; /* initialize first base pointer */ \
+ adds r17=8,r1; /* initialize second base pointer */ \
+ ;; \
+ st8 [r16]=rCRIPSR,16; /* save cr.ipsr */ \
+ st8 [r17]=rCRIIP,16; /* save cr.iip */ \
+(pKern) mov r18=r0; /* make sure r18 isn't NaT */ \
+ ;; \
+ st8 [r16]=rCRIFS,16; /* save cr.ifs */ \
+ st8 [r17]=rARUNAT,16; /* save ar.unat */ \
+(p7) sub r18=r18,rKRBS; /* r18=RSE.ndirty*8 */ \
+ ;; \
+ st8 [r16]=rARPFS,16; /* save ar.pfs */ \
+ st8 [r17]=rARRSC,16; /* save ar.rsc */ \
+ tbit.nz p15,p0=rCRIPSR,IA64_PSR_I_BIT \
+ ;; /* avoid RAW on r16 & r17 */ \
+(pKern) adds r16=16,r16; /* skip over ar_rnat field */ \
+(pKern) adds r17=16,r17; /* skip over ar_bspstore field */ \
+(p7) st8 [r16]=rARRNAT,16; /* save ar.rnat */ \
+(p7) st8 [r17]=rARBSPSTORE,16; /* save ar.bspstore */ \
+ ;; \
+ st8 [r16]=rARPR,16; /* save predicates */ \
+ st8 [r17]=rB6,16; /* save b6 */ \
+ shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
+ ;; \
+ st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
+ st8.spill [r17]=rR1,16; /* save original r1 */ \
+ cmp.ne pEOI,p0=r0,r0 /* clear pEOI by default */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r2,16; \
+.mem.offset 8,0; st8.spill [r17]=r3,16; \
+ adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r12,16; \
+.mem.offset 8,0; st8.spill [r17]=r13,16; \
+ cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r14,16; \
+.mem.offset 8,0; st8.spill [r17]=r15,16; \
+ dep r14=-1,r0,61,3; \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r8,16; \
+.mem.offset 8,0; st8.spill [r17]=r9,16; \
+ adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r16]=r10,16; \
+.mem.offset 8,0; st8.spill [r17]=r11,16; \
+ mov r13=ar.k6; /* establish `current' */ \
+ ;; \
+ EXTRA; \
+ movl r1=__gp; /* establish kernel global pointer */ \
+ ;; \
+ MINSTATE_END_SAVE_MIN
+
+/*
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on). This
+ * macro guarantees to preserve all predicate registers, r8, r9, r10,
+ * r11, r14, and r15.
+ *
+ * Assumed state upon entry:
+ * psr.ic: on
+ * psr.dt: on
+ * r2: points to &pt_regs.r16
+ * r3: points to &pt_regs.r17
+ */
+#define SAVE_REST \
+.mem.offset 0,0; st8.spill [r2]=r16,16; \
+.mem.offset 8,0; st8.spill [r3]=r17,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r18,16; \
+.mem.offset 8,0; st8.spill [r3]=r19,16; \
+ ;; \
+ mov r16=ar.ccv; /* M-unit */ \
+ movl r18=FPSR_DEFAULT /* L-unit */ \
+ ;; \
+ mov r17=ar.fpsr; /* M-unit */ \
+ mov ar.fpsr=r18; /* M-unit */ \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r20,16; \
+.mem.offset 8,0; st8.spill [r3]=r21,16; \
+ mov r18=b0; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r22,16; \
+.mem.offset 8,0; st8.spill [r3]=r23,16; \
+ mov r19=b7; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r24,16; \
+.mem.offset 8,0; st8.spill [r3]=r25,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r26,16; \
+.mem.offset 8,0; st8.spill [r3]=r27,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r28,16; \
+.mem.offset 8,0; st8.spill [r3]=r29,16; \
+ ;; \
+.mem.offset 0,0; st8.spill [r2]=r30,16; \
+.mem.offset 8,0; st8.spill [r3]=r31,16; \
+ ;; \
+ st8 [r2]=r16,16; /* ar.ccv */ \
+ st8 [r3]=r17,16; /* ar.fpsr */ \
+ ;; \
+ st8 [r2]=r18,16; /* b0 */ \
+ st8 [r3]=r19,16+8; /* b7 */ \
+ ;; \
+ stf.spill [r2]=f6,32; \
+ stf.spill [r3]=f7,32; \
+ ;; \
+ stf.spill [r2]=f8,32; \
+ stf.spill [r3]=f9,32
+
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+# define STOPS nop.i 0x0;; nop.i 0x0;; nop.i 0x0;;
+#else
+# define STOPS
+#endif
+
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs,) STOPS
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs, mov r15=r19) STOPS
+#define SAVE_MIN DO_SAVE_MIN(mov rCRIFS=r0,) STOPS
+
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+# define STOPS nop.i 0x0;; nop.i 0x0;; nop.i 0x0;;
+#else
+# define STOPS
+#endif
+
+#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs,) STOPS
+#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover;; mov rCRIFS=cr.ifs, mov r15=r19) STOPS
+#define SAVE_MIN DO_SAVE_MIN(mov rCRIFS=r0,) STOPS
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 18a8e342e..22ed4f569 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -23,6 +23,16 @@
#include <asm/uaccess.h>
/*
+ * Bits in the PSR that we allow ptrace() to change:
+ * be, up, ac, mfl, mfh (the user mask; five bits total)
+ * db (debug breakpoint fault; one bit)
+ * id (instruction debug fault disable; one bit)
+ * dd (data debug fault disable; one bit)
+ * ri (restart instruction; two bits)
+ */
+#define CR_IPSR_CHANGE_MASK 0x06a00100003eUL
+
+/*
* Collect the NaT bits for r1-r31 from sw->caller_unat and
* sw->ar_unat and return a NaT bitset where bit i is set iff the NaT
* bit of register i is set.
@@ -352,6 +362,94 @@ ia64_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr,
}
/*
+ * Synchronize (i.e, write) the RSE backing store living in kernel
+ * space to the VM of the indicated child process.
+ *
+ * If new_bsp is non-zero, the bsp will (effectively) be updated to
+ * the new value upon resumption of the child process. This is
+ * accomplished by setting the loadrs value to zero and the bspstore
+ * value to the new bsp value.
+ *
+ * When new_bsp and force_loadrs_to_zero are both 0, the register
+ * backing store in kernel space is written to user space and the
+ * loadrs and bspstore values are left alone.
+ *
+ * When new_bsp is zero and force_loadrs_to_zero is 1 (non-zero),
+ * loadrs is set to 0, and the bspstore value is set to the old bsp
+ * value. This will cause the stacked registers (r32 and up) to be
+ * obtained entirely from the child's memory space rather than
+ * from the kernel. (This makes it easier to write code for
+ * modifying the stacked registers in multi-threaded programs.)
+ *
+ * Note: I had originally written this function without the
+ * force_loadrs_to_zero parameter; it was written so that loadrs would
+ * always be set to zero. But I had problems with certain system
+ * calls apparently causing a portion of the RBS to be zeroed. (I
+ * still don't understand why this was happening.) Anyway, it'd
+ * definitely be less intrusive to leave loadrs and bspstore alone if
+ * possible.
+ */
+static long
+sync_kernel_register_backing_store (struct task_struct *child,
+ long new_bsp,
+ int force_loadrs_to_zero)
+{
+ unsigned long *krbs, bspstore, bsp, krbs_num_regs, rbs_end, addr, val;
+ long ndirty, ret;
+ struct pt_regs *child_regs;
+ struct switch_stack *child_stack;
+
+ ret = 0;
+ child_regs = ia64_task_regs(child);
+ child_stack = (struct switch_stack *) child_regs - 1;
+
+ krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
+ ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
+ bspstore = child_regs->ar_bspstore;
+ bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
+ krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
+ rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, krbs_num_regs);
+
+ /* Return early if nothing to do */
+ if (bsp == new_bsp)
+ return 0;
+
+ /* Write portion of backing store living on kernel stack to the child's VM. */
+ for (addr = bspstore; addr < rbs_end; addr += 8) {
+ ret = ia64_peek(child_regs, child, addr, &val);
+ if (ret != 0)
+ return ret;
+ if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
+ return -EIO;
+ }
+
+ if (new_bsp != 0) {
+ force_loadrs_to_zero = 1;
+ bsp = new_bsp;
+ }
+
+ if (force_loadrs_to_zero) {
+ child_regs->loadrs = 0;
+ child_regs->ar_bspstore = bsp;
+ }
+
+ return ret;
+}
+
+static void
+sync_thread_rbs (struct task_struct *child, int make_writable)
+{
+ struct task_struct *p;
+ read_lock(&tasklist_lock);
+ for_each_task(p) {
+ if (p->mm == child->mm && p->state != TASK_RUNNING)
+ sync_kernel_register_backing_store(p, 0, make_writable);
+ }
+ read_unlock(&tasklist_lock);
+ child->thread.flags |= IA64_THREAD_KRBS_SYNCED;
+}
+
+/*
* Ensure the state in child->thread.fph is up-to-date.
*/
static void
@@ -375,8 +473,8 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
struct switch_stack *child_stack;
struct pt_regs *child_regs;
struct task_struct *child;
- unsigned long flags, *base;
- long ret, regnum;
+ unsigned long flags, regnum, *base;
+ long ret;
lock_kernel();
ret = -EPERM;
@@ -441,6 +539,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: /* read word at location addr */
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)
+ && atomic_read(&child->mm->mm_users) > 1)
+ sync_thread_rbs(child, 0);
ret = ia64_peek(regs, child, addr, &data);
if (ret == 0) {
ret = data;
@@ -450,6 +551,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)
+ && atomic_read(&child->mm->mm_users) > 1)
+ sync_thread_rbs(child, 1);
ret = ia64_poke(regs, child, addr, data);
goto out;
@@ -477,8 +581,35 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
bspstore = (unsigned long *) child_regs->ar_bspstore;
ndirty = ia64_rse_num_regs(rbs, rbs + (ret >> 19));
ret = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+
+ /*
+ * If we're in a system call, no ``cover'' was done. So
+ * to make things uniform, we'll add the appropriate
+ * displacement onto bsp if we're in a system call.
+ *
+ * Note: It may be better to leave the system call case
+ * alone and subtract the amount of the cover for the
+ * non-syscall case. That way the reported bsp value
+ * would actually be the correct bsp for the child
+ * process.
+ */
+ if (!(child_regs->cr_ifs & (1UL << 63))) {
+ ret = (unsigned long)
+ ia64_rse_skip_regs((unsigned long *) ret,
+ child_stack->ar_pfs & 0x7f);
+ }
+ } else if (addr == PT_CFM) {
+ /* ret currently contains pt_regs.cr_ifs */
+ if ((ret & (1UL << 63)) == 0)
+ ret = child_stack->ar_pfs;
+ ret &= 0x3fffffffffUL; /* return only the CFM */
}
} else {
+ if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+ child->thread.flags |= IA64_THREAD_DBG_VALID;
+ memset(child->thread.dbr, 0, sizeof child->thread.dbr);
+ memset(child->thread.ibr, 0, sizeof child->thread.ibr);
+ }
if (addr >= PT_IBR) {
regnum = (addr - PT_IBR) >> 3;
base = &child->thread.ibr[0];
@@ -488,7 +619,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
}
if (regnum >= 8)
goto out;
- data = base[regnum];
+ ret = base[regnum];
}
regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
goto out;
@@ -503,29 +634,47 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
sync_fph(child);
addr += (unsigned long) &child->thread.fph;
*(unsigned long *) addr = data;
- if (ret < 0)
- goto out;
+ } else if (addr == PT_AR_BSPSTORE || addr == PT_CALLER_UNAT
+ || addr == PT_KERNEL_FPSR || addr == PT_K_B0 || addr == PT_K_AR_PFS
+ || (PT_K_AR_UNAT <= addr && addr <= PT_K_PR)) {
+ /*
+ * Don't permit changes to certain registers.
+ *
+ * We don't allow bspstore to be modified because doing
+ * so would mess up any modifications to bsp. (See
+ * sync_kernel_register_backing_store for the details.)
+ */
+ goto out;
+ } else if (addr == PT_AR_BSP) {
+ /* FIXME? Account for lack of ``cover'' in the syscall case */
+ ret = sync_kernel_register_backing_store(child, data, 1);
+ goto out;
+ } else if (addr == PT_CFM) {
+ child_regs = ia64_task_regs(child);
+ child_stack = (struct switch_stack *) child_regs - 1;
+
+ if (child_regs->cr_ifs & (1UL << 63)) {
+ child_regs->cr_ifs = (child_regs->cr_ifs & ~0x3fffffffffUL)
+ | (data & 0x3fffffffffUL);
+ } else {
+ child_stack->ar_pfs = (child_stack->ar_pfs & ~0x3fffffffffUL)
+ | (data & 0x3fffffffffUL);
+ }
} else if (addr < PT_F9+16) {
/* accessing switch_stack or pt_regs */
child_regs = ia64_task_regs(child);
child_stack = (struct switch_stack *) child_regs - 1;
- if (addr == PT_AR_BSP) {
- /* compute the loadrs value based on bsp and bspstore: */
- unsigned long *rbs, *bspstore, ndirty, *kbsp;
-
- bspstore = (unsigned long *) child_regs->ar_bspstore;
- ndirty = ia64_rse_num_regs(bspstore, (unsigned long *) data);
- rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- kbsp = ia64_rse_skip_regs(rbs, ndirty);
- data = (kbsp - rbs) << 19;
- }
+ if (addr == PT_CR_IPSR)
+ data = (data & CR_IPSR_CHANGE_MASK)
+ | (child_regs->cr_ipsr & ~CR_IPSR_CHANGE_MASK);
+
*(unsigned long *) ((long) child_stack + addr - PT_CALLER_UNAT) = data;
} else {
if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
child->thread.flags |= IA64_THREAD_DBG_VALID;
- memset(current->thread.dbr, 0, sizeof current->thread.dbr);
- memset(current->thread.ibr, 0, sizeof current->thread.ibr);
+ memset(child->thread.dbr, 0, sizeof child->thread.dbr);
+ memset(child->thread.ibr, 0, sizeof child->thread.ibr);
}
if (addr >= PT_IBR) {
@@ -538,7 +687,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
if (regnum >= 8)
goto out;
if (regnum & 1) {
- /* force breakpoint to be effective a most for user-level: */
+ /* force breakpoint to be effective only for user-level: */
data &= ~(0x7UL << 56);
}
base[regnum] = data;
@@ -546,6 +695,23 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ret = 0;
goto out;
+ case PTRACE_GETSIGINFO:
+ ret = -EIO;
+ if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t))
+ || child->thread.siginfo == 0)
+ goto out;
+ copy_to_user((siginfo_t *) data, child->thread.siginfo, sizeof (siginfo_t));
+ ret = 0;
+ goto out;
+ break;
+ case PTRACE_SETSIGINFO:
+ ret = -EIO;
+ if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t))
+ || child->thread.siginfo == 0)
+ goto out;
+ copy_from_user(child->thread.siginfo, (siginfo_t *) data, sizeof (siginfo_t));
+ ret = 0;
+ goto out;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: /* restart after signal. */
ret = -EIO;
@@ -561,6 +727,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ia64_psr(ia64_task_regs(child))->ss = 0;
ia64_psr(ia64_task_regs(child))->tb = 0;
+ /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
+ child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
+
wake_up_process(child);
ret = 0;
goto out;
@@ -579,6 +748,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ia64_psr(ia64_task_regs(child))->ss = 0;
ia64_psr(ia64_task_regs(child))->tb = 0;
+ /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
+ child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
+
wake_up_process(child);
ret = 0;
goto out;
@@ -597,6 +769,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
}
child->exit_code = data;
+ /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
+ child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
+
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
@@ -619,6 +794,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ia64_psr(ia64_task_regs(child))->ss = 0;
ia64_psr(ia64_task_regs(child))->tb = 0;
+ /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
+ child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
+
wake_up_process(child);
ret = 0;
goto out;
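
Illustrative only (not part of the patch): the PT_CFM read and write paths
added above share one rule -- when bit 63 (the valid bit) of cr.ifs is set,
the current frame marker lives in cr.ifs, otherwise it lives in ar.pfs, and
only the low 38 bits are the CFM.  A small hypothetical helper for the read
side:

	static unsigned long example_read_cfm(unsigned long cr_ifs, unsigned long ar_pfs)
	{
		unsigned long cfm = (cr_ifs & (1UL << 63)) ? cr_ifs : ar_pfs;

		return cfm & 0x3fffffffffUL;	/* keep only the CFM bits */
	}
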
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 8743f6588..f4b8ce9dd 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -139,7 +139,7 @@ ia64_sal_init (struct ia64_sal_systab *systab)
case IA64_SAL_AP_EXTERNAL_INT:
ap_wakeup_vector = ap->vector;
# ifdef SAL_DEBUG
- printk("SAL: AP wakeup using external interrupt; "
+ printk("SAL: AP wakeup using external interrupt "
"vector 0x%lx\n", ap_wakeup_vector);
# endif
break;
@@ -151,6 +151,36 @@ ia64_sal_init (struct ia64_sal_systab *systab)
break;
}
#endif
+ case SAL_DESC_PLATFORM_FEATURE:
+ {
+ struct ia64_sal_desc_platform_feature *pf = (void *) p;
+ printk("SAL: Platform features ");
+
+ if (pf->feature_mask & (1 << 0))
+ printk("BusLock ");
+
+ if (pf->feature_mask & (1 << 1)) {
+ printk("IRQ_Redirection ");
+#ifdef CONFIG_SMP
+ if (no_int_routing)
+ smp_int_redirect &= ~SMP_IRQ_REDIRECTION;
+ else
+ smp_int_redirect |= SMP_IRQ_REDIRECTION;
+#endif
+ }
+ if (pf->feature_mask & (1 << 2)) {
+ printk("IPI_Redirection ");
+#ifdef CONFIG_SMP
+ if (no_int_routing)
+ smp_int_redirect &= ~SMP_IPI_REDIRECTION;
+ else
+ smp_int_redirect |= SMP_IPI_REDIRECTION;
+#endif
+ }
+ printk("\n");
+ break;
+ }
+
}
p += SAL_DESC_SIZE(*p);
}
diff --git a/arch/ia64/kernel/sal_stub.S b/arch/ia64/kernel/sal_stub.S
index 7ab16bbcd..d73851810 100644
--- a/arch/ia64/kernel/sal_stub.S
+++ b/arch/ia64/kernel/sal_stub.S
@@ -1,17 +1,19 @@
/*
- * gcc currently does not conform to the ia-64 calling convention as far
- * as returning function values are concerned. Instead of returning
- * values up to 32 bytes in size in r8-r11, gcc returns any value
- * bigger than a doubleword via a structure that's allocated by the
- * caller and whose address is passed into the function. Since
- * SAL_PROC returns values according to the calling convention, this
- * stub takes care of copying r8-r11 to the place where gcc expects
- * them.
- *
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef __GCC_MULTIREG_RETVALS__
+ /*
+ * gcc currently does not conform to the ia-64 calling
+ * convention as far as returning function values are
+ * concerned. Instead of returning values up to 32 bytes in
+ * size in r8-r11, gcc returns any value bigger than a
+ * doubleword via a structure that's allocated by the caller
+ * and whose address is passed into the function. Since
+ * SAL_PROC returns values according to the calling
+ * convention, this stub takes care of copying r8-r11 to the
+ * place where gcc expects them.
+ */
.text
.psr abi64
.psr lsb
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
index 980fa4329..bc55670bf 100644
--- a/arch/ia64/kernel/semaphore.c
+++ b/arch/ia64/kernel/semaphore.c
@@ -310,7 +310,7 @@ __down_write_failed (struct rw_semaphore *sem, long count)
do {
old_count = sem->count;
count = old_count - RW_LOCK_BIAS;
- } while (cmpxchg(&sem->count, old_count, count) != old_count);
+ } while (cmpxchg_acq(&sem->count, old_count, count) != old_count);
if (count == 0)
return;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 58ddb1fb1..80838f990 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -8,10 +8,12 @@
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*
- * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
- * 02/01/00 R.Seth fixed get_cpuinfo for SMP
- * 01/07/99 S.Eranian added the support for command line argument
- * 06/24/99 W.Drummond added boot_cpu_data.
+ * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
+ * 03/31/00 R.Seth cpu_initialized and current->processor fixes
+ * 02/04/00 D.Mosberger some more get_cpuinfo fixes...
+ * 02/01/00 R.Seth fixed get_cpuinfo for SMP
+ * 01/07/99 S.Eranian added the support for command line argument
+ * 06/24/99 W.Drummond added boot_cpu_data.
*/
#include <linux/config.h>
#include <linux/init.h>
@@ -32,6 +34,7 @@
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/efi.h>
+#include <asm/mca.h>
extern char _end;
@@ -41,10 +44,13 @@ struct cpuinfo_ia64 cpu_data[NR_CPUS];
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param ia64_boot_param;
struct screen_info screen_info;
-unsigned long cpu_initialized = 0;
/* This tells _start which CPU is booting. */
int cpu_now_booting = 0;
+#ifdef CONFIG_SMP
+volatile unsigned long cpu_online_map;
+#endif
+
#define COMMAND_LINE_SIZE 512
char saved_command_line[COMMAND_LINE_SIZE]; /* used in proc filesystem */
@@ -101,7 +107,6 @@ void __init
setup_arch (char **cmdline_p)
{
unsigned long max_pfn, bootmap_start, bootmap_size;
- u64 progress;
/*
* The secondary bootstrap loader passes us the boot
@@ -147,7 +152,10 @@ setup_arch (char **cmdline_p)
printk("args to kernel: %s\n", *cmdline_p);
-#ifndef CONFIG_SMP
+#ifdef CONFIG_SMP
+ bootstrap_processor = hard_smp_processor_id();
+ current->processor = bootstrap_processor;
+#else
cpu_init();
identify_cpu(&cpu_data[0]);
#endif
@@ -168,6 +176,11 @@ setup_arch (char **cmdline_p)
conswitchp = &dummy_con;
# endif
#endif
+
+#ifdef CONFIG_IA64_MCA
+ /* enable IA-64 Machine Check Abort Handling */
+ ia64_mca_init();
+#endif
paging_init();
platform_setup(cmdline_p);
}
@@ -183,8 +196,10 @@ get_cpuinfo (char *buffer)
unsigned long mask;
for (c = cpu_data; c < cpu_data + NR_CPUS; ++c) {
- if (!(cpu_initialized & (1UL << (c - cpu_data))))
+#ifdef CONFIG_SMP
+ if (!(cpu_online_map & (1UL << (c - cpu_data))))
continue;
+#endif
mask = c->features;
@@ -209,7 +224,7 @@ get_cpuinfo (char *buffer)
if (mask)
sprintf(cp, " 0x%lx", mask);
- p += sprintf(buffer,
+ p += sprintf(p,
"CPU# %lu\n"
"\tvendor : %s\n"
"\tfamily : %s\n"
@@ -303,8 +318,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
void
cpu_init (void)
{
- int nr = smp_processor_id();
-
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
@@ -318,11 +331,6 @@ cpu_init (void)
*/
ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
ia64_set_fpu_owner(0); /* initialize ar.k5 */
-
- if (test_and_set_bit(nr, &cpu_initialized)) {
- printk("CPU#%d already initialized!\n", nr);
- machine_halt();
- }
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
}
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 19be1f840..a0cca9da7 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -94,7 +94,7 @@ static long
restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt)
{
struct switch_stack *sw = (struct switch_stack *) pt - 1;
- unsigned long ip, flags, nat, um;
+ unsigned long ip, flags, nat, um, cfm;
long err;
/* restore scratch that always needs gets updated during signal delivery: */
@@ -102,20 +102,24 @@ restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt)
err |= __get_user(nat, &sc->sc_nat);
err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
- err |= __get_user(pt->ar_fpsr, &sc->sc_ar_fpsr);
- err |= __get_user(pt->ar_pfs, &sc->sc_ar_pfs);
+ err |= __get_user(cfm, &sc->sc_cfm);
err |= __get_user(um, &sc->sc_um); /* user mask */
err |= __get_user(pt->ar_rsc, &sc->sc_ar_rsc);
err |= __get_user(pt->ar_ccv, &sc->sc_ar_ccv);
err |= __get_user(pt->ar_unat, &sc->sc_ar_unat);
+ err |= __get_user(pt->ar_fpsr, &sc->sc_ar_fpsr);
+ err |= __get_user(pt->ar_pfs, &sc->sc_ar_pfs);
err |= __get_user(pt->pr, &sc->sc_pr); /* predicates */
err |= __get_user(pt->b0, &sc->sc_br[0]); /* b0 (rp) */
- err |= __get_user(pt->b6, &sc->sc_br[6]);
+ err |= __get_user(pt->b6, &sc->sc_br[6]); /* b6 */
+ err |= __get_user(pt->b7, &sc->sc_br[7]); /* b7 */
err |= __copy_from_user(&pt->r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
err |= __copy_from_user(&pt->r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
err |= __copy_from_user(&pt->r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
err |= __copy_from_user(&pt->r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
+ pt->cr_ifs = cfm | (1UL << 63);
+
/* establish new instruction pointer: */
pt->cr_iip = ip & ~0x3UL;
ia64_psr(pt)->ri = ip & 0x3;
@@ -240,6 +244,7 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct pt_regs *pt)
nat = ia64_get_nat_bits(pt, sw);
err = __put_user(flags, &sc->sc_flags);
+
err |= __put_user(nat, &sc->sc_nat);
err |= PUT_SIGSET(mask, &sc->sc_mask);
err |= __put_user(pt->cr_ipsr & IA64_PSR_UM, &sc->sc_um);
@@ -255,8 +260,8 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct pt_regs *pt)
err |= __copy_to_user(&sc->sc_gr[1], &pt->r1, 3*8); /* r1-r3 */
err |= __copy_to_user(&sc->sc_gr[8], &pt->r8, 4*8); /* r8-r11 */
- err |= __copy_to_user(&sc->sc_gr[12], &pt->r12, 4*8); /* r12-r15 */
- err |= __copy_to_user(&sc->sc_gr[16], &pt->r16, 16*8); /* r16-r31 */
+ err |= __copy_to_user(&sc->sc_gr[12], &pt->r12, 4*8); /* r12-r15 */
+ err |= __copy_to_user(&sc->sc_gr[16], &pt->r16, 16*8); /* r16-r31 */
err |= __put_user(pt->cr_iip + ia64_psr(pt)->ri, &sc->sc_ip);
err |= __put_user(pt->r12, &sc->sc_gr[12]); /* r12 */
@@ -415,10 +420,12 @@ ia64_do_signal (sigset_t *oldset, struct pt_regs *pt, long in_syscall)
if ((current->flags & PF_PTRACED) && signr != SIGKILL) {
/* Let the debugger run. */
current->exit_code = signr;
+ current->thread.siginfo = &info;
set_current_state(TASK_STOPPED);
notify_parent(current, SIGCHLD);
schedule();
signr = current->exit_code;
+ current->thread.siginfo = 0;
/* We're back. Did the debugger cancel the sig? */
if (!signr)
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index ed5d594a6..43d9f2dde 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -6,6 +6,8 @@
*
* Lots of stuff stolen from arch/alpha/kernel/smp.c
*
+ * 00/03/31 Rohit Seth <rohit.seth@intel.com> Fixes for Bootstrap Processor & cpu_online_map
+ * now gets done here (instead of setup.c)
* 99/10/05 davidm Update to bring it in sync with new command-line processing scheme.
*/
#define __KERNEL_SYSCALLS__
@@ -24,15 +26,6 @@
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
-
-#ifdef CONFIG_KDB
-#include <linux/kdb.h>
-void smp_kdb_interrupt (struct pt_regs* regs);
-void kdb_global(int cpuid);
-extern unsigned long smp_kdb_wait;
-extern int kdb_new_cpu;
-#endif
-
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -47,25 +40,24 @@ extern int kdb_new_cpu;
extern int cpu_idle(void * unused);
extern void _start(void);
-extern int cpu_now_booting; /* Used by head.S to find idle task */
-extern unsigned long cpu_initialized; /* Bitmap of available cpu's */
-extern struct cpuinfo_ia64 cpu_data[NR_CPUS]; /* Duh... */
+extern int cpu_now_booting; /* Used by head.S to find idle task */
+extern volatile unsigned long cpu_online_map; /* Bitmap of available cpu's */
+extern struct cpuinfo_ia64 cpu_data[NR_CPUS]; /* Duh... */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
-#ifdef CONFIG_KDB
-unsigned long cpu_online_map = 1;
-#endif
+struct smp_boot_data __initdata smp;
+char __initdata no_int_routing = 0;
+unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
volatile int __cpu_number_map[NR_CPUS] = { -1, }; /* SAPIC ID -> Logical ID */
volatile int __cpu_logical_map[NR_CPUS] = { -1, }; /* logical ID -> SAPIC ID */
int smp_num_cpus = 1;
-int bootstrap_processor = -1; /* SAPIC ID of BSP */
-int smp_threads_ready = 0; /* Set when the idlers are all forked */
-unsigned long ipi_base_addr = IPI_DEFAULT_BASE_ADDR; /* Base addr of IPI table */
+int bootstrap_processor = -1; /* SAPIC ID of BSP */
+int smp_threads_ready = 0; /* Set when the idlers are all forked */
cycles_t cacheflush_time = 0;
-unsigned long ap_wakeup_vector = -1; /* External Int to use to wakeup AP's */
-static int max_cpus = -1; /* Command line */
+unsigned long ap_wakeup_vector = -1; /* External Int to use to wakeup AP's */
+static int max_cpus = -1; /* Command line */
static unsigned long ipi_op[NR_CPUS];
struct smp_call_struct {
void (*func) (void *info);
@@ -76,20 +68,13 @@ struct smp_call_struct {
};
static struct smp_call_struct *smp_call_function_data;
-#ifdef CONFIG_KDB
-unsigned long smp_kdb_wait = 0; /* Bitmask of waiters */
-#endif
-
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
extern spinlock_t ivr_read_lock;
#endif
-int use_xtp = 0; /* XXX */
-
#define IPI_RESCHEDULE 0
#define IPI_CALL_FUNC 1
#define IPI_CPU_STOP 2
-#define IPI_KDB_INTERRUPT 4
/*
* Setup routine for controlling SMP activation
@@ -118,13 +103,22 @@ static int __init maxcpus(char *str)
__setup("maxcpus=", maxcpus);
+static int __init
+nointroute(char *str)
+{
+ no_int_routing = 1;
+ return 1;
+}
+
+__setup("nointroute", nointroute);
+
/*
* Yoink this CPU from the runnable list...
*/
void
halt_processor(void)
{
- clear_bit(smp_processor_id(), &cpu_initialized);
+ clear_bit(smp_processor_id(), &cpu_online_map);
max_xtp();
__cli();
for (;;)
@@ -188,12 +182,6 @@ handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
halt_processor();
break;
-#ifdef CONFIG_KDB
- case IPI_KDB_INTERRUPT:
- smp_kdb_interrupt(regs);
- break;
-#endif
-
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
@@ -205,32 +193,6 @@ handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
}
static inline void
-send_IPI(int dest_cpu, unsigned char vector)
-{
- unsigned long ipi_addr;
- unsigned long ipi_data;
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
- unsigned long flags;
-#endif
-
- ipi_data = vector;
- ipi_addr = ipi_base_addr | ((dest_cpu << 8) << 4); /* 16-bit SAPIC ID's; assume CPU bus 0 */
- mb();
-
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
- /*
- * Disable IVR reads
- */
- spin_lock_irqsave(&ivr_read_lock, flags);
- writeq(ipi_data, ipi_addr);
- spin_unlock_irqrestore(&ivr_read_lock, flags);
-#else
- writeq(ipi_data, ipi_addr);
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
-
-}
-
-static inline void
send_IPI_single(int dest_cpu, int op)
{
@@ -238,7 +200,7 @@ send_IPI_single(int dest_cpu, int op)
return;
ipi_op[dest_cpu] |= (1 << op);
- send_IPI(dest_cpu, IPI_IRQ);
+ ipi_send(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
}
static inline void
@@ -452,9 +414,11 @@ start_ap(void)
ia64_clear_ic(flags);
ia64_set_rr( 0, (0x1000 << 8) | (_PAGE_SIZE_1M << 2));
ia64_set_rr(PAGE_OFFSET, (ia64_rid(0, PAGE_OFFSET) << 8) | (_PAGE_SIZE_256M << 2));
+ ia64_srlz_d();
ia64_itr(0x3, 1, PAGE_OFFSET,
pte_val(mk_pte_phys(0, __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RWX))),
_PAGE_SIZE_256M);
+ ia64_srlz_i();
flags = (IA64_PSR_IT | IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH |
IA64_PSR_BN);
@@ -492,6 +456,10 @@ smp_callin(void)
smp_store_cpu_info(smp_processor_id());
smp_setup_percpu_timer(smp_processor_id());
+ if (test_and_set_bit(smp_processor_id(), &cpu_online_map)) {
+ printk("CPU#%d already initialized!\n", smp_processor_id());
+ machine_halt();
+ }
while (!smp_threads_ready)
mb();
@@ -505,6 +473,9 @@ smp_callin(void)
ia64_set_lrr1(0, 1);
__sti(); /* Interrupts have been off till now. */
+
+ printk("SMP: CPU %d starting idle loop\n", smp_processor_id());
+
cpu_idle(NULL);
}
@@ -565,7 +536,7 @@ smp_boot_one_cpu(int cpuid, int cpunum)
cpu_now_booting = cpunum;
/* Kick the AP in the butt */
- send_IPI(cpuid, ap_wakeup_vector);
+ ipi_send(cpuid, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
ia64_srlz_i();
mb();
@@ -575,24 +546,20 @@ smp_boot_one_cpu(int cpuid, int cpunum)
* is waiting for smp_threads_ready to be 1 and we can move on.
*/
for (timeout = 0; timeout < 100000; timeout++) {
- if (test_bit(cpuid, &cpu_initialized))
+ if (test_bit(cpuid, &cpu_online_map))
goto alive;
- udelay(10);
+ udelay(100);
barrier();
}
printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
- return -1;
+ return 0;
alive:
/* Remember the AP data */
__cpu_number_map[cpuid] = cpunum;
-#ifdef CONFIG_KDB
- cpu_online_map |= (1<<cpunum);
- printk ("DEBUGGER: cpu_online_map = 0x%08x\n", cpu_online_map);
-#endif
__cpu_logical_map[cpunum] = cpuid;
- return 0;
+ return 1;
}
@@ -607,9 +574,6 @@ smp_boot_cpus(void)
{
int i, cpu_count = 1;
unsigned long bogosum;
- int sapic_id;
- extern int acpi_cpus;
- extern int acpi_apic_map[32];
/* Take care of some initial bookkeeping. */
memset(&__cpu_number_map, -1, sizeof(__cpu_number_map));
@@ -634,6 +598,10 @@ smp_boot_cpus(void)
#endif
smp_setup_percpu_timer(bootstrap_processor);
+ if (test_and_set_bit(bootstrap_processor, &cpu_online_map)) {
+ printk("CPU#%d already initialized!\n", smp_processor_id());
+ machine_halt();
+ }
init_idle();
/* Nothing to do when told not to. */
@@ -642,33 +610,36 @@ smp_boot_cpus(void)
return;
}
- if (acpi_cpus > 1) {
+ if (max_cpus != -1)
+ printk("Limiting CPUs to %d\n", max_cpus);
+
+ if (smp.cpu_count > 1) {
printk(KERN_INFO "SMP: starting up secondaries.\n");
for (i = 0; i < NR_CPUS; i++) {
- if (acpi_apic_map[i] == -1 ||
- acpi_apic_map[i] == bootstrap_processor << 8) /* XXX Fix me Walt */
+ if (smp.cpu_map[i] == -1 ||
+ smp.cpu_map[i] == bootstrap_processor)
continue;
- /*
- * IA64 SAPIC ID's are 16-bits. See asm/smp.h for more info
- */
- sapic_id = acpi_apic_map[i] >> 8;
- if (smp_boot_one_cpu(sapic_id, cpu_count))
+ if (smp_boot_one_cpu(smp.cpu_map[i], cpu_count) == 0)
continue;
cpu_count++; /* Count good CPUs only... */
+ /*
+ * Bail if we've started as many CPUS as we've been told to.
+ */
+ if (cpu_count == max_cpus)
+ break;
}
}
if (cpu_count == 1) {
printk(KERN_ERR "SMP: Bootstrap processor only.\n");
- return;
}
bogosum = 0;
for (i = 0; i < NR_CPUS; i++) {
- if (cpu_initialized & (1L << i))
+ if (cpu_online_map & (1L << i))
bogosum += cpu_data[i].loops_per_sec;
}
@@ -733,45 +704,8 @@ init_smp_config(void)
if (sal_ret < 0) {
printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret));
printk(" Forcing UP mode\n");
+ max_cpus = 0;
smp_num_cpus = 1;
}
}
-
-#ifdef CONFIG_KDB
-void smp_kdb_stop (int all, struct pt_regs* regs)
-{
- if (all)
- {
- printk ("Sending IPI to all on CPU %i\n", smp_processor_id ());
- smp_kdb_wait = 0xffffffff;
- clear_bit (smp_processor_id(), &smp_kdb_wait);
- send_IPI_allbutself (IPI_KDB_INTERRUPT);
- }
- else
- {
- printk ("Sending IPI to self on CPU %i\n",
- smp_processor_id ());
- set_bit (smp_processor_id(), &smp_kdb_wait);
- clear_bit (__cpu_logical_map[kdb_new_cpu], &smp_kdb_wait);
- smp_kdb_interrupt (regs);
- }
-}
-
-void smp_kdb_interrupt (struct pt_regs* regs)
-{
- printk ("kdb: IPI on CPU %i with mask 0x%08x\n",
- smp_processor_id (), smp_kdb_wait);
-
- /* All CPUs spin here forever */
- while (test_bit (smp_processor_id(), &smp_kdb_wait));
-
- /* Enter KDB on CPU selected by KDB on the last CPU */
- if (__cpu_logical_map[kdb_new_cpu] == smp_processor_id ())
- {
- kdb (KDB_REASON_SWITCH, 0, regs);
- }
-}
-
-#endif
-
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
new file mode 100644
index 000000000..3550cc390
--- /dev/null
+++ b/arch/ia64/kernel/smpboot.c
@@ -0,0 +1,2 @@
+unsigned long cpu_online_map;
+
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 133520b84..b88855ce4 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -9,11 +9,12 @@
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
*/
#include <linux/config.h>
+
#include <linux/init.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
+#include <linux/interrupt.h>
#include <asm/delay.h>
#include <asm/efi.h>
@@ -136,6 +137,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static unsigned long last_time;
static unsigned char count;
int cpu = smp_processor_id();
+ int printed = 0;
/*
* Here we are in the timer irq handler. We have irqs locally
@@ -145,9 +147,14 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
write_lock(&xtime_lock);
while (1) {
- /* do kernel PC profiling here. */
+ /*
+ * Do kernel PC profiling here. We multiply the
+ * instruction number by four so that we can use a
+ * prof_shift of 2 to get instruction-level instead of
+ * just bundle-level accuracy.
+ */
if (!user_mode(regs))
- do_profile(regs->cr_iip);
+ do_profile(regs->cr_iip + 4*ia64_psr(regs)->ri);
#ifdef CONFIG_SMP
smp_do_timer(regs);
@@ -172,15 +179,18 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if !(defined(CONFIG_IA64_SOFTSDV_HACKS) && defined(CONFIG_SMP))
/*
- * SoftSDV in SMP mode is _slow_, so we do "loose" ticks,
+ * SoftSDV in SMP mode is _slow_, so we do "lose" ticks,
* but it's really OK...
*/
if (count > 0 && jiffies - last_time > 5*HZ)
count = 0;
if (count++ == 0) {
last_time = jiffies;
- printk("Lost clock tick on CPU %d (now=%lx, next=%lx)!!\n",
- cpu, ia64_get_itc(), itm.next[cpu]);
+ if (!printed) {
+ printk("Lost clock tick on CPU %d (now=%lx, next=%lx)!!\n",
+ cpu, ia64_get_itc(), itm.next[cpu]);
+ printed = 1;
+ }
# ifdef CONFIG_IA64_DEBUG_IRQ
printk("last_cli_ip=%lx\n", last_cli_ip);
# endif
@@ -303,8 +313,6 @@ void __init
time_init (void)
{
/* we can't do request_irq() here because the kmalloc() would fail... */
- irq_desc[TIMER_IRQ].status = IRQ_DISABLED;
- irq_desc[TIMER_IRQ].handler = &irq_type_ia64_internal;
setup_irq(TIMER_IRQ, &timer_irqaction);
efi_gettimeofday(&xtime);
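
Illustrative only (not part of the patch): the profiling change above folds
the restart-instruction slot into the sampled address.  An IA-64 bundle is
16 bytes and holds three instruction slots, so adding 4*psr.ri spreads the
slots over distinct 4-byte offsets; with a prof_shift of 2 the profile
buckets then resolve individual slots instead of whole bundles.  A
hypothetical helper:

	static unsigned long example_profile_address(unsigned long cr_iip, unsigned int ri)
	{
		return cr_iip + 4 * ri;		/* ri = slot within the bundle, 0..2 */
	}
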
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index ddb079f13..3a7706a27 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -32,10 +32,7 @@ register double f30 asm ("f30"); register double f31 asm ("f31");
#include <linux/init.h>
#include <linux/sched.h>
-#ifdef CONFIG_KDB
-# include <linux/kdb.h>
-#endif
-
+#include <asm/ia32.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
@@ -88,13 +85,6 @@ die_if_kernel (char *str, struct pt_regs *regs, long err)
printk("%s[%d]: %s %ld\n", current->comm, current->pid, str, err);
-#ifdef CONFIG_KDB
- while (1) {
- kdb(KDB_REASON_PANIC, 0, regs);
- printk("Cant go anywhere from Panic!\n");
- }
-#endif
-
show_regs(regs);
if (current->thread.flags & IA64_KERNEL_DEATH) {
@@ -440,7 +430,18 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
case 35: /* Taken Branch Trap */
case 36: /* Single Step Trap */
switch (vector) {
- case 29: siginfo.si_code = TRAP_BRKPT; break;
+ case 29:
+ siginfo.si_code = TRAP_HWBKPT;
+#ifdef CONFIG_ITANIUM
+ /*
+ * Erratum 10 (IFA may contain incorrect address) now has
+ * "NoFix" status. There are no plans for fixing this.
+ */
+ if (ia64_psr(regs)->is == 0)
+ ifa = regs->cr_iip;
+#endif
+ siginfo.si_addr = (void *) ifa;
+ break;
case 35: siginfo.si_code = TRAP_BRANCH; break;
case 36: siginfo.si_code = TRAP_TRACE; break;
}
@@ -479,12 +480,18 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
break;
case 45:
- printk("Unexpected IA-32 exception\n");
+#ifdef CONFIG_IA32_SUPPORT
+ if (ia32_exception(regs, isr) == 0)
+ return;
+#endif
+ printk("Unexpected IA-32 exception (Trap 45)\n");
+ printk(" iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", regs->cr_iip, ifa, isr);
force_sig(SIGSEGV, current);
- return;
+ break;
case 46:
- printk("Unexpected IA-32 intercept trap\n");
+ printk("Unexpected IA-32 intercept trap (Trap 46)\n");
+ printk(" iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", regs->cr_iip, ifa, isr);
force_sig(SIGSEGV, current);
return;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 014adcf35..35e8cb846 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -305,7 +305,7 @@ set_rse_reg(struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
DPRINT(("rnat @%p = 0x%lx nat=%d rnatval=%lx\n",
addr, rnats, nat, rnats &ia64_rse_slot_num(slot)));
- if ( nat ) {
+ if (nat) {
rnats |= __IA64_UL(1) << ia64_rse_slot_num(slot);
} else {
rnats &= ~(__IA64_UL(1) << ia64_rse_slot_num(slot));
@@ -385,7 +385,8 @@ get_rse_reg(struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat
ia64_peek(regs, current, (unsigned long)addr, &rnats);
DPRINT(("rnat @%p = 0x%lx\n", addr, rnats));
- if ( nat ) *nat = rnats >> ia64_rse_slot_num(slot) & 0x1;
+ if (nat)
+ *nat = rnats >> ia64_rse_slot_num(slot) & 0x1;
}
@@ -401,7 +402,7 @@ setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
/*
* First takes care of stacked registers
*/
- if ( regnum >= IA64_FIRST_STACKED_GR ) {
+ if (regnum >= IA64_FIRST_STACKED_GR) {
set_rse_reg(regs, regnum, val, nat);
return;
}
@@ -414,7 +415,7 @@ setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
/*
* Now look at registers in [0-31] range and init correct UNAT
*/
- if ( GR_IN_SW(regnum) ) {
+ if (GR_IN_SW(regnum)) {
addr = (unsigned long)sw;
unat = &sw->ar_unat;
} else {
@@ -437,7 +438,7 @@ setreg(unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
*/
bitmask = __IA64_UL(1) << (addr >> 3 & 0x3f);
DPRINT(("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, unat, *unat));
- if ( nat ) {
+ if (nat) {
*unat |= bitmask;
} else {
*unat &= ~bitmask;
@@ -465,7 +466,7 @@ setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
* fly to store to the right register.
* For now, we are using the (slow) save/restore way.
*/
- if ( regnum >= IA64_FIRST_ROTATING_FR ) {
+ if (regnum >= IA64_FIRST_ROTATING_FR) {
/*
* force a save of [32-127] to tss
* we use the __() form to avoid fiddling with the dfh bit
@@ -489,7 +490,7 @@ setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
/*
* pt_regs or switch_stack ?
*/
- if ( FR_IN_SW(regnum) ) {
+ if (FR_IN_SW(regnum)) {
addr = (unsigned long)sw;
} else {
addr = (unsigned long)regs;
@@ -542,7 +543,7 @@ getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
* we need to force a save to the tss to get access to it.
* See discussion in setfpreg() for reasons and other ways of doing this.
*/
- if ( regnum >= IA64_FIRST_ROTATING_FR ) {
+ if (regnum >= IA64_FIRST_ROTATING_FR) {
/*
* force a save of [32-127] to tss
@@ -587,7 +588,7 @@ getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
struct switch_stack *sw = (struct switch_stack *)regs -1;
unsigned long addr, *unat;
- if ( regnum >= IA64_FIRST_STACKED_GR ) {
+ if (regnum >= IA64_FIRST_STACKED_GR) {
get_rse_reg(regs, regnum, val, nat);
return;
}
@@ -595,7 +596,7 @@ getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
/*
* take care of r0 (read-only always evaluate to 0)
*/
- if ( regnum == 0 ) {
+ if (regnum == 0) {
*val = 0;
*nat = 0;
return;
@@ -604,7 +605,7 @@ getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
/*
* Now look at registers in [0-31] range and init correct UNAT
*/
- if ( GR_IN_SW(regnum) ) {
+ if (GR_IN_SW(regnum)) {
addr = (unsigned long)sw;
unat = &sw->ar_unat;
} else {
@@ -621,7 +622,8 @@ getreg(unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
/*
* do it only when requested
*/
- if ( nat ) *nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
+ if (nat)
+ *nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
}
static void
@@ -633,7 +635,7 @@ emulate_load_updates(update_t type, load_store_t *ld, struct pt_regs *regs, unsi
* not get to this point in the code but we keep this sanity check,
* just in case.
*/
- if ( ld->x6_op == 1 || ld->x6_op == 3 ) {
+ if (ld->x6_op == 1 || ld->x6_op == 3) {
printk(KERN_ERR __FUNCTION__": register update on speculative load, error\n");
die_if_kernel("unaligned reference on specualtive load with register update\n",
regs, 30);
@@ -644,7 +646,7 @@ emulate_load_updates(update_t type, load_store_t *ld, struct pt_regs *regs, unsi
* at this point, we know that the base register to update is valid i.e.,
* it's not r0
*/
- if ( type == UPD_IMMEDIATE ) {
+ if (type == UPD_IMMEDIATE) {
unsigned long imm;
/*
@@ -670,7 +672,7 @@ emulate_load_updates(update_t type, load_store_t *ld, struct pt_regs *regs, unsi
DPRINT(("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld->x, ld->m, imm, ifa));
- } else if ( ld->m ) {
+ } else if (ld->m) {
unsigned long r2;
int nat_r2;
@@ -719,7 +721,7 @@ emulate_load_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
*
* Note: the first argument is ignored
*/
- if ( access_ok(VERIFY_READ, (void *)ifa, len) < 0 ) {
+ if (access_ok(VERIFY_READ, (void *)ifa, len) < 0) {
DPRINT(("verify area failed on %lx\n", ifa));
return -1;
}
@@ -737,7 +739,7 @@ emulate_load_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
* invalidate the ALAT entry.
* See comment below for explanation on how we handle ldX.a
*/
- if ( ld->x6_op != 0x2 ) {
+ if (ld->x6_op != 0x2) {
/*
* we rely on the macros in unaligned.h for now i.e.,
* we let the compiler figure out how to read memory gracefully.
@@ -767,9 +769,8 @@ emulate_load_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* check for updates on any kind of loads
*/
- if ( ld->op == 0x5 || ld->m )
- emulate_load_updates(ld->op == 0x5 ? UPD_IMMEDIATE: UPD_REG,
- ld, regs, ifa);
+ if (ld->op == 0x5 || ld->m)
+ emulate_load_updates(ld->op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
/*
* handling of various loads (based on EAS2.4):
@@ -882,7 +883,7 @@ emulate_store_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
*
* Note: the first argument is ignored
*/
- if ( access_ok(VERIFY_WRITE, (void *)ifa, len) < 0 ) {
+ if (access_ok(VERIFY_WRITE, (void *)ifa, len) < 0) {
DPRINT(("verify area failed on %lx\n",ifa));
return -1;
}
@@ -926,7 +927,7 @@ emulate_store_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
* ld->r3 can never be r0, because r0 would not generate an
* unaligned access.
*/
- if ( ld->op == 0x5 ) {
+ if (ld->op == 0x5) {
unsigned long imm;
/*
@@ -936,7 +937,7 @@ emulate_store_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* sign extend (8bits) if m set
*/
- if ( ld->m ) imm |= SIGN_EXT9;
+ if (ld->m) imm |= SIGN_EXT9;
/*
* ifa == r3 (NaT is necessarily cleared)
*/
@@ -955,7 +956,8 @@ emulate_store_int(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* stX.rel: use fence instead of release
*/
- if ( ld->x6_op == 0xd ) mb();
+ if (ld->x6_op == 0xd)
+ mb();
return 0;
}
@@ -1033,7 +1035,7 @@ emulate_load_floatpair(unsigned long ifa, load_store_t *ld, struct pt_regs *regs
struct ia64_fpreg fpr_final[2];
unsigned long len = float_fsz[ld->x6_sz];
- if ( access_ok(VERIFY_READ, (void *)ifa, len<<1) < 0 ) {
+ if (access_ok(VERIFY_READ, (void *)ifa, len<<1) < 0) {
DPRINT(("verify area failed on %lx\n", ifa));
return -1;
}
@@ -1055,7 +1057,7 @@ emulate_load_floatpair(unsigned long ifa, load_store_t *ld, struct pt_regs *regs
* ldfpX.a: we don't try to emulate anything but we must
* invalidate the ALAT entry and execute updates, if any.
*/
- if ( ld->x6_op != 0x2 ) {
+ if (ld->x6_op != 0x2) {
/*
* does the unaligned access
*/
@@ -1118,7 +1120,7 @@ emulate_load_floatpair(unsigned long ifa, load_store_t *ld, struct pt_regs *regs
* Check for updates: only immediate updates are available for this
* instruction.
*/
- if ( ld->m ) {
+ if (ld->m) {
/*
* the immediate is implicit given the ldsz of the operation:
@@ -1132,8 +1134,9 @@ emulate_load_floatpair(unsigned long ifa, load_store_t *ld, struct pt_regs *regs
* as long as we don't come here with a ldfpX.s.
* For this reason we keep this sanity check
*/
- if ( ld->x6_op == 1 || ld->x6_op == 3 ) {
- printk(KERN_ERR "%s: register update on speculative load pair, error\n", __FUNCTION__);
+ if (ld->x6_op == 1 || ld->x6_op == 3) {
+ printk(KERN_ERR "%s: register update on speculative load pair, error\n",
+ __FUNCTION__);
}
@@ -1143,7 +1146,7 @@ emulate_load_floatpair(unsigned long ifa, load_store_t *ld, struct pt_regs *regs
/*
* Invalidate ALAT entries, if any, for both registers.
*/
- if ( ld->x6_op == 0x2 ) {
+ if (ld->x6_op == 0x2) {
invala_fr(ld->r1);
invala_fr(ld->imm);
}
@@ -1160,10 +1163,10 @@ emulate_load_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* check for load pair because our masking scheme is not fine grain enough
- if ( ld->x == 1 ) return emulate_load_floatpair(ifa,ld,regs);
+ if (ld->x == 1) return emulate_load_floatpair(ifa,ld,regs);
*/
- if ( access_ok(VERIFY_READ, (void *)ifa, len) < 0 ) {
+ if (access_ok(VERIFY_READ, (void *)ifa, len) < 0) {
DPRINT(("verify area failed on %lx\n", ifa));
return -1;
}
@@ -1187,7 +1190,7 @@ emulate_load_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
* invalidate the ALAT entry.
* See comments in ldX for descriptions on how the various loads are handled.
*/
- if ( ld->x6_op != 0x2 ) {
+ if (ld->x6_op != 0x2) {
/*
* does the unaligned access
@@ -1243,7 +1246,7 @@ emulate_load_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* check for updates on any loads
*/
- if ( ld->op == 0x7 || ld->m )
+ if (ld->op == 0x7 || ld->m)
emulate_load_updates(ld->op == 0x7 ? UPD_IMMEDIATE: UPD_REG,
ld, regs, ifa);
@@ -1274,7 +1277,7 @@ emulate_store_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
*
* Note: the first argument is ignored
*/
- if ( access_ok(VERIFY_WRITE, (void *)ifa, len) < 0 ) {
+ if (access_ok(VERIFY_WRITE, (void *)ifa, len) < 0) {
DPRINT(("verify area failed on %lx\n",ifa));
return -1;
}
@@ -1342,7 +1345,7 @@ emulate_store_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
* ld->r3 can never be r0, because r0 would not generate an
* unaligned access.
*/
- if ( ld->op == 0x7 ) {
+ if (ld->op == 0x7) {
unsigned long imm;
/*
@@ -1352,7 +1355,8 @@ emulate_store_float(unsigned long ifa, load_store_t *ld, struct pt_regs *regs)
/*
* sign extend (8bits) if m set
*/
- if ( ld->m ) imm |= SIGN_EXT9;
+ if (ld->m)
+ imm |= SIGN_EXT9;
/*
* ifa == r3 (NaT is necessarily cleared)
*/
@@ -1384,6 +1388,28 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
load_store_t *insn;
int ret = -1;
+ /*
+ * Unaligned references in the kernel could come from unaligned
+ * arguments to system calls. We fault the user process in
+ * these cases and panic the kernel otherwise (the kernel should
+ * be fixed to not make unaligned accesses).
+ */
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fix;
+
+ fix = search_exception_table(regs->cr_iip);
+ if (fix) {
+ regs->r8 = -EFAULT;
+ if (fix->skip & 1) {
+ regs->r9 = 0;
+ }
+ regs->cr_iip += ((long) fix->skip) & ~15;
+ regs->cr_ipsr &= ~IA64_PSR_RI; /* clear exception slot number */
+ return;
+ }
+ die_if_kernel("Unaligned reference while in kernel\n", regs, 30);
+ /* NOT_REACHED */
+ }
if (current->thread.flags & IA64_THREAD_UAC_SIGBUS) {
struct siginfo si;
@@ -1539,7 +1565,7 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
}
DPRINT(("ret=%d\n", ret));
- if ( ret ) {
+ if (ret) {
lock_kernel();
force_sig(SIGSEGV, current);
unlock_kernel();
@@ -1549,7 +1575,8 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
* because a memory access instruction (M) can never be in the
* last slot of a bundle. But let's keep it for now.
*/
- if ( ipsr->ri == 2 ) regs->cr_iip += 16;
+ if (ipsr->ri == 2)
+ regs->cr_iip += 16;
ipsr->ri = ++ipsr->ri & 3;
}
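
The !user_mode() path added above follows the kernel's exception-table convention: search_exception_table() looks up the faulting bundle address, and the entry's skip field encodes both the offset of the continuation bundle and, in its low bit, whether r9 must be cleared as well (bundle addresses are 16-byte aligned, so the low bits are free for flags). A minimal sketch of that fixup, factored into a hypothetical helper (apply_unaligned_fixup() is not part of the patch; the pt_regs and exception_table_entry layouts assumed are the ia64 kernel ones):

	/* Hedged sketch, not part of the patch: apply an ia64 exception-table fixup. */
	static void
	apply_unaligned_fixup (struct pt_regs *regs, const struct exception_table_entry *fix)
	{
		regs->r8 = -EFAULT;			/* make the interrupted helper return an error */
		if (fix->skip & 1)
			regs->r9 = 0;			/* some helpers also return a second value in r9 */
		regs->cr_iip += (long) fix->skip & ~15;	/* resume at the fixup bundle... */
		regs->cr_ipsr &= ~IA64_PSR_RI;		/* ...starting at instruction slot 0 */
	}
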
diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S
index 0db4a78f8..0b9a453b1 100644
--- a/arch/ia64/lib/clear_user.S
+++ b/arch/ia64/lib/clear_user.S
@@ -210,6 +210,7 @@ long_do_clear:
// if p7 -> coming from st4 or st1 : len3 contains what's left
// We must restore lc/pr even though might not have been used.
.Lexit2:
+ .pred.rel "mutex", p6, p7
(p6) mov len=len2
(p7) mov len=len3
;;
diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S
index 3062716b1..22f205656 100644
--- a/arch/ia64/lib/strlen.S
+++ b/arch/ia64/lib/strlen.S
@@ -186,6 +186,7 @@ recover:
;;
cmp.eq p6,p0=8,val1 // val1==8 ?
(p6) br.wtop.dptk.few 2b // loop until p6 == 0
+ ;; // (avoid WAW on p63)
sub ret0=base,orig // distance from base
sub tmp=8,val1
mov pr=saved_pr,0xffffffffffff0000
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 99cf5048c..02c4c5792 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -94,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- if (!handle_mm_fault(current, vma, address, (isr & IA64_ISR_W) != 0)) {
+ if (!handle_mm_fault(mm, vma, address, (isr & IA64_ISR_W) != 0)) {
/*
* We ran out of memory, or some other thing happened
* to us that made us unable to handle the page fault
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3a630ca8c..b3047ce34 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -237,6 +237,7 @@ put_gate_page (struct page *page, unsigned long address)
if (!PageReserved(page))
printk("put_gate_page: gate page at 0x%lx not in reserved memory\n",
page_address(page));
+
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
pmd = pmd_alloc(pgd, address);
if (!pmd) {
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 568f7a347..acad4e200 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -138,7 +138,7 @@ flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end)
*/
++nbits;
if (((1UL << nbits) & SUPPORTED_PGBITS) == 0)
- panic("flush_tlb_range: BUG: nbits=%lu\n", nbits);
+ panic("flush_tlb_range: BUG: nbits=%lu\n", nbits);
}
start &= ~((1UL << nbits) - 1);
diff --git a/arch/ia64/tools/Makefile b/arch/ia64/tools/Makefile
index 0491ca943..974634e17 100644
--- a/arch/ia64/tools/Makefile
+++ b/arch/ia64/tools/Makefile
@@ -8,6 +8,8 @@ TARGET = $(TOPDIR)/include/asm-ia64/offsets.h
all:
+mrproper:
+
clean:
rm -f print_offsets.s print_offsets offsets.h
diff --git a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c
index f1b298e21..7e5901144 100644
--- a/arch/ia64/tools/print_offsets.c
+++ b/arch/ia64/tools/print_offsets.c
@@ -12,6 +12,8 @@
* file, be sure to verify that the awk procedure still works (see
* print_offsets.awk).
*/
+#include <linux/config.h>
+
#include <linux/sched.h>
#include <asm-ia64/processor.h>
@@ -50,6 +52,9 @@ tab[] =
{ "IA64_TASK_PROCESSOR_OFFSET", offsetof (struct task_struct, processor) },
{ "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
+#ifdef CONFIG_IA32_SUPPORT
+ { "IA64_TASK_THREAD_SIGMASK_OFFSET",offsetof (struct task_struct, thread.un.sigmask) },
+#endif
{ "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
{ "IA64_TASK_MM_OFFSET", offsetof (struct task_struct, mm) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
@@ -63,6 +68,8 @@ tab[] =
{ "IA64_SIGCONTEXT_FLAGS_OFFSET", offsetof (struct sigcontext, sc_flags) },
{ "IA64_SIGCONTEXT_CFM_OFFSET", offsetof (struct sigcontext, sc_cfm) },
{ "IA64_SIGCONTEXT_FR6_OFFSET", offsetof (struct sigcontext, sc_fr[6]) },
+ { "IA64_CLONE_VFORK", CLONE_VFORK },
+ { "IA64_CLONE_VM", CLONE_VM },
};
static const char *tabs = "\t\t\t\t\t\t\t\t\t\t";
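
Each tab[] entry above pairs a symbolic name with a compile-time constant (an offsetof() into a kernel structure, or a flag value such as CLONE_VM); print_offsets emits these so the awk step can generate $(TOPDIR)/include/asm-ia64/offsets.h for use from assembly code. A minimal, self-contained sketch of the idea (emit_defines() is a hypothetical name, not the tool's actual output routine; the tabs string above suggests the real program also pads names to align columns):

	/* Hedged sketch: turn a {name, value} table into #define lines. */
	#include <stdio.h>

	struct entry {
		const char *name;
		unsigned long value;
	};

	static void
	emit_defines (const struct entry *tab, int count)
	{
		int i;

		for (i = 0; i < count; ++i)
			printf ("#define %s\t%lu\t/* 0x%lx */\n",
				tab[i].name, tab[i].value, tab[i].value);
	}
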
diff --git a/arch/ia64/vmlinux.lds.S b/arch/ia64/vmlinux.lds.S
index b095baeb9..08e7f9f9a 100644
--- a/arch/ia64/vmlinux.lds.S
+++ b/arch/ia64/vmlinux.lds.S
@@ -8,7 +8,7 @@ OUTPUT_ARCH(ia64)
ENTRY(_start)
SECTIONS
{
- v = PAGE_OFFSET; /* this symbol is here to make debugging with kdb easier... */
+ v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
. = KERNEL_START;
@@ -39,21 +39,6 @@ SECTIONS
{ *(__ex_table) }
__stop___ex_table = .;
-#if defined(CONFIG_KDB)
- /* Kernel symbols and strings for kdb */
-# define KDB_MEAN_SYMBOL_SIZE 48
-# define KDB_SPACE (CONFIG_KDB_STBSIZE * KDB_MEAN_SYMBOL_SIZE)
- . = ALIGN(8);
- _skdb = .;
- .kdb : AT(ADDR(.kdb) - PAGE_OFFSET)
- {
- *(kdbsymtab)
- *(kdbstrings)
- }
- _ekdb = .;
- . = _skdb + KDB_SPACE;
-#endif
-
/* Kernel symbol names for modules: */
.kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
{ *(.kstrtab) }