author    Ralf Baechle <ralf@linux-mips.org>  2000-06-25 01:20:01 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-06-25 01:20:01 +0000
commit    3797ba0b62debb71af4606910acacc9896a9ae3b (patch)
tree      414eea76253c7871bfdf3bd9d1817771eb40917c /arch/ia64
parent    2b6c0c580795a4404f72d2a794214dd9e080709d (diff)
Merge with Linux 2.4.0-test2.
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Makefile | 14
-rw-r--r--  arch/ia64/boot/Makefile | 3
-rw-r--r--  arch/ia64/config.in | 10
-rw-r--r--  arch/ia64/defconfig | 8
-rw-r--r--  arch/ia64/dig/iosapic.c | 47
-rw-r--r--  arch/ia64/dig/setup.c | 4
-rw-r--r--  arch/ia64/hp/hpsim_irq.c | 2
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 14
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 65
-rw-r--r--  arch/ia64/ia32/ia32_signal.c | 42
-rw-r--r--  arch/ia64/ia32/ia32_support.c | 54
-rw-r--r--  arch/ia64/ia32/ia32_traps.c | 83
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 664
-rw-r--r--  arch/ia64/kernel/Makefile | 20
-rw-r--r--  arch/ia64/kernel/acpi.c | 10
-rw-r--r--  arch/ia64/kernel/brl_emu.c | 220
-rw-r--r--  arch/ia64/kernel/efi.c | 73
-rw-r--r--  arch/ia64/kernel/efi_stub.S | 73
-rw-r--r--  arch/ia64/kernel/entry.S | 573
-rw-r--r--  arch/ia64/kernel/entry.h | 59
-rw-r--r--  arch/ia64/kernel/fw-emu.c | 20
-rw-r--r--  arch/ia64/kernel/gate.S | 26
-rw-r--r--  arch/ia64/kernel/head.S | 122
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 72
-rw-r--r--  arch/ia64/kernel/irq.c | 17
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 133
-rw-r--r--  arch/ia64/kernel/ivt.S | 196
-rw-r--r--  arch/ia64/kernel/mca.c | 5
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 1
-rw-r--r--  arch/ia64/kernel/minstate.h | 1
-rw-r--r--  arch/ia64/kernel/pal.S | 192
-rw-r--r--  arch/ia64/kernel/palinfo.c | 780
-rw-r--r--  arch/ia64/kernel/pci-dma.c | 4
-rw-r--r--  arch/ia64/kernel/pci.c | 6
-rw-r--r--  arch/ia64/kernel/process.c | 193
-rw-r--r--  arch/ia64/kernel/ptrace.c | 755
-rw-r--r--  arch/ia64/kernel/sal_stub.S | 118
-rw-r--r--  arch/ia64/kernel/setup.c | 94
-rw-r--r--  arch/ia64/kernel/signal.c | 279
-rw-r--r--  arch/ia64/kernel/smp.c | 198
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 173
-rw-r--r--  arch/ia64/kernel/time.c | 120
-rw-r--r--  arch/ia64/kernel/traps.c | 56
-rw-r--r--  arch/ia64/kernel/unaligned.c | 57
-rw-r--r--  arch/ia64/kernel/unwind.c | 2015
-rw-r--r--  arch/ia64/kernel/unwind_decoder.c | 459
-rw-r--r--  arch/ia64/kernel/unwind_i.h | 160
-rw-r--r--  arch/ia64/lib/Makefile | 14
-rw-r--r--  arch/ia64/lib/clear_page.S | 18
-rw-r--r--  arch/ia64/lib/clear_user.S | 23
-rw-r--r--  arch/ia64/lib/copy_page.S | 16
-rw-r--r--  arch/ia64/lib/copy_user.S | 18
-rw-r--r--  arch/ia64/lib/do_csum.S | 15
-rw-r--r--  arch/ia64/lib/flush.S | 17
-rw-r--r--  arch/ia64/lib/idiv.S | 14
-rw-r--r--  arch/ia64/lib/io.c | 54
-rw-r--r--  arch/ia64/lib/memset.S | 18
-rw-r--r--  arch/ia64/lib/strlen.S | 16
-rw-r--r--  arch/ia64/lib/strlen_user.S | 16
-rw-r--r--  arch/ia64/lib/strncpy_from_user.S | 10
-rw-r--r--  arch/ia64/lib/strnlen_user.S | 15
-rw-r--r--  arch/ia64/mm/fault.c | 28
-rw-r--r--  arch/ia64/mm/init.c | 81
-rw-r--r--  arch/ia64/mm/tlb.c | 88
-rw-r--r--  arch/ia64/tools/Makefile | 2
-rw-r--r--  arch/ia64/tools/print_offsets.c | 91
-rw-r--r--  arch/ia64/vmlinux.lds.S | 48
67 files changed, 7082 insertions, 1810 deletions
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 897bca8e7..a87cb7c08 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -12,15 +12,11 @@ NM := $(CROSS_COMPILE)nm -B
AWK := awk
LINKFLAGS = -static -T arch/$(ARCH)/vmlinux.lds
-# next line is for HP compiler backend:
-#AFLAGS += -DGCC_RETVAL_POINTER_IN_R8
-# The next line is needed when compiling with the July snapshot of the Cygnus compiler:
-#EXTRA = -D__GCC_DOESNT_KNOW_IN_REGS__
-# next two lines are for the September snapshot of the Cygnus compiler:
-AFLAGS += -D__GCC_MULTIREG_RETVALS__ -Wa,-x
-EXTRA = -D__GCC_MULTIREG_RETVALS__
-
-CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127
+AFLAGS += -Wa,-x
+EXTRA =
+
+CFLAGS := $(CFLAGS) -pipe $(EXTRA) -Wa,-x -ffixed-r13 -mfixed-range=f10-f15,f32-f127 \
+ -mconstant-gp -funwind-tables
ifdef CONFIG_IA64_GENERIC
CORE_FILES := arch/$(ARCH)/hp/hp.a \
diff --git a/arch/ia64/boot/Makefile b/arch/ia64/boot/Makefile
index 5228d6c57..bdeef72ff 100644
--- a/arch/ia64/boot/Makefile
+++ b/arch/ia64/boot/Makefile
@@ -25,7 +25,8 @@ endif
all: $(TARGETS)
bootloader: $(OBJECTS)
- $(LD) $(LINKFLAGS) $(OBJECTS) $(LIBS) -o bootloader
+ $(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
+ -o bootloader
clean:
rm -f $(TARGETS)
diff --git a/arch/ia64/config.in b/arch/ia64/config.in
index 3008c6152..b9991cfef 100644
--- a/arch/ia64/config.in
+++ b/arch/ia64/config.in
@@ -7,7 +7,6 @@ mainmenu_option next_comment
comment 'General setup'
define_bool CONFIG_IA64 y
-define_bool CONFIG_ITANIUM y # easy choice for now... ;-)
define_bool CONFIG_ISA n
define_bool CONFIG_SBUS n
@@ -25,10 +24,13 @@ choice 'Kernel page size' \
64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB
if [ "$CONFIG_IA64_DIG" = "y" ]; then
+ define_bool CONFIG_ITANIUM y
+ define_bool CONFIG_IA64_BRL_EMU y
bool ' Enable Itanium A-step specific code' CONFIG_ITANIUM_ASTEP_SPECIFIC
+ bool ' Enable Itanium A1-step specific code' CONFIG_ITANIUM_A1_SPECIFIC
+ bool ' Enable use of global TLB purge instruction (ptc.g)' CONFIG_ITANIUM_PTCG
bool ' Enable SoftSDV hacks' CONFIG_IA64_SOFTSDV_HACKS
- bool ' Enable BigSur hacks' CONFIG_IA64_BIGSUR_HACKS
- bool ' Enable Lion hacks' CONFIG_IA64_LION_HACKS
+ bool ' Enable AzusA hacks' CONFIG_IA64_AZUSA_HACKS
bool ' Emulate PAL/SAL/EFI firmware' CONFIG_IA64_FW_EMU
bool ' Enable IA64 Machine Check Abort' CONFIG_IA64_MCA
fi
@@ -46,6 +48,7 @@ define_bool CONFIG_KCORE_ELF y # On IA-64, we always want an ELF /proc/kcore.
bool 'SMP support' CONFIG_SMP
bool 'Performance monitor support' CONFIG_PERFMON
+bool '/proc/palinfo support' CONFIG_IA64_PALINFO
bool 'Networking support' CONFIG_NET
bool 'System V IPC' CONFIG_SYSVIPC
@@ -190,5 +193,6 @@ bool 'Early printk support (requires VGA!)' CONFIG_IA64_EARLY_PRINTK
bool 'Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
bool 'Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ
bool 'Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS
+bool 'Enable new unwind support' CONFIG_IA64_NEW_UNWIND
endmenu
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 8dac10752..ce3b4bbe7 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -59,6 +59,10 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_PARIDE is not set
# CONFIG_BLK_CPQ_DA is not set
# CONFIG_BLK_DEV_DAC960 is not set
+
+#
+# Additional Block Devices
+#
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_MD is not set
# CONFIG_BLK_DEV_RAM is not set
@@ -111,8 +115,8 @@ CONFIG_BLK_DEV_IDEDMA=y
CONFIG_IDEDMA_PCI_EXPERIMENTAL=y
# CONFIG_IDEDMA_PCI_WIP is not set
# CONFIG_IDEDMA_NEW_DRIVE_LISTINGS is not set
-# CONFIG_BLK_DEV_AEC62XX is not set
-# CONFIG_AEC62XX_TUNING is not set
+# CONFIG_BLK_DEV_AEC6210 is not set
+# CONFIG_AEC6210_TUNING is not set
# CONFIG_BLK_DEV_ALI15X3 is not set
# CONFIG_WDC_ALI15X3 is not set
# CONFIG_BLK_DEV_AMD7409 is not set
diff --git a/arch/ia64/dig/iosapic.c b/arch/ia64/dig/iosapic.c
index 9fd01063e..2426a0193 100644
--- a/arch/ia64/dig/iosapic.c
+++ b/arch/ia64/dig/iosapic.c
@@ -67,6 +67,12 @@ set_rte (unsigned long iosapic_addr, int entry, int pol, int trigger, int delive
(delivery << IO_SAPIC_DELIVERY_SHIFT) |
vector);
+#ifdef CONFIG_IA64_AZUSA_HACKS
+ /* set Flush Disable bit */
+ if (iosapic_addr != 0xc0000000fec00000)
+ low32 |= (1 << 17);
+#endif
+
/* dest contains both id and eid */
high32 = (dest << IO_SAPIC_DEST_SHIFT);
@@ -216,30 +222,33 @@ iosapic_version (unsigned long base_addr)
}
void
-iosapic_init (unsigned long address)
+iosapic_init (unsigned long address, int irqbase)
{
struct hw_interrupt_type *irq_type;
struct pci_vector_struct *vectors;
int i, irq;
- /*
- * Map the legacy ISA devices into the IOSAPIC data. Some of
- * these may get reprogrammed later on with data from the ACPI
- * Interrupt Source Override table.
- */
- for (i = 0; i < 16; i++) {
- irq = isa_irq_to_vector(i);
- iosapic_pin(irq) = i;
- iosapic_bus(irq) = BUS_ISA;
- iosapic_busdata(irq) = 0;
- iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY;
- iosapic_trigger(irq) = IO_SAPIC_EDGE;
- iosapic_polarity(irq) = IO_SAPIC_POL_HIGH;
+ if (irqbase == 0)
+ /*
+ * Map the legacy ISA devices into the IOSAPIC data.
+ * Some of these may get reprogrammed later on with
+ * data from the ACPI Interrupt Source Override table.
+ */
+ for (i = 0; i < 16; i++) {
+ irq = isa_irq_to_vector(i);
+ iosapic_pin(irq) = i;
+ iosapic_bus(irq) = BUS_ISA;
+ iosapic_busdata(irq) = 0;
+ iosapic_dmode(irq) = IO_SAPIC_LOWEST_PRIORITY;
+ iosapic_trigger(irq) = IO_SAPIC_EDGE;
+ iosapic_polarity(irq) = IO_SAPIC_POL_HIGH;
#ifdef DEBUG_IRQ_ROUTING
- printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n", i, irq, iosapic_pin(irq));
+ printk("ISA: IRQ %02x -> Vector %02x IOSAPIC Pin %d\n",
+ i, irq, iosapic_pin(irq));
#endif
- }
+ }
+#ifndef CONFIG_IA64_SOFTSDV_HACKS
/*
* Map the PCI Interrupt data into the ACPI IOSAPIC data using
* the info that the bootstrap loader passed to us.
@@ -250,6 +259,8 @@ iosapic_init (unsigned long address)
irq = vectors[i].irq;
if (irq < 16)
irq = isa_irq_to_vector(irq);
+ if (iosapic_baseirq(irq) != irqbase)
+ continue;
iosapic_bustype(irq) = BUS_PCI;
iosapic_pin(irq) = irq - iosapic_baseirq(irq);
@@ -270,8 +281,12 @@ iosapic_init (unsigned long address)
irq, iosapic_pin(irq));
#endif
}
+#endif /* CONFIG_IA64_SOFTSDV_HACKS */
for (i = 0; i < NR_IRQS; ++i) {
+ if (iosapic_baseirq(i) != irqbase)
+ continue;
+
if (iosapic_pin(i) != -1) {
if (iosapic_trigger(i) == IO_SAPIC_LEVEL)
irq_type = &irq_type_iosapic_level;
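An aside on the redirection-table format that set_rte() above programs: each IOSAPIC RTE is written as two 32-bit words, with the low word packing polarity, trigger mode, delivery mode and the interrupt vector (plus, on AzusA boxes, the Flush Disable bit), and the high word carrying the destination id/eid. A minimal sketch of the low-word composition; the shift values here are assumptions for illustration, the real constants live in the IOSAPIC headers:

/*
 * Sketch only: how set_rte() packs the RTE low word.  The *_SHIFT
 * values below are assumed for illustration, not taken from this diff.
 */
#define IO_SAPIC_DELIVERY_SHIFT		 8	/* assumed */
#define IO_SAPIC_POLARITY_SHIFT		13	/* assumed */
#define IO_SAPIC_TRIGGER_SHIFT		15	/* assumed */

static unsigned int
rte_low32 (int pol, int trigger, int delivery, unsigned char vector, int flush_disable)
{
	unsigned int low32 = ((pol << IO_SAPIC_POLARITY_SHIFT) |
			      (trigger << IO_SAPIC_TRIGGER_SHIFT) |
			      (delivery << IO_SAPIC_DELIVERY_SHIFT) |
			      vector);

	if (flush_disable)
		low32 |= (1 << 17);	/* AzusA: set Flush Disable bit */
	return low32;
}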
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index 133f817a1..4d22f46a4 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -53,6 +53,10 @@ dig_setup (char **cmdline_p)
*/
ROOT_DEV = to_kdev_t(0x0802); /* default to second partition on first drive */
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
+	ROOT_DEV = to_kdev_t(0x0302);	/* 2nd partition on 1st IDE */
+#endif /* CONFIG_IA64_SOFTSDV_HACKS */
+
#ifdef CONFIG_SMP
init_smp_config();
#endif
diff --git a/arch/ia64/hp/hpsim_irq.c b/arch/ia64/hp/hpsim_irq.c
index 00f4d1a51..376f664fa 100644
--- a/arch/ia64/hp/hpsim_irq.c
+++ b/arch/ia64/hp/hpsim_irq.c
@@ -6,6 +6,8 @@
*/
#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/irq.h>
static unsigned int
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 6f702df14..bd29c97e7 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -2,6 +2,8 @@
* IA-32 ELF support.
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
+ *
+ * 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
*/
#include <linux/config.h>
#include <linux/posix_types.h>
@@ -84,6 +86,15 @@ void ia64_elf32_init(struct pt_regs *regs)
current->thread.map_base = 0x40000000;
+
+ /* setup ia32 state for ia32_load_state */
+
+ current->thread.eflag = IA32_EFLAG;
+ current->thread.csd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L, 3L, 1L, 1L, 1L);
+ current->thread.ssd = IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0x3L, 1L, 3L, 1L, 1L, 1L);
+ current->thread.tssd = IA64_SEG_DESCRIPTOR(IA32_PAGE_OFFSET + PAGE_SIZE, 0x1FFFL, 0xBL,
+ 1L, 3L, 1L, 1L, 1L);
+
/* CS descriptor */
__asm__("mov ar.csd = %0" : /* no outputs */
: "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L,
@@ -96,9 +107,6 @@ void ia64_elf32_init(struct pt_regs *regs)
__asm__("mov ar.eflag = %0" : /* no outputs */ : "r" (IA32_EFLAG));
/* Control registers */
- __asm__("mov ar.cflg = %0"
- : /* no outputs */
- : "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
__asm__("mov ar.fsr = %0"
: /* no outputs */
: "r" ((ulong)IA32_FSR_DEFAULT));
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index ff27a02ce..5bf5ad2c3 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -1,14 +1,15 @@
+#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include <asm/signal.h>
+#include "../kernel/entry.h"
+
//
// Get possibly unaligned sigmask argument into an aligned
// kernel buffer
.text
- .proc ia32_rt_sigsuspend
- .global ia32_rt_sigsuspend
-ia32_rt_sigsuspend:
+GLOBAL_ENTRY(ia32_rt_sigsuspend)
// We'll cheat and not do an alloc here since we are ultimately
// going to do a simple branch to the IA64 sys_rt_sigsuspend.
// r32 is still the first argument which is the signal mask.
@@ -32,24 +33,22 @@ ia32_rt_sigsuspend:
st4 [r32]=r2
st4 [r10]=r3
br.cond.sptk.many sys_rt_sigsuspend
+END(ia32_rt_sigsuspend)
.section __ex_table,"a"
data4 @gprel(1b)
data4 (2b-1b)|1
.previous
+GLOBAL_ENTRY(ia32_ret_from_syscall)
+ PT_REGS_UNWIND_INFO(0)
- .endp ia32_rt_sigsuspend
-
- .global ia32_ret_from_syscall
- .proc ia32_ret_from_syscall
-ia32_ret_from_syscall:
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
st8 [r2]=r8 // store return value in slot for r8
br.cond.sptk.few ia64_leave_kernel
- .endp ia32_ret_from_syscall
+END(ia32_ret_from_syscall)
//
// Invoke a system call, but do some tracing before and after the call.
@@ -61,9 +60,8 @@ ia32_ret_from_syscall:
// r15 = syscall number
// b6 = syscall entry point
//
- .global ia32_trace_syscall
- .proc ia32_trace_syscall
-ia32_trace_syscall:
+GLOBAL_ENTRY(ia32_trace_syscall)
+ PT_REGS_UNWIND_INFO(0)
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
.Lret4: br.call.sptk.few rp=b6 // do the syscall
.Lret5: cmp.lt p6,p0=r8,r0 // syscall failed?
@@ -72,42 +70,38 @@ ia32_trace_syscall:
st8.spill [r2]=r8 // store return value in slot for r8
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value
.Lret6: br.cond.sptk.many ia64_leave_kernel // rp MUST be != ia64_leave_kernel!
+END(ia32_trace_syscall)
- .endp ia32_trace_syscall
-
- .align 16
- .global sys32_vfork
- .proc sys32_vfork
-sys32_vfork:
+GLOBAL_ENTRY(sys32_vfork)
alloc r16=ar.pfs,2,2,3,0;;
mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD // out0 = clone_flags
br.cond.sptk.few .fork1 // do the work
- .endp sys32_vfork
+END(sys32_vfork)
- .align 16
- .global sys32_fork
- .proc sys32_fork
-sys32_fork:
- alloc r16=ar.pfs,2,2,3,0;;
+GLOBAL_ENTRY(sys32_fork)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
+ alloc r16=ar.pfs,2,2,3,0
mov out0=SIGCHLD // out0 = clone_flags
+ ;;
.fork1:
- movl r28=1f
- mov loc1=rp
- br.cond.sptk.many save_switch_stack
-1:
- mov loc0=r16 // save ar.pfs across do_fork
+ mov loc0=rp
+ mov loc1=r16 // save ar.pfs across do_fork
+ DO_SAVE_SWITCH_STACK
+
+ UNW(.body)
+
adds out2=IA64_SWITCH_STACK_SIZE+16,sp
adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp
;;
ld8 out1=[r2] // fetch usp from pt_regs.r12
br.call.sptk.few rp=do_fork
.ret1:
- mov ar.pfs=loc0
+ mov ar.pfs=loc1
+ UNW(.restore sp)
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov rp=loc1
- ;;
+ mov rp=loc0
br.ret.sptk.many rp
- .endp sys32_fork
+END(sys32_fork)
.rodata
.align 8
@@ -304,3 +298,8 @@ ia32_syscall_table:
data8 sys_ni_syscall /* streams1 */
data8 sys_ni_syscall /* streams2 */
data8 sys32_vfork /* 190 */
+ /*
+ * CAUTION: If any system calls are added beyond this point
+ * then the check in `arch/ia64/kernel/ivt.S' will have
+ * to be modified also. You've been warned.
+ */
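The churn in this file comes from replacing hand-rolled .global/.proc/.endp triples with the GLOBAL_ENTRY()/END() wrappers from the new <asm/asmmacro.h>, plus unwind annotations (PT_REGS_UNWIND_INFO, UNW()) for the new unwinder. A guess at what the basic wrappers expand to, for orientation only; the real macros may also emit alignment or unwind directives:

/* Assumed expansion of the asmmacro.h entry/exit wrappers (sketch). */
#define GLOBAL_ENTRY(name)	\
	.global name;		\
	.align 32;		\
	.proc name;		\
name:

#define END(name)		\
	.endp name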
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
index e85af6ced..574c1937f 100644
--- a/arch/ia64/ia32/ia32_signal.c
+++ b/arch/ia64/ia32/ia32_signal.c
@@ -55,7 +55,7 @@ struct rt_sigframe_ia32
};
static int
-copy_siginfo_to_user32(siginfo_t32 *to, siginfo_t *from)
+copy_siginfo_to_user32 (siginfo_t32 *to, siginfo_t *from)
{
int err;
@@ -104,6 +104,7 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate,
struct pt_regs *regs, unsigned long mask)
{
int err = 0;
+ unsigned long flag;
err |= __put_user((regs->r16 >> 32) & 0xffff , (unsigned int *)&sc->fs);
err |= __put_user((regs->r16 >> 48) & 0xffff , (unsigned int *)&sc->gs);
@@ -124,9 +125,11 @@ setup_sigcontext_ia32(struct sigcontext_ia32 *sc, struct _fpstate_ia32 *fpstate,
#endif
err |= __put_user(regs->cr_iip, &sc->eip);
err |= __put_user(regs->r17 & 0xffff, (unsigned int *)&sc->cs);
-#if 0
- err |= __put_user(regs->eflags, &sc->eflags);
-#endif
+ /*
+ * `eflags' is in an ar register for this context
+ */
+ asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+ err |= __put_user((unsigned int)flag, &sc->eflags);
err |= __put_user(regs->r12, &sc->esp_at_signal);
err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int *)&sc->ss);
@@ -190,15 +193,26 @@ restore_sigcontext_ia32(struct pt_regs *regs, struct sigcontext_ia32 *sc, int *p
COPY(cr_iip, eip);
COPY_SEG_STRICT(cs);
COPY_SEG_STRICT(ss);
-#if 0
{
- unsigned int tmpflags;
- err |= __get_user(tmpflags, &sc->eflags);
- /* XXX: Change this to ar.eflags */
- regs->eflags = (regs->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
- regs->orig_eax = -1; /* disable syscall checks */
+ unsigned int tmpflags;
+ unsigned long flag;
+
+ /*
+ * IA32 `eflags' is not part of `pt_regs', it's
+ * in an ar register which is part of the thread
+ * context. Fortunately, we are executing in the
+ * IA32 process's context.
+ */
+ err |= __get_user(tmpflags, &sc->eflags);
+ asm volatile ("mov %0=ar.eflag ;;" : "=r"(flag));
+ flag &= ~0x40DD5;
+ flag |= (tmpflags & 0x40DD5);
+ asm volatile ("mov ar.eflag=%0 ;;" :: "r"(flag));
+
+ regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
}
+#if 0
{
struct _fpstate * buf;
err |= __get_user(buf, &sc->fpstate);
@@ -271,7 +285,7 @@ setup_frame_ia32(int sig, struct k_sigaction *ka, sigset_t *set,
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
- err |= __put_user(frame->retcode, &frame->pretcode);
+ err |= __put_user((long)frame->retcode, &frame->pretcode);
/* This is popl %eax ; movl $,%eax ; int $0x80 */
err |= __put_user(0xb858, (short *)(frame->retcode+0));
#define __IA32_NR_sigreturn 119
@@ -326,8 +340,8 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
? current->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
+ err |= __put_user((long)&frame->info, &frame->pinfo);
+ err |= __put_user((long)&frame->uc, &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
/* Create the ucontext. */
@@ -341,7 +355,7 @@ setup_rt_frame_ia32(int sig, struct k_sigaction *ka, siginfo_t *info,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- err |= __put_user(frame->retcode, &frame->pretcode);
+ err |= __put_user((long)frame->retcode, &frame->pretcode);
/* This is movl $,%eax ; int $0x80 */
err |= __put_user(0xb8, (char *)(frame->retcode+0));
#define __IA32_NR_rt_sigreturn 173
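The signal-context hunks above replace the stubbed-out pt_regs-based eflags handling: for an IA-32 task, eflags lives in the ar.eflag application register of the thread, not in pt_regs, and since signal delivery runs in the target process's context the register can be read and written directly. The read-modify-write done by restore_sigcontext_ia32() distills to the following sketch, where 0x40DD5 masks the user-modifiable arithmetic and control bits:

/*
 * Sketch of the ar.eflag merge in restore_sigcontext_ia32() above:
 * only bits in 0x40DD5 may be replaced from the user's sigcontext.
 */
static void
merge_user_eflags (unsigned int user_eflags)
{
	unsigned long flag;

	asm volatile ("mov %0=ar.eflag" : "=r"(flag));
	flag = (flag & ~0x40DD5UL) | (user_eflags & 0x40DD5);
	asm volatile ("mov ar.eflag=%0" :: "r"(flag));
}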
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index dcf61e8e4..ab5bebfe1 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -1,6 +1,9 @@
/*
* IA32 helper functions
+ *
+ * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
*/
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -16,6 +19,57 @@ extern unsigned long *ia32_gdt_table, *ia32_tss;
extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
+void
+ia32_save_state (struct thread_struct *thread)
+{
+ unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
+
+ asm ("mov %0=ar.eflag;"
+ "mov %1=ar.fsr;"
+ "mov %2=ar.fcr;"
+ "mov %3=ar.fir;"
+ "mov %4=ar.fdr;"
+ "mov %5=ar.csd;"
+ "mov %6=ar.ssd;"
+ "mov %7=ar.k1"
+ : "=r"(eflag), "=r"(fsr), "=r"(fcr), "=r"(fir), "=r"(fdr),
+ "=r"(csd), "=r"(ssd), "=r"(tssd));
+ thread->eflag = eflag;
+ thread->fsr = fsr;
+ thread->fcr = fcr;
+ thread->fir = fir;
+ thread->fdr = fdr;
+ thread->csd = csd;
+ thread->ssd = ssd;
+ thread->tssd = tssd;
+}
+
+void
+ia32_load_state (struct thread_struct *thread)
+{
+ unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
+
+ eflag = thread->eflag;
+ fsr = thread->fsr;
+ fcr = thread->fcr;
+ fir = thread->fir;
+ fdr = thread->fdr;
+ csd = thread->csd;
+ ssd = thread->ssd;
+ tssd = thread->tssd;
+
+ asm volatile ("mov ar.eflag=%0;"
+ "mov ar.fsr=%1;"
+ "mov ar.fcr=%2;"
+ "mov ar.fir=%3;"
+ "mov ar.fdr=%4;"
+ "mov ar.csd=%5;"
+ "mov ar.ssd=%6;"
+ "mov ar.k1=%7"
+ :: "r"(eflag), "r"(fsr), "r"(fcr), "r"(fir), "r"(fdr),
+ "r"(csd), "r"(ssd), "r"(tssd));
+}
+
/*
* Setup IA32 GDT and TSS
*/
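ia32_save_state() and ia32_load_state() spill and refill the IA-32 portion of the thread's ar state (eflag, FP status/control, segment descriptors, and the TSS descriptor parked in ar.k1). This diff does not show where they are hooked in; presumably the context-switch path calls them when switching away from or to an IA-32 task, roughly as in this sketch (IS_IA32_PROCESS(), ia64_task_regs() and ia64_switch_to() are assumed helper names, not taken from this diff):

/* Sketch under assumptions: wiring the save/load helpers into the
 * context switch.  Helper names are guesses, not from this diff. */
#define my_switch_to(prev, next, last)					\
do {									\
	if (IS_IA32_PROCESS(ia64_task_regs(prev)))			\
		ia32_save_state(&(prev)->thread);			\
	if (IS_IA32_PROCESS(ia64_task_regs(next)))			\
		ia32_load_state(&(next)->thread);			\
	(last) = ia64_switch_to(next);					\
} while (0)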
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
index de99a65b3..2cfc9ae02 100644
--- a/arch/ia64/ia32/ia32_traps.c
+++ b/arch/ia64/ia32/ia32_traps.c
@@ -1,3 +1,9 @@
+/*
+ * IA32 exceptions handler
+ *
+ * 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
+ */
+
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -9,9 +15,11 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
{
struct siginfo siginfo;
+ siginfo.si_errno = 0;
switch ((isr >> 16) & 0xff) {
case 1:
case 2:
+ siginfo.si_signo = SIGTRAP;
if (isr == 0)
siginfo.si_code = TRAP_TRACE;
else if (isr & 0x4)
@@ -21,27 +29,96 @@ ia32_exception (struct pt_regs *regs, unsigned long isr)
break;
case 3:
+ siginfo.si_signo = SIGTRAP;
siginfo.si_code = TRAP_BRKPT;
break;
case 0: /* Divide fault */
+ siginfo.si_signo = SIGFPE;
+ siginfo.si_code = FPE_INTDIV;
+ break;
+
case 4: /* Overflow */
case 5: /* Bounds fault */
+ siginfo.si_signo = SIGFPE;
+ siginfo.si_code = 0;
+ break;
+
case 6: /* Invalid Op-code */
+ siginfo.si_signo = SIGILL;
+ siginfo.si_code = ILL_ILLOPN;
+ break;
+
case 7: /* FP DNA */
case 8: /* Double Fault */
case 9: /* Invalid TSS */
case 11: /* Segment not present */
case 12: /* Stack fault */
case 13: /* General Protection Fault */
+ siginfo.si_signo = SIGSEGV;
+ siginfo.si_code = 0;
+ break;
+
case 16: /* Pending FP error */
+ {
+ unsigned long fsr, fcr;
+
+ asm ("mov %0=ar.fsr;"
+ "mov %1=ar.fcr;"
+ : "=r"(fsr), "=r"(fcr));
+
+ siginfo.si_signo = SIGFPE;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+	 * then we have a bad program that isn't synchronizing its FPU usage
+ * then we have a bad program that isn't syncronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
+ switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
+ case 0x000:
+ default:
+ siginfo.si_code = 0;
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x040: /* Stack Fault */
+ case 0x240: /* Stack Fault | Direction */
+ siginfo.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ siginfo.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ siginfo.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ siginfo.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ siginfo.si_code = FPE_FLTRES;
+ break;
+ }
+
+ break;
+ }
+
case 17: /* Alignment check */
+ siginfo.si_signo = SIGSEGV;
+ siginfo.si_code = BUS_ADRALN;
+ break;
+
case 19: /* SSE Numeric error */
+ siginfo.si_signo = SIGFPE;
+ siginfo.si_code = 0;
+ break;
+
default:
return -1;
}
- siginfo.si_signo = SIGTRAP;
- siginfo.si_errno = 0;
- send_sig_info(SIGTRAP, &siginfo, current);
+ force_sig_info(SIGTRAP, &siginfo, current);
return 0;
}
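The pending-FP-error case above decodes si_code from ar.fsr/ar.fcr: (~fcr & fsr & 0x3f) keeps only exception bits whose mask bit is clear (i.e. unmasked and therefore trapping), while fsr & 0x240 carries the stack-fault bit (0x040) and the C1 direction flag (0x200) through unmasked. For example, a divide-by-zero with the zero-divide mask clear yields key 0x004 and hence FPE_FLTDIV. The decode, lifted out as a standalone sketch:

/*
 * Sketch of the si_code derivation in the case-16 hunk above;
 * the FPE_* codes come from <asm/siginfo.h>.
 */
static int
fp_si_code (unsigned long fsr, unsigned long fcr)
{
	switch (((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
	      case 0x001: case 0x040: case 0x240: return FPE_FLTINV;
	      case 0x002: case 0x010:		  return FPE_FLTUND;
	      case 0x004:			  return FPE_FLTDIV;
	      case 0x008:			  return FPE_FLTOVF;
	      case 0x020:			  return FPE_FLTRES;
	      default:				  return 0;
	}
}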
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 06642dcec..bd925c0e4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -7,6 +7,8 @@
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment.
@@ -55,24 +57,29 @@
#include <net/sock.h>
#include <asm/ia32.h>
-#define A(__x) ((unsigned long)(__x))
-#define AA(__x) ((unsigned long)(__x))
+#define A(__x) ((unsigned long)(__x))
+#define AA(__x) ((unsigned long)(__x))
+#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
+#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
+
+extern asmlinkage long sys_execve (char *, char **, char **, struct pt_regs *);
+extern asmlinkage long sys_munmap (unsigned long, size_t len);
+extern asmlinkage long sys_mprotect (unsigned long, size_t, unsigned long);
static int
nargs(unsigned int arg, char **ap)
{
- char *ptr;
- int n, err;
+ int n, err, addr;
n = 0;
do {
- if (err = get_user(ptr, (int *)arg))
+ if ((err = get_user(addr, (int *)A(arg))) != 0)
return(err);
if (ap)
- *ap++ = ptr;
+ *ap++ = (char *)A(addr);
arg += sizeof(unsigned int);
n++;
- } while (ptr);
+ } while (addr);
return(n - 1);
}
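The nargs() fix above is a 32-bit-pointer correctness issue: an IA-32 process hands over argv/envp as arrays of 32-bit user pointers, so get_user() into a 64-bit char * would pull in two slots at once, and the raw 32-bit address also has to be widened through A() before being dereferenced. The repaired walk fetches one 32-bit slot at a time and zero-extends it, per this sketch (A() and get_user() as used in the code above):

/* Sketch: counting NULL-terminated 32-bit argv slots from the 64-bit
 * kernel.  uargv is a 32-bit user address as passed by the process. */
static int
count_args32 (unsigned int uargv)
{
	unsigned int addr;
	int n = 0;

	do {
		if (get_user(addr, (unsigned int *) A(uargv)))
			return -EFAULT;
		uargv += sizeof(unsigned int);
		n++;
	} while (addr);
	return n - 1;		/* don't count the terminating NULL */
}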
@@ -106,14 +113,14 @@ int stack)
down(&current->mm->mmap_sem);
lock_kernel();
- av = do_mmap_pgoff(0, NULL, len,
- PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0);
+ av = (char **) do_mmap_pgoff(0, 0UL, len, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, 0);
unlock_kernel();
up(&current->mm->mmap_sem);
if (IS_ERR(av))
- return(av);
+ return (long)av;
ae = av + na + 1;
av[na] = (char *)0;
ae[ne] = (char *)0;
@@ -121,7 +128,7 @@ int stack)
(void)nargs(envp, ae);
r = sys_execve(filename, av, ae, regs);
if (IS_ERR(r))
- sys_munmap(av, len);
+ sys_munmap((unsigned long) av, len);
return(r);
}
@@ -146,9 +153,9 @@ putstat(struct stat32 *ubuf, struct stat *kbuf)
return err;
}
-extern asmlinkage int sys_newstat(char * filename, struct stat * statbuf);
+extern asmlinkage long sys_newstat(char * filename, struct stat * statbuf);
-asmlinkage int
+asmlinkage long
sys32_newstat(char * filename, struct stat32 *statbuf)
{
int ret;
@@ -163,9 +170,9 @@ sys32_newstat(char * filename, struct stat32 *statbuf)
return ret;
}
-extern asmlinkage int sys_newlstat(char * filename, struct stat * statbuf);
+extern asmlinkage long sys_newlstat(char * filename, struct stat * statbuf);
-asmlinkage int
+asmlinkage long
sys32_newlstat(char * filename, struct stat32 *statbuf)
{
int ret;
@@ -180,9 +187,9 @@ sys32_newlstat(char * filename, struct stat32 *statbuf)
return ret;
}
-extern asmlinkage int sys_newfstat(unsigned int fd, struct stat * statbuf);
+extern asmlinkage long sys_newfstat(unsigned int fd, struct stat * statbuf);
-asmlinkage int
+asmlinkage long
sys32_newfstat(unsigned int fd, struct stat32 *statbuf)
{
int ret;
@@ -214,34 +221,26 @@ do_mmap_fake(struct file *file, unsigned long addr, unsigned long len,
return -EINVAL;
if (prot & PROT_WRITE)
prot |= PROT_EXEC;
-#ifdef DDD
-#else // DDD
prot |= PROT_WRITE;
-#endif // DDD
front = NULL;
back = NULL;
if ((baddr = (addr & PAGE_MASK)) != addr && get_user(c, (char *)baddr) == 0) {
front = kmalloc(addr - baddr, GFP_KERNEL);
memcpy(front, (void *)baddr, addr - baddr);
}
-#ifndef DDD
- if (addr)
-#endif
- if (((addr + len) & ~PAGE_MASK) && get_user(c, (char *)(addr + len)) == 0) {
+ if (addr && ((addr + len) & ~PAGE_MASK) && get_user(c, (char *)(addr + len)) == 0) {
back = kmalloc(PAGE_SIZE - ((addr + len) & ~PAGE_MASK), GFP_KERNEL);
- memcpy(back, addr + len, PAGE_SIZE - ((addr + len) & ~PAGE_MASK));
+ memcpy(back, (char *)addr + len, PAGE_SIZE - ((addr + len) & ~PAGE_MASK));
}
down(&current->mm->mmap_sem);
r = do_mmap(0, baddr, len + (addr - baddr), prot, flags | MAP_ANONYMOUS, 0);
up(&current->mm->mmap_sem);
if (r < 0)
return(r);
-#ifndef DDD
if (addr == 0)
addr = r;
-#endif // DDD
if (back) {
- memcpy(addr + len, back, PAGE_SIZE - ((addr + len) & ~PAGE_MASK));
+ memcpy((char *)addr + len, back, PAGE_SIZE - ((addr + len) & ~PAGE_MASK));
kfree(back);
}
if (front) {
@@ -249,7 +248,7 @@ do_mmap_fake(struct file *file, unsigned long addr, unsigned long len,
kfree(front);
}
if (flags & MAP_ANONYMOUS) {
- memset(addr, 0, len);
+ memset((char *)addr, 0, len);
return(addr);
}
if (!file)
@@ -283,7 +282,7 @@ struct mmap_arg_struct {
unsigned int offset;
};
-asmlinkage int
+asmlinkage long
sys32_mmap(struct mmap_arg_struct *arg)
{
int error = -EFAULT;
@@ -302,11 +301,7 @@ sys32_mmap(struct mmap_arg_struct *arg)
}
a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-#ifdef DDD
if ((a.flags & MAP_FIXED) && ((a.addr & ~PAGE_MASK) || (a.offset & ~PAGE_MASK))) {
-#else // DDD
- if (1) {
-#endif // DDD
unlock_kernel();
error = do_mmap_fake(file, a.addr, a.len, a.prot, a.flags, a.offset);
lock_kernel();
@@ -351,7 +346,7 @@ sys32_mprotect(unsigned long start, size_t len, unsigned long prot)
return(sys_mprotect(start & PAGE_MASK, len & PAGE_MASK, prot));
}
-asmlinkage int
+asmlinkage long
sys32_rt_sigaction(int sig, struct sigaction32 *act,
struct sigaction32 *oact, unsigned int sigsetsize)
{
@@ -410,10 +405,10 @@ sys32_rt_sigaction(int sig, struct sigaction32 *act,
}
-extern asmlinkage int sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
- size_t sigsetsize);
+extern asmlinkage long sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset,
+ size_t sigsetsize);
-asmlinkage int
+asmlinkage long
sys32_rt_sigprocmask(int how, sigset32_t *set, sigset32_t *oset,
unsigned int sigsetsize)
{
@@ -468,9 +463,9 @@ put_statfs (struct statfs32 *ubuf, struct statfs *kbuf)
return err;
}
-extern asmlinkage int sys_statfs(const char * path, struct statfs * buf);
+extern asmlinkage long sys_statfs(const char * path, struct statfs * buf);
-asmlinkage int
+asmlinkage long
sys32_statfs(const char * path, struct statfs32 *buf)
{
int ret;
@@ -485,9 +480,9 @@ sys32_statfs(const char * path, struct statfs32 *buf)
return ret;
}
-extern asmlinkage int sys_fstatfs(unsigned int fd, struct statfs * buf);
+extern asmlinkage long sys_fstatfs(unsigned int fd, struct statfs * buf);
-asmlinkage int
+asmlinkage long
sys32_fstatfs(unsigned int fd, struct statfs32 *buf)
{
int ret;
@@ -554,7 +549,7 @@ put_it32(struct itimerval32 *o, struct itimerval *i)
extern int do_getitimer(int which, struct itimerval *value);
-asmlinkage int
+asmlinkage long
sys32_getitimer(int which, struct itimerval32 *it)
{
struct itimerval kit;
@@ -569,7 +564,7 @@ sys32_getitimer(int which, struct itimerval32 *it)
extern int do_setitimer(int which, struct itimerval *, struct itimerval *);
-asmlinkage int
+asmlinkage long
sys32_setitimer(int which, struct itimerval32 *in, struct itimerval32 *out)
{
struct itimerval kin, kout;
@@ -614,7 +609,7 @@ sys32_alarm(unsigned int seconds)
extern struct timezone sys_tz;
extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
-asmlinkage int
+asmlinkage long
sys32_gettimeofday(struct timeval32 *tv, struct timezone *tz)
{
if (tv) {
@@ -630,7 +625,7 @@ sys32_gettimeofday(struct timeval32 *tv, struct timezone *tz)
return 0;
}
-asmlinkage int
+asmlinkage long
sys32_settimeofday(struct timeval32 *tv, struct timezone *tz)
{
struct timeval ktv;
@@ -648,56 +643,135 @@ sys32_settimeofday(struct timeval32 *tv, struct timezone *tz)
return do_sys_settimeofday(tv ? &ktv : NULL, tz ? &ktz : NULL);
}
-struct dirent32 {
- unsigned int d_ino;
- unsigned int d_off;
- unsigned short d_reclen;
- char d_name[NAME_MAX + 1];
+struct linux32_dirent {
+ u32 d_ino;
+ u32 d_off;
+ u16 d_reclen;
+ char d_name[1];
};
-static void
-xlate_dirent(void *dirent64, void *dirent32, long n)
-{
- long off;
- struct dirent *dirp;
- struct dirent32 *dirp32;
-
- off = 0;
- while (off < n) {
- dirp = (struct dirent *)(dirent64 + off);
- dirp32 = (struct dirent32 *)(dirent32 + off);
- off += dirp->d_reclen;
- dirp32->d_ino = dirp->d_ino;
- dirp32->d_off = (unsigned int)dirp->d_off;
- dirp32->d_reclen = dirp->d_reclen;
- strncpy(dirp32->d_name, dirp->d_name, dirp->d_reclen - ((3 * 4) + 2));
- }
- return;
+struct old_linux32_dirent {
+ u32 d_ino;
+ u32 d_offset;
+ u16 d_namlen;
+ char d_name[1];
+};
+
+struct getdents32_callback {
+ struct linux32_dirent * current_dir;
+ struct linux32_dirent * previous;
+ int count;
+ int error;
+};
+
+struct readdir32_callback {
+ struct old_linux32_dirent * dirent;
+ int count;
+};
+
+static int
+filldir32 (void *__buf, const char *name, int namlen, off_t offset, ino_t ino)
+{
+ struct linux32_dirent * dirent;
+ struct getdents32_callback * buf = (struct getdents32_callback *) __buf;
+ int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);
+
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
+ dirent = buf->previous;
+ if (dirent)
+ put_user(offset, &dirent->d_off);
+ dirent = buf->current_dir;
+ buf->previous = dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(reclen, &dirent->d_reclen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ ((char *) dirent) += reclen;
+ buf->current_dir = dirent;
+ buf->count -= reclen;
+ return 0;
}
asmlinkage long
-sys32_getdents(unsigned int fd, void * dirent32, unsigned int count)
+sys32_getdents (unsigned int fd, void * dirent, unsigned int count)
+{
+ struct file * file;
+ struct linux32_dirent * lastdirent;
+ struct getdents32_callback buf;
+ int error;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.current_dir = (struct linux32_dirent *) dirent;
+ buf.previous = NULL;
+ buf.count = count;
+ buf.error = 0;
+
+ lock_kernel();
+ error = vfs_readdir(file, filldir32, &buf);
+ if (error < 0)
+ goto out_putf;
+ error = buf.error;
+ lastdirent = buf.previous;
+ if (lastdirent) {
+ put_user(file->f_pos, &lastdirent->d_off);
+ error = count - buf.count;
+ }
+
+out_putf:
+ unlock_kernel();
+ fput(file);
+out:
+ return error;
+}
+
+static int
+fillonedir32 (void * __buf, const char * name, int namlen, off_t offset, ino_t ino)
{
- long n;
- void *dirent64;
+ struct readdir32_callback * buf = (struct readdir32_callback *) __buf;
+ struct old_linux32_dirent * dirent;
- dirent64 = (unsigned long)(dirent32 + (sizeof(long) - 1)) & ~(sizeof(long) - 1);
- if ((n = sys_getdents(fd, dirent64, count - (dirent64 - dirent32))) < 0)
- return(n);
- xlate_dirent(dirent64, dirent32, n);
- return(n);
+ if (buf->count)
+ return -EINVAL;
+ buf->count++;
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(offset, &dirent->d_offset);
+ put_user(namlen, &dirent->d_namlen);
+ copy_to_user(dirent->d_name, name, namlen);
+ put_user(0, dirent->d_name + namlen);
+ return 0;
}
-asmlinkage int
-sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
+asmlinkage long
+sys32_readdir (unsigned int fd, void * dirent, unsigned int count)
{
- int n;
- struct dirent dirent64;
+ int error;
+ struct file * file;
+ struct readdir32_callback buf;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.count = 0;
+ buf.dirent = dirent;
+
+ lock_kernel();
+ error = vfs_readdir(file, fillonedir32, &buf);
+ if (error >= 0)
+ error = buf.count;
+ unlock_kernel();
- if ((n = old_readdir(fd, &dirent64, count)) < 0)
- return(n);
- xlate_dirent(&dirent64, dirent32, dirent64.d_reclen);
- return(n);
+ fput(file);
+out:
+ return error;
}
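filldir32() above is the per-entry callback that vfs_readdir() invokes: it appends a 4-byte-aligned linux32_dirent record to the user buffer, back-patches the previous record's d_off, and decrements the remaining byte count. On the other side of the ABI, a 32-bit program walks the buffer returned by getdents by hopping d_reclen bytes at a time, along the lines of this hedged userspace sketch (struct layout as declared above):

#include <stdio.h>

struct linux32_dirent {
	unsigned int	d_ino;
	unsigned int	d_off;
	unsigned short	d_reclen;
	char		d_name[1];	/* NUL-terminated, variable length */
};

/* Sketch: walk a buffer filled by the 32-bit getdents emulation. */
static void
walk_dirents (char *buf, int nbytes)
{
	int pos = 0;

	while (pos < nbytes) {
		struct linux32_dirent *d = (struct linux32_dirent *) (buf + pos);

		printf("%u %s\n", d->d_ino, d->d_name);
		pos += d->d_reclen;
	}
}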
/*
@@ -710,9 +784,9 @@ sys32_readdir(unsigned int fd, void * dirent32, unsigned int count)
*/
#define MAX_SELECT_SECONDS \
((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
+#define ROUND_UP_TIME(x,y) (((x)+(y)-1)/(y))
-asmlinkage int
+asmlinkage long
sys32_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval32 *tvp32)
{
fd_set_bits fds;
@@ -732,7 +806,7 @@ sys32_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval32 *tv
goto out_nofds;
if ((unsigned long) sec < MAX_SELECT_SECONDS) {
- timeout = ROUND_UP(usec, 1000000/HZ);
+ timeout = ROUND_UP_TIME(usec, 1000000/HZ);
timeout += sec * (unsigned long) HZ;
}
}
@@ -809,13 +883,15 @@ struct sel_arg_struct {
unsigned int tvp;
};
-asmlinkage int old_select(struct sel_arg_struct *arg)
+asmlinkage long
+old_select(struct sel_arg_struct *arg)
{
struct sel_arg_struct a;
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
- return sys32_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+ return sys32_select(a.n, (fd_set *)A(a.inp), (fd_set *)A(a.outp), (fd_set *)A(a.exp),
+ (struct timeval32 *)A(a.tvp));
}
struct timespec32 {
@@ -823,10 +899,9 @@ struct timespec32 {
int tv_nsec;
};
-extern asmlinkage int sys_nanosleep(struct timespec *rqtp,
- struct timespec *rmtp);
+extern asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp);
-asmlinkage int
+asmlinkage long
sys32_nanosleep(struct timespec32 *rqtp, struct timespec32 *rmtp)
{
struct timespec t;
@@ -1007,9 +1082,9 @@ struct rlimit32 {
int rlim_max;
};
-extern asmlinkage int sys_getrlimit(unsigned int resource, struct rlimit *rlim);
+extern asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit *rlim);
-asmlinkage int
+asmlinkage long
sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r;
@@ -1026,9 +1101,9 @@ sys32_getrlimit(unsigned int resource, struct rlimit32 *rlim)
return ret;
}
-extern asmlinkage int sys_setrlimit(unsigned int resource, struct rlimit *rlim);
+extern asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit *rlim);
-asmlinkage int
+asmlinkage long
sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim)
{
struct rlimit r;
@@ -1049,118 +1124,6 @@ sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim)
return ret;
}
-/* Argument list sizes for sys_socketcall */
-#define AL(x) ((x) * sizeof(u32))
-static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
- AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
- AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
-#undef AL
-
-extern asmlinkage int sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
-extern asmlinkage int sys_connect(int fd, struct sockaddr *uservaddr,
- int addrlen);
-extern asmlinkage int sys_accept(int fd, struct sockaddr *upeer_sockaddr,
- int *upeer_addrlen);
-extern asmlinkage int sys_getsockname(int fd, struct sockaddr *usockaddr,
- int *usockaddr_len);
-extern asmlinkage int sys_getpeername(int fd, struct sockaddr *usockaddr,
- int *usockaddr_len);
-extern asmlinkage int sys_send(int fd, void *buff, size_t len, unsigned flags);
-extern asmlinkage int sys_sendto(int fd, u32 buff, __kernel_size_t32 len,
- unsigned flags, u32 addr, int addr_len);
-extern asmlinkage int sys_recv(int fd, void *ubuf, size_t size, unsigned flags);
-extern asmlinkage int sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
- unsigned flags, u32 addr, u32 addr_len);
-extern asmlinkage int sys_setsockopt(int fd, int level, int optname,
- char *optval, int optlen);
-extern asmlinkage int sys_getsockopt(int fd, int level, int optname,
- u32 optval, u32 optlen);
-
-extern asmlinkage int sys_socket(int family, int type, int protocol);
-extern asmlinkage int sys_socketpair(int family, int type, int protocol,
- int usockvec[2]);
-extern asmlinkage int sys_shutdown(int fd, int how);
-extern asmlinkage int sys_listen(int fd, int backlog);
-
-asmlinkage int sys32_socketcall(int call, u32 *args)
-{
- int i, ret;
- u32 a[6];
- u32 a0,a1;
-
- if (call<SYS_SOCKET||call>SYS_RECVMSG)
- return -EINVAL;
- if (copy_from_user(a, args, nas[call]))
- return -EFAULT;
- a0=a[0];
- a1=a[1];
-
- switch(call)
- {
- case SYS_SOCKET:
- ret = sys_socket(a0, a1, a[2]);
- break;
- case SYS_BIND:
- ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]);
- break;
- case SYS_CONNECT:
- ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]);
- break;
- case SYS_LISTEN:
- ret = sys_listen(a0, a1);
- break;
- case SYS_ACCEPT:
- ret = sys_accept(a0, (struct sockaddr *)A(a1),
- (int *)A(a[2]));
- break;
- case SYS_GETSOCKNAME:
- ret = sys_getsockname(a0, (struct sockaddr *)A(a1),
- (int *)A(a[2]));
- break;
- case SYS_GETPEERNAME:
- ret = sys_getpeername(a0, (struct sockaddr *)A(a1),
- (int *)A(a[2]));
- break;
- case SYS_SOCKETPAIR:
- ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3]));
- break;
- case SYS_SEND:
- ret = sys_send(a0, (void *)A(a1), a[2], a[3]);
- break;
- case SYS_SENDTO:
- ret = sys_sendto(a0, a1, a[2], a[3], a[4], a[5]);
- break;
- case SYS_RECV:
- ret = sys_recv(a0, (void *)A(a1), a[2], a[3]);
- break;
- case SYS_RECVFROM:
- ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]);
- break;
- case SYS_SHUTDOWN:
- ret = sys_shutdown(a0,a1);
- break;
- case SYS_SETSOCKOPT:
- ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]),
- a[4]);
- break;
- case SYS_GETSOCKOPT:
- ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]);
- break;
- case SYS_SENDMSG:
- ret = sys32_sendmsg(a0, (struct msghdr32 *)A(a1),
- a[2]);
- break;
- case SYS_RECVMSG:
- ret = sys32_recvmsg(a0, (struct msghdr32 *)A(a1),
- a[2]);
- break;
- default:
- ret = EINVAL;
- break;
- }
- return ret;
-}
-
/*
* Declare the IA32 version of the msghdr
*/
@@ -1183,13 +1146,13 @@ shape_msg(struct msghdr *mp, struct msghdr32 *mp32)
if (!access_ok(VERIFY_READ, mp32, sizeof(*mp32)))
return(-EFAULT);
__get_user(i, &mp32->msg_name);
- mp->msg_name = (void *)i;
+ mp->msg_name = (void *)A(i);
__get_user(mp->msg_namelen, &mp32->msg_namelen);
__get_user(i, &mp32->msg_iov);
- mp->msg_iov = (struct iov *)i;
+ mp->msg_iov = (struct iovec *)A(i);
__get_user(mp->msg_iovlen, &mp32->msg_iovlen);
__get_user(i, &mp32->msg_control);
- mp->msg_control = (void *)i;
+ mp->msg_control = (void *)A(i);
__get_user(mp->msg_controllen, &mp32->msg_controllen);
__get_user(mp->msg_flags, &mp32->msg_flags);
return(0);
@@ -1235,7 +1198,7 @@ verify_iovec32(struct msghdr *m, struct iovec *iov, char *address, int mode)
iov32 = (struct iovec32 *)iov;
for (ct = m->msg_iovlen; ct-- > 0; ) {
iov[ct].iov_len = (__kernel_size_t)iov32[ct].iov_len;
- iov[ct].iov_base = (void *)iov32[ct].iov_base;
+ iov[ct].iov_base = (void *) A(iov32[ct].iov_base);
err += iov[ct].iov_len;
}
out:
@@ -1260,7 +1223,7 @@ extern struct socket *sockfd_lookup(int fd, int *err);
* BSD sendmsg interface
*/
-asmlinkage int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags)
+int sys32_sendmsg(int fd, struct msghdr32 *msg, unsigned flags)
{
struct socket *sock;
char address[MAX_SOCK_ADDR];
@@ -1339,7 +1302,8 @@ out:
* BSD recvmsg interface
*/
-asmlinkage int sys32_recvmsg(int fd, struct msghdr32 *msg, unsigned int flags)
+int
+sys32_recvmsg (int fd, struct msghdr32 *msg, unsigned int flags)
{
struct socket *sock;
struct iovec iovstack[UIO_FASTIOV];
@@ -1421,6 +1385,118 @@ out:
return err;
}
+/* Argument list sizes for sys_socketcall */
+#define AL(x) ((x) * sizeof(u32))
+static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
+ AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
+ AL(6),AL(2),AL(5),AL(5),AL(3),AL(3)};
+#undef AL
+
+extern asmlinkage long sys_bind(int fd, struct sockaddr *umyaddr, int addrlen);
+extern asmlinkage long sys_connect(int fd, struct sockaddr *uservaddr,
+ int addrlen);
+extern asmlinkage long sys_accept(int fd, struct sockaddr *upeer_sockaddr,
+ int *upeer_addrlen);
+extern asmlinkage long sys_getsockname(int fd, struct sockaddr *usockaddr,
+ int *usockaddr_len);
+extern asmlinkage long sys_getpeername(int fd, struct sockaddr *usockaddr,
+ int *usockaddr_len);
+extern asmlinkage long sys_send(int fd, void *buff, size_t len, unsigned flags);
+extern asmlinkage long sys_sendto(int fd, u32 buff, __kernel_size_t32 len,
+ unsigned flags, u32 addr, int addr_len);
+extern asmlinkage long sys_recv(int fd, void *ubuf, size_t size, unsigned flags);
+extern asmlinkage long sys_recvfrom(int fd, u32 ubuf, __kernel_size_t32 size,
+ unsigned flags, u32 addr, u32 addr_len);
+extern asmlinkage long sys_setsockopt(int fd, int level, int optname,
+ char *optval, int optlen);
+extern asmlinkage long sys_getsockopt(int fd, int level, int optname,
+ u32 optval, u32 optlen);
+
+extern asmlinkage long sys_socket(int family, int type, int protocol);
+extern asmlinkage long sys_socketpair(int family, int type, int protocol,
+ int usockvec[2]);
+extern asmlinkage long sys_shutdown(int fd, int how);
+extern asmlinkage long sys_listen(int fd, int backlog);
+
+asmlinkage long sys32_socketcall(int call, u32 *args)
+{
+ int ret;
+ u32 a[6];
+ u32 a0,a1;
+
+ if (call<SYS_SOCKET||call>SYS_RECVMSG)
+ return -EINVAL;
+ if (copy_from_user(a, args, nas[call]))
+ return -EFAULT;
+ a0=a[0];
+ a1=a[1];
+
+ switch(call)
+ {
+ case SYS_SOCKET:
+ ret = sys_socket(a0, a1, a[2]);
+ break;
+ case SYS_BIND:
+ ret = sys_bind(a0, (struct sockaddr *)A(a1), a[2]);
+ break;
+ case SYS_CONNECT:
+ ret = sys_connect(a0, (struct sockaddr *)A(a1), a[2]);
+ break;
+ case SYS_LISTEN:
+ ret = sys_listen(a0, a1);
+ break;
+ case SYS_ACCEPT:
+ ret = sys_accept(a0, (struct sockaddr *)A(a1),
+ (int *)A(a[2]));
+ break;
+ case SYS_GETSOCKNAME:
+ ret = sys_getsockname(a0, (struct sockaddr *)A(a1),
+ (int *)A(a[2]));
+ break;
+ case SYS_GETPEERNAME:
+ ret = sys_getpeername(a0, (struct sockaddr *)A(a1),
+ (int *)A(a[2]));
+ break;
+ case SYS_SOCKETPAIR:
+ ret = sys_socketpair(a0, a1, a[2], (int *)A(a[3]));
+ break;
+ case SYS_SEND:
+ ret = sys_send(a0, (void *)A(a1), a[2], a[3]);
+ break;
+ case SYS_SENDTO:
+ ret = sys_sendto(a0, a1, a[2], a[3], a[4], a[5]);
+ break;
+ case SYS_RECV:
+ ret = sys_recv(a0, (void *)A(a1), a[2], a[3]);
+ break;
+ case SYS_RECVFROM:
+ ret = sys_recvfrom(a0, a1, a[2], a[3], a[4], a[5]);
+ break;
+ case SYS_SHUTDOWN:
+ ret = sys_shutdown(a0,a1);
+ break;
+ case SYS_SETSOCKOPT:
+ ret = sys_setsockopt(a0, a1, a[2], (char *)A(a[3]),
+ a[4]);
+ break;
+ case SYS_GETSOCKOPT:
+ ret = sys_getsockopt(a0, a1, a[2], a[3], a[4]);
+ break;
+ case SYS_SENDMSG:
+ ret = sys32_sendmsg(a0, (struct msghdr32 *)A(a1),
+ a[2]);
+ break;
+ case SYS_RECVMSG:
+ ret = sys32_recvmsg(a0, (struct msghdr32 *)A(a1),
+ a[2]);
+ break;
+ default:
+ ret = EINVAL;
+ break;
+ }
+ return ret;
+}
+
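sys32_socketcall() demultiplexes the single socketcall trap that 32-bit libc uses: the call number indexes nas[] to learn how many bytes of u32 arguments to copy_from_user() before dispatching. From the caller's side it looks like this hedged userspace sketch (SYS_SOCKET is 1 and __NR_socketcall is 102 in the i386 ABI; both are redefined locally to keep the sketch self-contained):

#include <sys/syscall.h>
#include <unistd.h>

#define NR_SOCKETCALL	  102	/* i386 __NR_socketcall */
#define SOCKETCALL_SOCKET   1	/* SYS_SOCKET */

/* Sketch: how a 32-bit libc would issue socket(2) via socketcall. */
long
my_socket32 (int family, int type, int protocol)
{
	unsigned int args[3] = { family, type, protocol };

	return syscall(NR_SOCKETCALL, SOCKETCALL_SOCKET, args);
}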
/*
* sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation..
*
@@ -1615,7 +1691,7 @@ out:
static int
do_sys32_msgctl (int first, int second, void *uptr)
{
- int err, err2;
+ int err = -EINVAL, err2;
struct msqid_ds m;
struct msqid64_ds m64;
struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
@@ -1646,7 +1722,7 @@ do_sys32_msgctl (int first, int second, void *uptr)
case MSG_STAT:
old_fs = get_fs ();
set_fs (KERNEL_DS);
- err = sys_msgctl (first, second, &m64);
+ err = sys_msgctl (first, second, (void *) &m64);
set_fs (old_fs);
err2 = put_user (m64.msg_perm.key, &up->msg_perm.key);
err2 |= __put_user(m64.msg_perm.uid, &up->msg_perm.uid);
@@ -1727,7 +1803,7 @@ do_sys32_shmctl (int first, int second, void *uptr)
case SHM_STAT:
old_fs = get_fs ();
set_fs (KERNEL_DS);
- err = sys_shmctl (first, second, &s64);
+ err = sys_shmctl (first, second, (void *) &s64);
set_fs (old_fs);
if (err < 0)
break;
@@ -1755,7 +1831,7 @@ do_sys32_shmctl (int first, int second, void *uptr)
case SHM_INFO:
old_fs = get_fs ();
set_fs (KERNEL_DS);
- err = sys_shmctl (first, second, &si);
+ err = sys_shmctl (first, second, (void *)&si);
set_fs (old_fs);
if (err < 0)
break;
@@ -1775,7 +1851,7 @@ do_sys32_shmctl (int first, int second, void *uptr)
return err;
}
-asmlinkage int
+asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version, err;
@@ -1900,10 +1976,10 @@ put_rusage (struct rusage32 *ru, struct rusage *r)
return err;
}
-extern asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr,
+extern asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr,
int options, struct rusage * ru);
-asmlinkage int
+asmlinkage long
sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options,
struct rusage32 *ru)
{
@@ -1925,17 +2001,17 @@ sys32_wait4(__kernel_pid_t32 pid, unsigned int *stat_addr, int options,
}
}
-asmlinkage int
+asmlinkage long
sys32_waitpid(__kernel_pid_t32 pid, unsigned int *stat_addr, int options)
{
return sys32_wait4(pid, stat_addr, options, NULL);
}
-extern asmlinkage int
+extern asmlinkage long
sys_getrusage(int who, struct rusage *ru);
-asmlinkage int
+asmlinkage long
sys32_getrusage(int who, struct rusage32 *ru)
{
struct rusage r;
@@ -2431,9 +2507,9 @@ getname32(const char *filename)
/* 32-bit timeval and related flotsam. */
-extern asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on);
+extern asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int on);
-asmlinkage int
+asmlinkage long
sys32_ioperm(u32 from, u32 num, int on)
{
return sys_ioperm((unsigned long)from, (unsigned long)num, on);
@@ -2505,10 +2581,10 @@ struct dqblk32 {
__kernel_time_t32 dqb_itime;
};
-extern asmlinkage int sys_quotactl(int cmd, const char *special, int id,
+extern asmlinkage long sys_quotactl(int cmd, const char *special, int id,
caddr_t addr);
-asmlinkage int
+asmlinkage long
sys32_quotactl(int cmd, const char *special, int id, unsigned long addr)
{
int cmds = cmd >> SUBCMDSHIFT;
@@ -2552,13 +2628,13 @@ sys32_quotactl(int cmd, const char *special, int id, unsigned long addr)
return err;
}
-extern asmlinkage int sys_utime(char * filename, struct utimbuf * times);
+extern asmlinkage long sys_utime(char * filename, struct utimbuf * times);
struct utimbuf32 {
__kernel_time_t32 actime, modtime;
};
-asmlinkage int
+asmlinkage long
sys32_utime(char * filename, struct utimbuf32 *times)
{
struct utimbuf t;
@@ -2642,10 +2718,10 @@ set_fd_set32(unsigned long n, u32 *ufdset, unsigned long *fdset)
__put_user(*fdset, ufdset);
}
-extern asmlinkage int sys_sysfs(int option, unsigned long arg1,
+extern asmlinkage long sys_sysfs(int option, unsigned long arg1,
unsigned long arg2);
-asmlinkage int
+asmlinkage long
sys32_sysfs(int option, u32 arg1, u32 arg2)
{
return sys_sysfs(option, arg1, arg2);
@@ -2741,7 +2817,7 @@ extern asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
#define SMBFS_NAME "smbfs"
#define NCPFS_NAME "ncpfs"
-asmlinkage int
+asmlinkage long
sys32_mount(char *dev_name, char *dir_name, char *type,
unsigned long new_flags, u32 data)
{
@@ -2815,9 +2891,9 @@ struct sysinfo32 {
char _f[22];
};
-extern asmlinkage int sys_sysinfo(struct sysinfo *info);
+extern asmlinkage long sys_sysinfo(struct sysinfo *info);
-asmlinkage int
+asmlinkage long
sys32_sysinfo(struct sysinfo32 *info)
{
struct sysinfo s;
@@ -2843,10 +2919,10 @@ sys32_sysinfo(struct sysinfo32 *info)
return ret;
}
-extern asmlinkage int sys_sched_rr_get_interval(pid_t pid,
+extern asmlinkage long sys_sched_rr_get_interval(pid_t pid,
struct timespec *interval);
-asmlinkage int
+asmlinkage long
sys32_sched_rr_get_interval(__kernel_pid_t32 pid, struct timespec32 *interval)
{
struct timespec t;
@@ -2862,10 +2938,10 @@ sys32_sched_rr_get_interval(__kernel_pid_t32 pid, struct timespec32 *interval)
return ret;
}
-extern asmlinkage int sys_sigprocmask(int how, old_sigset_t *set,
+extern asmlinkage long sys_sigprocmask(int how, old_sigset_t *set,
old_sigset_t *oset);
-asmlinkage int
+asmlinkage long
sys32_sigprocmask(int how, old_sigset_t32 *set, old_sigset_t32 *oset)
{
old_sigset_t s;
@@ -2881,9 +2957,9 @@ sys32_sigprocmask(int how, old_sigset_t32 *set, old_sigset_t32 *oset)
return 0;
}
-extern asmlinkage int sys_sigpending(old_sigset_t *set);
+extern asmlinkage long sys_sigpending(old_sigset_t *set);
-asmlinkage int
+asmlinkage long
sys32_sigpending(old_sigset_t32 *set)
{
old_sigset_t s;
@@ -2897,9 +2973,9 @@ sys32_sigpending(old_sigset_t32 *set)
return ret;
}
-extern asmlinkage int sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
+extern asmlinkage long sys_rt_sigpending(sigset_t *set, size_t sigsetsize);
-asmlinkage int
+asmlinkage long
sys32_rt_sigpending(sigset_t32 *set, __kernel_size_t32 sigsetsize)
{
sigset_t s;
@@ -3002,11 +3078,11 @@ siginfo32to64(siginfo_t *d, siginfo_t32 *s)
return d;
}
-extern asmlinkage int
+extern asmlinkage long
sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
const struct timespec *uts, size_t sigsetsize);
-asmlinkage int
+asmlinkage long
sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
struct timespec32 *uts, __kernel_size_t32 sigsetsize)
{
@@ -3043,10 +3119,10 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
return ret;
}
-extern asmlinkage int
+extern asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo);
-asmlinkage int
+asmlinkage long
sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo)
{
siginfo_t info;
@@ -3064,9 +3140,9 @@ sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo)
return ret;
}
-extern asmlinkage int sys_setreuid(uid_t ruid, uid_t euid);
+extern asmlinkage long sys_setreuid(uid_t ruid, uid_t euid);
-asmlinkage int sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
+asmlinkage long sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
{
uid_t sruid, seuid;
@@ -3075,9 +3151,9 @@ asmlinkage int sys32_setreuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid)
return sys_setreuid(sruid, seuid);
}
-extern asmlinkage int sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
+extern asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid);
-asmlinkage int
+asmlinkage long
sys32_setresuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid,
__kernel_uid_t32 suid)
{
@@ -3089,9 +3165,9 @@ sys32_setresuid(__kernel_uid_t32 ruid, __kernel_uid_t32 euid,
return sys_setresuid(sruid, seuid, ssuid);
}
-extern asmlinkage int sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
+extern asmlinkage long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid);
-asmlinkage int
+asmlinkage long
sys32_getresuid(__kernel_uid_t32 *ruid, __kernel_uid_t32 *euid,
__kernel_uid_t32 *suid)
{
@@ -3107,9 +3183,9 @@ sys32_getresuid(__kernel_uid_t32 *ruid, __kernel_uid_t32 *euid,
return ret;
}
-extern asmlinkage int sys_setregid(gid_t rgid, gid_t egid);
+extern asmlinkage long sys_setregid(gid_t rgid, gid_t egid);
-asmlinkage int
+asmlinkage long
sys32_setregid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid)
{
gid_t srgid, segid;
@@ -3119,9 +3195,9 @@ sys32_setregid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid)
return sys_setregid(srgid, segid);
}
-extern asmlinkage int sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
+extern asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid);
-asmlinkage int
+asmlinkage long
sys32_setresgid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid,
__kernel_gid_t32 sgid)
{
@@ -3133,9 +3209,9 @@ sys32_setresgid(__kernel_gid_t32 rgid, __kernel_gid_t32 egid,
return sys_setresgid(srgid, segid, ssgid);
}
-extern asmlinkage int sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid);
+extern asmlinkage long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid);
-asmlinkage int
+asmlinkage long
sys32_getresgid(__kernel_gid_t32 *rgid, __kernel_gid_t32 *egid,
__kernel_gid_t32 *sgid)
{
@@ -3154,9 +3230,9 @@ sys32_getresgid(__kernel_gid_t32 *rgid, __kernel_gid_t32 *egid,
return ret;
}
-extern asmlinkage int sys_getgroups(int gidsetsize, gid_t *grouplist);
+extern asmlinkage long sys_getgroups(int gidsetsize, gid_t *grouplist);
-asmlinkage int
+asmlinkage long
sys32_getgroups(int gidsetsize, __kernel_gid_t32 *grouplist)
{
gid_t gl[NGROUPS];
@@ -3173,9 +3249,9 @@ sys32_getgroups(int gidsetsize, __kernel_gid_t32 *grouplist)
return ret;
}
-extern asmlinkage int sys_setgroups(int gidsetsize, gid_t *grouplist);
+extern asmlinkage long sys_setgroups(int gidsetsize, gid_t *grouplist);
-asmlinkage int
+asmlinkage long
sys32_setgroups(int gidsetsize, __kernel_gid_t32 *grouplist)
{
gid_t gl[NGROUPS];
@@ -3619,7 +3695,7 @@ fail:
kmsg->msg_control = (void *) orig_cmsg_uptr;
}
-asmlinkage int
+asmlinkage long
sys32_sendmsg(int fd, struct msghdr32 *user_msg, unsigned user_flags)
{
struct socket *sock;
@@ -3667,7 +3743,7 @@ out:
return err;
}
-asmlinkage int
+asmlinkage long
sys32_recvmsg(int fd, struct msghdr32 *user_msg, unsigned int user_flags)
{
struct iovec iovstack[UIO_FASTIOV];
@@ -3758,7 +3834,7 @@ out:
extern void check_pending(int signum);
-asmlinkage int
+asmlinkage long
sys32_sigaction (int sig, struct old_sigaction32 *act,
struct old_sigaction32 *oact)
{
@@ -3803,21 +3879,21 @@ sys32_create_module(const char *name_user, __kernel_size_t32 size)
return sys_create_module(name_user, (size_t)size);
}
-extern asmlinkage int sys_init_module(const char *name_user,
+extern asmlinkage long sys_init_module(const char *name_user,
struct module *mod_user);
/* Hey, when you're trying to init module, take time and prepare us a nice 64bit
* module structure, even if from 32bit modutils... Why to pollute kernel... :))
*/
-asmlinkage int
+asmlinkage long
sys32_init_module(const char *name_user, struct module *mod_user)
{
return sys_init_module(name_user, mod_user);
}
-extern asmlinkage int sys_delete_module(const char *name_user);
+extern asmlinkage long sys_delete_module(const char *name_user);
-asmlinkage int
+asmlinkage long
sys32_delete_module(const char *name_user)
{
return sys_delete_module(name_user);
@@ -4092,7 +4168,7 @@ qm_info(struct module *mod, char *buf, size_t bufsize, __kernel_size_t32 *ret)
return error;
}
-asmlinkage int
+asmlinkage long
sys32_query_module(char *name_user, int which, char *buf,
__kernel_size_t32 bufsize, u32 ret)
{
@@ -4160,9 +4236,9 @@ struct kernel_sym32 {
char name[60];
};
-extern asmlinkage int sys_get_kernel_syms(struct kernel_sym *table);
+extern asmlinkage long sys_get_kernel_syms(struct kernel_sym *table);
-asmlinkage int
+asmlinkage long
sys32_get_kernel_syms(struct kernel_sym32 *table)
{
int len, i;
@@ -4194,19 +4270,19 @@ sys32_create_module(const char *name_user, size_t size)
return -ENOSYS;
}
-asmlinkage int
+asmlinkage long
sys32_init_module(const char *name_user, struct module *mod_user)
{
return -ENOSYS;
}
-asmlinkage int
+asmlinkage long
sys32_delete_module(const char *name_user)
{
return -ENOSYS;
}
-asmlinkage int
+asmlinkage long
sys32_query_module(const char *name_user, int which, char *buf, size_t bufsize,
size_t *ret)
{
@@ -4218,7 +4294,7 @@ sys32_query_module(const char *name_user, int which, char *buf, size_t bufsize,
return -ENOSYS;
}
-asmlinkage int
+asmlinkage long
sys32_get_kernel_syms(struct kernel_sym *table)
{
return -ENOSYS;
@@ -4434,7 +4510,7 @@ nfs_getfh32_res_trans(union nfsctl_res *kres, union nfsctl_res32 *res32)
return err;
}
-extern asmlinkage int sys_nfsservctl(int cmd, void *arg, void *resp);
+extern asmlinkage long sys_nfsservctl(int cmd, void *arg, void *resp);
int asmlinkage
sys32_nfsservctl(int cmd, struct nfsctl_arg32 *arg32, union nfsctl_res32 *res32)
@@ -4505,9 +4581,9 @@ done:
return err;
}
-asmlinkage int sys_utimes(char *, struct timeval *);
+asmlinkage long sys_utimes(char *, struct timeval *);
-asmlinkage int
+asmlinkage long
sys32_utimes(char *filename, struct timeval32 *tvs)
{
char *kfilename;
@@ -4535,7 +4611,7 @@ sys32_utimes(char *filename, struct timeval32 *tvs)
}
/* These are here just in case some old ia32 binary calls them. */
-asmlinkage int
+asmlinkage long
sys32_pause(void)
{
current->state = TASK_INTERRUPTIBLE;
@@ -4544,19 +4620,19 @@ sys32_pause(void)
}
/* PCI config space poking. */
-extern asmlinkage int sys_pciconfig_read(unsigned long bus,
+extern asmlinkage long sys_pciconfig_read(unsigned long bus,
unsigned long dfn,
unsigned long off,
unsigned long len,
unsigned char *buf);
-extern asmlinkage int sys_pciconfig_write(unsigned long bus,
+extern asmlinkage long sys_pciconfig_write(unsigned long bus,
unsigned long dfn,
unsigned long off,
unsigned long len,
unsigned char *buf);
-asmlinkage int
+asmlinkage long
sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
{
return sys_pciconfig_read((unsigned long) bus,
@@ -4566,7 +4642,7 @@ sys32_pciconfig_read(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
(unsigned char *)AA(ubuf));
}
-asmlinkage int
+asmlinkage long
sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
{
return sys_pciconfig_write((unsigned long) bus,
@@ -4576,11 +4652,11 @@ sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubuf)
(unsigned char *)AA(ubuf));
}
-extern asmlinkage int sys_prctl(int option, unsigned long arg2,
+extern asmlinkage long sys_prctl(int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5);
-asmlinkage int
+asmlinkage long
sys32_prctl(int option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
{
return sys_prctl(option,
@@ -4591,9 +4667,9 @@ sys32_prctl(int option, u32 arg2, u32 arg3, u32 arg4, u32 arg5)
}
-extern asmlinkage int sys_newuname(struct new_utsname * name);
+extern asmlinkage long sys_newuname(struct new_utsname * name);
-asmlinkage int
+asmlinkage long
sys32_newuname(struct new_utsname * name)
{
int ret = sys_newuname(name);
@@ -4629,9 +4705,9 @@ sys32_pwrite(unsigned int fd, char *ubuf, __kernel_size_t32 count,
}
-extern asmlinkage int sys_personality(unsigned long);
+extern asmlinkage long sys_personality(unsigned long);
-asmlinkage int
+asmlinkage long
sys32_personality(unsigned long personality)
{
int ret;
@@ -4648,7 +4724,7 @@ sys32_personality(unsigned long personality)
extern asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset,
size_t count);
-asmlinkage int
+asmlinkage long
sys32_sendfile(int out_fd, int in_fd, __kernel_off_t32 *offset, s32 count)
{
mm_segment_t old_fs = get_fs();
@@ -4685,7 +4761,7 @@ struct timex32 {
extern int do_adjtimex(struct timex *);
-asmlinkage int
+asmlinkage long
sys32_adjtimex(struct timex32 *utp)
{
struct timex txc;
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 225cbec5d..3fb62560d 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -1,11 +1,6 @@
#
# Makefile for the linux kernel.
#
-# Note! Dependencies are done automagically by 'make dep', which also
-# removes any old dependencies. DON'T put your own dependencies here
-# unless it's something special (ie not a .c file).
-#
-# Note 2! The CFLAGS definitions are now in the main makefile...
.S.s:
$(CPP) $(AFLAGS) -o $*.s $<
@@ -15,16 +10,19 @@
all: kernel.o head.o init_task.o
O_TARGET := kernel.o
-O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \
- pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o sal_stub.o semaphore.o setup.o \
+O_OBJS := acpi.o entry.o gate.o efi.o efi_stub.o irq.o irq_ia64.o irq_sapic.o ivt.o \
+ pal.o pci-dma.o process.o perfmon.o ptrace.o sal.o semaphore.o setup.o \
signal.o sys_ia64.o traps.o time.o unaligned.o unwind.o
-#O_OBJS := fpreg.o
-#OX_OBJS := ia64_ksyms.o
+OX_OBJS := ia64_ksyms.o
ifdef CONFIG_IA64_GENERIC
O_OBJS += machvec.o
endif
+ifdef CONFIG_IA64_PALINFO
+O_OBJS += palinfo.o
+endif
+
ifdef CONFIG_PCI
O_OBJS += pci.o
endif
@@ -37,6 +35,10 @@ ifdef CONFIG_IA64_MCA
O_OBJS += mca.o mca_asm.o
endif
+ifdef CONFIG_IA64_BRL_EMU
+O_OBJS += brl_emu.o
+endif
+
clean::
include $(TOPDIR)/Rules.make
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 72e10a683..20521da36 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -89,16 +89,16 @@ acpi_iosapic(char *p)
#ifdef CONFIG_IA64_DIG
acpi_entry_iosapic_t *iosapic = (acpi_entry_iosapic_t *) p;
unsigned int ver, v;
- int l, pins;
+ int l, max_pin;
ver = iosapic_version(iosapic->address);
- pins = (ver >> 16) & 0xff;
+ max_pin = (ver >> 16) & 0xff;
printk("IOSAPIC Version %x.%x: address 0x%lx IRQs 0x%x - 0x%x\n",
(ver & 0xf0) >> 4, (ver & 0x0f), iosapic->address,
- iosapic->irq_base, iosapic->irq_base + pins);
+ iosapic->irq_base, iosapic->irq_base + max_pin);
- for (l = 0; l < pins; l++) {
+ for (l = 0; l <= max_pin; l++) {
v = iosapic->irq_base + l;
if (v < 16)
v = isa_irq_to_vector(v);
@@ -110,7 +110,7 @@ acpi_iosapic(char *p)
iosapic_addr(v) = (unsigned long) ioremap(iosapic->address, 0);
iosapic_baseirq(v) = iosapic->irq_base;
}
- iosapic_init(iosapic->address);
+ iosapic_init(iosapic->address, iosapic->irq_base);
#endif
}
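
The pins -> max_pin rename above carries the real fix: bits 16-23 of the IOSAPIC version word report the index of the highest redirection entry, not a count, so the old "l < pins" loop dropped the last pin. A small C sketch of the corrected interpretation (the register layout is stated here as an assumption, not taken from this patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical version word: bits 16-23 = 23, i.e. redirection
	 * entries 0..23, which is 24 pins. */
	unsigned int ver = 0x00170011;
	int max_pin = (ver >> 16) & 0xff;

	printf("highest pin index = %d, pin count = %d\n", max_pin, max_pin + 1);
	/* The old loop "for (l = 0; l < pins; l++)" visited entries
	 * 0..22 only; "l <= max_pin" also programs entry 23. */
	return 0;
}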
diff --git a/arch/ia64/kernel/brl_emu.c b/arch/ia64/kernel/brl_emu.c
new file mode 100644
index 000000000..8948b7bb2
--- /dev/null
+++ b/arch/ia64/kernel/brl_emu.c
@@ -0,0 +1,220 @@
+/*
+ * Emulation of the "brl" instruction for IA64 processors that
+ * don't support it in hardware.
+ * Author: Stephan Zeisset, Intel Corp. <Stephan.Zeisset@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+
+extern char ia64_set_b1, ia64_set_b2, ia64_set_b3, ia64_set_b4, ia64_set_b5;
+
+struct illegal_op_return {
+ unsigned long fkt, arg1, arg2, arg3;
+};
+
+/*
+ * The unimplemented bits of a virtual address must be set
+ * to the value of the most significant implemented bit.
+ * unimpl_va_mask includes all unimplemented bits and
+ * the most significant implemented bit, so the result
+ * of an AND operation with the mask must be all 0's
+ * or all 1's for the address to be valid.
+ */
+#define unimplemented_virtual_address(va) ( \
+ ((va) & my_cpu_data.unimpl_va_mask) != 0 && \
+ ((va) & my_cpu_data.unimpl_va_mask) != my_cpu_data.unimpl_va_mask \
+)
+
+/*
+ * The unimplemented bits of a physical address must be 0.
+ * unimpl_pa_mask includes all unimplemented bits, so the result
+ * of an AND operation with the mask must be all 0's for the
+ * address to be valid.
+ */
+#define unimplemented_physical_address(pa) ( \
+ ((pa) & my_cpu_data.unimpl_pa_mask) != 0 \
+)
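
A worked example of the two validity masks, under the illustrative assumption of 51 implemented virtual-address bits, so unimpl_va_mask covers bits 50-60 (the unimplemented bits plus the most significant implemented one):

#include <stdio.h>

int main(void)
{
	unsigned long unimpl_va_mask = ((1UL << 61) - 1) & ~((1UL << 50) - 1);
	unsigned long addrs[] = {
		0x1234UL,                  /* bits 50..60 all 0 -> valid   */
		unimpl_va_mask | 0x1234UL, /* bits 50..60 all 1 -> valid   */
		1UL << 55,                 /* mixed             -> invalid */
	};

	for (int i = 0; i < 3; i++) {
		unsigned long m = addrs[i] & unimpl_va_mask;
		printf("0x%016lx -> %s\n", addrs[i],
		       (m == 0 || m == unimpl_va_mask) ? "valid" : "unimplemented");
	}
	return 0;
}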
+
+/*
+ * Handle an illegal operation fault that was caused by an
+ * unimplemented "brl" instruction.
+ * If we are not successful (e.g. because the illegal operation
+ * wasn't caused by a "brl" after all), we return -1.
+ * If we are successful, we return either 0 or the address
+ * of a "fixup" function for manipulating preserved register
+ * state.
+ */
+
+struct illegal_op_return
+ia64_emulate_brl (struct pt_regs *regs, unsigned long ar_ec)
+{
+ unsigned long bundle[2];
+ unsigned long opcode, btype, qp, offset;
+ unsigned long next_ip;
+ struct siginfo siginfo;
+ struct illegal_op_return rv;
+ int tmp_taken, unimplemented_address;
+
+ rv.fkt = (unsigned long) -1;
+
+ /*
+ * Decode the instruction bundle.
+ */
+
+ if (copy_from_user(bundle, (void *) (regs->cr_iip), sizeof(bundle)))
+ return rv;
+
+ next_ip = (unsigned long) regs->cr_iip + 16;
+
+ /* "brl" must be in slot 2. */
+ if (ia64_psr(regs)->ri != 1) return rv;
+
+ /* Must be "mlx" template */
+ if ((bundle[0] & 0x1e) != 0x4) return rv;
+
+ opcode = (bundle[1] >> 60);
+ btype = ((bundle[1] >> 29) & 0x7);
+ qp = ((bundle[1] >> 23) & 0x3f);
+ offset = ((bundle[1] & 0x0800000000000000L) << 4)
+ | ((bundle[1] & 0x00fffff000000000L) >> 32)
+ | ((bundle[1] & 0x00000000007fffffL) << 40)
+ | ((bundle[0] & 0xffff000000000000L) >> 24);
+
+ tmp_taken = regs->pr & (1L << qp);
+
+ switch(opcode) {
+
+ case 0xC:
+ /*
+ * Long Branch.
+ */
+ if (btype != 0) return rv;
+ rv.fkt = 0;
+ if (!(tmp_taken)) {
+ /*
+ * Qualifying predicate is 0.
+ * Skip instruction.
+ */
+ regs->cr_iip = next_ip;
+ ia64_psr(regs)->ri = 0;
+ return rv;
+ }
+ break;
+
+ case 0xD:
+ /*
+ * Long Call.
+ */
+ rv.fkt = 0;
+ if (!(tmp_taken)) {
+ /*
+ * Qualifying predicate is 0.
+ * Skip instruction.
+ */
+ regs->cr_iip = next_ip;
+ ia64_psr(regs)->ri = 0;
+ return rv;
+ }
+
+ /*
+ * BR[btype] = IP+16
+ */
+ switch(btype) {
+ case 0:
+ regs->b0 = next_ip;
+ break;
+ case 1:
+ rv.fkt = (unsigned long) &ia64_set_b1;
+ break;
+ case 2:
+ rv.fkt = (unsigned long) &ia64_set_b2;
+ break;
+ case 3:
+ rv.fkt = (unsigned long) &ia64_set_b3;
+ break;
+ case 4:
+ rv.fkt = (unsigned long) &ia64_set_b4;
+ break;
+ case 5:
+ rv.fkt = (unsigned long) &ia64_set_b5;
+ break;
+ case 6:
+ regs->b6 = next_ip;
+ break;
+ case 7:
+ regs->b7 = next_ip;
+ break;
+ }
+ rv.arg1 = next_ip;
+
+ /*
+ * AR[PFS].pfm = CFM
+ * AR[PFS].pec = AR[EC]
+ * AR[PFS].ppl = PSR.cpl
+ */
+ regs->ar_pfs = ((regs->cr_ifs & 0x3fffffffff)
+ | (ar_ec << 52)
+ | ((unsigned long) ia64_psr(regs)->cpl << 62));
+
+ /*
+ * CFM.sof -= CFM.sol
+ * CFM.sol = 0
+ * CFM.sor = 0
+ * CFM.rrb.gr = 0
+ * CFM.rrb.fr = 0
+ * CFM.rrb.pr = 0
+ */
+ regs->cr_ifs = ((regs->cr_ifs & 0xffffffc00000007f)
+ - ((regs->cr_ifs >> 7) & 0x7f));
+
+ break;
+
+ default:
+ /*
+ * Unknown opcode.
+ */
+ return rv;
+
+ }
+
+ regs->cr_iip += offset;
+ ia64_psr(regs)->ri = 0;
+
+ if (ia64_psr(regs)->it == 0)
+ unimplemented_address = unimplemented_physical_address(regs->cr_iip);
+ else
+ unimplemented_address = unimplemented_virtual_address(regs->cr_iip);
+
+ if (unimplemented_address) {
+ /*
+ * The target address contains unimplemented bits.
+ */
+ printk("Woah! Unimplemented Instruction Address Trap!\n");
+ siginfo.si_signo = SIGILL;
+ siginfo.si_errno = 0;
+ siginfo.si_code = ILL_BADIADDR;
+ force_sig_info(SIGILL, &siginfo, current);
+ } else if (ia64_psr(regs)->tb) {
+ /*
+ * Branch Tracing is enabled.
+ * Force a taken branch signal.
+ */
+ siginfo.si_signo = SIGTRAP;
+ siginfo.si_errno = 0;
+ siginfo.si_code = TRAP_BRANCH;
+ force_sig_info(SIGTRAP, &siginfo, current);
+ } else if (ia64_psr(regs)->ss) {
+ /*
+ * Single Step is enabled.
+ * Force a trace signal.
+ */
+ siginfo.si_signo = SIGTRAP;
+ siginfo.si_errno = 0;
+ siginfo.si_code = TRAP_TRACE;
+ force_sig_info(SIGTRAP, &siginfo, current);
+ }
+ return rv;
+}
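
The fkt/arg1 protocol above is consumed by the illegal-operation fault handler, which is outside this hunk; a self-contained sketch of the assumed dispatch, with a stand-in for the ia64_set_b1..ia64_set_b5 fixup stubs defined in head.S below:

#include <stdio.h>

struct illegal_op_return { unsigned long fkt, arg1, arg2, arg3; };

/* Hypothetical stand-in for the real fixup stubs: */
static void set_b1(unsigned long val) { printf("b1 <- 0x%lx\n", val); }

int main(void)
{
	/* Pretend the emulator requested a b1 fixup with arg1 = IP + 16: */
	struct illegal_op_return r = { (unsigned long) &set_b1, 0x1010, 0, 0 };

	if (r.fkt == (unsigned long) -1)
		puts("not a brl: fall back to normal illegal-op handling");
	else if (r.fkt)
		((void (*)(unsigned long)) r.fkt)(r.arg1); /* apply fixup */
	else
		puts("emulation complete, resume at the updated cr_iip");
	return 0;
}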
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 0ce1db504..c4383b97f 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -5,15 +5,18 @@
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 Hewlett-Packard Co.
+ * Copyright (C) 1999-2000 Hewlett-Packard Co.
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999-2000 Stephane Eranian <eranian@hpl.hp.com>
*
* Not all EFI Runtime Services are implemented yet, as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
* in a future version. --drummond 1999-07-20
*
* Implemented EFI runtime services and virtual mode calls. --davidm
+ *
+ * Goutham Rao: <goutham.rao@intel.com>
+ * Skip non-WB memory and ignore empty memory ranges.
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -22,6 +25,7 @@
#include <asm/efi.h>
#include <asm/io.h>
+#include <asm/pgtable.h>
#include <asm/processor.h>
#define EFI_DEBUG 0
@@ -172,6 +176,14 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
continue;
}
+ if (!(md->attribute & EFI_MEMORY_WB))
+ continue;
+ if (md->num_pages == 0) {
+ printk("efi_memmap_walk: ignoring empty region at 0x%lx",
+ md->phys_addr);
+ continue;
+ }
+
curr.start = PAGE_OFFSET + md->phys_addr;
curr.end = curr.start + (md->num_pages << 12);
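
The two checks just added amount to a simple descriptor filter; a sketch over a hypothetical descriptor array (EFI_MEMORY_WB is assumed here to be the spec's 0x8 write-back attribute bit):

#include <stdio.h>

#define EFI_MEMORY_WB 0x8UL /* assumed write-back attribute bit */

struct md { unsigned long attribute, num_pages, phys_addr; };

int main(void)
{
	struct md mds[] = {
		{ EFI_MEMORY_WB, 16, 0x100000 }, /* WB, non-empty -> used    */
		{ 0x1,            4, 0x200000 }, /* not WB        -> skipped */
		{ EFI_MEMORY_WB,  0, 0x300000 }, /* empty         -> skipped */
	};

	for (int i = 0; i < 3; i++) {
		if (!(mds[i].attribute & EFI_MEMORY_WB))
			continue;
		if (mds[i].num_pages == 0)
			continue;
		printf("usable region at 0x%lx (%lu pages)\n",
		       mds[i].phys_addr, mds[i].num_pages);
	}
	return 0;
}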
@@ -207,6 +219,61 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
}
}
+/*
+ * Look for the PAL_CODE region reported by EFI and map it using an
+ * ITR to enable safe PAL calls in virtual mode. See the IA-64 Processor
+ * Abstraction Layer chapter 11 in the ADAG.
+ */
+static void
+map_pal_code (void)
+{
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+ int pal_code_count=0;
+ u64 mask, flags;
+ u64 vaddr;
+
+ efi_map_start = __va(ia64_boot_param.efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param.efi_memmap_size;
+ efi_desc_size = ia64_boot_param.efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (md->type != EFI_PAL_CODE) continue;
+
+ if (++pal_code_count > 1) {
+ printk(KERN_ERR "Too many EFI PAL Code memory ranges, dropped @ %lx\n",
+ md->phys_addr);
+ continue;
+ }
+ mask = ~((1 << _PAGE_SIZE_4M)-1); /* XXX should be dynamic? */
+ vaddr = PAGE_OFFSET + md->phys_addr;
+
+ printk(__FUNCTION__": mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+ md->phys_addr, md->phys_addr + (md->num_pages << 12),
+ vaddr & mask, (vaddr & mask) + 4*1024*1024);
+
+ /*
+ * Cannot write to CRx with PSR.ic=1
+ */
+ ia64_clear_ic(flags);
+
+ /*
+ * ITR0/DTR0: used for kernel code/data
+ * ITR1/DTR1: used by HP simulator
+ * ITR2/DTR2: map PAL code
+ * ITR3/DTR3: used to map PAL calls buffer
+ */
+ ia64_itr(0x1, 2, vaddr & mask,
+ pte_val(mk_pte_phys(md->phys_addr,
+ __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX))),
+ _PAGE_SIZE_4M);
+ local_irq_restore(flags);
+ ia64_srlz_i ();
+ }
+}
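
The mask arithmetic above pins the PAL mapping to one fixed 4MB translation register (hence the XXX); a sketch of the alignment math, assuming _PAGE_SIZE_4M is the log2 of the mapping size (22):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 22;              /* assumed _PAGE_SIZE_4M */
	unsigned long mask = ~((1UL << page_shift) - 1);
	unsigned long vaddr = 0xe000000000fedcbaUL; /* made-up PAL vaddr */

	/* Matches the printk above: one 4MB ITR covers the aligned range. */
	printf("ITR covers [0x%lx - 0x%lx)\n",
	       vaddr & mask, (vaddr & mask) + (1UL << page_shift));
	return 0;
}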
+
void __init
efi_init (void)
{
@@ -291,6 +358,8 @@ efi_init (void)
}
}
#endif
+
+ map_pal_code();
}
void
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 4e6f1fc63..2bb45c790 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -1,7 +1,8 @@
/*
* EFI call stub.
*
- * Copyright (C) 1999 David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 David Mosberger <davidm@hpl.hp.com>
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off. We need this because we can't call SetVirtualAddressMap() until
@@ -30,6 +31,7 @@
(IA64_PSR_BN)
#include <asm/processor.h>
+#include <asm/asmmacro.h>
.text
.psr abi64
@@ -39,53 +41,6 @@
.text
/*
- * Switch execution mode from virtual to physical or vice versa.
- *
- * Inputs:
- * r16 = new psr to establish
- */
- .proc switch_mode
-switch_mode:
- {
- alloc r2=ar.pfs,0,0,0,0
- rsm psr.i | psr.ic // disable interrupts and interrupt collection
- mov r15=ip
- }
- ;;
- {
- flushrs // must be first insn in group
- srlz.i
- shr.u r19=r15,61 // r19 <- top 3 bits of current IP
- }
- ;;
- mov cr.ipsr=r16 // set new PSR
- add r3=1f-switch_mode,r15
- xor r15=0x7,r19 // flip the region bits
-
- mov r17=ar.bsp
- mov r14=rp // get return address into a general register
-
- // switch RSE backing store:
- ;;
- dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
- mov r18=ar.rnat // save ar.rnat
- ;;
- mov ar.bspstore=r17 // this steps on ar.rnat
- dep r3=r15,r3,61,3 // make rfi return address physical or virtual
- ;;
- mov cr.iip=r3
- mov cr.ifs=r0
- dep sp=r15,sp,61,3 // make stack pointer physical or virtual
- ;;
- mov ar.rnat=r18 // restore ar.rnat
- dep r14=r15,r14,61,3 // make function return address physical or virtual
- rfi // must be last insn in group
- ;;
-1: mov rp=r14
- br.ret.sptk.few rp
- .endp switch_mode
-
-/*
* Inputs:
* in0 = address of function descriptor of EFI routine to call
* in1..in7 = arguments to routine
@@ -94,13 +49,12 @@ switch_mode:
* r8 = EFI_STATUS returned by called function
*/
- .global efi_call_phys
- .proc efi_call_phys
-efi_call_phys:
-
- alloc loc0=ar.pfs,8,5,7,0
+GLOBAL_ENTRY(efi_call_phys)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,5,7,0
ld8 r2=[in0],8 // load EFI function's entry point
- mov loc1=rp
+ mov loc0=rp
+ UNW(.body)
;;
mov loc2=gp // save global pointer
mov loc4=ar.rsc // save RSE configuration
@@ -121,7 +75,7 @@ efi_call_phys:
;;
andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
mov out3=in4
- br.call.sptk.few rp=switch_mode
+ br.call.sptk.few rp=ia64_switch_mode
.ret0:
mov out4=in5
mov out5=in6
@@ -130,12 +84,11 @@ efi_call_phys:
.ret1:
mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
mov r16=loc3
- br.call.sptk.few rp=switch_mode // return to virtual mode
+ br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
.ret2:
mov ar.rsc=loc4 // restore RSE configuration
- mov ar.pfs=loc0
- mov rp=loc1
+ mov ar.pfs=loc1
+ mov rp=loc0
mov gp=loc2
br.ret.sptk.few rp
-
- .endp efi_call_phys
+END(efi_call_phys)
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 755e3a0c1..e56e3fc8e 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -13,8 +13,6 @@
/*
* Global (preserved) predicate usage on syscall entry/exit path:
*
- *
- * pEOI: See entry.h.
* pKern: See entry.h.
* pSys: See entry.h.
* pNonSys: !pSys
@@ -30,6 +28,7 @@
#include <asm/offsets.h>
#include <asm/processor.h>
#include <asm/unistd.h>
+#include <asm/asmmacro.h>
#include "entry.h"
@@ -42,11 +41,11 @@
* execve() is special because in case of success, we need to
* setup a null register window frame.
*/
- .align 16
- .proc ia64_execve
-ia64_execve:
- alloc loc0=ar.pfs,3,2,4,0
- mov loc1=rp
+ENTRY(ia64_execve)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3))
+ alloc loc1=ar.pfs,3,2,4,0
+ mov loc0=rp
+ UNW(.body)
mov out0=in0 // filename
;; // stop bit between alloc and call
mov out1=in1 // argv
@@ -54,25 +53,22 @@ ia64_execve:
add out3=16,sp // regs
br.call.sptk.few rp=sys_execve
.ret0: cmp4.ge p6,p0=r8,r0
- mov ar.pfs=loc0 // restore ar.pfs
+ mov ar.pfs=loc1 // restore ar.pfs
;;
(p6) mov ar.pfs=r0 // clear ar.pfs in case of success
sxt4 r8=r8 // return 64-bit result
- mov rp=loc1
+ mov rp=loc0
br.ret.sptk.few rp
- .endp ia64_execve
+END(ia64_execve)
- .align 16
- .global sys_clone
- .proc sys_clone
-sys_clone:
+GLOBAL_ENTRY(sys_clone)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
alloc r16=ar.pfs,2,2,3,0;;
- movl r28=1f
- mov loc1=rp
- br.cond.sptk.many save_switch_stack
-1:
- mov loc0=r16 // save ar.pfs across do_fork
+ mov loc0=rp
+ DO_SAVE_SWITCH_STACK
+ mov loc1=r16 // save ar.pfs across do_fork
+ UNW(.body)
adds out2=IA64_SWITCH_STACK_SIZE+16,sp
adds r2=IA64_SWITCH_STACK_SIZE+IA64_PT_REGS_R12_OFFSET+16,sp
cmp.eq p8,p9=in1,r0 // usp == 0?
@@ -82,24 +78,22 @@ sys_clone:
(p9) mov out1=in1
br.call.sptk.few rp=do_fork
.ret1:
- mov ar.pfs=loc0
+ mov ar.pfs=loc1
+ UNW(.restore sp)
adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
- mov rp=loc1
+ mov rp=loc0
;;
br.ret.sptk.many rp
- .endp sys_clone
+END(sys_clone)
/*
- * prev_task <- switch_to(struct task_struct *next)
+ * prev_task <- ia64_switch_to(struct task_struct *next)
*/
- .align 16
- .global ia64_switch_to
- .proc ia64_switch_to
-ia64_switch_to:
+GLOBAL_ENTRY(ia64_switch_to)
+ UNW(.prologue)
alloc r16=ar.pfs,1,0,0,0
- movl r28=1f
- br.cond.sptk.many save_switch_stack
-1:
+ DO_SAVE_SWITCH_STACK
+ UNW(.body)
// disable interrupts to ensure atomicity for next few instructions:
mov r17=psr // M-unit
;;
@@ -123,66 +117,60 @@ ia64_switch_to:
mov psr.l=r17
;;
srlz.d
-
- movl r28=1f
- br.cond.sptk.many load_switch_stack
-1:
+ DO_LOAD_SWITCH_STACK( )
br.ret.sptk.few rp
- .endp ia64_switch_to
+END(ia64_switch_to)
+#ifndef CONFIG_IA64_NEW_UNWIND
/*
* Like save_switch_stack, but also save the stack frame that is active
* at the time this function is called.
*/
- .align 16
- .proc save_switch_stack_with_current_frame
-save_switch_stack_with_current_frame:
-1: {
- alloc r16=ar.pfs,0,0,0,0 // pass ar.pfs to save_switch_stack
- mov r28=ip
- }
- ;;
- adds r28=1f-1b,r28
- br.cond.sptk.many save_switch_stack
-1: br.ret.sptk.few rp
- .endp save_switch_stack_with_current_frame
+ENTRY(save_switch_stack_with_current_frame)
+ UNW(.prologue)
+ alloc r16=ar.pfs,0,0,0,0 // pass ar.pfs to save_switch_stack
+ DO_SAVE_SWITCH_STACK
+ br.ret.sptk.few rp
+END(save_switch_stack_with_current_frame)
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+
/*
* Note that interrupts are enabled during save_switch_stack and
* load_switch_stack. This means that we may get an interrupt with
* "sp" pointing to the new kernel stack while ar.bspstore is still
* pointing to the old kernel backing store area. Since ar.rsc,
* ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts,
- * this is not a problem.
+ * this is not a problem. Also, we don't need to specify unwind
+ * information for preserved registers that are not modified in
+ * save_switch_stack as the right unwind information is already
+ * specified at the call-site of save_switch_stack.
*/
/*
* save_switch_stack:
* - r16 holds ar.pfs
- * - r28 holds address to return to
+ * - b7 holds address to return to
* - rp (b0) holds return address to save
*/
- .align 16
- .global save_switch_stack
- .proc save_switch_stack
-save_switch_stack:
+GLOBAL_ENTRY(save_switch_stack)
+ UNW(.prologue)
+ UNW(.altrp b7)
flushrs // flush dirty regs to backing store (must be first in insn group)
mov r17=ar.unat // preserve caller's
- adds r2=-IA64_SWITCH_STACK_SIZE+16,sp // r2 = &sw->caller_unat
+ adds r2=16,sp // r2 = &sw->caller_unat
;;
mov r18=ar.fpsr // preserve fpsr
mov ar.rsc=r0 // put RSE in mode: enforced lazy, little endian, pl 0
;;
mov r19=ar.rnat
- adds r3=-IA64_SWITCH_STACK_SIZE+24,sp // r3 = &sw->ar_fpsr
-
- // Note: the instruction ordering is important here: we can't
- // store anything to the switch stack before sp is updated
- // as otherwise an interrupt might overwrite the memory!
- adds sp=-IA64_SWITCH_STACK_SIZE,sp
+ adds r3=24,sp // r3 = &sw->ar_fpsr
;;
+ .savesp ar.unat,SW(CALLER_UNAT)
st8 [r2]=r17,16
+ .savesp ar.fpsr,SW(AR_FPSR)
st8 [r3]=r18,24
;;
+ UNW(.body)
stf.spill [r2]=f2,32
stf.spill [r3]=f3,32
mov r21=b0
@@ -259,16 +247,17 @@ save_switch_stack:
st8 [r3]=r21 // save predicate registers
mov ar.rsc=3 // put RSE back into eager mode, pl 0
br.cond.sptk.few b7
- .endp save_switch_stack
+END(save_switch_stack)
/*
* load_switch_stack:
- * - r28 holds address to return to
+ * - b7 holds address to return to
*/
- .align 16
- .proc load_switch_stack
-load_switch_stack:
+ENTRY(load_switch_stack)
+ UNW(.prologue)
+ UNW(.altrp b7)
invala // invalidate ALAT
+ UNW(.body)
adds r2=IA64_SWITCH_STACK_B0_OFFSET+16,sp // get pointer to switch_stack.b0
mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r3=IA64_SWITCH_STACK_B0_OFFSET+24,sp // get pointer to switch_stack.b1
@@ -353,21 +342,16 @@ load_switch_stack:
;;
ld8.fill r4=[r2],16
ld8.fill r5=[r3],16
- mov b7=r28
;;
ld8.fill r6=[r2],16
ld8.fill r7=[r3],16
mov ar.unat=r18 // restore caller's unat
mov ar.fpsr=r19 // restore fpsr
mov ar.rsc=3 // put RSE back into eager mode, pl 0
- adds sp=IA64_SWITCH_STACK_SIZE,sp // pop switch_stack
br.cond.sptk.few b7
- .endp load_switch_stack
+END(load_switch_stack)
- .align 16
- .global __ia64_syscall
- .proc __ia64_syscall
-__ia64_syscall:
+GLOBAL_ENTRY(__ia64_syscall)
.regstk 6,0,0,0
mov r15=in5 // put syscall number in place
break __BREAK_SYSCALL
@@ -377,30 +361,42 @@ __ia64_syscall:
(p6) st4 [r2]=r8
(p6) mov r8=-1
br.ret.sptk.few rp
- .endp __ia64_syscall
+END(__ia64_syscall)
//
// We invoke syscall_trace through this intermediate function to
// ensure that the syscall input arguments are not clobbered. We
// also use it to preserve b6, which contains the syscall entry point.
//
- .align 16
- .global invoke_syscall_trace
- .proc invoke_syscall_trace
-invoke_syscall_trace:
- alloc loc0=ar.pfs,8,3,0,0
+GLOBAL_ENTRY(invoke_syscall_trace)
+#ifdef CONFIG_IA64_NEW_UNWIND
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,3,0,0
+ mov loc0=rp
+ UNW(.body)
+ mov loc2=b6
+ ;;
+ br.call.sptk.few rp=syscall_trace
+.ret3: mov rp=loc0
+ mov ar.pfs=loc1
+ mov b6=loc2
+ br.ret.sptk.few rp
+#else /* !CONFIG_IA64_NEW_UNWIND */
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,3,0,0
;; // WAW on CFM at the br.call
- mov loc1=rp
+ mov loc0=rp
br.call.sptk.many rp=save_switch_stack_with_current_frame // must preserve b6!!
.ret2: mov loc2=b6
br.call.sptk.few rp=syscall_trace
.ret3: adds sp=IA64_SWITCH_STACK_SIZE,sp // drop switch_stack frame
- mov rp=loc1
- mov ar.pfs=loc0
+ mov rp=loc0
+ mov ar.pfs=loc1
mov b6=loc2
;;
br.ret.sptk.few rp
- .endp invoke_syscall_trace
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+END(invoke_syscall_trace)
//
// Invoke a system call, but do some tracing before and after the call.
@@ -414,19 +410,19 @@ invoke_syscall_trace:
//
.global ia64_trace_syscall
.global ia64_strace_leave_kernel
- .global ia64_strace_clear_r8
- .proc ia64_strace_clear_r8
-ia64_strace_clear_r8: // this is where we return after cloning when PF_TRACESYS is on
+GLOBAL_ENTRY(ia64_strace_clear_r8)
+ // this is where we return after cloning when PF_TRACESYS is on
+ PT_REGS_UNWIND_INFO(0)
# ifdef CONFIG_SMP
br.call.sptk.few rp=invoke_schedule_tail
# endif
mov r8=0
br strace_check_retval
- .endp ia64_strace_clear_r8
+END(ia64_strace_clear_r8)
- .proc ia64_trace_syscall
-ia64_trace_syscall:
+ENTRY(ia64_trace_syscall)
+ PT_REGS_UNWIND_INFO(0)
br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
.ret4: br.call.sptk.few rp=b6 // do the syscall
strace_check_retval:
@@ -454,7 +450,7 @@ strace_error:
(p6) mov r10=-1
(p6) mov r8=r9
br.cond.sptk.few strace_save_retval
- .endp ia64_trace_syscall
+END(ia64_trace_syscall)
/*
* A couple of convenience macros to help implement/understand the state
@@ -472,12 +468,8 @@ strace_error:
#define rKRBS r22
#define rB6 r21
- .align 16
- .global ia64_ret_from_syscall
- .global ia64_ret_from_syscall_clear_r8
- .global ia64_leave_kernel
- .proc ia64_ret_from_syscall
-ia64_ret_from_syscall_clear_r8:
+GLOBAL_ENTRY(ia64_ret_from_syscall_clear_r8)
+ PT_REGS_UNWIND_INFO(0)
#ifdef CONFIG_SMP
// In SMP mode, we need to call schedule_tail to complete the scheduling process.
// Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
@@ -487,7 +479,10 @@ ia64_ret_from_syscall_clear_r8:
#endif
mov r8=0
;; // added stop bits to prevent r8 dependency
-ia64_ret_from_syscall:
+END(ia64_ret_from_syscall_clear_r8)
+ // fall through
+GLOBAL_ENTRY(ia64_ret_from_syscall)
+ PT_REGS_UNWIND_INFO(0)
cmp.ge p6,p7=r8,r0 // syscall executed successfully?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
adds r3=IA64_PT_REGS_R8_OFFSET+32,sp // r3 = &pt_regs.r10
@@ -497,19 +492,21 @@ ia64_ret_from_syscall:
.mem.offset 8,0
(p6) st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
(p7) br.cond.spnt.few handle_syscall_error // handle potential syscall failure
-
-ia64_leave_kernel:
+END(ia64_ret_from_syscall)
+ // fall through
+GLOBAL_ENTRY(ia64_leave_kernel)
// check & deliver software interrupts:
+ PT_REGS_UNWIND_INFO(0)
#ifdef CONFIG_SMP
- adds r2=IA64_TASK_PROCESSOR_OFFSET,r13
- movl r3=softirq_state
+ adds r2=IA64_TASK_PROCESSOR_OFFSET,r13
+ movl r3=softirq_state
;;
- ld4 r2=[r2]
+ ld4 r2=[r2]
;;
- shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here...
+ shl r2=r2,SMP_LOG_CACHE_BYTES // can't use shladd here...
;;
- add r3=r2,r3
+ add r3=r2,r3
#else
movl r3=softirq_state
#endif
@@ -538,32 +535,28 @@ back_from_resched:
ld4 r14=[r14]
mov rp=r3 // arrange for schedule() to return to back_from_resched
;;
- /*
- * If pEOI is set, we need to write the cr.eoi now and then
- * clear pEOI because both invoke_schedule() and
- * handle_signal_delivery() may call the scheduler. Since
- * we're returning to user-level, we get at most one nested
- * interrupt of the same priority level, which doesn't tax the
- * kernel stack too much.
- */
-(pEOI) mov cr.eoi=r0
cmp.ne p6,p0=r2,r0
cmp.ne p2,p0=r14,r0 // NOTE: pKern is an alias for p2!!
-(pEOI) cmp.ne pEOI,p0=r0,r0 // clear pEOI before calling schedule()
srlz.d
(p6) br.call.spnt.many b6=invoke_schedule // ignore return value
2:
// check & deliver pending signals:
(p2) br.call.spnt.few rp=handle_signal_delivery
-#if defined(CONFIG_SMP) || defined(CONFIG_IA64_SOFTSDV_HACKS)
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
// Check for lost ticks
+ rsm psr.i
mov r2 = ar.itc
+ movl r14 = 1000 // latency tolerance
mov r3 = cr.itm
;;
sub r2 = r2, r3
;;
+ sub r2 = r2, r14
+ ;;
cmp.ge p6,p7 = r2, r0
(p6) br.call.spnt.few rp=invoke_ia64_reset_itm
+ ;;
+ ssm psr.i
#endif
restore_all:
@@ -692,18 +685,6 @@ restore_all:
;;
add r18=r16,r18 // adjust the loadrs value
;;
-#ifdef CONFIG_IA64_SOFTSDV_HACKS
- // Reset ITM if we've missed a timer tick. Workaround for SoftSDV bug
- mov r16 = r2
- mov r2 = ar.itc
- mov r17 = cr.itm
- ;;
- cmp.gt p6,p7 = r2, r17
-(p6) addl r17 = 100, r2
- ;;
- mov cr.itm = r17
- mov r2 = r16
-#endif
dont_preserve_current_frame:
alloc r16=ar.pfs,0,0,0,0 // drop the current call frame (noop for syscalls)
;;
@@ -724,14 +705,14 @@ skip_rbs_switch:
mov ar.rsc=rARRSC
mov ar.unat=rARUNAT
mov cr.ifs=rCRIFS // restore cr.ifs only if not a (synchronous) syscall
-(pEOI) mov cr.eoi=r0
mov pr=rARPR,-1
mov cr.iip=rCRIIP
mov cr.ipsr=rCRIPSR
;;
rfi;; // must be last instruction in an insn group
+END(ia64_leave_kernel)
-handle_syscall_error:
+ENTRY(handle_syscall_error)
/*
* Some system calls (e.g., ptrace, mmap) can return arbitrary
* values which could lead us to mistake a negative return
@@ -740,6 +721,7 @@ handle_syscall_error:
* If pt_regs.r8 is zero, we assume that the call completed
* successfully.
*/
+ PT_REGS_UNWIND_INFO(0)
ld8 r3=[r2] // load pt_regs.r8
sub r9=0,r8 // negate return value to get errno
;;
@@ -753,205 +735,283 @@ handle_syscall_error:
.mem.offset 0,0; st8.spill [r2]=r9 // store errno in pt_regs.r8 and set unat bit
.mem.offset 8,0; st8.spill [r3]=r10 // store error indication in pt_regs.r10 and set unat bit
br.cond.sptk.many ia64_leave_kernel
- .endp handle_syscall_error
+END(handle_syscall_error)
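
The out-of-band error convention described in the comment above is easy to miss in the assembly; a C sketch of the idea with made-up values (a negative-looking mmap result is only an error if the separately recorded status says so):

#include <stdio.h>

int main(void)
{
	/* A legitimate mmap result whose sign bit happens to be set: */
	long ret = (long) 0xa000000000001000UL;
	long recorded_r8 = 0; /* hypothetical: 0 means "call succeeded" */

	if (recorded_r8 != 0 && ret < 0)
		printf("error: errno = %ld\n", -ret);
	else
		printf("success, value = 0x%lx\n", (unsigned long) ret);
	return 0;
}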
#ifdef CONFIG_SMP
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
*/
- .proc invoke_schedule_tail
-invoke_schedule_tail:
- alloc loc0=ar.pfs,8,2,1,0
- mov loc1=rp
+ENTRY(invoke_schedule_tail)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,2,1,0
+ mov loc0=rp
mov out0=r8 // Address of previous task
;;
br.call.sptk.few rp=schedule_tail
.ret8:
- mov ar.pfs=loc0
- mov rp=loc1
+ mov ar.pfs=loc1
+ mov rp=loc0
br.ret.sptk.many rp
- .endp invoke_schedule_tail
+END(invoke_schedule_tail)
+
+#endif /* CONFIG_SMP */
+
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
- .proc invoke_ia64_reset_itm
-invoke_ia64_reset_itm:
- alloc loc0=ar.pfs,8,2,0,0
- mov loc1=rp
+ENTRY(invoke_ia64_reset_itm)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,2,0,0
+ mov loc0=rp
;;
+ UNW(.body)
br.call.sptk.many rp=ia64_reset_itm
;;
- mov ar.pfs=loc0
- mov rp=loc1
+ mov ar.pfs=loc1
+ mov rp=loc0
br.ret.sptk.many rp
- .endp invoke_ia64_reset_itm
+END(invoke_ia64_reset_itm)
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC || CONFIG_IA64_SOFTSDV_HACKS */
/*
* Invoke do_softirq() while preserving in0-in7, which may be needed
* in case a system call gets restarted.
*/
- .proc invoke_do_softirq
-invoke_do_softirq:
- alloc loc0=ar.pfs,8,2,0,0
- mov loc1=rp
-(pEOI) mov cr.eoi=r0
+ENTRY(invoke_do_softirq)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,2,0,0
+ mov loc0=rp
;;
-(pEOI) cmp.ne pEOI,p0=r0,r0
+ UNW(.body)
br.call.sptk.few rp=do_softirq
.ret9:
- mov ar.pfs=loc0
- mov rp=loc1
+ mov ar.pfs=loc1
+ mov rp=loc0
br.ret.sptk.many rp
- .endp invoke_do_softirq
+END(invoke_do_softirq)
/*
* Invoke schedule() while preserving in0-in7, which may be needed
* in case a system call gets restarted.
*/
- .proc invoke_schedule
-invoke_schedule:
- alloc loc0=ar.pfs,8,2,0,0
- mov loc1=rp
+ENTRY(invoke_schedule)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8))
+ alloc loc1=ar.pfs,8,2,0,0
+ mov loc0=rp
;;
+ UNW(.body)
br.call.sptk.few rp=schedule
.ret10:
- mov ar.pfs=loc0
- mov rp=loc1
+ mov ar.pfs=loc1
+ mov rp=loc0
br.ret.sptk.many rp
- .endp invoke_schedule
+END(invoke_schedule)
//
// Set up the stack and call ia64_do_signal. Note that pSys and pNonSys need to
// be set up by the caller. We declare 8 input registers so the system call
// args get preserved, in case we need to restart a system call.
//
- .align 16
- .proc handle_signal_delivery
-handle_signal_delivery:
- alloc loc0=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ENTRY(handle_signal_delivery)
+#ifdef CONFIG_IA64_NEW_UNWIND
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
mov r9=ar.unat
-
- // If the process is being ptraced, the signal may not actually be delivered to
- // the process. Instead, SIGCHLD will be sent to the parent. We need to
- // setup a switch_stack so ptrace can inspect the processes state if necessary.
- adds r2=IA64_TASK_FLAGS_OFFSET,r13
- ;;
- ld8 r2=[r2]
+ mov loc0=rp // save return address
mov out0=0 // there is no "oldset"
- adds out1=16,sp // out1=&pt_regs
- ;;
+ adds out1=0,sp // out1=&sigscratch
(pSys) mov out2=1 // out2==1 => we're in a syscall
- tbit.nz p16,p17=r2,PF_PTRACED_BIT
-(p16) br.cond.spnt.many setup_switch_stack
;;
-back_from_setup_switch_stack:
(pNonSys) mov out2=0 // out2==0 => not a syscall
- adds r3=-IA64_SWITCH_STACK_SIZE+IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
-(p17) adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for (dummy) switch_stack
- ;;
-(p17) st8 [r3]=r9 // save ar.unat in sw->caller_unat
- mov loc1=rp // save return address
+ .fframe 16
+ .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
+ st8 [sp]=r9,-16 // allocate space for ar.unat and save it
+ .body
br.call.sptk.few rp=ia64_do_signal
.ret11:
- adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
+ .restore sp
+ adds sp=16,sp // pop scratch stack space
;;
- ld8 r9=[r3] // load new unat from sw->caller_unat
- mov rp=loc1
+ ld8 r9=[sp] // load new unat from sw->caller_unat
+ mov rp=loc0
;;
-(p17) adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch_stack
-(p17) mov ar.unat=r9
-(p17) mov ar.pfs=loc0
-(p17) br.ret.sptk.many rp
-
- // restore the switch stack (ptrace may have modified it):
- movl r28=1f
- br.cond.sptk.many load_switch_stack
-1: br.ret.sptk.many rp
- // NOT REACHED
-
-setup_switch_stack:
- movl r28=back_from_setup_switch_stack
- mov r16=loc0
- br.cond.sptk.many save_switch_stack
- // NOT REACHED
-
- .endp handle_signal_delivery
+ mov ar.unat=r9
+ mov ar.pfs=loc1
+ br.ret.sptk.many rp
+#else /* !CONFIG_IA64_NEW_UNWIND */
+ .prologue
+ alloc r16=ar.pfs,8,0,3,0 // preserve all eight input regs in case of syscall restart!
+ DO_SAVE_SWITCH_STACK
+ UNW(.body)
- .align 16
- .proc sys_rt_sigsuspend
- .global sys_rt_sigsuspend
-sys_rt_sigsuspend:
- alloc loc0=ar.pfs,2,2,3,0
+ mov out0=0 // there is no "oldset"
+ adds out1=16,sp // out1=&sigscratch
+ .pred.rel.mutex pSys, pNonSys
+(pSys) mov out2=1 // out2==1 => we're in a syscall
+(pNonSys) mov out2=0 // out2==0 => not a syscall
+ br.call.sptk.few rp=ia64_do_signal
+.ret11:
+ // restore the switch stack (ptrace may have modified it)
+ DO_LOAD_SWITCH_STACK( )
+ br.ret.sptk.many rp
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+END(handle_signal_delivery)
- // If the process is being ptraced, the signal may not actually be delivered to
- // the process. Instead, SIGCHLD will be sent to the parent. We need to
- // setup a switch_stack so ptrace can inspect the processes state if necessary.
- // Also, the process might not ptraced until stopped in sigsuspend, so this
- // isn't something that we can do conditionally based upon the value of
- // PF_PTRACED_BIT.
+GLOBAL_ENTRY(sys_rt_sigsuspend)
+#ifdef CONFIG_IA64_NEW_UNWIND
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ mov r9=ar.unat
+ mov loc0=rp // save return address
mov out0=in0 // mask
mov out1=in1 // sigsetsize
+ adds out2=0,sp // out2=&sigscratch
;;
- adds out2=16,sp // out1=&pt_regs
- movl r28=back_from_sigsuspend_setup_switch_stack
- mov r16=loc0
- br.cond.sptk.many save_switch_stack
- ;;
-back_from_sigsuspend_setup_switch_stack:
- mov loc1=rp // save return address
- br.call.sptk.many rp=ia64_rt_sigsuspend
+ .fframe 16
+ .spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
+ st8 [sp]=r9,-16 // allocate space for ar.unat and save it
+ .body
+ br.call.sptk.few rp=ia64_rt_sigsuspend
.ret12:
- adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
+ .restore sp
+ adds sp=16,sp // pop scratch stack space
;;
- ld8 r9=[r3] // load new unat from sw->caller_unat
- mov rp=loc1
+ ld8 r9=[sp] // load new unat from sw->caller_unat
+ mov rp=loc0
;;
+ mov ar.unat=r9
+ mov ar.pfs=loc1
+ br.ret.sptk.many rp
+#else /* !CONFIG_IA64_NEW_UNWIND */
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2))
+ alloc r16=ar.pfs,2,0,3,0
+ DO_SAVE_SWITCH_STACK
+ UNW(.body)
- // restore the switch stack (ptrace may have modified it):
- movl r28=1f
- br.cond.sptk.many load_switch_stack
-1: br.ret.sptk.many rp
- // NOT REACHED
- .endp sys_rt_sigsuspend
+ mov out0=in0 // mask
+ mov out1=in1 // sigsetsize
+ adds out2=16,sp // out2=&sigscratch
+ br.call.sptk.many rp=ia64_rt_sigsuspend
+.ret12:
+ // restore the switch stack (ptrace may have modified it)
+ DO_LOAD_SWITCH_STACK( )
+ br.ret.sptk.many rp
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+END(sys_rt_sigsuspend)
- .align 16
- .proc sys_rt_sigreturn
-sys_rt_sigreturn:
+ENTRY(sys_rt_sigreturn)
+#ifdef CONFIG_IA64_NEW_UNWIND
.regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler()
- adds out0=16,sp // out0 = &pt_regs
- adds sp=-IA64_SWITCH_STACK_SIZE,sp // make space for unat and padding
+ PT_REGS_UNWIND_INFO(0)
+ .prologue
+ PT_REGS_SAVES(16)
+ adds sp=-16,sp
+ .body
+ cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall...
+ ;;
+ adds out0=16,sp // out0 = &sigscratch
+ br.call.sptk.few rp=ia64_rt_sigreturn
+.ret13:
+ adds sp=16,sp // doesn't drop pt_regs, so don't mark it as restoring sp!
+ PT_REGS_UNWIND_INFO(0) // instead, create a new body section with the smaller frame
;;
+ ld8 r9=[sp] // load new ar.unat
+ mov b7=r8
+ ;;
+ mov ar.unat=r9
+ br b7
+#else /* !CONFIG_IA64_NEW_UNWIND */
+ .regstk 0,0,3,0 // inherited from gate.s:invoke_sighandler()
+ PT_REGS_UNWIND_INFO(0)
+ UNW(.prologue)
+ UNW(.fframe IA64_PT_REGS_SIZE+IA64_SWITCH_STACK_SIZE)
+ UNW(.spillsp rp, PT(CR_IIP)+IA64_SWITCH_STACK_SIZE)
+ UNW(.spillsp ar.pfs, PT(CR_IFS)+IA64_SWITCH_STACK_SIZE)
+ UNW(.spillsp ar.unat, PT(AR_UNAT)+IA64_SWITCH_STACK_SIZE)
+ UNW(.spillsp pr, PT(PR)+IA64_SWITCH_STACK_SIZE)
+ adds sp=-IA64_SWITCH_STACK_SIZE,sp
cmp.eq pNonSys,p0=r0,r0 // sigreturn isn't a normal syscall...
+ ;;
+ UNW(.body)
+
+ adds out0=16,sp // out0 = &sigscratch
br.call.sptk.few rp=ia64_rt_sigreturn
.ret13:
adds r3=IA64_SWITCH_STACK_CALLER_UNAT_OFFSET+16,sp
;;
ld8 r9=[r3] // load new ar.unat
- mov rp=r8
+ mov b7=r8
;;
+ PT_REGS_UNWIND_INFO(0)
adds sp=IA64_SWITCH_STACK_SIZE,sp // drop (dummy) switch-stack frame
mov ar.unat=r9
- br rp
- .endp sys_rt_sigreturn
+ br b7
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+END(sys_rt_sigreturn)
- .align 16
- .global ia64_prepare_handle_unaligned
- .proc ia64_prepare_handle_unaligned
-ia64_prepare_handle_unaligned:
- movl r28=1f
+GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
//
// r16 = fake ar.pfs, we simply need to make sure
// privilege is still 0
//
+ PT_REGS_UNWIND_INFO(0)
mov r16=r0
- br.cond.sptk.few save_switch_stack
-1: br.call.sptk.few rp=ia64_handle_unaligned // stack frame setup in ivt
+ DO_SAVE_SWITCH_STACK
+ br.call.sptk.few rp=ia64_handle_unaligned // stack frame setup in ivt
.ret14:
- movl r28=2f
- br.cond.sptk.many load_switch_stack
-2: br.cond.sptk.many rp // goes to ia64_leave_kernel
- .endp ia64_prepare_handle_unaligned
+ DO_LOAD_SWITCH_STACK(PT_REGS_UNWIND_INFO(0))
+ br.cond.sptk.many rp // goes to ia64_leave_kernel
+END(ia64_prepare_handle_unaligned)
+
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+ //
+ // unw_init_running(void (*callback)(info, arg), void *arg)
+ //
+# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
+
+GLOBAL_ENTRY(unw_init_running)
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+ alloc loc1=ar.pfs,2,3,3,0
+ ;;
+ ld8 loc2=[in0],8
+ mov loc0=rp
+ mov r16=loc1
+ DO_SAVE_SWITCH_STACK
+ .body
+
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
+ .fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
+ SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
+ adds sp=-EXTRA_FRAME_SIZE,sp
+ .body
+ ;;
+ adds out0=16,sp // &info
+ mov out1=r13 // current
+ adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
+ br.call.sptk.few rp=unw_init_frame_info
+1: adds out0=16,sp // &info
+ mov b6=loc2
+ mov loc2=gp // save gp across indirect function call
+ ;;
+ ld8 gp=[in0]
+ mov out1=in1 // arg
+ br.call.sptk.few rp=b6 // invoke the callback function
+1: mov gp=loc2 // restore gp
+
+ // For now, we don't allow changing registers from within
+ // unw_init_running; if we ever want to allow that, we'd
+ // have to do a load_switch_stack here:
+ .restore sp
+ adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp
+
+ mov ar.pfs=loc1
+ mov rp=loc0
+ br.ret.sptk.many rp
+END(unw_init_running)
+
+#endif
.rodata
.align 8
@@ -1053,9 +1113,9 @@ sys_call_table:
data8 sys_syslog
data8 sys_setitimer
data8 sys_getitimer
- data8 sys_newstat // 1120
- data8 sys_newlstat
- data8 sys_newfstat
+ data8 ia64_oldstat // 1120
+ data8 ia64_oldlstat
+ data8 ia64_oldfstat
data8 sys_vhangup
data8 sys_lchown
data8 sys_vm86 // 1125
@@ -1065,7 +1125,7 @@ sys_call_table:
data8 sys_setdomainname
data8 sys_newuname // 1130
data8 sys_adjtimex
- data8 sys_create_module
+ data8 ia64_create_module
data8 sys_init_module
data8 sys_delete_module
data8 sys_get_kernel_syms // 1135
@@ -1143,9 +1203,9 @@ sys_call_table:
data8 sys_pivot_root
data8 sys_mincore
data8 sys_madvise
- data8 ia64_ni_syscall // 1210
- data8 ia64_ni_syscall
- data8 ia64_ni_syscall
+ data8 sys_newstat // 1210
+ data8 sys_newlstat
+ data8 sys_newfstat
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall // 1215
@@ -1212,4 +1272,3 @@ sys_call_table:
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall
-
diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h
index ecef44f60..41307f1b0 100644
--- a/arch/ia64/kernel/entry.h
+++ b/arch/ia64/kernel/entry.h
@@ -2,7 +2,64 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
-#define pEOI p1 /* should leave_kernel write EOI? */
#define pKern p2 /* will leave_kernel return to kernel-mode? */
#define pSys p4 /* are we processing a (synchronous) system call? */
#define pNonSys p5 /* complement of pSys */
+
+#define PT(f) (IA64_PT_REGS_##f##_OFFSET + 16)
+#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET + 16)
+
+#define PT_REGS_SAVES(off) \
+ UNW(.unwabi @svr4, 'i'); \
+ UNW(.fframe IA64_PT_REGS_SIZE+16+(off)); \
+ UNW(.spillsp rp, PT(CR_IIP)+(off)); \
+ UNW(.spillsp ar.pfs, PT(CR_IFS)+(off)); \
+ UNW(.spillsp ar.unat, PT(AR_UNAT)+(off)); \
+ UNW(.spillsp ar.fpsr, PT(AR_FPSR)+(off)); \
+ UNW(.spillsp pr, PT(PR)+(off));
+
+#define PT_REGS_UNWIND_INFO(off) \
+ UNW(.prologue); \
+ PT_REGS_SAVES(off); \
+ UNW(.body)
+
+#define SWITCH_STACK_SAVES(off) \
+ UNW(.savesp ar.unat,SW(CALLER_UNAT)+(off)); UNW(.savesp ar.fpsr,SW(AR_FPSR)+(off)); \
+ UNW(.spillsp f2,SW(F2)+(off)); UNW(.spillsp f3,SW(F3)+(off)); \
+ UNW(.spillsp f4,SW(F4)+(off)); UNW(.spillsp f5,SW(F5)+(off)); \
+ UNW(.spillsp f16,SW(F16)+(off)); UNW(.spillsp f17,SW(F17)+(off)); \
+ UNW(.spillsp f18,SW(F18)+(off)); UNW(.spillsp f19,SW(F19)+(off)); \
+ UNW(.spillsp f20,SW(F20)+(off)); UNW(.spillsp f21,SW(F21)+(off)); \
+ UNW(.spillsp f22,SW(F22)+(off)); UNW(.spillsp f23,SW(F23)+(off)); \
+ UNW(.spillsp f24,SW(F24)+(off)); UNW(.spillsp f25,SW(F25)+(off)); \
+ UNW(.spillsp f26,SW(F26)+(off)); UNW(.spillsp f27,SW(F27)+(off)); \
+ UNW(.spillsp f28,SW(F28)+(off)); UNW(.spillsp f29,SW(F29)+(off)); \
+ UNW(.spillsp f30,SW(F30)+(off)); UNW(.spillsp f31,SW(F31)+(off)); \
+ UNW(.spillsp r4,SW(R4)+(off)); UNW(.spillsp r5,SW(R5)+(off)); \
+ UNW(.spillsp r6,SW(R6)+(off)); UNW(.spillsp r7,SW(R7)+(off)); \
+ UNW(.spillsp b0,SW(B0)+(off)); UNW(.spillsp b1,SW(B1)+(off)); \
+ UNW(.spillsp b2,SW(B2)+(off)); UNW(.spillsp b3,SW(B3)+(off)); \
+ UNW(.spillsp b4,SW(B4)+(off)); UNW(.spillsp b5,SW(B5)+(off)); \
+ UNW(.spillsp ar.pfs,SW(AR_PFS)+(off)); UNW(.spillsp ar.lc,SW(AR_LC)+(off)); \
+ UNW(.spillsp @priunat,SW(AR_UNAT)+(off)); \
+ UNW(.spillsp ar.rnat,SW(AR_RNAT)+(off)); UNW(.spillsp ar.bspstore,SW(AR_BSPSTORE)+(off)); \
+ UNW(.spillsp pr,SW(PR)+(off))
+
+#define DO_SAVE_SWITCH_STACK \
+ movl r28=1f; \
+ ;; \
+ .fframe IA64_SWITCH_STACK_SIZE; \
+ adds sp=-IA64_SWITCH_STACK_SIZE,sp; \
+ mov b7=r28; \
+ SWITCH_STACK_SAVES(0); \
+ br.cond.sptk.many save_switch_stack; \
+1:
+
+#define DO_LOAD_SWITCH_STACK(extra) \
+ movl r28=1f; \
+ ;; \
+ mov b7=r28; \
+ br.cond.sptk.many load_switch_stack; \
+1: UNW(.restore sp); \
+ extra; \
+ adds sp=IA64_SWITCH_STACK_SIZE,sp
diff --git a/arch/ia64/kernel/fw-emu.c b/arch/ia64/kernel/fw-emu.c
index 23ded0730..9e5ec1668 100644
--- a/arch/ia64/kernel/fw-emu.c
+++ b/arch/ia64/kernel/fw-emu.c
@@ -124,7 +124,18 @@ asm ("
.proc pal_emulator_static
pal_emulator_static:
mov r8=-1
- cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
+
+ mov r9=256
+ ;;
+ cmp.gtu p6,p7=r9,r28 /* r28 <= 255? */
+(p6) br.cond.sptk.few static
+ ;;
+ mov r9=512
+ ;;
+ cmp.gtu p6,p7=r9,r28
+(p6) br.cond.sptk.few stacked
+ ;;
+static: cmp.eq p6,p7=6,r28 /* PAL_PTCE_INFO */
(p7) br.cond.sptk.few 1f
;;
mov r8=0 /* status = 0 */
@@ -157,7 +168,12 @@ pal_emulator_static:
;;
mov ar.lc=r9
mov r8=r0
-1: br.cond.sptk.few rp
+1:
+ br.cond.sptk.few rp
+
+stacked:
+ br.ret.sptk.few rp
+
.endp pal_emulator_static\n");
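
The new r28 range checks split PAL procedure indexes by calling convention before the old PAL_PTCE_INFO match runs; a sketch of the dispatch (assumption: this stub only distinguishes static (< 256) from stacked (256..511), which is narrower than the full PAL convention table):

#include <stdio.h>

static const char *pal_convention(unsigned long index)
{
	if (index < 256)
		return "static";
	if (index < 512)
		return "stacked (stubbed: returns immediately)";
	return "falls through to the static path (no match, r8 stays -1)";
}

int main(void)
{
	unsigned long probes[] = { 6 /* PAL_PTCE_INFO */, 300, 600 };

	for (int i = 0; i < 3; i++)
		printf("index %lu -> %s\n", probes[i], pal_convention(probes[i]));
	return 0;
}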
/* Macro to emulate SAL call using legacy IN and OUT calls to CF8, CFC etc.. */
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 8eabe53d1..f7f8d02ae 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -3,10 +3,11 @@
* each task's text region. For now, it contains the signal
* trampoline code only.
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include <asm/sigcontext.h>
#include <asm/system.h>
@@ -75,15 +76,12 @@
* [sp+16] = sigframe
*/
- .global ia64_sigtramp
- .proc ia64_sigtramp
-ia64_sigtramp:
+GLOBAL_ENTRY(ia64_sigtramp)
ld8 r10=[r3],8 // get signal handler entry point
br.call.sptk.many rp=invoke_sighandler
- .endp ia64_sigtramp
+END(ia64_sigtramp)
- .proc invoke_sighandler
-invoke_sighandler:
+ENTRY(invoke_sighandler)
ld8 gp=[r3] // get signal handler's global pointer
mov b6=r10
cover // push args in interrupted frame onto backing store
@@ -152,10 +150,9 @@ back_from_restore_rbs:
ldf.fill f15=[base1],32
mov r15=__NR_rt_sigreturn
break __BREAK_SYSCALL
- .endp invoke_sighandler
+END(invoke_sighandler)
- .proc setup_rbs
-setup_rbs:
+ENTRY(setup_rbs)
flushrs // must be first in insn group
mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
@@ -167,9 +164,9 @@ setup_rbs:
mov ar.rsc=0xf // set RSE into eager mode, pl 3
invala // invalidate ALAT
br.cond.sptk.many back_from_setup_rbs
+END(setup_rbs)
- .proc restore_rbs
-restore_rbs:
+ENTRY(restore_rbs)
flushrs
mov ar.rsc=r0 // put RSE into enforced lazy mode
adds r16=(RNAT_OFF+SIGCONTEXT_OFF),sp
@@ -181,5 +178,4 @@ restore_rbs:
mov ar.rsc=0xf // (will be restored later on from sc_ar_rsc)
// invala not necessary as that will happen when returning to user-mode
br.cond.sptk.many back_from_restore_rbs
-
- .endp restore_rbs
+END(restore_rbs)
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 35a52628a..d0bc7687f 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -16,6 +16,7 @@
#include <linux/config.h>
+#include <asm/asmmacro.h>
#include <asm/fpu.h>
#include <asm/pal.h>
#include <asm/offsets.h>
@@ -54,10 +55,12 @@ halt_msg:
stringz "Halting kernel\n"
.text
- .align 16
- .global _start
- .proc _start
-_start:
+
+GLOBAL_ENTRY(_start)
+ UNW(.prologue)
+ UNW(.save rp, r4) // terminate unwind chain with a NULL rp
+ UNW(mov r4=r0)
+ UNW(.body)
// set IVT entry point---can't access I/O ports without it
movl r3=ia64_ivt
;;
@@ -156,12 +159,9 @@ alive_msg:
ld8 out0=[r2]
br.call.sptk.few b0=console_print
self: br.sptk.few self // endless loop
- .endp _start
+END(_start)
- .align 16
- .global ia64_save_debug_regs
- .proc ia64_save_debug_regs
-ia64_save_debug_regs:
+GLOBAL_ENTRY(ia64_save_debug_regs)
alloc r16=ar.pfs,1,0,0,0
mov r20=ar.lc // preserve ar.lc
mov ar.lc=IA64_NUM_DBG_REGS-1
@@ -177,13 +177,10 @@ ia64_save_debug_regs:
br.cloop.sptk.few 1b
;;
mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.few b0
- .endp ia64_save_debug_regs
+ br.ret.sptk.few rp
+END(ia64_save_debug_regs)
- .align 16
- .global ia64_load_debug_regs
- .proc ia64_load_debug_regs
-ia64_load_debug_regs:
+GLOBAL_ENTRY(ia64_load_debug_regs)
alloc r16=ar.pfs,1,0,0,0
lfetch.nta [in0]
mov r20=ar.lc // preserve ar.lc
@@ -200,13 +197,10 @@ ia64_load_debug_regs:
br.cloop.sptk.few 1b
;;
mov ar.lc=r20 // restore ar.lc
- br.ret.sptk.few b0
- .endp ia64_load_debug_regs
+ br.ret.sptk.few rp
+END(ia64_load_debug_regs)
- .align 16
- .global __ia64_save_fpu
- .proc __ia64_save_fpu
-__ia64_save_fpu:
+GLOBAL_ENTRY(__ia64_save_fpu)
alloc r2=ar.pfs,1,0,0,0
adds r3=16,in0
;;
@@ -354,12 +348,9 @@ __ia64_save_fpu:
stf.spill.nta [in0]=f126,32
stf.spill.nta [ r3]=f127,32
br.ret.sptk.few rp
- .endp __ia64_save_fpu
+END(__ia64_save_fpu)
- .align 16
- .global __ia64_load_fpu
- .proc __ia64_load_fpu
-__ia64_load_fpu:
+GLOBAL_ENTRY(__ia64_load_fpu)
alloc r2=ar.pfs,1,0,0,0
adds r3=16,in0
;;
@@ -507,12 +498,9 @@ __ia64_load_fpu:
ldf.fill.nta f126=[in0],32
ldf.fill.nta f127=[ r3],32
br.ret.sptk.few rp
- .endp __ia64_load_fpu
+END(__ia64_load_fpu)
- .align 16
- .global __ia64_init_fpu
- .proc __ia64_init_fpu
-__ia64_init_fpu:
+GLOBAL_ENTRY(__ia64_init_fpu)
alloc r2=ar.pfs,0,0,0,0
stf.spill [sp]=f0
mov f32=f0
@@ -644,4 +632,74 @@ __ia64_init_fpu:
ldf.fill f126=[sp]
mov f127=f0
br.ret.sptk.few rp
- .endp __ia64_init_fpu
+END(__ia64_init_fpu)
+
+/*
+ * Switch execution mode from virtual to physical or vice versa.
+ *
+ * Inputs:
+ * r16 = new psr to establish
+ *
+ * Note: RSE must already be in enforced lazy mode
+ */
+GLOBAL_ENTRY(ia64_switch_mode)
+ {
+ alloc r2=ar.pfs,0,0,0,0
+ rsm psr.i | psr.ic // disable interrupts and interrupt collection
+ mov r15=ip
+ }
+ ;;
+ {
+ flushrs // must be first insn in group
+ srlz.i
+ shr.u r19=r15,61 // r19 <- top 3 bits of current IP
+ }
+ ;;
+ mov cr.ipsr=r16 // set new PSR
+ add r3=1f-ia64_switch_mode,r15
+ xor r15=0x7,r19 // flip the region bits
+
+ mov r17=ar.bsp
+ mov r14=rp // get return address into a general register
+
+ // switch RSE backing store:
+ ;;
+ dep r17=r15,r17,61,3 // make ar.bsp physical or virtual
+ mov r18=ar.rnat // save ar.rnat
+ ;;
+ mov ar.bspstore=r17 // this steps on ar.rnat
+ dep r3=r15,r3,61,3 // make rfi return address physical or virtual
+ ;;
+ mov cr.iip=r3
+ mov cr.ifs=r0
+ dep sp=r15,sp,61,3 // make stack pointer physical or virtual
+ ;;
+ mov ar.rnat=r18 // restore ar.rnat
+ dep r14=r15,r14,61,3 // make function return address physical or virtual
+ rfi // must be last insn in group
+ ;;
+1: mov rp=r14
+ br.ret.sptk.few rp
+END(ia64_switch_mode)
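
The virtual/physical switch works by flipping the three region bits of every live address, as the shr.u/xor/dep sequence above shows; a worked example in C:

#include <stdio.h>

int main(void)
{
	unsigned long ip = 0xe000000000012340UL; /* kernel text, region 7 */
	unsigned long rgn = ip >> 61;            /* shr.u r19=r15,61 */
	unsigned long flipped = rgn ^ 0x7;       /* xor r15=0x7,r19  */

	/* dep ...,61,3: deposit the flipped bits back into bits 61..63 */
	unsigned long phys_ip = (ip & ~(7UL << 61)) | (flipped << 61);

	printf("0x%016lx -> 0x%016lx\n", ip, phys_ip);
	/* Region-7 virtual addresses become their physical aliases (and
	 * back again on the return trip); the low 61 bits are untouched. */
	return 0;
}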
+
+#ifdef CONFIG_IA64_BRL_EMU
+
+/*
+ * Assembly routines used by brl_emu.c to set preserved register state.
+ */
+
+#define SET_REG(reg) \
+ GLOBAL_ENTRY(ia64_set_##reg); \
+ alloc r16=ar.pfs,1,0,0,0; \
+ mov reg=r32; \
+ ;; \
+ br.ret.sptk rp; \
+ END(ia64_set_##reg)
+
+SET_REG(b1);
+SET_REG(b2);
+SET_REG(b3);
+SET_REG(b4);
+SET_REG(b5);
+
+#endif /* CONFIG_IA64_BRL_EMU */
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
new file mode 100644
index 000000000..7f01b667c
--- /dev/null
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -0,0 +1,72 @@
+/*
+ * Architecture-specific kernel symbols
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/string.h>
+EXPORT_SYMBOL_NOVERS(memset);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strtok);
+
+#include <linux/pci.h>
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+
+#include <linux/in6.h>
+#include <asm/checksum.h>
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+
+#include <asm/irq.h>
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+
+#include <asm/current.h>
+#include <asm/hardirq.h>
+EXPORT_SYMBOL(irq_stat);
+
+#include <asm/processor.h>
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(kernel_thread);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(synchronize_irq);
+
+#include <asm/smplock.h>
+EXPORT_SYMBOL(kernel_flag);
+
+#include <asm/system.h>
+EXPORT_SYMBOL(__global_sti);
+EXPORT_SYMBOL(__global_cli);
+EXPORT_SYMBOL(__global_save_flags);
+EXPORT_SYMBOL(__global_restore_flags);
+
+#endif
+
+#include <asm/uaccess.h>
+EXPORT_SYMBOL(__copy_user);
+
+#include <asm/unistd.h>
+EXPORT_SYMBOL(__ia64_syscall);
+
+/* from arch/ia64/lib */
+extern void __divdi3(void);
+extern void __udivdi3(void);
+extern void __moddi3(void);
+extern void __umoddi3(void);
+
+EXPORT_SYMBOL_NOVERS(__divdi3);
+EXPORT_SYMBOL_NOVERS(__udivdi3);
+EXPORT_SYMBOL_NOVERS(__moddi3);
+EXPORT_SYMBOL_NOVERS(__umoddi3);
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a01432a60..279befd3b 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -201,10 +201,14 @@ static void show(char * str)
printk(" %d",local_bh_count(i));
printk(" ]\nStack dumps:");
-#ifdef __ia64__
- printk(" ]\nStack dumps: <unimplemented on IA-64---please fix me>");
- /* for now we don't have stack dumping support... */
-#elif __i386__
+#if defined(__ia64__)
+ /*
+ * We can't unwind the stack of another CPU without access to
+ * the registers of that CPU. And sending an IPI when we're
+ * in a potentially wedged state doesn't sound like a smart
+ * idea.
+ */
+#elif defined(__i386__)
for(i=0;i< smp_num_cpus;i++) {
unsigned long esp;
if(i==cpu)
@@ -227,9 +231,7 @@ static void show(char * str)
You lose...
#endif
printk("\nCPU %d:",cpu);
-#ifdef __i386__
show_stack(NULL);
-#endif
printk("\n");
}
@@ -582,7 +584,8 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
+ if (!(status & IRQ_PER_CPU))
+ status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 1a8398f85..1ee2974b5 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -33,7 +33,9 @@
#include <asm/pgtable.h>
#include <asm/system.h>
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#define IRQ_DEBUG 0
+
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
spinlock_t ivr_read_lock;
#endif
@@ -49,7 +51,7 @@ __u8 isa_irq_to_vector_map[16] = {
0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x40, 0x41
};
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
int usbfix;
@@ -63,7 +65,7 @@ usbfix_option (char *str)
__setup("usbfix", usbfix_option);
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+#endif /* CONFIG_ITANIUM_A1_SPECIFIC */
/*
* That's where the IVT branches when we get an external
@@ -73,13 +75,8 @@ __setup("usbfix", usbfix_option);
void
ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
{
- unsigned long bsp, sp, saved_tpr;
-
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
-# ifndef CONFIG_SMP
- static unsigned int max_prio = 0;
- unsigned int prev_prio;
-# endif
+ unsigned long saved_tpr;
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
unsigned long eoi_ptr;
# ifdef CONFIG_USB
@@ -95,18 +92,14 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
spin_lock(&ivr_read_lock);
{
unsigned int tmp;
-
/*
* Disable PCI writes
*/
outl(0x80ff81c0, 0xcf8);
tmp = inl(0xcfc);
outl(tmp | 0x400, 0xcfc);
-
eoi_ptr = inl(0xcfc);
-
vector = ia64_get_ivr();
-
/*
* Enable PCI writes
*/
@@ -118,75 +111,61 @@ ia64_handle_irq (unsigned long vector, struct pt_regs *regs)
if (usbfix)
reenable_usb();
# endif
+#endif /* CONFIG_ITANIUM_A1_SPECIFIC */
-# ifndef CONFIG_SMP
- prev_prio = max_prio;
- if (vector < max_prio) {
- printk ("ia64_handle_irq: got vector %lu while %u was in progress!\n",
- vector, max_prio);
-
- } else
- max_prio = vector;
-# endif /* !CONFIG_SMP */
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+#if IRQ_DEBUG
+ {
+ unsigned long bsp, sp;
+
+ asm ("mov %0=ar.bsp" : "=r"(bsp));
+ asm ("mov %0=sp" : "=r"(sp));
+
+ if ((sp - bsp) < 1024) {
+ static unsigned char count;
+ static long last_time;
+
+ if (count > 5 && jiffies - last_time > 5*HZ)
+ count = 0;
+ if (++count < 5) {
+ last_time = jiffies;
+ printk("ia64_handle_irq: DANGER: less than "
+ "1KB of free stack space!!\n"
+ "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+ }
+ }
+ }
+#endif /* IRQ_DEBUG */
/*
* Always set TPR to limit maximum interrupt nesting depth to
* 16 (without this, it would be ~240, which could easily lead
- * to kernel stack overflows.
+ * to kernel stack overflows).
*/
saved_tpr = ia64_get_tpr();
ia64_srlz_d();
- ia64_set_tpr(vector);
- ia64_srlz_d();
-
- asm ("mov %0=ar.bsp" : "=r"(bsp));
- asm ("mov %0=sp" : "=r"(sp));
-
- if ((sp - bsp) < 1024) {
- static long last_time;
- static unsigned char count;
-
- if (count > 5 && jiffies - last_time > 5*HZ)
- count = 0;
- if (++count < 5) {
- last_time = jiffies;
- printk("ia64_handle_irq: DANGER: less than 1KB of free stack space!!\n"
- "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
+ do {
+ if (vector >= NR_IRQS) {
+ printk("handle_irq: invalid vector %lu\n", vector);
+ ia64_set_tpr(saved_tpr);
+ ia64_srlz_d();
+ return;
}
- }
+ ia64_set_tpr(vector);
+ ia64_srlz_d();
- /*
- * The interrupt is now said to be in service
- */
- if (vector >= NR_IRQS) {
- printk("handle_irq: invalid vector %lu\n", vector);
- goto out;
- }
-
- do_IRQ(vector, regs);
- out:
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
- {
- long pEOI;
-
- asm ("mov %0=0;; (p1) mov %0=1" : "=r"(pEOI));
- if (!pEOI) {
- printk("Yikes: ia64_handle_irq() without pEOI!!\n");
- asm volatile ("cmp.eq p1,p0=r0,r0" : "=r"(pEOI));
- }
- }
-
- local_irq_disable();
-# ifndef CONFIG_SMP
- if (max_prio == vector)
- max_prio = prev_prio;
-# endif /* !CONFIG_SMP */
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+ do_IRQ(vector, regs);
- ia64_srlz_d();
- ia64_set_tpr(saved_tpr);
- ia64_srlz_d();
+ /*
+ * Disable interrupts and send EOI:
+ */
+ local_irq_disable();
+ ia64_set_tpr(saved_tpr);
+ ia64_eoi();
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
+ break;
+#endif
+ vector = ia64_get_ivr();
+ } while (vector != IA64_SPURIOUS_INT);
}
#ifdef CONFIG_SMP
@@ -210,12 +189,12 @@ init_IRQ (void)
ia64_set_lrr0(0, 1);
ia64_set_lrr1(0, 1);
- irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic;
irq_desc[IA64_SPURIOUS_INT].handler = &irq_type_ia64_sapic;
#ifdef CONFIG_SMP
/*
* Configure the IPI vector and handler
*/
+ irq_desc[IPI_IRQ].status |= IRQ_PER_CPU;
irq_desc[IPI_IRQ].handler = &irq_type_ia64_sapic;
setup_irq(IPI_IRQ, &ipi_irqaction);
#endif
@@ -234,7 +213,7 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect)
{
unsigned long ipi_addr;
unsigned long ipi_data;
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
unsigned long flags;
#endif
# define EID 0
@@ -242,13 +221,13 @@ ipi_send (int cpu, int vector, int delivery_mode, int redirect)
ipi_data = (delivery_mode << 8) | (vector & 0xff);
ipi_addr = ipi_base_addr | ((cpu << 8 | EID) << 4) | ((redirect & 1) << 3);
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
spin_lock_irqsave(&ivr_read_lock, flags);
-#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+#endif
writeq(ipi_data, ipi_addr);
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
spin_unlock_irqrestore(&ivr_read_lock, flags);
#endif
}
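
A side note on the TPR programming in ia64_handle_irq() above: the nesting bound of ~16 mentioned in the comment follows from TPR masking by priority class rather than by individual vector. External vectors are grouped sixteen to a class by bits 7:4 of the vector number, so once TPR holds the in-service vector, only higher classes can still preempt. A small illustration (the helper name is made up):

        /* Illustration only: priority class of an external interrupt vector. */
        static inline unsigned int
        vector_to_class (unsigned int vector)
        {
                return vector >> 4;     /* 256 vectors, 16 per priority class */
        }
        /* While TPR.mic == vector_to_class(v), classes <= that value stay
           masked, so at most one interrupt per class -- about 16 -- can nest. */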
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 56dd2a333..d58cd494e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -170,9 +170,31 @@ ia64_ivt:
* The ITLB basically does the same as the VHPT handler except
* that we always insert exactly one instruction TLB entry.
*/
+#if 0
+ /*
+ * This code works, but I don't want to enable it until I have numbers
+ * that prove this to be a win.
+ */
+ mov r31=pr // save predicates
+ ;;
+ thash r17=r16 // compute virtual address of L3 PTE
+ ;;
+ ld8.s r18=[r17] // try to read L3 PTE
+ ;;
+ tnat.nz p6,p0=r18 // did read succeed?
+(p6) br.cond.spnt.many 1f
+ ;;
+ itc.i r18
+ ;;
+ mov pr=r31,-1
+ rfi
+
+1: rsm psr.dt // use physical addressing for data
+#else
mov r16=cr.ifa // get address that caused the TLB miss
;;
rsm psr.dt // use physical addressing for data
+#endif
mov r31=pr // save the predicate registers
mov r19=ar.k7 // get page table base address
shl r21=r16,3 // shift bit 60 into sign bit
@@ -222,9 +244,31 @@ ia64_ivt:
* that we always insert exactly one data TLB entry.
*/
mov r16=cr.ifa // get address that caused the TLB miss
+#if 0
+ /*
+ * This code works, but I don't want to enable it until I have numbers
+ * that prove this to be a win.
+ */
+ mov r31=pr // save predicates
+ ;;
+ thash r17=r16 // compute virtual address of L3 PTE
+ ;;
+ ld8.s r18=[r17] // try to read L3 PTE
+ ;;
+ tnat.nz p6,p0=r18 // did read succeed?
+(p6) br.cond.spnt.many 1f
+ ;;
+ itc.d r18
;;
+ mov pr=r31,-1
+ rfi
+
+1: rsm psr.dt // use physical addressing for data
+#else
rsm psr.dt // use physical addressing for data
mov r31=pr // save the predicate registers
+ ;;
+#endif
mov r19=ar.k7 // get page table base address
shl r21=r16,3 // shift bit 60 into sign bit
shr.u r17=r16,61 // get the region number into r17
@@ -265,37 +309,6 @@ ia64_ivt:
mov pr=r31,-1 // restore predicate registers
rfi
- //-----------------------------------------------------------------------------------
- // call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address)
-page_fault:
- SAVE_MIN_WITH_COVER
- //
- // Copy control registers to temporary registers, then turn on psr bits,
- // then copy the temporary regs to the output regs. We have to do this
- // because the "alloc" can cause a mandatory store which could lead to
- // an "Alt DTLB" fault which we can handle only if psr.ic is on.
- //
- mov r8=cr.ifa
- mov r9=cr.isr
- adds r3=8,r2 // set up second base pointer
- ;;
- ssm psr.ic | psr.dt
- ;;
- srlz.i // guarantee that interrupt collection is enabled
- ;;
-(p15) ssm psr.i // restore psr.i
- movl r14=ia64_leave_kernel
- ;;
- alloc r15=ar.pfs,0,0,3,0 // must be first in insn group
- mov out0=r8
- mov out1=r9
- ;;
- SAVE_REST
- mov rp=r14
- ;;
- adds out2=16,r12 // out2 = pointer to pt_regs
- br.call.sptk.few b6=ia64_do_page_fault // ignore return address
-
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
@@ -303,7 +316,7 @@ page_fault:
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RX
;;
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
+ dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
;;
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
@@ -318,18 +331,58 @@ page_fault:
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
mov r16=cr.ifa // get address that caused the TLB miss
movl r17=__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW
+ mov r20=cr.isr
+ mov r21=cr.ipsr
+ mov r19=pr
;;
+ tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
shr.u r18=r16,57 // move address bit 61 to bit 4
- dep r16=0,r16,IA64_PHYS_BITS,(64-IA64_PHYS_BITS) // clear ed, resvd, and unimpl. phys bits
+ dep r16=0,r16,IA64_MAX_PHYS_BITS,(64-IA64_MAX_PHYS_BITS) // clear ed & reserved bits
;;
+ dep r21=-1,r21,IA64_PSR_ED_BIT,1
andcm r18=0x10,r18 // bit 4=~address-bit(61)
dep r16=r17,r16,0,12 // insert PTE control bits into r16
;;
or r16=r16,r18 // set bit 4 (uncached) if the access was to region 6
+(p6) mov cr.ipsr=r21
;;
- itc.d r16 // insert the TLB entry
+(p7) itc.d r16 // insert the TLB entry
+ mov pr=r19,-1
rfi
+ ;;
+
+ //-----------------------------------------------------------------------------------
+ // call do_page_fault (predicates are in r31, psr.dt is off, r16 is faulting address)
+page_fault:
+ SAVE_MIN_WITH_COVER
+ //
+ // Copy control registers to temporary registers, then turn on psr bits,
+ // then copy the temporary regs to the output regs. We have to do this
+ // because the "alloc" can cause a mandatory store which could lead to
+ // an "Alt DTLB" fault which we can handle only if psr.ic is on.
+ //
+ mov r8=cr.ifa
+ mov r9=cr.isr
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic | psr.dt
+ ;;
+ srlz.i // guarantee that interrupt collection is enabled
+ ;;
+(p15) ssm psr.i // restore psr.i
+ movl r14=ia64_leave_kernel
+ ;;
+ alloc r15=ar.pfs,0,0,3,0 // must be first in insn group
+ mov out0=r8
+ mov out1=r9
+ ;;
+ SAVE_REST
+ mov rp=r14
+ ;;
+ adds out2=16,r12 // out2 = pointer to pt_regs
+ br.call.sptk.few b6=ia64_do_page_fault // ignore return address
+
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
@@ -338,7 +391,7 @@ page_fault:
// Access-bit, or Data Access-bit faults cause a nested fault because the
// dTLB entry for the virtual page table isn't present. In such a case,
// we lookup the pte for the faulting address by walking the page table
- // and return to the contination point passed in register r30.
+ // and return to the continuation point passed in register r30.
// In accessing the page tables, we don't need to check for NULL entries
// because if the page tables didn't map the faulting address, it would not
// be possible to receive one of the above faults.
@@ -441,9 +494,6 @@ page_fault:
tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
;;
(p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
-#if 0
- ;;
-#endif
mov pr=r31,-1
#endif /* CONFIG_ITANIUM */
movl r30=1f // load continuation point in case of nested fault
@@ -489,7 +539,6 @@ page_fault:
;;
srlz.d // ensure everyone knows psr.dt is off...
cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
-
#if 1
// Allow syscalls via the old system call number for the time being. This is
// so we can transition to the new syscall number in a relatively smooth
@@ -498,7 +547,6 @@ page_fault:
;;
(p7) cmp.eq.or.andcm p0,p7=r16,r17 // is this the old syscall number?
#endif
-
(p7) br.cond.spnt.many non_syscall
SAVE_MIN // uses r31; defines r2:
@@ -575,13 +623,12 @@ page_fault:
ssm psr.ic | psr.dt // turn interrupt collection and data translation back on
;;
adds r3=8,r2 // set up second base pointer for SAVE_REST
- cmp.eq pEOI,p0=r0,r0 // set pEOI flag so that ia64_leave_kernel writes cr.eoi
srlz.i // ensure everybody knows psr.ic and psr.dt are back on
;;
SAVE_REST
;;
alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
mov out0=r0 // defer reading of cr.ivr to handle_irq...
#else
mov out0=cr.ivr // pass cr.ivr as first arg
@@ -609,6 +656,50 @@ page_fault:
// 0x3c00 Entry 15 (size 64 bundles) Reserved
FAULT(15)
+//
+// Squatting in this space ...
+//
+// This special case dispatcher for illegal operation faults
+// allows preserved registers to be modified through a
+// callback function (asm only) that is handed back from
+// the fault handler in r8. Up to three arguments can be
+// passed to the callback function by returning an aggregate
+// with the callback as its first element, followed by the
+// arguments.
+//
+dispatch_illegal_op_fault:
+ SAVE_MIN_WITH_COVER
+ //
+ // The "alloc" can cause a mandatory store which could lead to
+ // an "Alt DTLB" fault which we can handle only if psr.ic is on.
+ //
+ ssm psr.ic | psr.dt
+ ;;
+ srlz.i // guarantee that interrupt collection is enabled
+ ;;
+(p15) ssm psr.i // restore psr.i
+ adds r3=8,r2 // set up second base pointer for SAVE_REST
+ ;;
+ alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
+ mov out0=ar.ec
+ ;;
+ SAVE_REST
+ ;;
+ br.call.sptk.few rp=ia64_illegal_op_fault
+ ;;
+ alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
+ mov out0=r9
+ mov out1=r10
+ mov out2=r11
+ movl r15=ia64_leave_kernel
+ ;;
+ mov rp=r15
+ mov b6=r8
+ ;;
+ cmp.ne p6,p0=0,r8
+(p6) br.call.dpnt b6=b6 // call returns to ia64_leave_kernel
+ br.sptk ia64_leave_kernel
+
.align 1024
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
@@ -643,14 +734,17 @@ dispatch_to_ia32_handler:
(p6) br.call.dpnt.few b6=non_ia32_syscall
adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
-
+ adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
+ ;;
+ cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
+ st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
;;
 alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group
;;
ld4 r8=[r14],8 // r8 == EAX (syscall number)
- mov r15=0xff
+ mov r15=190 // sys_vfork - last implemented system call
;;
- cmp.ltu.unc p6,p7=r8,r15
+ cmp.leu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
;;
ld4 out2=[r14],8 // r10 == edx
@@ -868,7 +962,16 @@ dispatch_to_fault_handler:
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
- FAULT(24)
+ mov r16=cr.isr
+ mov r31=pr
+ rsm psr.dt // avoid nested faults due to TLB misses...
+ ;;
+ srlz.d // ensure everyone knows psr.dt is off...
+ cmp4.eq p6,p0=0,r16
+(p6) br.sptk dispatch_illegal_op_fault
+ ;;
+ mov r19=24 // fault number
+ br.cond.sptk.many dispatch_to_fault_handler
.align 256
/////////////////////////////////////////////////////////////////////////////////////////
@@ -939,7 +1042,6 @@ dispatch_to_fault_handler:
mov r31=pr // prepare to save predicates
;;
srlz.d // ensure everyone knows psr.dt is off
- mov r19=30 // error vector for fault_handler (when kernel)
br.cond.sptk.many dispatch_unaligned_handler
.align 256
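
The illegal-operation dispatcher above receives its callback and arguments from ia64_illegal_op_fault() as a multi-register aggregate in r8-r11 (b6 gets r8, out0-out2 get r9-r11). In C terms the handler's return value has roughly this shape (a sketch; the field names are illustrative, not taken from this patch):

        /* Illustrative shape of the aggregate returned in r8-r11. */
        struct illegal_op_return {
                unsigned long fkt;      /* r8: asm callback to invoke, or 0 for none */
                unsigned long arg1;     /* r9:  becomes out0 */
                unsigned long arg2;     /* r10: becomes out1 */
                unsigned long arg3;     /* r11: becomes out2 */
        };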
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 150feac03..003b8dd69 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -9,15 +9,16 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) Vijay Chander(vijay@engr.sgi.com)
*
- * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
+ * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes,
+ * logging issues,
* added min save state dump, added INIT handler.
*/
+#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/smp_lock.h>
-#include <linux/config.h>
#include <asm/page.h>
#include <asm/ptrace.h>
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 81966bb99..e4a9f0530 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -6,7 +6,6 @@
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp kstack,
// switch modes, jump to C INIT handler
//
-#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index bcfe1659c..24be2f53d 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -101,7 +101,6 @@
;; \
st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */ \
st8.spill [r17]=rR1,16; /* save original r1 */ \
- cmp.ne pEOI,p0=r0,r0 /* clear pEOI by default */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 1506bacc2..e6f44cfb6 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -4,9 +4,16 @@
*
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
- * Copyright (C) 1999 David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 05/22/2000 eranian Added support for stacked register calls
+ * 05/24/2000 eranian Added support for physical mode static calls
*/
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
.text
.psr abi64
.psr lsb
@@ -24,29 +31,23 @@ pal_entry_point:
*
* in0 Address of the PAL entry point (text address, NOT a function descriptor).
*/
- .align 16
- .global ia64_pal_handler_init
- .proc ia64_pal_handler_init
-ia64_pal_handler_init:
+GLOBAL_ENTRY(ia64_pal_handler_init)
alloc r3=ar.pfs,1,0,0,0
movl r2=pal_entry_point
;;
st8 [r2]=in0
br.ret.sptk.few rp
-
- .endp ia64_pal_handler_init
+END(ia64_pal_handler_init)
/*
* Default PAL call handler. This needs to be coded in assembly because it uses
* the static calling convention, i.e., the RSE may not be used and calls are
* done via "br.cond" (not "br.call").
*/
- .align 16
- .global ia64_pal_default_handler
- .proc ia64_pal_default_handler
-ia64_pal_default_handler:
+GLOBAL_ENTRY(ia64_pal_default_handler)
mov r8=-1
br.cond.sptk.few rp
+END(ia64_pal_default_handler)
/*
* Make a PAL call using the static calling convention.
@@ -56,64 +57,139 @@ ia64_pal_default_handler:
 * in2 - in4 Remaining PAL arguments
*
*/
+GLOBAL_ENTRY(ia64_pal_call_static)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6))
+ alloc loc1 = ar.pfs,6,90,0,0
+ movl loc2 = pal_entry_point
+1: {
+ mov r28 = in0
+ mov r29 = in1
+ mov r8 = ip
+ }
+ ;;
+ ld8 loc2 = [loc2] // loc2 <- entry point
+ mov r30 = in2
+ mov r31 = in3
+ ;;
+ mov loc3 = psr
+ mov loc0 = rp
+ UNW(.body)
+ adds r8 = .ret0-1b,r8
+ ;;
+ rsm psr.i
+ mov b7 = loc2
+ mov rp = r8
+ ;;
+ br.cond.sptk.few b7
+.ret0: mov psr.l = loc3
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+ srlz.d // serialize restoration of psr.l
+ br.ret.sptk.few b0
+END(ia64_pal_call_static)
-#ifdef __GCC_MULTIREG_RETVALS__
-# define arg0 in0
-# define arg1 in1
-# define arg2 in2
-# define arg3 in3
-# define arg4 in4
-#else
-# define arg0 in1
-# define arg1 in2
-# define arg2 in3
-# define arg3 in4
-# define arg4 in5
-#endif
+/*
+ * Make a PAL call using the stacked registers calling convention.
+ *
+ * Inputs:
+ * in0 Index of PAL service
+ * in2 - in3 Remaining PAL arguments
+ */
+GLOBAL_ENTRY(ia64_pal_call_stacked)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5))
+ alloc loc1 = ar.pfs,5,4,87,0
+ movl loc2 = pal_entry_point
- .text
- .psr abi64
- .psr lsb
- .lsb
+ mov r28 = in0 // Index MUST be copied to r28
+ mov out0 = in0 // AND in0 of PAL function
+ mov loc0 = rp
+ UNW(.body)
+ ;;
+ ld8 loc2 = [loc2] // loc2 <- entry point
+ mov out1 = in1
+ mov out2 = in2
+ mov out3 = in3
+ mov loc3 = psr
+ ;;
+ rsm psr.i
+ mov b7 = loc2
+ ;;
+ br.call.sptk.many rp=b7 // now make the call
+.ret2:
+ mov psr.l = loc3
+ mov ar.pfs = loc1
+ mov rp = loc0
+ ;;
+ srlz.d // serialize restoration of psr.l
+ br.ret.sptk.few b0
+END(ia64_pal_call_stacked)
+
+/*
+ * Make a physical mode PAL call using the static registers calling convention.
+ *
+ * Inputs:
+ * in0 Index of PAL service
+ * in2 - in3 Remaining PAL arguments
+ *
+ * PSR_DB, PSR_LP, PSR_TB, PSR_ID, PSR_DA are never set by the kernel.
+ * So we don't need to clear them.
+ */
+#define PAL_PSR_BITS_TO_CLEAR \
+ (IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | \
+ IA64_PSR_DD | IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | \
+ IA64_PSR_DFL | IA64_PSR_DFH)
+
+#define PAL_PSR_BITS_TO_SET \
+ (IA64_PSR_BN)
- .align 16
- .global ia64_pal_call_static
- .proc ia64_pal_call_static
-ia64_pal_call_static:
- alloc loc0 = ar.pfs,6,90,0,0
- movl loc2 = pal_entry_point
+
+GLOBAL_ENTRY(ia64_pal_call_phys_static)
+ UNW(.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(6))
+ alloc loc1 = ar.pfs,6,90,0,0
+ movl loc2 = pal_entry_point
1: {
- mov r28 = arg0
- mov r29 = arg1
- mov r8 = ip
+ mov r28 = in0 // copy procedure index
+ mov r8 = ip // save ip to compute branch
+ mov loc0 = rp // save rp
}
+ UNW(.body)
;;
- ld8 loc2 = [loc2] // loc2 <- entry point
- mov r30 = arg2
- mov r31 = arg3
+ ld8 loc2 = [loc2] // loc2 <- entry point
+ mov r29 = in1 // first argument
+ mov r30 = in2 // copy arg2
+ mov r31 = in3 // copy arg3
;;
- mov loc3 = psr
- mov loc1 = rp
- adds r8 = .ret0-1b,r8
- ;;
- rsm psr.i
- mov b7 = loc2
- mov rp = r8
+ mov loc3 = psr // save psr
+ adds r8 = .ret4-1b,r8 // calculate return address for call
;;
- br.cond.sptk.few b7
-.ret0: mov psr.l = loc3
-#ifndef __GCC_MULTIREG_RETVALS__
- st8 [in0] = r8, 8
+ mov loc4=ar.rsc // save RSE configuration
+ dep.z loc2=loc2,0,61 // convert pal entry point to physical
+ dep.z r8=r8,0,61 // convert rp to physical
;;
- st8 [in0] = r9, 8
+ mov b7 = loc2 // install target to branch reg
+ mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
+ movl r16=PAL_PSR_BITS_TO_CLEAR
+ movl r17=PAL_PSR_BITS_TO_SET
;;
- st8 [in0] = r10, 8
+ or loc3=loc3,r17 // add in psr the bits to set
;;
- st8 [in0] = r11, 8
-#endif
- mov ar.pfs = loc0
- mov rp = loc1
+ andcm r16=loc3,r16 // removes bits to clear from psr
+ br.call.sptk.few rp=ia64_switch_mode
+.ret3:
+ mov rp = r8 // install return address (physical)
+ br.cond.sptk.few b7
+.ret4:
+ mov ar.rsc=r0 // put RSE in enforced lazy, LE mode
+ mov r16=loc3 // r16= original psr
+ br.call.sptk.few rp=ia64_switch_mode // return to virtual mode
+
+.ret5: mov psr.l = loc3 // restore init PSR
+
+ mov ar.pfs = loc1
+ mov rp = loc0
;;
+ mov ar.rsc=loc4 // restore RSE configuration
 srlz.d // serialize restoration of psr.l
br.ret.sptk.few b0
- .endp ia64_pal_call_static
+END(ia64_pal_call_phys_static)
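
From C, these stubs are reached through thin macros that pick the calling convention; the palinfo.c code added below relies on exactly this. A condensed sketch (get_pal_version is a made-up name; PAL_CALL and struct ia64_pal_retval come from asm/pal.h as used further down in this patch):

        /* Sketch of a static-convention PAL call, per the palinfo.c pattern below. */
        static s64
        get_pal_version (pal_version_u_t *cur)
        {
                struct ia64_pal_retval iprv;

                PAL_CALL(iprv, PAL_VERSION, 0, 0, 0);   /* index -> r28, args -> r29-r31 */
                if (cur)
                        cur->pal_version_val = iprv.v1; /* v0 carries the minimum version */
                return iprv.status;                     /* 0 on success */
        }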
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
new file mode 100644
index 000000000..ad40e911e
--- /dev/null
+++ b/arch/ia64/kernel/palinfo.c
@@ -0,0 +1,780 @@
+/*
+ * palinfo.c
+ *
+ * Prints processor-specific information reported by PAL.
+ * This code is based on the specification of PAL as of the
+ * Intel IA-64 Architecture Software Developer's Manual v1.0.
+ *
+ *
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * 05/26/2000 S.Eranian initial release
+ *
+ * ISSUES:
+ * - because of some PAL bugs, some calls return invalid results or
+ * are empty for now.
+ * - remove hack to avoid problem with <= 256M RAM for itr.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/efi.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+/*
+ * Hope to get rid of these in the near future
+ */
+#define IA64_PAL_VERSION_BUG 1
+
+#define PALINFO_VERSION "0.1"
+
+typedef int (*palinfo_func_t)(char*);
+
+typedef struct {
+ const char *name; /* name of the proc entry */
+ palinfo_func_t proc_read; /* function to call for reading */
+ struct proc_dir_entry *entry; /* registered entry (removal) */
+} palinfo_entry_t;
+
+static struct proc_dir_entry *palinfo_dir;
+
+/*
+ * A bunch of string arrays for pretty printing
+ */
+
+static char *cache_types[] = {
+ "", /* not used */
+ "Instruction",
+ "Data",
+ "Data/Instruction" /* unified */
+};
+
+static const char *cache_mattrib[]={
+ "WriteThrough",
+ "WriteBack",
+ "", /* reserved */
+ "" /* reserved */
+};
+
+static const char *cache_st_hints[]={
+ "Temporal, level 1",
+ "Reserved",
+ "Reserved",
+ "Non-temporal, all levels",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved"
+};
+
+static const char *cache_ld_hints[]={
+ "Temporal, level 1",
+ "Non-temporal, level 1",
+ "Reserved",
+ "Non-temporal, all levels",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved"
+};
+
+static const char *rse_hints[]={
+ "enforced lazy",
+ "eager stores",
+ "eager loads",
+ "eager loads and stores"
+};
+
+#define RSE_HINTS_COUNT (sizeof(rse_hints)/sizeof(const char *))
+
+/*
+ * The current revision of Volume 2 of
+ * IA-64 Architecture Software Developer's Manual is wrong.
+ * Table 4-10 has invalid information concerning the ma field:
+ * Correct table is:
+ * bit 0 - 001 - UC
+ * bit 4 - 100 - UC
+ * bit 5 - 101 - UCE
+ * bit 6 - 110 - WC
+ * bit 7 - 111 - NatPage
+ */
+static const char *mem_attrib[]={
+ "Write Back (WB)", /* 000 */
+ "Uncacheable (UC)", /* 001 */
+ "Reserved", /* 010 */
+ "Reserved", /* 011 */
+ "Uncacheable (UC)", /* 100 */
+ "Uncacheable Exported (UCE)", /* 101 */
+ "Write Coalescing (WC)", /* 110 */
+ "NaTPage" /* 111 */
+};
+
+
+
+/*
+ * Allocate a buffer suitable for calling PAL code in Virtual mode
+ *
+ * The documentation (PAL 2.6) requires this buffer to have a pinned
+ * translation to avoid any DTLB faults. For this reason we allocate
+ * a page (large enough to hold any possible reply) and use a DTR
+ * to hold the translation during the call. A call to free_palcall_buffer()
+ * is required to release ALL resources (page + translation).
+ *
+ * The size of the page allocated is based on the PAGE_SIZE defined
+ * at compile time for the kernel, i.e. >= 4KB.
+ *
+ * Return: a pointer to the newly allocated page (virtual address)
+ */
+static void *
+get_palcall_buffer(void)
+{
+ void *tmp;
+
+ tmp = (void *)__get_free_page(GFP_KERNEL);
+ if (tmp == 0) {
+ printk(KERN_ERR "%s: can't get a buffer page\n", __FUNCTION__);
+ } else if ( ((u64)tmp - PAGE_OFFSET) > (1<<_PAGE_SIZE_256M) ) { /* XXX: temporary hack */
+ unsigned long flags;
+
+ /* PSR.ic must be zero to insert new DTR */
+ ia64_clear_ic(flags);
+
+ /*
+ * we only insert a DTR
+ *
+ * XXX: we need to figure out a way to "allocate" TR(s) to avoid
+ * conflicts. Maybe something in an include file like pgtable.h
+ * page.h or processor.h
+ *
+ * ITR0/DTR0: used for kernel code/data
+ * ITR1/DTR1: used by HP simulator
+ * ITR2/DTR2: used to map PAL code
+ */
+ ia64_itr(0x2, 3, (u64)tmp,
+ pte_val(mk_pte_phys(__pa(tmp), __pgprot(__DIRTY_BITS|_PAGE_PL_0|_PAGE_AR_RW))), PAGE_SHIFT);
+
+ ia64_srlz_d ();
+
+ __restore_flags(flags);
+ }
+
+ return tmp;
+}
+
+/*
+ * Free a palcall buffer allocated with the previous call
+ *
+ * The translation is also purged.
+ */
+static void
+free_palcall_buffer(void *addr)
+{
+ __free_page(addr);
+ ia64_ptr(0x2, (u64)addr, PAGE_SHIFT);
+ ia64_srlz_d ();
+}
+
+/*
+ * Take a 64bit vector and produces a string such that
+ * if bit n is set then 2^n in clear text is generated. The adjustment
+ * to the right unit is also done.
+ *
+ * Input:
+ * - a pointer to a buffer to hold the string
+ * - a 64-bit vector
+ * Ouput:
+ * - a pointer to the end of the buffer
+ *
+ */
+static char *
+bitvector_process(char *p, u64 vector)
+{
+ int i,j;
+ const char *units[]={ "", "K", "M", "G", "T" };
+
+ for (i=0, j=0; i < 64; i++ , j=i/10) {
+ if (vector & 0x1) {
+ p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]);
+ }
+ vector >>= 1;
+ }
+ return p;
+}
+
+/*
+ * Takes a 64-bit vector and produces a string such that
+ * if bit n is set then register n is present. The function
+ * takes into account consecutive registers and prints out ranges.
+ *
+ * Input:
+ * - a pointer to a buffer to hold the string
+ * - a 64-bit vector
+ * Ouput:
+ * - a pointer to the end of the buffer
+ *
+ */
+static char *
+bitregister_process(char *p, u64 *reg_info, int max)
+{
+ int i, begin, skip = 0;
+ u64 value = reg_info[0];
+
+ value >>= i = begin = ffs(value) - 1;
+
+ for(; i < max; i++ ) {
+
+ if (i != 0 && (i%64) == 0) value = *++reg_info;
+
+ if ((value & 0x1) == 0 && skip == 0) {
+ if (begin <= i - 2)
+ p += sprintf(p, "%d-%d ", begin, i-1);
+ else
+ p += sprintf(p, "%d ", i-1);
+ skip = 1;
+ begin = -1;
+ } else if ((value & 0x1) && skip == 1) {
+ skip = 0;
+ begin = i;
+ }
+ value >>=1;
+ }
+ if (begin > -1) {
+ if (begin < 127)
+ p += sprintf(p, "%d-127", begin);
+ else
+ p += sprintf(p, "127");
+ }
+
+ return p;
+}
+
+static int
+power_info(char *page)
+{
+ s64 status;
+ char *p = page;
+ pal_power_mgmt_info_u_t *halt_info;
+ int i;
+
+ halt_info = get_palcall_buffer();
+ if (halt_info == 0) return 0;
+
+ status = ia64_pal_halt_info(halt_info);
+ if (status != 0) {
+ free_palcall_buffer(halt_info);
+ return 0;
+ }
+
+ for (i=0; i < 8 ; i++ ) {
+ if (halt_info[i].pal_power_mgmt_info_s.im == 1) {
+ p += sprintf(p, "Power level %d:\n" \
+ "\tentry_latency : %d cycles\n" \
+ "\texit_latency : %d cycles\n" \
+ "\tpower consumption : %d mW\n" \
+ "\tCache+TLB coherency : %s\n", i,
+ halt_info[i].pal_power_mgmt_info_s.entry_latency,
+ halt_info[i].pal_power_mgmt_info_s.exit_latency,
+ halt_info[i].pal_power_mgmt_info_s.power_consumption,
+ halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");
+ } else {
+ p += sprintf(p,"Power level %d: not implemented\n",i);
+ }
+ }
+
+ free_palcall_buffer(halt_info);
+
+ return p - page;
+}
+
+static int
+cache_info(char *page)
+{
+ char *p = page;
+ u64 levels, unique_caches;
+ pal_cache_config_info_t cci;
+ int i,j, k;
+ s64 status;
+
+ if ((status=ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
+ printk("ia64_pal_cache_summary=%ld\n", status);
+ return 0;
+ }
+
+ p += sprintf(p, "Cache levels : %ld\n" \
+ "Unique caches : %ld\n\n",
+ levels,
+ unique_caches);
+
+ for (i=0; i < levels; i++) {
+
+ for (j=2; j >0 ; j--) {
+
+ /* even without unification some level may not be present */
+ if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) {
+ continue;
+ }
+ p += sprintf(p, "%s Cache level %d:\n" \
+ "\tSize : %ld bytes\n" \
+ "\tAttributes : ",
+ cache_types[j+cci.pcci_unified], i+1,
+ cci.pcci_cache_size);
+
+ if (cci.pcci_unified) p += sprintf(p, "Unified ");
+
+ p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]);
+
+ p += sprintf(p, "\tAssociativity : %d\n" \
+ "\tLine size : %d bytes\n" \
+ "\tStride : %d bytes\n",
+ cci.pcci_assoc,
+ 1<<cci.pcci_line_size,
+ 1<<cci.pcci_stride);
+ if (j == 1)
+ p += sprintf(p, "\tStore latency : N/A\n");
+ else
+ p += sprintf(p, "\tStore latency : %d cycle(s)\n",
+ cci.pcci_st_latency);
+
+ p += sprintf(p, "\tLoad latency : %d cycle(s)\n" \
+ "\tStore hints : ",
+ cci.pcci_ld_latency);
+
+ for(k=0; k < 8; k++ ) {
+ if ( cci.pcci_st_hints & 0x1) p += sprintf(p, "[%s]", cache_st_hints[k]);
+ cci.pcci_st_hints >>=1;
+ }
+ p += sprintf(p, "\n\tLoad hints : ");
+
+ for(k=0; k < 8; k++ ) {
+ if ( cci.pcci_ld_hints & 0x1) p += sprintf(p, "[%s]", cache_ld_hints[k]);
+ cci.pcci_ld_hints >>=1;
+ }
+ p += sprintf(p, "\n\tAlias boundary : %d byte(s)\n" \
+ "\tTag LSB : %d\n" \
+ "\tTag MSB : %d\n",
+ 1<<cci.pcci_alias_boundary,
+ cci.pcci_tag_lsb,
+ cci.pcci_tag_msb);
+
+ /* when unified, data(j=2) is enough */
+ if (cci.pcci_unified) break;
+ }
+ }
+ return p - page;
+}
+
+
+static int
+vm_info(char *page)
+{
+ char *p = page;
+ u64 tr_pages =0, vw_pages=0, tc_pages;
+ u64 attrib;
+ pal_vm_info_1_u_t vm_info_1;
+ pal_vm_info_2_u_t vm_info_2;
+ pal_tc_info_u_t tc_info;
+ ia64_ptce_info_t ptce;
+ int i, j;
+ s64 status;
+
+ if ((status=ia64_pal_vm_summary(&vm_info_1, &vm_info_2)) !=0) {
+ printk("ia64_pal_vm_summary=%ld\n", status);
+ return 0;
+ }
+
+
+ p += sprintf(p, "Physical Address Space : %d bits\n" \
+ "Virtual Address Space : %d bits\n" \
+ "Protection Key Registers(PKR) : %d\n" \
+ "Implemented bits in PKR.key : %d\n" \
+ "Hash Tag ID : 0x%x\n" \
+ "Size of RR.rid : %d\n",
+ vm_info_1.pal_vm_info_1_s.phys_add_size,
+ vm_info_2.pal_vm_info_2_s.impl_va_msb+1,
+ vm_info_1.pal_vm_info_1_s.max_pkr+1,
+ vm_info_1.pal_vm_info_1_s.key_size,
+ vm_info_1.pal_vm_info_1_s.hash_tag_id,
+ vm_info_2.pal_vm_info_2_s.rid_size);
+
+ if (ia64_pal_mem_attrib(&attrib) != 0) return 0;
+
+ p += sprintf(p, "Supported memory attributes : %s\n", mem_attrib[attrib&0x7]);
+
+ if ((status=ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {
+ printk("ia64_pal_vm_page_size=%ld\n", status);
+ return 0;
+ }
+
+ p += sprintf(p, "\nTLB walker : %s implemented\n" \
+ "Number of DTR : %d\n" \
+ "Number of ITR : %d\n" \
+ "TLB insertable page sizes : ",
+ vm_info_1.pal_vm_info_1_s.vw ? "\b":"not",
+ vm_info_1.pal_vm_info_1_s.max_dtr_entry+1,
+ vm_info_1.pal_vm_info_1_s.max_itr_entry+1);
+
+
+ p = bitvector_process(p, tr_pages);
+
+ p += sprintf(p, "\nTLB purgeable page sizes : ");
+
+ p = bitvector_process(p, vw_pages);
+
+ if ((status=ia64_get_ptce(&ptce)) != 0) {
+ printk("ia64_get_ptce=%ld\n",status);
+ return 0;
+ }
+
+ p += sprintf(p, "\nPurge base address : 0x%016lx\n" \
+ "Purge outer loop count : %d\n" \
+ "Purge inner loop count : %d\n" \
+ "Purge outer loop stride : %d\n" \
+ "Purge inner loop stride : %d\n",
+ ptce.base,
+ ptce.count[0],
+ ptce.count[1],
+ ptce.stride[0],
+ ptce.stride[1]);
+
+ p += sprintf(p, "TC Levels : %d\n" \
+ "Unique TC(s) : %d\n",
+ vm_info_1.pal_vm_info_1_s.num_tc_levels,
+ vm_info_1.pal_vm_info_1_s.max_unique_tcs);
+
+ for(i=0; i < vm_info_1.pal_vm_info_1_s.num_tc_levels; i++) {
+ for (j=2; j>0 ; j--) {
+ tc_pages = 0; /* just in case */
+
+
+ /* even without unification, some levels may not be present */
+ if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) {
+ continue;
+ }
+
+ p += sprintf(p, "\n%s Translation Cache Level %d:\n" \
+ "\tHash sets : %d\n" \
+ "\tAssociativity : %d\n" \
+ "\tNumber of entries : %d\n" \
+ "\tFlags : ",
+ cache_types[j+tc_info.tc_unified], i+1,
+ tc_info.tc_num_sets,
+ tc_info.tc_associativity,
+ tc_info.tc_num_entries);
+
+ if (tc_info.tc_pf) p += sprintf(p, "PreferredPageSizeOptimized ");
+ if (tc_info.tc_unified) p += sprintf(p, "Unified ");
+ if (tc_info.tc_reduce_tr) p += sprintf(p, "TCReduction");
+
+ p += sprintf(p, "\n\tSupported page sizes: ");
+
+ p = bitvector_process(p, tc_pages);
+
+ /* when unified, data (j=2) is enough */
+ if (tc_info.tc_unified) break;
+ }
+ }
+ p += sprintf(p, "\n");
+
+ return p - page;
+}
+
+
+static int
+register_info(char *page)
+{
+ char *p = page;
+ u64 reg_info[2];
+ u64 info;
+ u64 phys_stacked;
+ pal_hints_u_t hints;
+ u64 iregs, dregs;
+ char *info_type[]={
+ "Implemented AR(s)",
+ "AR(s) with read side-effects",
+ "Implemented CR(s)",
+ "CR(s) with read side-effects",
+ };
+
+ for(info=0; info < 4; info++) {
+
+ if (ia64_pal_register_info(info, &reg_info[0], &reg_info[1]) != 0) return 0;
+
+ p += sprintf(p, "%-32s : ", info_type[info]);
+
+ p = bitregister_process(p, reg_info, 128);
+
+ p += sprintf(p, "\n");
+ }
+
+ if (ia64_pal_rse_info(&phys_stacked, &hints) != 0) return 0;
+
+ p += sprintf(p, "RSE stacked physical registers : %ld\n" \
+ "RSE load/store hints : %ld (%s)\n",
+ phys_stacked,
+ hints.ph_data,
+ hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)");
+
+ if (ia64_pal_debug_info(&iregs, &dregs)) return 0;
+
+ p += sprintf(p, "Instruction debug register pairs : %ld\n" \
+ "Data debug register pairs : %ld\n",
+ iregs, dregs);
+
+ return p - page;
+}
+
+static const char *proc_features[]={
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL,
+ NULL,NULL,NULL,NULL,NULL,
+ "XIP,XPSR,XFS implemented",
+ "XR1-XR3 implemented",
+ "Disable dynamic predicate prediction",
+ "Disable processor physical number",
+ "Disable dynamic data cache prefetch",
+ "Disable dynamic inst cache prefetch",
+ "Disable dynamic branch prediction",
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "Disable BINIT on processor time-out",
+ "Disable dynamic power management (DPM)",
+ "Disable coherency",
+ "Disable cache",
+ "Enable CMCI promotion",
+ "Enable MCA to BINIT promotion",
+ "Enable MCA promotion",
+ "Enable BEER promotion"
+};
+
+
+static int
+processor_info(char *page)
+{
+ char *p = page;
+ const char **v = proc_features;
+ u64 avail=1, status=1, control=1;
+ int i;
+ s64 ret;
+
+ /* must be in physical mode */
+ if ((ret=ia64_pal_proc_get_features(&avail, &status, &control)) != 0) return 0;
+
+ for(i=0; i < 64; i++, v++,avail >>=1, status >>=1, control >>=1) {
+ if ( ! *v ) continue;
+ p += sprintf(p, "%-40s : %s%s %s\n", *v,
+ avail & 0x1 ? "" : "NotImpl",
+ avail & 0x1 ? (status & 0x1 ? "On" : "Off"): "",
+ avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): "");
+ }
+ return p - page;
+}
+
+/*
+ * The physical mode call for PAL_VERSION is working fine.
+ * This function is meant to go away once PAL gets fixed.
+ */
+static inline s64
+ia64_pal_version_phys(pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version)
+{
+ struct ia64_pal_retval iprv;
+ PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0);
+ if (pal_min_version)
+ pal_min_version->pal_version_val = iprv.v0;
+ if (pal_cur_version)
+ pal_cur_version->pal_version_val = iprv.v1;
+ return iprv.status;
+}
+
+static int
+version_info(char *page)
+{
+ s64 status;
+ pal_version_u_t min_ver, cur_ver;
+ char *p = page;
+
+#ifdef IA64_PAL_VERSION_BUG
+ /* The virtual mode call is buggy. But the physical mode call seems
+ * to be ok. Until they fix virtual mode, we do physical.
+ */
+ status = ia64_pal_version_phys(&min_ver, &cur_ver);
+#else
+ /* The system crashes if you enable this code with the wrong PAL
+ * code
+ */
+ status = ia64_pal_version(&min_ver, &cur_ver);
+#endif
+ if (status != 0) return 0;
+
+ p += sprintf(p, "PAL_vendor : 0x%x (min=0x%x)\n" \
+ "PAL_A revision : 0x%x (min=0x%x)\n" \
+ "PAL_A model : 0x%x (min=0x%x)\n" \
+ "PAL_B mode : 0x%x (min=0x%x)\n" \
+ "PAL_B revision : 0x%x (min=0x%x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ min_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_model);
+
+ return p - page;
+}
+
+static int
+perfmon_info(char *page)
+{
+ char *p = page;
+ u64 *pm_buffer;
+ pal_perf_mon_info_u_t pm_info;
+
+ pm_buffer = (u64 *)get_palcall_buffer();
+ if (pm_buffer == 0) return 0;
+
+ if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) {
+ free_palcall_buffer(pm_buffer);
+ return 0;
+ }
+
+#ifdef IA64_PAL_PERF_MON_INFO_BUG
+ pm_buffer[5]=0x3;
+ pm_info.pal_perf_mon_info_s.cycles = 0x12;
+ pm_info.pal_perf_mon_info_s.retired = 0x08;
+#endif
+
+ p += sprintf(p, "PMC/PMD pairs : %d\n" \
+ "Counter width : %d bits\n" \
+ "Cycle event number : %d\n" \
+ "Retired event number : %d\n" \
+ "Implemented PMC : ",
+ pm_info.pal_perf_mon_info_s.generic,
+ pm_info.pal_perf_mon_info_s.width,
+ pm_info.pal_perf_mon_info_s.cycles,
+ pm_info.pal_perf_mon_info_s.retired);
+
+ p = bitregister_process(p, pm_buffer, 256);
+
+ p += sprintf(p, "\nImplemented PMD : ");
+
+ p = bitregister_process(p, pm_buffer+4, 256);
+
+ p += sprintf(p, "\nCycles count capable : ");
+
+ p = bitregister_process(p, pm_buffer+8, 256);
+
+ p += sprintf(p, "\nRetired bundles count capable : ");
+
+ p = bitregister_process(p, pm_buffer+12, 256);
+
+ p += sprintf(p, "\n");
+
+ free_palcall_buffer(pm_buffer);
+
+ return p - page;
+}
+
+static int
+frequency_info(char *page)
+{
+ char *p = page;
+ struct pal_freq_ratio proc, itc, bus;
+ u64 base;
+
+ if (ia64_pal_freq_base(&base) == -1)
+ p += sprintf(p, "Output clock : not implemented\n");
+ else
+ p += sprintf(p, "Output clock : %ld ticks/s\n", base);
+
+ if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
+
+ p += sprintf(p, "Processor/Clock ratio : %ld/%ld\n" \
+ "Bus/Clock ratio : %ld/%ld\n" \
+ "ITC/Clock ratio : %ld/%ld\n",
+ proc.num, proc.den,
+ bus.num, bus.den,
+ itc.num, itc.den);
+
+ return p - page;
+}
+
+
+/*
+ * Entry point routine: all calls go through this function
+ */
+static int
+palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ palinfo_func_t info = (palinfo_func_t)data;
+ int len = info(page);
+
+ if (len <= off+count) *eof = 1;
+
+ *start = page + off;
+ len -= off;
+
+ if (len>count) len = count;
+ if (len<0) len = 0;
+
+ return len;
+}
+
+/*
+ * List name/function pairs for every entry in /proc/palinfo
+ * Must be terminated with the NULL,NULL entry.
+ */
+static palinfo_entry_t palinfo_entries[]={
+ { "version_info", version_info, },
+ { "vm_info", vm_info, },
+ { "cache_info", cache_info, },
+ { "power_info", power_info, },
+ { "register_info", register_info, },
+ { "processor_info", processor_info, },
+ { "perfmon_info", perfmon_info, },
+ { "frequency_info", frequency_info, },
+ { NULL, NULL,}
+};
+
+
+static int __init
+palinfo_init(void)
+{
+ palinfo_entry_t *p;
+
+ printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
+
+ palinfo_dir = create_proc_entry("palinfo", S_IFDIR | S_IRUGO | S_IXUGO, NULL);
+
+ for (p = palinfo_entries; p->name ; p++){
+ p->entry = create_proc_read_entry (p->name, 0, palinfo_dir,
+ palinfo_read_entry, p->proc_read);
+ }
+
+ return 0;
+}
+
+static int __exit
+palinfo_exit(void)
+{
+ palinfo_entry_t *p;
+
+ for (p = palinfo_entries; p->name ; p++){
+ remove_proc_entry (p->name, palinfo_dir);
+ }
+ remove_proc_entry ("palinfo", 0);
+
+ return 0;
+}
+
+module_init(palinfo_init);
+module_exit(palinfo_exit);
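
To make the output format of the helpers above concrete: bitvector_process() renders each set bit n as 2^n scaled to K/M/G/T units, which is how the TLB page-size masks become readable. A worked example (the mask value is made up):

        /* Worked example: bits 12, 16, 20 and 26 set. */
        u64 tr_pages = (1UL << 12) | (1UL << 16) | (1UL << 20) | (1UL << 26);
        p = bitvector_process(p, tr_pages);     /* appends "4K 64K 1M 64M " */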
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 0bc110510..ab86e69b3 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -23,8 +23,8 @@ pci_alloc_consistent (struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle
void *ret;
int gfp = GFP_ATOMIC;
- if (!hwdev || hwdev->dma_mask != 0xffffffff)
- gfp |= GFP_DMA;
+ if (!hwdev || hwdev->dma_mask == 0xffffffff)
+ gfp |= GFP_DMA; /* XXX fix me: should change this to GFP_32BIT or ZONE_32BIT */
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret) {
diff --git a/arch/ia64/kernel/pci.c b/arch/ia64/kernel/pci.c
index 767cfa5ce..2d814b443 100644
--- a/arch/ia64/kernel/pci.c
+++ b/arch/ia64/kernel/pci.c
@@ -133,7 +133,7 @@ pci_find_bios(void)
* Initialization. Uses the SAL interface
*/
-#define PCI_BUSSES_TO_SCAN 2 /* On "real" ;) hardware this will be 255 */
+#define PCI_BUSES_TO_SCAN 255
void __init
pcibios_init(void)
@@ -147,7 +147,7 @@ pcibios_init(void)
}
printk("PCI: Probing PCI hardware\n");
- for (i = 0; i < PCI_BUSSES_TO_SCAN; i++)
+ for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
pci_scan_bus(i, ops, NULL);
platform_pci_fixup();
return;
@@ -197,7 +197,7 @@ pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * r
ranges->mem_end -= bus->resource[1]->start;
}
-int __init
+int
pcibios_enable_device (struct pci_dev *dev)
{
/* Not needed, since we enable all devices at startup. */
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a8c217b9a..58ad3c21c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -23,8 +23,40 @@
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/uaccess.h>
+#include <asm/unwind.h>
#include <asm/user.h>
+static void
+do_show_stack (struct unw_frame_info *info, void *arg)
+{
+ unsigned long ip, sp, bsp;
+
+ printk("\nCall Trace: ");
+ do {
+ unw_get_ip(info, &ip);
+ if (ip == 0)
+ break;
+
+ unw_get_sp(info, &sp);
+ unw_get_bsp(info, &bsp);
+ printk("[<%016lx>] sp=0x%016lx bsp=0x%016lx\n", ip, sp, bsp);
+ } while (unw_unwind(info) >= 0);
+}
+
+void
+show_stack (struct task_struct *task)
+{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ if (!task)
+ unw_init_running(do_show_stack, 0);
+ else {
+ struct unw_frame_info info;
+
+ unw_init_from_blocked_task(&info, task);
+ do_show_stack(&info, 0);
+ }
+#endif
+}
void
show_regs (struct pt_regs *regs)
@@ -71,6 +103,10 @@ show_regs (struct pt_regs *regs)
((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
}
}
+#ifdef CONFIG_IA64_NEW_UNWIND
+ if (!user_mode(regs))
+ show_stack(0);
+#endif
}
void __attribute__((noreturn))
@@ -98,16 +134,49 @@ cpu_idle (void *unused)
if (pm_idle)
(*pm_idle)();
#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
- if (ia64_get_itm() < ia64_get_itc()) {
- extern void ia64_reset_itm (void);
-
- printk("cpu_idle: ITM in past, resetting it...\n");
- ia64_reset_itm();
+ local_irq_disable();
+ {
+ u64 itc, itm;
+
+ itc = ia64_get_itc();
+ itm = ia64_get_itm();
+ if (time_after(itc, itm + 1000)) {
+ extern void ia64_reset_itm (void);
+
+ printk("cpu_idle: ITM in past (itc=%lx,itm=%lx:%lums)\n",
+ itc, itm, (itc - itm)/500000);
+ ia64_reset_itm();
+ }
}
+ local_irq_enable();
#endif
}
}
+void
+ia64_save_extra (struct task_struct *task)
+{
+ extern void ia64_save_debug_regs (unsigned long *save_area);
+ extern void ia32_save_state (struct thread_struct *thread);
+
+ if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
+ ia64_save_debug_regs(&task->thread.dbr[0]);
+ if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ ia32_save_state(&task->thread);
+}
+
+void
+ia64_load_extra (struct task_struct *task)
+{
+ extern void ia64_load_debug_regs (unsigned long *save_area);
+ extern void ia32_load_state (struct thread_struct *thread);
+
+ if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
+ ia64_load_debug_regs(&task->thread.dbr[0]);
+ if (IS_IA32_PROCESS(ia64_task_regs(task)))
+ ia32_load_state(&task->thread);
+}
+
/*
* Copy the state of an ia-64 thread.
*
@@ -234,9 +303,103 @@ copy_thread (int nr, unsigned long clone_flags, unsigned long usp,
return 0;
}
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+void
+do_copy_regs (struct unw_frame_info *info, void *arg)
+{
+ unsigned long ar_bsp, ndirty, *krbs, addr, mask, sp, nat_bits = 0, ip;
+ elf_greg_t *dst = arg;
+ struct pt_regs *pt;
+ char nat;
+ long val;
+ int i;
+
+ memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */
+
+ if (unw_unwind_to_user(info) < 0)
+ return;
+
+ unw_get_sp(info, &sp);
+ pt = (struct pt_regs *) (sp + 16);
+
+ krbs = (unsigned long *) current + IA64_RBS_OFFSET/8;
+ ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
+ ar_bsp = (unsigned long) ia64_rse_skip_regs((long *) pt->ar_bspstore, ndirty);
+
+ /*
+ * Write portion of RSE backing store living on the kernel
+ * stack to the VM of the process.
+ */
+ for (addr = pt->ar_bspstore; addr < ar_bsp; addr += 8)
+ if (ia64_peek(pt, current, addr, &val) == 0)
+ access_process_vm(current, addr, &val, sizeof(val), 1);
+
+ /* r0 is zero */
+ for (i = 1, mask = (1UL << i); i < 32; ++i) {
+ unw_get_gr(info, i, &dst[i], &nat);
+ if (nat)
+ nat_bits |= mask;
+ mask <<= 1;
+ }
+ dst[32] = nat_bits;
+ unw_get_pr(info, &dst[33]);
+
+ for (i = 0; i < 8; ++i)
+ unw_get_br(info, i, &dst[34 + i]);
+
+ unw_get_rp(info, &ip);
+ dst[42] = ip + ia64_psr(pt)->ri;
+ dst[43] = pt->cr_ifs & 0x3fffffffff;
+ dst[44] = pt->cr_ipsr & IA64_PSR_UM;
+
+ unw_get_ar(info, UNW_AR_RSC, &dst[45]);
+ /*
+ * For bsp and bspstore, unw_get_ar() would return the kernel
+ * addresses, but we need the user-level addresses instead:
+ */
+ dst[46] = ar_bsp;
+ dst[47] = pt->ar_bspstore;
+ unw_get_ar(info, UNW_AR_RNAT, &dst[48]);
+ unw_get_ar(info, UNW_AR_CCV, &dst[49]);
+ unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
+ unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
+ dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
+ unw_get_ar(info, UNW_AR_LC, &dst[53]);
+ unw_get_ar(info, UNW_AR_EC, &dst[54]);
+}
+
+void
+do_dump_fpu (struct unw_frame_info *info, void *arg)
+{
+ struct task_struct *fpu_owner = ia64_get_fpu_owner();
+ elf_fpreg_t *dst = arg;
+ int i;
+
+ memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */
+
+ if (unw_unwind_to_user(info) < 0)
+ return;
+
+ /* f0 is 0.0, f1 is 1.0 */
+
+ for (i = 2; i < 32; ++i)
+ unw_get_fr(info, i, dst + i);
+
+ if ((fpu_owner == current) || (current->thread.flags & IA64_THREAD_FPH_VALID)) {
+ ia64_sync_fph(current);
+ memcpy(dst + 32, current->thread.fph, 96*16);
+ }
+}
+
+#endif /* CONFIG_IA64_NEW_UNWIND */
+
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ unw_init_running(do_copy_regs, dst);
+#else
struct switch_stack *sw = ((struct switch_stack *) pt) - 1;
unsigned long ar_ec, cfm, ar_bsp, ndirty, *krbs, addr;
@@ -270,7 +433,7 @@ ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
* ar.rsc ar.bsp ar.bspstore ar.rnat
* ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
*/
- memset(dst, 0, sizeof (dst)); /* don't leak any "random" bits */
+ memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */
/* r0 is zero */ dst[ 1] = pt->r1; dst[ 2] = pt->r2; dst[ 3] = pt->r3;
dst[ 4] = sw->r4; dst[ 5] = sw->r5; dst[ 6] = sw->r6; dst[ 7] = sw->r7;
@@ -285,17 +448,22 @@ ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
dst[34] = pt->b0; dst[35] = sw->b1; dst[36] = sw->b2; dst[37] = sw->b3;
dst[38] = sw->b4; dst[39] = sw->b5; dst[40] = pt->b6; dst[41] = pt->b7;
- dst[42] = pt->cr_iip; dst[43] = pt->cr_ifs;
- dst[44] = pt->cr_ipsr; /* XXX perhaps we should filter out some bits here? --davidm */
+ dst[42] = pt->cr_iip + ia64_psr(pt)->ri;
+ dst[43] = pt->cr_ifs;
+ dst[44] = pt->cr_ipsr & IA64_PSR_UM;
dst[45] = pt->ar_rsc; dst[46] = ar_bsp; dst[47] = pt->ar_bspstore; dst[48] = pt->ar_rnat;
dst[49] = pt->ar_ccv; dst[50] = pt->ar_unat; dst[51] = sw->ar_fpsr; dst[52] = pt->ar_pfs;
dst[53] = sw->ar_lc; dst[54] = (sw->ar_pfs >> 52) & 0x3f;
+#endif /* !CONFIG_IA64_NEW_UNWIND */
}
int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ unw_init_running(do_dump_fpu, dst);
+#else
struct switch_stack *sw = ((struct switch_stack *) pt) - 1;
struct task_struct *fpu_owner = ia64_get_fpu_owner();
@@ -312,6 +480,7 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
}
memcpy(dst + 32, current->thread.fph, 96*16);
}
+#endif
return 1; /* f0-f31 are always valid so we always return 1 */
}
@@ -384,7 +553,7 @@ release_thread (struct task_struct *dead_task)
unsigned long
get_wchan (struct task_struct *p)
{
- struct ia64_frame_info info;
+ struct unw_frame_info info;
unsigned long ip;
int count = 0;
/*
@@ -403,11 +572,11 @@ get_wchan (struct task_struct *p)
* gracefully if the process wasn't really blocked after all.
* --davidm 99/12/15
*/
- ia64_unwind_init_from_blocked_task(&info, p);
+ unw_init_from_blocked_task(&info, p);
do {
- if (ia64_unwind_to_previous_frame(&info) < 0)
+ if (unw_unwind(&info) < 0)
return 0;
- ip = ia64_unwind_get_ip(&info);
+ unw_get_ip(&info, &ip);
if (ip < first_sched || ip >= last_sched)
return ip;
} while (count++ < 16);
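
show_stack() and get_wchan() above are also templates for the new unwind API: set up an unw_frame_info (unw_init_running() for the current context, unw_init_from_blocked_task() for a sleeping task), then step with unw_unwind() and query with the unw_get_*() accessors. A minimal consumer modeled on do_show_stack() (a sketch; count_frames is made up and assumes CONFIG_IA64_NEW_UNWIND):

        /* Count the frames of the current context. */
        static void
        count_frames (struct unw_frame_info *info, void *arg)
        {
                unsigned long ip;
                int *depth = arg;

                do {
                        unw_get_ip(info, &ip);
                        if (ip == 0)
                                break;
                        ++*depth;
                } while (unw_unwind(info) >= 0);
        }

        /* usage: int depth = 0; unw_init_running(count_frames, &depth); */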
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 22ed4f569..0efd42bb8 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -7,6 +7,7 @@
* Derived from the x86 and Alpha versions. Most of the code in here
* could actually be factored into a common set of routines.
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -29,8 +30,74 @@
* id (instruction debug fault disable; one bit)
* dd (data debug fault disable; one bit)
* ri (restart instruction; two bits)
+ * is (instruction set; one bit)
*/
-#define CR_IPSR_CHANGE_MASK 0x06a00100003eUL
+#define IPSR_WRITE_MASK \
+ (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
+#define IPSR_READ_MASK IPSR_WRITE_MASK
+
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+#define PTRACE_DEBUG 1
+
+#if PTRACE_DEBUG
+# define dprintk(format...) printk(format)
+# define inline
+#else
+# define dprintk(format...)
+#endif
+
+/*
+ * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
+ * bitset where bit i is set iff the NaT bit of register i is set.
+ */
+unsigned long
+ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
+{
+# define GET_BITS(first, last, unat) \
+ ({ \
+ unsigned long bit = ia64_unat_pos(&pt->r##first); \
+ unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
+ (ia64_rotl(unat, first) >> bit) & mask; \
+ })
+ unsigned long val;
+
+ val = GET_BITS( 1, 3, scratch_unat);
+ val |= GET_BITS(12, 15, scratch_unat);
+ val |= GET_BITS( 8, 11, scratch_unat);
+ val |= GET_BITS(16, 31, scratch_unat);
+ return val;
+
+# undef GET_BITS
+}
+
+/*
+ * Set the NaT bits for the scratch registers according to NAT and
+ * return the resulting unat (assuming the scratch registers are
+ * stored in PT).
+ */
+unsigned long
+ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
+{
+ unsigned long scratch_unat;
+
+# define PUT_BITS(first, last, nat) \
+ ({ \
+ unsigned long bit = ia64_unat_pos(&pt->r##first); \
+ unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit; \
+ (ia64_rotr(nat, first) << bit) & mask; \
+ })
+ scratch_unat = PUT_BITS( 1, 3, nat);
+ scratch_unat |= PUT_BITS(12, 15, nat);
+ scratch_unat |= PUT_BITS( 8, 11, nat);
+ scratch_unat |= PUT_BITS(16, 31, nat);
+
+ return scratch_unat;
+
+# undef PUT_BITS
+}
+
+#else /* !CONFIG_IA64_NEW_UNWIND */
/*
* Collect the NaT bits for r1-r31 from sw->caller_unat and
@@ -79,28 +146,26 @@ ia64_put_nat_bits (struct pt_regs *pt, struct switch_stack *sw, unsigned long na
# undef PUT_BITS
}
-#define IA64_MLI_TEMPLATE 0x2
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+
+#define IA64_MLX_TEMPLATE 0x2
#define IA64_MOVL_OPCODE 6
void
ia64_increment_ip (struct pt_regs *regs)
{
- unsigned long w0, w1, ri = ia64_psr(regs)->ri + 1;
+ unsigned long w0, ri = ia64_psr(regs)->ri + 1;
if (ri > 2) {
ri = 0;
regs->cr_iip += 16;
} else if (ri == 2) {
get_user(w0, (char *) regs->cr_iip + 0);
- get_user(w1, (char *) regs->cr_iip + 8);
- if (((w0 >> 1) & 0xf) == IA64_MLI_TEMPLATE && (w1 >> 60) == IA64_MOVL_OPCODE) {
+ if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
- * rfi'ing to slot 2 of an MLI bundle causes
+ * rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
- * that to happen... Note that we check the
- * opcode only. "movl" has a vc bit of 0, but
- * since a vc bit of 1 is currently reserved,
- * we might just as well treat it like a movl.
+ * that to happen...
*/
ri = 0;
regs->cr_iip += 16;
@@ -112,21 +177,17 @@ ia64_increment_ip (struct pt_regs *regs)
void
ia64_decrement_ip (struct pt_regs *regs)
{
- unsigned long w0, w1, ri = ia64_psr(regs)->ri - 1;
+ unsigned long w0, ri = ia64_psr(regs)->ri - 1;
if (ia64_psr(regs)->ri == 0) {
regs->cr_iip -= 16;
ri = 2;
get_user(w0, (char *) regs->cr_iip + 0);
- get_user(w1, (char *) regs->cr_iip + 8);
- if (((w0 >> 1) & 0xf) == IA64_MLI_TEMPLATE && (w1 >> 60) == IA64_MOVL_OPCODE) {
+ if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
/*
- * rfi'ing to slot 2 of an MLI bundle causes
+ * rfi'ing to slot 2 of an MLX bundle causes
* an illegal operation fault. We don't want
- * that to happen... Note that we check the
- * opcode only. "movl" has a vc bit of 0, but
- * since a vc bit of 1 is currently reserved,
- * we might just as well treat it like a movl.
+ * that to happen...
*/
ri = 1;
}
@@ -291,7 +352,11 @@ ia64_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr,
laddr = (unsigned long *) addr;
child_regs = ia64_task_regs(child);
+#ifdef CONFIG_IA64_NEW_UNWIND
+ child_stack = (struct switch_stack *) (child->thread.ksp + 16);
+#else
child_stack = (struct switch_stack *) child_regs - 1;
+#endif
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
@@ -335,7 +400,11 @@ ia64_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr,
laddr = (unsigned long *) addr;
child_regs = ia64_task_regs(child);
+#ifdef CONFIG_IA64_NEW_UNWIND
+ child_stack = (struct switch_stack *) (child->thread.ksp + 16);
+#else
child_stack = (struct switch_stack *) child_regs - 1;
+#endif
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
@@ -394,21 +463,43 @@ sync_kernel_register_backing_store (struct task_struct *child,
long new_bsp,
int force_loadrs_to_zero)
{
- unsigned long *krbs, bspstore, bsp, krbs_num_regs, rbs_end, addr, val;
- long ndirty, ret;
- struct pt_regs *child_regs;
+ unsigned long *krbs, bspstore, *kbspstore, bsp, rbs_end, addr, val;
+ long ndirty, ret = 0;
+ struct pt_regs *child_regs = ia64_task_regs(child);
+
+#ifdef CONFIG_IA64_NEW_UNWIND
+ struct unw_frame_info info;
+ unsigned long cfm, sof;
+
+ unw_init_from_blocked_task(&info, child);
+ if (unw_unwind_to_user(&info) < 0)
+ return -1;
+
+ unw_get_bsp(&info, (unsigned long *) &kbspstore);
+
+ krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
+ ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
+ bspstore = child_regs->ar_bspstore;
+ bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
+
+ cfm = child_regs->cr_ifs;
+ if (!(cfm & (1UL << 63)))
+ unw_get_cfm(&info, &cfm);
+ sof = (cfm & 0x7f);
+ rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, sof);
+#else
struct switch_stack *child_stack;
+ unsigned long krbs_num_regs;
- ret = 0;
- child_regs = ia64_task_regs(child);
child_stack = (struct switch_stack *) child_regs - 1;
-
+ kbspstore = (unsigned long *) child_stack->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (child_regs->loadrs >> 19));
bspstore = child_regs->ar_bspstore;
bsp = (long) ia64_rse_skip_regs((long *)bspstore, ndirty);
- krbs_num_regs = ia64_rse_num_regs(krbs, (unsigned long *) child_stack->ar_bspstore);
+ krbs_num_regs = ia64_rse_num_regs(krbs, kbspstore);
rbs_end = (long) ia64_rse_skip_regs((long *)bspstore, krbs_num_regs);
+#endif
/* Return early if nothing to do */
if (bsp == new_bsp)
@@ -437,13 +528,15 @@ sync_kernel_register_backing_store (struct task_struct *child,
}
static void
-sync_thread_rbs (struct task_struct *child, int make_writable)
+sync_thread_rbs (struct task_struct *child, struct mm_struct *mm, int make_writable)
{
struct task_struct *p;
read_lock(&tasklist_lock);
- for_each_task(p) {
- if (p->mm == child->mm && p->state != TASK_RUNNING)
- sync_kernel_register_backing_store(p, 0, make_writable);
+ {
+ for_each_task(p) {
+ if (p->mm == mm && p->state != TASK_RUNNING)
+ sync_kernel_register_backing_store(p, 0, make_writable);
+ }
}
read_unlock(&tasklist_lock);
child->thread.flags |= IA64_THREAD_KRBS_SYNCED;
@@ -452,10 +545,11 @@ sync_thread_rbs (struct task_struct *child, int make_writable)
/*
* Ensure the state in child->thread.fph is up-to-date.
*/
-static void
-sync_fph (struct task_struct *child)
+void
+ia64_sync_fph (struct task_struct *child)
{
if (ia64_psr(ia64_task_regs(child))->mfh && ia64_get_fpu_owner() == child) {
+ ia64_set_fpu_owner(0);
ia64_save_fpu(&child->thread.fph[0]);
child->thread.flags |= IA64_THREAD_FPH_VALID;
}
@@ -465,15 +559,383 @@ sync_fph (struct task_struct *child)
}
}
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+#include <asm/unwind.h>
+
+static int
+access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
+{
+ struct ia64_fpreg fpval;
+ int ret;
+
+ ret = unw_get_fr(info, regnum, &fpval);
+ if (ret < 0)
+ return ret;
+
+ if (write_access) {
+ fpval.u.bits[hi] = *data;
+ ret = unw_set_fr(info, regnum, fpval);
+ } else
+ *data = fpval.u.bits[hi];
+ return ret;
+}
+
+static int
+access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
+{
+ unsigned long *ptr, *rbs, *bspstore, ndirty, regnum;
+ struct switch_stack *sw;
+ struct unw_frame_info info;
+ struct pt_regs *pt;
+
+ pt = ia64_task_regs(child);
+ sw = (struct switch_stack *) (child->thread.ksp + 16);
+
+ if ((addr & 0x7) != 0) {
+ dprintk("ptrace: unaligned register address 0x%lx\n", addr);
+ return -1;
+ }
+
+ if (addr < PT_F127 + 16) {
+ /* accessing fph */
+ ia64_sync_fph(child);
+ ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
+ } else if (addr >= PT_F10 && addr < PT_F15 + 16) {
+ /* scratch registers untouched by kernel (saved in switch_stack) */
+ ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
+ } else if (addr < PT_AR_LC + 8) {
+ /* preserved state: */
+ unsigned long nat_bits, scratch_unat, dummy = 0;
+ struct unw_frame_info info;
+ char nat = 0;
+ int ret;
+
+ unw_init_from_blocked_task(&info, child);
+ if (unw_unwind_to_user(&info) < 0)
+ return -1;
+
+ switch (addr) {
+ case PT_NAT_BITS:
+ if (write_access) {
+ nat_bits = *data;
+ scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
+ if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
+ dprintk("ptrace: failed to set ar.unat\n");
+ return -1;
+ }
+ for (regnum = 4; regnum <= 7; ++regnum) {
+ unw_get_gr(&info, regnum, &dummy, &nat);
+ unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
+ }
+ } else {
+ if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
+ dprintk("ptrace: failed to read ar.unat\n");
+ return -1;
+ }
+ nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
+ for (regnum = 4; regnum <= 7; ++regnum) {
+ unw_get_gr(&info, regnum, &dummy, &nat);
+ nat_bits |= (nat != 0) << regnum;
+ }
+ *data = nat_bits;
+ }
+ return 0;
+
+ case PT_R4: case PT_R5: case PT_R6: case PT_R7:
+ if (write_access) {
+ /* read NaT bit first: */
+ ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
+ if (ret < 0)
+ return ret;
+ }
+ return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
+ write_access);
+
+ case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
+ return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);
+
+ case PT_AR_LC:
+ return unw_access_ar(&info, UNW_AR_LC, data, write_access);
+
+ default:
+ if (addr >= PT_F2 && addr < PT_F5 + 16)
+ return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
+ data, write_access);
+ else if (addr >= PT_F16 && addr < PT_F31 + 16)
+ return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
+ data, write_access);
+ else {
+ dprintk("ptrace: rejecting access to register address 0x%lx\n",
+ addr);
+ return -1;
+ }
+ }
+ } else if (addr < PT_F9+16) {
+ /* scratch state */
+ switch (addr) {
+ case PT_AR_BSP:
+ if (write_access)
+ /* FIXME? Account for lack of ``cover'' in the syscall case */
+ return sync_kernel_register_backing_store(child, *data, 1);
+ else {
+ rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
+ bspstore = (unsigned long *) pt->ar_bspstore;
+ ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19));
+
+ /*
+ * If we're in a system call, no ``cover'' was done. So to
+ * make things uniform, we'll add the appropriate displacement
+ * onto bsp if we're in a system call.
+ */
+ if (!(pt->cr_ifs & (1UL << 63))) {
+ struct unw_frame_info info;
+ unsigned long cfm;
+
+ unw_init_from_blocked_task(&info, child);
+ if (unw_unwind_to_user(&info) < 0)
+ return -1;
+
+ unw_get_cfm(&info, &cfm);
+ ndirty += cfm & 0x7f;
+ }
+ *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+ return 0;
+ }
+
+ case PT_CFM:
+ if (pt->cr_ifs & (1UL << 63)) {
+ if (write_access)
+ pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
+ | (*data & 0x3fffffffffUL));
+ else
+ *data = pt->cr_ifs & 0x3fffffffffUL;
+ } else {
+ /* kernel was entered through a system call */
+ unsigned long cfm;
+
+ unw_init_from_blocked_task(&info, child);
+ if (unw_unwind_to_user(&info) < 0)
+ return -1;
+
+ unw_get_cfm(&info, &cfm);
+ if (write_access)
+				unw_set_cfm(&info, ((cfm & ~0x3fffffffffUL)
+ | (*data & 0x3fffffffffUL)));
+ else
+ *data = cfm;
+ }
+ return 0;
+
+ case PT_CR_IPSR:
+ if (write_access)
+ pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
+ | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
+ else
+ *data = (pt->cr_ipsr & IPSR_READ_MASK);
+ return 0;
+
+ case PT_R1: case PT_R2: case PT_R3:
+ case PT_R8: case PT_R9: case PT_R10: case PT_R11:
+ case PT_R12: case PT_R13: case PT_R14: case PT_R15:
+ case PT_R16: case PT_R17: case PT_R18: case PT_R19:
+ case PT_R20: case PT_R21: case PT_R22: case PT_R23:
+ case PT_R24: case PT_R25: case PT_R26: case PT_R27:
+ case PT_R28: case PT_R29: case PT_R30: case PT_R31:
+ case PT_B0: case PT_B6: case PT_B7:
+ case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
+ case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
+ case PT_AR_BSPSTORE:
+ case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT:
+ case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
+ /* scratch register */
+ ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
+ break;
+
+ default:
+ /* disallow accessing anything else... */
+ dprintk("ptrace: rejecting access to register address 0x%lx\n",
+ addr);
+ return -1;
+ }
+ } else {
+ /* access debug registers */
+
+ if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+ child->thread.flags |= IA64_THREAD_DBG_VALID;
+ memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
+			memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
+ }
+ if (addr >= PT_IBR) {
+ regnum = (addr - PT_IBR) >> 3;
+ ptr = &child->thread.ibr[0];
+ } else {
+ regnum = (addr - PT_DBR) >> 3;
+ ptr = &child->thread.dbr[0];
+ }
+
+ if (regnum >= 8) {
+ dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
+ return -1;
+ }
+
+ ptr += regnum;
+ }
+ if (write_access)
+ *ptr = *data;
+ else
+ *data = *ptr;
+ return 0;
+}
+
+#else /* !CONFIG_IA64_NEW_UNWIND */
+
+static int
+access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
+{
+ unsigned long *ptr, *rbs, *bspstore, ndirty, regnum;
+ struct switch_stack *sw;
+ struct pt_regs *pt;
+
+ if ((addr & 0x7) != 0)
+ return -1;
+
+ if (addr < PT_F127+16) {
+ /* accessing fph */
+ ia64_sync_fph(child);
+ ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
+ } else if (addr < PT_F9+16) {
+ /* accessing switch_stack or pt_regs: */
+ pt = ia64_task_regs(child);
+ sw = (struct switch_stack *) pt - 1;
+
+ switch (addr) {
+ case PT_NAT_BITS:
+ if (write_access)
+ ia64_put_nat_bits(pt, sw, *data);
+ else
+ *data = ia64_get_nat_bits(pt, sw);
+ return 0;
+
+ case PT_AR_BSP:
+ if (write_access)
+ /* FIXME? Account for lack of ``cover'' in the syscall case */
+ return sync_kernel_register_backing_store(child, *data, 1);
+ else {
+ rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
+ bspstore = (unsigned long *) pt->ar_bspstore;
+ ndirty = ia64_rse_num_regs(rbs, rbs + (pt->loadrs >> 19));
+
+ /*
+ * If we're in a system call, no ``cover'' was done. So to
+ * make things uniform, we'll add the appropriate displacement
+ * onto bsp if we're in a system call.
+ */
+ if (!(pt->cr_ifs & (1UL << 63)))
+ ndirty += sw->ar_pfs & 0x7f;
+ *data = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
+ return 0;
+ }
+
+ case PT_CFM:
+ if (write_access) {
+ if (pt->cr_ifs & (1UL << 63))
+ pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
+ | (*data & 0x3fffffffffUL));
+ else
+ sw->ar_pfs = ((sw->ar_pfs & ~0x3fffffffffUL)
+ | (*data & 0x3fffffffffUL));
+ return 0;
+ } else {
+ if ((pt->cr_ifs & (1UL << 63)) == 0)
+ *data = sw->ar_pfs;
+ else
+ /* return only the CFM */
+ *data = pt->cr_ifs & 0x3fffffffffUL;
+ return 0;
+ }
+
+ case PT_CR_IPSR:
+ if (write_access)
+ pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
+ | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
+ else
+ *data = (pt->cr_ipsr & IPSR_READ_MASK);
+ return 0;
+
+ case PT_R1: case PT_R2: case PT_R3:
+ case PT_R4: case PT_R5: case PT_R6: case PT_R7:
+ case PT_R8: case PT_R9: case PT_R10: case PT_R11:
+ case PT_R12: case PT_R13: case PT_R14: case PT_R15:
+ case PT_R16: case PT_R17: case PT_R18: case PT_R19:
+ case PT_R20: case PT_R21: case PT_R22: case PT_R23:
+ case PT_R24: case PT_R25: case PT_R26: case PT_R27:
+ case PT_R28: case PT_R29: case PT_R30: case PT_R31:
+ case PT_B0: case PT_B1: case PT_B2: case PT_B3:
+ case PT_B4: case PT_B5: case PT_B6: case PT_B7:
+ case PT_F2: case PT_F2+8: case PT_F3: case PT_F3+8:
+ case PT_F4: case PT_F4+8: case PT_F5: case PT_F5+8:
+ case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
+ case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
+ case PT_F10: case PT_F10+8: case PT_F11: case PT_F11+8:
+ case PT_F12: case PT_F12+8: case PT_F13: case PT_F13+8:
+ case PT_F14: case PT_F14+8: case PT_F15: case PT_F15+8:
+ case PT_F16: case PT_F16+8: case PT_F17: case PT_F17+8:
+ case PT_F18: case PT_F18+8: case PT_F19: case PT_F19+8:
+ case PT_F20: case PT_F20+8: case PT_F21: case PT_F21+8:
+ case PT_F22: case PT_F22+8: case PT_F23: case PT_F23+8:
+ case PT_F24: case PT_F24+8: case PT_F25: case PT_F25+8:
+ case PT_F26: case PT_F26+8: case PT_F27: case PT_F27+8:
+ case PT_F28: case PT_F28+8: case PT_F29: case PT_F29+8:
+ case PT_F30: case PT_F30+8: case PT_F31: case PT_F31+8:
+ case PT_AR_BSPSTORE:
+ case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS: case PT_AR_RNAT:
+ case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
+ case PT_AR_LC:
+ ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
+ break;
+
+ default:
+ /* disallow accessing anything else... */
+ return -1;
+ }
+ } else {
+ /* access debug registers */
+
+ if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
+ child->thread.flags |= IA64_THREAD_DBG_VALID;
+ memset(child->thread.dbr, 0, sizeof child->thread.dbr);
+ memset(child->thread.ibr, 0, sizeof child->thread.ibr);
+ }
+ if (addr >= PT_IBR) {
+ regnum = (addr - PT_IBR) >> 3;
+ ptr = &child->thread.ibr[0];
+ } else {
+ regnum = (addr - PT_DBR) >> 3;
+ ptr = &child->thread.dbr[0];
+ }
+
+ if (regnum >= 8)
+ return -1;
+
+ ptr += regnum;
+ }
+ if (write_access)
+ *ptr = *data;
+ else
+ *data = *ptr;
+ return 0;
+}
+
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+
asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
long arg4, long arg5, long arg6, long arg7, long stack)
{
struct pt_regs *regs = (struct pt_regs *) &stack;
- struct switch_stack *child_stack;
- struct pt_regs *child_regs;
struct task_struct *child;
- unsigned long flags, regnum, *base;
+ unsigned long flags;
long ret;
lock_kernel();
@@ -489,17 +951,21 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ret = -ESRCH;
read_lock(&tasklist_lock);
- child = find_task_by_pid(pid);
+ {
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ }
read_unlock(&tasklist_lock);
if (!child)
goto out;
ret = -EPERM;
if (pid == 1) /* no messing around with init! */
- goto out;
+ goto out_tsk;
if (request == PTRACE_ATTACH) {
if (child == current)
- goto out;
+ goto out_tsk;
if ((!child->dumpable ||
(current->uid != child->euid) ||
(current->uid != child->suid) ||
@@ -508,10 +974,10 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
(current->gid != child->sgid) ||
(!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
(current->gid != child->gid)) && !capable(CAP_SYS_PTRACE))
- goto out;
+ goto out_tsk;
/* the same process cannot be attached many times */
if (child->flags & PF_PTRACED)
- goto out;
+ goto out_tsk;
child->flags |= PF_PTRACED;
if (child->p_pptr != current) {
unsigned long flags;
@@ -524,199 +990,98 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
}
send_sig(SIGSTOP, child, 1);
ret = 0;
- goto out;
+ goto out_tsk;
}
ret = -ESRCH;
if (!(child->flags & PF_PTRACED))
- goto out;
+ goto out_tsk;
if (child->state != TASK_STOPPED) {
if (request != PTRACE_KILL)
- goto out;
+ goto out_tsk;
}
if (child->p_pptr != current)
- goto out;
+ goto out_tsk;
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: /* read word at location addr */
- if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)
- && atomic_read(&child->mm->mm_users) > 1)
- sync_thread_rbs(child, 0);
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
+ struct mm_struct *mm;
+ long do_sync;
+
+ task_lock(child);
+ {
+ mm = child->mm;
+ do_sync = mm && (atomic_read(&mm->mm_users) > 1);
+ }
+ task_unlock(child);
+ if (do_sync)
+ sync_thread_rbs(child, mm, 0);
+ }
ret = ia64_peek(regs, child, addr, &data);
if (ret == 0) {
ret = data;
regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
}
- goto out;
+ goto out_tsk;
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
- if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)
- && atomic_read(&child->mm->mm_users) > 1)
- sync_thread_rbs(child, 1);
+ if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED)) {
+ struct mm_struct *mm;
+ long do_sync;
+
+ task_lock(child);
+ {
+ mm = child->mm;
+					do_sync = mm && (atomic_read(&mm->mm_users) > 1);
+ }
+ task_unlock(child);
+ if (do_sync)
+ sync_thread_rbs(child, mm, 1);
+ }
ret = ia64_poke(regs, child, addr, data);
- goto out;
+ goto out_tsk;
case PTRACE_PEEKUSR: /* read the word at addr in the USER area */
- ret = -EIO;
- if ((addr & 0x7) != 0)
- goto out;
-
- if (addr < PT_CALLER_UNAT) {
- /* accessing fph */
- sync_fph(child);
- addr += (unsigned long) &child->thread.fph;
- ret = *(unsigned long *) addr;
- } else if (addr < PT_F9+16) {
- /* accessing switch_stack or pt_regs: */
- child_regs = ia64_task_regs(child);
- child_stack = (struct switch_stack *) child_regs - 1;
- ret = *(unsigned long *) ((long) child_stack + addr - PT_CALLER_UNAT);
-
- if (addr == PT_AR_BSP) {
- /* ret currently contains pt_regs.loadrs */
- unsigned long *rbs, *bspstore, ndirty;
-
- rbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
- bspstore = (unsigned long *) child_regs->ar_bspstore;
- ndirty = ia64_rse_num_regs(rbs, rbs + (ret >> 19));
- ret = (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
-
- /*
- * If we're in a system call, no ``cover'' was done. So
- * to make things uniform, we'll add the appropriate
- * displacement onto bsp if we're in a system call.
- *
- * Note: It may be better to leave the system call case
- * alone and subtract the amount of the cover for the
- * non-syscall case. That way the reported bsp value
- * would actually be the correct bsp for the child
- * process.
- */
- if (!(child_regs->cr_ifs & (1UL << 63))) {
- ret = (unsigned long)
- ia64_rse_skip_regs((unsigned long *) ret,
- child_stack->ar_pfs & 0x7f);
- }
- } else if (addr == PT_CFM) {
- /* ret currently contains pt_regs.cr_ifs */
- if ((ret & (1UL << 63)) == 0)
- ret = child_stack->ar_pfs;
- ret &= 0x3fffffffffUL; /* return only the CFM */
- }
- } else {
- if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
- child->thread.flags |= IA64_THREAD_DBG_VALID;
- memset(child->thread.dbr, 0, sizeof child->thread.dbr);
- memset(child->thread.ibr, 0, sizeof child->thread.ibr);
- }
- if (addr >= PT_IBR) {
- regnum = (addr - PT_IBR) >> 3;
- base = &child->thread.ibr[0];
- } else {
- regnum = (addr - PT_DBR) >> 3;
- base = &child->thread.dbr[0];
- }
- if (regnum >= 8)
- goto out;
- ret = base[regnum];
+ if (access_uarea(child, addr, &data, 0) < 0) {
+ ret = -EIO;
+ goto out_tsk;
}
+ ret = data;
regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
- goto out;
+ goto out_tsk;
case PTRACE_POKEUSR: /* write the word at addr in the USER area */
- ret = -EIO;
- if ((addr & 0x7) != 0)
- goto out;
-
- if (addr < PT_CALLER_UNAT) {
- /* accessing fph */
- sync_fph(child);
- addr += (unsigned long) &child->thread.fph;
- *(unsigned long *) addr = data;
- } else if (addr == PT_AR_BSPSTORE || addr == PT_CALLER_UNAT
- || addr == PT_KERNEL_FPSR || addr == PT_K_B0 || addr == PT_K_AR_PFS
- || (PT_K_AR_UNAT <= addr && addr <= PT_K_PR)) {
- /*
- * Don't permit changes to certain registers.
- *
- * We don't allow bspstore to be modified because doing
- * so would mess up any modifications to bsp. (See
- * sync_kernel_register_backing_store for the details.)
- */
- goto out;
- } else if (addr == PT_AR_BSP) {
- /* FIXME? Account for lack of ``cover'' in the syscall case */
- ret = sync_kernel_register_backing_store(child, data, 1);
- goto out;
- } else if (addr == PT_CFM) {
- child_regs = ia64_task_regs(child);
- child_stack = (struct switch_stack *) child_regs - 1;
-
- if (child_regs->cr_ifs & (1UL << 63)) {
- child_regs->cr_ifs = (child_regs->cr_ifs & ~0x3fffffffffUL)
- | (data & 0x3fffffffffUL);
- } else {
- child_stack->ar_pfs = (child_stack->ar_pfs & ~0x3fffffffffUL)
- | (data & 0x3fffffffffUL);
- }
- } else if (addr < PT_F9+16) {
- /* accessing switch_stack or pt_regs */
- child_regs = ia64_task_regs(child);
- child_stack = (struct switch_stack *) child_regs - 1;
-
- if (addr == PT_CR_IPSR)
- data = (data & CR_IPSR_CHANGE_MASK)
- | (child_regs->cr_ipsr & ~CR_IPSR_CHANGE_MASK);
-
- *(unsigned long *) ((long) child_stack + addr - PT_CALLER_UNAT) = data;
- } else {
- if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
- child->thread.flags |= IA64_THREAD_DBG_VALID;
- memset(child->thread.dbr, 0, sizeof child->thread.dbr);
- memset(child->thread.ibr, 0, sizeof child->thread.ibr);
- }
-
- if (addr >= PT_IBR) {
- regnum = (addr - PT_IBR) >> 3;
- base = &child->thread.ibr[0];
- } else {
- regnum = (addr - PT_DBR) >> 3;
- base = &child->thread.dbr[0];
- }
- if (regnum >= 8)
- goto out;
- if (regnum & 1) {
- /* force breakpoint to be effective only for user-level: */
- data &= ~(0x7UL << 56);
- }
- base[regnum] = data;
+ if (access_uarea(child, addr, &data, 1) < 0) {
+ ret = -EIO;
+ goto out_tsk;
}
ret = 0;
- goto out;
+ goto out_tsk;
case PTRACE_GETSIGINFO:
ret = -EIO;
if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t))
|| child->thread.siginfo == 0)
- goto out;
+ goto out_tsk;
copy_to_user((siginfo_t *) data, child->thread.siginfo, sizeof (siginfo_t));
ret = 0;
- goto out;
+ goto out_tsk;
break;
case PTRACE_SETSIGINFO:
ret = -EIO;
if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t))
|| child->thread.siginfo == 0)
- goto out;
+ goto out_tsk;
copy_from_user(child->thread.siginfo, (siginfo_t *) data, sizeof (siginfo_t));
ret = 0;
- goto out;
+ goto out_tsk;
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
case PTRACE_CONT: /* restart after signal. */
ret = -EIO;
if (data > _NSIG)
- goto out;
+ goto out_tsk;
if (request == PTRACE_SYSCALL)
child->flags |= PF_TRACESYS;
else
@@ -732,7 +1097,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
wake_up_process(child);
ret = 0;
- goto out;
+ goto out_tsk;
case PTRACE_KILL:
/*
@@ -741,7 +1106,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
* that it wants to exit.
*/
if (child->state == TASK_ZOMBIE) /* already dead */
- goto out;
+ goto out_tsk;
child->exit_code = SIGKILL;
/* make sure the single step/take-branch tra bits are not set: */
@@ -753,13 +1118,13 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
wake_up_process(child);
ret = 0;
- goto out;
+ goto out_tsk;
case PTRACE_SINGLESTEP: /* let child execute for one instruction */
case PTRACE_SINGLEBLOCK:
ret = -EIO;
if (data > _NSIG)
- goto out;
+ goto out_tsk;
child->flags &= ~PF_TRACESYS;
if (request == PTRACE_SINGLESTEP) {
@@ -775,12 +1140,12 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
- goto out;
+ goto out_tsk;
case PTRACE_DETACH: /* detach a process that was attached. */
ret = -EIO;
if (data > _NSIG)
- goto out;
+ goto out_tsk;
child->flags &= ~(PF_PTRACED|PF_TRACESYS);
child->exit_code = data;
@@ -799,12 +1164,14 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
wake_up_process(child);
ret = 0;
- goto out;
+ goto out_tsk;
default:
ret = -EIO;
- goto out;
+ goto out_tsk;
}
+ out_tsk:
+ free_task_struct(child);
out:
unlock_kernel();
return ret;
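With PEEKUSR/POKEUSR now funneled through access_uarea(), a debugger addresses every register by its PT_* offset. A hedged user-level sketch (the header name is an assumption about where the PT_* constants live, and glibc spells the request PTRACE_PEEKUSER):

	#include <errno.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <asm/ptrace_offsets.h>		/* assumed: defines PT_CR_IIP etc. */

	/* read a stopped child's instruction pointer from the user area */
	static long
	read_child_ip (pid_t pid)
	{
		long val;

		errno = 0;
		val = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
		if (val == -1 && errno != 0)
			return -1;	/* access_uarea() rejected the offset */
		return val;
	}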
diff --git a/arch/ia64/kernel/sal_stub.S b/arch/ia64/kernel/sal_stub.S
deleted file mode 100644
index d73851810..000000000
--- a/arch/ia64/kernel/sal_stub.S
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 1998-2000 Hewlett-Packard Co
- * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#ifndef __GCC_MULTIREG_RETVALS__
- /*
- * gcc currently does not conform to the ia-64 calling
- * convention as far as returning function values are
- * concerned. Instead of returning values up to 32 bytes in
- * size in r8-r11, gcc returns any value bigger than a
- * doubleword via a structure that's allocated by the caller
- * and whose address is passed into the function. Since
- * SAL_PROC returns values according to the calling
- * convention, this stub takes care of copying r8-r11 to the
- * place where gcc expects them.
- */
- .text
- .psr abi64
- .psr lsb
- .lsb
-
- .align 16
- .global ia64_sal_stub
-ia64_sal_stub:
- /*
- * Sheesh, the Cygnus backend passes the pointer to a return value structure in
- * in0 whereas the HP backend passes it in r8. Don't you hate those little
- * differences...
- */
-#ifdef GCC_RETVAL_POINTER_IN_R8
- adds r2=-24,sp
- adds sp=-48,sp
- mov r14=rp
- ;;
- st8 [r2]=r8,8 // save pointer to return value
- addl r3=@ltoff(ia64_sal),gp
- ;;
- ld8 r3=[r3]
- st8 [r2]=gp,8 // save global pointer
- ;;
- ld8 r3=[r3] // fetch the value of ia64_sal
- st8 [r2]=r14 // save return pointer
- ;;
- ld8 r2=[r3],8 // load function's entry point
- ;;
- ld8 gp=[r3] // load function's global pointer
- ;;
- mov b6=r2
- br.call.sptk.few rp=b6
-.ret0: adds r2=24,sp
- ;;
- ld8 r3=[r2],8 // restore pointer to return value
- ;;
- ld8 gp=[r2],8 // restore global pointer
- st8 [r3]=r8,8
- ;;
- ld8 r14=[r2] // restore return pointer
- st8 [r3]=r9,8
- ;;
- mov rp=r14
- st8 [r3]=r10,8
- ;;
- st8 [r3]=r11,8
- adds sp=48,sp
- br.sptk.few rp
-#else
- /*
- * On input:
- * in0 = pointer to return value structure
- * in1 = index of SAL function to call
- * in2..inN = remaining args to SAL call
- */
- /*
- * We allocate one input and eight output register such that the br.call instruction
- * will rename in1-in7 to in0-in6---exactly what we want because SAL doesn't want to
- * see the pointer to the return value structure.
- */
- alloc r15=ar.pfs,1,0,8,0
-
- adds r2=-24,sp
- adds sp=-48,sp
- mov r14=rp
- ;;
- st8 [r2]=r15,8 // save ar.pfs
- addl r3=@ltoff(ia64_sal),gp
- ;;
- ld8 r3=[r3] // get address of ia64_sal
- st8 [r2]=gp,8 // save global pointer
- ;;
- ld8 r3=[r3] // get value of ia64_sal
- st8 [r2]=r14,8 // save return address (rp)
- ;;
- ld8 r2=[r3],8 // load function's entry point
- ;;
- ld8 gp=[r3] // load function's global pointer
- mov b6=r2
- br.call.sptk.few rp=b6 // make SAL call
-.ret0: adds r2=24,sp
- ;;
- ld8 r15=[r2],8 // restore ar.pfs
- ;;
- ld8 gp=[r2],8 // restore global pointer
- st8 [in0]=r8,8 // store 1. dword of return value
- ;;
- ld8 r14=[r2] // restore return address (rp)
- st8 [in0]=r9,8 // store 2. dword of return value
- ;;
- mov rp=r14
- st8 [in0]=r10,8 // store 3. dword of return value
- ;;
- st8 [in0]=r11,8
- adds sp=48,sp // pop stack frame
- mov ar.pfs=r15
- br.ret.sptk.few rp
-#endif
-
- .endp ia64_sal_stub
-#endif /* __GCC_MULTIREG_RETVALS__ */
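The deleted stub existed only to cope with compilers that could not return a 32-byte aggregate in r8-r11; with __GCC_MULTIREG_RETVALS__ assumed, a SAL call's result now comes back directly in registers. A sketch of the aggregate in question (layout shown for illustration, following the kernel's SAL return convention):

	struct ia64_sal_retval {
		long status;	/* returned in r8 */
		long v0;	/* r9 */
		long v1;	/* r10 */
		long v2;	/* r11 */
	};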
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 80838f990..09850fdd8 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/console.h>
#include <asm/acpi-ext.h>
+#include <asm/ia32.h>
#include <asm/page.h>
#include <asm/machvec.h>
#include <asm/processor.h>
@@ -36,6 +37,10 @@
#include <asm/efi.h>
#include <asm/mca.h>
+#ifdef CONFIG_BLK_DEV_RAM
+# include <linux/blk.h>
+#endif
+
extern char _end;
/* cpu_data[bootstrap_processor] is data for the bootstrap processor: */
@@ -108,6 +113,8 @@ setup_arch (char **cmdline_p)
{
unsigned long max_pfn, bootmap_start, bootmap_size;
+ unw_init();
+
/*
* The secondary bootstrap loader passes us the boot
* parameters at the beginning of the ZERO_PAGE, so let's
@@ -125,11 +132,22 @@ setup_arch (char **cmdline_p)
* change APIs, they'd do things for the better. Grumble...
*/
bootmap_start = PAGE_ALIGN(__pa(&_end));
+ if (ia64_boot_param.initrd_size)
+ bootmap_start = PAGE_ALIGN(bootmap_start + ia64_boot_param.initrd_size);
bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
efi_memmap_walk(free_available_memory, 0);
reserve_bootmem(bootmap_start, bootmap_size);
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = ia64_boot_param.initrd_start;
+ if (initrd_start) {
+ initrd_end = initrd_start+ia64_boot_param.initrd_size;
+ printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *) initrd_start, ia64_boot_param.initrd_size);
+ reserve_bootmem(virt_to_phys(initrd_start), ia64_boot_param.initrd_size);
+ }
+#endif
#if 0
/* XXX fix me */
init_mm.start_code = (unsigned long) &_stext;
@@ -155,10 +173,8 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_SMP
bootstrap_processor = hard_smp_processor_id();
current->processor = bootstrap_processor;
-#else
- cpu_init();
- identify_cpu(&cpu_data[0]);
#endif
+ cpu_init(); /* initialize the bootstrap CPU */
if (efi.acpi) {
/* Parse the ACPI tables */
@@ -270,35 +286,18 @@ identify_cpu (struct cpuinfo_ia64 *c)
u64 features;
} field;
} cpuid;
+ pal_vm_info_1_u_t vm1;
+ pal_vm_info_2_u_t vm2;
+ pal_status_t status;
+ unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
int i;
- for (i = 0; i < 5; ++i) {
+ for (i = 0; i < 5; ++i)
cpuid.bits[i] = ia64_get_cpuid(i);
- }
-#ifdef CONFIG_SMP
- /*
- * XXX Instead of copying the ITC info from the bootstrap
- * processor, ia64_init_itm() should be done per CPU. That
- * should get you the right info. --davidm 1/24/00
- */
- if (c != &cpu_data[bootstrap_processor]) {
- memset(c, 0, sizeof(struct cpuinfo_ia64));
- c->proc_freq = cpu_data[bootstrap_processor].proc_freq;
- c->itc_freq = cpu_data[bootstrap_processor].itc_freq;
- c->cyc_per_usec = cpu_data[bootstrap_processor].cyc_per_usec;
- c->usec_per_cyc = cpu_data[bootstrap_processor].usec_per_cyc;
- }
-#else
memset(c, 0, sizeof(struct cpuinfo_ia64));
-#endif
memcpy(c->vendor, cpuid.field.vendor, 16);
-#ifdef CONFIG_IA64_SOFTSDV_HACKS
- /* BUG: SoftSDV doesn't support the cpuid registers. */
- if (c->vendor[0] == '\0')
- memcpy(c->vendor, "Intel", 6);
-#endif
c->ppn = cpuid.field.ppn;
c->number = cpuid.field.number;
c->revision = cpuid.field.revision;
@@ -306,8 +305,29 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->family = cpuid.field.family;
c->archrev = cpuid.field.archrev;
c->features = cpuid.field.features;
-#ifdef CONFIG_SMP
- c->loops_per_sec = loops_per_sec;
+
+ status = ia64_pal_vm_summary(&vm1, &vm2);
+ if (status == PAL_STATUS_SUCCESS) {
+#if 1
+ /*
+ * XXX the current PAL code returns IMPL_VA_MSB==60, which is dead-wrong.
+ * --davidm 00/05/26
+	 */
+ impl_va_msb = 50;
+#else
+ impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
+#endif
+ phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
+ }
+ printk("processor implements %lu virtual and %lu physical address bits\n",
+ impl_va_msb + 1, phys_addr_size);
+ c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
+ c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
+
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
+ /* BUG: SoftSDV doesn't support the cpuid registers. */
+ if (c->vendor[0] == '\0')
+ memcpy(c->vendor, "Intel", 6);
#endif
}
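For the Itanium defaults used above (impl_va_msb = 50, phys_addr_size = 44), the two masks evaluate as follows; just a check of the formulas shown:

	/* virtual: bits 0..50 plus region bits 61..63 are implemented,
	   so bits 51..60 are flagged as unimplemented */
	unimpl_va_mask = ~((7L<<61) | ((1L<<51) - 1));	/* == 0x1ff8000000000000 */

	/* physical: bits 0..43 are implemented and bit 63 is excluded
	   from the check, so bits 44..62 are flagged as unimplemented */
	unimpl_pa_mask = ~((1L<<63) | ((1L<<44) - 1));	/* == 0x7ffff00000000000 */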
@@ -318,6 +338,11 @@ identify_cpu (struct cpuinfo_ia64 *c)
void
cpu_init (void)
{
+ extern void __init ia64_rid_init (void);
+ extern void __init ia64_tlb_init (void);
+
+ identify_cpu(&my_cpu_data);
+
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
@@ -331,6 +356,21 @@ cpu_init (void)
*/
ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
ia64_set_fpu_owner(0); /* initialize ar.k5 */
+
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
+
+ ia64_rid_init();
+ ia64_tlb_init();
+
+#ifdef CONFIG_IA32_SUPPORT
+ /* initialize global ia32 state - CR0 and CR4 */
+ __asm__("mov ar.cflg = %0"
+ : /* no outputs */
+ : "r" (((ulong) IA32_CR4 << 32) | IA32_CR0));
+#endif
+
+#ifdef CONFIG_SMP
+ normal_xtp();
+#endif
}
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 25197c1d4..8a46377c9 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -37,16 +37,26 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
+struct sigscratch {
+#ifdef CONFIG_IA64_NEW_UNWIND
+ unsigned long scratch_unat; /* ar.unat for the general registers saved in pt */
+ unsigned long pad;
+#else
+ struct switch_stack sw;
+#endif
+ struct pt_regs pt;
+};
+
struct sigframe {
struct siginfo info;
struct sigcontext sc;
};
extern long sys_wait4 (int, int *, int, struct rusage *);
-extern long ia64_do_signal (sigset_t *, struct pt_regs *, long); /* forward decl */
+extern long ia64_do_signal (sigset_t *, struct sigscratch *, long); /* forward decl */
long
-ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct pt_regs *pt)
+ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
{
sigset_t oldset, set;
@@ -71,12 +81,19 @@ ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct pt_regs *pt)
* pre-set the correct error code here to ensure that the right values
* get saved in sigcontext by ia64_do_signal.
*/
- pt->r8 = EINTR;
- pt->r10 = -1;
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(&scr->pt)) {
+ scr->pt.r8 = -EINTR;
+ } else
+#endif
+ {
+ scr->pt.r8 = EINTR;
+ scr->pt.r10 = -1;
+ }
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
- if (ia64_do_signal(&oldset, pt, 1))
+ if (ia64_do_signal(&oldset, scr, 1))
return -EINTR;
}
}
@@ -91,9 +108,8 @@ sys_sigaltstack (const stack_t *uss, stack_t *uoss, long arg2, long arg3, long a
}
static long
-restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt)
+restore_sigcontext (struct sigcontext *sc, struct sigscratch *scr)
{
- struct switch_stack *sw = (struct switch_stack *) pt - 1;
unsigned long ip, flags, nat, um, cfm;
long err;
@@ -104,28 +120,32 @@ restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt)
err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */
err |= __get_user(cfm, &sc->sc_cfm);
err |= __get_user(um, &sc->sc_um); /* user mask */
- err |= __get_user(pt->ar_rsc, &sc->sc_ar_rsc);
- err |= __get_user(pt->ar_ccv, &sc->sc_ar_ccv);
- err |= __get_user(pt->ar_unat, &sc->sc_ar_unat);
- err |= __get_user(pt->ar_fpsr, &sc->sc_ar_fpsr);
- err |= __get_user(pt->ar_pfs, &sc->sc_ar_pfs);
- err |= __get_user(pt->pr, &sc->sc_pr); /* predicates */
- err |= __get_user(pt->b0, &sc->sc_br[0]); /* b0 (rp) */
- err |= __get_user(pt->b6, &sc->sc_br[6]); /* b6 */
- err |= __get_user(pt->b7, &sc->sc_br[7]); /* b7 */
- err |= __copy_from_user(&pt->r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
- err |= __copy_from_user(&pt->r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
- err |= __copy_from_user(&pt->r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
- err |= __copy_from_user(&pt->r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
-
- pt->cr_ifs = cfm | (1UL << 63);
+ err |= __get_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
+ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
+ err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat);
+ err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr);
+ err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+ err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */
+ err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
+ err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
+ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
+ err |= __copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 3*8); /* r1-r3 */
+ err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */
+ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 4*8); /* r12-r15 */
+ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */
+
+ scr->pt.cr_ifs = cfm | (1UL << 63);
/* establish new instruction pointer: */
- pt->cr_iip = ip & ~0x3UL;
- ia64_psr(pt)->ri = ip & 0x3;
- pt->cr_ipsr = (pt->cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);
+ scr->pt.cr_iip = ip & ~0x3UL;
+ ia64_psr(&scr->pt)->ri = ip & 0x3;
+ scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM);
- ia64_put_nat_bits (pt, sw, nat); /* restore the original scratch NaT bits */
+#ifdef CONFIG_IA64_NEW_UNWIND
+ scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat);
+#else
+ ia64_put_nat_bits(&scr->pt, &scr->sw, nat); /* restore the original scratch NaT bits */
+#endif
if (flags & IA64_SC_FLAG_FPH_VALID) {
struct task_struct *fpu_owner = ia64_get_fpu_owner();
@@ -138,7 +158,8 @@ restore_sigcontext (struct sigcontext *sc, struct pt_regs *pt)
return err;
}
-int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
+int
+copy_siginfo_to_user (siginfo_t *to, siginfo_t *from)
{
if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
return -EFAULT;
@@ -147,43 +168,39 @@ int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
else {
int err;
- /* If you change siginfo_t structure, please be sure
- this code is fixed accordingly.
- It should never copy any pad contained in the structure
- to avoid security leaks, but must copy the generic
- 3 ints plus the relevant union member. */
+ /*
+ * If you change siginfo_t structure, please be sure
+ * this code is fixed accordingly. It should never
+ * copy any pad contained in the structure to avoid
+ * security leaks, but must copy the generic 3 ints
+ * plus the relevant union member.
+ */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
switch (from->si_code >> 16) {
- case __SI_FAULT >> 16:
- case __SI_POLL >> 16:
+ case __SI_FAULT >> 16:
+ err |= __put_user(from->si_isr, &to->si_isr);
+ case __SI_POLL >> 16:
err |= __put_user(from->si_addr, &to->si_addr);
err |= __put_user(from->si_imm, &to->si_imm);
break;
- case __SI_CHLD >> 16:
+ case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
- default:
+ default:
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_pid, &to->si_pid);
break;
- /* case __SI_RT: This is not generated by the kernel as of now. */
+ /* case __SI_RT: This is not generated by the kernel as of now. */
}
return err;
}
}
-/*
- * When we get here, ((struct switch_stack *) pt - 1) is a
- * switch_stack frame that has no defined value. Upon return, we
- * expect sw->caller_unat to contain the new unat value. The reason
- * we use a full switch_stack frame is so everything is symmetric
- * with ia64_do_signal().
- */
long
-ia64_rt_sigreturn (struct pt_regs *pt)
+ia64_rt_sigreturn (struct sigscratch *scr)
{
extern char ia64_strace_leave_kernel, ia64_leave_kernel;
struct sigcontext *sc;
@@ -191,7 +208,7 @@ ia64_rt_sigreturn (struct pt_regs *pt)
sigset_t set;
long retval;
- sc = &((struct sigframe *) (pt->r12 + 16))->sc;
+ sc = &((struct sigframe *) (scr->pt.r12 + 16))->sc;
/*
* When we return to the previously executing context, r8 and
@@ -200,9 +217,15 @@ ia64_rt_sigreturn (struct pt_regs *pt)
 	 * must not touch r8 or r10 as otherwise user-level state could
* be corrupted.
*/
- retval = (long) &ia64_leave_kernel | 1;
- if ((current->flags & PF_TRACESYS)
- && (sc->sc_flags & IA64_SC_FLAG_IN_SYSCALL))
+ retval = (long) &ia64_leave_kernel;
+ if (current->flags & PF_TRACESYS)
+ /*
+ * strace expects to be notified after sigreturn
+ * returns even though the context to which we return
+ * may not be in the middle of a syscall. Thus, the
+ * return-value that strace displays for sigreturn is
+ * meaningless.
+ */
retval = (long) &ia64_strace_leave_kernel;
if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
@@ -217,18 +240,18 @@ ia64_rt_sigreturn (struct pt_regs *pt)
recalc_sigpending(current);
spin_unlock_irq(&current->sigmask_lock);
- if (restore_sigcontext(sc, pt))
+ if (restore_sigcontext(sc, scr))
goto give_sigsegv;
#if DEBUG_SIG
printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
- current->comm, current->pid, pt->r12, pt->cr_iip);
+ current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
/*
* It is more difficult to avoid calling this function than to
* call it and ignore errors.
*/
- do_sigaltstack(&sc->sc_stack, 0, pt->r12);
+ do_sigaltstack(&sc->sc_stack, 0, scr->pt.r12);
return retval;
give_sigsegv:
@@ -249,14 +272,13 @@ ia64_rt_sigreturn (struct pt_regs *pt)
* trampoline starts. Everything else is done at the user-level.
*/
static long
-setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct pt_regs *pt)
+setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct sigscratch *scr)
{
- struct switch_stack *sw = (struct switch_stack *) pt - 1;
struct task_struct *fpu_owner = ia64_get_fpu_owner();
unsigned long flags = 0, ifs, nat;
long err;
- ifs = pt->cr_ifs;
+ ifs = scr->pt.cr_ifs;
if (on_sig_stack((unsigned long) sc))
flags |= IA64_SC_FLAG_ONSTACK;
@@ -276,46 +298,49 @@ setup_sigcontext (struct sigcontext *sc, sigset_t *mask, struct pt_regs *pt)
* Note: sw->ar_unat is UNDEFINED unless the process is being
* PTRACED. However, this is OK because the NaT bits of the
* preserved registers (r4-r7) are never being looked at by
- * the signal handler (register r4-r7 are used instead).
+ * the signal handler (registers r4-r7 are used instead).
*/
- nat = ia64_get_nat_bits(pt, sw);
+#ifdef CONFIG_IA64_NEW_UNWIND
+ nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat);
+#else
+ nat = ia64_get_nat_bits(&scr->pt, &scr->sw);
+#endif
err = __put_user(flags, &sc->sc_flags);
err |= __put_user(nat, &sc->sc_nat);
err |= PUT_SIGSET(mask, &sc->sc_mask);
- err |= __put_user(pt->cr_ipsr & IA64_PSR_UM, &sc->sc_um);
- err |= __put_user(pt->ar_rsc, &sc->sc_ar_rsc);
- err |= __put_user(pt->ar_ccv, &sc->sc_ar_ccv);
- err |= __put_user(pt->ar_unat, &sc->sc_ar_unat); /* ar.unat */
- err |= __put_user(pt->ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
- err |= __put_user(pt->ar_pfs, &sc->sc_ar_pfs);
- err |= __put_user(pt->pr, &sc->sc_pr); /* predicates */
- err |= __put_user(pt->b0, &sc->sc_br[0]); /* b0 (rp) */
- err |= __put_user(pt->b6, &sc->sc_br[6]); /* b6 */
- err |= __put_user(pt->b7, &sc->sc_br[7]); /* b7 */
-
- err |= __copy_to_user(&sc->sc_gr[1], &pt->r1, 3*8); /* r1-r3 */
- err |= __copy_to_user(&sc->sc_gr[8], &pt->r8, 4*8); /* r8-r11 */
- err |= __copy_to_user(&sc->sc_gr[12], &pt->r12, 4*8); /* r12-r15 */
- err |= __copy_to_user(&sc->sc_gr[16], &pt->r16, 16*8); /* r16-r31 */
-
- err |= __put_user(pt->cr_iip + ia64_psr(pt)->ri, &sc->sc_ip);
- err |= __put_user(pt->r12, &sc->sc_gr[12]); /* r12 */
+ err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um);
+ err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc);
+ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv);
+ err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */
+ err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */
+ err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs);
+ err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */
+ err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */
+ err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */
+ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */
+
+ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 3*8); /* r1-r3 */
+ err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */
+ err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 4*8); /* r12-r15 */
+ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */
+
+ err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip);
return err;
}
static long
-setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *pt)
+setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
+ struct sigscratch *scr)
{
- struct switch_stack *sw = (struct switch_stack *) pt - 1;
extern char ia64_sigtramp[], __start_gate_section[];
unsigned long tramp_addr, new_rbs = 0;
struct sigframe *frame;
struct siginfo si;
long err;
- frame = (void *) pt->r12;
+ frame = (void *) scr->pt.r12;
tramp_addr = GATE_ADDR + (ia64_sigtramp - __start_gate_section);
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && !on_sig_stack((unsigned long) frame)) {
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
@@ -331,31 +356,39 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, st
err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
- err |= __put_user(sas_ss_flags(pt->r12), &frame->sc.sc_stack.ss_flags);
- err |= setup_sigcontext(&frame->sc, set, pt);
+ err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
+ err |= setup_sigcontext(&frame->sc, set, scr);
if (err)
goto give_sigsegv;
- pt->r12 = (unsigned long) frame - 16; /* new stack pointer */
- pt->r2 = sig; /* signal number */
- pt->r3 = (unsigned long) ka->sa.sa_handler; /* addr. of handler's proc. descriptor */
- pt->r15 = new_rbs;
- pt->ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
- pt->cr_iip = tramp_addr;
- ia64_psr(pt)->ri = 0; /* start executing in first slot */
+ scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
+ scr->pt.r2 = sig; /* signal number */
+ scr->pt.r3 = (unsigned long) ka->sa.sa_handler; /* addr. of handler's proc desc */
+ scr->pt.r15 = new_rbs;
+ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
+ scr->pt.cr_iip = tramp_addr;
+ ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */
+#ifdef CONFIG_IA64_NEW_UNWIND
+ /*
+ * Note: this affects only the NaT bits of the scratch regs
+ * (the ones saved in pt_regs), which is exactly what we want.
+ */
+ scr->scratch_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */
+#else
/*
* Note: this affects only the NaT bits of the scratch regs
- * (the ones saved in pt_regs, which is exactly what we want.
+ * (the ones saved in pt_regs), which is exactly what we want.
* The NaT bits for the preserved regs (r4-r7) are in
* sw->ar_unat iff this process is being PTRACED.
*/
- sw->caller_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */
+ scr->sw.caller_unat = 0; /* ensure NaT bits of at least r2, r3, r12, and r15 are clear */
+#endif
#if DEBUG_SIG
printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%lx\n",
- current->comm, current->pid, sig, pt->r12, pt->cr_iip, pt->r3);
+ current->comm, current->pid, sig, scr->pt.r12, scr->pt.cr_iip, scr->pt.r3);
#endif
return 1;
@@ -374,17 +407,17 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, st
static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
- struct pt_regs *pt)
+ struct sigscratch *scr)
{
#ifdef CONFIG_IA32_SUPPORT
- if (IS_IA32_PROCESS(pt)) {
+ if (IS_IA32_PROCESS(&scr->pt)) {
/* send signal to IA-32 process */
- if (!ia32_setup_frame1(sig, ka, info, oldset, pt))
+ if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
return 0;
} else
#endif
/* send signal to IA-64 process */
- if (!setup_frame(sig, ka, info, oldset, pt))
+ if (!setup_frame(sig, ka, info, oldset, scr))
return 0;
if (ka->sa.sa_flags & SA_ONESHOT)
@@ -401,12 +434,6 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
}
/*
- * When we get here, `pt' points to struct pt_regs and ((struct
- * switch_stack *) pt - 1) points to a switch stack structure.
- * HOWEVER, in the normal case, the ONLY value valid in the
- * switch_stack is the caller_unat field. The entire switch_stack is
- * valid ONLY if current->flags has PF_PTRACED set.
- *
* Note that `init' is a special process: it doesn't get signals it
* doesn't want to handle. Thus you cannot kill init even with a
* SIGKILL even by mistake.
@@ -416,24 +443,35 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
* user-level signal handling stack-frames in one go after that.
*/
long
-ia64_do_signal (sigset_t *oldset, struct pt_regs *pt, long in_syscall)
+ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
{
struct k_sigaction *ka;
siginfo_t info;
long restart = in_syscall;
+ long errno = scr->pt.r8;
/*
* In the ia64_leave_kernel code path, we want the common case
* to go fast, which is why we may in certain cases get here
* from kernel mode. Just return without doing anything if so.
*/
- if (!user_mode(pt))
+ if (!user_mode(&scr->pt))
return 0;
if (!oldset)
oldset = &current->blocked;
- if (pt->r10 != -1) {
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(&scr->pt)) {
+ if (in_syscall) {
+ if (errno >= 0)
+ restart = 0;
+ else
+ errno = -errno;
+ }
+ } else
+#endif
+ if (scr->pt.r10 != -1) {
/*
 		 * A system call has to be restarted only if one of
* the error codes ERESTARTNOHAND, ERESTARTSYS, or
@@ -527,7 +565,7 @@ ia64_do_signal (sigset_t *oldset, struct pt_regs *pt, long in_syscall)
case SIGQUIT: case SIGILL: case SIGTRAP:
case SIGABRT: case SIGFPE: case SIGSEGV:
case SIGBUS: case SIGSYS: case SIGXCPU: case SIGXFSZ:
- if (do_coredump(signr, pt))
+ if (do_coredump(signr, &scr->pt))
exit_code |= 0x80;
/* FALLTHRU */
@@ -542,39 +580,54 @@ ia64_do_signal (sigset_t *oldset, struct pt_regs *pt, long in_syscall)
}
if (restart) {
- switch (pt->r8) {
+ switch (errno) {
case ERESTARTSYS:
if ((ka->sa.sa_flags & SA_RESTART) == 0) {
case ERESTARTNOHAND:
- pt->r8 = EINTR;
- /* note: pt->r10 is already -1 */
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(&scr->pt))
+ scr->pt.r8 = -EINTR;
+ else
+#endif
+ scr->pt.r8 = EINTR;
+ /* note: scr->pt.r10 is already -1 */
break;
}
case ERESTARTNOINTR:
- ia64_decrement_ip(pt);
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(&scr->pt)) {
+ scr->pt.r8 = scr->pt.r1;
+ scr->pt.cr_iip -= 2;
+ } else
+#endif
+ ia64_decrement_ip(&scr->pt);
}
}
/* Whee! Actually deliver the signal. If the
delivery failed, we need to continue to iterate in
this loop so we can deliver the SIGSEGV... */
- if (handle_signal(signr, ka, &info, oldset, pt))
+ if (handle_signal(signr, ka, &info, oldset, scr))
return 1;
}
/* Did we come from a system call? */
if (restart) {
/* Restart the system call - no handlers present */
- if (pt->r8 == ERESTARTNOHAND ||
- pt->r8 == ERESTARTSYS ||
- pt->r8 == ERESTARTNOINTR) {
+ if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR) {
+#ifdef CONFIG_IA32_SUPPORT
+ if (IS_IA32_PROCESS(&scr->pt)) {
+ scr->pt.r8 = scr->pt.r1;
+ scr->pt.cr_iip -= 2;
+ } else
+#endif
/*
* Note: the syscall number is in r15 which is
* saved in pt_regs so all we need to do here
* is adjust ip so that the "break"
* instruction gets re-executed.
*/
- ia64_decrement_ip(pt);
+ ia64_decrement_ip(&scr->pt);
}
}
return 0;
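The restart rules above compress into a small predicate. A condensed sketch of the IA-64 path, where the saved r8 carries the positive errno (illustrative only):

	/* would a syscall interrupted by this signal be restarted? */
	static int
	should_restart (long err, int have_handler, int sa_restart)
	{
		switch (err) {
		case ERESTARTNOINTR:
			return 1;				/* always restarted */
		case ERESTARTSYS:
			return !have_handler || sa_restart;	/* needs SA_RESTART */
		case ERESTARTNOHAND:
			return !have_handler;			/* only without a handler */
		}
		return 0;
	}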
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 43d9f2dde..e6f0f36fe 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -21,11 +21,13 @@
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
+#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
+
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -39,6 +41,7 @@
extern int cpu_idle(void * unused);
extern void _start(void);
+extern void machine_halt(void);
extern int cpu_now_booting; /* Used by head.S to find idle task */
extern volatile unsigned long cpu_online_map; /* Bitmap of available cpu's */
@@ -66,15 +69,18 @@ struct smp_call_struct {
atomic_t unstarted_count;
atomic_t unfinished_count;
};
-static struct smp_call_struct *smp_call_function_data;
+static volatile struct smp_call_struct *smp_call_function_data;
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#ifdef CONFIG_ITANIUM_A1_SPECIFIC
extern spinlock_t ivr_read_lock;
#endif
#define IPI_RESCHEDULE 0
#define IPI_CALL_FUNC 1
#define IPI_CPU_STOP 2
+#ifndef CONFIG_ITANIUM_PTCG
+# define IPI_FLUSH_TLB 3
+#endif /*!CONFIG_ITANIUM_PTCG */
/*
* Setup routine for controlling SMP activation
@@ -126,6 +132,22 @@ halt_processor(void)
}
+static inline int
+pointer_lock(void *lock, void *data, int retry)
+{
+ again:
+ if (cmpxchg_acq((void **) lock, 0, data) == 0)
+ return 0;
+
+ if (!retry)
+ return -EBUSY;
+
+ while (*(void **) lock)
+ ;
+
+ goto again;
+}
+
void
handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
{
@@ -160,13 +182,14 @@ handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
void *info;
int wait;
+ /* release the 'pointer lock' */
data = smp_call_function_data;
func = data->func;
info = data->info;
wait = data->wait;
mb();
- atomic_dec (&data->unstarted_count);
+ atomic_dec(&data->unstarted_count);
/* At this point the structure may be gone unless wait is true. */
(*func)(info);
@@ -174,7 +197,7 @@ handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
/* Notify the sending CPU that the task is done. */
mb();
if (wait)
- atomic_dec (&data->unfinished_count);
+ atomic_dec(&data->unfinished_count);
}
break;
@@ -182,6 +205,51 @@ handle_IPI(int irq, void *dev_id, struct pt_regs *regs)
halt_processor();
break;
+#ifndef CONFIG_ITANIUM_PTCG
+ case IPI_FLUSH_TLB:
+ {
+ extern unsigned long flush_start, flush_end, flush_nbits, flush_rid;
+ extern atomic_t flush_cpu_count;
+ unsigned long saved_rid = ia64_get_rr(flush_start);
+ unsigned long end = flush_end;
+ unsigned long start = flush_start;
+ unsigned long nbits = flush_nbits;
+
+ /*
+			 * The current CPU may be running with a different
+			 * RID, so we need to reload the RID of the flushed
+			 * address. Purging the translation also
+ * needs ALAT invalidation; we do not need
+ * "invala" here since it is done in
+ * ia64_leave_kernel.
+ */
+ ia64_srlz_d();
+ if (saved_rid != flush_rid) {
+ ia64_set_rr(flush_start, flush_rid);
+ ia64_srlz_d();
+ }
+
+ do {
+ /*
+ * Purge local TLB entries.
+ */
+ __asm__ __volatile__ ("ptc.l %0,%1" ::
+ "r"(start), "r"(nbits<<2) : "memory");
+ start += (1UL << nbits);
+ } while (start < end);
+
+ ia64_insn_group_barrier();
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+
+ if (saved_rid != flush_rid) {
+ ia64_set_rr(flush_start, saved_rid);
+ ia64_srlz_d();
+ }
+ atomic_dec(&flush_cpu_count);
+ break;
+ }
+#endif /* !CONFIG_ITANIUM_PTCG */
+
default:
printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
break;
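Since ptc.l purges only the issuing CPU's TLB, a global flush must run this handler on every processor (hence smp_send_flush_tlb() below). The traversal itself is a strided walk; as a C-level model:

	/* purge [start, end) in (1 << nbits)-byte strides; purge_local()
	   stands in for the ptc.l instruction (illustration only) */
	for (addr = start; addr < end; addr += (1UL << nbits))
		purge_local(addr, nbits);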
@@ -199,7 +267,7 @@ send_IPI_single(int dest_cpu, int op)
if (dest_cpu == -1)
return;
- ipi_op[dest_cpu] |= (1 << op);
+ set_bit(op, &ipi_op[dest_cpu]);
ipi_send(dest_cpu, IPI_IRQ, IA64_IPI_DM_INT, 0);
}
@@ -243,6 +311,14 @@ smp_send_stop(void)
send_IPI_allbutself(IPI_CPU_STOP);
}
+#ifndef CONFIG_ITANIUM_PTCG
+void
+smp_send_flush_tlb(void)
+{
+ send_IPI_allbutself(IPI_FLUSH_TLB);
+}
+#endif /* !CONFIG_ITANIUM_PTCG */
+
/*
* Run a function on all other CPUs.
* <func> The function to run. This must be fast and non-blocking.
@@ -260,63 +336,35 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
struct smp_call_struct data;
long timeout;
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ int cpus = smp_num_cpus - 1;
+
+ if (cpus == 0)
+ return 0;
data.func = func;
data.info = info;
data.wait = wait;
- atomic_set(&data.unstarted_count, smp_num_cpus - 1);
- atomic_set(&data.unfinished_count, smp_num_cpus - 1);
+ atomic_set(&data.unstarted_count, cpus);
+ atomic_set(&data.unfinished_count, cpus);
- if (retry) {
- while (1) {
- if (smp_call_function_data) {
- schedule (); /* Give a mate a go */
- continue;
- }
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock); /* Bad luck */
- continue;
- }
- /* Mine, all mine! */
- break;
- }
- }
- else {
- if (smp_call_function_data)
- return -EBUSY;
- spin_lock (&lock);
- if (smp_call_function_data) {
- spin_unlock (&lock);
- return -EBUSY;
- }
- }
+ if (pointer_lock(&smp_call_function_data, &data, retry))
+ return -EBUSY;
- smp_call_function_data = &data;
- spin_unlock (&lock);
- data.func = func;
- data.info = info;
- atomic_set (&data.unstarted_count, smp_num_cpus - 1);
- data.wait = wait;
- if (wait)
- atomic_set (&data.unfinished_count, smp_num_cpus - 1);
-
/* Send a message to all other CPUs and wait for them to respond */
send_IPI_allbutself(IPI_CALL_FUNC);
/* Wait for response */
timeout = jiffies + HZ;
- while ( (atomic_read (&data.unstarted_count) > 0) &&
- time_before (jiffies, timeout) )
- barrier ();
- if (atomic_read (&data.unstarted_count) > 0) {
+ while ((atomic_read(&data.unstarted_count) > 0) && time_before(jiffies, timeout))
+ barrier();
+ if (atomic_read(&data.unstarted_count) > 0) {
smp_call_function_data = NULL;
return -ETIMEDOUT;
}
if (wait)
- while (atomic_read (&data.unfinished_count) > 0)
- barrier ();
+ while (atomic_read(&data.unfinished_count) > 0)
+ barrier();
+ /* unlock pointer */
smp_call_function_data = NULL;
return 0;
}
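pointer_lock() replaces the old spinlock dance: the smp_call_function_data pointer itself is the lock, with NULL meaning free, and a successful compare-and-swap both acquires the slot and publishes the argument block. A generic sketch of the idiom, substituting a GCC builtin for the kernel's cmpxchg_acq():

	static void *slot;	/* NULL == unlocked */

	static int
	pointer_lock_sketch (void *data, int retry)
	{
		while (!__sync_bool_compare_and_swap(&slot, NULL, data)) {
			if (!retry)
				return -1;	/* -EBUSY in the kernel version */
			while (slot != NULL)
				;		/* spin until the holder clears it */
		}
		return 0;
	}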
@@ -382,17 +430,21 @@ smp_do_timer(struct pt_regs *regs)
}
}
-
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
static inline void __init
-smp_store_cpu_info(int cpuid)
+smp_calibrate_delay(int cpuid)
{
struct cpuinfo_ia64 *c = &cpu_data[cpuid];
-
- identify_cpu(c);
+#if 0
+ unsigned long old = loops_per_sec;
+ extern void calibrate_delay(void);
+
+ loops_per_sec = 0;
+ calibrate_delay();
+ c->loops_per_sec = loops_per_sec;
+ loops_per_sec = old;
+#else
+ c->loops_per_sec = loops_per_sec;
+#endif
}
/*
@@ -446,34 +498,26 @@ smp_callin(void)
extern void ia64_init_itm(void);
extern void ia64_cpu_local_tick(void);
- ia64_set_dcr(IA64_DCR_DR | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_PP);
- ia64_set_fpu_owner(0);
- ia64_rid_init(); /* initialize region ids */
-
cpu_init();
- __flush_tlb_all();
- smp_store_cpu_info(smp_processor_id());
smp_setup_percpu_timer(smp_processor_id());
- if (test_and_set_bit(smp_processor_id(), &cpu_online_map)) {
- printk("CPU#%d already initialized!\n", smp_processor_id());
- machine_halt();
- }
- while (!smp_threads_ready)
- mb();
-
- normal_xtp();
-
/* setup the CPU local timer tick */
- ia64_cpu_local_tick();
+ ia64_init_itm();
/* Disable all local interrupts */
ia64_set_lrr0(0, 1);
ia64_set_lrr1(0, 1);
- __sti(); /* Interrupts have been off till now. */
+ if (test_and_set_bit(smp_processor_id(), &cpu_online_map)) {
+ printk("CPU#%d already initialized!\n", smp_processor_id());
+ machine_halt();
+ }
+ while (!smp_threads_ready)
+ mb();
+ local_irq_enable(); /* Interrupts have been off until now */
+ smp_calibrate_delay(smp_processor_id());
printk("SMP: CPU %d starting idle loop\n", smp_processor_id());
cpu_idle(NULL);
@@ -583,16 +627,8 @@ smp_boot_cpus(void)
/* Setup BSP mappings */
__cpu_number_map[bootstrap_processor] = 0;
__cpu_logical_map[0] = bootstrap_processor;
- current->processor = bootstrap_processor;
-
- /* Mark BSP booted and get active_mm context */
- cpu_init();
-
- /* reset XTP for interrupt routing */
- normal_xtp();
- /* And generate an entry in cpu_data */
- smp_store_cpu_info(bootstrap_processor);
+ smp_calibrate_delay(smp_processor_id());
#if 0
smp_tune_scheduling();
#endif
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index f06d3bea8..cd9d64fce 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -14,6 +14,9 @@
#include <linux/file.h> /* doh, must come after sched.h... */
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/highuid.h>
+
+#include <asm/uaccess.h>
asmlinkage long
ia64_getpriority (int which, int who, long arg2, long arg3, long arg4, long arg5, long arg6,
@@ -94,7 +97,11 @@ sys_pipe (long arg0, long arg1, long arg2, long arg3,
static inline unsigned long
do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
{
+ long start_low, end_low, starting_region, ending_region;
+ unsigned long loff, hoff;
struct file *file = 0;
+ /* the virtual address space that is mappable in each region: */
+# define OCTANT_SIZE ((PTRS_PER_PGD<<PGDIR_SHIFT)/8)
/*
* A zero mmap always succeeds in Linux, independent of
@@ -103,15 +110,19 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
if (PAGE_ALIGN(len) == 0)
return addr;
-#ifdef notyet
- /* Don't permit mappings that would cross a region boundary: */
- region_start = IA64_GET_REGION(addr);
- region_end = IA64_GET_REGION(addr + len);
- if (region_start != region_end)
+ /* Don't permit mappings into or across the address hole in a region: */
+ loff = REGION_OFFSET(addr);
+ hoff = loff - (REGION_SIZE - OCTANT_SIZE/2);
+ if ((len | loff | (loff + len)) >= OCTANT_SIZE/2
+ && (len | hoff | (hoff + len)) >= OCTANT_SIZE/2)
return -EINVAL;
- <<x??x>>
-#endif
+ /* Don't permit mappings that would cross a region boundary: */
+
+ starting_region = REGION_NUMBER(addr);
+ ending_region = REGION_NUMBER(addr + len);
+ if (starting_region != ending_region)
+ return -EINVAL;
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
if (!(flags & MAP_ANONYMOUS)) {
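
[Annotation] The new check rejects mappings that touch the unimplemented hole in the
middle of a region's mappable space: a mapping is legal only if it fits entirely in the
low half or entirely in the high half of the octant. Below is a standalone model of that
arithmetic; the REGION_SIZE and OCTANT_SIZE values are assumptions chosen purely for
illustration (the real values come from the ia64 page-table geometry).

	#include <stdio.h>

	#define REGION_SIZE	(1UL << 61)	/* assumed: one ia64 region */
	#define OCTANT_SIZE	(1UL << 40)	/* assumed mappable span */

	/* loff models REGION_OFFSET(addr), the offset within the region */
	static int
	falls_in_hole (unsigned long loff, unsigned long len)
	{
		unsigned long hoff = loff - (REGION_SIZE - OCTANT_SIZE/2);

		/* mapping fits entirely in the low half... */
		if ((len | loff | (loff + len)) < OCTANT_SIZE/2)
			return 0;
		/* ...or entirely in the high half of the mappable space */
		if ((len | hoff | (hoff + len)) < OCTANT_SIZE/2)
			return 0;
		return 1;	/* crosses or lands in the unmapped hole */
	}

	int main (void)
	{
		printf("%d\n", falls_in_hole(0x1000, 0x2000));		/* 0: low half ok */
		printf("%d\n", falls_in_hole(OCTANT_SIZE/2, 0x2000));	/* 1: in the hole */
		return 0;
	}
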
@@ -156,6 +167,9 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags,
{
struct pt_regs *regs = (struct pt_regs *) &stack;
+ if ((off & ~PAGE_MASK) != 0)
+ return -EINVAL;
+
addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR(addr))
regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */
@@ -196,6 +210,150 @@ sys_modify_ldt (long arg0, long arg1, long arg2, long arg3)
return -ENOSYS;
}
+asmlinkage unsigned long
+ia64_create_module (const char *name_user, size_t size, long arg2, long arg3,
+ long arg4, long arg5, long arg6, long arg7, long stack)
+{
+ extern unsigned long sys_create_module (const char *, size_t);
+ struct pt_regs *regs = (struct pt_regs *) &stack;
+ unsigned long addr;
+
+ addr = sys_create_module (name_user, size);
+ if (!IS_ERR(addr))
+ regs->r8 = 0; /* ensure large addresses are not mistaken as failures... */
+ return addr;
+}
+
+#if 1
+/*
+ * This is here for a while to keep compatibility with the old stat()
+ * call - it will be removed later once everybody migrates to the new
+ * kernel stat structure that matches the glibc one - Jes
+ */
+static __inline__ int
+do_revalidate (struct dentry *dentry)
+{
+ struct inode * inode = dentry->d_inode;
+ if (inode->i_op && inode->i_op->revalidate)
+ return inode->i_op->revalidate(dentry);
+ return 0;
+}
+
+static int
+cp_ia64_old_stat (struct inode *inode, struct ia64_oldstat *statbuf)
+{
+ struct ia64_oldstat tmp;
+ unsigned int blocks, indirect;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.st_dev = kdev_t_to_nr(inode->i_dev);
+ tmp.st_ino = inode->i_ino;
+ tmp.st_mode = inode->i_mode;
+ tmp.st_nlink = inode->i_nlink;
+ SET_STAT_UID(tmp, inode->i_uid);
+ SET_STAT_GID(tmp, inode->i_gid);
+ tmp.st_rdev = kdev_t_to_nr(inode->i_rdev);
+ tmp.st_size = inode->i_size;
+ tmp.st_atime = inode->i_atime;
+ tmp.st_mtime = inode->i_mtime;
+ tmp.st_ctime = inode->i_ctime;
+/*
+ * st_blocks and st_blksize are approximated with a simple algorithm if
+ * they aren't supported directly by the filesystem. The minix and msdos
+ * filesystems don't keep track of blocks, so they would either have to
+ * be counted explicitly (by delving into the file itself), or by using
+ * this simple algorithm to get a reasonable (although not 100% accurate)
+ * value.
+ */
+
+/*
+ * Use minix fs values for the number of direct and indirect blocks. The
+ * count is now exact for the minix fs except that it counts zero blocks.
+ * Everything is in units of BLOCK_SIZE until the assignment to
+ * tmp.st_blksize.
+ */
+#define D_B 7
+#define I_B (BLOCK_SIZE / sizeof(unsigned short))
+
+ if (!inode->i_blksize) {
+ blocks = (tmp.st_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
+ if (blocks > D_B) {
+ indirect = (blocks - D_B + I_B - 1) / I_B;
+ blocks += indirect;
+ if (indirect > 1) {
+ indirect = (indirect - 1 + I_B - 1) / I_B;
+ blocks += indirect;
+ if (indirect > 1)
+ blocks++;
+ }
+ }
+ tmp.st_blocks = (BLOCK_SIZE / 512) * blocks;
+ tmp.st_blksize = BLOCK_SIZE;
+ } else {
+ tmp.st_blocks = inode->i_blocks;
+ tmp.st_blksize = inode->i_blksize;
+ }
+ return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
+}
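
[Annotation] The comment above describes the minix-style estimate used when a filesystem
doesn't track block counts: data blocks beyond the D_B direct slots cost extra single-,
double-, and triple-indirect blocks of I_B pointers each. A runnable model of the same
arithmetic follows (BLOCK_SIZE assumed to be the kernel's usual 1024).

	#include <stdio.h>

	#define BLOCK_SIZE	1024
	#define D_B		7
	#define I_B		(BLOCK_SIZE / sizeof(unsigned short))

	static unsigned int
	estimate_blocks (unsigned long size)
	{
		unsigned int blocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
		unsigned int indirect;

		if (blocks > D_B) {
			indirect = (blocks - D_B + I_B - 1) / I_B;	/* single */
			blocks += indirect;
			if (indirect > 1) {
				indirect = (indirect - 1 + I_B - 1) / I_B;	/* double */
				blocks += indirect;
				if (indirect > 1)
					blocks++;			/* triple */
			}
		}
		return blocks;
	}

	int main (void)
	{
		/* 10 data blocks: 7 direct + 3 behind one indirect block -> 11 */
		printf("%u\n", estimate_blocks(10 * BLOCK_SIZE));
		return 0;
	}
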
+
+asmlinkage long
+ia64_oldstat (char *filename, struct ia64_oldstat *statbuf)
+{
+ struct nameidata nd;
+ int error;
+
+ lock_kernel();
+ error = user_path_walk(filename, &nd);
+ if (!error) {
+ error = do_revalidate(nd.dentry);
+ if (!error)
+ error = cp_ia64_old_stat(nd.dentry->d_inode, statbuf);
+ path_release(&nd);
+ }
+ unlock_kernel();
+ return error;
+}
+
+
+asmlinkage long
+ia64_oldlstat (char *filename, struct ia64_oldstat *statbuf)
+{
+ struct nameidata nd;
+ int error;
+
+ lock_kernel();
+ error = user_path_walk_link(filename, &nd);
+ if (!error) {
+ error = do_revalidate(nd.dentry);
+ if (!error)
+ error = cp_ia64_old_stat(nd.dentry->d_inode, statbuf);
+ path_release(&nd);
+ }
+ unlock_kernel();
+ return error;
+}
+
+asmlinkage long
+ia64_oldfstat (unsigned int fd, struct ia64_oldstat *statbuf)
+{
+ struct file * f;
+ int err = -EBADF;
+
+ lock_kernel();
+ f = fget(fd);
+ if (f) {
+ struct dentry * dentry = f->f_dentry;
+
+ err = do_revalidate(dentry);
+ if (!err)
+ err = cp_ia64_old_stat(dentry->d_inode, statbuf);
+ fput(f);
+ }
+ unlock_kernel();
+ return err;
+}
+
+#endif
+
#ifndef CONFIG_PCI
asmlinkage long
@@ -212,5 +370,4 @@ sys_pciconfig_write (unsigned long bus, unsigned long dfn, unsigned long off, un
return -ENOSYS;
}
-
#endif /* CONFIG_PCI */
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index b88855ce4..d14ba0031 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -34,7 +34,10 @@ unsigned long last_cli_ip;
static struct {
unsigned long delta;
- unsigned long next[NR_CPUS];
+ union {
+ unsigned long count;
+ unsigned char pad[SMP_CACHE_BYTES];
+ } next[NR_CPUS];
} itm;
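
[Annotation] itm.next[] grows from a plain array of counters to one union per cache line,
so each CPU's timer bookkeeping lives on its own line and updates don't ping-pong the
line between CPUs. A user-space model of the layout; the line size and CPU count here are
assumptions for illustration only.

	#include <stdio.h>

	#define SMP_CACHE_BYTES	64	/* assumed line size */
	#define NR_CPUS		4	/* assumed CPU count */

	static struct {
		unsigned long delta;
		union {
			unsigned long count;
			unsigned char pad[SMP_CACHE_BYTES];
		} next[NR_CPUS];
	} itm_model;

	int main (void)
	{
		/* adjacent slots are SMP_CACHE_BYTES apart, not 8 bytes */
		printf("%ld\n", (long) ((char *) &itm_model.next[1]
					- (char *) &itm_model.next[0]));
		return 0;
	}
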
static void
@@ -69,16 +72,27 @@ do_profile (unsigned long ip)
static inline unsigned long
gettimeoffset (void)
{
- unsigned long now = ia64_get_itc();
- unsigned long elapsed_cycles, lost;
-
- elapsed_cycles = now - (itm.next[smp_processor_id()] - itm.delta);
-
- lost = lost_ticks;
- if (lost)
- elapsed_cycles += lost*itm.delta;
-
+#ifdef CONFIG_SMP
+ /*
+ * The code below doesn't work for SMP because only CPU 0
+ * keeps track of the time.
+ */
+ return 0;
+#else
+ unsigned long now = ia64_get_itc(), last_tick;
+ unsigned long elapsed_cycles, lost = lost_ticks;
+
+ last_tick = (itm.next[smp_processor_id()].count - (lost+1)*itm.delta);
+# if 1
+ if ((long) (now - last_tick) < 0) {
+ printk("Yikes: now < last_tick (now=0x%lx,last_tick=%lx)! No can do.\n",
+ now, last_tick);
+ return 0;
+ }
+# endif
+ elapsed_cycles = now - last_tick;
return (elapsed_cycles*my_cpu_data.usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT;
+#endif
}
void
@@ -137,6 +151,7 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static unsigned long last_time;
static unsigned char count;
int cpu = smp_processor_id();
+ unsigned long new_itm;
int printed = 0;
/*
@@ -146,6 +161,12 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* xtime_lock.
*/
write_lock(&xtime_lock);
+ new_itm = itm.next[cpu].count;
+
+ if (!time_after(ia64_get_itc(), new_itm))
+ printk("Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+ ia64_get_itc(), new_itm);
+
while (1) {
/*
* Do kernel PC profiling here. We multiply the
@@ -164,18 +185,10 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
do_timer(regs);
#endif
- itm.next[cpu] += itm.delta;
- /*
- * There is a race condition here: to be on the "safe"
- * side, we process timer ticks until itm.next is
- * ahead of the itc by at least half the timer
- * interval. This should give us enough time to set
- * the new itm value without losing a timer tick.
- */
- if (time_after(itm.next[cpu], ia64_get_itc() + itm.delta/2)) {
- ia64_set_itm(itm.next[cpu]);
+ new_itm += itm.delta;
+ itm.next[cpu].count = new_itm;
+ if (time_after(new_itm, ia64_get_itc()))
break;
- }
#if !(defined(CONFIG_IA64_SOFTSDV_HACKS) && defined(CONFIG_SMP))
/*
@@ -188,28 +201,39 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
last_time = jiffies;
if (!printed) {
printk("Lost clock tick on CPU %d (now=%lx, next=%lx)!!\n",
- cpu, ia64_get_itc(), itm.next[cpu]);
+ cpu, ia64_get_itc(), itm.next[cpu].count);
printed = 1;
- }
# ifdef CONFIG_IA64_DEBUG_IRQ
- printk("last_cli_ip=%lx\n", last_cli_ip);
+ printk("last_cli_ip=%lx\n", last_cli_ip);
# endif
+ }
}
#endif
}
write_unlock(&xtime_lock);
+
+ /*
+ * If we're too close to the next clock tick for comfort, we
+ * increase the safety margin by intentionally dropping the
+ * next tick(s). We do NOT update itm.next accordingly
+ * because that would force us to call do_timer() which in
+ * turn would let our clock run too fast (with the potentially
+ * devastating effect of losing the monotonicity of time).
+ */
+ while (!time_after(new_itm, ia64_get_itc() + itm.delta/2))
+ new_itm += itm.delta;
+ ia64_set_itm(new_itm);
}
-#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+#if defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_IA64_SOFTSDV_HACKS)
-void
+/*
+ * Interrupts must be disabled before calling this routine.
+ */
+void
ia64_reset_itm (void)
{
- unsigned long flags;
-
- local_irq_save(flags);
timer_interrupt(0, 0, ia64_task_regs(current));
- local_irq_restore(flags);
}
#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
@@ -220,11 +244,14 @@ ia64_reset_itm (void)
void __init
ia64_cpu_local_tick(void)
{
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
+ ia64_set_itc(0);
+#endif
+
/* arrange for the cycle counter to generate a timer interrupt: */
ia64_set_itv(TIMER_IRQ, 0);
- ia64_set_itc(0);
- itm.next[smp_processor_id()] = ia64_get_itc() + itm.delta;
- ia64_set_itm(itm.next[smp_processor_id()]);
+ itm.next[smp_processor_id()].count = ia64_get_itc() + itm.delta;
+ ia64_set_itm(itm.next[smp_processor_id()].count);
}
void __init
@@ -254,25 +281,7 @@ ia64_init_itm (void)
itc_ratio.num = 3;
itc_ratio.den = 1;
}
-#if defined(CONFIG_IA64_LION_HACKS)
- /* Our Lion currently returns base freq 104.857MHz, which
- ain't right (it really is 100MHz). */
- printk("SAL/PAL returned: base-freq=%lu, itc-ratio=%lu/%lu, proc-ratio=%lu/%lu\n",
- platform_base_freq, itc_ratio.num, itc_ratio.den,
- proc_ratio.num, proc_ratio.den);
- platform_base_freq = 100000000;
-#elif 0 && defined(CONFIG_IA64_BIGSUR_HACKS)
- /* BigSur with 991020 firmware returned itc-ratio=9/2 and base
- freq 75MHz, which wasn't right. The 991119 firmware seems
- to return the right values, so this isn't necessary
- anymore... */
- printk("SAL/PAL returned: base-freq=%lu, itc-ratio=%lu/%lu, proc-ratio=%lu/%lu\n",
- platform_base_freq, itc_ratio.num, itc_ratio.den,
- proc_ratio.num, proc_ratio.den);
- platform_base_freq = 100000000;
- proc_ratio.num = 5; proc_ratio.den = 1;
- itc_ratio.num = 5; itc_ratio.den = 1;
-#elif defined(CONFIG_IA64_SOFTSDV_HACKS)
+#ifdef CONFIG_IA64_SOFTSDV_HACKS
platform_base_freq = 10000000;
proc_ratio.num = 4; proc_ratio.den = 1;
itc_ratio.num = 4; itc_ratio.den = 1;
@@ -290,8 +299,9 @@ ia64_init_itm (void)
itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
itm.delta = itc_freq / HZ;
- printk("timer: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, ITC freq=%lu.%03luMHz\n",
- platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
+ printk("timer: CPU %d base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, ITC freq=%lu.%03luMHz\n",
+ smp_processor_id(),
+ platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);
my_cpu_data.proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
@@ -313,6 +323,8 @@ void __init
time_init (void)
{
/* we can't do request_irq() here because the kmalloc() would fail... */
+ irq_desc[TIMER_IRQ].status |= IRQ_PER_CPU;
+ irq_desc[TIMER_IRQ].handler = &irq_type_ia64_sapic;
setup_irq(TIMER_IRQ, &timer_irqaction);
efi_gettimeofday(&xtime);
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 3a7706a27..4003b20f1 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -3,8 +3,12 @@
*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/
+#define FPSWA_DEBUG 1
+
/*
* The fpu_fault() handler needs to be able to access and update all
* floating point registers. Those saved in pt_regs can be accessed
@@ -168,7 +172,7 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
siginfo.si_signo = sig;
siginfo.si_errno = 0;
siginfo.si_code = code;
- send_sig_info(sig, &siginfo, current);
+ force_sig_info(sig, &siginfo, current);
}
/*
@@ -300,6 +304,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
if (copy_from_user(bundle, (void *) fault_ip, sizeof(bundle)))
return -1;
+#ifdef FPSWA_DEBUG
if (fpu_swa_count > 5 && jiffies - last_time > 5*HZ)
fpu_swa_count = 0;
if (++fpu_swa_count < 5) {
@@ -307,7 +312,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
printk("%s(%d): floating-point assist fault at ip %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri);
}
-
+#endif
exception = fp_emulate(fp_fault, bundle, &regs->cr_ipsr, &regs->ar_fpsr, &isr, &regs->pr,
&regs->cr_ifs, regs);
if (fp_fault) {
@@ -331,7 +336,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
} else if (isr & 0x44) {
siginfo.si_code = FPE_FLTDIV;
}
- send_sig_info(SIGFPE, &siginfo, current);
+ siginfo.si_isr = isr;
+ force_sig_info(SIGFPE, &siginfo, current);
}
} else {
if (exception == -1) {
@@ -350,12 +356,49 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
} else if (isr & 0x2200) {
siginfo.si_code = FPE_FLTRES;
}
- send_sig_info(SIGFPE, &siginfo, current);
+ siginfo.si_isr = isr;
+ force_sig_info(SIGFPE, &siginfo, current);
}
}
return 0;
}
+struct illegal_op_return {
+ unsigned long fkt, arg1, arg2, arg3;
+};
+
+struct illegal_op_return
+ia64_illegal_op_fault (unsigned long ec, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5,
+ unsigned long arg6, unsigned long arg7, unsigned long stack)
+{
+ struct pt_regs *regs = (struct pt_regs *) &stack;
+ struct illegal_op_return rv;
+ struct siginfo si;
+ char buf[128];
+
+#ifdef CONFIG_IA64_BRL_EMU
+ {
+ extern struct illegal_op_return ia64_emulate_brl (struct pt_regs *, unsigned long);
+
+ rv = ia64_emulate_brl(regs, ec);
+ if (rv.fkt != (unsigned long) -1)
+ return rv;
+ }
+#endif
+
+ sprintf(buf, "IA-64 Illegal operation fault");
+ die_if_kernel(buf, regs, 0);
+
+ memset(&si, 0, sizeof(si));
+ si.si_signo = SIGILL;
+ si.si_code = ILL_ILLOPC;
+ si.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+ force_sig_info(SIGILL, &si, current);
+ rv.fkt = 0;
+ return rv;
+}
+
void
ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
unsigned long iim, unsigned long itir, unsigned long arg5,
@@ -450,11 +493,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
force_sig_info(SIGTRAP, &siginfo, current);
return;
- case 30: /* Unaligned fault */
- sprintf(buf, "Kernel unaligned trap accessing %016lx (ip=%016lx)!",
- ifa, regs->cr_iip + ia64_psr(regs)->ri);
- break;
-
case 32: /* fp fault */
case 33: /* fp trap */
result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 35e8cb846..a95b78f64 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1,8 +1,8 @@
/*
* Architecture-specific unaligned trap handling.
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 Stephane Eranian <eranian@hpl.hp.com>
*/
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -460,32 +460,15 @@ setfpreg(unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
* enabled.
*
 * The registers [32-127] are usually saved in the tss. When we get here,
- * they are NECESSARY live because they are only saved explicitely.
+ * they are NECESSARILY live because they are only saved explicitly.
* We have 3 ways of updating the values: force a save of the range
* in tss, use a gigantic switch/case statement or generate code on the
* fly to store to the right register.
* For now, we are using the (slow) save/restore way.
*/
if (regnum >= IA64_FIRST_ROTATING_FR) {
- /*
- * force a save of [32-127] to tss
- * we use the __() form to avoid fiddling with the dfh bit
- */
- __ia64_save_fpu(&current->thread.fph[0]);
-
+ ia64_sync_fph(current);
current->thread.fph[IA64_FPH_OFFS(regnum)] = *fpval;
-
- __ia64_load_fpu(&current->thread.fph[0]);
-
- /*
- * mark the high partition as being used now
- *
- * This is REQUIRED because the disabled_fph_fault() does
- * not set it, it's relying on the faulting instruction to
- * do it. In our case the faulty instruction never gets executed
- * completely, so we need to toggle the bit.
- */
- regs->cr_ipsr |= IA64_PSR_MFH;
} else {
/*
* pt_regs or switch_stack ?
@@ -544,15 +527,8 @@ getfpreg(unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
* See discussion in setfpreg() for reasons and other ways of doing this.
*/
if (regnum >= IA64_FIRST_ROTATING_FR) {
-
- /*
- * force a save of [32-127] to tss
- * we use the__ia64_save_fpu() form to avoid fiddling with
- * the dfh bit.
- */
- __ia64_save_fpu(&current->thread.fph[0]);
-
- *fpval = current->thread.fph[IA64_FPH_OFFS(regnum)];
+ ia64_sync_fph(current);
+ *fpval = current->thread.fph[IA64_FPH_OFFS(regnum)];
} else {
/*
* f0 = 0.0, f1= 1.0. Those registers are constant and are thus
@@ -1410,6 +1386,25 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
die_if_kernel("Unaligned reference while in kernel\n", regs, 30);
/* NOT_REACHED */
}
+ /*
+ * For now, we don't support user processes running big-endian
+ * which do unaligned accesses
+ */
+ if (ia64_psr(regs)->be) {
+ struct siginfo si;
+
+ printk(KERN_ERR "%s(%d): big-endian unaligned access %016lx (ip=%016lx) not "
+ "yet supported\n",
+ current->comm, current->pid, ifa, regs->cr_iip + ipsr->ri);
+
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void *) ifa;
+ force_sig_info(SIGBUS, &si, current);
+ return;
+ }
+
if (current->thread.flags & IA64_THREAD_UAC_SIGBUS) {
struct siginfo si;
@@ -1417,7 +1412,7 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
si.si_errno = 0;
si.si_code = BUS_ADRALN;
si.si_addr = (void *) ifa;
- send_sig_info (SIGBUS, &si, current);
+ force_sig_info(SIGBUS, &si, current);
return;
}
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index c2b772e68..7f3c203ad 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -1,16 +1,1796 @@
/*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+/*
+ * This file implements call frame unwind support for the Linux
+ * kernel. Parsing and processing the unwind information is
+ * time-consuming, so this implementation translates the unwind
+ * descriptors into unwind scripts. These scripts are very simple
+ * (basically a sequence of assignments) and efficient to execute.
+ * They are cached for later re-use. Each script is specific to a
+ * given instruction pointer address and the set of predicate values
+ * that the script depends on (most unwind descriptors are
+ * unconditional and scripts often do not depend on predicates at
+ * all). This code is based on the unwind conventions described in
+ * the "IA-64 Software Conventions and Runtime Architecture" manual.
+ *
+ * SMP conventions:
+ * o updates to the global unwind data (in structure "unw") are serialized
+ * by the unw.lock spinlock
+ * o each unwind script has its own read-write lock; a thread must acquire
+ * a read lock before executing a script and must acquire a write lock
+ * before modifying a script
+ * o if both the unw.lock spinlock and a script's read-write lock must be
+ * acquired, then the read-write lock must be acquired first.
+ */
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/slab.h>
#include <asm/unwind.h>
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+#include <asm/delay.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/ptrace_offsets.h>
+#include <asm/rse.h>
+#include <asm/system.h>
+
+#include "entry.h"
+#include "unwind_i.h"
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+#define p5 5
+
+/*
+ * The unwind tables are supposed to be sorted, but the GNU toolchain
+ * currently fails to produce a sorted table in the presence of
+ * functions that go into sections other than .text. For example, the
+ * kernel likes to put initialization code into .text.init, which
+ * messes up the sort order. Hopefully, this will get fixed sometime
+ * soon. --davidm 00/05/23
+ */
+#define UNWIND_TABLE_SORT_BUG
+
+#define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
+#define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
+
+#define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
+#define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
+
+#define UNW_DEBUG 1
+#define UNW_STATS 0 /* WARNING: this disables interrupts for long time spans!! */
+
+#if UNW_DEBUG
+# define dprintk(format...) printk(format)
+# define inline
+#else
+# define dprintk(format...)
+#endif
+
+#if UNW_STATS
+# define STAT(x...) x
+#else
+# define STAT(x...)
+#endif
+
+#define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
+#define free_reg_state(usr) kfree(usr)
+
+typedef unsigned long unw_word;
+typedef unsigned char unw_hash_index_t;
+
+#define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
+
+static struct {
+ spinlock_t lock; /* spinlock for unwind data */
+
+ /* list of unwind tables (one per load-module) */
+ struct unw_table *tables;
+
+ /* table of registers that prologues can save (and order in which they're saved): */
+ const unsigned char save_order[8];
+
+ /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
+ unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
+
+ unsigned short lru_head; /* index of least-recently used script */
+ unsigned short lru_tail; /* index of most-recently used script */
+
+ /* index into unw_frame_info for preserved register i */
+ unsigned short preg_index[UNW_NUM_REGS];
+
+ /* unwind table for the kernel: */
+ struct unw_table kernel_table;
+
+ /* hash table that maps instruction pointer to script index: */
+ unw_hash_index_t hash[UNW_HASH_SIZE];
+
+ /* script cache: */
+ struct unw_script cache[UNW_CACHE_SIZE];
+
+# if UNW_DEBUG
+ const char *preg_name[UNW_NUM_REGS];
+# endif
+# if UNW_STATS
+ struct {
+ struct {
+ int lookups;
+ int hinted_hits;
+ int normal_hits;
+ int collision_chain_traversals;
+ } cache;
+ struct {
+ unsigned long build_time;
+ unsigned long run_time;
+ unsigned long parse_time;
+ int builds;
+ int news;
+ int collisions;
+ int runs;
+ } script;
+ struct {
+ unsigned long init_time;
+ unsigned long unwind_time;
+ int inits;
+ int unwinds;
+ } api;
+ } stat;
+# endif
+} unw = {
+ tables: &unw.kernel_table,
+ lock: SPIN_LOCK_UNLOCKED,
+ save_order: {
+ UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
+ UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
+ },
+ preg_index: {
+ struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_GR */
+ struct_offset(struct unw_frame_info, pri_unat)/8, /* PRI_UNAT_MEM */
+ struct_offset(struct unw_frame_info, pbsp)/8,
+ struct_offset(struct unw_frame_info, bspstore)/8,
+ struct_offset(struct unw_frame_info, pfs)/8,
+ struct_offset(struct unw_frame_info, rnat)/8,
+ struct_offset(struct unw_frame_info, psp)/8,
+ struct_offset(struct unw_frame_info, rp)/8,
+ struct_offset(struct unw_frame_info, r4)/8,
+ struct_offset(struct unw_frame_info, r5)/8,
+ struct_offset(struct unw_frame_info, r6)/8,
+ struct_offset(struct unw_frame_info, r7)/8,
+ struct_offset(struct unw_frame_info, unat)/8,
+ struct_offset(struct unw_frame_info, pr)/8,
+ struct_offset(struct unw_frame_info, lc)/8,
+ struct_offset(struct unw_frame_info, fpsr)/8,
+ struct_offset(struct unw_frame_info, b1)/8,
+ struct_offset(struct unw_frame_info, b2)/8,
+ struct_offset(struct unw_frame_info, b3)/8,
+ struct_offset(struct unw_frame_info, b4)/8,
+ struct_offset(struct unw_frame_info, b5)/8,
+ struct_offset(struct unw_frame_info, f2)/8,
+ struct_offset(struct unw_frame_info, f3)/8,
+ struct_offset(struct unw_frame_info, f4)/8,
+ struct_offset(struct unw_frame_info, f5)/8,
+ struct_offset(struct unw_frame_info, fr[16 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[17 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[18 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[19 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[20 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[21 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[22 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[23 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[24 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[25 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[26 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[27 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[28 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[29 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[30 - 16])/8,
+ struct_offset(struct unw_frame_info, fr[31 - 16])/8,
+ },
+ hash : { [0 ... UNW_HASH_SIZE - 1] = -1 },
+#if UNW_DEBUG
+ preg_name: {
+ "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
+ "r4", "r5", "r6", "r7",
+ "ar.unat", "pr", "ar.lc", "ar.fpsr",
+ "b1", "b2", "b3", "b4", "b5",
+ "f2", "f3", "f4", "f5",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+ }
+#endif
+};
+
+
+/* Unwind accessors. */
+
+int
+unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
+{
+ unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
+ struct unw_ireg *ireg;
+ struct pt_regs *pt;
+
+ if ((unsigned) regnum - 1 >= 127) {
+ dprintk("unwind: trying to access non-existent r%u\n", regnum);
+ return -1;
+ }
+
+ if (regnum < 32) {
+ if (regnum >= 4 && regnum <= 7) {
+ /* access a preserved register */
+ ireg = &info->r4 + (regnum - 4);
+ addr = ireg->loc;
+ if (addr) {
+ nat_addr = addr + ireg->nat.off;
+ switch (ireg->nat.type) {
+ case UNW_NAT_VAL:
+ /* simulate getf.sig/setf.sig */
+ if (write) {
+ if (*nat) {
+ /* write NaTVal and be done with it */
+ addr[0] = 0;
+ addr[1] = 0x1fffe;
+ return 0;
+ }
+ addr[1] = 0x1003e;
+ } else {
+ if (addr[0] == 0 && addr[1] == 0x1fffe) {
+ /* return NaT and be done with it */
+ *val = 0;
+ *nat = 1;
+ return 0;
+ }
+ }
+ /* fall through */
+ case UNW_NAT_NONE:
+ nat_addr = &dummy_nat;
+ break;
+
+ case UNW_NAT_SCRATCH:
+ if (info->pri_unat)
+ nat_addr = info->pri_unat;
+ else
+ nat_addr = &info->sw->caller_unat;
+ /* fall through */
+ case UNW_NAT_PRI_UNAT:
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ break;
+
+ case UNW_NAT_STACKED:
+ nat_addr = ia64_rse_rnat_addr(addr);
+ if ((unsigned long) addr < info->regstk.limit
+ || (unsigned long) addr >= info->regstk.top)
+ {
+ dprintk("unwind: 0x%p outside of regstk "
+ "[0x%lx-0x%lx)\n", addr,
+ info->regstk.limit, info->regstk.top);
+ return -1;
+ }
+ if ((unsigned long) nat_addr >= info->regstk.top)
+ nat_addr = &info->sw->ar_rnat;
+ nat_mask = (1UL << ia64_rse_slot_num(addr));
+ break;
+ }
+ } else {
+ addr = &info->sw->r4 + (regnum - 4);
+ nat_addr = &info->sw->ar_unat;
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ }
+ } else {
+ /* access a scratch register */
+ if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
+ pt = (struct pt_regs *) info->psp - 1;
+ else
+ pt = (struct pt_regs *) info->sp - 1;
+ if (regnum <= 3)
+ addr = &pt->r1 + (regnum - 1);
+ else if (regnum <= 11)
+ addr = &pt->r8 + (regnum - 8);
+ else if (regnum <= 15)
+ addr = &pt->r12 + (regnum - 12);
+ else
+ addr = &pt->r16 + (regnum - 16);
+ if (info->pri_unat)
+ nat_addr = info->pri_unat;
+ else
+ nat_addr = &info->sw->caller_unat;
+ nat_mask = (1UL << ((long) addr & 0x1f8)/8);
+ }
+ } else {
+ /* access a stacked register */
+ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum);
+ nat_addr = ia64_rse_rnat_addr(addr);
+ if ((unsigned long) addr < info->regstk.limit
+ || (unsigned long) addr >= info->regstk.top)
+ {
+ dprintk("unwind: ignoring attempt to access register outside of rbs\n");
+ return -1;
+ }
+ if ((unsigned long) nat_addr >= info->regstk.top)
+ nat_addr = &info->sw->ar_rnat;
+ nat_mask = (1UL << ia64_rse_slot_num(addr));
+ }
+
+ if (write) {
+ *addr = *val;
+ *nat_addr = (*nat_addr & ~nat_mask) | nat_mask;
+ } else {
+ *val = *addr;
+ *nat = (*nat_addr & nat_mask) != 0;
+ }
+ return 0;
+}
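
[Annotation] The UNW_NAT_VAL case simulates getf.sig/setf.sig on the spilled image of a
floating-point register: a NaTVal is encoded as sign 0, exponent 0x1fffe, mantissa 0,
while setf.sig produces the integer-significand exponent 0x1003e. A small model of the
read-side check follows; the two-word spill layout is an assumption mirroring the code
above, not a definitive description of the register-file format.

	#include <stdio.h>

	struct ia64_fpreg_model {		/* assumed 2x64-bit spill layout */
		unsigned long u[2];		/* u[0]=mantissa, u[1]=sign/exponent */
	};

	static int
	is_natval (const struct ia64_fpreg_model *f)
	{
		/* mirrors the write path: NaTVal = mantissa 0, exponent 0x1fffe */
		return f->u[0] == 0 && f->u[1] == 0x1fffe;
	}

	int main (void)
	{
		struct ia64_fpreg_model nat = { { 0, 0x1fffe } };
		struct ia64_fpreg_model sig = { { 42, 0x1003e } };

		printf("%d %d\n", is_natval(&nat), is_natval(&sig));	/* 1 0 */
		return 0;
	}
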
+
+int
+unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
+{
+ unsigned long *addr;
+ struct pt_regs *pt;
+
+ if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
+ pt = (struct pt_regs *) info->psp - 1;
+ else
+ pt = (struct pt_regs *) info->sp - 1;
+ switch (regnum) {
+ /* scratch: */
+ case 0: addr = &pt->b0; break;
+ case 6: addr = &pt->b6; break;
+ case 7: addr = &pt->b7; break;
+
+ /* preserved: */
+ case 1: case 2: case 3: case 4: case 5:
+ addr = *(&info->b1 + (regnum - 1));
+ if (!addr)
+ addr = &info->sw->b1 + (regnum - 1);
+ break;
+
+ default:
+ dprintk("unwind: trying to access non-existent b%u\n", regnum);
+ return -1;
+ }
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+int
+unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
+{
+ struct ia64_fpreg *addr = 0;
+ struct pt_regs *pt;
+
+ if ((unsigned) (regnum - 2) >= 126) {
+ dprintk("unwind: trying to access non-existent f%u\n", regnum);
+ return -1;
+ }
+
+ if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
+ pt = (struct pt_regs *) info->psp - 1;
+ else
+ pt = (struct pt_regs *) info->sp - 1;
+
+ if (regnum <= 5) {
+ addr = *(&info->f2 + (regnum - 2));
+ if (!addr)
+ addr = &info->sw->f2 + (regnum - 2);
+ } else if (regnum <= 15) {
+ if (regnum <= 9)
+ addr = &pt->f6 + (regnum - 6);
+ else
+ addr = &info->sw->f10 + (regnum - 10);
+ } else if (regnum <= 31) {
+ addr = info->fr[regnum - 16];
+ if (!addr)
+ addr = &info->sw->f16 + (regnum - 16);
+ } else {
+ struct task_struct *t = info->task;
+
+ ia64_sync_fph(t);
+ addr = t->thread.fph + (regnum - 32);
+ }
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+int
+unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
+{
+ unsigned long *addr;
+ struct pt_regs *pt;
+
+ if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
+ pt = (struct pt_regs *) info->psp - 1;
+ else
+ pt = (struct pt_regs *) info->sp - 1;
+
+ switch (regnum) {
+ case UNW_AR_BSP:
+ addr = info->pbsp;
+ if (!addr)
+ addr = &info->sw->ar_bspstore;
+ break;
+
+ case UNW_AR_BSPSTORE:
+ addr = info->bspstore;
+ if (!addr)
+ addr = &info->sw->ar_bspstore;
+ break;
+
+ case UNW_AR_PFS:
+ addr = info->pfs;
+ if (!addr)
+ addr = &info->sw->ar_pfs;
+ break;
+
+ case UNW_AR_RNAT:
+ addr = info->rnat;
+ if (!addr)
+ addr = &info->sw->ar_rnat;
+ break;
+
+ case UNW_AR_UNAT:
+ addr = info->unat;
+ if (!addr)
+ addr = &info->sw->ar_unat;
+ break;
+
+ case UNW_AR_LC:
+ addr = info->lc;
+ if (!addr)
+ addr = &info->sw->ar_lc;
+ break;
+
+ case UNW_AR_EC:
+ if (!info->cfm)
+ return -1;
+ if (write)
+ *info->cfm = (*info->cfm & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
+ else
+ *val = (*info->cfm >> 52) & 0x3f;
+ return 0;
+
+ case UNW_AR_FPSR:
+ addr = info->fpsr;
+ if (!addr)
+ addr = &info->sw->ar_fpsr;
+ break;
+
+ case UNW_AR_RSC:
+ addr = &pt->ar_rsc;
+ break;
+
+ case UNW_AR_CCV:
+ addr = &pt->ar_ccv;
+ break;
+
+ default:
+ dprintk("unwind: trying to access non-existent ar%u\n", regnum);
+ return -1;
+ }
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+inline int
+unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
+{
+ unsigned long *addr;
+
+ addr = info->pr;
+ if (!addr)
+ addr = &info->sw->pr;
+
+ if (write)
+ *addr = *val;
+ else
+ *val = *addr;
+ return 0;
+}
+
+
+/* Unwind decoder routines */
+
+static inline void
+push (struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ rs = alloc_reg_state();
+ memcpy(rs, &sr->curr, sizeof(*rs));
+ rs->next = sr->stack;
+ sr->stack = rs;
+}
+
+static void
+pop (struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ if (!sr->stack) {
+ printk ("unwind: stack underflow!\n");
+ return;
+ }
+ rs = sr->stack;
+ sr->stack = rs->next;
+ free_reg_state(rs);
+}
+
+static enum unw_register_index __attribute__((const))
+decode_abreg (unsigned char abreg, int memory)
+{
+ switch (abreg) {
+ case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
+ case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
+ case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
+ case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
+ case 0x60: return UNW_REG_PR;
+ case 0x61: return UNW_REG_PSP;
+ case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
+ case 0x63: return UNW_REG_RP;
+ case 0x64: return UNW_REG_BSP;
+ case 0x65: return UNW_REG_BSPSTORE;
+ case 0x66: return UNW_REG_RNAT;
+ case 0x67: return UNW_REG_UNAT;
+ case 0x68: return UNW_REG_FPSR;
+ case 0x69: return UNW_REG_PFS;
+ case 0x6a: return UNW_REG_LC;
+ default:
+ break;
+ }
+ dprintk("unwind: bad abreg=0x%x\n", abreg);
+ return UNW_REG_LC;
+}
+
+static void
+set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
+{
+ reg->val = val;
+ reg->where = where;
+ if (reg->when == UNW_WHEN_NEVER)
+ reg->when = when;
+}
+
+static void
+alloc_spill_area (unsigned long *offp, unsigned long regsize,
+ struct unw_reg_info *lo, struct unw_reg_info *hi)
+{
+ struct unw_reg_info *reg;
+
+ for (reg = hi; reg >= lo; --reg) {
+ if (reg->where == UNW_WHERE_SPILL_HOME) {
+ reg->where = UNW_WHERE_PSPREL;
+ reg->val = 0x10 - *offp;
+ *offp += regsize;
+ }
+ }
+}
+
+static inline void
+spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
+{
+ struct unw_reg_info *reg;
+
+ for (reg = *regp; reg <= lim; ++reg) {
+ if (reg->where == UNW_WHERE_SPILL_HOME) {
+ reg->when = t;
+ *regp = reg + 1;
+ return;
+ }
+ }
+ dprintk("unwind: excess spill!\n");
+}
+
+static inline void
+finish_prologue (struct unw_state_record *sr)
+{
+ struct unw_reg_info *reg;
+ unsigned long off;
+ int i;
+
+ /*
+ * First, resolve implicit register save locations
+ * (see Section "11.4.2.3 Rules for Using Unwind
+ * Descriptors", rule 3):
+ */
+ for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) {
+ reg = sr->curr.reg + unw.save_order[i];
+ if (reg->where == UNW_WHERE_GR_SAVE) {
+ reg->where = UNW_WHERE_GR;
+ reg->val = sr->gr_save_loc++;
+ }
+ }
+
+ /*
+ * Next, compute when the fp, general, and branch registers get
+ * saved. This must come before alloc_spill_area() because
+ * we need to know which registers are spilled to their home
+ * locations.
+ */
+ if (sr->imask) {
+ unsigned char kind, mask = 0, *cp = sr->imask;
+ unsigned long t;
+ static const unsigned char limit[3] = {
+ UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
+ };
+ struct unw_reg_info *(regs[3]);
+
+ regs[0] = sr->curr.reg + UNW_REG_F2;
+ regs[1] = sr->curr.reg + UNW_REG_R4;
+ regs[2] = sr->curr.reg + UNW_REG_B1;
+
+ for (t = 0; t < sr->region_len; ++t) {
+ if ((t & 3) == 0)
+ mask = *cp++;
+ kind = (mask >> 2*(3-(t & 3))) & 3;
+ if (kind > 0)
+ spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
+ sr->region_start + t);
+ }
+ }
+ /*
+ * Next, lay out the memory stack spill area:
+ */
+ if (sr->any_spills) {
+ off = sr->spill_offset;
+ alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
+ alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
+ alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
+ }
+}
+
+/*
+ * Region header descriptors.
+ */
+
+static void
+desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
+ struct unw_state_record *sr)
+{
+ int i;
+
+ if (!(sr->in_body || sr->first_region))
+ finish_prologue(sr);
+ sr->first_region = 0;
+
+ /* check if we're done: */
+ if (body && sr->when_target < sr->region_start + sr->region_len) {
+ sr->done = 1;
+ return;
+ }
+
+ for (i = 0; i < sr->epilogue_count; ++i)
+ pop(sr);
+ sr->epilogue_count = 0;
+ sr->epilogue_start = UNW_WHEN_NEVER;
+
+ if (!body)
+ push(sr);
+
+ sr->region_start += sr->region_len;
+ sr->region_len = rlen;
+ sr->in_body = body;
+
+ if (!body) {
+ for (i = 0; i < 4; ++i) {
+ if (mask & 0x8)
+ set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
+ sr->region_start + sr->region_len - 1, grsave++);
+ mask <<= 1;
+ }
+ sr->gr_save_loc = grsave;
+ sr->any_spills = 0;
+ sr->imask = 0;
+ sr->spill_offset = 0x10; /* default to psp+16 */
+ }
+}
+
+/*
+ * Prologue descriptors.
+ */
+
+static inline void
+desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
+{
+ if (abi == 0 && context == 'i')
+ sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
+ else
+ dprintk("unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context);
+}
+
+static inline void
+desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 5; ++i) {
+ if (brmask & 1)
+ set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
+ sr->region_start + sr->region_len - 1, gr++);
+ brmask >>= 1;
+ }
+}
+
+static inline void
+desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 5; ++i) {
+ if (brmask & 1) {
+ set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
+ sr->region_start + sr->region_len - 1, 0);
+ sr->any_spills = 1;
+ }
+ brmask >>= 1;
+ }
+}
+
+static inline void
+desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if ((grmask & 1) != 0) {
+ set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
+ sr->region_start + sr->region_len - 1, 0);
+ sr->any_spills = 1;
+ }
+ grmask >>= 1;
+ }
+ for (i = 0; i < 20; ++i) {
+ if ((frmask & 1) != 0) {
+ set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
+ sr->region_start + sr->region_len - 1, 0);
+ sr->any_spills = 1;
+ }
+ frmask >>= 1;
+ }
+}
+
+static inline void
+desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if ((frmask & 1) != 0) {
+ set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
+ sr->region_start + sr->region_len - 1, 0);
+ sr->any_spills = 1;
+ }
+ frmask >>= 1;
+ }
+}
+
+static inline void
+desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if ((grmask & 1) != 0)
+ set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
+ sr->region_start + sr->region_len - 1, gr++);
+ grmask >>= 1;
+ }
+}
+
+static inline void
+desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i) {
+ if ((grmask & 1) != 0) {
+ set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
+ sr->region_start + sr->region_len - 1, 0);
+ sr->any_spills = 1;
+ }
+ grmask >>= 1;
+ }
+}
+
+static inline void
+desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
+{
+ set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
+ sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
+}
+
+static inline void
+desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
+{
+ sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
+}
+
+static inline void
+desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
+{
+ set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
+}
+
+static inline void
+desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
+{
+ set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
+ 0x10 - 4*pspoff);
+}
+
+static inline void
+desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
+{
+ set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
+ 4*spoff);
+}
+
+static inline void
+desc_rp_br (unsigned char dst, struct unw_state_record *sr)
+{
+ sr->return_link_reg = dst;
+}
+
+static inline void
+desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
+{
+ struct unw_reg_info *reg = sr->curr.reg + regnum;
+
+ if (reg->where == UNW_WHERE_NONE)
+ reg->where = UNW_WHERE_GR_SAVE;
+ reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+}
+
+static inline void
+desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
+{
+ sr->spill_offset = 0x10 - 4*pspoff;
+}
+
+static inline unsigned char *
+desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
+{
+ sr->imask = imaskp;
+ return imaskp + (2*sr->region_len + 7)/8;
+}
+
+/*
+ * Body descriptors.
+ */
+static inline void
+desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
+{
+ sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
+ sr->epilogue_count = ecount + 1;
+}
+
+static inline void
+desc_copy_state (unw_word label, struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ for (rs = sr->reg_state_list; rs; rs = rs->next) {
+ if (rs->label == label) {
+ memcpy (&sr->curr, rs, sizeof(sr->curr));
+ return;
+ }
+ }
+ printk("unwind: failed to find state labelled 0x%lx\n", label);
+}
+
+static inline void
+desc_label_state (unw_word label, struct unw_state_record *sr)
+{
+ struct unw_reg_state *rs;
+
+ rs = alloc_reg_state();
+ memcpy(rs, &sr->curr, sizeof(*rs));
+ rs->label = label;
+ rs->next = sr->reg_state_list;
+ sr->reg_state_list = rs;
+}
+
+/*
+ * General descriptors.
+ */
+
+static inline int
+desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
+{
+ if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
+ return 0;
+ if (qp > 0) {
+ if ((sr->pr_val & (1UL << qp)) == 0)
+ return 0;
+ sr->pr_mask |= (1UL << qp);
+ }
+ return 1;
+}
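
[Annotation] desc_is_active() is also where a script picks up its predicate dependence:
every qualifying predicate consulted while building a script is OR-ed into pr_mask, and
cache_match() later reuses the script only if the masked predicate bits agree. A minimal
model of that reuse test:

	#include <stdio.h>

	/* the reuse test from cache_match(): masked predicate bits must agree */
	static int
	pr_match (unsigned long pr_val, unsigned long script_pr_val, unsigned long pr_mask)
	{
		return ((pr_val ^ script_pr_val) & pr_mask) == 0;
	}

	int main (void)
	{
		unsigned long mask = 1UL << 5;	/* script depends on p5 only */

		printf("%d\n", pr_match(0x20, 0x20, mask));	/* 1: p5 agrees */
		printf("%d\n", pr_match(0x00, 0x20, mask));	/* 0: p5 differs */
		printf("%d\n", pr_match(0x60, 0x20, mask));	/* 1: p6 differs but is ignored */
		return 0;
	}
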
+
+static inline void
+desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
+{
+ struct unw_reg_info *r;
+
+ if (!desc_is_active(qp, t, sr))
+ return;
+
+ r = sr->curr.reg + decode_abreg(abreg, 0);
+ r->where = UNW_WHERE_NONE;
+ r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->val = 0;
+}
+
+static inline void
+desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
+ unsigned char ytreg, struct unw_state_record *sr)
+{
+ enum unw_where where = UNW_WHERE_GR;
+ struct unw_reg_info *r;
+
+ if (!desc_is_active(qp, t, sr))
+ return;
+
+ if (x)
+ where = UNW_WHERE_BR;
+ else if (ytreg & 0x80)
+ where = UNW_WHERE_FR;
+
+ r = sr->curr.reg + decode_abreg(abreg, 0);
+ r->where = where;
+ r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->val = (ytreg & 0x7f);
+}
+
+static inline void
+desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
+ struct unw_state_record *sr)
+{
+ struct unw_reg_info *r;
+
+ if (!desc_is_active(qp, t, sr))
+ return;
+
+ r = sr->curr.reg + decode_abreg(abreg, 1);
+ r->where = UNW_WHERE_PSPREL;
+ r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->val = 0x10 - 4*pspoff;
+}
+
+static inline void
+desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
+ struct unw_state_record *sr)
+{
+ struct unw_reg_info *r;
+
+ if (!desc_is_active(qp, t, sr))
+ return;
+
+ r = sr->curr.reg + decode_abreg(abreg, 1);
+ r->where = UNW_WHERE_SPREL;
+ r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
+ r->val = 4*spoff;
+}
+
+#define UNW_DEC_BAD_CODE(code) printk("unwind: unknown code 0x%02x\n", code);
+
+/*
+ * region headers:
+ */
+#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
+#define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
+/*
+ * prologue descriptors:
+ */
+#define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
+#define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
+#define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
+#define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
+#define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
+#define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
+#define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
+#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
+#define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
+#define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
+#define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
+#define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
+#define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
+#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
+#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
+#define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
+#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
+#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
+#define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
+#define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
+#define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
+/*
+ * body descriptors:
+ */
+#define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
+#define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
+#define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
+/*
+ * general unwind descriptors:
+ */
+#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
+#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
+#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
+#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
+#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
+#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
+#define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
+#define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
+
+#include "unwind_decoder.c"
+
+
+/* Unwind scripts. */
+
+static inline unw_hash_index_t
+hash (unsigned long ip)
+{
+# define magic 0x9e3779b97f4a7c16 /* ((sqrt(5)-1)/2)*2^64 */
+
+ return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
+}
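
[Annotation] This is Fibonacci hashing: multiplying by the golden-ratio fraction of 2^64
spreads consecutive keys across the table, and ip >> 4 first discards the low four bits,
which carry almost no information for 16-byte-aligned bundle addresses. A standalone
illustration (the table size is an arbitrary choice, and a 64-bit unsigned long is
assumed):

	#include <stdio.h>

	#define LOG_HASH_SIZE	8
	#define MAGIC		0x9e3779b97f4a7c16UL	/* ~ ((sqrt(5)-1)/2) * 2^64 */

	static unsigned int
	fib_hash (unsigned long ip)
	{
		/* the multiply mixes the bits; the top LOG_HASH_SIZE bits index the table */
		return (unsigned int) (((ip >> 4) * MAGIC) >> (64 - LOG_HASH_SIZE));
	}

	int main (void)
	{
		/* nearby bundle addresses land in well-separated buckets */
		printf("%u %u\n", fib_hash(0xa000000000001230UL),
		       fib_hash(0xa000000000001240UL));
		return 0;
	}
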
+
+static inline long
+cache_match (struct unw_script *script, unsigned long ip, unsigned long pr_val)
+{
+ read_lock(&script->lock);
+ if ((ip) == (script)->ip && (((pr_val) ^ (script)->pr_val) & (script)->pr_mask) == 0)
+ /* keep the read lock... */
+ return 1;
+ read_unlock(&script->lock);
+ return 0;
+}
+
+static inline struct unw_script *
+script_lookup (struct unw_frame_info *info)
+{
+ struct unw_script *script = unw.cache + info->hint;
+ unsigned long ip, pr_val;
+
+ STAT(++unw.stat.cache.lookups);
+
+ ip = info->ip;
+ pr_val = info->pr_val;
+
+ if (cache_match(script, ip, pr_val)) {
+ STAT(++unw.stat.cache.hinted_hits);
+ return script;
+ }
+
+ script = unw.cache + unw.hash[hash(ip)];
+ while (1) {
+ if (cache_match(script, ip, pr_val)) {
+ /* update hint; no locking required as single-word writes are atomic */
+ STAT(++unw.stat.cache.normal_hits);
+ unw.cache[info->prev_script].hint = script - unw.cache;
+ return script;
+ }
+ if (script->coll_chain >= UNW_HASH_SIZE)
+ return 0;
+ script = unw.cache + script->coll_chain;
+ STAT(++unw.stat.cache.collision_chain_traversals);
+ }
+}
+
+/*
+ * On return, the write lock on SCRIPT is still held.
+ */
+static inline struct unw_script *
+script_new (unsigned long ip)
+{
+ struct unw_script *script, *prev, *tmp;
+ unsigned long flags;
+ unsigned char index;
+ unsigned short head;
+
+ STAT(++unw.stat.script.news);
+
+ /*
+ * Can't (easily) use cmpxchg() here because of ABA problem
+ * that is intrinsic in cmpxchg()...
+ */
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ head = unw.lru_head;
+ script = unw.cache + head;
+ unw.lru_head = script->lru_chain;
+ }
+ spin_unlock(&unw.lock);
+
+ /*
+ * XXX We'll deadlock here if we interrupt a thread that is
+ * holding a read lock on script->lock. A try_write_lock()
+ * might be mighty handy here... Alternatively, we could
+ * disable interrupts whenever we hold a read-lock, but that
+ * seems silly.
+ */
+ write_lock(&script->lock);
+
+ spin_lock(&unw.lock);
+ {
+ /* re-insert script at the tail of the LRU chain: */
+ unw.cache[unw.lru_tail].lru_chain = head;
+ unw.lru_tail = head;
+
+ /* remove the old script from the hash table (if it's there): */
+ index = hash(script->ip);
+ tmp = unw.cache + unw.hash[index];
+ prev = 0;
+ while (1) {
+ if (tmp == script) {
+ if (prev)
+ prev->coll_chain = tmp->coll_chain;
+ else
+ unw.hash[index] = tmp->coll_chain;
+ break;
+ } else
+ prev = tmp;
+ if (tmp->coll_chain >= UNW_CACHE_SIZE)
+ /* old script wasn't in the hash-table */
+ break;
+ tmp = unw.cache + tmp->coll_chain;
+ }
+
+ /* enter new script in the hash table */
+ index = hash(ip);
+ script->coll_chain = unw.hash[index];
+ unw.hash[index] = script - unw.cache;
+
+ script->ip = ip; /* set new IP while we're holding the locks */
+
+ STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
+ }
+ spin_unlock_irqrestore(&unw.lock, flags);
+
+ script->flags = 0;
+ script->hint = 0;
+ script->count = 0;
+ return script;
+}
+
+static void
+script_finalize (struct unw_script *script, struct unw_state_record *sr)
+{
+ script->pr_mask = sr->pr_mask;
+ script->pr_val = sr->pr_val;
+ /*
+ * We could down-grade our write-lock on script->lock here but
+ * the rwlock API doesn't offer atomic lock downgrading, so
+ * we'll just keep the write-lock and release it later when
+ * we're done using the script.
+ */
+}
+
+static inline void
+script_emit (struct unw_script *script, struct unw_insn insn)
+{
+ if (script->count >= UNW_MAX_SCRIPT_LEN) {
+ dprintk("unwind: script exceeds maximum size of %u instructions!\n",
+ UNW_MAX_SCRIPT_LEN);
+ return;
+ }
+ script->insn[script->count++] = insn;
+}
+
+static inline void
+emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
+{
+ struct unw_reg_info *r = sr->curr.reg + i;
+ enum unw_insn_opcode opc;
+ struct unw_insn insn;
+ unsigned long val;
+
+ switch (r->where) {
+ case UNW_WHERE_GR:
+ if (r->val >= 32) {
+ /* register got spilled to a stacked register */
+ opc = UNW_INSN_SETNAT_TYPE;
+ val = UNW_NAT_STACKED;
+ } else {
+ /* register got spilled to a scratch register */
+ opc = UNW_INSN_SETNAT_TYPE;
+ val = UNW_NAT_SCRATCH;
+ }
+ break;
+
+ case UNW_WHERE_FR:
+ opc = UNW_INSN_SETNAT_TYPE;
+ val = UNW_NAT_VAL;
+ break;
+
+ case UNW_WHERE_BR:
+ opc = UNW_INSN_SETNAT_TYPE;
+ val = UNW_NAT_NONE;
+ break;
+
+ case UNW_WHERE_PSPREL:
+ case UNW_WHERE_SPREL:
+ opc = UNW_INSN_SETNAT_PRI_UNAT;
+ val = 0;
+ break;
+
+ default:
+ dprintk("unwind: don't know how to emit nat info for where = %u\n", r->where);
+ return;
+ }
+ insn.opc = opc;
+ insn.dst = unw.preg_index[i];
+ insn.val = val;
+ script_emit(script, insn);
+}
+
+static void
+compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
+{
+ struct unw_reg_info *r = sr->curr.reg + i;
+ enum unw_insn_opcode opc;
+ unsigned long val, rval;
+ struct unw_insn insn;
+ long need_nat_info;
+
+ if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
+ return;
+
+ opc = UNW_INSN_MOVE;
+ val = rval = r->val;
+ need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
+
+ switch (r->where) {
+ case UNW_WHERE_GR:
+ if (rval >= 32) {
+ opc = UNW_INSN_MOVE_STACKED;
+ val = rval - 32;
+ } else if (rval >= 4 && rval <= 7) {
+ if (need_nat_info) {
+ opc = UNW_INSN_MOVE2;
+ need_nat_info = 0;
+ }
+ val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
+ } else {
+ opc = UNW_INSN_LOAD_SPREL;
+ val = -sizeof(struct pt_regs);
+ if (rval >= 1 && rval <= 3)
+ val += struct_offset(struct pt_regs, r1) + 8*(rval - 1);
+ else if (rval <= 11)
+ val += struct_offset(struct pt_regs, r8) + 8*(rval - 8);
+ else if (rval <= 15)
+ val += struct_offset(struct pt_regs, r12) + 8*(rval - 12);
+ else if (rval <= 31)
+ val += struct_offset(struct pt_regs, r16) + 8*(rval - 16);
+ else
+ dprintk("unwind: bad scratch reg r%lu\n", rval);
+ }
+ break;
+
+ case UNW_WHERE_FR:
+ if (rval <= 5)
+ val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
+ else if (rval >= 16 && rval <= 31)
+ val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
+ else {
+ opc = UNW_INSN_LOAD_SPREL;
+ val = -sizeof(struct pt_regs);
+ if (rval <= 9)
+ val += struct_offset(struct pt_regs, f6) + 16*(rval - 6);
+ else
+ dprintk("unwind: kernel may not touch f%lu\n", rval);
+ }
+ break;
+
+ case UNW_WHERE_BR:
+ if (rval >= 1 && rval <= 5)
+ val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
+ else {
+ opc = UNW_INSN_LOAD_SPREL;
+ val = -sizeof(struct pt_regs);
+ if (rval == 0)
+ val += struct_offset(struct pt_regs, b0);
+ else if (rval == 6)
+ val += struct_offset(struct pt_regs, b6);
+ else
+ val += struct_offset(struct pt_regs, b7);
+ }
+ break;
+
+ case UNW_WHERE_SPREL:
+ opc = UNW_INSN_LOAD_SPREL;
+ break;
+
+ case UNW_WHERE_PSPREL:
+ opc = UNW_INSN_LOAD_PSPREL;
+ break;
+
+ default:
+ dprintk("unwind: register %u has unexpected `where' value of %u\n", i, r->where);
+ break;
+ }
+ insn.opc = opc;
+ insn.dst = unw.preg_index[i];
+ insn.val = val;
+ script_emit(script, insn);
+ if (need_nat_info)
+ emit_nat_info(sr, i, script);
+}
+
+static inline struct unw_table_entry *
+lookup (struct unw_table *table, unsigned long rel_ip)
+{
+ struct unw_table_entry *e = 0;
+ unsigned long lo, hi, mid;
+
+ /* do a binary search for right entry: */
+ for (lo = 0, hi = table->length; lo < hi; ) {
+ mid = (lo + hi) / 2;
+ e = &table->array[mid];
+ if (rel_ip < e->start_offset)
+ hi = mid;
+ else if (rel_ip >= e->end_offset)
+ lo = mid + 1;
+ else
+ break;
+ }
+ return e;
+}
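
[Annotation] lookup() is a standard binary search over unwind-table entries sorted by
code range, probed with the module-relative ip. A self-contained model follows; note
that, like the routine above, it can return the nearest probe rather than NULL when no
entry spans rel_ip, so a caller needing an exact hit must re-check the bounds.

	#include <stdio.h>

	struct entry { unsigned long start_offset, end_offset; };

	static const struct entry *
	lookup_model (const struct entry *array, unsigned long len, unsigned long rel_ip)
	{
		unsigned long lo = 0, hi = len, mid;
		const struct entry *e = NULL;

		while (lo < hi) {
			mid = (lo + hi) / 2;
			e = &array[mid];
			if (rel_ip < e->start_offset)
				hi = mid;
			else if (rel_ip >= e->end_offset)
				lo = mid + 1;
			else
				break;		/* rel_ip in [start, end) */
		}
		return e;			/* may be the nearest probe on a miss */
	}

	int main (void)
	{
		static const struct entry tbl[] = { { 0, 16 }, { 16, 64 }, { 64, 128 } };
		const struct entry *e = lookup_model(tbl, 3, 70);

		printf("[%lu,%lu)\n", e->start_offset, e->end_offset);	/* [64,128) */
		return 0;
	}
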
+
+/*
+ * Build an unwind script that unwinds from state OLD_STATE to the
+ * entrypoint of the function that called OLD_STATE.
+ */
+static inline struct unw_script *
+build_script (struct unw_frame_info *info)
+{
+ struct unw_reg_state *rs, *next;
+ struct unw_table_entry *e = 0;
+ struct unw_script *script = 0;
+ unsigned long ip = info->ip;
+ struct unw_state_record sr;
+ struct unw_table *table;
+ struct unw_reg_info *r;
+ struct unw_insn insn;
+ u8 *dp, *desc_end;
+ u64 hdr;
+ int i;
+ STAT(unsigned long start, parse_start;)
+
+ STAT(++unw.stat.script.builds; start = ia64_get_itc());
+
+ /* build state record */
+ memset(&sr, 0, sizeof(sr));
+ for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
+ r->when = UNW_WHEN_NEVER;
+ sr.pr_val = info->pr_val;
+
+ script = script_new(ip);
+ if (!script) {
+ dprintk("unwind: failed to create unwind script\n");
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return 0;
+ }
+ unw.cache[info->prev_script].hint = script - unw.cache;
+
+	/* search the kernel's and the modules' unwind tables for IP: */
+
+ STAT(parse_start = ia64_get_itc());
+
+ for (table = unw.tables; table; table = table->next) {
+ if (ip >= table->start && ip < table->end) {
+ e = lookup(table, ip - table->segment_base);
+ break;
+ }
+ }
+ if (!e) {
+ /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
+ dprintk("unwind: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", ip,
+ unw.cache[info->prev_script].ip);
+ sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
+ sr.curr.reg[UNW_REG_RP].when = -1;
+ sr.curr.reg[UNW_REG_RP].val = 0;
+ compile_reg(&sr, UNW_REG_RP, script);
+ script_finalize(script, &sr);
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return script;
+ }
+
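+	/*
+	 * Worked example (annotation only, not from the original patch):
+	 * IA-64 bundles are 16 bytes wide with three instruction slots, so
+	 * an ip 0x20 bytes past the function entry with slot number 1
+	 * yields when_target = 3*2 + 1 = 7.
+	 */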
+ sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
+ + (ip & 0xfUL));
+ hdr = *(u64 *) (table->segment_base + e->info_offset);
+ dp = (u8 *) (table->segment_base + e->info_offset + 8);
+ desc_end = dp + 8*UNW_LENGTH(hdr);
+
+ while (!sr.done && dp < desc_end)
+ dp = unw_decode(dp, sr.in_body, &sr);
+
+ if (sr.when_target > sr.epilogue_start) {
+ /*
+ * sp has been restored and all values on the memory stack below
+ * psp also have been restored.
+ */
+ sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
+ sr.curr.reg[UNW_REG_PSP].val = 0;
+ for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
+ if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
+ || r->where == UNW_WHERE_SPREL)
+ r->where = UNW_WHERE_NONE;
+ }
+
+ script->flags = sr.flags;
+
+ /*
+	 * If RP didn't get saved, generate an entry for the return link
+ * register.
+ */
+ if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
+ sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
+ sr.curr.reg[UNW_REG_RP].when = -1;
+ sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
+ }
+
+#if UNW_DEBUG
+ printk ("unwind: state record for func 0x%lx, t=%u:\n",
+ table->segment_base + e->start_offset, sr.when_target);
+ for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
+ if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
+ printk(" %s <- ", unw.preg_name[r - sr.curr.reg]);
+ switch (r->where) {
+ case UNW_WHERE_GR: printk("r%lu", r->val); break;
+ case UNW_WHERE_FR: printk("f%lu", r->val); break;
+ case UNW_WHERE_BR: printk("b%lu", r->val); break;
+ case UNW_WHERE_SPREL: printk("[sp+0x%lx]", r->val); break;
+ case UNW_WHERE_PSPREL: printk("[psp+0x%lx]", r->val); break;
+ case UNW_WHERE_NONE:
+ printk("%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
+ break;
+ default: printk("BADWHERE(%d)", r->where); break;
+ }
+ printk ("\t\t%d\n", r->when);
+ }
+ }
+#endif
+
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
+
+ /* translate state record into unwinder instructions: */
+
+ if (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE
+ && sr.when_target > sr.curr.reg[UNW_REG_PSP].when && sr.curr.reg[UNW_REG_PSP].val != 0)
+ {
+ /* new psp is sp plus frame size */
+ insn.opc = UNW_INSN_ADD;
+ insn.dst = unw.preg_index[UNW_REG_PSP];
+ insn.val = sr.curr.reg[UNW_REG_PSP].val;
+ script_emit(script, insn);
+ }
+
+ /* determine where the primary UNaT is: */
+ if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
+ i = UNW_REG_PRI_UNAT_MEM;
+ else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
+ i = UNW_REG_PRI_UNAT_GR;
+ else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
+ i = UNW_REG_PRI_UNAT_MEM;
+ else
+ i = UNW_REG_PRI_UNAT_GR;
+
+ compile_reg(&sr, i, script);
+
+ for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
+ compile_reg(&sr, i, script);
+
+ /* free labelled register states & stack: */
+
+ STAT(parse_start = ia64_get_itc());
+ for (rs = sr.reg_state_list; rs; rs = next) {
+ next = rs->next;
+ free_reg_state(rs);
+ }
+ while (sr.stack)
+ pop(&sr);
+ STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
+
+ script_finalize(script, &sr);
+ STAT(unw.stat.script.build_time += ia64_get_itc() - start);
+ return script;
+}
+
+/*
+ * Apply the unwinding actions represented by OPS and update SR to
+ * reflect the state that existed upon entry to the function that this
+ * unwinder represents.
+ */
+static inline void
+run_script (struct unw_script *script, struct unw_frame_info *state)
+{
+ struct unw_insn *ip, *limit, next_insn;
+ unsigned long opc, dst, val, off;
+ unsigned long *s = (unsigned long *) state;
+ STAT(unsigned long start;)
+
+ STAT(++unw.stat.script.runs; start = ia64_get_itc());
+ state->flags = script->flags;
+ ip = script->insn;
+ limit = script->insn + script->count;
+ next_insn = *ip;
+
+ while (ip++ < limit) {
+ opc = next_insn.opc;
+ dst = next_insn.dst;
+ val = next_insn.val;
+ next_insn = *ip;
+
+ redo:
+ switch (opc) {
+ case UNW_INSN_ADD:
+ s[dst] += val;
+ break;
+
+ case UNW_INSN_MOVE2:
+ if (!s[val])
+ goto lazy_init;
+ s[dst+1] = s[val+1];
+ s[dst] = s[val];
+ break;
+
+ case UNW_INSN_MOVE:
+ if (!s[val])
+ goto lazy_init;
+ s[dst] = s[val];
+ break;
+
+ case UNW_INSN_MOVE_STACKED:
+ s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
+ val);
+ break;
+
+ case UNW_INSN_LOAD_PSPREL:
+ s[dst] = state->psp + val;
+ break;
+
+ case UNW_INSN_LOAD_SPREL:
+ s[dst] = state->sp + val;
+ break;
+
+ case UNW_INSN_SETNAT_PRI_UNAT:
+ if (!state->pri_unat)
+ state->pri_unat = &state->sw->caller_unat;
+ s[dst+1] = ((*state->pri_unat - s[dst]) << 32) | UNW_NAT_PRI_UNAT;
+ break;
+
+ case UNW_INSN_SETNAT_TYPE:
+ s[dst+1] = val;
+ break;
+ }
+ }
+ STAT(unw.stat.script.run_time += ia64_get_itc() - start);
+ return;
+
+ lazy_init:
+ off = unw.sw_off[val];
+ s[val] = (unsigned long) state->sw + off;
+	if (off >= struct_offset (struct switch_stack, r4)
+	    && off <= struct_offset (struct switch_stack, r7))
+ /*
+ * We're initializing a general register: init NaT info, too. Note that we
+		 * rely on the fact that caller_unat is the first field in struct switch_stack:
+ */
+ s[val+1] = (-off << 32) | UNW_NAT_PRI_UNAT;
+ goto redo;
+}
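+/*
+ * Illustrative example (an annotation, not part of the original patch):
+ * for a frame that saved rp in b1, compile_reg() emits
+ *
+ *	{ UNW_INSN_MOVE, unw.preg_index[UNW_REG_RP], unw.preg_index[UNW_REG_B1] }
+ *
+ * and if the b1 slot is still zero when the script runs, lazy_init
+ * points it at sw->b1 via unw.sw_off[] before redoing the move.
+ */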
+
+static int
+find_save_locs (struct unw_frame_info *info)
+{
+ int have_write_lock = 0;
+ struct unw_script *scr;
+
+ if ((info->ip & (my_cpu_data.unimpl_va_mask | 0xf))
+ || REGION_NUMBER(info->ip) != REGION_KERNEL)
+ {
+ /* don't let obviously bad addresses pollute the cache */
+ dprintk("unwind: rejecting bad ip=0x%lx\n", info->ip);
+ info->rp = 0;
+ return -1;
+ }
+
+ scr = script_lookup(info);
+ if (!scr) {
+ scr = build_script(info);
+ if (!scr) {
+ dprintk("unwind: failed to locate/build unwind script for ip %lx\n",
+ info->ip);
+ return -1;
+ }
+ have_write_lock = 1;
+ }
+ info->hint = scr->hint;
+ info->prev_script = scr - unw.cache;
+
+ run_script(scr, info);
+
+ if (have_write_lock)
+ write_unlock(&scr->lock);
+ else
+ read_unlock(&scr->lock);
+ return 0;
+}
+
+int
+unw_unwind (struct unw_frame_info *info)
+{
+ unsigned long prev_ip, prev_sp, prev_bsp;
+ unsigned long ip, pr, num_regs;
+ STAT(unsigned long start, flags;)
+ int retval;
+
+ STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
+
+ prev_ip = info->ip;
+ prev_sp = info->sp;
+ prev_bsp = info->bsp;
+
+ /* restore the ip */
+ if (!info->rp) {
+ dprintk("unwind: failed to locate return link (ip=0x%lx)!\n", info->ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+ ip = info->ip = *info->rp;
+ if (ip < GATE_ADDR + PAGE_SIZE) {
+ /*
+ * We don't have unwind info for the gate page, so we consider that part
+ * of user-space for the purpose of unwinding.
+ */
+ dprintk("unwind: reached user-space (ip=0x%lx)\n", ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+
+ /* restore the cfm: */
+ if (!info->pfs) {
+ dprintk("unwind: failed to locate ar.pfs!\n");
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+ info->cfm = info->pfs;
+
+ /* restore the bsp: */
+ pr = info->pr_val;
+ num_regs = 0;
+ if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
+ if ((pr & (1UL << pNonSys)) != 0)
+ num_regs = *info->cfm & 0x7f; /* size of frame */
+ info->pfs =
+ (unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs));
+ } else
+ num_regs = (*info->cfm >> 7) & 0x7f; /* size of locals */
+ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
+ if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
+ dprintk("unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
+ info->bsp, info->regstk.limit, info->regstk.top);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+
+ /* restore the sp: */
+ info->sp = info->psp;
+ if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
+ dprintk("unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
+			info->sp, info->memstk.top, info->memstk.limit);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+
+ if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
+ dprintk("unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return -1;
+ }
+
+ /* finally, restore the predicates: */
+ unw_get_pr(info, &info->pr_val);
+
+ retval = find_save_locs(info);
+ STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
+ return retval;
+}
+
+int
+unw_unwind_to_user (struct unw_frame_info *info)
+{
+ unsigned long ip;
+
+ while (unw_unwind(info) >= 0) {
+ if (unw_get_rp(info, &ip) < 0) {
+ unw_get_ip(info, &ip);
+ dprintk("unwind: failed to read return pointer (ip=0x%lx)\n", ip);
+ return -1;
+ }
+ /*
+ * We don't have unwind info for the gate page, so we consider that part
+ * of user-space for the purpose of unwinding.
+ */
+ if (ip < GATE_ADDR + PAGE_SIZE)
+ return 0;
+ }
+ unw_get_ip(info, &ip);
+ dprintk("unwind: failed to unwind to user-level (ip=0x%lx)\n", ip);
+ return -1;
+}
+
+void
+unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
+{
+ unsigned long rbslimit, rbstop, stklimit, stktop, sol;
+ STAT(unsigned long start, flags;)
+
+ STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
+
+ /*
+ * Subtle stuff here: we _could_ unwind through the
+ * switch_stack frame but we don't want to do that because it
+ * would be slow as each preserved register would have to be
+ * processed. Instead, what we do here is zero out the frame
+ * info and start the unwind process at the function that
+ * created the switch_stack frame. When a preserved value in
+ * switch_stack needs to be accessed, run_script() will
+ * initialize the appropriate pointer on demand.
+ */
+ memset(info, 0, sizeof(*info));
+
+ rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
+ rbstop = sw->ar_bspstore;
+ if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
+ rbstop = rbslimit;
+
+ stklimit = (unsigned long) t + IA64_STK_OFFSET;
+ stktop = (unsigned long) sw - 16;
+ if (stktop <= rbstop)
+ stktop = rbstop;
+
+ info->regstk.limit = rbslimit;
+ info->regstk.top = rbstop;
+ info->memstk.limit = stklimit;
+ info->memstk.top = stktop;
+ info->task = t;
+ info->sw = sw;
+ info->sp = info->psp = (unsigned long) (sw + 1) - 16;
+ info->cfm = &sw->ar_pfs;
+ sol = (*info->cfm >> 7) & 0x7f;
+ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
+ info->ip = sw->b0;
+ info->pr_val = sw->pr;
+
+ find_save_locs(info);
+ STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
+}
+
+#endif /* CONFIG_IA64_NEW_UNWIND */
+
void
-ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, struct task_struct *t)
+unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
{
struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
+
+#ifdef CONFIG_IA64_NEW_UNWIND
+ unw_init_frame_info(info, t, sw);
+#else
unsigned long sol, limit, top;
memset(info, 0, sizeof(*info));
@@ -22,17 +1802,25 @@ ia64_unwind_init_from_blocked_task (struct ia64_frame_info *info, struct task_st
if (top - (unsigned long) t >= IA64_STK_OFFSET)
top = limit;
- info->regstk.limit = (unsigned long *) limit;
- info->regstk.top = (unsigned long *) top;
- info->bsp = ia64_rse_skip_regs(info->regstk.top, -sol);
- info->top_rnat = sw->ar_rnat;
- info->cfm = sw->ar_pfs;
- info->ip = sw->b0;
+ info->regstk.limit = limit;
+ info->regstk.top = top;
+ info->sw = sw;
+ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
+ info->cfm = &sw->ar_pfs;
+ info->ip = sw->b0;
+#endif
}
void
-ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *regs)
+unw_init_from_current (struct unw_frame_info *info, struct pt_regs *regs)
{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+
+ unw_init_frame_info(info, current, sw);
+ /* skip over interrupt frame: */
+ unw_unwind(info);
+#else
struct switch_stack *sw = (struct switch_stack *) regs - 1;
unsigned long sol, sof, *bsp, limit, top;
@@ -44,34 +1832,40 @@ ia64_unwind_init_from_current (struct ia64_frame_info *info, struct pt_regs *reg
memset(info, 0, sizeof(*info));
sol = (sw->ar_pfs >> 7) & 0x7f; /* size of frame */
- info->regstk.limit = (unsigned long *) limit;
- info->regstk.top = (unsigned long *) top;
- info->top_rnat = sw->ar_rnat;
/* this gives us the bsp top level frame (kdb interrupt frame): */
bsp = ia64_rse_skip_regs((unsigned long *) top, -sol);
/* now skip past the interrupt frame: */
sof = regs->cr_ifs & 0x7f; /* size of frame */
- info->cfm = regs->cr_ifs;
- info->bsp = ia64_rse_skip_regs(bsp, -sof);
+
+ info->regstk.limit = limit;
+ info->regstk.top = top;
+ info->sw = sw;
+ info->bsp = (unsigned long) ia64_rse_skip_regs(bsp, -sof);
+ info->cfm = &regs->cr_ifs;
info->ip = regs->cr_iip;
+#endif
}
+#ifndef CONFIG_IA64_NEW_UNWIND
+
static unsigned long
-read_reg (struct ia64_frame_info *info, int regnum, int *is_nat)
+read_reg (struct unw_frame_info *info, int regnum, int *is_nat)
{
unsigned long *addr, *rnat_addr, rnat;
- addr = ia64_rse_skip_regs(info->bsp, regnum);
- if (addr < info->regstk.limit || addr >= info->regstk.top || ((long) addr & 0x7) != 0) {
+ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum);
+ if ((unsigned long) addr < info->regstk.limit
+ || (unsigned long) addr >= info->regstk.top || ((long) addr & 0x7) != 0)
+ {
*is_nat = 1;
return 0xdeadbeefdeadbeef;
}
rnat_addr = ia64_rse_rnat_addr(addr);
- if (rnat_addr >= info->regstk.top)
- rnat = info->top_rnat;
+ if ((unsigned long) rnat_addr >= info->regstk.top)
+ rnat = info->sw->ar_rnat;
else
rnat = *rnat_addr;
*is_nat = (rnat & (1UL << ia64_rse_slot_num(addr))) != 0;
@@ -83,9 +1877,9 @@ read_reg (struct ia64_frame_info *info, int regnum, int *is_nat)
* store for r32.
*/
int
-ia64_unwind_to_previous_frame (struct ia64_frame_info *info)
+unw_unwind (struct unw_frame_info *info)
{
- unsigned long sol, cfm = info->cfm;
+ unsigned long sol, cfm = *info->cfm;
int is_nat;
sol = (cfm >> 7) & 0x7f; /* size of locals */
@@ -103,16 +1897,187 @@ ia64_unwind_to_previous_frame (struct ia64_frame_info *info)
return -1;
info->ip = read_reg(info, sol - 2, &is_nat);
- if (is_nat)
+ if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf)))
+		/* reject obviously bad addresses */
return -1;
+ info->cfm = ia64_rse_skip_regs((unsigned long *) info->bsp, sol - 1);
cfm = read_reg(info, sol - 1, &is_nat);
if (is_nat)
return -1;
sol = (cfm >> 7) & 0x7f;
- info->cfm = cfm;
- info->bsp = ia64_rse_skip_regs(info->bsp, -sol);
+ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -sol);
return 0;
}
+#endif /* !CONFIG_IA64_NEW_UNWIND */
+
+#ifdef CONFIG_IA64_NEW_UNWIND
+
+static void
+init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
+ unsigned long gp, void *table_start, void *table_end)
+{
+ struct unw_table_entry *start = table_start, *end = table_end;
+
+#ifdef UNWIND_TABLE_SORT_BUG
+ {
+ struct unw_table_entry *e1, *e2, tmp;
+
+ /* stupid bubble sort... */
+
+ for (e1 = start; e1 < end; ++e1) {
+ for (e2 = e1 + 1; e2 < end; ++e2) {
+ if (e2->start_offset < e1->start_offset) {
+ tmp = *e1;
+ *e1 = *e2;
+ *e2 = tmp;
+ }
+ }
+ }
+ }
+#endif
+ table->name = name;
+ table->segment_base = segment_base;
+ table->gp = gp;
+ table->start = segment_base + start[0].start_offset;
+ table->end = segment_base + end[-1].end_offset;
+ table->array = start;
+ table->length = end - start;
+}
+
+void *
+unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
+ void *table_start, void *table_end)
+{
+ struct unw_table_entry *start = table_start, *end = table_end;
+ struct unw_table *table;
+ unsigned long flags;
+
+ if (end - start <= 0) {
+ dprintk("unwind: ignoring attempt to insert empty unwind table\n");
+ return 0;
+ }
+
+ table = kmalloc(sizeof(*table), GFP_USER);
+ if (!table)
+ return 0;
+
+ init_unwind_table(table, name, segment_base, gp, table_start, table_end);
+
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ /* keep kernel unwind table at the front (it's searched most commonly): */
+ table->next = unw.tables->next;
+ unw.tables->next = table;
+ }
+ spin_unlock_irqrestore(&unw.lock, flags);
+
+ return table;
+}
+
+void
+unw_remove_unwind_table (void *handle)
+{
+ struct unw_table *table, *prevt;
+ struct unw_script *tmp, *prev;
+ unsigned long flags;
+ long index;
+
+ if (!handle) {
+ dprintk("unwind: ignoring attempt to remove non-existent unwind table\n");
+ return;
+ }
+
+ table = handle;
+ if (table == &unw.kernel_table) {
+ dprintk("unwind: sorry, freeing the kernel's unwind table is a no-can-do!\n");
+ return;
+ }
+
+ spin_lock_irqsave(&unw.lock, flags);
+ {
+ /* first, delete the table: */
+
+ for (prevt = (struct unw_table *) &unw.tables; prevt; prevt = prevt->next)
+ if (prevt->next == table)
+ break;
+ if (!prevt) {
+ dprintk("unwind: failed to find unwind table %p\n", table);
+ spin_unlock_irqrestore(&unw.lock, flags);
+ return;
+ }
+ prevt->next = table->next;
+
+ /* next, remove hash table entries for this table */
+
+		for (index = 0; index < UNW_HASH_SIZE; ++index) {
+ if (unw.hash[index] >= UNW_CACHE_SIZE)
+ continue;
+
+ tmp = unw.cache + unw.hash[index];
+ prev = 0;
+ while (1) {
+ write_lock(&tmp->lock);
+ {
+ if (tmp->ip >= table->start && tmp->ip < table->end) {
+ if (prev)
+ prev->coll_chain = tmp->coll_chain;
+ else
+						unw.hash[index] = tmp->coll_chain;
+ tmp->ip = 0;
+ } else
+ prev = tmp;
+ }
+				write_unlock(&tmp->lock);
+				/* advance along the collision chain, if any */
+				if (tmp->coll_chain >= UNW_CACHE_SIZE)
+					break;
+				tmp = unw.cache + tmp->coll_chain;
+			}
+ }
+ }
+ spin_unlock_irqrestore(&unw.lock, flags);
+
+ kfree(table);
+}
+#endif /* CONFIG_IA64_NEW_UNWIND */
+
+void
+unw_init (void)
+{
+#ifdef CONFIG_IA64_NEW_UNWIND
+ extern int ia64_unw_start, ia64_unw_end, __gp;
+ extern void unw_hash_index_t_is_too_narrow (void);
+ long i, off;
+
+ if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
+ unw_hash_index_t_is_too_narrow();
+
+ unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
+	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
+ unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
+ unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
+ unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
+ unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
+ unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
+ for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
+ unw.sw_off[unw.preg_index[i]] = off;
+ for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
+ unw.sw_off[unw.preg_index[i]] = off;
+
+ unw.cache[0].coll_chain = -1;
+ for (i = 1; i < UNW_CACHE_SIZE; ++i) {
+ unw.cache[i].lru_chain = (i - 1);
+ unw.cache[i].coll_chain = -1;
+ unw.cache[i].lock = RW_LOCK_UNLOCKED;
+ }
+ unw.lru_head = UNW_CACHE_SIZE - 1;
+ unw.lru_tail = 0;
+
+ init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
+ &ia64_unw_start, &ia64_unw_end);
+#endif /* CONFIG_IA64_NEW_UNWIND */
+}
diff --git a/arch/ia64/kernel/unwind_decoder.c b/arch/ia64/kernel/unwind_decoder.c
new file mode 100644
index 000000000..50ac2d82f
--- /dev/null
+++ b/arch/ia64/kernel/unwind_decoder.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Generic IA-64 unwind info decoder.
+ *
+ * This file is used both by the Linux kernel and objdump. Please keep
+ * the two copies of this file in sync.
+ *
+ * You need to customize the decoder by defining the following
+ * macros/constants before including this file:
+ *
+ * Types:
+ * unw_word Unsigned integer type with at least 64 bits
+ *
+ * Register names:
+ * UNW_REG_BSP
+ * UNW_REG_BSPSTORE
+ * UNW_REG_FPSR
+ * UNW_REG_LC
+ * UNW_REG_PFS
+ * UNW_REG_PR
+ * UNW_REG_RNAT
+ * UNW_REG_PSP
+ * UNW_REG_RP
+ * UNW_REG_UNAT
+ *
+ * Decoder action macros:
+ * UNW_DEC_BAD_CODE(code)
+ * UNW_DEC_ABI(fmt,abi,context,arg)
+ * UNW_DEC_BR_GR(fmt,brmask,gr,arg)
+ * UNW_DEC_BR_MEM(fmt,brmask,arg)
+ * UNW_DEC_COPY_STATE(fmt,label,arg)
+ * UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
+ * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
+ * UNW_DEC_FR_MEM(fmt,frmask,arg)
+ * UNW_DEC_GR_GR(fmt,grmask,gr,arg)
+ * UNW_DEC_GR_MEM(fmt,grmask,arg)
+ * UNW_DEC_LABEL_STATE(fmt,label,arg)
+ * UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
+ * UNW_DEC_MEM_STACK_V(fmt,t,arg)
+ * UNW_DEC_PRIUNAT_GR(fmt,r,arg)
+ * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
+ * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
+ * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
+ * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
+ * UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
+ * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
+ * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
+ * UNW_DEC_REG_REG(fmt,src,dst,arg)
+ * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
+ * UNW_DEC_REG_WHEN(fmt,reg,t,arg)
+ * UNW_DEC_RESTORE(fmt,t,abreg,arg)
+ * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
+ * UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
+ * UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
+ * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
+ * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
+ * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
+ * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
+ * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
+ * UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
+ */
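+
+/*
+ * A minimal instantiation sketch (an assumption for illustration; the
+ * example_* names are hypothetical and not part of this patch):
+ *
+ *	typedef unsigned long unw_word;
+ *	#define UNW_DEC_BAD_CODE(code)	example_bad_code(code)
+ *	#define UNW_DEC_PROLOGUE(fmt,body,rlen,arg) \
+ *		example_prologue(body, rlen, arg)
+ *	... define the remaining UNW_DEC_* hooks, then:
+ *	#include "unwind_decoder.c"
+ */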
+
+static unw_word
+unw_decode_uleb128 (unsigned char **dpp)
+{
+ unsigned shift = 0;
+ unw_word byte, result = 0;
+ unsigned char *bp = *dpp;
+
+ while (1)
+ {
+ byte = *bp++;
+ result |= (byte & 0x7f) << shift;
+ if ((byte & 0x80) == 0)
+ break;
+ shift += 7;
+ }
+ *dpp = bp;
+ return result;
+}
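+/*
+ * Worked example (annotation only): the byte sequence 0xe5 0x8e 0x26
+ * decodes to 0x65 | (0x0e << 7) | (0x26 << 14) = 624485.
+ */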
+
+static unsigned char *
+unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char byte1, abreg;
+ unw_word t, off;
+
+ byte1 = *dp++;
+ t = unw_decode_uleb128 (&dp);
+ off = unw_decode_uleb128 (&dp);
+ abreg = (byte1 & 0x7f);
+ if (byte1 & 0x80)
+ UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
+ else
+ UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char byte1, byte2, abreg, x, ytreg;
+ unw_word t;
+
+ byte1 = *dp++; byte2 = *dp++;
+ t = unw_decode_uleb128 (&dp);
+ abreg = (byte1 & 0x7f);
+ ytreg = byte2;
+ x = (byte1 >> 7) & 1;
+ if ((byte1 & 0x80) == 0 && ytreg == 0)
+ UNW_DEC_RESTORE(X2, t, abreg, arg);
+ else
+ UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char byte1, byte2, abreg, qp;
+ unw_word t, off;
+
+ byte1 = *dp++; byte2 = *dp++;
+ t = unw_decode_uleb128 (&dp);
+ off = unw_decode_uleb128 (&dp);
+
+ qp = (byte1 & 0x3f);
+ abreg = (byte2 & 0x7f);
+
+ if (byte1 & 0x80)
+ UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
+ else
+ UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
+ unw_word t;
+
+ byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
+ t = unw_decode_uleb128 (&dp);
+
+ qp = (byte1 & 0x3f);
+ abreg = (byte2 & 0x7f);
+ x = (byte2 >> 7) & 1;
+ ytreg = byte3;
+
+ if ((byte2 & 0x80) == 0 && byte3 == 0)
+ UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
+ else
+ UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
+{
+ int body = (code & 0x20) != 0;
+ unw_word rlen;
+
+ rlen = (code & 0x1f);
+ UNW_DEC_PROLOGUE(R1, body, rlen, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char byte1, mask, grsave;
+ unw_word rlen;
+
+ byte1 = *dp++;
+
+ mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
+ grsave = (byte1 & 0x7f);
+ rlen = unw_decode_uleb128 (&dp);
+ UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unw_word rlen;
+
+ rlen = unw_decode_uleb128 (&dp);
+ UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char brmask = (code & 0x1f);
+
+ UNW_DEC_BR_MEM(P1, brmask, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
+{
+ if ((code & 0x10) == 0)
+ {
+ unsigned char byte1 = *dp++;
+
+ UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
+ (byte1 & 0x7f), arg);
+ }
+ else if ((code & 0x08) == 0)
+ {
+ unsigned char byte1 = *dp++, r, dst;
+
+ r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
+ dst = (byte1 & 0x7f);
+ switch (r)
+ {
+ case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
+ case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
+ case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
+ case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
+ case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
+ case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
+ case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
+ case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
+ case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
+ case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
+ case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
+ case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
+ default: UNW_DEC_BAD_CODE(r); break;
+ }
+ }
+ else if ((code & 0x7) == 0)
+ UNW_DEC_SPILL_MASK(P4, dp, arg);
+ else if ((code & 0x7) == 1)
+ {
+ unw_word grmask, frmask, byte1, byte2, byte3;
+
+ byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
+ grmask = ((byte1 >> 4) & 0xf);
+ frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
+ UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
+ }
+ else
+ UNW_DEC_BAD_CODE(code);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
+{
+ int gregs = (code & 0x10) != 0;
+ unsigned char mask = (code & 0x0f);
+
+ if (gregs)
+ UNW_DEC_GR_MEM(P6, mask, arg);
+ else
+ UNW_DEC_FR_MEM(P6, mask, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unsigned char r, byte1, byte2;
+ unw_word t, size;
+
+ if ((code & 0x10) == 0)
+ {
+ r = (code & 0xf);
+ t = unw_decode_uleb128 (&dp);
+ switch (r)
+ {
+ case 0:
+ size = unw_decode_uleb128 (&dp);
+ UNW_DEC_MEM_STACK_F(P7, t, size, arg);
+ break;
+
+ case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
+ case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
+ case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
+ case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
+ case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
+ case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
+ case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
+ case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
+ case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
+ case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
+ case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
+ case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
+ case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
+ case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
+ case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
+ default: UNW_DEC_BAD_CODE(r); break;
+ }
+ }
+ else
+ {
+ switch (code & 0xf)
+ {
+ case 0x0: /* p8 */
+ {
+ r = *dp++;
+ t = unw_decode_uleb128 (&dp);
+ switch (r)
+ {
+ case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
+ case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
+ case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
+ case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
+ case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
+ case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
+ case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
+ case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
+ case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
+ case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
+ case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
+ case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
+ case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
+ case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
+ case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
+ case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
+ case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
+ case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
+ case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
+ default: UNW_DEC_BAD_CODE(r); break;
+ }
+ }
+ break;
+
+ case 0x1:
+ byte1 = *dp++; byte2 = *dp++;
+ UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
+ break;
+
+ case 0xf: /* p10 */
+ byte1 = *dp++; byte2 = *dp++;
+ UNW_DEC_ABI(P10, byte1, byte2, arg);
+ break;
+
+ case 0x9:
+ return unw_decode_x1 (dp, code, arg);
+
+ case 0xa:
+ return unw_decode_x2 (dp, code, arg);
+
+ case 0xb:
+ return unw_decode_x3 (dp, code, arg);
+
+ case 0xc:
+ return unw_decode_x4 (dp, code, arg);
+
+ default:
+ UNW_DEC_BAD_CODE(code);
+ break;
+ }
+ }
+ return dp;
+}
+
+static unsigned char *
+unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unw_word label = (code & 0x1f);
+
+ if ((code & 0x20) != 0)
+ UNW_DEC_COPY_STATE(B1, label, arg);
+ else
+ UNW_DEC_LABEL_STATE(B1, label, arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unw_word t;
+
+ t = unw_decode_uleb128 (&dp);
+ UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
+ return dp;
+}
+
+static unsigned char *
+unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
+{
+ unw_word t, ecount, label;
+
+ if ((code & 0x10) == 0)
+ {
+ t = unw_decode_uleb128 (&dp);
+ ecount = unw_decode_uleb128 (&dp);
+ UNW_DEC_EPILOGUE(B3, t, ecount, arg);
+ }
+ else if ((code & 0x07) == 0)
+ {
+ label = unw_decode_uleb128 (&dp);
+ if ((code & 0x08) != 0)
+ UNW_DEC_COPY_STATE(B4, label, arg);
+ else
+ UNW_DEC_LABEL_STATE(B4, label, arg);
+ }
+ else
+ switch (code & 0x7)
+ {
+ case 1: return unw_decode_x1 (dp, code, arg);
+ case 2: return unw_decode_x2 (dp, code, arg);
+ case 3: return unw_decode_x3 (dp, code, arg);
+ case 4: return unw_decode_x4 (dp, code, arg);
+ default: UNW_DEC_BAD_CODE(code); break;
+ }
+ return dp;
+}
+
+typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
+
+static unw_decoder unw_decode_table[2][8] =
+{
+ /* prologue table: */
+ {
+ unw_decode_r1, /* 0 */
+ unw_decode_r1,
+ unw_decode_r2,
+ unw_decode_r3,
+ unw_decode_p1, /* 4 */
+ unw_decode_p2_p5,
+ unw_decode_p6,
+ unw_decode_p7_p10
+ },
+ {
+ unw_decode_r1, /* 0 */
+ unw_decode_r1,
+ unw_decode_r2,
+ unw_decode_r3,
+ unw_decode_b1, /* 4 */
+ unw_decode_b1,
+ unw_decode_b2,
+ unw_decode_b3_x4
+ }
+};
+
+/*
+ * Decode one descriptor and return address of next descriptor.
+ */
+static inline unsigned char *
+unw_decode (unsigned char *dp, int inside_body, void *arg)
+{
+ unw_decoder decoder;
+ unsigned char code;
+
+ code = *dp++;
+ decoder = unw_decode_table[inside_body][code >> 5];
+ dp = (*decoder) (dp, code, arg);
+ return dp;
+}
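+/*
+ * Dispatch example (annotation only): a descriptor byte 0x40 has
+ * code >> 5 == 2 and is therefore handled by unw_decode_r2() in
+ * either region table.
+ */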
diff --git a/arch/ia64/kernel/unwind_i.h b/arch/ia64/kernel/unwind_i.h
new file mode 100644
index 000000000..fea655efd
--- /dev/null
+++ b/arch/ia64/kernel/unwind_i.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2000 Hewlett-Packard Co
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * Kernel unwind support.
+ */
+
+#define UNW_VER(x) ((x) >> 48)
+#define UNW_FLAG_MASK 0x0000ffff00000000
+#define UNW_FLAG_OSMASK 0x0000f00000000000
+#define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L)
+#define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L)
+#define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL)
+
+enum unw_register_index {
+ /* primary unat: */
+ UNW_REG_PRI_UNAT_GR,
+ UNW_REG_PRI_UNAT_MEM,
+
+ /* register stack */
+ UNW_REG_BSP, /* register stack pointer */
+ UNW_REG_BSPSTORE,
+ UNW_REG_PFS, /* previous function state */
+ UNW_REG_RNAT,
+ /* memory stack */
+ UNW_REG_PSP, /* previous memory stack pointer */
+ /* return pointer: */
+ UNW_REG_RP,
+
+ /* preserved registers: */
+ UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7,
+ UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR,
+ UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5,
+ UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5,
+ UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19,
+ UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23,
+ UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27,
+ UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31,
+ UNW_NUM_REGS
+};
+
+struct unw_info_block {
+ u64 header;
+ u64 desc[0]; /* unwind descriptors */
+ /* personality routine and language-specific data follow behind descriptors */
+};
+
+struct unw_table_entry {
+ u64 start_offset;
+ u64 end_offset;
+ u64 info_offset;
+};
+
+struct unw_table {
+ struct unw_table *next; /* must be first member! */
+ const char *name;
+ unsigned long gp; /* global pointer for this load-module */
+ unsigned long segment_base; /* base for offsets in the unwind table entries */
+ unsigned long start;
+ unsigned long end;
+ struct unw_table_entry *array;
+ unsigned long length;
+};
+
+enum unw_where {
+ UNW_WHERE_NONE, /* register isn't saved at all */
+ UNW_WHERE_GR, /* register is saved in a general register */
+ UNW_WHERE_FR, /* register is saved in a floating-point register */
+ UNW_WHERE_BR, /* register is saved in a branch register */
+ UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */
+ UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */
+ /*
+ * At the end of each prologue these locations get resolved to
+ * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively:
+ */
+ UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */
+ UNW_WHERE_GR_SAVE /* register is saved in next general register */
+};
+
+#define UNW_WHEN_NEVER 0x7fffffff
+
+struct unw_reg_info {
+ unsigned long val; /* save location: register number or offset */
+ enum unw_where where; /* where the register gets saved */
+ int when; /* when the register gets saved */
+};
+
+struct unw_state_record {
+ unsigned int first_region : 1; /* is this the first region? */
+ unsigned int done : 1; /* are we done scanning descriptors? */
+ unsigned int any_spills : 1; /* got any register spills? */
+ unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? */
+ unsigned long flags; /* see UNW_FLAG_* in unwind.h */
+
+	u8 *imask;			/* imask of spill_mask record or NULL */
+ unsigned long pr_val; /* predicate values */
+ unsigned long pr_mask; /* predicate mask */
+ long spill_offset; /* psp-relative offset for spill base */
+ int region_start;
+ int region_len;
+ int epilogue_start;
+ int epilogue_count;
+ int when_target;
+
+ u8 gr_save_loc; /* next general register to use for saving a register */
+ u8 return_link_reg; /* branch register in which the return link is passed */
+
+ struct unw_reg_state {
+ struct unw_reg_state *next;
+ unsigned long label; /* label of this state record */
+ struct unw_reg_info reg[UNW_NUM_REGS];
+ } curr, *stack, *reg_state_list;
+};
+
+enum unw_nat_type {
+ UNW_NAT_NONE, /* NaT not represented */
+ UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */
+ UNW_NAT_PRI_UNAT, /* NaT value is in unat word at offset OFF */
+ UNW_NAT_SCRATCH, /* NaT value is in scratch.pri_unat */
+ UNW_NAT_STACKED /* NaT is in rnat */
+};
+
+enum unw_insn_opcode {
+ UNW_INSN_ADD, /* s[dst] += val */
+ UNW_INSN_MOVE, /* s[dst] = s[val] */
+ UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */
+ UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */
+ UNW_INSN_LOAD_PSPREL, /* s[dst] = *(*s.psp + 8*val) */
+ UNW_INSN_LOAD_SPREL, /* s[dst] = *(*s.sp + 8*val) */
+ UNW_INSN_SETNAT_PRI_UNAT, /* s[dst+1].nat.type = PRI_UNAT;
+ s[dst+1].nat.off = *s.pri_unat - s[dst] */
+ UNW_INSN_SETNAT_TYPE /* s[dst+1].nat.type = val */
+};
+
+struct unw_insn {
+ unsigned int opc : 4;
+ unsigned int dst : 9;
+ signed int val : 19;
+};
+
+/*
+ * Preserved general static registers (r4-r7) give rise to two script
+ * instructions; everything else yields at most one instruction; at
+ * the end of the script, the psp gets popped, accounting for one more
+ * instruction.
+ */
+#define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5)
+
+struct unw_script {
+ unsigned long ip; /* ip this script is for */
+ unsigned long pr_mask; /* mask of predicates script depends on */
+ unsigned long pr_val; /* predicate values this script is for */
+ rwlock_t lock;
+ unsigned int flags; /* see UNW_FLAG_* in unwind.h */
+ unsigned short lru_chain; /* used for least-recently-used chain */
+ unsigned short coll_chain; /* used for hash collisions */
+ unsigned short hint; /* hint for next script to try (or -1) */
+ unsigned short count; /* number of instructions in script */
+ struct unw_insn insn[UNW_MAX_SCRIPT_LEN];
+};
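+
+/*
+ * Worked size check (annotation, not in the original patch): the enum
+ * above defines UNW_NUM_REGS = 41, so UNW_MAX_SCRIPT_LEN is 46 and a
+ * script's insn[] array occupies 46 * 4 = 184 bytes.
+ */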
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 376d0d6d4..882bdaed9 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -5,15 +5,21 @@
.S.o:
$(CC) $(AFLAGS) -c $< -o $@
-OBJS = __divdi3.o __divsi3.o __udivdi3.o __udivsi3.o \
+L_TARGET = lib.a
+
+L_OBJS = __divdi3.o __divsi3.o __udivdi3.o __udivsi3.o \
__moddi3.o __modsi3.o __umoddi3.o __umodsi3.o \
checksum.o clear_page.o csum_partial_copy.o copy_page.o \
copy_user.o clear_user.o memset.o strncpy_from_user.o \
strlen.o strlen_user.o strnlen_user.o \
flush.o do_csum.o
-lib.a: $(OBJS)
- $(AR) rcs lib.a $(OBJS)
+LX_OBJS = io.o
+
+IGNORE_FLAGS_OBJS = __divdi3.o __divsi3.o __udivdi3.o __udivsi3.o \
+ __moddi3.o __modsi3.o __umoddi3.o __umodsi3.o
+
+include $(TOPDIR)/Rules.make
__divdi3.o: idiv.S
$(CC) $(AFLAGS) -c -o $@ $<
@@ -38,5 +44,3 @@ __umoddi3.o: idiv.S
__umodsi3.o: idiv.S
$(CC) $(AFLAGS) -c -DMODULO -DUNSIGNED -DSINGLE -c -o $@ $<
-
-include $(TOPDIR)/Rules.make
diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S
index 314311c5c..c544e3725 100644
--- a/arch/ia64/lib/clear_page.S
+++ b/arch/ia64/lib/clear_page.S
@@ -10,10 +10,11 @@
* Output:
* none
*
- * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
#include <asm/page.h>
.text
@@ -21,12 +22,14 @@
.psr lsb
.lsb
- .align 32
- .global clear_page
- .proc clear_page
-clear_page:
+GLOBAL_ENTRY(clear_page)
+ UNW(.prologue)
alloc r11=ar.pfs,1,0,0,0
+ UNW(.save ar.lc, r16)
mov r16=ar.lc // slow
+
+ UNW(.body)
+
mov r17=PAGE_SIZE/32-1 // -1 = repeat/until
;;
adds r18=16,in0
@@ -38,5 +41,4 @@ clear_page:
;;
mov ar.lc=r16 // restore lc
br.ret.sptk.few rp
-
- .endp clear_page
+END(clear_page)
diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S
index 0b9a453b1..d937db016 100644
--- a/arch/ia64/lib/clear_user.S
+++ b/arch/ia64/lib/clear_user.S
@@ -11,6 +11,8 @@
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
+
//
// arguments
//
@@ -23,11 +25,10 @@
#define cnt r16
#define buf2 r17
#define saved_lc r18
-#define saved_pr r19
-#define saved_pfs r20
-#define tmp r21
-#define len2 r22
-#define len3 r23
+#define saved_pfs r19
+#define tmp r20
+#define len2 r21
+#define len3 r22
//
// Theory of operations:
@@ -65,14 +66,14 @@
.psr lsb
.lsb
- .align 32
- .global __do_clear_user
- .proc __do_clear_user
-
-__do_clear_user:
+GLOBAL_ENTRY(__do_clear_user)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,2,0,0,0
cmp.eq p6,p0=r0,len // check for zero length
+ UNW(.save ar.lc, saved_lc)
mov saved_lc=ar.lc // preserve ar.lc (slow)
+ .body
;; // avoid WAW on CFM
adds tmp=-1,len // br.ctop is repeat/until
mov ret0=len // return value is length at this point
@@ -222,4 +223,4 @@ long_do_clear:
mov ret0=len
mov ar.lc=saved_lc
br.ret.dptk.few rp
- .endp
+END(__do_clear_user)
diff --git a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S
index 7595ac83a..811db4c96 100644
--- a/arch/ia64/lib/copy_page.S
+++ b/arch/ia64/lib/copy_page.S
@@ -13,6 +13,7 @@
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
#include <asm/page.h>
#define PIPE_DEPTH 6
@@ -32,19 +33,21 @@
.psr lsb
.lsb
- .align 32
- .global copy_page
- .proc copy_page
-
-copy_page:
+GLOBAL_ENTRY(copy_page)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
.rotr t1[PIPE_DEPTH], t2[PIPE_DEPTH]
.rotp p[PIPE_DEPTH]
+ UNW(.save ar.lc, saved_lc)
mov saved_lc=ar.lc // save ar.lc ahead of time
+ UNW(.save pr, saved_pr)
mov saved_pr=pr // rotating predicates are preserved
					// registers we must save.
+ UNW(.body)
+
mov src1=in1 // initialize 1st stream source
adds src2=8,in1 // initialize 2nd stream source
mov lcount=PAGE_SIZE/16-1 // as many 16bytes as there are on a page
@@ -87,5 +90,4 @@ copy_page:
mov ar.pfs=saved_pfs // restore ar.ec
mov ar.lc=saved_lc // restore saved lc
br.ret.sptk.few rp // bye...
-
- .endp copy_page
+END(copy_page)
diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S
index 3743174ba..4a0abaed0 100644
--- a/arch/ia64/lib/copy_user.S
+++ b/arch/ia64/lib/copy_user.S
@@ -29,6 +29,8 @@
* - fix extraneous stop bit introduced by the EX() macro.
*/
+#include <asm/asmmacro.h>
+
// The label comes first because our store instruction contains a comma
// and would confuse the preprocessor otherwise
//
@@ -81,10 +83,9 @@
.psr abi64
.psr lsb
- .align 16
- .global __copy_user
- .proc __copy_user
-__copy_user:
+GLOBAL_ENTRY(__copy_user)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
.rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH]
@@ -95,13 +96,17 @@ __copy_user:
;; // RAW of cfm when len=0
cmp.eq p8,p0=r0,len // check for zero length
+ UNW(.save ar.lc, saved_lc)
mov saved_lc=ar.lc // preserve ar.lc (slow)
(p8)	br.ret.spnt.few rp		// empty memcpy()
;;
	add enddst=dst,len	// first byte after end of destination
	add endsrc=src,len	// first byte after end of source
+ UNW(.save pr, saved_pr)
mov saved_pr=pr // preserve predicates
+ UNW(.body)
+
mov dst1=dst // copy because of rotation
mov ar.ec=PIPE_DEPTH
mov pr.rot=1<<16 // p16=true all others are false
@@ -400,7 +405,4 @@ failure_out:
mov ar.pfs=saved_pfs
br.ret.dptk.few rp
-
-
- .endp __copy_user
-
+END(__copy_user)
diff --git a/arch/ia64/lib/do_csum.S b/arch/ia64/lib/do_csum.S
index d8174f10a..11bdfe031 100644
--- a/arch/ia64/lib/do_csum.S
+++ b/arch/ia64/lib/do_csum.S
@@ -13,6 +13,8 @@
*
*/
+#include <asm/asmmacro.h>
+
//
// Theory of operations:
// The goal is to go as quickly as possible to the point where
@@ -100,10 +102,9 @@
// unsigned long do_csum(unsigned char *buf,int len)
- .align 32
- .global do_csum
- .proc do_csum
-do_csum:
+GLOBAL_ENTRY(do_csum)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,2,8,0,8
.rotr p[4], result[3]
@@ -125,6 +126,7 @@ do_csum:
;;
and lastoff=7,tmp1 // how many bytes off for last element
andcm last=tmp2,tmp3 // address of word containing last byte
+ UNW(.save pr, saved_pr)
mov saved_pr=pr // preserve predicates (rotation)
;;
sub tmp3=last,first // tmp3=distance from first to last
@@ -145,8 +147,12 @@ do_csum:
shl hmask=hmask,tmp2 // build head mask, mask off [0,firstoff[
;;
shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff]
+ UNW(.save ar.lc, saved_lc)
mov saved_lc=ar.lc // save lc
;;
+
+ UNW(.body)
+
(p8) and hmask=hmask,tmask // apply tail mask to head mask if 1 word only
(p9)	and p[1]=lastval,tmask	// mask last element as appropriate
shr.u tmp3=tmp3,3 // we do 8 bytes per loop
@@ -228,3 +234,4 @@ do_csum:
mov ar.lc=saved_lc
(p10) shr.u ret0=ret0,64-16 // + shift back to position = swap bytes
br.ret.sptk.few rp
+END(do_csum)
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 0195ae5f5..ba9d59f84 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -1,9 +1,10 @@
/*
* Cache flushing routines.
*
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999-2000 Hewlett-Packard Co
+ * Copyright (C) 1999-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
#include <asm/page.h>
.text
@@ -11,12 +12,14 @@
.psr lsb
.lsb
- .align 16
- .global ia64_flush_icache_page
- .proc ia64_flush_icache_page
-ia64_flush_icache_page:
+GLOBAL_ENTRY(ia64_flush_icache_page)
+ UNW(.prologue)
alloc r2=ar.pfs,1,0,0,0
+ UNW(.save ar.lc, r3)
mov r3=ar.lc // save ar.lc
+
+ .body
+
mov r8=PAGE_SIZE/64-1 // repeat/until loop
;;
mov ar.lc=r8
@@ -34,4 +37,4 @@ ia64_flush_icache_page:
;;
mov ar.lc=r3 // restore ar.lc
br.ret.sptk.few rp
- .endp ia64_flush_icache_page
+END(ia64_flush_icache_page)
diff --git a/arch/ia64/lib/idiv.S b/arch/ia64/lib/idiv.S
index a12097c94..af424c41b 100644
--- a/arch/ia64/lib/idiv.S
+++ b/arch/ia64/lib/idiv.S
@@ -31,6 +31,7 @@
nops while maximizing parallelism
*/
+#include <asm/asmmacro.h>
#include <asm/break.h>
.text
@@ -73,12 +74,10 @@
#define PASTE(a,b) PASTE1(a,b)
#define NAME PASTE(PASTE(__,SGN),PASTE(OP,PASTE(PREC,3)))
- .align 32
- .global NAME
- .proc NAME
-NAME:
-
+GLOBAL_ENTRY(NAME)
+ UNW(.prologue)
alloc r2=ar.pfs,2,6,0,8
+ UNW(.save pr, r18)
mov r18=pr
#ifdef SINGLE
# ifdef UNSIGNED
@@ -101,6 +100,10 @@ NAME:
#endif
setf.sig f8=in0
+ UNW(.save ar.lc, r3)
+
+ UNW(.body)
+
mov r3=ar.lc // save ar.lc
setf.sig f9=in1
;;
@@ -156,3 +159,4 @@ NAME:
mov ar.lc=r3 // restore ar.lc
mov pr=r18,0xffffffffffff0000 // restore p16-p63
br.ret.sptk.few rp
+END(NAME)
diff --git a/arch/ia64/lib/io.c b/arch/ia64/lib/io.c
new file mode 100644
index 000000000..466335172
--- /dev/null
+++ b/arch/ia64/lib/io.c
@@ -0,0 +1,54 @@
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void
+__ia64_memcpy_fromio (void *to, unsigned long from, long count)
+{
+	char *dst = to;		/* avoid casting "to" as an lvalue */
+
+	while (count) {
+		count--;
+		*dst++ = readb(from);
+		from++;
+	}
+}
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void
+__ia64_memcpy_toio (unsigned long to, void *from, long count)
+{
+	char *src = from;	/* avoid casting "from" as an lvalue */
+
+	while (count) {
+		count--;
+		writeb(*src++, to);
+		to++;
+	}
+}
+
+/*
+ * "memset" on IO memory space.
+ * This needs to be optimized.
+ */
+void
+__ia64_memset_c_io (unsigned long dst, unsigned long c, long count)
+{
+ unsigned char ch = (char)(c & 0xff);
+
+ while (count) {
+ count--;
+ writeb(ch, dst);
+ dst++;
+ }
+}
+
+EXPORT_SYMBOL(__ia64_memcpy_fromio);
+EXPORT_SYMBOL(__ia64_memcpy_toio);
+EXPORT_SYMBOL(__ia64_memset_c_io);
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S
index 595720a2d..ddb42cc56 100644
--- a/arch/ia64/lib/memset.S
+++ b/arch/ia64/lib/memset.S
@@ -14,6 +14,7 @@
* Copyright (C) 1999 Stephane Eranian <eranian@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
// arguments
//
@@ -28,22 +29,23 @@
#define cnt r18
#define buf2 r19
#define saved_lc r20
-#define saved_pr r21
-#define tmp r22
+#define tmp r21
.text
.psr abi64
.psr lsb
- .align 16
- .global memset
- .proc memset
-
-memset:
+GLOBAL_ENTRY(memset)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,3,0,0,0 // cnt is sink here
cmp.eq p8,p0=r0,len // check for zero length
+ UNW(.save ar.lc, saved_lc)
mov saved_lc=ar.lc // preserve ar.lc (slow)
;;
+
+ UNW(.body)
+
adds tmp=-1,len // br.ctop is repeat/until
tbit.nz p6,p0=buf,0 // odd alignment
(p8) br.ret.spnt.few rp
@@ -108,4 +110,4 @@ long_memset:
;;
(p6) st1 [buf]=val // only 1 byte left
br.ret.dptk.few rp
- .endp
+END(memset)
diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S
index 22f205656..fbc786b41 100644
--- a/arch/ia64/lib/strlen.S
+++ b/arch/ia64/lib/strlen.S
@@ -16,6 +16,8 @@
* 09/24/99 S.Eranian add speculation recovery code
*/
+#include <asm/asmmacro.h>
+
//
//
// This is an enhanced version of the basic strlen. it includes a combination
@@ -82,10 +84,9 @@
.psr lsb
.lsb
- .align 32
- .global strlen
- .proc strlen
-strlen:
+GLOBAL_ENTRY(strlen)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,11,0,0,8 // rotating must be multiple of 8
.rotr v[2], w[2] // declares our 4 aliases
@@ -93,8 +94,12 @@ strlen:
extr.u tmp=in0,0,3 // tmp=least significant 3 bits
	mov orig=in0		// keep track of initial byte address
dep src=0,in0,0,3 // src=8byte-aligned in0 address
+ UNW(.save pr, saved_pr)
mov saved_pr=pr // preserve predicates (rotation)
;;
+
+ UNW(.body)
+
ld8 v[1]=[src],8 // must not speculate: can fail here
shl tmp=tmp,3 // multiply by 8bits/byte
mov mask=-1 // our mask
@@ -194,5 +199,4 @@ recover:
sub ret0=ret0,tmp // length=now - back -1
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
	br.ret.sptk.few rp		// end of successful recovery code
-
- .endp strlen
+END(strlen)
diff --git a/arch/ia64/lib/strlen_user.S b/arch/ia64/lib/strlen_user.S
index 8149dde8a..7f222bb13 100644
--- a/arch/ia64/lib/strlen_user.S
+++ b/arch/ia64/lib/strlen_user.S
@@ -15,6 +15,8 @@
* 09/24/99 S.Eranian added speculation recovery code
*/
+#include <asm/asmmacro.h>
+
//
// int strlen_user(char *)
// ------------------------
@@ -93,10 +95,9 @@
.psr lsb
.lsb
- .align 32
- .global __strlen_user
- .proc __strlen_user
-__strlen_user:
+GLOBAL_ENTRY(__strlen_user)
+ UNW(.prologue)
+ UNW(.save ar.pfs, saved_pfs)
alloc saved_pfs=ar.pfs,11,0,0,8
.rotr v[2], w[2] // declares our 4 aliases
@@ -104,8 +105,12 @@ __strlen_user:
extr.u tmp=in0,0,3 // tmp=least significant 3 bits
	mov orig=in0		// keep track of initial byte address
dep src=0,in0,0,3 // src=8byte-aligned in0 address
+ UNW(.save pr, saved_pr)
mov saved_pr=pr // preserve predicates (rotation)
;;
+
+ .body
+
ld8.s v[1]=[src],8 // load the initial 8bytes (must speculate)
shl tmp=tmp,3 // multiply by 8bits/byte
mov mask=-1 // our mask
@@ -209,5 +214,4 @@ recover:
mov pr=saved_pr,0xffffffffffff0000
mov ar.pfs=saved_pfs // because of ar.ec, restore no matter what
br.ret.sptk.few rp
-
- .endp __strlen_user
+END(__strlen_user)
diff --git a/arch/ia64/lib/strncpy_from_user.S b/arch/ia64/lib/strncpy_from_user.S
index f2d40984a..6ff7fa9b6 100644
--- a/arch/ia64/lib/strncpy_from_user.S
+++ b/arch/ia64/lib/strncpy_from_user.S
@@ -16,6 +16,8 @@
* by Andreas Schwab <schwab@suse.de>).
*/
+#include <asm/asmmacro.h>
+
#define EX(x...) \
99: x; \
.section __ex_table,"a"; \
@@ -28,10 +30,7 @@
.psr lsb
.lsb
- .align 32
- .global __strncpy_from_user
- .proc __strncpy_from_user
-__strncpy_from_user:
+GLOBAL_ENTRY(__strncpy_from_user)
alloc r2=ar.pfs,3,0,0,0
mov r8=0
mov r9=in1
@@ -53,5 +52,4 @@ __strncpy_from_user:
.Lexit:
br.ret.sptk.few rp
-
- .endp __strncpy_from_user
+END(__strncpy_from_user)
diff --git a/arch/ia64/lib/strnlen_user.S b/arch/ia64/lib/strnlen_user.S
index c227a9003..d70f54f2b 100644
--- a/arch/ia64/lib/strnlen_user.S
+++ b/arch/ia64/lib/strnlen_user.S
@@ -12,6 +12,8 @@
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <asm/asmmacro.h>
+
/* If a fault occurs, r8 gets set to -EFAULT and r9 gets cleared. */
#define EX(x...) \
.section __ex_table,"a"; \
@@ -25,12 +27,14 @@
.psr lsb
.lsb
- .align 32
- .global __strnlen_user
- .proc __strnlen_user
-__strnlen_user:
+GLOBAL_ENTRY(__strnlen_user)
+ UNW(.prologue)
alloc r2=ar.pfs,2,0,0,0
+ UNW(.save ar.lc, r16)
mov r16=ar.lc // preserve ar.lc
+
+ UNW(.body)
+
add r3=-1,in1
;;
mov ar.lc=r3
@@ -51,5 +55,4 @@ __strnlen_user:
mov r8=r9
mov ar.lc=r16 // restore ar.lc
br.ret.sptk.few rp
-
- .endp __strnlen_user
+END(__strnlen_user)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02c4c5792..eaac24372 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -1,8 +1,8 @@
/*
* MMU fault handling support.
*
- * Copyright (C) 1998, 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -94,7 +94,14 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- if (!handle_mm_fault(mm, vma, address, (isr & IA64_ISR_W) != 0)) {
+ switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
+ case 1:
+ ++current->min_flt;
+ break;
+ case 2:
+ ++current->maj_flt;
+ break;
+ case 0:
/*
* We ran out of memory, or some other thing happened
* to us that made us unable to handle the page fault
@@ -102,6 +109,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
*/
signal = SIGBUS;
goto bad_area;
+ default:
+ goto out_of_memory;
}
up(&mm->mmap_sem);
return;
@@ -128,15 +137,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
return;
}
if (user_mode(regs)) {
-#if 0
-printk("%s(%d): segfault accessing %lx\n", current->comm, current->pid, address);
-show_regs(regs);
-#endif
si.si_signo = signal;
si.si_errno = 0;
si.si_code = SI_KERNEL;
si.si_addr = (void *) address;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig_info(signal, &si, current);
return;
}
@@ -161,4 +166,11 @@ show_regs(regs);
die_if_kernel("Oops", regs, isr);
do_exit(SIGKILL);
return;
+
+ out_of_memory:
+ up(&mm->mmap_sem);
+ printk("VM: killing process %s\n", current->comm);
+ if (user_mode(regs))
+ do_exit(SIGKILL);
+ goto no_context;
}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b3047ce34..aed999573 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
+#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/ia32.h>
@@ -182,6 +183,19 @@ free_initmem (void)
}
void
+free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (start < end)
+ printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ clear_bit(PG_reserved, &mem_map[MAP_NR(start)].flags);
+ set_page_count(&mem_map[MAP_NR(start)], 1);
+ free_page(start);
+ ++totalram_pages;
+ }
+}
+
+void
si_meminfo (struct sysinfo *val)
{
val->totalram = totalram_pages;
@@ -265,7 +279,7 @@ put_gate_page (struct page *page, unsigned long address)
void __init
ia64_rid_init (void)
{
- unsigned long flags, rid, pta;
+ unsigned long flags, rid, pta, impl_va_msb;
/* Set up the kernel identity mappings (regions 6 & 7) and the vmalloc area (region 5): */
ia64_clear_ic(flags);
@@ -300,11 +314,15 @@ ia64_rid_init (void)
# define ld_max_addr_space_size (ld_max_addr_space_pages + PAGE_SHIFT)
# define ld_max_vpt_size (ld_max_addr_space_pages + ld_pte_size)
# define POW2(n) (1ULL << (n))
-# define IMPL_VA_MSB 50
- if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(IMPL_VA_MSB))
+ impl_va_msb = ffz(~my_cpu_data.unimpl_va_mask) - 1;
+
+ if (impl_va_msb < 50 || impl_va_msb > 60)
+ panic("Bogus impl_va_msb value of %lu!\n", impl_va_msb);
+
+ if (POW2(ld_max_addr_space_size - 1) + POW2(ld_max_vpt_size) > POW2(impl_va_msb))
panic("mm/init: overlap between virtually mapped linear page table and "
"mapped kernel space!");
- pta = POW2(61) - POW2(IMPL_VA_MSB);
+ pta = POW2(61) - POW2(impl_va_msb);
/*
* Set the (virtually mapped linear) page table address. Bit
* 8 selects between the short and long format, bits 2-7 the
@@ -314,54 +332,6 @@ ia64_rid_init (void)
ia64_set_pta(pta | (0<<8) | ((3*(PAGE_SHIFT-3)+3)<<2) | 1);
}
-#ifdef CONFIG_IA64_VIRTUAL_MEM_MAP
-
-static int
-create_mem_map_page_table (u64 start, u64 end, void *arg)
-{
- unsigned long address, start_page, end_page;
- struct page *map_start, *map_end;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- void *page;
-
- map_start = mem_map + MAP_NR(start);
- map_end = mem_map + MAP_NR(end);
-
- start_page = (unsigned long) map_start & PAGE_MASK;
- end_page = PAGE_ALIGN((unsigned long) map_end);
-
- printk("[%lx,%lx) -> %lx-%lx\n", start, end, start_page, end_page);
-
- for (address = start_page; address < end_page; address += PAGE_SIZE) {
- pgd = pgd_offset_k(address);
- if (pgd_none(*pgd)) {
- pmd = alloc_bootmem_pages(PAGE_SIZE);
- clear_page(pmd);
- pgd_set(pgd, pmd);
- pmd += (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
- } else
- pmd = pmd_offset(pgd, address);
- if (pmd_none(*pmd)) {
- pte = alloc_bootmem_pages(PAGE_SIZE);
- clear_page(pte);
- pmd_set(pmd, pte);
- pte += (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- } else
- pte = pte_offset(pmd, address);
-
- if (pte_none(*pte)) {
- page = alloc_bootmem_pages(PAGE_SIZE);
- clear_page(page);
- set_pte(pte, mk_pte_phys(__pa(page), PAGE_KERNEL));
- }
- }
- return 0;
-}
-
-#endif /* CONFIG_IA64_VIRTUAL_MEM_MAP */
-
/*
* Set up the page tables.
*/
@@ -372,14 +342,11 @@ paging_init (void)
clear_page((void *) ZERO_PAGE_ADDR);
- ia64_rid_init();
- __flush_tlb_all();
-
/* initialize mem_map[] */
memset(zones_size, 0, sizeof(zones_size));
- max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS);
+ max_dma = (PAGE_ALIGN(MAX_DMA_ADDRESS) >> PAGE_SHIFT);
if (max_low_pfn < max_dma)
zones_size[ZONE_DMA] = max_low_pfn;
else {
@@ -427,8 +394,6 @@ mem_init (void)
max_mapnr = max_low_pfn;
high_memory = __va(max_low_pfn * PAGE_SIZE);
- ia64_tlb_init();
-
totalram_pages += free_all_bootmem();
reserved_pages = 0;
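The region-register hunk above replaces the hard-wired IMPL_VA_MSB of 50 with a value probed from my_cpu_data, then sizes the virtually mapped linear page table from it. Plugging in the old value and 8KB pages (impl_va_msb = 50 and PAGE_SHIFT = 13, both assumptions for illustration), the arithmetic comes out as in this sketch (the function name is made up):

static void __init
set_pta_example (void)
{
	unsigned long impl_va_msb = 50;		/* the old IMPL_VA_MSB value  */
	unsigned long pta = (1UL << 61) - (1UL << impl_va_msb);
						/* 0x1ffc000000000000	      */
	unsigned long size = 3*(PAGE_SHIFT - 3) + 3;
						/* 33: three 10-bit levels +
						   3-bit PTE => 2^33-byte VPT */
	ia64_set_pta(pta		/* table base		      */
		     | (0 << 8)		/* bit 8 = 0: short format    */
		     | (size << 2)	/* bits 2-7: log2(table size) */
		     | 1);		/* bit 0: walker enabled      */
}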
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index acad4e200..2a57d0ae3 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -42,6 +42,70 @@ ia64_ptce_info_t ia64_ptce_info;
*/
spinlock_t ptcg_lock = SPIN_LOCK_UNLOCKED; /* see <asm/pgtable.h> */
+#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG)
+
+#include <linux/irq.h>
+
+unsigned long flush_end, flush_start, flush_nbits, flush_rid;
+atomic_t flush_cpu_count;
+
+/*
+ * flush_tlb_no_ptcg acquires ptcg_lock itself; its caller,
+ * flush_tlb_range, drops the lock after it returns
+ */
+static inline void
+flush_tlb_no_ptcg (unsigned long start, unsigned long end, unsigned long nbits)
+{
+ extern void smp_send_flush_tlb (void);
+ unsigned long saved_tpr = 0;
+ unsigned long flags;
+
+ /*
+ * Sometimes this is called with interrupts disabled, which can cause
+ * deadlock; to avoid that, we enable interrupts and raise the TPR so
+ * that ONLY the IPI is enabled.
+ */
+ __save_flags(flags);
+ if (!(flags & IA64_PSR_I)) {
+ saved_tpr = ia64_get_tpr();
+ ia64_srlz_d();
+ ia64_set_tpr(IPI_IRQ - 16);
+ ia64_srlz_d();
+ local_irq_enable();
+ }
+
+ spin_lock(&ptcg_lock);
+ flush_rid = ia64_get_rr(start);
+ ia64_srlz_d();
+ flush_start = start;
+ flush_end = end;
+ flush_nbits = nbits;
+ atomic_set(&flush_cpu_count, smp_num_cpus - 1);
+ smp_send_flush_tlb();
+ /*
+ * Purge local TLB entries. ALAT invalidation is done in ia64_leave_kernel.
+ */
+ do {
+ asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+ start += (1UL << nbits);
+ } while (start < end);
+
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
+
+ /*
+ * Wait for other CPUs to finish purging entries.
+ */
+ while (atomic_read(&flush_cpu_count)) {
+ /* Nothing */
+ }
+ if (!(flags & IA64_PSR_I)) {
+ local_irq_disable();
+ ia64_set_tpr(saved_tpr);
+ ia64_srlz_d();
+ }
+}
+
+#endif /* CONFIG_SMP && !CONFIG_ITANIUM_PTCG */
+
void
get_new_mmu_context (struct mm_struct *mm)
{
@@ -97,7 +161,7 @@ __flush_tlb_all (void)
stride0 = ia64_ptce_info.stride[0];
stride1 = ia64_ptce_info.stride[1];
- __save_and_cli(flags);
+ local_irq_save(flags);
for (i = 0; i < count0; ++i) {
for (j = 0; j < count1; ++j) {
asm volatile ("ptc.e %0" :: "r"(addr));
@@ -105,7 +169,7 @@ __flush_tlb_all (void)
}
addr += stride0;
}
- __restore_flags(flags);
+ local_irq_restore(flags);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
@@ -142,23 +206,29 @@ flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end)
}
start &= ~((1UL << nbits) - 1);
+#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG)
+ flush_tlb_no_ptcg(start, end, nbits);
+#else
spin_lock(&ptcg_lock);
do {
-#ifdef CONFIG_SMP
- __asm__ __volatile__ ("ptc.g %0,%1;;srlz.i;;"
- :: "r"(start), "r"(nbits<<2) : "memory");
-#else
- __asm__ __volatile__ ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
-#endif
+# ifdef CONFIG_SMP
+ /*
+ * Flush ALAT entries also.
+ */
+ asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
+# else
+ asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
+# endif
start += (1UL << nbits);
} while (start < end);
+#endif /* CONFIG_SMP && !defined(CONFIG_ITANIUM_PTCG) */
spin_unlock(&ptcg_lock);
ia64_insn_group_barrier();
ia64_srlz_i(); /* srlz.i implies srlz.d */
ia64_insn_group_barrier();
}
-void
+void __init
ia64_tlb_init (void)
{
ia64_get_ptce(&ia64_ptce_info);
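flush_tlb_no_ptcg() is only the initiating half of the shootdown: it publishes the range in flush_start/flush_end/flush_nbits/flush_rid, arms flush_cpu_count, sends the IPI, purges locally, then spins until every other CPU has checked in. The receiving side lives in smp.c and is not part of this hunk; a hedged sketch of its obligations under that protocol, with a hypothetical handler name:

/* Sketch only: the real handler also consults flush_rid, and the
 * initiator keeps IPIs enabled while spinning (the TPR dance above)
 * precisely so concurrent shootdowns can still be serviced. */
static void
flush_tlb_ipi_handler (void)
{
	unsigned long start = flush_start;

	do {
		asm volatile ("ptc.l %0,%1"
			      :: "r"(start), "r"(flush_nbits << 2) : "memory");
		start += (1UL << flush_nbits);
	} while (start < flush_end);
	ia64_srlz_i();			/* purges complete before signaling */
	atomic_dec(&flush_cpu_count);	/* let the initiator's spin exit    */
}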
diff --git a/arch/ia64/tools/Makefile b/arch/ia64/tools/Makefile
index 06e38e3b4..b3d2e74b9 100644
--- a/arch/ia64/tools/Makefile
+++ b/arch/ia64/tools/Makefile
@@ -44,4 +44,4 @@ print_offsets.s: print_offsets.c
endif
-.PHONY: all
+.PHONY: all modules
diff --git a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c
index 7e5901144..d7d7e3e6f 100644
--- a/arch/ia64/tools/print_offsets.c
+++ b/arch/ia64/tools/print_offsets.c
@@ -45,6 +45,9 @@ tab[] =
{ "IA64_PT_REGS_SIZE", sizeof (struct pt_regs) },
{ "IA64_SWITCH_STACK_SIZE", sizeof (struct switch_stack) },
{ "IA64_SIGINFO_SIZE", sizeof (struct siginfo) },
+#ifdef CONFIG_IA64_NEW_UNWIND
+ { "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
+#endif
{ "", 0 }, /* spacer */
{ "IA64_TASK_FLAGS_OFFSET", offsetof (struct task_struct, flags) },
{ "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
@@ -58,11 +61,95 @@ tab[] =
{ "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
{ "IA64_TASK_MM_OFFSET", offsetof (struct task_struct, mm) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
+ { "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
+ { "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
+ { "IA64_PT_REGS_AR_UNAT_OFFSET", offsetof (struct pt_regs, ar_unat) },
+ { "IA64_PT_REGS_AR_PFS_OFFSET", offsetof (struct pt_regs, ar_pfs) },
+ { "IA64_PT_REGS_AR_RSC_OFFSET", offsetof (struct pt_regs, ar_rsc) },
+ { "IA64_PT_REGS_AR_RNAT_OFFSET", offsetof (struct pt_regs, ar_rnat) },
+ { "IA64_PT_REGS_AR_BSPSTORE_OFFSET",offsetof (struct pt_regs, ar_bspstore) },
+ { "IA64_PT_REGS_PR_OFFSET", offsetof (struct pt_regs, pr) },
+ { "IA64_PT_REGS_B6_OFFSET", offsetof (struct pt_regs, b6) },
+ { "IA64_PT_REGS_LOADRS_OFFSET", offsetof (struct pt_regs, loadrs) },
+ { "IA64_PT_REGS_R1_OFFSET", offsetof (struct pt_regs, r1) },
+ { "IA64_PT_REGS_R2_OFFSET", offsetof (struct pt_regs, r2) },
+ { "IA64_PT_REGS_R3_OFFSET", offsetof (struct pt_regs, r3) },
{ "IA64_PT_REGS_R12_OFFSET", offsetof (struct pt_regs, r12) },
+ { "IA64_PT_REGS_R13_OFFSET", offsetof (struct pt_regs, r13) },
+ { "IA64_PT_REGS_R14_OFFSET", offsetof (struct pt_regs, r14) },
+ { "IA64_PT_REGS_R15_OFFSET", offsetof (struct pt_regs, r15) },
{ "IA64_PT_REGS_R8_OFFSET", offsetof (struct pt_regs, r8) },
+ { "IA64_PT_REGS_R9_OFFSET", offsetof (struct pt_regs, r9) },
+ { "IA64_PT_REGS_R10_OFFSET", offsetof (struct pt_regs, r10) },
+ { "IA64_PT_REGS_R11_OFFSET", offsetof (struct pt_regs, r11) },
{ "IA64_PT_REGS_R16_OFFSET", offsetof (struct pt_regs, r16) },
- { "IA64_SWITCH_STACK_B0_OFFSET", offsetof (struct switch_stack, b0) },
- { "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) },
+ { "IA64_PT_REGS_R17_OFFSET", offsetof (struct pt_regs, r17) },
+ { "IA64_PT_REGS_R18_OFFSET", offsetof (struct pt_regs, r18) },
+ { "IA64_PT_REGS_R19_OFFSET", offsetof (struct pt_regs, r19) },
+ { "IA64_PT_REGS_R20_OFFSET", offsetof (struct pt_regs, r20) },
+ { "IA64_PT_REGS_R21_OFFSET", offsetof (struct pt_regs, r21) },
+ { "IA64_PT_REGS_R22_OFFSET", offsetof (struct pt_regs, r22) },
+ { "IA64_PT_REGS_R23_OFFSET", offsetof (struct pt_regs, r23) },
+ { "IA64_PT_REGS_R24_OFFSET", offsetof (struct pt_regs, r24) },
+ { "IA64_PT_REGS_R25_OFFSET", offsetof (struct pt_regs, r25) },
+ { "IA64_PT_REGS_R26_OFFSET", offsetof (struct pt_regs, r26) },
+ { "IA64_PT_REGS_R27_OFFSET", offsetof (struct pt_regs, r27) },
+ { "IA64_PT_REGS_R28_OFFSET", offsetof (struct pt_regs, r28) },
+ { "IA64_PT_REGS_R29_OFFSET", offsetof (struct pt_regs, r29) },
+ { "IA64_PT_REGS_R30_OFFSET", offsetof (struct pt_regs, r30) },
+ { "IA64_PT_REGS_R31_OFFSET", offsetof (struct pt_regs, r31) },
+ { "IA64_PT_REGS_AR_CCV_OFFSET", offsetof (struct pt_regs, ar_ccv) },
+ { "IA64_PT_REGS_AR_FPSR_OFFSET", offsetof (struct pt_regs, ar_fpsr) },
+ { "IA64_PT_REGS_B0_OFFSET", offsetof (struct pt_regs, b0) },
+ { "IA64_PT_REGS_B7_OFFSET", offsetof (struct pt_regs, b7) },
+ { "IA64_PT_REGS_F6_OFFSET", offsetof (struct pt_regs, f6) },
+ { "IA64_PT_REGS_F7_OFFSET", offsetof (struct pt_regs, f7) },
+ { "IA64_PT_REGS_F8_OFFSET", offsetof (struct pt_regs, f8) },
+ { "IA64_PT_REGS_F9_OFFSET", offsetof (struct pt_regs, f9) },
+ { "IA64_SWITCH_STACK_CALLER_UNAT_OFFSET", offsetof (struct switch_stack, caller_unat) },
+ { "IA64_SWITCH_STACK_AR_FPSR_OFFSET", offsetof (struct switch_stack, ar_fpsr) },
+ { "IA64_SWITCH_STACK_F2_OFFSET", offsetof (struct switch_stack, f2) },
+ { "IA64_SWITCH_STACK_F3_OFFSET", offsetof (struct switch_stack, f3) },
+ { "IA64_SWITCH_STACK_F4_OFFSET", offsetof (struct switch_stack, f4) },
+ { "IA64_SWITCH_STACK_F5_OFFSET", offsetof (struct switch_stack, f5) },
+ { "IA64_SWITCH_STACK_F10_OFFSET", offsetof (struct switch_stack, f10) },
+ { "IA64_SWITCH_STACK_F11_OFFSET", offsetof (struct switch_stack, f11) },
+ { "IA64_SWITCH_STACK_F12_OFFSET", offsetof (struct switch_stack, f12) },
+ { "IA64_SWITCH_STACK_F13_OFFSET", offsetof (struct switch_stack, f13) },
+ { "IA64_SWITCH_STACK_F14_OFFSET", offsetof (struct switch_stack, f14) },
+ { "IA64_SWITCH_STACK_F15_OFFSET", offsetof (struct switch_stack, f15) },
+ { "IA64_SWITCH_STACK_F16_OFFSET", offsetof (struct switch_stack, f16) },
+ { "IA64_SWITCH_STACK_F17_OFFSET", offsetof (struct switch_stack, f17) },
+ { "IA64_SWITCH_STACK_F18_OFFSET", offsetof (struct switch_stack, f18) },
+ { "IA64_SWITCH_STACK_F19_OFFSET", offsetof (struct switch_stack, f19) },
+ { "IA64_SWITCH_STACK_F20_OFFSET", offsetof (struct switch_stack, f20) },
+ { "IA64_SWITCH_STACK_F21_OFFSET", offsetof (struct switch_stack, f21) },
+ { "IA64_SWITCH_STACK_F22_OFFSET", offsetof (struct switch_stack, f22) },
+ { "IA64_SWITCH_STACK_F23_OFFSET", offsetof (struct switch_stack, f23) },
+ { "IA64_SWITCH_STACK_F24_OFFSET", offsetof (struct switch_stack, f24) },
+ { "IA64_SWITCH_STACK_F25_OFFSET", offsetof (struct switch_stack, f25) },
+ { "IA64_SWITCH_STACK_F26_OFFSET", offsetof (struct switch_stack, f26) },
+ { "IA64_SWITCH_STACK_F27_OFFSET", offsetof (struct switch_stack, f27) },
+ { "IA64_SWITCH_STACK_F28_OFFSET", offsetof (struct switch_stack, f28) },
+ { "IA64_SWITCH_STACK_F29_OFFSET", offsetof (struct switch_stack, f29) },
+ { "IA64_SWITCH_STACK_F30_OFFSET", offsetof (struct switch_stack, f30) },
+ { "IA64_SWITCH_STACK_F31_OFFSET", offsetof (struct switch_stack, f31) },
+ { "IA64_SWITCH_STACK_R4_OFFSET", offsetof (struct switch_stack, r4) },
+ { "IA64_SWITCH_STACK_R5_OFFSET", offsetof (struct switch_stack, r5) },
+ { "IA64_SWITCH_STACK_R6_OFFSET", offsetof (struct switch_stack, r6) },
+ { "IA64_SWITCH_STACK_R7_OFFSET", offsetof (struct switch_stack, r7) },
+ { "IA64_SWITCH_STACK_B0_OFFSET", offsetof (struct switch_stack, b0) },
+ { "IA64_SWITCH_STACK_B1_OFFSET", offsetof (struct switch_stack, b1) },
+ { "IA64_SWITCH_STACK_B2_OFFSET", offsetof (struct switch_stack, b2) },
+ { "IA64_SWITCH_STACK_B3_OFFSET", offsetof (struct switch_stack, b3) },
+ { "IA64_SWITCH_STACK_B4_OFFSET", offsetof (struct switch_stack, b4) },
+ { "IA64_SWITCH_STACK_B5_OFFSET", offsetof (struct switch_stack, b5) },
+ { "IA64_SWITCH_STACK_AR_PFS_OFFSET", offsetof (struct switch_stack, ar_pfs) },
+ { "IA64_SWITCH_STACK_AR_LC_OFFSET", offsetof (struct switch_stack, ar_lc) },
+ { "IA64_SWITCH_STACK_AR_UNAT_OFFSET", offsetof (struct switch_stack, ar_unat) },
+ { "IA64_SWITCH_STACK_AR_RNAT_OFFSET", offsetof (struct switch_stack, ar_rnat) },
+ { "IA64_SWITCH_STACK_AR_BSPSTORE_OFFSET", offsetof (struct switch_stack, ar_bspstore) },
+ { "IA64_SWITCH_STACK_PR_OFFSET", offsetof (struct switch_stack, b0) },
{ "IA64_SIGCONTEXT_AR_BSP_OFFSET", offsetof (struct sigcontext, sc_ar_bsp) },
{ "IA64_SIGCONTEXT_AR_RNAT_OFFSET", offsetof (struct sigcontext, sc_ar_rnat) },
{ "IA64_SIGCONTEXT_FLAGS_OFFSET", offsetof (struct sigcontext, sc_flags) },
diff --git a/arch/ia64/vmlinux.lds.S b/arch/ia64/vmlinux.lds.S
index 08e7f9f9a..0e2dc7aaa 100644
--- a/arch/ia64/vmlinux.lds.S
+++ b/arch/ia64/vmlinux.lds.S
@@ -32,6 +32,13 @@ SECTIONS
#endif
_etext = .;
+ /* Read-only data */
+
+ __gp = ALIGN(8) + 0x200000;
+
+ /* Global data */
+ _data = .;
+
/* Exception table */
. = ALIGN(16);
__start___ex_table = .;
@@ -39,19 +46,33 @@ SECTIONS
{ *(__ex_table) }
__stop___ex_table = .;
- /* Kernel symbol names for modules: */
+ __start___ksymtab = .; /* Kernel symbol table */
+ __ksymtab : AT(ADDR(__ksymtab) - PAGE_OFFSET)
+ { *(__ksymtab) }
+ __stop___ksymtab = .;
+
+ /* Unwind table */
+ ia64_unw_start = .;
+ .IA_64.unwind : AT(ADDR(.IA_64.unwind) - PAGE_OFFSET)
+ { *(.IA_64.unwind) }
+ ia64_unw_end = .;
+ .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
+ { *(.IA_64.unwind_info) }
+
+ .rodata : AT(ADDR(.rodata) - PAGE_OFFSET)
+ { *(.rodata) }
.kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
{ *(.kstrtab) }
+ .opd : AT(ADDR(.opd) - PAGE_OFFSET)
+ { *(.opd) }
- /* The initial task and kernel stack */
- . = ALIGN(PAGE_SIZE);
- init_task : AT(ADDR(init_task) - PAGE_OFFSET)
- { *(init_task) }
+ /* Initialization code and data: */
- /* Startup code */
+ . = ALIGN(PAGE_SIZE);
__init_begin = .;
.text.init : AT(ADDR(.text.init) - PAGE_OFFSET)
{ *(.text.init) }
+
.data.init : AT(ADDR(.data.init) - PAGE_OFFSET)
{ *(.data.init) }
. = ALIGN(16);
@@ -66,6 +87,10 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
+ /* The initial task and kernel stack */
+ init_task : AT(ADDR(init_task) - PAGE_OFFSET)
+ { *(init_task) }
+
.data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
{ *(.data.idt) }
@@ -73,18 +98,13 @@ SECTIONS
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
{ *(.data.cacheline_aligned) }
- /* Global data */
- _data = .;
+ /* Kernel symbol names for modules: */
+ .kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
+ { *(.kstrtab) }
- .rodata : AT(ADDR(.rodata) - PAGE_OFFSET)
- { *(.rodata) }
- .opd : AT(ADDR(.opd) - PAGE_OFFSET)
- { *(.opd) }
.data : AT(ADDR(.data) - PAGE_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
- __gp = ALIGN (8) + 0x200000;
-
.got : AT(ADDR(.got) - PAGE_OFFSET)
{ *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
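The relocation of __gp ahead of the data sections keeps the 0x200000 bias meaningful: ia64 materializes gp-relative addresses with addl's signed 22-bit immediate, a reach of roughly ±2MB, so planting __gp 2MB past the start of the data puts the entire 4MB window over it. A standalone check of that arithmetic; the base address below is made up:

#include <stdio.h>

/* Worked check: with gp = base + 0x200000 and a signed 22-bit
 * displacement in [-0x200000, 0x1fffff], the reachable span is
 * exactly [base, base + 4MB). */
int
main (void)
{
	unsigned long base = 0xe000000000100000UL;	/* illustrative only */
	unsigned long gp = base + 0x200000;

	printf ("gp-reachable: [%#lx, %#lx]\n", gp - 0x200000, gp + 0x1fffff);
	return 0;
}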