path: root/arch/ia64
author    Ralf Baechle <ralf@linux-mips.org>  2000-03-02 02:36:47 +0000
committer Ralf Baechle <ralf@linux-mips.org>  2000-03-02 02:36:47 +0000
commit    8624512aa908741ba2795200133eae0d7f4557ea
tree      d5d3036fccf2604f4c98dedc11e8adb929d6b52e /arch/ia64
parent    7b8f5d6f1d45d9f9de1d26e7d3c32aa5af11b488
Merge with 2.3.48.
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c    |   2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S      |   2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c        |  21
-rw-r--r--  arch/ia64/kdb/kdbsupport.c       |  27
-rw-r--r--  arch/ia64/kernel/irq.c           |   3
-rw-r--r--  arch/ia64/kernel/irq_internal.c  |   2
-rw-r--r--  arch/ia64/kernel/ivt.S           |   2
-rw-r--r--  arch/ia64/kernel/process.c       |   8
-rw-r--r--  arch/ia64/kernel/time.c          |  20
-rw-r--r--  arch/ia64/kernel/traps.c         | 116
-rw-r--r--  arch/ia64/kernel/unaligned.c     |  47
-rw-r--r--  arch/ia64/lib/copy_user.S        | 440
12 files changed, 572 insertions, 118 deletions
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index 685d85b20..af51038e5 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -82,6 +82,8 @@ void ia64_elf32_init(struct pt_regs *regs)
/* Do all the IA-32 setup here */
+ current->thread.map_base = 0x40000000;
+
/* CS descriptor */
__asm__("mov ar.csd = %0" : /* no outputs */
: "r" IA64_SEG_DESCRIPTOR(0L, 0xFFFFFL, 0xBL, 1L,
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index 82ba58129..bd7b0517b 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -75,7 +75,7 @@ ia32_syscall_table:
data8 sys_unlink /* 10 */
data8 sys32_execve
data8 sys_chdir
- data8 sys_ni_syscall /* sys_time is not supported on ia64 */
+ data8 sys32_time
data8 sys_mknod
data8 sys_chmod /* 15 */
data8 sys_lchown
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index d61f1cfe5..8d4e4a8fd 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -278,7 +278,7 @@ do_mmap_fake(struct file *file, unsigned long addr, unsigned long len,
if (!file)
return -EINVAL;
inode = file->f_dentry->d_inode;
- if (!inode->i_op || !inode->i_op->default_file_ops)
+ if (!inode->i_fop)
return -EINVAL;
if (!file->f_op->read)
return -EINVAL;
@@ -1930,6 +1930,25 @@ out:
return err;
}
+/*
+ * sys_time() can be implemented in user-level using
+ * sys_gettimeofday(). IA64 did this but i386 Linux did not,
+ * so we have to implement this system call here.
+ */
+asmlinkage long sys32_time(int * tloc)
+{
+ int i;
+
+ /* SMP: This is fairly trivial. We grab CURRENT_TIME and
+ stuff it to user space. No side effects */
+ i = CURRENT_TIME;
+ if (tloc) {
+ if (put_user(i,tloc))
+ i = -EFAULT;
+ }
+ return i;
+}
+
#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
/* In order to reduce some races, while at the same time doing additional
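Aside: the comment above points out that time(2) is expressible in terms of gettimeofday(2), which is why native IA-64 never needed a sys_time. A minimal user-level sketch of that equivalence (my_time is a hypothetical name, not part of this patch):

/* Sketch only: the user-level time() that native IA-64 relies on,
 * built from POSIX gettimeofday(). */
#include <stddef.h>
#include <sys/time.h>
#include <time.h>

time_t my_time(time_t *tloc)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) < 0)
		return (time_t) -1;
	if (tloc)
		*tloc = tv.tv_sec;	/* mirrors sys32_time's put_user() */
	return tv.tv_sec;
}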
diff --git a/arch/ia64/kdb/kdbsupport.c b/arch/ia64/kdb/kdbsupport.c
index 0b574ae6e..d074a01a3 100644
--- a/arch/ia64/kdb/kdbsupport.c
+++ b/arch/ia64/kdb/kdbsupport.c
@@ -28,9 +28,10 @@
#include <linux/stddef.h>
#include <linux/vmalloc.h>
-#include <asm/uaccess.h>
+#include <asm/delay.h>
#include <asm/kdbsupport.h>
#include <asm/rse.h>
+#include <asm/uaccess.h>
extern kdb_state_t kdb_state ;
k_machreg_t dbregs[KDB_DBREGS];
@@ -45,6 +46,21 @@ kdb_setup (char *str)
__setup("kdb", kdb_setup);
static int
+kdb_ia64_itm (int argc, const char **argv, const char **envp, struct pt_regs *regs)
+{
+ int diag;
+ unsigned long val;
+
+ diag = kdbgetularg(argv[1], &val);
+ if (diag)
+ return diag;
+ kdb_printf("new itm=%0xlx\n", val);
+
+ ia64_set_itm(val);
+ return 0;
+}
+
+static int
kdb_ia64_sir (int argc, const char **argv, const char **envp, struct pt_regs *regs)
{
u64 lid, tpr, lrr0, lrr1, itv, pmv, cmcv;
@@ -53,15 +69,17 @@ kdb_ia64_sir (int argc, const char **argv, const char **envp, struct pt_regs *re
asm ("mov %0=cr.tpr" : "=r"(tpr));
asm ("mov %0=cr.lrr0" : "=r"(lrr0));
asm ("mov %0=cr.lrr1" : "=r"(lrr1));
- printk ("lid=0x%lx, tpr=0x%lx, lrr0=0x%lx, llr1=0x%lx\n", lid, tpr, lrr0, lrr1);
+ printk("lid=0x%lx, tpr=0x%lx, lrr0=0x%lx, llr1=0x%lx\n", lid, tpr, lrr0, lrr1);
asm ("mov %0=cr.itv" : "=r"(itv));
asm ("mov %0=cr.pmv" : "=r"(pmv));
asm ("mov %0=cr.cmcv" : "=r"(cmcv));
- printk ("itv=0x%lx, pmv=0x%lx, cmcv=0x%lx\n", itv, pmv, cmcv);
+ printk("itv=0x%lx, pmv=0x%lx, cmcv=0x%lx\n", itv, pmv, cmcv);
- printk ("irr=0x%016lx,0x%016lx,0x%016lx,0x%016lx\n",
+ printk("irr=0x%016lx,0x%016lx,0x%016lx,0x%016lx\n",
ia64_get_irr0(), ia64_get_irr1(), ia64_get_irr2(), ia64_get_irr3());
+
+ printk("itc=0x%016lx, itm=0x%016lx\n", ia64_get_itc(), ia64_get_itm());
return 0;
}
@@ -90,6 +108,7 @@ kdb_init (void)
kdb_state.bkpt_handling_state = BKPTSTATE_NOT_HANDLED ;
kdb_register("irr", kdb_ia64_sir, "", "Show interrupt registers", 0);
+ kdb_register("itm", kdb_ia64_itm, "", "Set new ITM value", 0);
}
/*
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 5efe50164..6059e41c6 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -158,6 +158,9 @@ ia64_handle_irq (unsigned long irq, struct pt_regs *regs)
unsigned long eoi_ptr;
# ifdef CONFIG_USB
+ extern void reenable_usb (void);
+ extern void disable_usb (void);
+
if (usbfix)
disable_usb();
# endif
diff --git a/arch/ia64/kernel/irq_internal.c b/arch/ia64/kernel/irq_internal.c
index 1ae904fe8..cc59e0c72 100644
--- a/arch/ia64/kernel/irq_internal.c
+++ b/arch/ia64/kernel/irq_internal.c
@@ -60,7 +60,7 @@ internal_noop (unsigned int irq)
}
struct hw_interrupt_type irq_type_ia64_internal = {
- "IA64 internal",
+ "IA64-internal",
(void (*)(unsigned long)) internal_noop, /* init */
internal_noop, /* startup */
internal_noop, /* shutdown */
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 4c3ac242a..b4592999f 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -1026,7 +1026,7 @@ dispatch_to_fault_handler:
// 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
mov r16=cr.ifa
rsm psr.dt
-#if 0
+#if 1
// If you disable this, you MUST re-enable the update_mmu_cache() code in pgtable.h
mov r17=_PAGE_SIZE_4K<<2
;;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5b6deb5f5..cc26b8760 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -97,6 +97,14 @@ cpu_idle (void *unused)
check_pgt_cache();
if (pm_idle)
(*pm_idle)();
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+ if (ia64_get_itm() < ia64_get_itc()) {
+ extern void ia64_reset_itm();
+
+ printk("cpu_idle: ITM in past, resetting it...\n");
+ ia64_reset_itm();
+ }
+#endif
}
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 7c5ace740..cfcff3063 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -21,6 +21,10 @@
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
+#ifdef CONFIG_KDB
+# include <linux/kdb.h>
+#endif
+
extern rwlock_t xtime_lock;
extern volatile unsigned long lost_ticks;
@@ -61,7 +65,7 @@ do_profile (unsigned long ip)
* update to jiffy. The xtime_lock must be at least read-locked when
* calling this routine.
*/
-static inline unsigned long
+static /*inline*/ unsigned long
gettimeoffset (void)
{
unsigned long now = ia64_get_itc();
@@ -186,6 +190,20 @@ timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
write_unlock(&xtime_lock);
}
+#ifdef CONFIG_ITANIUM_ASTEP_SPECIFIC
+
+void
+ia64_reset_itm (void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ timer_interrupt(0, 0, current);
+ local_irq_restore(flags);
+}
+
+#endif /* CONFIG_ITANIUM_ASTEP_SPECIFIC */
+
/*
* Encapsulate access to the itm structure for SMP.
*/
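Aside: gettimeoffset() above documents that xtime_lock must be at least read-held by callers. A sketch of that convention as it would sit in this file (the function name is hypothetical, and the 2.3-era xtime/read_lock_irqsave() environment is assumed; this is not code from the patch):

/* Sketch only: the read-lock discipline gettimeoffset() requires. */
static void sketch_gettimeofday(struct timeval *tv)
{
	unsigned long flags, usec, sec;

	read_lock_irqsave(&xtime_lock, flags);
	usec = gettimeoffset();		/* ITC delta since the last tick */
	sec = xtime.tv_sec;
	usec += xtime.tv_usec;
	read_unlock_irqrestore(&xtime_lock, flags);

	while (usec >= 1000000) {	/* carry microseconds into seconds */
		usec -= 1000000;
		sec++;
	}
	tv->tv_sec = sec;
	tv->tv_usec = usec;
}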
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index c242622ec..1f5106036 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -110,15 +110,75 @@ void
ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
{
siginfo_t siginfo;
+ int sig, code;
- /* gdb uses a break number of 0xccccc for debug breakpoints: */
- if (break_num != 0xccccc)
- die_if_kernel("Bad break", regs, break_num);
+ /* SIGILL, SIGFPE, SIGSEGV, and SIGBUS want these fields initialized: */
+ siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+ siginfo.si_imm = break_num;
- siginfo.si_signo = SIGTRAP;
- siginfo.si_errno = break_num; /* XXX is it legal to abuse si_errno like this? */
- siginfo.si_code = TRAP_BRKPT;
- send_sig_info(SIGTRAP, &siginfo, current);
+ switch (break_num) {
+ case 0: /* unknown error */
+ sig = SIGILL; code = ILL_ILLOPC;
+ break;
+
+ case 1: /* integer divide by zero */
+ sig = SIGFPE; code = FPE_INTDIV;
+ break;
+
+ case 2: /* integer overflow */
+ sig = SIGFPE; code = FPE_INTOVF;
+ break;
+
+ case 3: /* range check/bounds check */
+ sig = SIGFPE; code = FPE_FLTSUB;
+ break;
+
+ case 4: /* null pointer dereference */
+ sig = SIGSEGV; code = SEGV_MAPERR;
+ break;
+
+ case 5: /* misaligned data */
+ sig = SIGSEGV; code = BUS_ADRALN;
+ break;
+
+ case 6: /* decimal overflow */
+ sig = SIGFPE; code = __FPE_DECOVF;
+ break;
+
+ case 7: /* decimal divide by zero */
+ sig = SIGFPE; code = __FPE_DECDIV;
+ break;
+
+ case 8: /* packed decimal error */
+ sig = SIGFPE; code = __FPE_DECERR;
+ break;
+
+ case 9: /* invalid ASCII digit */
+ sig = SIGFPE; code = __FPE_INVASC;
+ break;
+
+ case 10: /* invalid decimal digit */
+ sig = SIGFPE; code = __FPE_INVDEC;
+ break;
+
+ case 11: /* paragraph stack overflow */
+ sig = SIGSEGV; code = __SEGV_PSTKOVF;
+ break;
+
+ default:
+ if (break_num < 0x40000 || break_num > 0x100000)
+ die_if_kernel("Bad break", regs, break_num);
+
+ if (break_num < 0x80000) {
+ sig = SIGILL; code = __ILL_BREAK;
+ } else {
+ sig = SIGTRAP; code = TRAP_BRKPT;
+ }
+ }
+ siginfo.si_signo = sig;
+ siginfo.si_errno = 0;
+ siginfo.si_code = code;
+ send_sig_info(sig, &siginfo, current);
}
/*
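Aside: the switch above maps architected break immediates to distinct signals and si_code values instead of the old blanket SIGTRAP. A hedged user-space sketch of what a task can now observe (assumes POSIX sigaction(); whether the compiler actually emits a "break 1" divide-by-zero check is toolchain specific):

/* Sketch only: observing the new siginfo mapping from user space. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void fpe_handler(int sig, siginfo_t *si, void *ctx)
{
	/* si_code now distinguishes FPE_INTDIV, FPE_INTOVF, ... */
	fprintf(stderr, "SIGFPE code=%d addr=%p\n", si->si_code, si->si_addr);
	exit(1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sa.sa_sigaction = fpe_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGFPE, &sa, NULL);

	volatile int zero = 0;
	return 1 / zero;	/* expected: SIGFPE with si_code FPE_INTDIV */
}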
@@ -240,6 +300,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
{
long exception, bundle[2];
unsigned long fault_ip;
+ struct siginfo siginfo;
static int fpu_swa_count = 0;
static unsigned long last_time;
@@ -265,21 +326,41 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
ia64_increment_ip(regs);
} else if (exception == -1) {
printk("handle_fpu_swa: fp_emulate() returned -1\n");
- return -2;
+ return -1;
} else {
/* is next instruction a trap? */
if (exception & 2) {
ia64_increment_ip(regs);
}
- return -1;
+ siginfo.si_signo = SIGFPE;
+ siginfo.si_errno = 0;
+ siginfo.si_code = 0;
+ siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+ if (isr & 0x11) {
+ siginfo.si_code = FPE_FLTINV;
+ } else if (isr & 0x44) {
+ siginfo.si_code = FPE_FLTDIV;
+ }
+ send_sig_info(SIGFPE, &siginfo, current);
}
} else {
if (exception == -1) {
printk("handle_fpu_swa: fp_emulate() returned -1\n");
- return -2;
+ return -1;
} else if (exception != 0) {
/* raise exception */
- return -1;
+ siginfo.si_signo = SIGFPE;
+ siginfo.si_errno = 0;
+ siginfo.si_code = 0;
+ siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
+ if (isr & 0x880) {
+ siginfo.si_code = FPE_FLTOVF;
+ } else if (isr & 0x1100) {
+ siginfo.si_code = FPE_FLTUND;
+ } else if (isr & 0x2200) {
+ siginfo.si_code = FPE_FLTRES;
+ }
+ send_sig_info(SIGFPE, &siginfo, current);
}
}
return 0;
@@ -369,22 +450,19 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
return;
case 30: /* Unaligned fault */
- sprintf(buf, "Unaligned access in kernel mode---don't do this!");
+ sprintf(buf, "Kernel unaligned trap accessing %016lx (ip=%016lx)!",
+ ifa, regs->cr_iip + ia64_psr(regs)->ri);
break;
case 32: /* fp fault */
case 33: /* fp trap */
- result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, isr);
+ result = handle_fpu_swa((vector == 32) ? 1 : 0, regs, &isr);
if (result < 0) {
siginfo.si_signo = SIGFPE;
siginfo.si_errno = 0;
- siginfo.si_code = 0; /* XXX fix me */
+ siginfo.si_code = FPE_FLTINV;
siginfo.si_addr = (void *) (regs->cr_iip + ia64_psr(regs)->ri);
- send_sig_info(SIGFPE, &siginfo, current);
- if (result == -1)
- send_sig_info(SIGFPE, &siginfo, current);
- else
- force_sig(SIGFPE, current);
+ force_sig(SIGFPE, current);
}
return;
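Aside: the ISR bit tests added to handle_fpu_swa() above amount to a small decode table. Restated as a standalone helper for readability (fp_si_code is a hypothetical name; the masks are copied verbatim from the hunks above):

/* Sketch only: ISR-to-si_code decode, factored out of handle_fpu_swa(). */
static int fp_si_code(unsigned long isr, int fp_fault)
{
	if (fp_fault) {			/* vector 32: fp fault */
		if (isr & 0x11)
			return FPE_FLTINV;	/* invalid operation */
		if (isr & 0x44)
			return FPE_FLTDIV;	/* divide by zero */
	} else {			/* vector 33: fp trap */
		if (isr & 0x880)
			return FPE_FLTOVF;	/* overflow */
		if (isr & 0x1100)
			return FPE_FLTUND;	/* underflow */
		if (isr & 0x2200)
			return FPE_FLTRES;	/* inexact result */
	}
	return 0;			/* unrecognized: leave si_code 0 */
}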
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 0bd213f6b..014adcf35 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1384,30 +1384,33 @@ ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs)
load_store_t *insn;
int ret = -1;
- /*
- * We flag unaligned references while in kernel as
- * errors: the kernel must be fixed. The switch code
- * is in ivt.S at entry 30.
- *
- * So here we keep a simple sanity check.
- */
- if ( !user_mode(regs) ) {
- die_if_kernel("Unaligned reference while in kernel\n", regs, 30);
- /* NOT_REACHED */
+ if (current->thread.flags & IA64_THREAD_UAC_SIGBUS) {
+ struct siginfo si;
+
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+ si.si_addr = (void *) ifa;
+ send_sig_info (SIGBUS, &si, current);
+ return;
}
- /*
- * Make sure we log the unaligned access, so that user/sysadmin can notice it
- * and eventually fix the program.
- *
- * We don't want to do that for every access so we pace it with jiffies.
- */
- if ( unalign_count > 5 && jiffies - last_time > 5*HZ ) unalign_count = 0;
- if ( ++unalign_count < 5 ) {
- last_time = jiffies;
- printk("%s(%d): unaligned trap accessing %016lx (ip=%016lx)\n",
- current->comm, current->pid, ifa, regs->cr_iip + ipsr->ri);
-
+ if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)) {
+ /*
+ * Make sure we log the unaligned access, so that
+ * user/sysadmin can notice it and eventually fix the
+ * program.
+ *
+ * We don't want to do that for every access so we
+ * pace it with jiffies.
+ */
+ if (unalign_count > 5 && jiffies - last_time > 5*HZ)
+ unalign_count = 0;
+ if (++unalign_count < 5) {
+ last_time = jiffies;
+ printk("%s(%d): unaligned trap accessing %016lx (ip=%016lx)\n",
+ current->comm, current->pid, ifa, regs->cr_iip + ipsr->ri);
+ }
}
DPRINT(("iip=%lx ifa=%lx isr=%lx\n", regs->cr_iip, ifa, regs->cr_ipsr));
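Aside: with IA64_THREAD_UAC_SIGBUS set, an unaligned access now reaches the task as SIGBUS/BUS_ADRALN with si_addr holding the misaligned address. A hedged user-space sketch of a handler inspecting that (assumes POSIX sigaction(); the deliberately misaligned load below only faults on strict-alignment targets):

/* Sketch only: what a task with the SIGBUS unaligned policy observes. */
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void bus_handler(int sig, siginfo_t *si, void *ctx)
{
	/* BUS_ADRALN, with the faulting address, per the hunk above */
	fprintf(stderr, "SIGBUS code=%d addr=%p\n", si->si_code, si->si_addr);
	exit(1);
}

int main(void)
{
	static char buf[16];
	struct sigaction sa;

	memset(&sa, 0, sizeof sa);
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGBUS, &sa, NULL);

	return *(volatile long *) (buf + 1);	/* misaligned 8-byte load */
}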
diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S
index 03a540a80..58c92876f 100644
--- a/arch/ia64/lib/copy_user.S
+++ b/arch/ia64/lib/copy_user.S
@@ -1,71 +1,375 @@
-/*
- * This routine copies a linear memory buffer across the user/kernel boundary. When
- * reading a byte from the source causes a fault, the remainder of the destination
- * buffer is zeroed out. Note that this can happen only when copying from user
- * to kernel memory and we do this to absolutely guarantee that the
- * kernel doesn't operate on random data.
- *
- * This file is derived from arch/alpha/lib/copy_user.S.
- *
- * Inputs:
- * in0: address of destination buffer
- * in1: address of source buffer
- * in2: length of buffer in bytes
- * Outputs:
- * r8: number of bytes that didn't get copied due to a fault
- *
- * Copyright (C) 1999 Hewlett-Packard Co
- * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-#define EXI(x...) \
-99: x; \
+// The label comes first because our store instruction contains a comma
+// and would confuse the preprocessor otherwise
+//
+#undef DEBUG
+#ifdef DEBUG
+#define EX(y,x...) \
+99: x
+#else
+#define EX(y,x...) \
.section __ex_table,"a"; \
- data4 @gprel(99b); \
- data4 .Lexit_in-99b; \
- .previous
+ data4 @gprel(99f); \
+ data4 y-99f; \
+ .previous; \
+99: x
+#endif
-#define EXO(x...) \
-99: x; \
- .section __ex_table,"a"; \
- data4 @gprel(99b); \
- data4 .Lexit_out-99b; \
- .previous
-
- .text
- .psr abi64
- .psr lsb
- .lsb
-
- .align 32
- .global __copy_user
- .proc __copy_user
+//
+// Tuneable parameters
+//
+#define COPY_BREAK 16 // we do byte copy below (must be >=16)
+#define PIPE_DEPTH 4 // pipe depth
+
+#define EPI p[PIPE_DEPTH-1] // PASTE(p,16+PIPE_DEPTH-1)
+
+//
+// arguments
+//
+#define dst in0
+#define src in1
+#define len in2
+
+//
+// local registers
+//
+#define cnt r18
+#define len2 r19
+#define saved_lc r20
+#define saved_pr r21
+#define tmp r22
+#define val r23
+#define src1 r24
+#define dst1 r25
+#define src2 r26
+#define dst2 r27
+#define len1 r28
+#define enddst r29
+#define endsrc r30
+#define saved_pfs r31
+ .text
+ .psr abi64
+ .psr lsb
+
+ .align 16
+ .global __copy_user
+ .proc __copy_user
__copy_user:
- alloc r10=ar.pfs,3,0,0,0
- mov r9=ar.lc // save ar.lc
- mov ar.lc=in2 // set ar.lc to length of buffer
- br.sptk.few .Lentr
-
- // XXX braindead copy loop---this needs to be optimized
-.Loop1:
- EXI(ld1 r8=[in1],1)
- ;;
- EXO(st1 [in0]=r8,1)
-.Lentr: br.cloop.dptk.few .Loop1 // repeat unless ar.lc--==0
- ;; // avoid RAW on ar.lc
-.Lexit_out:
- mov r8=ar.lc // return how many bytes we _didn't_ copy
- mov ar.lc=r9
- br.ret.sptk.few rp
-
-.Lexit_in:
- // clear the remainder of the buffer:
- mov r8=ar.lc // return how many bytes we _didn't_ copy
-.Loop2:
- st1 [in0]=r0,1 // this cannot fault because we get here only on user->kernel copies
- br.cloop.dptk.few .Loop2
- ;; // avoid RAW on ar.lc
- mov ar.lc=r9
- br.ret.sptk.few rp
-
- .endp __copy_user
+ alloc saved_pfs=ar.pfs,3,((2*PIPE_DEPTH+7)&~7),0,((2*PIPE_DEPTH+7)&~7)
+
+ .rotr val1[PIPE_DEPTH],val2[PIPE_DEPTH]
+ .rotp p[PIPE_DEPTH]
+
+ adds len2=-1,len // br.ctop is repeat/until
+ mov ret0=r0
+
+ ;; // RAW of cfm when len=0
+ cmp.eq p8,p0=r0,len // check for zero length
+ mov saved_lc=ar.lc // preserve ar.lc (slow)
+(p8) br.ret.spnt.few rp // empty memcpy()
+ ;;
+ add enddst=dst,len // first byte after end of destination
+ add endsrc=src,len // first byte after end of source
+ mov saved_pr=pr // preserve predicates
+
+ mov dst1=dst // copy because of rotation
+ mov ar.ec=PIPE_DEPTH
+ mov pr.rot=1<<16 // p16=true all others are false
+
+ mov src1=src // copy because of rotation
+ mov ar.lc=len2 // initialize lc for small count
+ cmp.lt p10,p7=COPY_BREAK,len // if len > COPY_BREAK then long copy
+
+ xor tmp=src,dst // same alignment test prepare
+(p10) br.cond.dptk.few long_memcpy
+ ;; // RAW pr.rot/p16 ?
+ //
+ // Now we do the byte by byte loop with software pipeline
+ //
+ // p7 is necessarily false by now
+1:
+ EX(failure_in_pipe1,(p16) ld1 val1[0]=[src1],1)
+
+ EX(failure_out,(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1)
+ br.ctop.dptk.few 1b
+ ;;
+ mov ar.lc=saved_lc
+ mov pr=saved_pr,0xffffffffffff0000
+ mov ar.pfs=saved_pfs // restore ar.ec
+ br.ret.sptk.few rp // end of short memcpy
+
+ //
+ // Beginning of long memcpy (i.e. > 16 bytes)
+ //
+long_memcpy:
+ tbit.nz p6,p7=src1,0 // odd alignment
+ and tmp=7,tmp
+ ;;
+ cmp.eq p10,p8=r0,tmp
+ mov len1=len // copy because of rotation
+(p8) br.cond.dpnt.few 1b // XXX Fixme. memcpy_diff_align
+ ;;
+ // At this point we know we have more than 16 bytes to copy
+ // and also that both src and dest have the same alignment
+ // which may not be the one we want. So for now we must move
+ // forward slowly until we reach 16byte alignment: no need to
+ // worry about reaching the end of buffer.
+ //
+ EX(failure_in1,(p6) ld1 val1[0]=[src1],1) // 1-byte aligned
+(p6) adds len1=-1,len1;;
+ tbit.nz p7,p0=src1,1
+ ;;
+ EX(failure_in1,(p7) ld2 val1[1]=[src1],2) // 2-byte aligned
+(p7) adds len1=-2,len1;;
+ tbit.nz p8,p0=src1,2
+ ;;
+ //
+ // Stop bit not required after ld4 because if we fail on ld4
+ // we have never executed the ld1, therefore st1 is not executed.
+ //
+ EX(failure_in1,(p8) ld4 val2[0]=[src1],4) // 4-byte aligned
+ EX(failure_out,(p6) st1 [dst1]=val1[0],1)
+ tbit.nz p9,p0=src1,3
+ ;;
+ //
+ // Stop bit not required after ld8 because if we fail on ld8
+ // we have never executed the ld2, therefore st2 is not executed.
+ //
+ EX(failure_in1,(p9) ld8 val2[1]=[src1],8) // 8-byte aligned
+ EX(failure_out,(p7) st2 [dst1]=val1[1],2)
+(p8) adds len1=-4,len1
+ ;;
+ EX(failure_out, (p8) st4 [dst1]=val2[0],4)
+(p9) adds len1=-8,len1;;
+ shr.u cnt=len1,4 // number of 128-bit (2x64bit) words
+ ;;
+ EX(failure_out, (p9) st8 [dst1]=val2[1],8)
+ tbit.nz p6,p0=len1,3
+ cmp.eq p7,p0=r0,cnt
+ adds tmp=-1,cnt // br.ctop is repeat/until
+(p7) br.cond.dpnt.few .dotail // we have less than 16 bytes left
+ ;;
+ adds src2=8,src1
+ adds dst2=8,dst1
+ mov ar.lc=tmp
+ ;;
+ //
+ // 16bytes/iteration
+ //
+2:
+ EX(failure_in3,(p16) ld8 val1[0]=[src1],16)
+(p16) ld8 val2[0]=[src2],16
+
+ EX(failure_out, (EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16)
+(EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16
+ br.ctop.dptk.few 2b
+ ;; // RAW on src1 when fall through from loop
+ //
+ // Tail correction based on len only
+ //
+ // No matter where we come from (loop or test) the src1 pointer
+ // is 16 byte aligned AND we have less than 16 bytes to copy.
+ //
+.dotail:
+ EX(failure_in1,(p6) ld8 val1[0]=[src1],8) // at least 8 bytes
+ tbit.nz p7,p0=len1,2
+ ;;
+ EX(failure_in1,(p7) ld4 val1[1]=[src1],4) // at least 4 bytes
+ tbit.nz p8,p0=len1,1
+ ;;
+ EX(failure_in1,(p8) ld2 val2[0]=[src1],2) // at least 2 bytes
+ tbit.nz p9,p0=len1,0
+ ;;
+ EX(failure_out, (p6) st8 [dst1]=val1[0],8)
+ ;;
+ EX(failure_in1,(p9) ld1 val2[1]=[src1]) // only 1 byte left
+ mov ar.lc=saved_lc
+ ;;
+ EX(failure_out,(p7) st4 [dst1]=val1[1],4)
+ mov pr=saved_pr,0xffffffffffff0000
+ ;;
+ EX(failure_out, (p8) st2 [dst1]=val2[0],2)
+ mov ar.pfs=saved_pfs
+ ;;
+ EX(failure_out, (p9) st1 [dst1]=val2[1])
+ br.ret.dptk.few rp
+
+
+
+ //
+ // Here we handle the case where the byte by byte copy fails
+ // on the load.
+ // Several factors make the zeroing of the rest of the buffer kind of
+ // tricky:
+ // - the pipeline: loads/stores are not in sync (pipeline)
+ //
+ // In the same loop iteration, the dst1 pointer does not directly
+ // reflect where the faulty load was.
+ //
+ // - pipeline effect
+ // When you get a fault on a load, you may have valid data from
+ // previous loads still in transit, not yet stored. Such data must
+ // be stored normally before moving on to zeroing the rest.
+ //
+ // - single/multi dispersal independence.
+ //
+ // solution:
+ // - we don't disrupt the pipeline, i.e. data in transit in
+ // the software pipeline will eventually be moved to memory.
+ // We simply replace the load with a simple mov and keep the
+ // pipeline going. We can't really do this inline because
+ // p16 is always reset to 1 when lc > 0.
+ //
+failure_in_pipe1:
+ sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied
+1:
+(p16) mov val1[0]=r0
+(EPI) st1 [dst1]=val1[PIPE_DEPTH-1],1
+ br.ctop.dptk.few 1b
+ ;;
+ mov pr=saved_pr,0xffffffffffff0000
+ mov ar.lc=saved_lc
+ mov ar.pfs=saved_pfs
+ br.ret.dptk.few rp
+
+
+ //
+ // Here we handle the head & tail part when we check for alignment.
+ // The following code handles only the load failures. The
+ // main difficulty comes from the fact that loads/stores are
+ // scheduled. So when you fail on a load, the stores corresponding
+ // to previous successful loads must be executed.
+ //
+ // However some simplifications are possible given the way
+ // things work.
+ //
+ // 1) HEAD
+ // Theory of operation:
+ //
+ // Page A | Page B
+ // ---------|-----
+ // 1|8 x
+ // 1 2|8 x
+ // 4|8 x
+ // 1 4|8 x
+ // 2 4|8 x
+ // 1 2 4|8 x
+ // |1
+ // |2 x
+ // |4 x
+ //
+ // page_size >= 4k (2^12). (x means 4, 2, 1)
+ // Here we suppose Page A exists and Page B does not.
+ //
+ // As we move towards eight byte alignment we may encounter faults.
+ // The numbers on each page show the size of the load (current alignment).
+ //
+ // Key point:
+ // - if you fail on 1, 2, 4 then you have never executed any smaller
+ // size loads, e.g. failing ld4 means no ld1 nor ld2 executed
+ // before.
+ //
+ // This allows us to simplify the cleanup code, because basically you
+ // only have to worry about "pending" stores in the case of a failing
+ // ld8(). Given the way the code is written today, this means only
+ // worry about st2, st4. There we can use the information encapsulated
+ // into the predicates.
+ //
+ // Other key point:
+ // - if you fail on the ld8 in the head, it means you went straight
+ // to it, i.e. 8byte alignment within a nonexistent page.
+ // Again this comes from the fact that if you crossed just for the ld8 then
+ // you are 8byte aligned but also 16byte aligned, therefore you would
+ // either go for the 16byte copy loop OR the ld8 in the tail part.
+ // The combination ld1, ld2, ld4, ld8 where you fail on ld8 is impossible
+ // because it would mean you had 15bytes to copy in which case you
+ // would have defaulted to the byte by byte copy.
+ //
+ //
+ // 2) TAIL
+ // Here we know we have less than 16 bytes AND we are either 8 or 16 byte
+ // aligned.
+ //
+ // Key point:
+ // This means that we either:
+ // - are right on a page boundary
+ // OR
+ // - are at more than 16 bytes from a page boundary with
+ // at most 15 bytes to copy: no chance of crossing.
+ //
+ // This allows us to assume that if we fail on a load we haven't possibly
+ // executed any of the previous (tail) ones, so we don't need to do
+ // any stores. For instance, if we fail on ld2, this means we had
+ // 2 or 3 bytes left to copy and we did not execute the ld8 nor ld4.
+ //
+ // This means that we are in a situation similar to a fault in the
+ // head part. That's nice!
+ //
+failure_in1:
+// sub ret0=enddst,dst1 // number of bytes to zero, i.e. not copied
+// sub len=enddst,dst1,1
+ sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied
+ sub len=endsrc,src1,1
+ //
+ // we know that ret0 can never be zero at this point
+ // because we failed why trying to do a load, i.e. there is still
+ // some work to do.
+ // The failure_in1bis and length problem is taken care of at the
+ // calling side.
+ //
+ ;;
+failure_in1bis: // from (failure_in3)
+ mov ar.lc=len // Continue with a stupid byte store.
+ ;;
+5:
+ st1 [dst1]=r0,1
+ br.cloop.dptk.few 5b
+ ;;
+skip_loop:
+ mov pr=saved_pr,0xffffffffffff0000
+ mov ar.lc=saved_lc
+ mov ar.pfs=saved_pfs
+ br.ret.dptk.few rp
+
+ //
+ // Here we simply restart the loop but instead
+ // of doing loads we fill the pipeline with zeroes
+ // We can't simply store r0 because we may have valid
+ // data in transit in the pipeline.
+ // ar.lc and ar.ec are setup correctly at this point
+ //
+ // we MUST use src1/endsrc here and not dst1/enddst because
+ // of the pipeline effect.
+ //
+failure_in3:
+ sub ret0=endsrc,src1 // number of bytes to zero, i.e. not copied
+ ;;
+2:
+(p16) mov val1[0]=r0
+(p16) mov val2[0]=r0
+(EPI) st8 [dst1]=val1[PIPE_DEPTH-1],16
+(EPI) st8 [dst2]=val2[PIPE_DEPTH-1],16
+ br.ctop.dptk.few 2b
+ ;;
+ cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ?
+ sub len=enddst,dst1,1 // precompute len
+(p6) br.cond.dptk.few failure_in1bis
+ ;;
+ mov pr=saved_pr,0xffffffffffff0000
+ mov ar.lc=saved_lc
+ mov ar.pfs=saved_pfs
+ br.ret.dptk.few rp
+
+ //
+ // handling of failures on stores: that's the easy part
+ //
+failure_out:
+ sub ret0=enddst,dst1
+ mov pr=saved_pr,0xffffffffffff0000
+ mov ar.lc=saved_lc
+
+ mov ar.pfs=saved_pfs
+ br.ret.dptk.few rp
+
+
+ .endp __copy_user
+
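Aside: the calling contract of __copy_user is unchanged by the rewrite: in0 = destination, in1 = source, in2 = length; ret0 (r8) returns the number of bytes not copied, and a fault on a source read zero-fills the rest of the destination. A C model of that contract, not of the pipelined implementation (fault_at is a purely hypothetical parameter standing in for where the real code would take a page fault; pass fault_at >= len for a fault-free copy):

/* Sketch only: models the source-fault semantics described above. */
#include <stddef.h>
#include <string.h>

size_t copy_user_model(void *dst, const void *src, size_t len,
		       size_t fault_at)
{
	size_t done = len < fault_at ? len : fault_at;

	memcpy(dst, src, done);				/* bytes copied before the fault */
	memset((char *) dst + done, 0, len - done);	/* zero-fill remainder */
	return len - done;				/* ret0: bytes not copied */
}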