Diffstat (limited to 'arch/s390/lib')
-rw-r--r--   arch/s390/lib/Makefile          2
-rw-r--r--   arch/s390/lib/uaccess.c       407
-rw-r--r--   arch/s390/lib/uaccess.h        16
-rw-r--r--   arch/s390/lib/uaccess_mvcos.c 263
-rw-r--r--   arch/s390/lib/uaccess_pt.c    471
5 files changed, 408 insertions, 751 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e3fffe1dff51..c6d752e8bf28 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@ #
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
+lib-y += delay.o string.o uaccess.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 000000000000..23f866b4c7f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,407 @@
+/*
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright IBM Corp. 2006,2014
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+#ifndef CONFIG_64BIT
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+						 unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"9: jz 7f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"10:"SLR" %0,%4\n"
+		" "ALR" %2,%4\n"
+		"4:"LHI" %4,-1\n"
+		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		" bras %3,6f\n"		/* memset loop */
+		" xc 0(1,%2),0(%2)\n"
+		"5: xc 0(256,%2),0(%2)\n"
+		" la %2,256(%2)\n"
+		"6:"AHI" %4,-256\n"
+		" jnm 5b\n"
+		" ex %4,0(%3)\n"
+		" j 8f\n"
+		"7:"SLR" %0,%0\n"
+		"8:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+						unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		" sacf 0\n"
+		"0: mvcp 0(%0,%2),0(%1),%3\n"
+		"10:jz 8f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcp 0(%0,%2),0(%1),%3\n"
+		"11:jnz 1b\n"
+		" j 8f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"4: mvcp 0(%4,%2),0(%1),%3\n"
+		"12:"SLR" %0,%4\n"
+		" "ALR" %2,%4\n"
+		"5:"LHI" %4,-1\n"
+		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
+		" bras %3,7f\n"		/* memset loop */
+		" xc 0(1,%2),0(%2)\n"
+		"6: xc 0(256,%2),0(%2)\n"
+		" la %2,256(%2)\n"
+		"7:"AHI" %4,-256\n"
+		" jnm 6b\n"
+		" ex %4,0(%3)\n"
+		" j 9f\n"
+		"8:"SLR" %0,%0\n"
+		"9: sacf 768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_from_user_mvcos(to, from, n);
+	return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"6: jz 4f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
+		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"7:"SLR" %0,%4\n"
+		" j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+					      unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	tmp1 = -256UL;
+	asm volatile(
+		" sacf 0\n"
+		"0: mvcs 0(%0,%1),0(%2),%3\n"
+		"7: jz 5f\n"
+		"1:"ALR" %0,%3\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"2: mvcs 0(%0,%1),0(%2),%3\n"
+		"8: jnz 1b\n"
+		" j 5f\n"
+		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
+		" "LHI" %3,-4096\n"
+		" nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
+		" "SLR" %4,%1\n"
+		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
+		" jnh 6f\n"
+		"4: mvcs 0(%4,%1),0(%2),%3\n"
+		"9:"SLR" %0,%4\n"
+		" j 6f\n"
+		"5:"SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_to_user_mvcos(to, from, n);
+	return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
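
The copy routines above share a contract that the asm obscures: the return value is the number of bytes NOT copied (zero on full success), and on a partial fault the uncopied tail of the kernel destination is zero-filled by the "memset loop" in the fixup path. A hedged, stand-alone C model of that contract follows; fault_at() is a hypothetical predicate standing in for a page fault, stubbed so the sketch compiles and runs.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Hypothetical fault predicate; stubbed so the model is runnable. */
static bool fault_at(const void *p)
{
	(void)p;
	return false;
}

size_t copy_from_user_contract(void *to, const void *from, size_t n)
{
	size_t done = 0;

	while (done < n && !fault_at((const char *)from + done)) {
		((char *)to)[done] = ((const char *)from)[done];
		done++;
	}
	if (done < n)			/* faulted: zero the uncopied tail */
		memset((char *)to + done, 0, n - done);
	return n - done;		/* 0 on success, bytes left on fault */
}
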
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+					       unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		" jz 2f\n"
+		"1:"ALR" %0,%3\n"
+		" "SLR" %1,%3\n"
+		" "SLR" %2,%3\n"
+		" j 0b\n"
+		"2:"SLR" %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+					     unsigned long size)
+{
+	unsigned long tmp1;
+
+	update_primary_asce(current);
+	asm volatile(
+		" sacf 256\n"
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" bras %3,3f\n"
+		"0:"AHI" %0,257\n"
+		"1: mvc 0(1,%1),0(%2)\n"
+		" la %1,1(%1)\n"
+		" la %2,1(%2)\n"
+		" "AHI" %0,-1\n"
+		" jnz 1b\n"
+		" j 5f\n"
+		"2: mvc 0(256,%1),0(%2)\n"
+		" la %1,256(%1)\n"
+		" la %2,256(%2)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,1b-0b(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+	if (static_key_false(&have_mvcos))
+		return copy_in_user_mvcos(to, from, n);
+	return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		" jz 4f\n"
+		"1:"ALR" %0,%2\n"
+		" "SLR" %1,%2\n"
+		" j 0b\n"
+		"2: la %3,4095(%1)\n"	/* %4 = to + 4095 */
+		" nr %3,%2\n"		/* %4 = (to + 4095) & -4096 */
+		" "SLR" %3,%1\n"
+		" "CLR" %0,%3\n"	/* copy crosses next page boundary? */
+		" jnh 5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		" "SLR" %0,%3\n"
+		" j 5f\n"
+		"4:"SLR" %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+	unsigned long tmp1, tmp2;
+
+	update_primary_asce(current);
+	asm volatile(
+		" sacf 256\n"
+		" "AHI" %0,-1\n"
+		" jo 5f\n"
+		" bras %3,3f\n"
+		" xc 0(1,%1),0(%1)\n"
+		"0:"AHI" %0,257\n"
+		" la %2,255(%1)\n"	/* %2 = ptr + 255 */
+		" srl %2,12\n"
+		" sll %2,12\n"		/* %2 = (ptr + 255) & -4096 */
+		" "SLR" %2,%1\n"
+		" "CLR" %0,%2\n"	/* clear crosses next page boundary? */
+		" jnh 5f\n"
+		" "AHI" %2,-1\n"
+		"1: ex %2,0(%3)\n"
+		" "AHI" %2,1\n"
+		" "SLR" %0,%2\n"
+		" j 5f\n"
+		"2: xc 0(256,%1),0(%1)\n"
+		" la %1,256(%1)\n"
+		"3:"AHI" %0,-256\n"
+		" jnm 2b\n"
+		"4: ex %0,0(%3)\n"
+		"5: "SLR" %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+	if (static_key_false(&have_mvcos))
+		return clear_user_mvcos(to, size);
+	return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+					      unsigned long size)
+{
+	register unsigned long reg0 asm("0") = 0;
+	unsigned long tmp1, tmp2;
+
+	if (unlikely(!size))
+		return 0;
+	update_primary_asce(current);
+	asm volatile(
+		" la %2,0(%1)\n"
+		" la %3,0(%0,%1)\n"
+		" "SLR" %0,%0\n"
+		" sacf 256\n"
+		"0: srst %3,%2\n"
+		" jo 0b\n"
+		" la %0,1(%3)\n"	/* strnlen_user results includes \0 */
+		" "SLR" %0,%1\n"
+		"1: sacf 768\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+	update_primary_asce(current);
+	return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+	size_t done, len, offset, len_str;
+
+	if (unlikely(size <= 0))
+		return 0;
+	done = 0;
+	do {
+		offset = (size_t)src & ~PAGE_MASK;
+		len = min(size - done, PAGE_SIZE - offset);
+		if (copy_from_user(dst, src, len))
+			return -EFAULT;
+		len_str = strnlen(dst, len);
+		done += len_str;
+		src += len_str;
+		dst += len_str;
+	} while ((len_str == len) && (done < size));
+	return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+	uaccess_primary = 1;
+	return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+		static_key_slow_inc(&have_mvcos);
+	return 0;
+}
+early_initcall(uaccess_init);
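
The new file keeps both implementations and picks one at run time through the have_mvcos jump label: uaccess_init() enables the key once at early boot if facility 27 (MVCOS) is present and uaccess_primary was not given, and every exported entry point branches on static_key_false(). A minimal user-space model of that dispatch, with a plain bool standing in for the patched jump-label branch (names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stands in for the have_mvcos jump label. In the kernel,
 * static_key_false() compiles to a patchable branch, so the test
 * vanishes from the hot path once the key is switched at boot. */
static bool have_fast_path;

/* Models the MVCOS path; returns bytes NOT copied, as uaccess does. */
static size_t copy_fast(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

/* Models the MVCP/SACF fallback path. */
static size_t copy_slow(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}

size_t copy_from_user_model(void *to, const void *from, size_t n)
{
	if (have_fast_path)	/* static_key_false(&have_mvcos) */
		return copy_fast(to, from, n);
	return copy_slow(to, from, n);
}

/* Mirrors uaccess_init(): decide once, before the first user access. */
void uaccess_init_model(bool has_facility27, bool force_primary)
{
	have_fast_path = has_facility27 && !force_primary;
}
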
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index c7e0e81f4b4e..000000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __ARCH_S390_LIB_UACCESS_H
-#define __ARCH_S390_LIB_UACCESS_H
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
-unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
-unsigned long clear_user_pt(void __user *to, unsigned long n);
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
-long strncpy_from_user_pt(char *dst, const char __user *src, long count);
-
-#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index ae97b8df11aa..000000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Optimized user space space access functions based on mvcos.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *	      Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/jump_label.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/facility.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI	"ahi"
-#define ALR	"alr"
-#define CLR	"clr"
-#define LHI	"lhi"
-#define SLR	"slr"
-#else
-#define AHI	"aghi"
-#define ALR	"algr"
-#define CLR	"clgr"
-#define LHI	"lghi"
-#define SLR	"slgr"
-#endif
-
-static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
-
-static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
-						 unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x81UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
-		"9: jz 7f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
-		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
-		" "SLR" %4,%1\n"
-		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
-		" jnh 4f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
-		"10:"SLR" %0,%4\n"
-		" "ALR" %2,%4\n"
-		"4:"LHI" %4,-1\n"
-		" "ALR" %4,%0\n"	/* copy remaining size, subtract 1 */
-		" bras %3,6f\n"		/* memset loop */
-		" xc 0(1,%2),0(%2)\n"
-		"5: xc 0(256,%2),0(%2)\n"
-		" la %2,256(%2)\n"
-		"6:"AHI" %4,-256\n"
-		" jnm 5b\n"
-		" ex %4,0(%3)\n"
-		" j 8f\n"
-		"7:"SLR" %0,%0\n"
-		"8: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_from_user_mvcos(to, from, n);
-	return copy_from_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_from_user);
-
-static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		"6: jz 4f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2: la %4,4095(%1)\n"	/* %4 = ptr + 4095 */
-		" nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
-		" "SLR" %4,%1\n"
-		" "CLR" %0,%4\n"	/* copy crosses next page boundary? */
-		" jnh 5f\n"
-		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
-		"7:"SLR" %0,%4\n"
-		" j 5f\n"
-		"4:"SLR" %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
-		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_to_user_mvcos(to, from, n);
-	return copy_to_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_to_user);
-
-static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
-					       unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810081UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	/* FIXME: copy with reduced length. */
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
-		" jz 2f\n"
-		"1:"ALR" %0,%3\n"
-		" "SLR" %1,%3\n"
-		" "SLR" %2,%3\n"
-		" j 0b\n"
-		"2:"SLR" %0,%0\n"
-		"3: \n"
-		EX_TABLE(0b,3b)
-		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	if (static_key_true(&have_mvcos))
-		return copy_in_user_mvcos(to, from, n);
-	return copy_in_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_in_user);
-
-static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
-{
-	register unsigned long reg0 asm("0") = 0x810000UL;
-	unsigned long tmp1, tmp2;
-
-	tmp1 = -4096UL;
-	asm volatile(
-		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
-		" jz 4f\n"
-		"1:"ALR" %0,%2\n"
-		" "SLR" %1,%2\n"
-		" j 0b\n"
-		"2: la %3,4095(%1)\n"	/* %4 = to + 4095 */
-		" nr %3,%2\n"		/* %4 = (to + 4095) & -4096 */
-		" "SLR" %3,%1\n"
-		" "CLR" %0,%3\n"	/* copy crosses next page boundary? */
-		" jnh 5f\n"
-		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
-		" "SLR" %0,%3\n"
-		" j 5f\n"
-		"4:"SLR" %0,%0\n"
-		"5: \n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
-		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
-		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
-	return size;
-}
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
-	if (static_key_true(&have_mvcos))
-		return clear_user_mvcos(to, size);
-	return clear_user_pt(to, size);
-}
-EXPORT_SYMBOL(__clear_user);
-
-static inline unsigned long strnlen_user_mvcos(const char __user *src,
-					       unsigned long count)
-{
-	unsigned long done, len, offset, len_str;
-	char buf[256];
-
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(256UL, PAGE_SIZE - offset);
-		len = min(count - done, len);
-		if (copy_from_user_mvcos(buf, src, len))
-			return 0;
-		len_str = strnlen(buf, len);
-		done += len_str;
-		src += len_str;
-	} while ((len_str == len) && (done < count));
-	return done + 1;
-}
-
-unsigned long __strnlen_user(const char __user *src, unsigned long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strnlen_user_mvcos(src, count);
-	return strnlen_user_pt(src, count);
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
-					   long count)
-{
-	unsigned long done, len, offset, len_str;
-
-	if (unlikely(count <= 0))
-		return 0;
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		if (copy_from_user_mvcos(dst, src, len))
-			return -EFAULT;
-		len_str = strnlen(dst, len);
-		done += len_str;
-		src += len_str;
-		dst += len_str;
-	} while ((len_str == len) && (done < count));
-	return done;
-}
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
-	if (static_key_true(&have_mvcos))
-		return strncpy_from_user_mvcos(dst, src, count);
-	return strncpy_from_user_pt(dst, src, count);
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/*
- * The uaccess page tabe walk variant can be enforced with the "uaccesspt"
- * kernel parameter. This is mainly for debugging purposes.
- */
-static int force_uaccess_pt __initdata;
-
-static int __init parse_uaccess_pt(char *__unused)
-{
-	force_uaccess_pt = 1;
-	return 0;
-}
-early_param("uaccesspt", parse_uaccess_pt);
-
-static int __init uaccess_init(void)
-{
-	if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
-		static_key_slow_dec(&have_mvcos);
-	return 0;
-}
-early_initcall(uaccess_init);
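
The deleted strnlen_user_mvcos()/strncpy_from_user_mvcos() above and the __strncpy_from_user() that survives in uaccess.c all use the same chunking idea: copy at most up to the next source page boundary, run strnlen() on the chunk, and stop as soon as a terminating zero shows up or the limit is reached. A stand-alone sketch of that loop (assumed 4 KB pages; memcpy() stands in for the faulting copy_from_user() step):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096UL		/* assumed page size */
#define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))

long strncpy_chunked(char *dst, const char *src, long count)
{
	size_t done = 0, offset, len, len_str;

	if (count <= 0)
		return 0;
	do {
		/* never read past the next source page boundary, so a
		 * fault can be attributed to exactly one page */
		offset = (uintptr_t)src & ~MODEL_PAGE_MASK;
		len = MODEL_PAGE_SIZE - offset;
		if ((size_t)count - done < len)
			len = (size_t)count - done;
		memcpy(dst, src, len);		/* copy_from_user() here */
		len_str = strnlen(dst, len);	/* NUL inside this chunk? */
		done += len_str;
		src += len_str;
		dst += len_str;
	} while (len_str == len && done < (size_t)count);
	return (long)done;			/* length without the NUL */
}
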
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
deleted file mode 100644
index 8d39760bae68..000000000000
--- a/arch/s390/lib/uaccess_pt.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * User access functions based on page table walks for enhanced
- * system layout without hardware support.
- *
- * Copyright IBM Corp. 2006, 2012
- * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI	"ahi"
-#define SLR	"slr"
-#else
-#define AHI	"aghi"
-#define SLR	"slgr"
-#endif
-
-static unsigned long strnlen_kernel(const char __user *src, unsigned long count)
-{
-	register unsigned long reg0 asm("0") = 0UL;
-	unsigned long tmp1, tmp2;
-
-	asm volatile(
-		" la %2,0(%1)\n"
-		" la %3,0(%0,%1)\n"
-		" "SLR" %0,%0\n"
-		"0: srst %3,%2\n"
-		" jo 0b\n"
-		" la %0,1(%3)\n"	/* strnlen_kernel results includes \0 */
-		" "SLR" %0,%1\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
-		: "d" (reg0) : "cc", "memory");
-	return count;
-}
-
-static unsigned long copy_in_kernel(void __user *to, const void __user *from,
-				    unsigned long count)
-{
-	unsigned long tmp1;
-
-	asm volatile(
-		" "AHI" %0,-1\n"
-		" jo 5f\n"
-		" bras %3,3f\n"
-		"0:"AHI" %0,257\n"
-		"1: mvc 0(1,%1),0(%2)\n"
-		" la %1,1(%1)\n"
-		" la %2,1(%2)\n"
-		" "AHI" %0,-1\n"
-		" jnz 1b\n"
-		" j 5f\n"
-		"2: mvc 0(256,%1),0(%2)\n"
-		" la %1,256(%1)\n"
-		" la %2,256(%2)\n"
-		"3:"AHI" %0,-256\n"
-		" jnm 2b\n"
-		"4: ex %0,1b-0b(%3)\n"
-		"5:"SLR" %0,%0\n"
-		"6:\n"
-		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
-		: : "cc", "memory");
-	return count;
-}
-
-/*
- * Returns kernel address for user virtual address. If the returned address is
- * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
- * address contains the (negative) exception code.
- */
-#ifdef CONFIG_64BIT
-
-static unsigned long follow_table(struct mm_struct *mm,
-				  unsigned long address, int write)
-{
-	unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
-	if (unlikely(address > mm->context.asce_limit - 1))
-		return -0x38UL;
-	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
-	case _ASCE_TYPE_REGION1:
-		table = table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x39UL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_REGION2:
-		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x3aUL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_REGION3:
-		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INVALID))
-			return -0x3bUL;
-		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
-		/* fallthrough */
-	case _ASCE_TYPE_SEGMENT:
-		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
-			return -0x10UL;
-		if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
-			if (write && (*table & _SEGMENT_ENTRY_PROTECT))
-				return -0x04UL;
-			return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
-				(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
-		}
-		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	}
-	table = table + ((address >> 12) & 0xff);
-	if (unlikely(*table & _PAGE_INVALID))
-		return -0x11UL;
-	if (write && (*table & _PAGE_PROTECT))
-		return -0x04UL;
-	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#else /* CONFIG_64BIT */
-
-static unsigned long follow_table(struct mm_struct *mm,
-				  unsigned long address, int write)
-{
-	unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
-	table = table + ((address >> 20) & 0x7ff);
-	if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
-		return -0x10UL;
-	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
-	if (unlikely(*table & _PAGE_INVALID))
-		return -0x11UL;
-	if (write && (*table & _PAGE_PROTECT))
-		return -0x04UL;
-	return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#endif /* CONFIG_64BIT */
-
-static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr,
-					   unsigned long n, int write_user)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, size, kaddr;
-	void *from, *to;
-
-	if (!mm)
-		return n;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		kaddr = follow_table(mm, uaddr, write_user);
-		if (IS_ERR_VALUE(kaddr))
-			goto fault;
-
-		offset = uaddr & ~PAGE_MASK;
-		size = min(n - done, PAGE_SIZE - offset);
-		if (write_user) {
-			to = (void *) kaddr;
-			from = kptr + done;
-		} else {
-			from = (void *) kaddr;
-			to = kptr + done;
-		}
-		memcpy(to, from, size);
-		done += size;
-		uaddr += size;
-	} while (done < n);
-	spin_unlock(&mm->page_table_lock);
-	return n - done;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, write_user))
-		return n - done;
-	goto retry;
-}
-
-/*
- * Do DAT for user address by page table walk, return kernel address.
- * This function needs to be called with current->mm->page_table_lock held.
- */
-static inline unsigned long __dat_user_addr(unsigned long uaddr, int write)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long kaddr;
-	int rc;
-
-retry:
-	kaddr = follow_table(mm, uaddr, write);
-	if (IS_ERR_VALUE(kaddr))
-		goto fault;
-
-	return kaddr;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	rc = __handle_fault(uaddr, -kaddr, write);
-	spin_lock(&mm->page_table_lock);
-	if (!rc)
-		goto retry;
-	return 0;
-}
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long rc;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel((void __user *) to, from, n);
-	rc = __user_copy_pt((unsigned long) from, to, n, 0);
-	if (unlikely(rc))
-		memset(to + n - rc, 0, rc);
-	return rc;
-}
-
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n)
-{
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel(to, (void __user *) from, n);
-	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
-}
-
-unsigned long clear_user_pt(void __user *to, unsigned long n)
-{
-	void *zpage = (void *) empty_zero_page;
-	unsigned long done, size, ret;
-
-	done = 0;
-	do {
-		if (n - done > PAGE_SIZE)
-			size = PAGE_SIZE;
-		else
-			size = n - done;
-		if (segment_eq(get_fs(), KERNEL_DS))
-			ret = copy_in_kernel(to, (void __user *) zpage, n);
-		else
-			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
-		done += size;
-		to += size;
-		if (ret)
-			return ret + n - done;
-	} while (done < n);
-	return 0;
-}
-
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count)
-{
-	unsigned long uaddr = (unsigned long) src;
-	struct mm_struct *mm = current->mm;
-	unsigned long offset, done, len, kaddr;
-	unsigned long len_str;
-
-	if (unlikely(!count))
-		return 0;
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return strnlen_kernel(src, count);
-	if (!mm)
-		return 0;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		kaddr = follow_table(mm, uaddr, 0);
-		if (IS_ERR_VALUE(kaddr))
-			goto fault;
-
-		offset = uaddr & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		len_str = strnlen((char *) kaddr, len);
-		done += len_str;
-		uaddr += len_str;
-	} while ((len_str == len) && (done < count));
-	spin_unlock(&mm->page_table_lock);
-	return done + 1;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -kaddr, 0))
-		return 0;
-	goto retry;
-}
-
-long strncpy_from_user_pt(char *dst, const char __user *src, long count)
-{
-	unsigned long done, len, offset, len_str;
-
-	if (unlikely(count <= 0))
-		return 0;
-	done = 0;
-	do {
-		offset = (unsigned long)src & ~PAGE_MASK;
-		len = min(count - done, PAGE_SIZE - offset);
-		if (segment_eq(get_fs(), KERNEL_DS)) {
-			if (copy_in_kernel((void __user *) dst, src, len))
-				return -EFAULT;
-		} else {
-			if (__user_copy_pt((unsigned long) src, dst, len, 0))
-				return -EFAULT;
-		}
-		len_str = strnlen(dst, len);
-		done += len_str;
-		src += len_str;
-		dst += len_str;
-	} while ((len_str == len) && (done < count));
-	return done;
-}
-
-unsigned long copy_in_user_pt(void __user *to, const void __user *from,
-			      unsigned long n)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long offset_max, uaddr, done, size, error_code;
-	unsigned long uaddr_from = (unsigned long) from;
-	unsigned long uaddr_to = (unsigned long) to;
-	unsigned long kaddr_to, kaddr_from;
-	int write_user;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return copy_in_kernel(to, from, n);
-	if (!mm)
-		return n;
-	done = 0;
-retry:
-	spin_lock(&mm->page_table_lock);
-	do {
-		write_user = 0;
-		uaddr = uaddr_from;
-		kaddr_from = follow_table(mm, uaddr_from, 0);
-		error_code = kaddr_from;
-		if (IS_ERR_VALUE(error_code))
-			goto fault;
-
-		write_user = 1;
-		uaddr = uaddr_to;
-		kaddr_to = follow_table(mm, uaddr_to, 1);
-		error_code = (unsigned long) kaddr_to;
-		if (IS_ERR_VALUE(error_code))
-			goto fault;
-
-		offset_max = max(uaddr_from & ~PAGE_MASK,
-				 uaddr_to & ~PAGE_MASK);
-		size = min(n - done, PAGE_SIZE - offset_max);
-
-		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
-		done += size;
-		uaddr_from += size;
-		uaddr_to += size;
-	} while (done < n);
-	spin_unlock(&mm->page_table_lock);
-	return n - done;
-fault:
-	spin_unlock(&mm->page_table_lock);
-	if (__handle_fault(uaddr, -error_code, write_user))
-		return n - done;
-	goto retry;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
-	asm volatile("0: l   %1,0(%6)\n"				\
-		     "1: " insn						\
-		     "2: cs  %1,%2,0(%6)\n"				\
-		     "3: jl  1b\n"					\
-		     "   lhi %0,0\n"					\
-		     "4:\n"						\
-		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
-		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
-		       "=m" (*uaddr)					\
-		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
-		       "m" (*uaddr) : "cc" );
-
-static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
-{
-	int oldval = 0, newval, ret;
-
-	switch (op) {
-	case FUTEX_OP_SET:
-		__futex_atomic_op("lr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ADD:
-		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_OR:
-		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_ANDN:
-		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	case FUTEX_OP_XOR:
-		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-				  ret, oldval, newval, uaddr, oparg);
-		break;
-	default:
-		ret = -ENOSYS;
-	}
-	if (ret == 0)
-		*old = oldval;
-	return ret;
-}
-
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
-{
-	int ret;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_op_pt(op, uaddr, oparg, old);
-	if (unlikely(!current->mm))
-		return -EFAULT;
-	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
-	if (!uaddr) {
-		spin_unlock(&current->mm->page_table_lock);
-		return -EFAULT;
-	}
-	get_page(virt_to_page(uaddr));
-	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
-	put_page(virt_to_page(uaddr));
-	return ret;
-}
-
-static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
-				     u32 oldval, u32 newval)
-{
-	int ret;
-
-	asm volatile("0: cs   %1,%4,0(%5)\n"
-		     "1: la   %0,0\n"
-		     "2:\n"
-		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
-		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-		     : "cc", "memory" );
-	*uval = oldval;
-	return ret;
-}
-
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-				  u32 oldval, u32 newval)
-{
-	int ret;
-
-	if (segment_eq(get_fs(), KERNEL_DS))
-		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
-	if (unlikely(!current->mm))
-		return -EFAULT;
-	spin_lock(&current->mm->page_table_lock);
-	uaddr = (u32 __force __user *)
-		__dat_user_addr((__force unsigned long) uaddr, 1);
-	if (!uaddr) {
-		spin_unlock(&current->mm->page_table_lock);
-		return -EFAULT;
-	}
-	get_page(virt_to_page(uaddr));
-	spin_unlock(&current->mm->page_table_lock);
-	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
-	put_page(virt_to_page(uaddr));
-	return ret;
-}
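
The deleted __futex_atomic_op_pt() implements each FUTEX_OP_* as a load / modify / compare-and-swap retry loop around the CS instruction. A hedged C11 model of the same control flow, without the fault handling and page pinning the kernel code does (the ANDN case mirrors the NR-based asm above literally; operand preparation is assumed to happen in the caller, as in the generic futex layer):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

enum { FM_SET, FM_ADD, FM_OR, FM_ANDN, FM_XOR };

int futex_op_model(int op, _Atomic uint32_t *uaddr, uint32_t oparg,
		   uint32_t *old)
{
	uint32_t oldval = atomic_load(uaddr);
	uint32_t newval;

	do {		/* retry until the CAS lands, like "3: jl 1b" */
		switch (op) {
		case FM_SET:	newval = oparg;			break;
		case FM_ADD:	newval = oldval + oparg;	break;
		case FM_OR:	newval = oldval | oparg;	break;
		case FM_ANDN:	newval = oldval & oparg;	break;	/* nr */
		case FM_XOR:	newval = oldval ^ oparg;	break;	/* xr */
		default:	return -ENOSYS;
		}
		/* on failure, oldval is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(uaddr, &oldval, newval));
	*old = oldval;
	return 0;
}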