author    Ralf Baechle <ralf@linux-mips.org>  1998-05-07 02:55:41 +0000
committer Ralf Baechle <ralf@linux-mips.org>  1998-05-07 02:55:41 +0000
commit    dcec8a13bf565e47942a1751a9cec21bec5648fe (patch)
tree      548b69625b18cc2e88c3e68d0923be546c9ebb03 /arch/alpha/lib
parent    2e0f55e79c49509b7ff70ff1a10e1e9e90a3dfd4 (diff)
o Merge with Linux 2.1.99.
o Fix ancient bug in the ELF loader making ldd crash.
o Fix ancient bug in the keyboard code for SGI, SNI and Jazz.
Diffstat (limited to 'arch/alpha/lib')
-rw-r--r--  arch/alpha/lib/checksum.c           | 21
-rw-r--r--  arch/alpha/lib/copy_user.S          | 53
-rw-r--r--  arch/alpha/lib/csum_partial_copy.c  |  6
-rw-r--r--  arch/alpha/lib/memcpy.c             | 23
4 files changed, 68 insertions(+), 35 deletions(-)
diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c
index f95b535ca..5165279f0 100644
--- a/arch/alpha/lib/checksum.c
+++ b/arch/alpha/lib/checksum.c
@@ -37,6 +37,27 @@ unsigned short int csum_tcpudp_magic(unsigned long saddr,
((unsigned long) proto << 8));
}
+unsigned int csum_tcpudp_nofold(unsigned long saddr,
+ unsigned long daddr,
+ unsigned short len,
+ unsigned short proto,
+ unsigned int sum)
+{
+ unsigned long result;
+
+ result = (saddr + daddr + sum +
+ ((unsigned long) ntohs(len) << 16) +
+ ((unsigned long) proto << 8));
+
+	/* Fold down to 32-bits so we don't lose in the typedef-less
+	   network stack.  */
+ /* 64 to 33 */
+ result = (result & 0xffffffff) + (result >> 32);
+ /* 33 to 32 */
+ result = (result & 0xffffffff) + (result >> 32);
+ return result;
+}
+
/*
* Do a 64-bit checksum on an arbitrary memory area..
*
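A note on the double fold added above: the 64-bit accumulation of the pseudo-header fields can carry into bit 32, and the first fold can itself produce a new carry, so a second fold is required before the result truly fits in 32 bits. A minimal standalone sketch of the same step (fold64 is an illustrative name, not a kernel symbol):

#include <stdint.h>
#include <stdio.h>

/* Fold a 64-bit partial checksum down to 32 bits, using the same
   two-step fold as csum_tcpudp_nofold above. */
static uint32_t fold64(uint64_t sum)
{
	sum = (sum & 0xffffffff) + (sum >> 32);	/* 64 -> at most 33 bits */
	sum = (sum & 0xffffffff) + (sum >> 32);	/* 33 -> 32 bits */
	return (uint32_t)sum;
}

int main(void)
{
	/* Worst case: the first fold yields 0x100000000, and only the
	   second fold collapses the sum into 32 bits. */
	printf("%08x\n", fold64(0xffffffff00000001ULL));	/* prints 00000001 */
	return 0;
}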
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index da57fd6d1..aa309b9f5 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -27,11 +27,18 @@
*/
/* Allow an exception for an insn; exit if we get one. */
-#define EX(x,y...) \
+#define EXI(x,y...) \
99: x,##y; \
.section __ex_table,"a"; \
.gprel32 99b; \
- lda $31, $exit-99b($31); \
+ lda $31, $exitin-99b($31); \
+ .previous
+
+#define EXO(x,y...) \
+ 99: x,##y; \
+ .section __ex_table,"a"; \
+ .gprel32 99b; \
+ lda $31, $exitout-99b($31); \
.previous
.set noat
@@ -45,14 +52,14 @@ __copy_user:
subq $3,8,$3
.align 5
$37:
- EX( ldq_u $1,0($7) )
- EX( ldq_u $2,0($6) )
+ EXI( ldq_u $1,0($7) )
+ EXO( ldq_u $2,0($6) )
extbl $1,$7,$1
mskbl $2,$6,$2
insbl $1,$6,$1
addq $3,1,$3
bis $1,$2,$1
- EX( stq_u $1,0($6) )
+ EXO( stq_u $1,0($6) )
subq $0,1,$0
addq $6,1,$6
addq $7,1,$7
@@ -63,10 +70,10 @@ $36:
bic $0,7,$4
beq $1,$43
beq $4,$48
- EX( ldq_u $3,0($7) )
+ EXI( ldq_u $3,0($7) )
.align 5
$50:
- EX( ldq_u $2,8($7) )
+ EXI( ldq_u $2,8($7) )
subq $4,8,$4
extql $3,$7,$3
extqh $2,$7,$1
@@ -81,13 +88,13 @@ $48:
beq $0,$41
.align 5
$57:
- EX( ldq_u $1,0($7) )
- EX( ldq_u $2,0($6) )
+ EXI( ldq_u $1,0($7) )
+ EXO( ldq_u $2,0($6) )
extbl $1,$7,$1
mskbl $2,$6,$2
insbl $1,$6,$1
bis $1,$2,$1
- EX( stq_u $1,0($6) )
+ EXO( stq_u $1,0($6) )
subq $0,1,$0
addq $6,1,$6
addq $7,1,$7
@@ -98,7 +105,7 @@ $43:
beq $4,$65
.align 5
$66:
- EX( ldq $1,0($7) )
+ EXI( ldq $1,0($7) )
subq $4,8,$4
stq $1,0($6)
addq $7,8,$7
@@ -107,15 +114,31 @@ $66:
bne $4,$66
$65:
beq $0,$41
- EX( ldq $2,0($7) )
- EX( ldq $1,0($6) )
+ EXI( ldq $2,0($7) )
+ EXO( ldq $1,0($6) )
mskql $2,$0,$2
mskqh $1,$0,$1
bis $2,$1,$2
- EX( stq $2,0($6) )
+ EXO( stq $2,0($6) )
bis $31,$31,$0
$41:
$35:
-$exit:
+$exitout:
ret $31,($28),1
+
+$exitin:
+	/* A stupid byte-by-byte zeroing of the rest of the output
+	   buffer.  This cures security holes by never leaving
+	   random kernel data around to be copied elsewhere. */
+
+ mov $0,$1
+$101:
+ EXO ( ldq_u $2,0($6) )
+ subq $1,1,$1
+ mskbl $2,$6,$2
+ EXO ( stq_u $2,0($6) )
+ addq $6,1,$6
+ bgt $1,$101
+ ret $31,($28),1
+
.end __copy_user
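The reason EX is split into EXI and EXO: a fault on an instruction that reads the user source is fixed up at $exitin, which zeroes the remainder of the destination, while a fault on the destination side goes straight to $exitout. A short copy_from_user therefore never leaves stale kernel bytes in the caller's buffer. A rough, runnable C model of that contract (raw_copy, copy_from_user_model and fault_at are illustrative names, not kernel symbols):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for the assembly loop: copies n bytes, but "faults"
   after fault_at bytes and returns how many were left uncopied. */
static size_t fault_at = 4;

static size_t raw_copy(void *dst, const void *src, size_t n)
{
	size_t done = n < fault_at ? n : fault_at;
	memcpy(dst, src, done);
	return n - done;
}

/* Model of the $exitin fixup: on a source fault, zero the tail of
   the destination so no stale data leaks to the caller. */
static size_t copy_from_user_model(void *dst, const void *src, size_t n)
{
	size_t left = raw_copy(dst, src, n);
	if (left)
		memset((char *)dst + (n - left), 0, left);
	return left;
}

int main(void)
{
	char dst[8] = "XXXXXXXX";	/* pretend stale data */
	size_t left = copy_from_user_model(dst, "abcdefgh", 8);
	printf("left=%zu dst=%.4s\n", left, dst);	/* left=4 dst=abcd */
	return 0;
}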
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 1328eeaba..713081330 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -365,6 +365,12 @@ csum_partial_copy_from_user(const char *src, char *dst, int len,
}
unsigned int
+csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum)
+{
+ return do_csum_partial_copy_from_user(src, dst, len, sum, NULL);
+}
+
+unsigned int
csum_partial_copy (const char *src, char *dst, int len, unsigned int sum)
{
unsigned int ret;
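The new csum_partial_copy_nocheck simply hands NULL to do_csum_partial_copy_from_user as the error pointer, meaning the source is trusted kernel memory and no fault reporting is wanted. A hedged usage sketch, kernel context assumed (copy_and_csum and the buffer names are ours):

/* Copy a trusted in-kernel buffer while checksumming it; since the
   source cannot fault, the _nocheck variant is the right call. */
unsigned int copy_and_csum(const char *kbuf, char *out, int len)
{
	return csum_partial_copy_nocheck(kbuf, out, len, 0);
}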
diff --git a/arch/alpha/lib/memcpy.c b/arch/alpha/lib/memcpy.c
index bcfac1020..dc708c73e 100644
--- a/arch/alpha/lib/memcpy.c
+++ b/arch/alpha/lib/memcpy.c
@@ -104,7 +104,7 @@ static inline void __memcpy_aligned(unsigned long d, unsigned long s, long n)
DO_REST_ALIGNED(d,s,n);
}
-void * __memcpy(void * dest, const void *src, size_t n)
+void * memcpy(void * dest, const void *src, size_t n)
{
if (!(((unsigned long) dest ^ (unsigned long) src) & 7)) {
__memcpy_aligned((unsigned long) dest, (unsigned long) src, n);
@@ -114,22 +114,5 @@ void * __memcpy(void * dest, const void *src, size_t n)
return dest;
}
-/*
- * Broken compiler uses "bcopy" to do internal
- * assignments. Silly OSF/1 BSDism.
- */
-char * bcopy(const char * src, char * dest, size_t n)
-{
- __memcpy(dest, src, n);
- return dest;
-}
-
-/*
- * gcc-2.7.1 and newer generate calls to memset and memcpy. So we
- * need to define that here:
- */
-#ifdef __ELF__
- asm (".weak memcpy; memcpy = __memcpy");
-#else
- asm (".weakext memcpy, __memcpy");
-#endif
+/* For backwards compatibility with old modules, define __memcpy. */
+asm("__memcpy = memcpy; .globl __memcpy");