diff options
Diffstat (limited to 'include/asm-arm/proc-armv/system.h')
-rw-r--r-- | include/asm-arm/proc-armv/system.h | 71 |
1 files changed, 58 insertions, 13 deletions
diff --git a/include/asm-arm/proc-armv/system.h b/include/asm-arm/proc-armv/system.h index 247ed3e2b..becb31c2d 100644 --- a/include/asm-arm/proc-armv/system.h +++ b/include/asm-arm/proc-armv/system.h @@ -10,19 +10,7 @@ #ifndef __ASM_PROC_SYSTEM_H #define __ASM_PROC_SYSTEM_H -extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) -{ - extern void __bad_xchg(volatile void *, int); - - switch (size) { - case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory"); - break; - case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" : "=r" (x) : "r" (x), "r" (ptr) : "memory"); - break; - default: __bad_xchg(ptr, size); - } - return x; -} +#include <linux/config.h> #define set_cr(x) \ __asm__ __volatile__( \ @@ -131,4 +119,61 @@ extern unsigned long cr_alignment; /* defined in entry-armv.S */ : "r" (x) \ : "memory") +#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) +/* + * On the StrongARM, "swp" is terminally broken since it bypasses the + * cache totally. This means that the cache becomes inconsistent, and, + * since we use normal loads/stores as well, this is really bad. + * Typically, this causes oopsen in filp_close, but could have other, + * more disastrous effects. There are two work-arounds: + * 1. Disable interrupts and emulate the atomic swap + * 2. Clean the cache, perform atomic swap, flush the cache + * + * We choose (1) since it's the "easiest" to achieve here and is not + * dependent on the processor type. 
+ */ +#define swp_is_buggy +#endif + +extern __inline__ unsigned long __xchg(unsigned long x, volatile void *ptr, int size) +{ + extern void __bad_xchg(volatile void *, int); + unsigned long ret; +#ifdef swp_is_buggy + unsigned long flags; +#endif + + switch (size) { +#ifdef swp_is_buggy + case 1: + __save_flags_cli(flags); + ret = *(volatile unsigned char *)ptr; + *(volatile unsigned char *)ptr = x; + __restore_flags(flags); + break; + + case 4: + __save_flags_cli(flags); + ret = *(volatile unsigned long *)ptr; + *(volatile unsigned long *)ptr = x; + __restore_flags(flags); + break; +#else + case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]" + : "=r" (ret) + : "r" (x), "r" (ptr) + : "memory"); + break; + case 4: __asm__ __volatile__ ("swp %0, %1, [%2]" + : "=r" (ret) + : "r" (x), "r" (ptr) + : "memory"); + break; +#endif + default: __bad_xchg(ptr, size); + } + + return ret; +} + #endif |