Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r--  include/asm-i386/system.h   39
1 file changed, 25 insertions, 14 deletions
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 65012f648..6dd4b33f0 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -35,30 +35,30 @@ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *n
"a" (prev), "d" (next)); \
} while (0)
-#define _set_base(addr,base) \
-__asm__("movw %%dx,%0\n\t" \
+#define _set_base(addr,base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
"rorl $16,%%edx\n\t" \
- "movb %%dl,%1\n\t" \
- "movb %%dh,%2" \
- : /* no output */ \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
:"m" (*((addr)+2)), \
"m" (*((addr)+4)), \
"m" (*((addr)+7)), \
- "d" (base) \
- :"dx")
+ "0" (base) \
+ ); } while(0)
-#define _set_limit(addr,limit) \
-__asm__("movw %%dx,%0\n\t" \
+#define _set_limit(addr,limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
"rorl $16,%%edx\n\t" \
- "movb %1,%%dh\n\t" \
+ "movb %2,%%dh\n\t" \
"andb $0xf0,%%dh\n\t" \
"orb %%dh,%%dl\n\t" \
- "movb %%dl,%1" \
- : /* no output */ \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
:"m" (*(addr)), \
"m" (*((addr)+6)), \
- "d" (limit) \
- :"dx")
+ "0" (limit) \
+ ); } while(0)
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
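The substance of the hunk above is the constraint change: the old macros named %edx both as an input ("d" (base)) and in the clobber list ("dx"), and gcc forbids clobbers that overlap an operand. The new versions instead declare %edx as an earlyclobber output ("=&d" (__pr)) and tie the real input to it with the matching constraint "0", so the asm may freely modify the register; the do { ... } while (0) wrapper and __volatile__ are part of the same cleanup. A minimal sketch of the pattern, with a hypothetical helper name and assuming a 32-bit i386 build:

/* Hypothetical helper, not part of the patch: store the upper 16 bits
 * of 'val' into *dst.  The asm modifies the register holding 'val',
 * so that register is declared as an earlyclobber output ("=&d") with
 * the input tied to it via "0", rather than naming %edx as both an
 * input and a clobber.
 */
static inline void put_high_word(unsigned short *dst, unsigned long val)
{
        unsigned long scratch;                  /* plays the role of __pr/__lr */

        __asm__ __volatile__ (
                "rorl $16,%%edx\n\t"            /* rotate bits 16-31 into %dx  */
                "movw %%dx,%1"                  /* ...and store them to *dst   */
                : "=&d" (scratch), "=m" (*dst)
                : "0" (val));
}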
@@ -165,8 +165,19 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
*/
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("": : :"memory")
/* interrupt control.. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
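With these definitions mb() is a full barrier (the locked addl to the top of the stack orders both loads and stores), rmb() is simply mb(), and wmb() only keeps the compiler from reordering stores, since i386-class CPUs of the time already kept stores in program order. A hypothetical single-producer/single-consumer sketch, not part of the patch, just to show where the new barriers would sit (no full-queue handling, it is purely an ordering illustration):

/* Uses the mb()/rmb()/wmb() macros defined above. */
struct ring {
        unsigned long data[16];
        volatile unsigned long head;    /* written by the producer only */
};

static inline void ring_put(struct ring *r, unsigned long x)
{
        r->data[r->head & 15] = x;      /* fill the slot first...       */
        wmb();                          /* ...then publish the index    */
        r->head = r->head + 1;
}

static inline int ring_get(struct ring *r, unsigned long tail, unsigned long *x)
{
        if (tail == r->head)            /* nothing published yet          */
                return 0;
        rmb();                          /* read the index before the slot */
        *x = r->data[tail & 15];
        return 1;
}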