Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/bitops.h            |  14
-rw-r--r--  include/asm-i386/bugs.h              |  58
-rw-r--r--  include/asm-i386/cache.h             |   2
-rw-r--r--  include/asm-i386/irq.h               |  13
-rw-r--r--  include/asm-i386/keyboard.h          |   4
-rw-r--r--  include/asm-i386/msr.h               |  30
-rw-r--r--  include/asm-i386/mtrr.h              |  11
-rw-r--r--  include/asm-i386/pgtable.h           |   3
-rw-r--r--  include/asm-i386/processor.h         |   7
-rw-r--r--  include/asm-i386/semaphore-helper.h  |  94
-rw-r--r--  include/asm-i386/semaphore.h         | 144
-rw-r--r--  include/asm-i386/siginfo.h           |   2
-rw-r--r--  include/asm-i386/string.h            |   2
-rw-r--r--  include/asm-i386/system.h            |  20
-rw-r--r--  include/asm-i386/termios.h           |   1
-rw-r--r--  include/asm-i386/timex.h             |   3
16 files changed, 258 insertions(+), 150 deletions(-)
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 09f25dc78..08df0f278 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -46,7 +46,7 @@ extern __inline__ void set_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btsl %1,%0"
:"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
}
extern __inline__ void clear_bit(int nr, volatile void * addr)
@@ -54,7 +54,7 @@ extern __inline__ void clear_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btrl %1,%0"
:"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
}
extern __inline__ void change_bit(int nr, volatile void * addr)
@@ -62,7 +62,7 @@ extern __inline__ void change_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btcl %1,%0"
:"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
}
extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
@@ -72,7 +72,7 @@ extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
return oldbit;
}
@@ -83,7 +83,7 @@ extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
return oldbit;
}
@@ -94,7 +94,7 @@ extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
__asm__ __volatile__( LOCK_PREFIX
"btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"=m" (ADDR)
- :"ir" (nr));
+ :"Ir" (nr));
return oldbit;
}
@@ -113,7 +113,7 @@ extern __inline__ int __test_bit(int nr, volatile void * addr)
__asm__ __volatile__(
"btl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit)
- :"m" (ADDR),"ir" (nr));
+ :"m" (ADDR),"Ir" (nr));
return oldbit;
}
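
A note on the "ir" -> "Ir" constraint change above: GCC's "I" constraint only accepts constants in the range 0..31. That matters here because btsl/btrl/btcl/btl can encode a bit offset as an immediate only in that range, while a register offset may address bits beyond the addressed word, so larger constant bit numbers have to be forced into a register. A minimal standalone sketch of the effect (not the kernel header; my_set_bit and the bitmap are made up, 32-bit x86 assumed):

/* Standalone sketch: the "I" constraint keeps constants > 31 out of the
 * immediate form of btsl and routes them through a register instead. */
static inline void my_set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__("btsl %1,%0"
			     : "=m" (*addr)
			     : "Ir" (nr));	/* "I" = constant in 0..31 */
}

int main(void)
{
	volatile unsigned long bitmap[2] = { 0, 0 };

	my_set_bit(5, bitmap);	/* constant 0..31: may become "btsl $5,mem" */
	my_set_bit(42, bitmap);	/* > 31: must be loaded into a register */
	return !(bitmap[0] & (1UL << 5));	/* exit 0 if bit 5 got set */
}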
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index c9ca53c66..ebd9a4f1a 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -19,6 +19,7 @@
#include <linux/config.h>
#include <asm/processor.h>
+#include <asm/msr.h>
#define CONFIG_BUGi386
@@ -27,6 +28,11 @@ __initfunc(static void no_halt(char *s, int *ints))
boot_cpu_data.hlt_works_ok = 0;
}
+__initfunc(static void mca_pentium(char *s, int *ints))
+{
+ mca_pentium_flag = 1;
+}
+
__initfunc(static void no_387(char *s, int *ints))
{
boot_cpu_data.hard_math = 0;
@@ -61,6 +67,31 @@ __initfunc(static void check_fpu(void))
#endif
return;
}
+ if (mca_pentium_flag) {
+ /* The IBM Model 95 machines with pentiums lock up on
+ * fpu test, so we avoid it. All pentiums have inbuilt
+ * FPU and thus should use exception 16. We still do
+ * the FDIV test, although I doubt there were ever any
+ * MCA boxes built with non-FDIV-bug cpus.
+ */
+ __asm__("fninit\n\t"
+ "fldl %1\n\t"
+ "fdivl %2\n\t"
+ "fmull %2\n\t"
+ "fldl %1\n\t"
+ "fsubp %%st,%%st(1)\n\t"
+ "fistpl %0\n\t"
+ "fwait\n\t"
+ "fninit"
+ : "=m" (*&boot_cpu_data.fdiv_bug)
+ : "m" (*&x), "m" (*&y));
+ printk("mca-pentium specified, avoiding FPU coupling test... ");
+ if (!boot_cpu_data.fdiv_bug)
+ printk("??? No FDIV bug? Lucky you...\n");
+ else
+ printk("detected FDIV bug though.\n");
+ return;
+ }
/*
* check if exception 16 works correctly.. This is truly evil
* code: it disables the high 8 interrupts to make sure that
@@ -173,10 +204,10 @@ __initfunc(static void check_amd_k6(void))
n = K6_BUG_LOOP;
f_vide = vide;
- __asm__ ("rdtsc" : "=a" (d));
+ rdtscl(d);
while (n--)
f_vide();
- __asm__ ("rdtsc" : "=a" (d2));
+ rdtscl(d2);
d = d2-d;
/* Knock these two lines out if it debugs out ok */
@@ -246,6 +277,7 @@ __initfunc(static void check_cx686_cpuid(void))
((Cx86_dir0_msb == 5) || (Cx86_dir0_msb == 3))) {
int eax, dummy;
unsigned char ccr3, ccr4;
+ __u32 old_cap;
cli();
ccr3 = getCx86(CX86_CCR3);
@@ -257,8 +289,11 @@ __initfunc(static void check_cx686_cpuid(void))
/* we have up to level 1 available on the Cx6x86(L|MX) */
boot_cpu_data.cpuid_level = 1;
+ /* Need to preserve some externally computed capabilities */
+ old_cap = boot_cpu_data.x86_capability & X86_FEATURE_MTRR;
cpuid(1, &eax, &dummy, &dummy,
&boot_cpu_data.x86_capability);
+ boot_cpu_data.x86_capability |= old_cap;
boot_cpu_data.x86 = (eax >> 8) & 15;
/*
@@ -314,6 +349,24 @@ __initfunc(static void check_cyrix_cpu(void))
}
/*
+ * In setup.c's cyrix_model() we have set the boot_cpu_data.coma_bug
+ * on certain processors that we know contain this bug and now we
+ * enable the workaround for it.
+ */
+
+__initfunc(static void check_cyrix_coma(void))
+{
+ if (boot_cpu_data.coma_bug) {
+ unsigned char ccr1;
+ cli();
+ ccr1 = getCx86 (CX86_CCR1);
+ setCx86 (CX86_CCR1, ccr1 | 0x10);
+ sti();
+ printk("Cyrix processor with \"coma bug\" found, workaround enabled\n");
+ }
+}
+
+/*
* Check whether we are able to run this kernel safely on SMP.
*
* - In order to run on a i386, we need to be compiled for i386
@@ -371,5 +424,6 @@ __initfunc(static void check_bugs(void))
check_popad();
check_amd_k6();
check_pentium_f00f();
+ check_cyrix_coma();
system_utsname.machine[1] = '0' + boot_cpu_data.x86;
}
diff --git a/include/asm-i386/cache.h b/include/asm-i386/cache.h
index 50c1dbe8f..cea6c8540 100644
--- a/include/asm-i386/cache.h
+++ b/include/asm-i386/cache.h
@@ -13,4 +13,6 @@
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
#endif
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 3d9a7c46c..436ad1d21 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -13,11 +13,15 @@
#define TIMER_IRQ 0
/*
- * 16 XT IRQ's, 8 potential APIC interrupt sources.
- * Right now the APIC is only used for SMP, but this
- * may change.
+ * 16 8259A IRQ's, 240 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
*/
-#define NR_IRQS 64
+#define NR_IRQS 224
static __inline__ int irq_cannonicalize(int irq)
{
@@ -25,6 +29,7 @@ static __inline__ int irq_cannonicalize(int irq)
}
extern void disable_irq(unsigned int);
+extern void disable_irq_nosync(unsigned int);
extern void enable_irq(unsigned int);
#endif /* _ASM_IRQ_H */
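
On the new disable_irq_nosync() declaration: the _nosync variant just masks the line and returns immediately, whereas plain disable_irq() is meant to be the synchronizing form that also waits for a handler that may still be running, which is why a handler masking its own IRQ wants _nosync. A hedged sketch (hypothetical driver, 2.2/2.3-era handler signature):

/* Hypothetical driver fragment: mask our own line from inside the handler
 * and leave the real work, and the later enable_irq(), to deferred code. */
static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	disable_irq_nosync(irq);	/* mask further interrupts; never blocks */
	/* ... queue deferred work that will call enable_irq(irq) later ... */
}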
diff --git a/include/asm-i386/keyboard.h b/include/asm-i386/keyboard.h
index fd3009724..2a7effa3c 100644
--- a/include/asm-i386/keyboard.h
+++ b/include/asm-i386/keyboard.h
@@ -3,7 +3,7 @@
*
* Created 3 Nov 1996 by Geert Uytterhoeven
*
- * $Id: keyboard.h,v 1.7 1999/02/01 15:51:16 ralf Exp $
+ * $Id: keyboard.h,v 1.8 1999/06/10 08:02:38 ralf Exp $
*/
/*
@@ -23,7 +23,6 @@
extern int pckbd_setkeycode(unsigned int scancode, unsigned int keycode);
extern int pckbd_getkeycode(unsigned int scancode);
-extern int pckbd_pretranslate(unsigned char scancode, char raw_mode);
extern int pckbd_translate(unsigned char scancode, unsigned char *keycode,
char raw_mode);
extern char pckbd_unexpected_up(unsigned char keycode);
@@ -33,7 +32,6 @@ extern unsigned char pckbd_sysrq_xlate[128];
#define kbd_setkeycode pckbd_setkeycode
#define kbd_getkeycode pckbd_getkeycode
-#define kbd_pretranslate pckbd_pretranslate
#define kbd_translate pckbd_translate
#define kbd_unexpected_up pckbd_unexpected_up
#define kbd_leds pckbd_leds
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
new file mode 100644
index 000000000..1ed8ea851
--- /dev/null
+++ b/include/asm-i386/msr.h
@@ -0,0 +1,30 @@
+/*
+ * Access to machine-specific registers (available on 586 and better only)
+ * Note: the rd* operations modify the parameters directly (without using
+ * pointer indirection); this allows gcc to optimize better
+ */
+
+#define rdmsr(msr,val1,val2) \
+ __asm__ __volatile__("rdmsr" \
+ : "=a" (val1), "=d" (val2) \
+ : "c" (msr))
+
+#define wrmsr(msr,val1,val2) \
+ __asm__ __volatile__("wrmsr" \
+ : /* no outputs */ \
+ : "c" (msr), "a" (val1), "d" (val2))
+
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+#define rdtscl(low) \
+ __asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")
+
+#define rdtscll(val) \
+ __asm__ __volatile__ ("rdtsc" : "=A" (val))
+
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
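
The header comment above is the whole contract: the rd* macros write straight into the named lvalues. A minimal user-space style sketch of the pattern (macro restated from the header, 32-bit build and a TSC-capable CPU assumed; this is not kernel code):

#include <stdio.h>

/* Restated from the header above: "=A" pairs edx:eax into one 64-bit value. */
#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))

int main(void)
{
	unsigned long long t0, t1;

	rdtscll(t0);			/* read the time-stamp counter ... */
	rdtscll(t1);			/* ... twice, back to back */
	printf("back-to-back rdtsc delta: %llu cycles\n", t1 - t0);
	return 0;
}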
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index 5b1e8470c..e6d130c9a 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -1,6 +1,6 @@
/* Generic MTRR (Memory Type Range Register) ioctls.
- Copyright (C) 1997-1998 Richard Gooch
+ Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
@@ -44,10 +44,11 @@ struct mtrr_gentry
};
/* These are the various ioctls */
-#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
-#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
-#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
+#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
+#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
+#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
+#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
@@ -75,7 +76,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
-# if defined(CONFIG_MTRR) || defined(CONFIG_MTRR_MODULE)
+# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
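
A sketch of the driver-facing calls declared above. The fb_* names and addresses are invented, and the assumption that mtrr_add() returns the MTRR register number (negative on error) is mine; MTRR_TYPE_WRCOMB is the write-combining region type from this same header:

/* Hypothetical framebuffer driver marking 4 MB at 0xe0000000 write-combining. */
static int fb_mtrr = -1;

static void fb_enable_wc(void)
{
	fb_mtrr = mtrr_add(0xe0000000, 0x400000, MTRR_TYPE_WRCOMB, 1);
}

static void fb_disable_wc(void)
{
	if (fb_mtrr >= 0) {
		mtrr_del(fb_mtrr, 0xe0000000, 0x400000);
		fb_mtrr = -1;
	}
}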
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 257a0e99d..bd631d244 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -220,7 +220,7 @@ static inline void flush_tlb_range(struct mm_struct *mm,
#define _PAGE_PRESENT 0x001
#define _PAGE_RW 0x002
#define _PAGE_USER 0x004
-#define _PAGE_WT 0x008
+#define _PAGE_PWT 0x008
#define _PAGE_PCD 0x010
#define _PAGE_ACCESSED 0x020
#define _PAGE_DIRTY 0x040
@@ -594,5 +594,6 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define PageSkip(page) (0)
+#define kern_addr_valid(addr) (1)
#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 9dd4f9df2..25ef571e3 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -35,6 +35,7 @@ struct cpuinfo_x86 {
call */
int fdiv_bug;
int f00f_bug;
+ int coma_bug;
unsigned long loops_per_sec;
unsigned long *pgd_quick;
unsigned long *pte_quick;
@@ -119,12 +120,17 @@ extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
/*
* Cyrix CPU configuration register indexes
*/
+#define CX86_CCR0 0xc0
+#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
+#define CX86_CCR6 0xea
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
+#define CX86_ARR_BASE 0xc4
+#define CX86_RCR_BASE 0xdc
/*
* Cyrix CPU indexed register access macros
@@ -148,6 +154,7 @@ others may find it useful. */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
+extern unsigned int mca_pentium_flag;
/*
* User space process size: 3GB (default).
diff --git a/include/asm-i386/semaphore-helper.h b/include/asm-i386/semaphore-helper.h
new file mode 100644
index 000000000..c8636da0c
--- /dev/null
+++ b/include/asm-i386/semaphore-helper.h
@@ -0,0 +1,94 @@
+#ifndef _I386_SEMAPHORE_HELPER_H
+#define _I386_SEMAPHORE_HELPER_H
+
+/*
+ * SMP- and interrupt-safe semaphores helper functions.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999 Andrea Arcangeli
+ */
+
+/*
+ * These two _must_ execute atomically wrt each other.
+ *
+ * This is trivially done with load_locked/store_cond,
+ * but on the x86 we need an external synchronizer.
+ */
+static inline void wake_one_more(struct semaphore * sem)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (atomic_read(&sem->count) <= 0)
+ sem->waking++;
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+}
+
+static inline int waking_non_zero(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking > 0) {
+ sem->waking--;
+ ret = 1;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_interruptible:
+ * 1 got the lock
+ * 0 go to sleep
+ * -EINTR interrupted
+ *
+ * We must undo the sem->count decrement done by down_interruptible() while we
+ * are protected by the spinlock, so that this atomic_inc() is atomic with the
+ * atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static inline int waking_non_zero_interruptible(struct semaphore *sem,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking > 0) {
+ sem->waking--;
+ ret = 1;
+ } else if (signal_pending(tsk)) {
+ atomic_inc(&sem->count);
+ ret = -EINTR;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+/*
+ * waking_non_zero_trylock:
+ * 1 failed to lock
+ * 0 got the lock
+ *
+ * We must undo the sem->count decrement done by down_trylock() while we are
+ * protected by the spinlock, so that this atomic_inc() is atomic with the
+ * atomic_read() in wake_one_more(); otherwise we can race. -arca
+ */
+static inline int waking_non_zero_trylock(struct semaphore *sem)
+{
+ unsigned long flags;
+ int ret = 1;
+
+ spin_lock_irqsave(&semaphore_wake_lock, flags);
+ if (sem->waking <= 0)
+ atomic_inc(&sem->count);
+ else {
+ sem->waking--;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&semaphore_wake_lock, flags);
+ return ret;
+}
+
+#endif
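
To make the wake_one_more()/waking_non_zero() pairing concrete, here is a heavily simplified schematic of how a waker and a sleeper would typically use these helpers. This is not the kernel's actual slow path, only its shape; the 2.2-era wait-queue calls are assumed:

/* Schematic only: up()'s slow path grants one wakeup, and exactly one
 * sleeper consumes it through waking_non_zero() before leaving its loop. */
static void sketch_up_slow(struct semaphore *sem)
{
	wake_one_more(sem);		/* record one pending wakeup */
	wake_up(&sem->wait);		/* kick the waiters */
}

static void sketch_down_slow(struct semaphore *sem)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		if (waking_non_zero(sem))	/* a wakeup was granted to us */
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}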
diff --git a/include/asm-i386/semaphore.h b/include/asm-i386/semaphore.h
index d78970da0..ccf69385c 100644
--- a/include/asm-i386/semaphore.h
+++ b/include/asm-i386/semaphore.h
@@ -12,6 +12,11 @@
* the original code and to make semaphore waits
* interruptible so that processes waiting on
* semaphores can be killed.
+ * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
+ * functions out into asm/semaphore-helper.h while fixing a
+ * potential and subtle race discovered by Ulrich Schmid
+ * in down_interruptible(). Since I started to play here I
+ * also implemented the `trylock' semaphore operation.
*
* If you would like to see an analysis of this implementation, please
* ftp to gcom.com and download the file
@@ -23,56 +28,23 @@
#include <asm/atomic.h>
#include <asm/spinlock.h>
-/*
- * Semaphores are recursive: we allow the holder process
- * to recursively do down() operations on a semaphore that
- * the process already owns. In order to do that, we need
- * to keep a semaphore-local copy of the owner and the
- * "depth of ownership".
- *
- * NOTE! Nasty memory ordering rules:
- * - "owner" and "owner_count" may only be modified once you hold the
- * lock.
- * - "owner_count" must be written _after_ modifying owner, and
- * must be read _before_ reading owner. There must be appropriate
- * write and read barriers to enforce this.
- *
- * On an x86, writes are always ordered, so the only enformcement
- * necessary is to make sure that the owner_depth is written after
- * the owner value in program order.
- *
- * For read ordering guarantees, the semaphore wake_lock spinlock
- * is already giving us ordering guarantees.
- *
- * Other (saner) architectures would use "wmb()" and "rmb()" to
- * do this in a more obvious manner.
- */
struct semaphore {
atomic_t count;
- unsigned long owner, owner_depth;
int waking;
struct wait_queue * wait;
};
-/*
- * Because we want the non-contention case to be
- * fast, we save the stack pointer into the "owner"
- * field, and to get the true task pointer we have
- * to do the bit masking. That moves the masking
- * operation into the slow path.
- */
-#define semaphore_owner(sem) \
- ((struct task_struct *)((2*PAGE_MASK) & (sem)->owner))
-
-#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, 0, 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, 1, 0, NULL })
+#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
+#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
+asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
extern spinlock_t semaphore_wake_lock;
@@ -80,75 +52,6 @@ extern spinlock_t semaphore_wake_lock;
#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * but on the x86 we need an external synchronizer.
- */
-static inline void wake_one_more(struct semaphore * sem)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- sem->waking++;
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
-}
-
-/*
- * NOTE NOTE NOTE!
- *
- * We read owner-count _before_ getting the semaphore. This
- * is important, because the semaphore also acts as a memory
- * ordering point between reading owner_depth and reading
- * the owner.
- *
- * Why is this necessary? The "owner_depth" essentially protects
- * us from using stale owner information - in the case that this
- * process was the previous owner but somebody else is racing to
- * aquire the semaphore, the only way we can see ourselves as an
- * owner is with "owner_depth" of zero (so that we know to avoid
- * the stale value).
- *
- * In the non-race case (where we really _are_ the owner), there
- * is not going to be any question about what owner_depth is.
- *
- * In the race case, the race winner will not even get here, because
- * it will have successfully gotten the semaphore with the locked
- * decrement operation.
- *
- * Basically, we have two values, and we cannot guarantee that either
- * is really up-to-date until we have aquired the semaphore. But we
- * _can_ depend on a ordering between the two values, so we can use
- * one of them to determine whether we can trust the other:
- *
- * Cases:
- * - owner_depth == zero: ignore the semaphore owner, because it
- * cannot possibly be us. Somebody else may be in the process
- * of modifying it and the zero may be "stale", but it sure isn't
- * going to say that "we" are the owner anyway, so who cares?
- * - owner_depth is non-zero. That means that even if somebody
- * else wrote the non-zero count value, the write ordering requriement
- * means that they will have written themselves as the owner, so
- * if we now see ourselves as an owner we can trust it to be true.
- */
-static inline int waking_non_zero(struct semaphore *sem, struct task_struct *tsk)
-{
- unsigned long flags;
- unsigned long owner_depth = sem->owner_depth;
- int ret = 0;
-
- spin_lock_irqsave(&semaphore_wake_lock, flags);
- if (sem->waking > 0 || (owner_depth && semaphore_owner(sem) == tsk)) {
- sem->owner = (unsigned long) tsk;
- sem->owner_depth++; /* Don't use the possibly stale value */
- sem->waking--;
- ret = 1;
- }
- spin_unlock_irqrestore(&semaphore_wake_lock, flags);
- return ret;
-}
-
-/*
* This is ugly, but we want the default case to fall through.
* "down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/i386/lib/semaphore.S
@@ -161,9 +64,7 @@ extern inline void down(struct semaphore * sem)
"lock ; "
#endif
"decl 0(%0)\n\t"
- "js 2f\n\t"
- "movl %%esp,4(%0)\n"
- "movl $1,8(%0)\n\t"
+ "js 2f\n"
"1:\n"
".section .text.lock,\"ax\"\n"
"2:\tpushl $1b\n\t"
@@ -185,8 +86,6 @@ extern inline int down_interruptible(struct semaphore * sem)
#endif
"decl 0(%1)\n\t"
"js 2f\n\t"
- "movl %%esp,4(%1)\n\t"
- "movl $1,8(%1)\n\t"
"xorl %0,%0\n"
"1:\n"
".section .text.lock,\"ax\"\n"
@@ -199,6 +98,28 @@ extern inline int down_interruptible(struct semaphore * sem)
return result;
}
+extern inline int down_trylock(struct semaphore * sem)
+{
+ int result;
+
+ __asm__ __volatile__(
+ "# atomic interruptible down operation\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl 0(%1)\n\t"
+ "js 2f\n\t"
+ "xorl %0,%0\n"
+ "1:\n"
+ ".section .text.lock,\"ax\"\n"
+ "2:\tpushl $1b\n\t"
+ "jmp __down_failed_trylock\n"
+ ".previous"
+ :"=a" (result)
+ :"c" (sem)
+ :"memory");
+ return result;
+}
/*
* Note! This is subtle. We jump to wake people up only if
@@ -210,7 +131,6 @@ extern inline void up(struct semaphore * sem)
{
__asm__ __volatile__(
"# atomic up operation\n\t"
- "decl 8(%0)\n\t"
#ifdef __SMP__
"lock ; "
#endif
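
Usage sketch for the new primitive: per the helper convention above ("1 failed to lock, 0 got the lock"), down_trylock() returns 0 when the semaphore was acquired and never sleeps. The poll_device() scenario is invented:

static struct semaphore my_sem = MUTEX;

/* Hypothetical caller that must not block, e.g. polling from a timer. */
static void poll_device(void)
{
	if (down_trylock(&my_sem))
		return;			/* contended: give up instead of sleeping */
	/* ... critical section ... */
	up(&my_sem);
}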
diff --git a/include/asm-i386/siginfo.h b/include/asm-i386/siginfo.h
index d0cae4709..7c805525c 100644
--- a/include/asm-i386/siginfo.h
+++ b/include/asm-i386/siginfo.h
@@ -138,7 +138,7 @@ typedef struct siginfo {
*/
#define TRAP_BRKPT 1 /* process breakpoint */
#define TRAP_TRACE 2 /* process trace trap */
-#define NSIGTRAP
+#define NSIGTRAP 2
/*
* SIGCHLD si_codes
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 48b119895..8417d4aba 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -462,7 +462,7 @@ __asm__ __volatile__("cld\n\t" \
#define __HAVE_ARCH_MEMSET
#define memset(s, c, count) \
(__builtin_constant_p(c) ? \
- __constant_c_x_memset((s),(0x01010101UL*(unsigned char)c),(count)) : \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)(c)),(count)) : \
__memset((s),(c),(count)))
/*
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 6dd4b33f0..ebdb8b790 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -9,30 +9,24 @@
struct task_struct; /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
-/*
- * We do most of the task switching in C, but we need
- * to do the EIP/ESP switch in assembly..
- */
-#define switch_to(prev,next) do { \
- unsigned long eax, edx, ecx; \
- asm volatile("pushl %%ebx\n\t" \
- "pushl %%esi\n\t" \
+#define switch_to(prev,next,last) do { \
+ asm volatile("pushl %%esi\n\t" \
"pushl %%edi\n\t" \
"pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
- "movl %5,%%esp\n\t" /* restore ESP */ \
+ "movl %3,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
- "pushl %6\n\t" /* restore EIP */ \
+ "pushl %4\n\t" /* restore EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
"popl %%edi\n\t" \
"popl %%esi\n\t" \
- "popl %%ebx" \
:"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
- "=a" (eax), "=d" (edx), "=c" (ecx) \
+ "=b" (last) \
:"m" (next->tss.esp),"m" (next->tss.eip), \
- "a" (prev), "d" (next)); \
+ "a" (prev), "d" (next), \
+ "b" (prev)); \
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
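
The extra switch_to() argument exists so that the code which resumes after a switch can learn, via %ebx, which task was actually running just before it regained the CPU. A schematic caller (not the scheduler itself; picking next is assumed to happen elsewhere):

/* Schematic only: when this task is scheduled back in and switch_to()
 * "returns", prev names whichever task ran last on this CPU, which need
 * not be the value it held before the switch. */
static void sketch_context_switch(struct task_struct *next)
{
	struct task_struct *prev = current;

	switch_to(prev, next, prev);
	/* from here on, use prev as "the task we came from" */
}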
diff --git a/include/asm-i386/termios.h b/include/asm-i386/termios.h
index 6c8600060..6d576ea00 100644
--- a/include/asm-i386/termios.h
+++ b/include/asm-i386/termios.h
@@ -52,6 +52,7 @@ struct termio {
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */
#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
+#define N_HDLC 13 /* synchronous HDLC */
#ifdef __KERNEL__
diff --git a/include/asm-i386/timex.h b/include/asm-i386/timex.h
index bca879f73..f6cf7303d 100644
--- a/include/asm-i386/timex.h
+++ b/include/asm-i386/timex.h
@@ -7,6 +7,7 @@
#define _ASMi386_TIMEX_H
#include <linux/config.h>
+#include <asm/msr.h>
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
@@ -39,7 +40,7 @@ static inline cycles_t get_cycles (void)
#else
unsigned long eax, edx;
- __asm__("rdtsc":"=a" (eax), "=d" (edx));
+ rdtsc(eax,edx);
return eax;
#endif
}