author | Ralf Baechle <ralf@linux-mips.org> | 2000-04-28 01:09:25 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-04-28 01:09:25 +0000
commit | b9ba7aeb165cffecdffb60aec8c3fa8d590d9ca9 (patch)
tree | 42d07b0c7246ae2536a702e7c5de9e2732341116 /include/asm-i386
parent | 7406b0a326f2d70ade2671c37d1beef62249db97 (diff)
Merge with 2.3.99-pre6.
Diffstat (limited to 'include/asm-i386')
-rw-r--r-- | include/asm-i386/apic.h | 33
-rw-r--r-- | include/asm-i386/apicdef.h | 33
-rw-r--r-- | include/asm-i386/atomic.h | 6
-rw-r--r-- | include/asm-i386/bitops.h | 4
-rw-r--r-- | include/asm-i386/bugs.h | 31
-rw-r--r-- | include/asm-i386/elf.h | 3
-rw-r--r-- | include/asm-i386/hardirq.h | 5
-rw-r--r-- | include/asm-i386/hw_irq.h | 3
-rw-r--r-- | include/asm-i386/mca_dma.h | 12
-rw-r--r-- | include/asm-i386/mmu_context.h | 4
-rw-r--r-- | include/asm-i386/mpspec.h | 10
-rw-r--r-- | include/asm-i386/mtrr.h | 2
-rw-r--r-- | include/asm-i386/pgalloc.h | 6
-rw-r--r-- | include/asm-i386/processor.h | 5
-rw-r--r-- | include/asm-i386/spinlock.h | 41
-rw-r--r-- | include/asm-i386/string.h | 30
-rw-r--r-- | include/asm-i386/system.h | 2
17 files changed, 146 insertions, 84 deletions
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 3c6e6aa4b..9ee236336 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <asm/apicdef.h>
+#include <asm/system.h>
 #define APIC_DEBUG 1
@@ -20,7 +21,12 @@
 extern __inline void apic_write(unsigned long reg, unsigned long v)
 {
- *((volatile unsigned long *)(APIC_BASE+reg))=v;
+ *((volatile unsigned long *)(APIC_BASE+reg)) = v;
+}
+
+extern __inline void apic_write_atomic(unsigned long reg, unsigned long v)
+{
+ xchg((volatile unsigned long *)(APIC_BASE+reg), v);
 }
 extern __inline unsigned long apic_read(unsigned long reg)
@@ -32,30 +38,33 @@ extern unsigned int apic_timer_irqs [NR_CPUS];
 #ifdef CONFIG_X86_GOOD_APIC
 # define FORCE_READ_AROUND_WRITE 0
-# define apic_readaround(x)
+# define apic_read_around(x)
+# define apic_write_around(x,y) apic_write((x),(y))
 #else
 # define FORCE_READ_AROUND_WRITE 1
-# define apic_readaround(x) apic_read(x)
+# define apic_read_around(x) apic_read(x)
+# define apic_write_around(x,y) apic_write_atomic((x),(y))
 #endif
-#define apic_write_around(x,y) \
- do { apic_readaround(x); apic_write(x,y); } while (0)
-
 extern inline void ack_APIC_irq(void)
 {
- /* Clear the IPI */
-
- apic_readaround(APIC_EOI);
 /*
- * on P6+ cores (CONFIG_X86_GOOD_APIC) ack_APIC_irq() actually
- * gets compiled as a single instruction ... yummie.
+ * ack_APIC_irq() actually gets compiled as a single instruction:
+ * - a single rmw on Pentium/82489DX
+ * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+ * ... yummie.
 */
- apic_write(APIC_EOI, 0); /* Docs say use 0 for future compatibility */
+
+ /* Docs say use 0 for future compatibility */
+ apic_write_around(APIC_EOI, 0);
 }
 extern int get_maxlvt(void);
+extern void connect_bsp_APIC (void);
+extern void disconnect_bsp_APIC (void);
 extern void disable_local_APIC (void);
 extern void cache_APIC_registers (void);
+extern void sync_Arb_IDs(void);
 extern void setup_local_APIC (void);
 extern void init_apic_mappings(void);
 extern void smp_local_timer_interrupt(struct pt_regs * regs);
diff --git a/include/asm-i386/apicdef.h b/include/asm-i386/apicdef.h
index c47395384..2f0e2d3c3 100644
--- a/include/asm-i386/apicdef.h
+++ b/include/asm-i386/apicdef.h
@@ -29,8 +29,6 @@
 #define SET_APIC_LOGICAL_ID(x) (((x)<<24))
 #define APIC_ALL_CPUS 0xFF
 #define APIC_DFR 0xE0
-#define GET_APIC_DFR(x) (((x)>>28)&0x0F)
-#define SET_APIC_DFR(x) ((x)<<28)
 #define APIC_SPIV 0xF0
 #define APIC_ISR 0x100
 #define APIC_TMR 0x180
@@ -47,22 +45,23 @@
 #define APIC_DEST_SELF 0x40000
 #define APIC_DEST_ALLINC 0x80000
 #define APIC_DEST_ALLBUT 0xC0000
-#define APIC_DEST_RR_MASK 0x30000
-#define APIC_DEST_RR_INVALID 0x00000
-#define APIC_DEST_RR_INPROG 0x10000
-#define APIC_DEST_RR_VALID 0x20000
-#define APIC_DEST_LEVELTRIG 0x08000
-#define APIC_DEST_ASSERT 0x04000
-#define APIC_DEST_BUSY 0x01000
+#define APIC_ICR_RR_MASK 0x30000
+#define APIC_ICR_RR_INVALID 0x00000
+#define APIC_ICR_RR_INPROG 0x10000
+#define APIC_ICR_RR_VALID 0x20000
+#define APIC_INT_LEVELTRIG 0x08000
+#define APIC_INT_ASSERT 0x04000
+#define APIC_ICR_BUSY 0x01000
 #define APIC_DEST_LOGICAL 0x00800
-#define APIC_DEST_DM_FIXED 0x00000
-#define APIC_DEST_DM_LOWEST 0x00100
-#define APIC_DEST_DM_SMI 0x00200
-#define APIC_DEST_DM_REMRD 0x00300
-#define APIC_DEST_DM_NMI 0x00400
-#define APIC_DEST_DM_INIT 0x00500
-#define APIC_DEST_DM_STARTUP 0x00600
-#define APIC_DEST_VECTOR_MASK 0x000FF
+#define APIC_DM_FIXED 0x00000
+#define APIC_DM_LOWEST 0x00100
+#define APIC_DM_SMI 0x00200
+#define APIC_DM_REMRD 0x00300
+#define APIC_DM_NMI 0x00400
+#define APIC_DM_INIT 0x00500
+#define APIC_DM_STARTUP 0x00600
+#define APIC_DM_EXTINT 0x00700
+#define APIC_VECTOR_MASK 0x000FF
 #define APIC_ICR2 0x310
 #define GET_APIC_DEST_FIELD(x) (((x)>>24)&0xFF)
 #define SET_APIC_DEST_FIELD(x) ((x)<<24)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 51805488f..945ab9b5d 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -1,12 +1,14 @@
 #ifndef __ARCH_I386_ATOMIC__
 #define __ARCH_I386_ATOMIC__
+#include <linux/config.h>
+
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
  */
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 #define LOCK "lock ; "
 #else
 #define LOCK ""
@@ -19,7 +21,7 @@
  */
 #define __atomic_fool_gcc(x) (*(volatile struct { int a[100]; } *)x)
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 typedef struct { volatile int counter; } atomic_t;
 #else
 typedef struct { int counter; } atomic_t;
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h
index 86068d069..e8c859dd4 100644
--- a/include/asm-i386/bitops.h
+++ b/include/asm-i386/bitops.h
@@ -5,6 +5,8 @@
  * Copyright 1992, Linus Torvalds.
  */
+#include <linux/config.h>
+
 /*
  * These have to be done with inline assembly: that way the bit-setting
  * is guaranteed to be atomic. All bit operations return 0 if the bit
@@ -13,7 +15,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 #define LOCK_PREFIX "lock ; "
 #else
 #define LOCK_PREFIX ""
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index dbaa8d261..34df19c05 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,8 +21,6 @@
 #include <asm/processor.h>
 #include <asm/msr.h>
-#define CONFIG_BUGi386
-
 static int __init no_halt(char *s)
 {
 boot_cpu_data.hlt_works_ok = 0;
@@ -370,16 +368,18 @@ static void __init check_cyrix_coma(void)
 }
 /*
- * Check wether we are able to run this kernel safely on SMP.
+ * Check whether we are able to run this kernel safely on SMP.
 *
 * - In order to run on a i386, we need to be compiled for i386
 * (for due to lack of "invlpg" and working WP on a i386)
 * - In order to run on anything without a TSC, we need to be
 * compiled for a i486.
- * - In order to work on a Pentium/SMP machine, we need to be
- * compiled for a Pentium or lower, as a PPro config implies
- * a properly working local APIC without the need to do extra
- * reads from the APIC.
+ * - In order to support the local APIC on a buggy Pentium machine,
+ * we need to be compiled with CONFIG_X86_GOOD_APIC disabled,
+ * which happens implicitly if compiled for a Pentium or lower
+ * (unless an advanced selection of CPU features is used) as an
+ * otherwise config implies a properly working local APIC without
+ * the need to do extra reads from the APIC.
 */
 static void __init check_config(void)
@@ -411,11 +411,18 @@ static void __init check_config(void)
 #endif
 /*
- * If we were told we had a good APIC for SMP, we'd better be a PPro
+ * If we were told we had a good local APIC, check for buggy Pentia,
+ * i.e. all B steppings and the C2 stepping of P54C when using their
+ * integrated APIC (see 11AP erratum in "Pentium Processor
+ * Specification Update").
 */
-#if defined(CONFIG_X86_GOOD_APIC) && defined(CONFIG_SMP)
- if (smp_found_config && boot_cpu_data.x86 <= 5)
- panic("Kernel compiled for PPro+, assumes local APIC without read-before-write bug");
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_GOOD_APIC)
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL
+ && boot_cpu_data.x86_capability & X86_FEATURE_APIC
+ && boot_cpu_data.x86 == 5
+ && boot_cpu_data.x86_model == 2
+ && (boot_cpu_data.x86_mask < 6 || boot_cpu_data.x86_mask == 11))
+ panic("Kernel compiled for PPro+, assumes a local APIC without the read-before-write bug!");
 #endif
 }
@@ -425,7 +432,7 @@ static void __init check_bugs(void)
 identify_cpu(&boot_cpu_data);
 check_cx686_cpuid();
 check_cx686_slop();
-#ifndef __SMP__
+#ifndef CONFIG_SMP
 printk("CPU: ");
 print_cpu_info(&boot_cpu_data);
 #endif
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index dacd01b99..0083b3f20 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -94,8 +94,7 @@ typedef struct user_i387_struct elf_fpregset_t;
 #define ELF_PLATFORM ("i386\0i486\0i586\0i686"+((boot_cpu_data.x86-3)*5))
 #ifdef __KERNEL__
-#define SET_PERSONALITY(ex, ibcs2) \
- current->personality = (ibcs2 ? PER_SVR4 : PER_LINUX)
+#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
 #endif
 #endif
diff --git a/include/asm-i386/hardirq.h b/include/asm-i386/hardirq.h
index fb0c3e5d4..e6fa12909 100644
--- a/include/asm-i386/hardirq.h
+++ b/include/asm-i386/hardirq.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_HARDIRQ_H
 #define __ASM_HARDIRQ_H
+#include <linux/config.h>
 #include <linux/threads.h>
 #include <linux/irq.h>
@@ -29,7 +30,7 @@ extern irq_cpustat_t irq_stat [NR_CPUS];
 #define in_irq() (local_irq_count(smp_processor_id()) != 0)
-#ifndef __SMP__
+#ifndef CONFIG_SMP
 #define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
 #define hardirq_endlock(cpu) do { } while (0)
@@ -89,6 +90,6 @@ static inline int hardirq_trylock(int cpu)
 extern void synchronize_irq(void);
-#endif /* __SMP__ */
+#endif /* CONFIG_SMP */
 #endif /* __ASM_HARDIRQ_H */
diff --git a/include/asm-i386/hw_irq.h b/include/asm-i386/hw_irq.h
index b371bd79f..413a98c55 100644
--- a/include/asm-i386/hw_irq.h
+++ b/include/asm-i386/hw_irq.h
@@ -12,6 +12,7 @@
 * <tomsoft@informatik.tu-chemnitz.de>
 */
+#include <linux/config.h>
 #include <asm/irq.h>
 /*
@@ -211,7 +212,7 @@ static inline void x86_do_profile (unsigned long eip)
 atomic_inc((atomic_t *)&prof_buffer[eip]);
 }
-#ifdef __SMP__ /*more of this file should probably be ifdefed SMP */
+#ifdef CONFIG_SMP /*more of this file should probably be ifdefed SMP */
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {
 if (IO_APIC_IRQ(i)) send_IPI_self(IO_APIC_VECTOR(i));
diff --git a/include/asm-i386/mca_dma.h b/include/asm-i386/mca_dma.h
index fb42da38d..a9e780cc4 100644
--- a/include/asm-i386/mca_dma.h
+++ b/include/asm-i386/mca_dma.h
@@ -178,18 +178,18 @@ static __inline__ void mca_set_dma_io(unsigned int dmanr, unsigned int io_addr)
 /**
 * mca_set_dma_mode - set the DMA mode
 * @dmanr: DMA channel
- * @mode: The mode to set
+ * @mode: mode to set
 *
 * The DMA controller supports several modes. The mode values you can
- * set are
+ * set are :
 *
- * MCA_DMA_MODE_READ when reading from the DMA device.
+ * %MCA_DMA_MODE_READ when reading from the DMA device.
 *
- * MCA_DMA_MODE_WRITE to writing to the DMA device.
+ * %MCA_DMA_MODE_WRITE to writing to the DMA device.
 *
- * MCA_DMA_MODE_IO to do DMA to or from an I/O port.
+ * %MCA_DMA_MODE_IO to do DMA to or from an I/O port.
 *
- * MCA_DMA_MODE_16 to do 16bit transfers.
+ * %MCA_DMA_MODE_16 to do 16bit transfers.
 *
 */
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 1d9248632..e02aff08d 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -12,7 +12,7 @@
 #define destroy_context(mm) do { } while(0)
 #define init_new_context(tsk,mm) do { } while (0)
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
 {
@@ -42,7 +42,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, str
 asm volatile("movl %0,%%cr3": :"r" (__pa(next->pgd)));
 clear_bit(cpu, &prev->cpu_vm_mask);
 }
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 else {
 int old_state = cpu_tlbstate[cpu].state;
 cpu_tlbstate[cpu].state = TLBSTATE_OK;
diff --git a/include/asm-i386/mpspec.h b/include/asm-i386/mpspec.h
index eaa803cb0..2956def6b 100644
--- a/include/asm-i386/mpspec.h
+++ b/include/asm-i386/mpspec.h
@@ -13,6 +13,11 @@
 #define SMP_MAGIC_IDENT (('_'<<24)|('P'<<16)|('M'<<8)|'_')
+/*
+ * a maximum of 16 APICs with the current APIC ID architecture.
+ */
+#define MAX_APICS 16
+
 struct intel_mp_floating
 {
 char mpf_signature[4]; /* "_MP_" */
@@ -144,7 +149,8 @@ struct mpc_config_lintsrc
 enum mp_bustype {
 MP_BUS_ISA,
 MP_BUS_EISA,
- MP_BUS_PCI
+ MP_BUS_PCI,
+ MP_BUS_MCA
 };
 extern int mp_bus_id_to_type [MAX_MP_BUSSES];
 extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
@@ -155,7 +161,7 @@ extern int smp_found_config;
 extern void find_smp_config (void);
 extern void get_smp_config (void);
 extern int nr_ioapics;
-extern int apic_version [NR_CPUS];
+extern int apic_version [MAX_APICS];
 extern int mp_bus_id_to_type [MAX_MP_BUSSES];
 extern int mp_irq_entries;
 extern struct mpc_config_intsrc mp_irqs [MAX_IRQ_SOURCES];
diff --git a/include/asm-i386/mtrr.h b/include/asm-i386/mtrr.h
index e6d130c9a..cc28a42c7 100644
--- a/include/asm-i386/mtrr.h
+++ b/include/asm-i386/mtrr.h
@@ -95,7 +95,7 @@ static __inline__ int mtrr_del (int reg, unsigned long base,
 /* The following functions are for initialisation: don't use them! */
 extern int mtrr_init (void);
-# if defined(__SMP__) && defined(CONFIG_MTRR)
+# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
 extern void mtrr_init_boot_cpu (void);
 extern void mtrr_init_secondary_cpu (void);
 # endif
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 5cb20763d..78a229362 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -156,7 +156,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
 {
 struct task_struct * p;
 pgd_t *pgd;
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 int i;
 #endif
@@ -167,7 +167,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
 *pgd_offset(p->mm,address) = entry;
 }
 read_unlock(&tasklist_lock);
-#ifndef __SMP__
+#ifndef CONFIG_SMP
 for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
 pgd[address >> PGDIR_SHIFT] = entry;
 #else
@@ -193,7 +193,7 @@ extern inline void set_pgdir(unsigned long address, pgd_t entry)
 * and page-granular flushes are available only on i486 and up.
 */
-#ifndef __SMP__
+#ifndef CONFIG_SMP
 #define flush_tlb() __flush_tlb()
 #define flush_tlb_all() __flush_tlb_all()
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 9f594ab5d..41f386716 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -12,6 +12,7 @@
 #include <asm/segment.h>
 #include <asm/page.h>
 #include <asm/types.h>
+#include <linux/config.h>
 #include <linux/threads.h>
 /*
@@ -100,7 +101,7 @@ struct cpuinfo_x86 {
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct tss_struct init_tss[NR_CPUS];
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
 #else
@@ -356,7 +357,7 @@ struct thread_struct {
 }
 #define start_thread(regs, new_eip, new_esp) do { \
- __asm__("movl %w0,%%fs ; movl %w0,%%gs": :"r" (0)); \
+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
 set_fs(USER_DS); \
 regs->xds = __USER_DS; \
 regs->xes = __USER_DS; \
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index e187026a2..234e79275 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -33,7 +33,7 @@ typedef struct {
 #define SPINLOCK_MAGIC_INIT /* */
 #endif
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 SPINLOCK_MAGIC_INIT }
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
 #define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
 /*
@@ -43,32 +43,39 @@ typedef struct {
 * We make no fairness assumptions. They have a cost.
 */
-#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
-#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
+#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
 #define spin_lock_string \
 "\n1:\t" \
- "lock ; btsl $0,%0\n\t" \
- "jc 2f\n" \
+ "lock ; decb %0\n\t" \
+ "js 2f\n" \
 ".section .text.lock,\"ax\"\n" \
 "2:\t" \
- "testb $1,%0\n\t" \
+ "cmpb $0,%0\n\t" \
 "rep;nop\n\t" \
- "jne 2b\n\t" \
+ "jle 2b\n\t" \
 "jmp 1b\n" \
 ".previous"
 /*
- * Sadly, some early PPro chips require the locked access,
- * otherwise we could just always simply do
- *
- * #define spin_unlock_string \
- * "movb $0,%0"
- *
- * Which is noticeably faster.
+ * This works. Despite all the confusion.
 */
 #define spin_unlock_string \
- "lock ; btrl $0,%0"
+ "movb $1,%0"
+
+/*
+ * Won't work on i386-SMP. Does anybody care?
+ */
+static inline int spin_trylock(spinlock_t *lock)
+{
+ char oldval;
+ __asm__ __volatile__(
+ "lock ; cmpxchg %b2,%1"
+ :"=a" (oldval), "=m" (__dummy_lock(lock))
+ :"q" (0), "0" (1));
+ return oldval > 0;
+}
 extern inline void spin_lock(spinlock_t *lock)
 {
@@ -90,7 +97,7 @@ extern inline void spin_unlock(spinlock_t *lock)
 #if SPINLOCK_DEBUG
 if (lock->magic != SPINLOCK_MAGIC)
 BUG();
- if (!lock->lock)
+ if (!spin_is_locked(lock))
 BUG();
 #endif
 __asm__ __volatile__(
@@ -98,8 +105,6 @@
 :"=m" (__dummy_lock(lock)));
 }
-#define spin_trylock(lock) ({ !test_and_set_bit(0,(lock)); })
-
 /*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
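
(Editor's illustration — not part of the diff above or below.) The spinlock.h hunks switch the i386 spinlock from a bit-test lock to a byte lock: SPIN_LOCK_UNLOCKED now holds 1, spin_lock does a `lock ; decb` and treats a negative result as contention, spin_trylock uses `cmpxchg`, and spin_unlock is a plain `movb $1`. The sketch below restates that protocol in portable C with GCC's __atomic builtins; the demo_* names are invented for the example and do not exist in the kernel.

```c
/*
 * Editor's sketch only -- NOT part of the patch. It models the new
 * i386 byte-lock protocol (1 = unlocked, 0 or negative = locked)
 * with GCC __atomic builtins instead of inline assembly.
 */
typedef struct { signed char lock; } demo_spinlock_t;

#define DEMO_SPIN_LOCK_UNLOCKED ((demo_spinlock_t){ 1 })

static void demo_spin_lock(demo_spinlock_t *l)
{
        for (;;) {
                /* "lock ; decb %0": acquired iff the pre-decrement value was 1 */
                if (__atomic_fetch_sub(&l->lock, 1, __ATOMIC_ACQUIRE) > 0)
                        return;
                /* contended ("js 2f"): spin read-only until the owner stores 1 */
                while (__atomic_load_n(&l->lock, __ATOMIC_RELAXED) <= 0)
                        ;       /* a "rep;nop" (pause) hint would go here */
        }
}

static int demo_spin_trylock(demo_spinlock_t *l)
{
        /* "lock ; cmpxchg": install 0 only if the lock still reads 1 */
        signed char unlocked = 1;
        return __atomic_compare_exchange_n(&l->lock, &unlocked, 0, 0,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static void demo_spin_unlock(demo_spinlock_t *l)
{
        /* "movb $1,%0": releasing needs only a plain store of 1 */
        __atomic_store_n(&l->lock, 1, __ATOMIC_RELEASE);
}

int main(void)
{
        demo_spinlock_t l = DEMO_SPIN_LOCK_UNLOCKED;

        demo_spin_lock(&l);
        demo_spin_unlock(&l);
        return demo_spin_trylock(&l) ? 0 : 1;  /* trylock succeeds: exit 0 */
}
```

The asymmetry is the point of the change: acquiring still takes one locked read-modify-write, but releasing is a single unlocked byte store — the concern the removed "Sadly, some early PPro chips require the locked access" comment expressed is dropped along with the old `lock ; btrl` unlock.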
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index ef0dd75d8..5db9768d0 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -452,6 +452,36 @@ return __res;
 }
 /* end of additional stuff */
+#define __HAVE_ARCH_STRSTR
+extern inline char * strstr(const char * cs,const char * ct)
+{
+int d0, d1;
+register char * __res;
+__asm__ __volatile__(
+ "movl %6,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
+ "movl %%ecx,%%edx\n"
+ "1:\tmovl %6,%%edi\n\t"
+ "movl %%esi,%%eax\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repe\n\t"
+ "cmpsb\n\t"
+ "je 2f\n\t" /* also works for empty string, see above */
+ "xchgl %%eax,%%esi\n\t"
+ "incl %%esi\n\t"
+ "cmpb $0,-1(%%eax)\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "2:"
+ :"=a" (__res), "=&c" (d0), "=&S" (d1)
+ :"0" (0), "1" (0xffffffff), "2" (cs), "g" (ct)
+ :"dx", "di");
+return __res;
+}
+
 /*
 * This looks horribly ugly, but the compiler can optimize it totally,
 * as we by now know that both pattern and count is constant..
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 75e04e938..ec9f33ba6 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -289,7 +289,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
 #define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-#ifdef __SMP__
+#ifdef CONFIG_SMP
 extern void __global_cli(void);
 extern void __global_sti(void);
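
(Editor's note — not part of the commit.) The string.h hunk adds an inline-assembly strstr() with the usual C-library contract: return a pointer to the first occurrence of ct inside cs, or NULL if there is none, with an empty ct matching at cs itself (that is what the "sets Z if searchstring=''" remark in the asm is about). A plain-C sketch with the same observable behaviour — the demo_strstr name is invented for the example — looks like this:

```c
#include <string.h>

/*
 * Editor's sketch only -- NOT the code the patch adds. A portable
 * reference for the strstr() contract the new inline assembly
 * implements: first occurrence of ct in cs, NULL if absent, and an
 * empty ct matches at cs itself.
 */
static char *demo_strstr(const char *cs, const char *ct)
{
        size_t n = strlen(ct);

        if (n == 0)
                return (char *)cs;      /* empty needle: match immediately */
        for (; *cs != '\0'; cs++)
                if (*cs == *ct && memcmp(cs, ct, n) == 0)
                        return (char *)cs;
        return NULL;                    /* needle not found */
}
```

For example, demo_strstr("CONFIG_SMP", "SMP") returns a pointer to the trailing "SMP", and demo_strstr("CONFIG_SMP", "") returns the start of the string.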