Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/atomic.h         |  84
-rw-r--r--  include/asm-i386/highmem.h        |   4
-rw-r--r--  include/asm-i386/pci.h            |   8
-rw-r--r--  include/asm-i386/pgalloc-2level.h |  23
-rw-r--r--  include/asm-i386/pgalloc-3level.h |  68
-rw-r--r--  include/asm-i386/pgalloc.h        | 145
-rw-r--r--  include/asm-i386/pgtable-3level.h |  31
-rw-r--r--  include/asm-i386/pgtable.h        |   6
-rw-r--r--  include/asm-i386/string-486.h     |   9
-rw-r--r--  include/asm-i386/string.h         |  19
-rw-r--r--  include/asm-i386/system.h         |  17
11 files changed, 188 insertions(+), 226 deletions(-)
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 94a7ea264..a50b747cd 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -23,9 +23,33 @@ typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
#define atomic_set(v,i) (((v)->counter) = (i))
+/**
+ * atomic_add - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v. Note that the guaranteed useful range
+ * of an atomic_t is only 24 bits.
+ */
static __inline__ void atomic_add(int i, atomic_t *v)
{
__asm__ __volatile__(
@@ -34,6 +58,14 @@ static __inline__ void atomic_add(int i, atomic_t *v)
:"ir" (i), "m" (v->counter));
}
+/**
+ * atomic_sub - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
__asm__ __volatile__(
@@ -42,6 +74,16 @@ static __inline__ void atomic_sub(int i, atomic_t *v)
:"ir" (i), "m" (v->counter));
}
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
@@ -53,6 +95,13 @@ static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
return c;
}
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
@@ -61,6 +110,13 @@ static __inline__ void atomic_inc(atomic_t *v)
:"m" (v->counter));
}
+/**
+ * atomic_dec - decrement atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ void atomic_dec(atomic_t *v)
{
__asm__ __volatile__(
@@ -69,6 +125,15 @@ static __inline__ void atomic_dec(atomic_t *v)
:"m" (v->counter));
}
+/**
+ * atomic_dec_and_test - decrement by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
@@ -80,6 +145,15 @@ static __inline__ int atomic_dec_and_test(atomic_t *v)
return c != 0;
}
+/**
+ * atomic_inc_and_test - increment by 1 and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
@@ -91,6 +165,16 @@ static __inline__ int atomic_inc_and_test(atomic_t *v)
return c != 0;
}
+/**
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
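
A minimal usage sketch of the atomic_t API documented above (illustrative only, not part of the patch; struct and function names are hypothetical, and kmalloc/kfree are assumed from <linux/slab.h>). Reference counting is the classic use: atomic_dec_and_test() lets the last holder free the object without taking a lock.

        struct myobj {
                atomic_t refcount;      /* hypothetical refcounted object */
        };

        static struct myobj *myobj_new(void)
        {
                struct myobj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
                if (obj)
                        atomic_set(&obj->refcount, 1);  /* caller holds the first ref */
                return obj;
        }

        static void myobj_get(struct myobj *obj)
        {
                atomic_inc(&obj->refcount);     /* take another reference */
        }

        static void myobj_put(struct myobj *obj)
        {
                /* free only when the count drops to zero */
                if (atomic_dec_and_test(&obj->refcount))
                        kfree(obj);
        }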
diff --git a/include/asm-i386/highmem.h b/include/asm-i386/highmem.h
index dfff7fad0..49c848738 100644
--- a/include/asm-i386/highmem.h
+++ b/include/asm-i386/highmem.h
@@ -2,14 +2,14 @@
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual adresses.
+ * are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 074957886..d580f8e71 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -152,6 +152,14 @@ extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
*/
extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
+ /*
+ * We fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA allows.
+ */
+ if (mask < 0x00ffffff)
+ return 0;
+
return 1;
}
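
A sketch of how a driver might consult this check at probe time (driver and function names hypothetical): a device limited to 24-bit addresses still passes, since GFP_DMA can satisfy that mask, while anything tighter is now correctly refused.

        static int mydev_init_dma(struct pci_dev *pdev)
        {
                /* 0x00ffffff: device generates only 24-bit bus addresses;
                 * masks below this cannot be honoured via GFP_DMA */
                if (!pci_dma_supported(pdev, 0x00ffffff))
                        return -EIO;
                return 0;
        }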
diff --git a/include/asm-i386/pgalloc-2level.h b/include/asm-i386/pgalloc-2level.h
deleted file mode 100644
index 4ff5ce3b7..000000000
--- a/include/asm-i386/pgalloc-2level.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _I386_PGALLOC_2LEVEL_H
-#define _I386_PGALLOC_2LEVEL_H
-
-/*
- * traditional i386 two-level paging, page table allocation routines:
- */
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- return (pmd_t *)0;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd) { }
-extern __inline__ void free_pmd_slow(pmd_t *pmd) { }
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- return (pmd_t *) pgd;
-}
-
-#endif /* _I386_PGALLOC_2LEVEL_H */
diff --git a/include/asm-i386/pgalloc-3level.h b/include/asm-i386/pgalloc-3level.h
deleted file mode 100644
index 30099a755..000000000
--- a/include/asm-i386/pgalloc-3level.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef _I386_PGALLOC_3LEVEL_H
-#define _I386_PGALLOC_3LEVEL_H
-
-/*
- * Intel Physical Address Extension (PAE) Mode - three-level page
- * tables on PPro+ CPUs. Page-table allocation routines.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- */
-
-extern __inline__ pmd_t *get_pmd_slow(void)
-{
- pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL);
-
- if (ret)
- memset(ret, 0, PAGE_SIZE);
- return ret;
-}
-
-extern __inline__ pmd_t *get_pmd_fast(void)
-{
- unsigned long *ret;
-
- if ((ret = pmd_quicklist) != NULL) {
- pmd_quicklist = (unsigned long *)(*ret);
- ret[0] = 0;
- pgtable_cache_size--;
- } else
- ret = (unsigned long *)get_pmd_slow();
- return (pmd_t *)ret;
-}
-
-extern __inline__ void free_pmd_fast(pmd_t *pmd)
-{
- *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
- pmd_quicklist = (unsigned long *) pmd;
- pgtable_cache_size++;
-}
-
-extern __inline__ void free_pmd_slow(pmd_t *pmd)
-{
- free_page((unsigned long)pmd);
-}
-
-extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
-{
- if (!pgd)
- BUG();
- address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
- if (pgd_none(*pgd)) {
- pmd_t *page = get_pmd_fast();
-
- if (!page)
- page = get_pmd_slow();
- if (page) {
- if (pgd_none(*pgd)) {
- set_pgd(pgd, __pgd(1 + __pa(page)));
- __flush_tlb();
- return page + address;
- } else
- free_pmd_fast(page);
- } else
- return NULL;
- }
- return (pmd_t *)pgd_page(*pgd) + address;
-}
-
-#endif /* _I386_PGALLOC_3LEVEL_H */
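
The get_pmd_fast()/free_pmd_fast() pair removed above implement the "quicklist" pattern: a freed page stores the link to the next free page in its own first word, so the cache needs no separate list nodes. A generic sketch of the same idea (simplified; no locking or cache-size accounting, and not tied to any kernel API):

        static unsigned long *quicklist;        /* head of the page freelist */

        static void *ql_alloc(void)
        {
                unsigned long *page = quicklist;
                if (page) {
                        quicklist = (unsigned long *)*page;     /* pop the head */
                        page[0] = 0;            /* scrub the embedded link */
                }
                return page;
        }

        static void ql_free(void *p)
        {
                *(unsigned long *)p = (unsigned long)quicklist; /* push */
                quicklist = (unsigned long *)p;
        }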
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index 64a9a7f68..b8c12a6f7 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -11,35 +11,56 @@
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
-#if CONFIG_X86_PAE
-# include <asm/pgalloc-3level.h>
-#else
-# include <asm/pgalloc-2level.h>
-#endif
+#define pmd_populate(mm, pmd, pte) \
+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
+ * Allocate and free page tables.
*/
+#if CONFIG_X86_PAE
+
+extern void *kmalloc(size_t, int);
+extern void kfree(const void *);
+
extern __inline__ pgd_t *get_pgd_slow(void)
{
- pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+ int i;
+ pgd_t *pgd = kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+
+ if (pgd) {
+ for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+ unsigned long pmd = __get_free_page(GFP_KERNEL);
+ if (!pmd)
+ goto out_oom;
+ clear_page(pmd);
+ set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+ }
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return pgd;
+out_oom:
+ for (i--; i >= 0; i--)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kfree(pgd);
+ return NULL;
+}
- if (ret) {
-#if CONFIG_X86_PAE
- int i;
- for (i = 0; i < USER_PTRS_PER_PGD; i++)
- __pgd_clear(ret + i);
#else
- memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-#endif
- memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+extern __inline__ pgd_t *get_pgd_slow(void)
+{
+ pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+ if (pgd) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
- return ret;
+ return pgd;
}
+#endif
+
extern __inline__ pgd_t *get_pgd_fast(void)
{
unsigned long *ret;
@@ -62,17 +83,32 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
+#if CONFIG_X86_PAE
+ int i;
+
+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
+ free_page((unsigned long)__va(pgd_val(pgd[i])-1));
+ kfree(pgd);
+#else
free_page((unsigned long)pgd);
+#endif
}
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
+static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pte_t *pte;
-extern __inline__ pte_t *get_pte_fast(void)
+ pte = (pte_t *) __get_free_page(GFP_KERNEL);
+ if (pte)
+ clear_page(pte);
+ return pte;
+}
+
+static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
- if((ret = (unsigned long *)pte_quicklist) != NULL) {
+ if ((ret = (unsigned long *)pte_quicklist) != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
@@ -80,75 +116,34 @@ extern __inline__ pte_t *get_pte_fast(void)
return (pte_t *)ret;
}
-extern __inline__ void free_pte_fast(pte_t *pte)
+extern __inline__ void pte_free_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
}
-extern __inline__ void free_pte_slow(pte_t *pte)
+extern __inline__ void pte_free_slow(pte_t *pte)
{
free_page((unsigned long)pte);
}
-#define pte_free_kernel(pte) free_pte_slow(pte)
-#define pte_free(pte) free_pte_slow(pte)
-#define pgd_free(pgd) free_pgd_slow(pgd)
-#define pgd_alloc() get_pgd_fast()
-
-extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
-{
- if (!pmd)
- BUG();
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
- if (pmd_none(*pmd)) {
- pte_t * page = (pte_t *) get_pte_fast();
-
- if (!page)
- return get_pte_kernel_slow(pmd, address);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(page)));
- return page + address;
- }
- if (pmd_bad(*pmd)) {
- __handle_bad_pmd_kernel(pmd);
- return NULL;
- }
- return (pte_t *) pmd_page(*pmd) + address;
-}
-
-extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
-{
- address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
-
- if (pmd_none(*pmd))
- goto getnew;
- if (pmd_bad(*pmd))
- goto fix;
- return (pte_t *)pmd_page(*pmd) + address;
-getnew:
-{
- unsigned long page = (unsigned long) get_pte_fast();
-
- if (!page)
- return get_pte_slow(pmd, address);
- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(page)));
- return (pte_t *)page + address;
-}
-fix:
- __handle_bad_pmd(pmd);
- return NULL;
-}
+#define pte_free(pte) pte_free_slow(pte)
+#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_alloc() get_pgd_fast()
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
- * (In the PAE case we free the page.)
+ * (In the PAE case we free the pmds as part of the pgd.)
*/
-#define pmd_free(pmd) free_pmd_slow(pmd)
-#define pmd_free_kernel pmd_free
-#define pmd_alloc_kernel pmd_alloc
+#define pmd_alloc_one_fast(mm, addr) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
+#define pmd_free_slow(x) do { } while (0)
+#define pmd_free_fast(x) do { } while (0)
+#define pmd_free(x) do { } while (0)
+#define pgd_populate(mm, pmd, pte) BUG()
extern int do_check_pgt_cache(int, int);
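
With the pte allocation logic moved out of this header, the generic mm layer is expected to pair the allocators with pmd_populate(). A sketch of the intended calling sequence (caller-side code, not part of this patch; mm, pmd and address are assumed to be in scope):

        /* inside a hypothetical generic pte_alloc() path */
        pte_t *new = pte_alloc_one_fast(mm, address);   /* try the quicklist */
        if (!new)
                new = pte_alloc_one(mm, address);       /* fall back to the page allocator */
        if (new)
                pmd_populate(mm, pmd, new);     /* hook the pte page into the pmd */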
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index aac1e3121..6253c0585 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -33,17 +33,9 @@
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
-/*
- * Subtle, in PAE mode we cannot have zeroes in the top level
- * page directory, the CPU enforces this. (ie. the PGD entry
- * always has to have the present bit set.) The CPU caches
- * the 4 pgd entries internally, so there is no extra memory
- * load on TLB miss, despite one more level of indirection.
- */
-#define EMPTY_PGD (__pa(empty_zero_page) + 1)
-#define pgd_none(x) (pgd_val(x) == EMPTY_PGD)
+extern inline int pgd_none(pgd_t pgd) { return 0; }
extern inline int pgd_bad(pgd_t pgd) { return 0; }
-extern inline int pgd_present(pgd_t pgd) { return !pgd_none(pgd); }
+extern inline int pgd_present(pgd_t pgd) { return 1; }
/* Rules for using set_pte: the pte being assigned *must* be
* either not present or in a state where the hardware will
@@ -63,21 +55,12 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
set_64bit((unsigned long long *)(pgdptr),pgd_val(pgdval))
/*
- * Pentium-II errata A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed... This was one tough
- * thing to find out - guess i should first read all the documentation
- * next time around ;)
+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
+ * the TLB via cr3 if the top-level pgd is changed...
+ * We do not let the generic code free and clear pgd entries due to
+ * this erratum.
*/
-extern inline void __pgd_clear (pgd_t * pgd)
-{
- set_pgd(pgd, __pgd(EMPTY_PGD));
-}
-
-extern inline void pgd_clear (pgd_t * pgd)
-{
- __pgd_clear(pgd);
- __flush_tlb();
-}
+extern inline void pgd_clear (pgd_t * pgd) { }
#define pgd_page(pgd) \
((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
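
The set_pte()/set_pgd() wrappers above go through set_64bit() because a PAE entry is 64 bits wide while the MMU may walk the tables concurrently. A sketch of the store sequence they must avoid (illustrative only, assuming the PAE pte_t layout with pte_low/pte_high halves):

        /* UNSAFE sketch: two 32-bit stores leave a window in which
         * the MMU can observe a half-updated entry. */
        static inline void unsafe_set_pte(pte_t *ptep, pte_t pte)
        {
                ptep->pte_high = pte.pte_high;
                ptep->pte_low = pte.pte_low;    /* entry torn until this store */
        }

set_64bit() closes the window by performing the whole 64-bit store atomically via cmpxchg8b.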
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index a25f3bcfd..28f1b7b26 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -243,12 +243,6 @@ extern unsigned long empty_zero_page[1024];
/* page table for 0-4MB for everybody */
extern unsigned long pg0[1024];
-/*
- * Handling allocation failures during page table setup.
- */
-extern void __handle_bad_pmd(pmd_t * pmd);
-extern void __handle_bad_pmd_kernel(pmd_t * pmd);
-
#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
diff --git a/include/asm-i386/string-486.h b/include/asm-i386/string-486.h
index 06022132c..51bfd051b 100644
--- a/include/asm-i386/string-486.h
+++ b/include/asm-i386/string-486.h
@@ -352,11 +352,6 @@ return (to);
#ifdef CONFIG_X86_USE_3DNOW
-#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
#include <asm/mmx.h>
/*
@@ -365,14 +360,14 @@ return (to);
static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
{
- if(len<512 || in_interrupt())
+ if (len < 512)
return __memcpy_c(to, from, len);
return _mmx_memcpy(to, from, len);
}
static inline void *__memcpy3d(void *to, const void *from, size_t len)
{
- if(len<512 || in_interrupt())
+ if (len < 512)
return __memcpy_g(to, from, len);
return _mmx_memcpy(to, from, len);
}
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index 62bf7916c..0c1ca69b3 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -287,13 +287,6 @@ __asm__ __volatile__( \
#ifdef CONFIG_X86_USE_3DNOW
-/* All this just for in_interrupt() ... */
-
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
#include <asm/mmx.h>
/*
@@ -302,14 +295,14 @@ __asm__ __volatile__( \
static inline void * __constant_memcpy3d(void * to, const void * from, size_t len)
{
- if(len<512 || in_interrupt())
+ if (len < 512)
return __constant_memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
extern __inline__ void *__memcpy3d(void *to, const void *from, size_t len)
{
- if(len<512 || in_interrupt())
+ if (len < 512)
return __memcpy(to, from, len);
return _mmx_memcpy(to, from, len);
}
@@ -549,10 +542,10 @@ static inline void * memscan(void * addr, int c, size_t size)
{
if (!size)
return addr;
- __asm__("repnz; scasb
- jnz 1f
- dec %%edi
-1: "
+ __asm__("repnz; scasb\n\t"
+ "jnz 1f\n\t"
+ "dec %%edi\n"
+ "1:"
: "=D" (addr), "=c" (size)
: "0" (addr), "1" (size), "a" (c));
return addr;
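
A usage sketch for memscan() (buffer name hypothetical): it returns a pointer to the first byte equal to c, or addr + size when the byte does not occur in the area.

        char buf[16] = "hello";
        char *nul = memscan(buf, 0, sizeof(buf));       /* -> &buf[5], the NUL */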
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 52e24682e..8da0ce326 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -144,16 +144,17 @@ struct __xchg_dummy { unsigned long a[100]; };
extern inline void __set_64bit (unsigned long long * ptr,
unsigned int low, unsigned int high)
{
-__asm__ __volatile__ (
- "1: movl (%0), %%eax;
- movl 4(%0), %%edx;
- cmpxchg8b (%0);
- jnz 1b"
- :: "D"(ptr),
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
"b"(low),
"c"(high)
- :
- "ax","dx","memory");
+ : "ax","dx","memory");
}
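
A usage sketch for __set_64bit() (the 64-bit slot is hypothetical): callers split the value into its 32-bit halves, low word first.

        unsigned long long shared64;    /* hypothetical 64-bit location */
        unsigned long long val = 0x0000000100000002ULL;

        __set_64bit(&shared64, (unsigned int)val, (unsigned int)(val >> 32));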
extern void inline __set_64bit_constant (unsigned long long *ptr,