Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/asm_offsets.h    2
-rw-r--r--  include/asm-sparc64/bitops.h        16
-rw-r--r--  include/asm-sparc64/byteorder.h     18
-rw-r--r--  include/asm-sparc64/cache.h          3
-rw-r--r--  include/asm-sparc64/hardirq.h       61
-rw-r--r--  include/asm-sparc64/head.h          14
-rw-r--r--  include/asm-sparc64/mmu_context.h   10
-rw-r--r--  include/asm-sparc64/pgtable.h        4
-rw-r--r--  include/asm-sparc64/processor.h      8
-rw-r--r--  include/asm-sparc64/ptrace.h        18
-rw-r--r--  include/asm-sparc64/softirq.h       88
-rw-r--r--  include/asm-sparc64/spinlock.h     201
-rw-r--r--  include/asm-sparc64/unistd.h         4
13 files changed, 400 insertions(+), 47 deletions(-)
diff --git a/include/asm-sparc64/asm_offsets.h b/include/asm-sparc64/asm_offsets.h
index ac7449777..70e6c37a4 100644
--- a/include/asm-sparc64/asm_offsets.h
+++ b/include/asm-sparc64/asm_offsets.h
@@ -228,6 +228,8 @@
#define ASIZ_thread_wstate 0x00000008
#define AOFF_thread_cwp 0x00000120
#define ASIZ_thread_cwp 0x00000008
+#define AOFF_thread_ctx 0x00000128
+#define ASIZ_thread_ctx 0x00000008
#define AOFF_thread_reg_window 0x00000130
#define ASIZ_thread_reg_window 0x00000400
#define AOFF_thread_rwbuf_stkptrs 0x00000530
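
The new pair is the assembler-visible mirror of the ctx member added to struct thread_struct later in this patch (see processor.h below). A minimal sketch of the invariant such generated constants encode, using a stand-in structure rather than the real thread_struct:

#include <stddef.h>             /* offsetof */

/* Stand-in layout: pad covers every member up to and including cwp. */
struct thread_struct_sketch {
        char            pad[0x128];
        unsigned long   ctx;            /* the new member */
};

/* The generated pair simply mirrors offsetof/sizeof:
 *   AOFF_thread_ctx == offsetof(struct thread_struct_sketch, ctx) == 0x128
 *   ASIZ_thread_ctx == sizeof(((struct thread_struct_sketch *)0)->ctx) == 8
 */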
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 8e7a9a472..b76772016 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.12 1997/05/14 20:48:04 davem Exp $
+/* $Id: bitops.h,v 1.13 1997/05/27 06:47:16 davem Exp $
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -109,12 +109,24 @@ extern __inline__ unsigned long test_bit(int nr, __const__ void *addr)
/* The easy/cheese version for now. */
extern __inline__ unsigned long ffz(unsigned long word)
{
- unsigned long result = 0;
+ unsigned long result;
+#ifdef ULTRA_HAS_POPULATION_COUNT /* Thanks for nothing Sun... */
+ __asm__ __volatile__("
+ brz,pn %0, 1f
+ neg %0, %%g1
+ xnor %0, %%g1, %%g2
+ popc %%g2, %0
+1: " : "=&r" (result)
+ : "0" (word)
+ : "g1", "g2");
+#else
+ result = 0;
while(word & 1) {
result++;
word >>= 1;
}
+#endif
return result;
}
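
The popc path is normally compiled out: no UltraSPARC of this era implements popc in hardware, hence the "Thanks for nothing Sun" remark. For reference, the first zero bit of a word is the count of its trailing one bits, and word & ~(word + 1) isolates exactly those bits, so a population count gives the answer directly. A portable C sketch of that identity (names are illustrative, not from the patch):

/* ffz(word) == number of trailing 1 bits in word. */
static unsigned long ffz_by_popcount(unsigned long word)
{
        unsigned long mask = word & ~(word + 1UL);      /* the trailing 1s */
        unsigned long count = 0;

        while (mask) {                  /* portable popcount */
                mask &= mask - 1UL;     /* clear lowest set bit */
                count++;
        }
        return count;
}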
diff --git a/include/asm-sparc64/byteorder.h b/include/asm-sparc64/byteorder.h
index 4ff1717fd..21f4b0ba0 100644
--- a/include/asm-sparc64/byteorder.h
+++ b/include/asm-sparc64/byteorder.h
@@ -1,17 +1,17 @@
-/* $Id: byteorder.h,v 1.3 1997/03/14 21:05:31 jj Exp $ */
+/* $Id: byteorder.h,v 1.4 1997/05/26 23:37:47 davem Exp $ */
#ifndef _SPARC64_BYTEORDER_H
#define _SPARC64_BYTEORDER_H
-#define ntohl(x) (x)
-#define ntohs(x) (x)
-#define htonl(x) (x)
-#define htons(x) (x)
+#define ntohl(x) ((unsigned long int)(x))
+#define ntohs(x) ((unsigned short int)(x))
+#define htonl(x) ((unsigned long int)(x))
+#define htons(x) ((unsigned short int)(x))
/* Some programs depend upon these being around. */
-#define __constant_ntohl(x) (x)
-#define __constant_ntohs(x) (x)
-#define __constant_htonl(x) (x)
-#define __constant_htons(x) (x)
+#define __constant_ntohl(x) ((unsigned long int)(x))
+#define __constant_ntohs(x) ((unsigned short int)(x))
+#define __constant_htonl(x) ((unsigned long int)(x))
+#define __constant_htons(x) ((unsigned short int)(x))
#ifndef __BIG_ENDIAN
#define __BIG_ENDIAN 4321
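
sparc64 is big-endian, so network byte order already matches host order and these macros reorder nothing; the new casts only pin each result to its conventional width. In particular ntohs()/htons() now truncate to 16 bits, as in this illustrative fragment:

        unsigned short s = ntohs(0x12345678);   /* cast truncates: 0x5678 */
        unsigned long  l = ntohl(0x12345678);   /* width only: 0x12345678 */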
diff --git a/include/asm-sparc64/cache.h b/include/asm-sparc64/cache.h
index 3d2198c89..8c2ec6bc6 100644
--- a/include/asm-sparc64/cache.h
+++ b/include/asm-sparc64/cache.h
@@ -4,9 +4,8 @@
#ifndef __ARCH_SPARC64_CACHE_H
#define __ARCH_SPARC64_CACHE_H
-/* FIXME: Should look at this soon */
/* bytes per L1 cache line */
-#define L1_CACHE_BYTES 32 /* a guess */
+#define L1_CACHE_BYTES 32 /* Two 16-byte sub-blocks per line. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
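
L1_CACHE_ALIGN() rounds its argument up to the next 32-byte line boundary. A compile-time spot check of the arithmetic (illustrative only):

/* (x + 31) & ~31 rounds up to a multiple of 32. */
typedef char l1_align_check[(L1_CACHE_ALIGN(1)  == 32 &&
                             L1_CACHE_ALIGN(32) == 32 &&
                             L1_CACHE_ALIGN(33) == 64) ? 1 : -1];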
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index b7cd278e7..4680a4095 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -10,11 +10,8 @@
extern unsigned int local_irq_count[NR_CPUS];
#define in_interrupt() (local_irq_count[smp_processor_id()] != 0)
-#define hardirq_depth() (local_irq_count[smp_processor_id()])
-#ifdef __SMP__
-#error SMP not supported on sparc64
-#else /* !(__SMP__) */
+#ifndef __SMP__
#define hardirq_trylock(cpu) (++local_irq_count[cpu], (cpu)==0)
#define hardirq_endlock(cpu) (--local_irq_count[cpu])
@@ -24,6 +21,62 @@ extern unsigned int local_irq_count[NR_CPUS];
#define synchronize_irq() do { } while(0)
+#else /* (__SMP__) */
+
+#include <asm/atomic.h>
+#include <asm/spinlock.h>
+#include <asm/system.h>
+#include <asm/smp.h>
+
+extern unsigned char global_irq_holder;
+extern spinlock_t global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* if we didn't own the irq lock, just ignore... */
+ if(global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ spin_unlock(&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ unsigned long flags;
+
+ __save_and_cli(flags);
+ if(atomic_add_return(1, &global_irq_count) != 1 ||
+ *(((unsigned char *)(&global_irq_lock)))) {
+ atomic_dec(&global_irq_count);
+ __restore_flags(flags);
+ return 0;
+ }
+ ++local_irq_count[cpu];
+ return 1;
+}
+
+static inline void hardirq_endlock(int cpu)
+{
+ __cli();
+ hardirq_exit(cpu);
+ __sti();
+}
+
+extern void synchronize_irq(void);
+
#endif /* __SMP__ */
#endif /* !(__SPARC64_HARDIRQ_H) */
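
The SMP half replaces the old #error with the classic global-IRQ protocol: local_irq_count[] tracks per-CPU nesting, while global_irq_count and global_irq_lock let one CPU (the global_irq_holder) exclude all others. A sketch of how a low-level interrupt path would bracket a handler with these helpers (handle_one_irq is a stand-in, not code from this patch):

static void irq_dispatch_sketch(int cpu, void (*handle_one_irq)(void))
{
        hardirq_enter(cpu);     /* raise both counters; in_interrupt()
                                 * is true from here on */
        handle_one_irq();
        hardirq_exit(cpu);      /* and drop them again */
}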
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index e3d03bf0f..7127ca74c 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -1,4 +1,4 @@
-/* $Id: head.h,v 1.19 1997/05/18 08:42:18 davem Exp $ */
+/* $Id: head.h,v 1.21 1997/05/27 06:28:17 davem Exp $ */
#ifndef _SPARC64_HEAD_H
#define _SPARC64_HEAD_H
@@ -143,9 +143,15 @@
#define BTRAPTL1(lvl) TRAPTL1_ARG(bad_trap_tl1, lvl)
-#define FLUSH_WINDOW_TRAP \
- flushw; \
- done; nop; nop; nop; nop; nop; nop;
+#define FLUSH_WINDOW_TRAP \
+ ba,pt %xcc, etrap; \
+ rd %pc, %g7; \
+ flushw; \
+ ldx [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC], %l1; \
+ add %l1, 4, %l2; \
+ stx %l1, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TPC]; \
+ ba,pt %xcc, rtrap; \
+ stx %l2, [%sp + STACK_BIAS + REGWIN_SZ + PT_V9_TNPC];
/* Before touching these macros, you owe it to yourself to go and
* see how arch/sparc64/kernel/winfixup.S works... -DaveM
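
The rewritten macro no longer re-executes the trapping instruction: it enters the kernel through etrap, lets flushw run there, and then steps the saved trap PCs forward so rtrap resumes past the flush. The four ldx/stx instructions amount to this C model over the saved register frame (tpc/tnpc are the pt_regs fields the PT_V9_* offsets name):

#include <asm/ptrace.h>

/* Model of the macro's PC bookkeeping: skip the insn that trapped. */
static void skip_trapped_insn(struct pt_regs *regs)
{
        regs->tpc  = regs->tnpc;        /* resume at the next insn */
        regs->tnpc += 4;                /* and queue its successor */
}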
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0e8168bb5..9a5b10458 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.8 1997/05/18 20:44:23 davem Exp $ */
+/* $Id: mmu_context.h,v 1.10 1997/05/23 09:35:55 jj Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -20,8 +20,6 @@
extern unsigned long tlb_context_cache;
-#define MAX_CTX PAGE_SIZE
-
#define CTX_VERSION_SHIFT PAGE_SHIFT
#define CTX_VERSION_MASK ((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION ((1UL << CTX_VERSION_SHIFT) + 1UL)
@@ -29,7 +27,7 @@ extern unsigned long tlb_context_cache;
extern __inline__ void get_new_mmu_context(struct mm_struct *mm,
unsigned long ctx)
{
- if((ctx & ~CTX_VERSION_MASK) > MAX_CTX) {
+ if((ctx & ~(CTX_VERSION_MASK)) == 0) {
unsigned long flags;
int entry;
@@ -68,7 +66,9 @@ extern __inline__ void get_mmu_context(struct task_struct *tsk)
flushw_user();
if((mm->context ^ ctx) & CTX_VERSION_MASK)
get_new_mmu_context(mm, ctx);
- spitfire_set_secondary_context(mm->context);
+ tsk->tss.ctx = (mm->context & 0x1fff);
+ spitfire_set_secondary_context(tsk->tss.current_ds ?
+ mm->context : 0);
paddr = __pa(mm->pgd);
__asm__ __volatile__("
rdpr %%pstate, %%o4
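
The context word packs a version number above PAGE_SHIFT and a 13-bit hardware context number below it, so the new test (ctx & ~CTX_VERSION_MASK) == 0 fires when incrementing the context number carries into the version field, i.e. when the 8192 per-version contexts are exhausted; that is also why get_mmu_context() masks with 0x1fff when caching the context in tsk->tss.ctx. A sketch of the wrap detection (8K pages assumed, as on this port; names are illustrative):

#define SKETCH_CTX_VERSION_SHIFT 13     /* == PAGE_SHIFT here */
#define SKETCH_CTX_VERSION_MASK  ((~0UL) << SKETCH_CTX_VERSION_SHIFT)

static unsigned long next_ctx_sketch(unsigned long ctx)
{
        ctx++;  /* next context; may carry into the version bits */
        if ((ctx & ~SKETCH_CTX_VERSION_MASK) == 0) {
                /* Low 13 bits wrapped to zero: a new version has
                 * begun, so the real code flushes the TLB before
                 * handing out contexts from it.
                 */
        }
        return ctx;
}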
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index ca35d567f..a739cea5e 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.31 1997/05/18 21:11:42 davem Exp $
+/* $Id: pgtable.h,v 1.32 1997/05/26 23:39:20 davem Exp $
* pgtable.h: SpitFire page table operations.
*
* Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
@@ -440,7 +440,7 @@ extern inline void SET_PAGE_DIR(struct task_struct *tsk, pgd_t *pgdir)
paddr = __pa(pgdir);
- if(tsk->mm == current->mm) {
+ if(tsk == current) {
__asm__ __volatile__ ("
rdpr %%pstate, %%o4
wrpr %%o4, %1, %%pstate
diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h
index 8b2380a2e..f58c9da70 100644
--- a/include/asm-sparc64/processor.h
+++ b/include/asm-sparc64/processor.h
@@ -1,4 +1,4 @@
-/* $Id: processor.h,v 1.26 1997/05/17 05:59:10 davem Exp $
+/* $Id: processor.h,v 1.27 1997/05/23 09:35:52 jj Exp $
* include/asm-sparc64/processor.h
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
@@ -41,7 +41,7 @@ struct thread_struct {
unsigned long fsr;
/* Context switch saved kernel state. */
- unsigned long ksp, kpc, wstate, cwp;
+ unsigned long ksp, kpc, wstate, cwp, ctx;
/* Storage for windows when user stack is bogus. */
struct reg_window reg_window[NSWINS] __attribute__ ((aligned (16)));
@@ -80,8 +80,8 @@ struct thread_struct {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
/* FPU status */ \
0, \
-/* ksp, kpc, wstate, cwp */ \
- 0, 0, 0, 0, \
+/* ksp, kpc, wstate, cwp, secctx */ \
+ 0, 0, 0, 0, 0, \
/* reg_window */ \
{ { { 0, }, { 0, } }, }, \
/* rwbuf_stkptrs */ \
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index fcf33721b..5da6f6dd1 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -1,4 +1,4 @@
-/* $Id: ptrace.h,v 1.7 1997/03/04 16:27:32 jj Exp $ */
+/* $Id: ptrace.h,v 1.8 1997/05/27 19:30:27 jj Exp $ */
#ifndef _SPARC64_PTRACE_H
#define _SPARC64_PTRACE_H
@@ -251,4 +251,20 @@ extern void show_regs(struct pt_regs *);
#define SF_XARG5 0x58
#define SF_XXARG 0x5c
+/* Stuff for the ptrace system call */
+#define PTRACE_SUNATTACH 10
+#define PTRACE_SUNDETACH 11
+#define PTRACE_GETREGS 12
+#define PTRACE_SETREGS 13
+#define PTRACE_GETFPREGS 14
+#define PTRACE_SETFPREGS 15
+#define PTRACE_READDATA 16
+#define PTRACE_WRITEDATA 17
+#define PTRACE_READTEXT 18
+#define PTRACE_WRITETEXT 19
+#define PTRACE_GETFPAREGS 20
+#define PTRACE_SETFPAREGS 21
+
+#define PTRACE_GETUCODE 29 /* stupid bsd-ism */
+
#endif /* !(_SPARC64_PTRACE_H) */
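
These are the SunOS-style request numbers; with PTRACE_GETREGS and friends a tracer moves whole register sets instead of single words. A hedged user-space sketch (that the addr argument carries the destination buffer is an assumption here; error handling elided):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>         /* struct pt_regs layout */

/* Fetch a stopped child's registers into *regs.  Assumes the
 * tracer has already attached and the child is stopped.
 */
static long fetch_child_regs(pid_t child, struct pt_regs *regs)
{
        return ptrace(PTRACE_GETREGS, child, (char *)regs, 0);
}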
diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h
index 3e5d7cc72..fa32f67e5 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -13,18 +13,15 @@
* is entirely private to an implementation, it should not be
* referenced at all outside of this file.
*/
-extern atomic_t __sparc64_bh_counter;
#define get_active_bhs() (bh_mask & bh_active)
-#ifdef __SMP__
-#error SMP not supported on sparc64 yet
-#else
+#ifndef __SMP__
+
+extern int __sparc64_bh_counter;
-#define softirq_trylock() (atomic_read(&__sparc64_bh_counter) ? \
- 0 : \
- ((atomic_set(&__sparc64_bh_counter,1)),1))
-#define softirq_endlock() (atomic_set(&__sparc64_bh_counter, 0))
+#define softirq_trylock() (__sparc64_bh_counter ? 0 : (__sparc64_bh_counter=1))
+#define softirq_endlock() (__sparc64_bh_counter = 0)
#define clear_active_bhs(x) (bh_active &= ~(x))
#define init_bh(nr, routine) \
@@ -54,11 +51,82 @@ do { int ent = nr; \
bh_mask |= 1 << ent; \
} while(0)
+#define start_bh_atomic() do { __sparc64_bh_counter++; barrier(); } while(0)
+
+#define end_bh_atomic() do { barrier(); __sparc64_bh_counter--; } while(0)
+
+#else /* (__SMP__) */
+
+extern atomic_t __sparc64_bh_counter;
+
#define start_bh_atomic() \
do { atomic_inc(&__sparc64_bh_counter); synchronize_irq(); } while(0)
-#define end_bh_atomic() do { atomic_dec(&__sparc64_bh_counter); } while(0)
+#define end_bh_atomic() atomic_dec(&__sparc64_bh_counter)
+
+#include <asm/spinlock.h>
+
+#define init_bh(nr, routine) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_base[ent] = routine; \
+ bh_mask_count[ent] = 0; \
+ bh_mask |= 1 << ent; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define remove_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_base[ent] = NULL; \
+ bh_mask &= ~(1 << ent); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define mark_bh(nr) \
+do { unsigned long flags; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_active |= (1 << nr); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define disable_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_mask &= ~(1 << ent); \
+ bh_mask_count[ent]++; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define enable_bh(nr) \
+do { unsigned long flags; \
+ int ent = nr; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ if (!--bh_mask_count[ent]) \
+ bh_mask |= 1 << ent; \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
+
+#define softirq_trylock() \
+({ \
+ int ret = 1; \
+ if(atomic_add_return(1, &__sparc64_bh_counter) != 1) { \
+ atomic_dec(&__sparc64_bh_counter); \
+ ret = 0; \
+ } \
+ ret; \
+})
+#define softirq_endlock() atomic_dec(&__sparc64_bh_counter)
+#define clear_active_bhs(mask) \
+do { unsigned long flags; \
+ spin_lock_irqsave(&global_bh_lock, flags); \
+ bh_active &= ~(mask); \
+ spin_unlock_irqrestore(&global_bh_lock, flags); \
+} while(0)
-#endif /* !(__SMP__) */
+#endif /* (__SMP__) */
#endif /* !(__SPARC64_SOFTIRQ_H) */
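
On UP the counter can be a plain int because only interrupt entry, not another CPU, can race with it; on SMP the mask/active bookkeeping is serialized by global_bh_lock while the atomic counter gates entry into bottom-half processing. A sketch of the dispatch loop these primitives are shaped for (a stand-in, not the kernel's actual do_bottom_half(); it assumes the usual bh_base[] table from the generic code):

static void run_bottom_halves_sketch(void)
{
        if (!softirq_trylock())         /* another CPU is running BHs */
                return;

        {
                unsigned long active = get_active_bhs();

                clear_active_bhs(active);
                while (active) {
                        int nr = ffz(~active);  /* lowest pending BH */

                        bh_base[nr]();          /* run its handler */
                        active &= ~(1UL << nr);
                }
        }
        softirq_endlock();
}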
diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index dbb569f18..ec1ad2ea0 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -52,9 +52,206 @@ typedef struct { } rwlock_t;
#define write_unlock_irqrestore(lock, flags) restore_flags(flags)
#else /* !(__SMP__) */
-#error SMP not supported on sparc64
+
+typedef unsigned char spinlock_t;
+#define SPIN_LOCK_UNLOCKED 0
+#define spin_lock_init(lock) (*(lock) = 0)
+#define spin_unlock_wait(lock) do { barrier(); } while(*(volatile spinlock_t *)lock)
+
+extern __inline__ void spin_lock(spinlock_t *lock)
+{
+ __asm__ __volatile__("
+1: ldstub [%0], %%g2
+ brnz,a,pn %%g2, 2f
+ ldub [%0], %%g2
+ .text 2
+2: brnz,a,pt %%g2, 2b
+ ldub [%0], %%g2
+ b,a,pt %%xcc, 1b
+ .previous
+" : /* no outputs */
+ : "r" (lock)
+ : "g2", "memory");
+}
+
+extern __inline__ int spin_trylock(spinlock_t *lock)
+{
+ unsigned int result;
+ __asm__ __volatile__("ldstub [%1], %0"
+ : "=r" (result)
+ : "r" (lock)
+ : "memory");
+ return (result == 0);
+}
+
+extern __inline__ void spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
+}
+
+extern __inline__ void spin_lock_irq(spinlock_t *lock)
+{
+ __asm__ __volatile__("
+ wrpr %%g0, 15, %%pil
+1: ldstub [%0], %%g2
+ brnz,a,pn %%g2, 2f
+ ldub [%0], %%g2
+ .text 2
+2: brnz,a,pt %%g2, 2b
+ ldub [%0], %%g2
+ b,a,pt %%xcc, 1b
+ .previous
+" : /* no outputs */
+ : "r" (lock)
+ : "g2", "memory");
+}
+
+extern __inline__ void spin_unlock_irq(spinlock_t *lock)
+{
+ __asm__ __volatile__("
+ stb %%g0, [%0]
+ wrpr %%g0, 0x0, %%pil
+" : /* no outputs */
+ : "r" (lock)
+ : "memory");
+}
+
+#define spin_lock_irqsave(lock, flags) \
+do { register spinlock_t *lp asm("g1"); \
+ lp = lock; \
+ __asm__ __volatile__( \
+ " rdpr %%pil, %0\n\t" \
+ " wrpr %%g0, 15, %%pil\n\t" \
+ "1: ldstub [%1], %%g2\n\t" \
+ " brnz,a,pnt %%g2, 2f\n\t" \
+ " ldub [%1], %%g2\n\t" \
+ " .text 2\n\t" \
+ "2: brnz,a,pt %%g2, 2b\n\t" \
+ " ldub [%1], %%g2\n\t" \
+ " b,a,pt %%xcc, 1b\n\t" \
+ " .previous\n" \
+ : "=r" (flags) \
+ : "r" (lp) \
+ : "g2", "memory"); \
+} while(0)
+
+extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ __asm__ __volatile__("
+ stb %%g0, [%0]
+ wrpr %1, 0x0, %%pil
+" : /* no outputs */
+ : "r" (lock), "r" (flags)
+ : "memory");
+}
+
+/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+typedef unsigned long rwlock_t;
+#define RW_LOCK_UNLOCKED 0
+
+extern __inline__ void read_lock(rwlock_t *rw)
+{
+ __asm__ __volatile__("
+ ldx [%0], %%g2
+1:
+ brlz,pn %%g2, 2f
+4: add %%g2, 1, %%g3
+ casx [%0], %%g2, %%g3
+ cmp %%g2, %%g3
+ bne,a,pn %%xcc, 1b
+ ldx [%0],%%g2
+ .text 2
+2: ldx [%0], %%g2
+3: brlz,pt %%g2, 3b
+ ldx [%0], %%g2
+ b,a,pt %%xcc, 4b
+ .previous
+" : /* no outputs */
+ : "r" (rw)
+ : "g2", "g3", "memory");
+}
+
+extern __inline__ void read_unlock(rwlock_t *rw)
+{
+ __asm__ __volatile__("
+ ldx [%0], %%g2
+1:
+ sub %%g2, 1, %%g3
+ casx [%0], %%g2, %%g3
+ cmp %%g2, %%g3
+ bne,a,pn %%xcc, 1b
+ ldx [%0], %%g2
+" : /* no outputs */
+ : "r" (rw)
+ : "g2", "g3", "memory");
+}
+
+extern __inline__ void write_lock(rwlock_t *rw)
+{
+ __asm__ __volatile__("
+ sethi %%uhi(0x8000000000000000), %%g5
+ ldx [%0], %%g2
+ sllx %%g5, 32, %%g5
+1:
+ brlz,pn %%g2, 5f
+4: or %%g2, %%g5, %%g3
+ casx [%0], %%g2, %%g3
+ cmp %%g2, %%g3
+ bne,a,pn %%xcc, 1b
+ ldx [%0], %%g2
+ andncc %%g3, %%g5, %%g0
+ bne,a,pn %%xcc, 3f
+ ldx [%0], %%g2
+ .text 2
+3:
+ andn %%g2, %%g5, %%g3
+ casx [%0], %%g2, %%g3
+ cmp %%g2, %%g3
+ bne,a,pn %%xcc, 3b
+ ldx [%0], %%g2
+5: ldx [%0], %%g2
+6: brlz,pt %%g2, 6b
+ ldx [%0], %%g2
+ b,a,pt %%xcc, 4b
+ .previous
+" : /* no outputs */
+ : "r" (rw)
+ : "g2", "g3", "g5", "memory", "cc");
+}
+
+extern __inline__ void write_unlock(rwlock_t *rw)
+{
+ __asm__ __volatile__("
+ sethi %%uhi(0x8000000000000000), %%g5
+ ldx [%0], %%g2
+ sllx %%g5, 32, %%g5
+1:
+ andn %%g2, %%g5, %%g3
+ casx [%0], %%g2, %%g3
+ cmp %%g2, %%g3
+ bne,a,pn %%xcc, 1b
+ ldx [%0], %%g2
+" : /* no outputs */
+ : "r" (rw)
+ : "g2", "g3", "g5", "memory", "cc");
+}
+
+#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_and_cli(flags); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_and_cli(flags); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
+
#endif /* __SMP__ */
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */
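
The rwlock packs its state into one 64-bit word: bit 63 is the writer bit (built by the sethi %uhi/sllx pair) and the low bits count readers, with casx providing the atomic update. A C model of the reader fast path (the __cas64 helper stands in for the casx instruction and is not a real kernel primitive):

#define WRITER_BIT 0x8000000000000000UL

/* Stand-in for casx: compare *p with old, store new on match,
 * return the value found in *p.
 */
extern unsigned long __cas64(volatile unsigned long *p,
                             unsigned long old, unsigned long new);

static void read_lock_model(volatile unsigned long *rw)
{
        unsigned long old;

        for (;;) {
                old = *rw;
                if (old & WRITER_BIT)           /* writer active: spin */
                        continue;
                if (__cas64(rw, old, old + 1) == old)
                        return;                 /* reader count bumped */
        }
}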
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index 51750311c..cb17f1888 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -1,4 +1,4 @@
-/* $Id: unistd.h,v 1.4 1997/04/19 08:52:25 jj Exp $ */
+/* $Id: unistd.h,v 1.5 1997/05/21 10:21:57 jj Exp $ */
#ifndef _SPARC64_UNISTD_H
#define _SPARC64_UNISTD_H
@@ -416,7 +416,7 @@ static __inline__ _syscall0(int,idle)
static __inline__ _syscall0(int,fork)
static __inline__ _syscall2(int,clone,unsigned long,flags,char *,ksp)
static __inline__ _syscall0(int,pause)
-static __inline__ _syscall0(int,setup)
+static __inline__ _syscall1(int,setup,int,magic)
static __inline__ _syscall0(int,sync)
static __inline__ _syscall0(pid_t,setsid)
static __inline__ _syscall3(int,write,int,fd,__const__ char *,buf,off_t,count)
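
setup() now takes its magic argument, hence the move from _syscall0 to _syscall1. These macros expand to tiny stubs that load the call number and arguments into registers and trap into the kernel. Roughly, for this one (simplified: the real expansion also tests the condition codes and sets errno, and the 0x6d trap number as the 64-bit SPARC Linux syscall trap is an assumption of this sketch):

static __inline__ int setup(int magic)
{
        register long g1 __asm__("g1") = __NR_setup;    /* call number */
        register long o0 __asm__("o0") = magic;         /* argument 0 */

        __asm__ __volatile__("t 0x6d"                   /* enter kernel */
                             : "=r" (o0)
                             : "r" (g1), "0" (o0)
                             : "cc", "memory");
        return (int) o0;
}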