Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/bitops.h        12
-rw-r--r--  include/asm-sparc64/hardirq.h        5
-rw-r--r--  include/asm-sparc64/io.h            35
-rw-r--r--  include/asm-sparc64/mmu_context.h    6
-rw-r--r--  include/asm-sparc64/pgalloc.h       22
-rw-r--r--  include/asm-sparc64/posix_types.h    3
-rw-r--r--  include/asm-sparc64/softirq.h      113
7 files changed, 41 insertions(+), 155 deletions(-)
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h
index 27820f265..6a6ec52b1 100644
--- a/include/asm-sparc64/bitops.h
+++ b/include/asm-sparc64/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.26 1999/01/07 14:14:15 jj Exp $
+/* $Id: bitops.h,v 1.27 2000/02/09 03:28:33 davem Exp $
* bitops.h: Bit string operations on the V9.
*
* Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
@@ -20,7 +20,7 @@
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_set_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -60,7 +60,7 @@ extern __inline__ void set_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -100,7 +100,7 @@ extern __inline__ void clear_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr, void *addr)
+extern __inline__ int test_and_change_bit(unsigned long nr, void *addr)
{
unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
unsigned long oldbit;
@@ -135,9 +135,9 @@ extern __inline__ void change_bit(unsigned long nr, void *addr)
: "g5", "g7", "cc", "memory");
}
-extern __inline__ unsigned long test_bit(int nr, __const__ void *addr)
+extern __inline__ int test_bit(int nr, __const__ void *addr)
{
- return 1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63));
+ return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL;
}
/* The easy/cheese version for now. */
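Note: a small user-space sketch (an illustration with assumed values, not taken from the patch) of why bit-test results are normalised to 0/1 before being narrowed to int: if the raw masked 64-bit word were returned instead, a bit above position 31 would vanish in the truncation.

#include <stdio.h>

/* Mirrors the new test_bit(): shift the bit down, then force a clean
 * 0/1 result before it is narrowed to int. */
static int test_bit_like(int nr, const unsigned long *addr)
{
        return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL;
}

int main(void)
{
        unsigned long word = 1UL << 40;         /* a bit above position 31 */

        /* Returning the raw masked word and narrowing it loses the bit: */
        printf("raw mask narrowed to int: %d\n", (int)(word & (1UL << 40)));
        /* The normalised form keeps the answer correct: */
        printf("normalised test_bit:      %d\n", test_bit_like(40, &word));
        return 0;
}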
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index 7df1d1346..daff61ac4 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -16,10 +16,13 @@ extern unsigned int local_irq_count;
/*
* Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * or hardware interrupt processing? On any cpu?
*/
#define in_interrupt() ((local_irq_count + local_bh_count) != 0)
+/* This tests only the local processor's hw IRQ context disposition. */
+#define in_irq() (local_irq_count != 0)
+
#ifndef __SMP__
#define hardirq_trylock(cpu) (local_irq_count == 0)
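Note: a minimal user-space mock (simplified single-CPU counters, not the kernel's actual bookkeeping) showing the distinction the new in_irq() draws: it answers only "is this CPU in hard-IRQ context?", while in_interrupt() also counts bottom-half processing.

#include <stdio.h>

static unsigned int local_irq_count;
static unsigned int local_bh_count;

#define in_interrupt() ((local_irq_count + local_bh_count) != 0)
#define in_irq()       (local_irq_count != 0)

int main(void)
{
        local_bh_count = 1;     /* pretend a bottom half is running */
        printf("bh only:   in_interrupt=%d in_irq=%d\n", in_interrupt(), in_irq());
        local_irq_count = 1;    /* pretend a hard IRQ nests on top */
        printf("bh + irq:  in_interrupt=%d in_irq=%d\n", in_interrupt(), in_irq());
        return 0;
}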
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index cb7fba53f..788e8dd18 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.30 2000/01/28 13:43:14 jj Exp $ */
+/* $Id: io.h,v 1.31 2000/02/08 05:11:38 jj Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
@@ -13,43 +13,10 @@
#define __SLOW_DOWN_IO do { } while (0)
#define SLOW_DOWN_IO do { } while (0)
-#define NEW_PCI_DMA_MAP
-
-#ifndef NEW_PCI_DMA_MAP
-#define PCI_DVMA_HASHSZ 256
-
-extern unsigned long pci_dvma_v2p_hash[PCI_DVMA_HASHSZ];
-extern unsigned long pci_dvma_p2v_hash[PCI_DVMA_HASHSZ];
-
-#define pci_dvma_ahashfn(addr) (((addr) >> 24) & 0xff)
-
-extern __inline__ unsigned long virt_to_bus(volatile void *addr)
-{
- unsigned long vaddr = (unsigned long)addr;
- unsigned long off;
-
- /* Handle kernel variable pointers... */
- if (vaddr < PAGE_OFFSET)
- vaddr += PAGE_OFFSET - (unsigned long)&empty_zero_page;
-
- off = pci_dvma_v2p_hash[pci_dvma_ahashfn(vaddr - PAGE_OFFSET)];
- return vaddr + off;
-}
-
-extern __inline__ void *bus_to_virt(unsigned long addr)
-{
- unsigned long paddr = addr & 0xffffffffUL;
- unsigned long off;
-
- off = pci_dvma_p2v_hash[pci_dvma_ahashfn(paddr)];
- return (void *)(paddr + off);
-}
-#else
extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
#define virt_to_bus virt_to_bus_not_defined_use_pci_map
extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
#define bus_to_virt bus_to_virt_not_defined_use_pci_map
-#endif
/* Different PCI controllers we support have their PCI MEM space
* mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
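Note: a rough sketch (assuming the pci_map_single()/pci_unmap_single() interface that the poison stubs above point drivers at) of the replacement pattern for virt_to_bus()/bus_to_virt(): map the buffer for DMA and keep the returned dma_addr_t handle, rather than translating between kernel pointers and bus addresses.

#include <linux/pci.h>

/* Map a buffer for device-bound DMA; replaces "bus = virt_to_bus(buf)". */
static dma_addr_t example_map_for_device(struct pci_dev *pdev,
                                         void *buf, size_t len)
{
        return pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
}

/* Release the mapping once the device is done with it. */
static void example_unmap(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
        pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE);
}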
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index ae61e47a6..d6ef977d9 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -1,4 +1,4 @@
-/* $Id: mmu_context.h,v 1.41 1999/09/10 15:39:03 jj Exp $ */
+/* $Id: mmu_context.h,v 1.42 2000/02/08 07:47:03 davem Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H
@@ -10,6 +10,10 @@
#include <asm/system.h>
#include <asm/spitfire.h>
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
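Note: for context, a rough from-memory sketch (possibly inexact, not sparc64 code) of the call site this empty hook satisfies: when the scheduler switches to a kernel thread with no mm of its own, it borrows the previous task's address space and notifies the architecture via enter_lazy_tlb(); sparc64 needs no action, hence the empty body.

/* Approximate shape of the generic scheduler path: */
if (!next->mm) {                        /* switching to a kernel thread */
        next->active_mm = prev->active_mm;
        atomic_inc(&prev->active_mm->mm_count);
        enter_lazy_tlb(prev->active_mm, next, this_cpu);
} else {
        switch_mm(prev->active_mm, next->mm, next, this_cpu);
}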
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index c0e4a12a1..fe4d9e1fa 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -94,6 +94,28 @@ extern __inline__ void flush_tlb_page(struct vm_area_struct *vma, unsigned long
#endif /* ! __SMP__ */
+/* This will change for Cheetah and later chips. */
+#define VPTE_BASE 0xfffffffe00000000
+
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ /* Note the signed type. */
+ long s = start, e = end;
+ if (s > e)
+ /* Nobody should call us with start below VM hole and end above.
+ See if it is really true. */
+ BUG();
+#if 0
+ /* Currently free_pgtables guarantees this. */
+ s &= PMD_MASK;
+ e = (e + PMD_SIZE - 1) & PMD_MASK;
+#endif
+ flush_tlb_range(mm,
+ VPTE_BASE + (s >> (PAGE_SHIFT - 3)),
+ VPTE_BASE + (e >> (PAGE_SHIFT - 3)));
+}
+
/* Page table allocation/freeing. */
#ifdef __SMP__
/* Sliiiicck */
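Note: a worked numeric example (plain user-space arithmetic, assuming sparc64's 8 KB pages so PAGE_SHIFT is 13 and PTEs are 8 bytes) of the mapping flush_tlb_pgtables() relies on: the PTE for user address V lives at VPTE_BASE + (V >> (PAGE_SHIFT - 3)), so flushing the scaled-down range of the VPTE area covers the page tables backing [start, end).

#include <stdio.h>

#define PAGE_SHIFT 13
#define VPTE_BASE  0xfffffffe00000000UL

int main(void)
{
        unsigned long uaddr[] = { 0x0UL, 0x2000UL, 0x70000000UL };

        for (int i = 0; i < 3; i++)
                printf("user %#018lx -> pte at %#018lx\n",
                       uaddr[i], VPTE_BASE + (uaddr[i] >> (PAGE_SHIFT - 3)));
        return 0;
}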
diff --git a/include/asm-sparc64/posix_types.h b/include/asm-sparc64/posix_types.h
index e2a024e3e..e486344ad 100644
--- a/include/asm-sparc64/posix_types.h
+++ b/include/asm-sparc64/posix_types.h
@@ -9,11 +9,12 @@
#if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8)
typedef unsigned long int __kernel_size_t;
+typedef long int __kernel_ssize_t;
#else
typedef unsigned long long __kernel_size_t;
+typedef long long __kernel_ssize_t;
#endif
-typedef long long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
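Note: a quick user-space check (the typedefs copied as stand-ins, not the kernel headers themselves) of the invariant this hunk restores: whichever branch of the #if is taken, __kernel_ssize_t is now the signed type of exactly the same width as __kernel_size_t, instead of always being long long.

#include <stdio.h>

#if (__GNUC__ > 2) || (__GNUC_MINOR__ >= 8)
typedef unsigned long int __kernel_size_t;
typedef long int __kernel_ssize_t;
#else
typedef unsigned long long __kernel_size_t;
typedef long long __kernel_ssize_t;
#endif

int main(void)
{
        printf("size_t: %zu bytes, ssize_t: %zu bytes\n",
               sizeof(__kernel_size_t), sizeof(__kernel_ssize_t));
        return 0;
}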
diff --git a/include/asm-sparc64/softirq.h b/include/asm-sparc64/softirq.h
index b8e017d79..460c96633 100644
--- a/include/asm-sparc64/softirq.h
+++ b/include/asm-sparc64/softirq.h
@@ -19,117 +19,6 @@ extern unsigned int local_bh_count;
#define local_bh_disable() (local_bh_count++)
#define local_bh_enable() (local_bh_count--)
-/* The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(mask) \
- __asm__ __volatile__( \
-"1: ldx [%1], %%g7\n" \
-" andn %%g7, %0, %%g5\n" \
-" casx [%1], %%g7, %%g5\n" \
-" cmp %%g7, %%g5\n" \
-" bne,pn %%xcc, 1b\n" \
-" nop" \
- : /* no outputs */ \
- : "HIr" (mask), "r" (&bh_active) \
- : "g5", "g7", "cc", "memory")
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- membar("#StoreStore");
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifndef __SMP__
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_count++;
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_count--;
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1))
-#define softirq_endlock(cpu) (local_bh_count = 0)
-#define synchronize_bh() barrier()
-
-#else /* (__SMP__) */
-
-extern atomic_t global_bh_lock;
-extern spinlock_t global_bh_count;
-
-extern void synchronize_bh(void);
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (spin_trylock(&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0 &&
- cpu_data[cpu].bh_count == 0) {
- ++(cpu_data[cpu].bh_count);
- return 1;
- }
- spin_unlock(&global_bh_count);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- (cpu_data[cpu].bh_count)--;
- spin_unlock(&global_bh_count);
-}
-
-#endif /* (__SMP__) */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count != 0)
#endif /* !(__SPARC64_SOFTIRQ_H) */
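Note: a tiny user-space mock (single-CPU counter only, assumed for illustration) of the interface that survives this cleanup: local_bh_disable()/local_bh_enable() nest a per-CPU count, and the new in_softirq() simply reports whether that count is non-zero.

#include <stdio.h>

static unsigned int local_bh_count;

#define local_bh_disable() (local_bh_count++)
#define local_bh_enable()  (local_bh_count--)
#define in_softirq()       (local_bh_count != 0)

int main(void)
{
        printf("before: in_softirq=%d\n", in_softirq());
        local_bh_disable();
        printf("inside: in_softirq=%d\n", in_softirq());
        local_bh_enable();
        printf("after:  in_softirq=%d\n", in_softirq());
        return 0;
}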