summaryrefslogtreecommitdiffstats
path: root/include/asm-sparc
diff options
context:
space:
mode:
authorRalf Baechle <ralf@linux-mips.org>2000-02-23 00:40:54 +0000
committerRalf Baechle <ralf@linux-mips.org>2000-02-23 00:40:54 +0000
commit529c593ece216e4aaffd36bd940cb94f1fa63129 (patch)
tree78f1c0b805f5656aa7b0417a043c5346f700a2cf /include/asm-sparc
parent0bd079751d25808d1972baee5c4eaa1db2227257 (diff)
Merge with 2.3.43. I did ignore all modifications to the qlogicisp.c
driver due to the Origin A64 hacks.
Diffstat (limited to 'include/asm-sparc')
-rw-r--r--include/asm-sparc/bitops.h12
-rw-r--r--include/asm-sparc/hardirq.h5
-rw-r--r--include/asm-sparc/mmu_context.h4
-rw-r--r--include/asm-sparc/pci.h85
-rw-r--r--include/asm-sparc/pgalloc.h6
-rw-r--r--include/asm-sparc/pgtable.h19
-rw-r--r--include/asm-sparc/softirq.h155
7 files changed, 120 insertions, 166 deletions
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h
index fbaa0f005..1139f58f7 100644
--- a/include/asm-sparc/bitops.h
+++ b/include/asm-sparc/bitops.h
@@ -1,4 +1,4 @@
-/* $Id: bitops.h,v 1.54 1998/09/21 05:07:34 jj Exp $
+/* $Id: bitops.h,v 1.55 2000/02/09 03:28:32 davem Exp $
* bitops.h: Bit string operations on the Sparc.
*
* Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -94,7 +94,7 @@ extern __inline__ void change_bit(unsigned long nr, void *addr)
* all bit-ops return 0 if bit was previously clear and != 0 otherwise.
*/
-extern __inline__ unsigned long test_and_set_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_set_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -116,7 +116,7 @@ extern __inline__ void set_bit(unsigned long nr, __SMPVOL void *addr)
(void) test_and_set_bit(nr, addr);
}
-extern __inline__ unsigned long test_and_clear_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_clear_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -139,7 +139,7 @@ extern __inline__ void clear_bit(unsigned long nr, __SMPVOL void *addr)
(void) test_and_clear_bit(nr, addr);
}
-extern __inline__ unsigned long test_and_change_bit(unsigned long nr, __SMPVOL void *addr)
+extern __inline__ int test_and_change_bit(unsigned long nr, __SMPVOL void *addr)
{
register unsigned long mask asm("g2");
register unsigned long *ADDR asm("g1");
@@ -165,9 +165,9 @@ extern __inline__ void change_bit(unsigned long nr, __SMPVOL void *addr)
#endif /* __KERNEL__ */
/* The following routine need not be atomic. */
-extern __inline__ unsigned long test_bit(int nr, __const__ __SMPVOL void *addr)
+extern __inline__ int test_bit(int nr, __const__ __SMPVOL void *addr)
{
- return 1UL & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31));
+ return (1 & (((__const__ unsigned int *) addr)[nr >> 5] >> (nr & 31))) != 0;
}
/* The easy/cheese version for now. */
diff --git a/include/asm-sparc/hardirq.h b/include/asm-sparc/hardirq.h
index ed47c7760..56fe88bba 100644
--- a/include/asm-sparc/hardirq.h
+++ b/include/asm-sparc/hardirq.h
@@ -26,6 +26,8 @@ extern unsigned int local_irq_count;
#define synchronize_irq() barrier()
+#define in_irq() (local_irq_count != 0)
+
#else
#include <asm/atomic.h>
@@ -45,6 +47,9 @@ extern atomic_t global_irq_count;
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+#define in_irq() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] != 0); })
+
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
diff --git a/include/asm-sparc/mmu_context.h b/include/asm-sparc/mmu_context.h
index ab2aeebef..604c447a9 100644
--- a/include/asm-sparc/mmu_context.h
+++ b/include/asm-sparc/mmu_context.h
@@ -5,6 +5,10 @@
#ifndef __ASSEMBLY__
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
+{
+}
+
/*
* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
diff --git a/include/asm-sparc/pci.h b/include/asm-sparc/pci.h
index f30a5bcc2..a2749a907 100644
--- a/include/asm-sparc/pci.h
+++ b/include/asm-sparc/pci.h
@@ -10,4 +10,89 @@
#define PCIBIOS_MIN_IO 0UL
#define PCIBIOS_MIN_MEM 0UL
+#ifdef __KERNEL__
+
+/* Dynamic DMA mapping stuff.
+ */
+
+#include <asm/scatterlist.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices.
+ */
+extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what was passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size);
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size);
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA. This is the scather-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents);
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents);
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to teardown the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern void pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size);
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern void pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems);
+
+#endif /* __KERNEL__ */
+
#endif /* __SPARC_PCI_H */
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index ab87c0629..fc323280b 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -1,4 +1,4 @@
-/* $Id: pgalloc.h,v 1.2 2000/01/15 00:51:42 anton Exp $ */
+/* $Id: pgalloc.h,v 1.3 2000/02/03 10:13:31 jj Exp $ */
#ifndef _SPARC_PGALLOC_H
#define _SPARC_PGALLOC_H
@@ -69,6 +69,10 @@ BTFIXUPDEF_CALL(void, flush_tlb_mm, struct mm_struct *)
BTFIXUPDEF_CALL(void, flush_tlb_range, struct mm_struct *, unsigned long, unsigned long)
BTFIXUPDEF_CALL(void, flush_tlb_page, struct vm_area_struct *, unsigned long)
+extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+}
+
#define flush_tlb_all() BTFIXUP_CALL(flush_tlb_all)()
#define flush_tlb_mm(mm) BTFIXUP_CALL(flush_tlb_mm)(mm)
#define flush_tlb_range(mm,start,end) BTFIXUP_CALL(flush_tlb_range)(mm,start,end)
diff --git a/include/asm-sparc/pgtable.h b/include/asm-sparc/pgtable.h
index 931e67169..8829d323c 100644
--- a/include/asm-sparc/pgtable.h
+++ b/include/asm-sparc/pgtable.h
@@ -1,4 +1,4 @@
-/* $Id: pgtable.h,v 1.87 1999/12/27 06:37:14 anton Exp $ */
+/* $Id: pgtable.h,v 1.88 2000/02/06 22:56:09 zaitcev Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H
@@ -50,16 +50,21 @@ BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct s
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
-/* mmu_map/unmap is provided by iommu/iounit; mmu_flush/inval probably belongs to CPU... */
+/*
+ * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
+ * mmu_flush/inval belong to CPU. Valid on IIep.
+ */
BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long va, __u32 addr, int len)
-BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long addr, int len)
-BTFIXUPDEF_CALL(void, mmu_inval_dma_area, unsigned long addr, int len)
-BTFIXUPDEF_CALL(void, mmu_flush_dma_area, unsigned long addr, int len)
+BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
+BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)
+BTFIXUPDEF_CALL(void, mmu_inval_dma_area, unsigned long virt, int len)
+BTFIXUPDEF_CALL(void, mmu_flush_dma_area, unsigned long virt, int len)
#define mmu_map_dma_area(va, ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
-#define mmu_inval_dma_area(va,len) BTFIXUP_CALL(mmu_unmap_dma_area)(va,len)
-#define mmu_flush_dma_area(va,len) BTFIXUP_CALL(mmu_unmap_dma_area)(va,len)
+#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
+#define mmu_inval_dma_area(va,len) BTFIXUP_CALL(mmu_inval_dma_area)(va,len)
+#define mmu_flush_dma_area(va,len) BTFIXUP_CALL(mmu_flush_dma_area)(va,len)
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
diff --git a/include/asm-sparc/softirq.h b/include/asm-sparc/softirq.h
index c82a080ad..d61b56554 100644
--- a/include/asm-sparc/softirq.h
+++ b/include/asm-sparc/softirq.h
@@ -14,170 +14,21 @@
#include <asm/hardirq.h>
-#define get_active_bhs() (bh_mask & bh_active)
-
#ifdef __SMP__
extern unsigned int local_bh_count[NR_CPUS];
-/*
- * The locking mechanism for base handlers, to prevent re-entrancy,
- * is entirely private to an implementation, it should not be
- * referenced at all outside of this file.
- */
-extern atomic_t global_bh_lock;
-extern spinlock_t global_bh_count;
-extern spinlock_t sparc_bh_lock;
-
-extern void synchronize_bh(void);
-
-static inline void clear_active_bhs(unsigned int mask)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_active &= ~(mask);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void remove_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- bh_base[nr] = NULL;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-extern inline void mark_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_active |= (1 << nr);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- unsigned long flags;
- spin_lock_irqsave(&sparc_bh_lock, flags);
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
- spin_unlock_irqrestore(&sparc_bh_lock, flags);
-}
-
-static inline void start_bh_atomic(void)
-{
- atomic_inc(&global_bh_lock);
- synchronize_bh();
-}
-
-static inline void end_bh_atomic(void)
-{
- atomic_dec(&global_bh_lock);
-}
-
-/* These are for the IRQs testing the lock */
-static inline int softirq_trylock(int cpu)
-{
- if (spin_trylock(&global_bh_count)) {
- if (atomic_read(&global_bh_lock) == 0 &&
- local_bh_count[cpu] == 0) {
- ++local_bh_count[cpu];
- return 1;
- }
- spin_unlock(&global_bh_count);
- }
- return 0;
-}
-
-static inline void softirq_endlock(int cpu)
-{
- local_bh_count[cpu]--;
- spin_unlock(&global_bh_count);
-}
-
#define local_bh_disable() (local_bh_count[smp_processor_id()]++)
#define local_bh_enable() (local_bh_count[smp_processor_id()]--)
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
+
#else
extern unsigned int local_bh_count;
-#define clear_active_bhs(x) (bh_active &= ~(x))
-#define mark_bh(nr) (bh_active |= (1 << (nr)))
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (local_bh_count ? 0 : (local_bh_count=1))
-#define softirq_endlock(cpu) (local_bh_count = 0)
-#define synchronize_bh() barrier()
-
#define local_bh_disable() (local_bh_count++)
#define local_bh_enable() (local_bh_count--)
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_count++;
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_count--;
-}
+#define in_softirq() (local_bh_count != 0)
#endif /* SMP */