author     Ralf Baechle <ralf@linux-mips.org>    2000-02-24 00:12:35 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2000-02-24 00:12:35 +0000
commit     482368b1a8e45430672c58c9a42e7d2004367126 (patch)
tree       ce2a1a567d4d62dee7c2e71a46a99cf72cf1d606 /include/asm-arm
parent     e4d0251c6f56ab2e191afb70f80f382793e23f74 (diff)
Merge with 2.3.47. Guys, this is buggy as shit. You've been warned.
Diffstat (limited to 'include/asm-arm')
-rw-r--r--  include/asm-arm/cpu-multi32.h          |   5
-rw-r--r--  include/asm-arm/cpu-single.h           |   2
-rw-r--r--  include/asm-arm/hardirq.h              |   5
-rw-r--r--  include/asm-arm/io.h                   |  19
-rw-r--r--  include/asm-arm/md.h                   |  13
-rw-r--r--  include/asm-arm/page.h                 |  14
-rw-r--r--  include/asm-arm/pci.h                  | 160
-rw-r--r--  include/asm-arm/pgtable.h              |   5
-rw-r--r--  include/asm-arm/proc-armo/cache.h      |   1
-rw-r--r--  include/asm-arm/proc-armo/semaphore.h  | 170
-rw-r--r--  include/asm-arm/proc-armv/cache.h      |   8
-rw-r--r--  include/asm-arm/proc-armv/locks.h      | 134
-rw-r--r--  include/asm-arm/proc-armv/semaphore.h  | 117
-rw-r--r--  include/asm-arm/semaphore.h            | 219
-rw-r--r--  include/asm-arm/softirq.h              |  65
-rw-r--r--  include/asm-arm/unaligned.h            |   2
16 files changed, 622 insertions(+), 317 deletions(-)
diff --git a/include/asm-arm/cpu-multi32.h b/include/asm-arm/cpu-multi32.h
index 5b5b07921..500e473ca 100644
--- a/include/asm-arm/cpu-multi32.h
+++ b/include/asm-arm/cpu-multi32.h
@@ -96,6 +96,10 @@ extern struct processor {
* Idle the processor
*/
int (*_do_idle)(void);
+ /*
+ * flush I cache for a page
+ */
+ void (*_flush_icache_page)(unsigned long address);
} processor;
extern const struct processor arm6_processor_functions;
@@ -123,6 +127,7 @@ extern const struct processor sa110_processor_functions;
#define cpu_flush_icache_area(start,end) processor._flush_icache_area(start,end)
#define cpu_cache_wback_area(start,end) processor._cache_wback_area(start,end)
#define cpu_cache_purge_area(start,end) processor._cache_purge_area(start,end)
+#define cpu_flush_icache_page(virt) processor._flush_icache_page(virt)
#define cpu_switch_mm(pgd,tsk) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)))
diff --git a/include/asm-arm/cpu-single.h b/include/asm-arm/cpu-single.h
index fcfaf7745..0e992a564 100644
--- a/include/asm-arm/cpu-single.h
+++ b/include/asm-arm/cpu-single.h
@@ -35,6 +35,7 @@
#define cpu_flush_icache_area cpu_fn(CPU_NAME,_flush_icache_area)
#define cpu_cache_wback_area cpu_fn(CPU_NAME,_cache_wback_area)
#define cpu_cache_purge_area cpu_fn(CPU_NAME,_cache_purge_area)
+#define cpu_flush_icache_page cpu_fn(CPU_NAME,_flush_icache_page)
#ifndef __ASSEMBLY__
@@ -65,6 +66,7 @@ extern unsigned long cpu_reset(void);
extern void cpu_flush_icache_area(unsigned long start, unsigned long size);
extern void cpu_cache_wback_area(unsigned long start, unsigned long end);
extern void cpu_cache_purge_area(unsigned long start, unsigned long end);
+extern void cpu_flush_icache_page(unsigned long virt);
#define cpu_switch_mm(pgd,tsk) cpu_set_pgd(__virt_to_phys((unsigned long)(pgd)))
diff --git a/include/asm-arm/hardirq.h b/include/asm-arm/hardirq.h
index 79aec2cf6..399ba566d 100644
--- a/include/asm-arm/hardirq.h
+++ b/include/asm-arm/hardirq.h
@@ -9,7 +9,10 @@ extern unsigned int local_irq_count[NR_CPUS];
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
*/
-#define in_interrupt() (local_irq_count[smp_processor_id()] + local_bh_count[smp_processor_id()] != 0)
+#define in_interrupt() ({ const int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
+
+#define in_irq() (local_irq_count[smp_processor_id()] != 0)
#ifndef __SMP__
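As a usage note (not part of the patch): a minimal sketch of how a driver might use the new context tests. The foo_dev structure and foo_do_work() helper are hypothetical; only in_irq() and in_interrupt() come from this header.

struct foo_dev { int pending; };				/* hypothetical device state */
static void foo_do_work(struct foo_dev *dev, int may_sleep);	/* hypothetical helper */

static void foo_poke(struct foo_dev *dev)
{
	if (in_irq()) {
		/* hard interrupt handler: defer everything non-trivial */
		dev->pending = 1;
	} else if (in_interrupt()) {
		/* bottom-half context: may run, but must not sleep */
		foo_do_work(dev, 0);
	} else {
		/* process context: sleeping is allowed */
		foo_do_work(dev, 1);
	}
}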
diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h
index 7e2192902..871e50a65 100644
--- a/include/asm-arm/io.h
+++ b/include/asm-arm/io.h
@@ -15,6 +15,7 @@
#ifndef __ASM_ARM_IO_H
#define __ASM_ARM_IO_H
+#include <linux/types.h>
#include <asm/arch/hardware.h>
#include <asm/arch/io.h>
#include <asm/proc/io.h>
@@ -66,21 +67,31 @@ extern __inline__ void *phys_to_virt(unsigned long x)
/*
* ioremap and friends
*/
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+extern void * __ioremap(unsigned long offset, size_t size, unsigned long flags);
extern void __iounmap(void *addr);
#define ioremap(off,sz) __arch_ioremap((off),(sz),0)
#define ioremap_nocache(off,sz) __arch_ioremap((off),(sz),1)
#define iounmap(_addr) __iounmap(_addr)
+/*
+ * DMA-consistent mapping functions. These allocate/free a region of
+ * uncached, unwrite-buffered mapped memory space for use with DMA
+ * devices. This is the "generic" version. The PCI specific version
+ * is in pci.h
+ */
+extern void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle);
+extern void consistent_free(void *vaddr);
+extern void consistent_sync(void *vaddr, size_t size, int rw);
+
extern void __readwrite_bug(const char *fn);
/*
* String version of IO memory access ops:
*/
-extern void _memcpy_fromio(void *, unsigned long, unsigned long);
-extern void _memcpy_toio(unsigned long, const void *, unsigned long);
-extern void _memset_io(unsigned long, int, unsigned long);
+extern void _memcpy_fromio(void *, unsigned long, size_t);
+extern void _memcpy_toio(unsigned long, const void *, size_t);
+extern void _memset_io(unsigned long, int, size_t);
#define __raw_writeb(val,addr) __arch_putb(val,addr)
#define __raw_writew(val,addr) __arch_putw(val,addr)
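As an aside on the consistent-mapping interface declared above, a minimal sketch of intended use; the sizes, error handling and the direction value 3 follow the conventions visible in this patch rather than a documented API, and foo_setup_dma() is a hypothetical caller.

static int foo_setup_dma(void)
{
	dma_addr_t handle;
	void *desc;
	char *data;

	/* long-lived descriptor area: mapped uncached, so always coherent */
	desc = consistent_alloc(GFP_KERNEL | GFP_DMA, 4096, &handle);
	if (desc == NULL)
		return -ENOMEM;

	/* ordinary cached memory handed to the device for one transfer:
	 * make it consistent around the DMA instead */
	data = kmalloc(1024, GFP_KERNEL);
	if (data != NULL)
		consistent_sync(data, 1024, 3);	/* 3 == read+write, as used in pci.h */

	kfree(data);
	consistent_free(desc);
	return 0;
}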
diff --git a/include/asm-arm/md.h b/include/asm-arm/md.h
deleted file mode 100644
index 0a2c5dd01..000000000
--- a/include/asm-arm/md.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* $Id: md.h,v 1.1 1997/12/15 15:11:57 jj Exp $
- * md.h: High speed xor_block operation for RAID4/5
- *
- */
-
-#ifndef __ASM_MD_H
-#define __ASM_MD_H
-
-/* #define HAVE_ARCH_XORBLOCK */
-
-#define MD_XORBLOCK_ALIGNMENT sizeof(long)
-
-#endif /* __ASM_MD_H */
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index f46fadbb8..e33597ff6 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -65,6 +65,20 @@ extern void __bug(const char *file, int line, void *data);
#define BUG() __bug(__FILE__, __LINE__, NULL)
#define PAGE_BUG(page) __bug(__FILE__, __LINE__, page)
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
#endif /* !__ASSEMBLY__ */
#include <asm/arch/memory.h>
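To make the new helper concrete: get_order() returns the smallest order n such that PAGE_SIZE << n covers the requested size. With 4 KB pages (PAGE_SHIFT == 12) one would expect, for example:

	int order;

	order = get_order(1);			/* 0: one page   */
	order = get_order(PAGE_SIZE);		/* 0: one page   */
	order = get_order(PAGE_SIZE + 1);	/* 1: two pages  */
	order = get_order(8 * PAGE_SIZE);	/* 3: eight pages */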
diff --git a/include/asm-arm/pci.h b/include/asm-arm/pci.h
index 5505909d4..e0de271b3 100644
--- a/include/asm-arm/pci.h
+++ b/include/asm-arm/pci.h
@@ -3,5 +3,163 @@
#define pcibios_assign_all_busses() 0
-#endif
+#define PCIBIOS_MIN_IO 0x8000
+#define PCIBIOS_MIN_MEM 0x40000000
+
+#ifdef __KERNEL__
+
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+#define pci_alloc_consistent(hwdev,size,handle) \
+ ({ \
+ void *__ret; \
+ int __gfp = GFP_KERNEL; \
+ \
+ if ((hwdev) == NULL || \
+ (hwdev)->dma_mask != 0xffffffff) \
+ __gfp |= GFP_DMA; \
+ \
+ __ret = consistent_alloc(__gfp, (size), \
+ (handle)); \
+ __ret; \
+ })
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what was passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ consistent_free(vaddr);
+}
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ consistent_sync(ptr, size, 3);
+ return virt_to_bus(ptr);
+}
+
+/* Unmap a single streaming mode DMA translation. The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call. All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+ /* nothing to do */
+}
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * above pci_map_single interface. Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length. They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ * DMA address/length pairs than there are SG table elements.
+ * (for example via virtual mapping capabilities)
+ * The routine returns the number of addr/length pairs actually
+ * used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ int i;
+ for (i = 0; i < nents; i++, sg++)
+ consistent_sync(sg->address, sg->length, 3);
+
+ return nents;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+ /* nothing to do */
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to tear down the PCI dma
+ * mapping, you must call this function before doing so. At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern inline void
+pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+{
+ consistent_sync(bus_to_virt(dma_handle), size, 3);
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern inline void
+pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ consistent_sync(sg->address, sg->length, 3);
+}
+
+/* Return whether the given PCI device DMA address mask can
+ * be supported properly. For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+ return 1;
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg) ((sg)->length)
+
+#endif /* __KERNEL__ */
+
+#endif
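For orientation, a compressed example of how a driver would combine the two mapping styles introduced here. pdev, skb, ioaddr and the register offset are hypothetical, and the PCI_DMA_TODEVICE direction constant is assumed from the generic PCI DMA description rather than anything guaranteed to exist in this tree.

	void *ring;
	dma_addr_t ring_dma, buf_dma;

	/* coherent, long-lived: descriptor ring shared with the device */
	ring = pci_alloc_consistent(pdev, 4096, &ring_dma);
	if (ring == NULL)
		return -ENOMEM;

	/* streaming, per-packet: map, hand to the device, unmap when done */
	buf_dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	writel(buf_dma, ioaddr + 0x10);		/* hypothetical TX address register */
	/* ... wait for the device to signal completion ... */
	pci_unmap_single(pdev, buf_dma, skb->len, PCI_DMA_TODEVICE);

	pci_free_consistent(pdev, 4096, ring, ring_dma);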
diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 033541764..418ed812a 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -122,9 +122,10 @@ extern __inline__ pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define page_pte(page) mk_pte(page, __pgprot(0))
/* to find an entry in a page-table-directory */
-#define __pgd_offset(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define __pgd_offset(addr) pgd_index(addr)
-#define pgd_offset(mm, addr) ((mm)->pgd+__pgd_offset(addr))
+#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
diff --git a/include/asm-arm/proc-armo/cache.h b/include/asm-arm/proc-armo/cache.h
index cb0aa1975..5459eca07 100644
--- a/include/asm-arm/proc-armo/cache.h
+++ b/include/asm-arm/proc-armo/cache.h
@@ -6,6 +6,7 @@
#define flush_cache_range(mm,start,end) do { } while (0)
#define flush_cache_page(vma,vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
+#define flush_icache_page(vma,page) do { } while (0)
#define flush_icache_range(start,end) do { } while (0)
/*
diff --git a/include/asm-arm/proc-armo/semaphore.h b/include/asm-arm/proc-armo/semaphore.h
index 19fa29bf9..6926fad12 100644
--- a/include/asm-arm/proc-armo/semaphore.h
+++ b/include/asm-arm/proc-armo/semaphore.h
@@ -1,109 +1,75 @@
/*
- * linux/include/asm-arm/proc-armo/semaphore.h
+ * linux/include/asm-arm/proc-armo/locks.h
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Interrupt safe locking assembler.
*/
-#ifndef __ASM_PROC_SEMAPHORE_H
-#define __ASM_PROC_SEMAPHORE_H
+#ifndef __ASM_PROC_LOCKS_H
+#define __ASM_PROC_LOCKS_H
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-extern inline void down(struct semaphore * sem)
-{
- __asm__ __volatile__ ("
- @ atomic down operation
- mov r0, pc
- orr lr, r0, #0x08000000
- teqp lr, #0
- ldr lr, [%0]
- and r0, r0, #0x0c000003
- subs lr, lr, #1
- str lr, [%0]
- orrmi r0, r0, #0x80000000 @ set N
- teqp r0, #0
- movmi r0, %0
- blmi " SYMBOL_NAME_STR(__down_failed)
- :
- : "r" (sem)
- : "r0", "lr", "cc");
-}
+#define __down_op(ptr,fail) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "@ atomic down operation\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+" ldr lr, [%0]\n" \
+" and r0, r0, #0x0c000003\n" \
+" subs lr, lr, #1\n" \
+" str lr, [%0]\n" \
+" orrmi r0, r0, #0x80000000 @ set N\n" \
+" teqp r0, #0\n" \
+" movmi r0, %0\n" \
+"	blmi	" SYMBOL_NAME_STR(fail) \
+ : \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ })
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-extern inline int down_interruptible (struct semaphore * sem)
-{
- int result;
- __asm__ __volatile__ ("
- @ atomic down operation
- mov r0, pc
- orr lr, r0, #0x08000000
- teqp lr, #0
- ldr lr, [%1]
- and r0, r0, #0x0c000003
- subs lr, lr, #1
- str lr, [%1]
- orrmi r0, r0, #0x80000000 @ set N
- teqp r0, #0
- movmi r0, %1
- movpl r0, #0
- blmi " SYMBOL_NAME_STR(__down_interruptible_failed) "
- mov %0, r0"
- : "=r" (result)
- : "r" (sem)
- : "r0", "lr", "cc");
- return result;
-}
+#define __down_op_ret(ptr,fail) \
+ ({ \
+ unsigned int result; \
+ __asm__ __volatile__ ( \
+" @ down_op_ret\n" \
+" mov r0, pc\n" \
+" orr lr, r0, #0x08000000\n" \
+" teqp lr, #0\n" \
+"	ldr	lr, [%1]\n" \
+" and r0, r0, #0x0c000003\n" \
+" subs lr, lr, #1\n" \
+" str lr, [%1]\n" \
+" orrmi r0, r0, #0x80000000 @ set N\n" \
+" teqp r0, #0\n" \
+" movmi r0, %1\n" \
+" movpl r0, #0\n" \
+" blmi " SYMBOL_NAME_STR(fail) "\n" \
+" mov %0, r0" \
+ : "=&r" (result) \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ result; \
+ })
-extern inline int down_trylock(struct semaphore * sem)
-{
- int result;
- __asm__ __volatile__ ("
- @ atomic down operation
- mov r0, pc
- orr lr, r0, #0x08000000
- teqp lr, #0
- ldr lr, [%1]
- and r0, r0, #0x0c000003
- subs lr, lr, #1
- str lr, [%1]
- orrmi r0, r0, #0x80000000 @ set N
- teqp r0, #0
- movmi r0, %1
- movpl r0, #0
- blmi " SYMBOL_NAME_STR(__down_trylock_failed) "
- mov %0, r0"
- : "=r" (result)
- : "r" (sem)
- : "r0", "lr", "cc");
- return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-extern inline void up(struct semaphore * sem)
-{
- __asm__ __volatile__ ("
- @ atomic up operation
- mov r0, pc
- orr lr, r0, #0x08000000
- teqp lr, #0
- ldr lr, [%0]
- and r0, r0, #0x0c000003
- adds lr, lr, #1
- str lr, [%0]
- orrle r0, r0, #0x80000000 @ set N
- teqp r0, #0
- movmi r0, %0
- blmi " SYMBOL_NAME_STR(__up_wakeup)
- :
- : "r" (sem)
- : "r0", "lr", "cc");
-}
+#define __up_op(ptr,wake) \
+ ({ \
+ __asm__ __volatile__ ( \
+ "@ up_op\n" \
+"	mov	r0, pc\n" \
+"	orr	lr, r0, #0x08000000\n" \
+"	teqp	lr, #0\n" \
+"	ldr	lr, [%0]\n" \
+"	and	r0, r0, #0x0c000003\n" \
+"	adds	lr, lr, #1\n" \
+"	str	lr, [%0]\n" \
+"	orrle	r0, r0, #0x80000000	@ set N\n" \
+"	teqp	r0, #0\n" \
+"	movmi	r0, %0\n" \
+"	blmi	" SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr) \
+ : "r0", "lr", "cc"); \
+ })
#endif
diff --git a/include/asm-arm/proc-armv/cache.h b/include/asm-arm/proc-armv/cache.h
index ac136d6ef..b6e45ebc4 100644
--- a/include/asm-arm/proc-armv/cache.h
+++ b/include/asm-arm/proc-armv/cache.h
@@ -1,3 +1,5 @@
+#include <asm/mman.h>
+
/*
* Cache flushing...
*/
@@ -42,6 +44,12 @@
#define flush_icache_range(_start,_end) \
cpu_flush_icache_area((_start), (_end) - (_start))
+#define flush_icache_page(vma,pg) \
+ do { \
+ if ((vma)->vm_flags & PROT_EXEC) \
+ cpu_flush_icache_page(page_address(pg)); \
+ } while (0)
+
/*
* We don't have a MEMC chip...
*/
diff --git a/include/asm-arm/proc-armv/locks.h b/include/asm-arm/proc-armv/locks.h
new file mode 100644
index 000000000..c1cfded3e
--- /dev/null
+++ b/include/asm-arm/proc-armv/locks.h
@@ -0,0 +1,134 @@
+/*
+ * linux/include/asm-arm/proc-armv/locks.h
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Interrupt safe locking assembler.
+ */
+#ifndef __ASM_PROC_LOCKS_H
+#define __ASM_PROC_LOCKS_H
+
+#define __down_op(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" subs lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, r0\n" \
+" movmi r0, %0\n" \
+" blmi " SYMBOL_NAME_STR(fail) \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "r0", "lr", "cc"); \
+ })
+
+#define __down_op_ret(ptr,fail) \
+ ({ \
+ unsigned int ret; \
+ __asm__ __volatile__( \
+ "@ down_op_ret\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%1]\n" \
+" subs lr, lr, %2\n" \
+" str lr, [%1]\n" \
+" msr cpsr_c, r0\n" \
+" movmi r0, %1\n" \
+" movpl r0, #0\n" \
+" blmi " SYMBOL_NAME_STR(fail) "\n" \
+" mov %0, r0" \
+ : "=&r" (ret) \
+ : "r" (ptr), "I" (1) \
+ : "r0", "lr", "cc"); \
+ ret; \
+ })
+
+#define __up_op(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, r0\n" \
+" movle r0, %0\n" \
+" blle " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "r0", "lr", "cc"); \
+ })
+
+/*
+ * The value 0x01000000 supports up to 128 processors and
+ * lots of processes. BIAS must be chosen such that sub'ing
+ * BIAS once per CPU will result in the long remaining
+ * negative.
+ */
+#define RW_LOCK_BIAS 0x01000000
+
+#define __down_op_write(ptr,fail) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ down_op_write\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" subs lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, r0\n" \
+" movne r0, %0\n" \
+" blne " SYMBOL_NAME_STR(fail) \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "r0", "lr", "cc"); \
+ })
+
+#define __up_op_write(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, r0\n" \
+" movcs r0, %0\n" \
+" blcs " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr), "I" (RW_LOCK_BIAS) \
+ : "r0", "lr", "cc"); \
+ })
+
+#define __down_op_read(ptr,fail) \
+ __down_op(ptr, fail)
+
+#define __up_op_read(ptr,wake) \
+ ({ \
+ __asm__ __volatile__( \
+ "@ up_op_read\n" \
+" mrs r0, cpsr\n" \
+" orr lr, r0, #128\n" \
+" msr cpsr_c, lr\n" \
+" ldr lr, [%0]\n" \
+" adds lr, lr, %1\n" \
+" str lr, [%0]\n" \
+" msr cpsr_c, r0\n" \
+" moveq r0, %0\n" \
+" bleq " SYMBOL_NAME_STR(wake) \
+ : \
+ : "r" (ptr), "I" (1) \
+ : "r0", "lr", "cc"); \
+ })
+
+#endif
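The BIAS scheme described in the comment above can be modelled in plain C to see why the condition codes in __down_op_write and __down_op work out; this is a toy model of the counter only, not the lock implementation.

#define RW_LOCK_BIAS 0x01000000

/* reader: subtract 1; a non-negative result means no writer is involved */
static int model_down_read(long *count)
{
	*count -= 1;
	return *count >= 0;	/* negative -> sleep via __down_read_failed */
}

/* writer: subtract the whole bias; exactly 0 means the lock was free */
static int model_down_write(long *count)
{
	*count -= RW_LOCK_BIAS;
	return *count == 0;	/* non-zero -> contended, __down_write_failed */
}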
diff --git a/include/asm-arm/proc-armv/semaphore.h b/include/asm-arm/proc-armv/semaphore.h
deleted file mode 100644
index 45ceaa3f1..000000000
--- a/include/asm-arm/proc-armv/semaphore.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * linux/include/asm-arm/semaphore.h
- */
-#ifndef __ASM_PROC_SEMAPHORE_H
-#define __ASM_PROC_SEMAPHORE_H
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down" is the actual routine that waits...
- */
-extern inline void down(struct semaphore * sem)
-{
- unsigned int cpsr, temp;
-
- __asm__ __volatile__ ("
- @ atomic down operation
- mrs %0, cpsr
- orr %1, %0, #128 @ disable IRQs
- msr cpsr, %1
- ldr %1, [%2]
- bic %0, %0, #0x80000000 @ clear N
- subs %1, %1, #1
- str %1, [%2]
- orrmi %0, %0, #0x80000000 @ set N
- msr cpsr, %0
- movmi r0, %2
- blmi " SYMBOL_NAME_STR(__down_failed)
- : "=&r" (cpsr), "=&r" (temp)
- : "r" (sem)
- : "r0", "lr", "cc");
-}
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_interruptible" is the actual routine that waits...
- */
-extern inline int down_interruptible (struct semaphore * sem)
-{
- unsigned int cpsr, temp;
-
- __asm__ __volatile__ ("
- @ atomic down interruptible operation
- mrs %0, cpsr
- orr %1, %0, #128 @ disable IRQs
- msr cpsr, %1
- ldr %1, [%2]
- bic %0, %0, #0x80000000 @ clear N
- subs %1, %1, #1
- str %1, [%2]
- orrmi %0, %0, #0x80000000 @ set N
- msr cpsr, %0
- movmi r0, %2
- movpl r0, #0
- blmi " SYMBOL_NAME_STR(__down_interruptible_failed) "
- mov %1, r0"
- : "=&r" (cpsr), "=&r" (temp)
- : "r" (sem)
- : "r0", "lr", "cc");
-
- return temp;
-}
-
-extern inline int down_trylock(struct semaphore *sem)
-{
- unsigned int cpsr, temp;
-
- __asm__ __volatile__ ("
- @ atomic down try lock operation
- mrs %0, cpsr
- orr %1, %0, #128 @ disable IRQs
- msr cpsr, %1
- ldr %1, [%2]
- bic %0, %0, #0x80000000 @ clear N
- subs %1, %1, #1
- str %1, [%2]
- orrmi %0, %0, #0x80000000 @ set N
- msr cpsr, %0
- movmi r0, %2
- movpl r0, #0
- blmi " SYMBOL_NAME_STR(__down_trylock_failed) "
- mov %1, r0"
- : "=&r" (cpsr), "=&r" (temp)
- : "r" (sem)
- : "r0", "lr", "cc");
-
- return temp;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-extern inline void up(struct semaphore * sem)
-{
- unsigned int cpsr, temp;
-
- __asm__ __volatile__ ("
- @ atomic up operation
- mrs %0, cpsr
- orr %1, %0, #128 @ disable IRQs
- msr cpsr, %1
- ldr %1, [%2]
- bic %0, %0, #0x80000000 @ clear N
- adds %1, %1, #1
- str %1, [%2]
- orrle %0, %0, #0x80000000 @ set N
- msr cpsr, %0
- movmi r0, %2
- blmi " SYMBOL_NAME_STR(__up_wakeup)
- : "=&r" (cpsr), "=&r" (temp)
- : "r" (sem)
- : "r0", "lr", "cc");
-}
-
-#endif
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 5ba171be8..0ce171a3f 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -9,16 +9,28 @@
#include <linux/wait.h>
#include <asm/atomic.h>
+#include <asm/proc/locks.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
};
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (long)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
+
#define __SEMAPHORE_INIT(name,count) \
{ ATOMIC_INIT(count), 0, \
- __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INIT(name,1)
@@ -29,12 +41,15 @@ struct semaphore {
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
-#define sema_init(sem, val) \
-do { \
- atomic_set(&((sem)->count), (val)); \
- (sem)->sleepers = 0; \
- init_waitqueue_head(&(sem)->wait); \
-} while (0)
+extern inline void sema_init(struct semaphore *sem, int val)
+{
+ atomic_set(&sem->count, val);
+ sem->sleepers = 0;
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+#endif
+}
static inline void init_MUTEX(struct semaphore *sem)
{
@@ -46,18 +61,196 @@ static inline void init_MUTEX_LOCKED(struct semaphore *sem)
sema_init(sem, 0);
}
-asmlinkage void __down_failed (void /* special register calling convention */);
-asmlinkage int __down_interruptible_failed (void /* special register calling convention */);
-asmlinkage int __down_trylock_failed(void /* params in registers */);
-asmlinkage void __up_wakeup (void /* special register calling convention */);
+/*
+ * special register calling convention
+ */
+asmlinkage void __down_failed(void);
+asmlinkage int __down_interruptible_failed(void);
+asmlinkage int __down_trylock_failed(void);
+asmlinkage void __up_wakeup(void);
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);
-extern spinlock_t semaphore_wake_lock;
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "__down" is the actual routine that waits...
+ */
+extern inline void down(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ __down_op(sem, __down_failed);
+}
+
+/*
+ * This is ugly, but we want the default case to fall through.
+ * "__down_interruptible" is the actual routine that waits...
+ */
+extern inline int down_interruptible (struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ return __down_op_ret(sem, __down_interruptible_failed);
+}
+
+extern inline int down_trylock(struct semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ return __down_op_ret(sem, __down_trylock_failed);
+}
+
+/*
+ * Note! This is subtle. We jump to wake people up only if
+ * the semaphore was negative (== somebody was waiting on it).
+ * The default case (no contention) will result in NO
+ * jumps for both down() and up().
+ */
+extern inline void up(struct semaphore * sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ __up_op(sem, __up_wakeup);
+}
+
+/* rw mutexes (should that be mutices? =) -- throw rw
+ * spinlocks and semaphores together, and this is what we
+ * end up with...
+ *
+ * The lock is initialized to BIAS. This way, a writer
+ * subtracts BIAS and gets 0 for the case of an uncontended
+ * lock. Readers decrement by 1 and see a positive value
+ * when uncontended, negative if there are writers waiting
+ * (in which case it goes to sleep).
+ *
+ * In terms of fairness, this should result in the lock
+ * flopping back and forth between readers and writers
+ * under heavy use.
+ *
+ * -ben
+ */
+struct rw_semaphore {
+ atomic_t count;
+ volatile unsigned char write_bias_granted;
+ volatile unsigned char read_bias_granted;
+ volatile unsigned char pad1;
+ volatile unsigned char pad2;
+ wait_queue_head_t wait;
+ wait_queue_head_t write_bias_wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+ atomic_t readers;
+ atomic_t writers;
+#endif
+};
+
+#if WAITQUEUE_DEBUG
+#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
+ __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
+ __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
+
+#define __DECLARE_RWSEM_GENERIC(name,count) \
+ struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
+
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
+
+extern inline void init_rwsem(struct rw_semaphore *sem)
+{
+ atomic_set(&sem->count, RW_LOCK_BIAS);
+ sem->read_bias_granted = 0;
+ sem->write_bias_granted = 0;
+ init_waitqueue_head(&sem->wait);
+ init_waitqueue_head(&sem->write_bias_wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (long)&sem->__magic;
+ atomic_set(&sem->readers, 0);
+ atomic_set(&sem->writers, 0);
+#endif
+}
+
+extern struct rw_semaphore *__down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *__down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);
+
+extern inline void down_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ __down_op_read(sem, __down_read_failed);
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_inc(&sem->readers);
+#endif
+}
+
+extern inline void down_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+ __down_op_write(sem, __down_write_failed);
+#if WAITQUEUE_DEBUG
+ if (atomic_read(&sem->writers))
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ atomic_inc(&sem->writers);
+#endif
+}
+
+extern inline void up_read(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->writers))
+ BUG();
+ atomic_dec(&sem->readers);
+#endif
+ __up_op_read(sem, __rwsem_wake);
+}
-#include <asm/proc/semaphore.h>
+extern inline void up_write(struct rw_semaphore *sem)
+{
+#if WAITQUEUE_DEBUG
+ if (sem->read_bias_granted)
+ BUG();
+ if (sem->write_bias_granted)
+ BUG();
+ if (atomic_read(&sem->readers))
+ BUG();
+ if (atomic_read(&sem->writers) != 1)
+ BUG();
+ atomic_dec(&sem->writers);
+#endif
+ __up_op_write(sem, __rwsem_wake);
+}
#endif
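A short usage sketch for the rw-semaphore API defined above; foo_table and its index are hypothetical, and only DECLARE_RWSEM, down_read/up_read and down_write/up_write come from this header.

static DECLARE_RWSEM(foo_sem);
static int foo_table[16];		/* hypothetical shared data */

int foo_read_entry(int i)
{
	int val;

	down_read(&foo_sem);		/* many readers may hold this at once */
	val = foo_table[i];
	up_read(&foo_sem);
	return val;
}

void foo_write_entry(int i, int val)
{
	down_write(&foo_sem);		/* exclusive: waits for readers and writers */
	foo_table[i] = val;
	up_write(&foo_sem);
}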
diff --git a/include/asm-arm/softirq.h b/include/asm-arm/softirq.h
index 28ac2eb2a..f98754813 100644
--- a/include/asm-arm/softirq.h
+++ b/include/asm-arm/softirq.h
@@ -9,72 +9,9 @@ extern unsigned int local_bh_count[NR_CPUS];
#define cpu_bh_disable(cpu) do { local_bh_count[(cpu)]++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count[(cpu)]--; } while (0)
-#define cpu_bh_trylock(cpu) (local_bh_count[(cpu)] ? 0 : (local_bh_count[(cpu)] = 1))
-#define cpu_bh_endlock(cpu) (local_bh_count[(cpu)] = 0)
-
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
-#define get_active_bhs() (bh_mask & bh_active)
-#define clear_active_bhs(x) atomic_clear_mask((x),&bh_active)
-
-extern inline void init_bh(int nr, void (*routine)(void))
-{
- bh_base[nr] = routine;
- atomic_set(&bh_mask_count[nr], 0);
- bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- mb();
- bh_base[nr] = NULL;
-}
-
-extern inline void mark_bh(int nr)
-{
- set_bit(nr, &bh_active);
-}
-
-#ifdef __SMP__
-#error SMP not supported
-#else
-
-extern inline void start_bh_atomic(void)
-{
- local_bh_disable();
- barrier();
-}
-
-extern inline void end_bh_atomic(void)
-{
- barrier();
- local_bh_enable();
-}
-
-/* These are for the irq's testing the lock */
-#define softirq_trylock(cpu) (cpu_bh_trylock(cpu))
-#define softirq_endlock(cpu) (cpu_bh_endlock(cpu))
-#define synchronize_bh() barrier()
-
-#endif /* SMP */
-
-/*
- * These use a mask count to correctly handle
- * nested disable/enable calls
- */
-extern inline void disable_bh(int nr)
-{
- bh_mask &= ~(1 << nr);
- atomic_inc(&bh_mask_count[nr]);
- synchronize_bh();
-}
-
-extern inline void enable_bh(int nr)
-{
- if (atomic_dec_and_test(&bh_mask_count[nr]))
- bh_mask |= 1 << nr;
-}
+#define in_softirq() (local_bh_count[smp_processor_id()] != 0)
#endif /* __ASM_SOFTIRQ_H */
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h
index 277d778a1..3e4912a5c 100644
--- a/include/asm-arm/unaligned.h
+++ b/include/asm-arm/unaligned.h
@@ -1,6 +1,8 @@
#ifndef __ASM_ARM_UNALIGNED_H
#define __ASM_ARM_UNALIGNED_H
+#include <linux/types.h>
+
#define get_unaligned(ptr) \
((__typeof__(*(ptr)))__get_unaligned_size((ptr), sizeof(*(ptr))))
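For example (illustrative only; the frame pointer and header offset are hypothetical), pulling a 32-bit field out of a byte-aligned network header, where a plain pointer dereference would not be a normal aligned word load on ARM:

	unsigned char *p = frame + 14;			/* not necessarily 4-byte aligned */
	__u32 daddr = get_unaligned((__u32 *)p);	/* safe regardless of alignment */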