author | Ralf Baechle <ralf@linux-mips.org> | 2000-02-24 00:12:35 +0000
---|---|---
committer | Ralf Baechle <ralf@linux-mips.org> | 2000-02-24 00:12:35 +0000
commit | 482368b1a8e45430672c58c9a42e7d2004367126 (patch) |
tree | ce2a1a567d4d62dee7c2e71a46a99cf72cf1d606 /include/asm-arm/pci.h |
parent | e4d0251c6f56ab2e191afb70f80f382793e23f74 (diff) |
Merge with 2.3.47. Guys, this is buggy as shit. You've been warned.
Diffstat (limited to 'include/asm-arm/pci.h')
-rw-r--r-- | include/asm-arm/pci.h | 160
1 file changed, 159 insertions(+), 1 deletion(-)
diff --git a/include/asm-arm/pci.h b/include/asm-arm/pci.h
index 5505909d4..e0de271b3 100644
--- a/include/asm-arm/pci.h
+++ b/include/asm-arm/pci.h
@@ -3,5 +3,163 @@
 #define pcibios_assign_all_busses() 0
 
-#endif
+#define PCIBIOS_MIN_IO  0x8000
+#define PCIBIOS_MIN_MEM 0x40000000
+
+#ifdef __KERNEL__
+
+#include <asm/scatterlist.h>
+#include <asm/io.h>
+
+struct pci_dev;
+
+/* Allocate and map kernel buffer using consistent mode DMA for a device.
+ * hwdev should be a valid struct pci_dev pointer for PCI devices,
+ * NULL for PCI-like buses (ISA, EISA).
+ * Returns a non-NULL cpu-view pointer to the buffer if successful and
+ * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
+ * is undefined.
+ */
+#define pci_alloc_consistent(hwdev,size,handle)		\
+	({						\
+		void *__ret;				\
+		int __gfp = GFP_KERNEL;			\
+							\
+		if ((hwdev) == NULL ||			\
+		    (hwdev)->dma_mask != 0xffffffff)	\
+			__gfp |= GFP_DMA;		\
+							\
+		__ret = consistent_alloc(__gfp, (size),	\
+					 (handle));	\
+		__ret;					\
+	})
+
+/* Free and unmap a consistent DMA buffer.
+ * cpu_addr is what was returned from pci_alloc_consistent,
+ * size must be the same as what was passed into pci_alloc_consistent,
+ * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ *
+ * References to the memory and mappings associated with cpu_addr/dma_addr
+ * past this call are illegal.
+ */
+extern inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
+		    dma_addr_t dma_handle)
+{
+	consistent_free(vaddr);
+}
+
+/* Map a single buffer of the indicated size for DMA in streaming mode.
+ * The 32-bit bus address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory
+ * until either pci_unmap_single or pci_dma_sync_single is performed.
+ */
+extern inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+	consistent_sync(ptr, size, 3);
+	return virt_to_bus(ptr);
+}
+
+/* Unmap a single streaming mode DMA translation.  The dma_addr and size
+ * must match what was provided for in a previous pci_map_single call.  All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+extern inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
+{
+	/* nothing to do */
+}
+
+/* Map a set of buffers described by scatterlist in streaming
+ * mode for DMA.  This is the scatter-gather version of the
+ * above pci_map_single interface.  Here the scatter gather list
+ * elements are each tagged with the appropriate dma address
+ * and length.  They are obtained via sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
+ *       DMA address/length pairs than there are SG table elements.
+ *       (for example via virtual mapping capabilities)
+ *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for pci_map_single are
+ * the same here.
+ */
+extern inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+	int i;
+	for (i = 0; i < nents; i++, sg++)
+		consistent_sync(sg->address, sg->length, 3);
+
+	return nents;
+}
+
+/* Unmap a set of streaming mode DMA translations.
+ * Again, cpu read rules concerning calls here are the same as for
+ * pci_unmap_single() above.
+ */
+extern inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+{
+	/* nothing to do */
+}
+
+/* Make physical memory consistent for a single
+ * streaming mode DMA translation after a transfer.
+ *
+ * If you perform a pci_map_single() but wish to interrogate the
+ * buffer using the cpu, yet do not wish to tear down the PCI dma
+ * mapping, you must call this function before doing so.  At the
+ * next point you give the PCI dma address back to the card, the
+ * device again owns the buffer.
+ */
+extern inline void
+pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
+{
+	consistent_sync(bus_to_virt(dma_handle), size, 3);
+}
+
+/* Make physical memory consistent for a set of streaming
+ * mode DMA translations after a transfer.
+ *
+ * The same as pci_dma_sync_single but for a scatter-gather list,
+ * same rules and usage.
+ */
+extern inline void
+pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
+{
+	int i;
+
+	for (i = 0; i < nelems; i++, sg++)
+		consistent_sync(sg->address, sg->length, 3);
+}
+
+/* Return whether the given PCI device DMA address mask can
+ * be supported properly.  For example, if your device can
+ * only drive the low 24-bits during PCI bus mastering, then
+ * you would pass 0x00ffffff as the mask to this function.
+ */
+extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+	return 1;
+}
+
+/* These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) (virt_to_bus((sg)->address))
+#define sg_dma_len(sg)     ((sg)->length)
+
+#endif /* __KERNEL__ */
+
+#endif
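To make the consistent-mode half of this interface concrete, here is a minimal sketch of how a driver of this era might allocate and release a descriptor ring through pci_alloc_consistent() and pci_free_consistent(). It is an illustration only, not part of the patch: the device pointer pdev, the my_ring_desc layout, and MY_RING_ENTRIES are hypothetical placeholders for a real driver's state.

```c
#include <linux/pci.h>		/* pulls in the <asm/pci.h> interface above */
#include <linux/types.h>
#include <linux/errno.h>

#define MY_RING_ENTRIES	16	/* hypothetical ring size */

struct my_ring_desc {		/* hypothetical device descriptor layout */
	u32	buf_addr;
	u32	buf_len;
};

static struct my_ring_desc *my_ring;	/* CPU view of the ring */
static dma_addr_t my_ring_dma;		/* bus address the device uses */

static int my_ring_alloc(struct pci_dev *pdev)
{
	/* The macro adds GFP_DMA by itself when pdev->dma_mask shows the
	 * device cannot address the full 32 bits. */
	my_ring = pci_alloc_consistent(pdev,
				       MY_RING_ENTRIES * sizeof(*my_ring),
				       &my_ring_dma);
	if (my_ring == NULL)
		return -ENOMEM;

	/* my_ring is the CPU view, my_ring_dma is what the device sees;
	 * a real driver would now write my_ring_dma into a device register. */
	return 0;
}

static void my_ring_free(struct pci_dev *pdev)
{
	/* Size and bus address must match the original allocation. */
	pci_free_consistent(pdev, MY_RING_ENTRIES * sizeof(*my_ring),
			    my_ring, my_ring_dma);
}
```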
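The streaming-mode calls are used differently: a buffer is handed to the device for the duration of one transfer and then unmapped or re-synced. The sketch below, again with hypothetical pdev, buf and sglist arguments, follows the ownership rules described in the comments above; the PCI_DMA_TODEVICE direction constant is assumed to be available from <linux/pci.h>, and this ARM implementation ignores the direction argument in any case.

```c
#include <linux/pci.h>
#include <linux/kernel.h>
#include <asm/scatterlist.h>

/* Hypothetical transmit path: one linear buffer plus a scatterlist. */
static void my_xmit(struct pci_dev *pdev, void *buf, size_t len,
		    struct scatterlist *sglist, int nents)
{
	dma_addr_t bus;
	int i, mapped;

	/* Single buffer: the device owns it until it is unmapped or
	 * pci_dma_sync_single() gives it back to the CPU. */
	bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	/* ... tell the (hypothetical) device to DMA from `bus' ... */
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);

	/* Scatter-gather: only the first `mapped' entries are valid. */
	mapped = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
	for (i = 0; i < mapped; i++) {
		dma_addr_t entry_addr = sg_dma_address(&sglist[i]);
		unsigned int entry_len = sg_dma_len(&sglist[i]);

		/* A real driver would program one device descriptor here. */
		printk("sg %d: bus 0x%08lx len %u\n",
		       i, (unsigned long)entry_addr, entry_len);
	}
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
}
```

Note that on this ARM implementation mapping is just a cache sync plus virt_to_bus(), so the unmap calls are no-ops and pci_dma_supported() can simply return 1.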