#define pcibios_assign_all_busses()	0

#define PCIBIOS_MIN_IO		0x8000
#define PCIBIOS_MIN_MEM		0x40000000

extern inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

extern inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#include <asm/scatterlist.h>

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
				  dma_addr_t *handle);

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	consistent_free(vaddr);
}

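/* A minimal usage sketch (illustrative only, not part of this header):
 * a driver might allocate a small descriptor ring in consistent memory at
 * probe time and release it at remove time.  "pdev", "RING_BYTES", "ring"
 * and "ring_dma" are hypothetical names.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	...
 *	pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */
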
/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
extern inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	consistent_sync(ptr, size, direction);
	return virt_to_bus(ptr);
}

/* Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous pci_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
extern inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	/* Nothing to do */
}

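/* Illustrative sketch (hypothetical names, not part of this header):
 * a transmit path might map a driver-owned buffer, hand the bus address
 * to the device, and unmap it once the hardware signals completion.
 *
 *	dma_addr_t buf_dma;
 *
 *	buf_dma = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 *	... tell the device to DMA from buf_dma, wait for completion ...
 *	pci_unmap_single(pdev, buf_dma, skb->len, PCI_DMA_TODEVICE);
 */
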
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
extern inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
	   int direction)
{
	int i;

	for (i = 0; i < nents; i++, sg++)
		consistent_sync(sg->address, sg->length, direction);

	return nents;
}

/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
extern inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	/* Nothing to do */
}

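/* Illustrative sketch (hypothetical names): mapping a scatterlist and
 * honouring the count that pci_map_sg() returns, which may be smaller
 * than the number of entries handed in.
 *
 *	int i, count;
 *
 *	count = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *	for (i = 0; i < count; i++)
 *		... program entry i into the device ...
 *	... after the transfer completes ...
 *	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 */
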
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
extern inline void
pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size,
		    int direction)
{
	consistent_sync(bus_to_virt(dma_handle), size, direction);
}

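/* Illustrative sketch (hypothetical names): peeking at a receive buffer
 * from the cpu without tearing down the mapping.  After the sync the cpu
 * may read the data; once the address is handed back to the card, the
 * device owns the buffer again.
 *
 *	pci_dma_sync_single(pdev, rx_dma, rx_len, PCI_DMA_FROMDEVICE);
 *	... inspect the buffer contents with the cpu ...
 *	... give rx_dma back to the device for the next transfer ...
 */
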
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
extern inline void
pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems,
		int direction)
{
	int i;

	for (i = 0; i < nelems; i++, sg++)
		consistent_sync(sg->address, sg->length, direction);
}

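/* Illustrative sketch (hypothetical names): the same idea for a mapped
 * scatterlist that the cpu wants to examine mid-stream.
 *
 *	pci_dma_sync_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
 *	... examine the buffers, then hand them back to the device ...
 */
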
/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
extern inline int pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
{
	return 1;
}

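/* Illustrative sketch (hypothetical names): a probe routine would normally
 * verify the card's addressing limits before setting up any mappings, e.g.
 * for a device that can only drive the low 24 bits:
 *
 *	if (!pci_dma_supported(pdev, 0x00ffffff))
 *		return -EIO;
 */
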
/* These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)	(virt_to_bus((sg)->address))
#define sg_dma_len(sg)		((sg)->length)

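/* Illustrative sketch (hypothetical names): walking the mapped list with
 * the accessor macros rather than touching the scatterlist fields directly.
 * "setup_descriptor" stands in for whatever programs the device.
 *
 *	for (i = 0; i < count; i++)
 *		setup_descriptor(dev, sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 */
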
#endif /* __KERNEL__ */