/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>
#include <linux/iommu-helper.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
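
/* Note on the pte encoding (derived from mk_iommu_pte above, not from
   chip documentation): for a page-aligned paddr this is equivalent to
   ((paddr >> PAGE_SHIFT) << 1) | 1, i.e. the page frame number starts
   at bit 1 and bit 0 is the valid bit.  As an illustration only, with
   alpha's 8KB pages (PAGE_SHIFT == 13) a page at physical 0x4000
   (pfn 2) yields the pte value 0x5.  */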

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
        unsigned long mem = max_low_pfn << PAGE_SHIFT;
        if (mem < max)
                max = roundup_pow_of_two(mem);
        return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
                     unsigned long window_size, unsigned long align)
{
        unsigned long mem_size;
        struct pci_iommu_arena *arena;

        mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

        /* Note that the TLB lookup logic uses bitwise concatenation,
           not addition, so the required arena alignment is based on
           the size of the window.  Retain the align parameter so that
           particular systems can over-align the arena.  */
        if (align < mem_size)
                align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

        arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
        if (!NODE_DATA(nid) || !arena) {
                printk("%s: couldn't allocate arena from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __func__, nid);
                arena = alloc_bootmem(sizeof(*arena));
        }

        arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
        if (!NODE_DATA(nid) || !arena->ptes) {
                printk("%s: couldn't allocate arena ptes from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __func__, nid);
                arena->ptes = __alloc_bootmem(mem_size, align, 0);
        }

#else /* CONFIG_DISCONTIGMEM */

        arena = alloc_bootmem(sizeof(*arena));
        arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs.  */
        arena->align_entry = 1;

        return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
                unsigned long window_size, unsigned long align)
{
        return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
                       long n, long mask)
{
        unsigned long *ptes;
        long i, p, nent;
        int pass = 0;
        unsigned long base;
        unsigned long boundary_size;

        base = arena->dma_base >> PAGE_SHIFT;
        if (dev) {
                boundary_size = dma_get_seg_boundary(dev) + 1;
                boundary_size >>= PAGE_SHIFT;
        } else {
                boundary_size = 1UL << (32 - PAGE_SHIFT);
        }

        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = ALIGN(arena->next_entry, mask + 1);
        i = 0;

again:
        while (i < n && p+i < nent) {
                if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
                        p = ALIGN(p + 1, mask + 1);
                        goto again;
                }

                if (ptes[p+i])
                        p = ALIGN(p + i + 1, mask + 1), i = 0;
                else
                        i = i + 1;
        }

        if (i < n) {
                if (pass < 1) {
                        /*
                         * Reached the end.  Flush the TLB and restart
                         * the search from the beginning.
                         */
                        alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                        pass++;
                        p = 0;
                        i = 0;
                        goto again;
                } else
                        return -1;
        }

        /* Success.  It's the responsibility of the caller to mark them
           in use before releasing the lock.  */
        return p;
}
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
                  unsigned int align)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
        p = iommu_arena_find_pages(dev, arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all in use, ie not zero and invalid
           for the iommu tlb that could load them from under us.
           The chip specific bits will fill this in with something
           kosher when we return.  */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}
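
/* Worked example of the mask arithmetic above (illustration only): for
   the ISA-bridge case pci_map_single_1() passes align == 8, so mask is 7
   and iommu_arena_find_pages() only tries candidate offsets rounded up
   by ALIGN() to a multiple of 8 ptes -- with 8KB pages that is the 64KB
   boundary mentioned there.  */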
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
        unsigned long *p;
        long i;

        p = arena->ptes + ofs;
        for (i = 0; i < n; ++i)
                p[i] = 0;
}

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);
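
/* DAC here is PCI dual-address-cycle (64-bit) addressing.  When it is
   usable, pci_map_single_1() and sg_fill() below simply add
   alpha_mv.pci_dac_offset to the physical address instead of going
   through the direct-map window or a scatter-gather arena.  */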

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;
        struct device *dev = pdev ? &pdev->dev : NULL;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window.  */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }
#endif

        /* Next, use DAC if selected earlier.  */
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }

        /* If the machine doesn't define a pci_tbi routine, we have to
           assume it doesn't support sg mapping, and, since we tried to
           use direct_map above, it now must be considered an error.  */
        if (! alpha_mv.mv_pci_tbi) {
                static int been_here = 0; /* Only print the message once. */
                if (!been_here) {
                        printk(KERN_WARNING "pci_map_single: no HW sg\n");
                        been_here = 1;
                }
                return 0;
        }

        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        npages = calc_npages((paddr & ~PAGE_MASK) + size);

        /* Force allocation to 64KB boundary for ISA bridges. */
        if (pdev && pdev == isa_bridge)
                align = 8;
        dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
                return 0;
        }

        paddr &= PAGE_MASK;
        for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;

        DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));

        return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
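
/* Usage sketch (not from this file; 'buf', 'len' and the error path are
   hypothetical): a driver maps a buffer for a device-to-memory transfer,
   hands the returned bus address to the hardware, and unmaps it when the
   transfer completes.  A return value of 0 means the mapping failed.

        dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
        if (!bus)
                return -ENOMEM;
        ... program the device with 'bus' and wait for completion ...
        pci_unmap_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
 */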
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
             size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                 int direction)
{
        unsigned long flags;
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Nothing to do.  */

                DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));

                return;
        }

        if (dma_addr > 0xffffffff) {
                DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));
                return;
        }

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
                       " base %lx size %x\n", dma_addr, arena->dma_base,
                       arena->size);
                return;
                BUG();
        }

        npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

        spin_lock_irqsave(&arena->lock, flags);

        iommu_arena_free(arena, dma_ofs, npages);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
               size_t size, int direction)
{
        pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
__pci_alloc_consistent(struct pci_dev *pdev, size_t size,
                       dma_addr_t *dma_addrp, gfp_t gfp)
{
        void *cpu_addr;
        long order = get_order(size);

        gfp &= ~GFP_DMA;

try_again:
        cpu_addr = (void *)__get_free_pages(gfp, order);
        if (! cpu_addr) {
                printk(KERN_INFO "pci_alloc_consistent: "
                       "get_free_pages failed from %p\n",
                       __builtin_return_address(0));
                /* ??? Really atomic allocation?  Otherwise we could play
                   with vmalloc and sg if we can't find contiguous memory.  */
                return NULL;
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
                        return NULL;
                /* The address doesn't fit the required mask and we
                   do not have an iommu.  Try again with GFP_DMA.  */
                gfp |= GFP_DMA;
                goto try_again;
        }

        DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
              size, cpu_addr, *dma_addrp, __builtin_return_address(0));

        return cpu_addr;
}
EXPORT_SYMBOL(__pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
                    dma_addr_t dma_addr)
{
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
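
/* Usage sketch (illustration only; 'RING_BYTES' and the ring layout are
   hypothetical): a descriptor ring shared by CPU and device is allocated
   once and freed on teardown with the same size and both addresses.

        dma_addr_t ring_dma;
        void *ring = __pci_alloc_consistent(pdev, RING_BYTES, &ring_dma,
                                            GFP_ATOMIC);
        if (!ring)
                return -ENOMEM;
        ... hand ring_dma to the device, access the ring via 'ring' ...
        pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
 */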

/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
            int virt_ok)
{
        unsigned long next_paddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;
        unsigned int max_seg_size;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

        /* We will not merge sg entries without a device.  */
        max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;

                if (leader_length + len > max_seg_size)
                        goto new_segment;

                if (next_paddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
new_segment:
                        leader->dma_address = leader_flag;
                        leader->dma_length = leader_length;
                        leader = sg;
                        leader_flag = 0;
                        leader_length = len;
                }

                next_paddr = addr + len;
        }

        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
}
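
/* Classification example (illustration only): three one-page entries,
   the first two physically adjacent and the third merely page-aligned
   against the second, come out as

        entry 0: dma_address  1 (leader, has virtually adjacent followers),
                 dma_length   3 * PAGE_SIZE
        entry 1: dma_address -1 (physically adjacent to previous)
        entry 2: dma_address -2 (virtually adjacent to previous)

   assuming virt_ok and a sufficiently large max_seg_size.  */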

/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
        struct scatterlist *out, struct pci_iommu_arena *arena,
        dma_addr_t max_dma, int dac_allowed)
{
        unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
        long size = leader->dma_length;
        struct scatterlist *sg;
        unsigned long *ptes;
        long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it.  */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }
#endif

        /* If physically contiguous and DAC is available, use it.  */
        if (leader->dma_address == 0 && dac_allowed) {
                out->dma_address = paddr + alpha_mv.pci_dac_offset;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }

        /* Otherwise, we'll use the iommu to make the pages virtually
           contiguous.  */

        paddr &= ~PAGE_MASK;
        npages = calc_npages(paddr + size);
        dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
        if (dma_ofs < 0) {
                /* If we attempted a direct map above but failed, die.  */
                if (leader->dma_address == 0)
                        return -1;

                /* Otherwise, break up the remaining virtually contiguous
                   hunks into individual direct maps and retry.  */
                sg_classify(dev, leader, end, 0);
                return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
        }

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
             __va(paddr), size, out->dma_address, npages);

        /* All virtually contiguous.  We need to find the length of each
           physically contiguous subsegment to fill in the ptes.  */
        ptes = &arena->ptes[dma_ofs];
        sg = leader;
        do {
#if DEBUG_ALLOC > 0
                struct scatterlist *last_sg = sg;
#endif

                size = sg->length;
                paddr = SG_ENT_PHYS_ADDRESS(sg);

                while (sg+1 < end && (int) sg[1].dma_address == -1) {
                        size += sg[1].length;
                        sg++;
                }

                npages = calc_npages((paddr & ~PAGE_MASK) + size);

                paddr &= PAGE_MASK;
                for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                        *ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
                DBGA("    (%ld) [%p,%x] np %ld\n",
                     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                     last_sg->length, npages);
                while (++last_sg <= sg) {
                        DBGA("        (%ld) [%p,%x] cont\n",
                             last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                             last_sg->length);
                }
#endif
        } while (++sg < end && (int) sg->dma_address < 0);

        return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
           int direction)
{
        struct scatterlist *start, *end, *out;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;
        int dac_allowed;
        struct device *dev;

        if (direction == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

        dev = pdev ? &pdev->dev : NULL;

        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                                     sg->length, dac_allowed);
                return sg->dma_address != 0;
        }

        start = sg;
        end = sg + nents;

        /* First, prepare information about the entries.  */
        sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

        /* Second, figure out where we're going to map things.  */
        if (alpha_mv.mv_pci_tbi) {
                hose = pdev ? pdev->sysdata : pci_isa_hose;
                max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
                arena = hose->sg_pci;
                if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                        arena = hose->sg_isa;
        } else {
                max_dma = -1;
                arena = NULL;
                hose = NULL;
        }

        /* Third, iterate over the scatterlist leaders and allocate
           dma space as needed.  */
        for (out = sg; sg < end; ++sg) {
                if ((int) sg->dma_address < 0)
                        continue;
                if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
                        goto error;
                out++;
        }

        /* Mark the end of the list for pci_unmap_sg.  */
        if (out < end)
                out->dma_length = 0;

        if (out - start == 0)
                printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

 error:
        printk(KERN_WARNING "pci_map_sg failed: "
               "could not allocate dma page tables\n");

        /* Some allocation failed while mapping the scatterlist
           entries.  Unmap them now.  */
        if (out > start)
                pci_unmap_sg(pdev, start, out - start, direction);
        return 0;
}
EXPORT_SYMBOL(pci_map_sg);
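
/* Usage sketch (illustration only; program_hw() is a hypothetical driver
   helper): map a scatterlist, program the device with the merged entries
   pci_map_sg() produced, then unmap using the *original* nents.

        int i, n = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
        if (!n)
                return -ENOMEM;
        for (i = 0; i < n; i++)
                program_hw(sg_dma_address(&sglist[i]), sg_dma_len(&sglist[i]));
        ...
        pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
 */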

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
             int direction)
{
        unsigned long flags;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        struct scatterlist *end;
        dma_addr_t max_dma;
        dma_addr_t fbeg, fend;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (! alpha_mv.mv_pci_tbi)
                return;

        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        spin_lock_irqsave(&arena->lock, flags);

        for (end = sg + nents; sg < end; ++sg) {
                dma64_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;

                addr = sg->dma_address;
                size = sg->dma_length;
                if (!size)
                        break;

                if (addr > 0xffffffff) {
                        /* It's a DAC address -- nothing to do.  */
                        DBGA("    (%ld) DAC [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                if (addr >= __direct_map_base
                    && addr < __direct_map_base + __direct_map_size) {
                        /* Nothing to do.  */
                        DBGA("    (%ld) direct [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                DBGA("    (%ld) sg [%lx,%lx]\n",
                     sg - end + nents, addr, size);

                npages = calc_npages((addr & ~PAGE_MASK) + size);
                ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                iommu_arena_free(arena, ofs, npages);

                tend = addr + size - 1;
                if (fbeg > addr) fbeg = addr;
                if (fend < tend) fend = tend;
        }

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;

        /* If there exists a direct map, check whether the mask fits either
           the entire direct mapped space or the total system memory as
           shifted by the map base.  */
        if (__direct_map_size != 0
            && (__direct_map_base + __direct_map_size - 1 <= mask ||
                __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
                return 1;

        /* Check that we have a scatter-gather arena that fits.  */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        /* As a last resort try ZONE_DMA.  */
        if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
                return 1;

        return 0;
}
EXPORT_SYMBOL(pci_dma_supported);

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes.  */
        ptes = arena->ptes;
        p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all reserved (ie not zero and invalid)
           for the iommu tlb that could load them from under us.
           They will be filled in with valid bits by _bind().  */
        for (i = 0; i < pg_count; ++i)
                ptes[p+i] = IOMMU_RESERVED_PTE;

        arena->next_entry = p + pg_count;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *ptes;
        long i;

        if (!arena) return -EINVAL;

        ptes = arena->ptes;

        /* Make sure they're all reserved first... */
        for (i = pg_start; i < pg_start + pg_count; i++)
                if (ptes[i] != IOMMU_RESERVED_PTE)
                        return -EBUSY;

        iommu_arena_free(arena, pg_start, pg_count);
        return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
           unsigned long *physaddrs)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, j;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        ptes = arena->ptes;

        for (j = pg_start; j < pg_start + pg_count; j++) {
                if (ptes[j] != IOMMU_RESERVED_PTE) {
                        spin_unlock_irqrestore(&arena->lock, flags);
                        return -EBUSY;
                }
        }

        for (i = 0, j = pg_start; i < pg_count; i++, j++)
                ptes[j] = mk_iommu_pte(physaddrs[i]);

        spin_unlock_irqrestore(&arena->lock, flags);

        return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *p;
        long i;

        if (!arena) return -EINVAL;

        p = arena->ptes + pg_start;
        for (i = 0; i < pg_count; i++)
                p[i] = IOMMU_RESERVED_PTE;

        return 0;
}
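
/* Expected calling order for the GART hooks above (a sketch based on the
   comments in iommu_reserve(); 'arena', 'pg_count', 'align_mask' and
   'physaddrs' are the caller's): reserve a window of ptes, bind physical
   pages into it, unbind them, and finally release the window.  Release
   only succeeds once every pte is back to IOMMU_RESERVED_PTE.

        long ofs = iommu_reserve(arena, pg_count, align_mask);
        if (ofs < 0)
                return -ENOMEM;
        if (iommu_bind(arena, ofs, pg_count, physaddrs) < 0)
                ...;
        ...
        iommu_unbind(arena, ofs, pg_count);
        iommu_release(arena, ofs, pg_count);
 */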

/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
        dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;

        /* If this is not set, the machine doesn't support DAC at all.  */
        if (dac_offset == 0)
                ok = 0;

        /* The device has to be able to address our DAC bit.  */
        if ((dac_offset & dev->dma_mask) != dac_offset)
                ok = 0;

        /* If both conditions above are met, we are fine. */
        DBGA("pci_dac_dma_supported %s from %p\n",
             ok ? "yes" : "no", __builtin_return_address(0));

        return ok;
}

/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
        if (dev && dev->bus == &pci_bus_type)
                return to_pci_dev(dev);

        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
           BUG() otherwise. */
        BUG_ON(!isa_bridge);

        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
           bridge is bus master then). */
        if (!dev || !dev->dma_mask || !*dev->dma_mask)
                return isa_bridge;

        /* For EISA bus masters, return isa_bridge (it might have smaller
           dma_mask due to wiring limitations). */
        if (*dev->dma_mask >= isa_bridge->dma_mask)
                return isa_bridge;

        /* This assumes ISA bus master with dma_mask 0xffffff. */
        return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask ||
            !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);