/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/log2.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK		0x00ffffff

static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
        return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
        return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
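/* Illustrative note on the pte encoding above (assuming the 8 KB pages
   used on Alpha, i.e. PAGE_SHIFT == 13): the physical frame number is
   stored shifted left by one with bit 0 as the valid bit, so

	mk_iommu_pte(0x2000) == (0x2000 >> 12) | 1 == 0x3

   i.e. PFN 1 with the valid bit set.  calc_npages() simply rounds a
   byte count up to whole pages: calc_npages(1) == 1 and
   calc_npages(PAGE_SIZE + 1) == 2.  */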

/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
        unsigned long mem = max_low_pfn << PAGE_SHIFT;
        if (mem < max)
                max = roundup_pow_of_two(mem);
        return max;
}
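/* Worked example (hypothetical numbers): with 384 MB of main memory and
   max == 1 GB, mem < max, so the result is shrunk to
   roundup_pow_of_two(384 MB) == 512 MB; with 2 GB of memory the 1 GB
   cap is returned unchanged.  */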
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
                     unsigned long window_size, unsigned long align)
{
        unsigned long mem_size;
        struct pci_iommu_arena *arena;

        mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

        /* Note that the TLB lookup logic uses bitwise concatenation,
           not addition, so the required arena alignment is based on
           the size of the window.  Retain the align parameter so that
           particular systems can over-align the arena.  */
        if (align < mem_size)
                align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

        if (!NODE_DATA(nid) ||
            (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
                                                 sizeof(*arena))))) {
                printk("%s: couldn't allocate arena from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __FUNCTION__, nid);
                arena = alloc_bootmem(sizeof(*arena));
        }

        if (!NODE_DATA(nid) ||
            (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
                                                         mem_size,
                                                         align,
                                                         0)))) {
                printk("%s: couldn't allocate arena ptes from node %d\n"
                       "    falling back to system-wide allocation\n",
                       __FUNCTION__, nid);
                arena->ptes = __alloc_bootmem(mem_size, align, 0);
        }

#else /* CONFIG_DISCONTIGMEM */

        arena = alloc_bootmem(sizeof(*arena));
        arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

        spin_lock_init(&arena->lock);
        arena->hose = hose;
        arena->dma_base = base;
        arena->size = window_size;
        arena->next_entry = 0;

        /* Align allocations to a multiple of a page size.  Not needed
           unless there are chip bugs.  */
        arena->align_entry = 1;

        return arena;
}
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
                unsigned long window_size, unsigned long align)
{
        return iommu_arena_new_node(0, hose, base, window_size, align);
}

/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
        unsigned long *ptes;
        long i, p, nent;

        /* Search forward for the first mask-aligned sequence of N free ptes */
        ptes = arena->ptes;
        nent = arena->size >> PAGE_SHIFT;
        p = (arena->next_entry + mask) & ~mask;
        i = 0;
        while (i < n && p+i < nent) {
                if (ptes[p+i])
                        p = (p + i + 1 + mask) & ~mask, i = 0;
                else
                        i = i + 1;
        }

        if (i < n) {
                /* Reached the end.  Flush the TLB and restart the
                   search from the beginning.  */
                alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

                p = 0, i = 0;
                while (i < n && p+i < nent) {
                        if (ptes[p+i])
                                p = (p + i + 1 + mask) & ~mask, i = 0;
                        else
                                i = i + 1;
                }

                if (i < n)
                        return -1;
        }

        /* Success. It's the responsibility of the caller to mark them
           in use before releasing the lock */
        return p;
}
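/* Illustrative trace of the search above (hypothetical arena state):
   with mask == 1 (entries allocated in pairs), next_entry == 5 and
   pte 6 busy, the scan starts at the aligned p == 6, hits the busy
   entry, realigns to p == 8 and, if ptes 8..9 are free, returns 8.
   Only when the scan falls off the end of the arena is the IOMMU TLB
   flushed and the search restarted from entry 0.  */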
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p, mask;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes */
        ptes = arena->ptes;
        mask = max(align, arena->align_entry) - 1;
        p = iommu_arena_find_pages(arena, n, mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all in use, ie not zero and invalid
           for the iommu tlb that could load them from under us.
           The chip specific bits will fill this in with something
           kosher when we return.  */
        for (i = 0; i < n; ++i)
                ptes[p+i] = IOMMU_INVALID_PTE;

        arena->next_entry = p + n;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
        unsigned long *p;
        long i;

        p = arena->ptes + ofs;
        for (i = 0; i < n; ++i)
                p[i] = 0;
}
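/* Sketch of the arena life cycle as used by the mapping code below
   (names hypothetical):

	long ofs = iommu_arena_alloc(arena, npages, 0);
	if (ofs >= 0) {
		for (i = 0; i < npages; ++i)
			arena->ptes[ofs + i] = mk_iommu_pte(paddr + i*PAGE_SIZE);
		...
		iommu_arena_free(arena, ofs, npages);
	}

   iommu_arena_alloc() leaves the new entries as IOMMU_INVALID_PTE so a
   concurrent TLB reload cannot latch stale translations; the caller
   overwrites them with real ptes before the device uses the window.  */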
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
static int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
                 int dac_allowed)
{
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        struct pci_iommu_arena *arena;
        long npages, dma_ofs, i;
        unsigned long paddr;
        dma_addr_t ret;
        unsigned int align = 0;

        paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
        /* First check to see if we can use the direct map window.  */
        if (paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                ret = paddr + __direct_map_base;

                DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }
#endif

        /* Next, use DAC if selected earlier.  */
        if (dac_allowed) {
                ret = paddr + alpha_mv.pci_dac_offset;

                DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
                      cpu_addr, size, ret, __builtin_return_address(0));

                return ret;
        }

        /* If the machine doesn't define a pci_tbi routine, we have to
           assume it doesn't support sg mapping, and, since we tried to
           use direct_map above, it now must be considered an error. */
        if (! alpha_mv.mv_pci_tbi) {
                static int been_here = 0; /* Only print the message once. */
                if (!been_here) {
                        printk(KERN_WARNING "pci_map_single: no HW sg\n");
                        been_here = 1;
                }
                return 0;
        }

        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        npages = calc_npages((paddr & ~PAGE_MASK) + size);

        /* Force allocation to 64KB boundary for ISA bridges. */
        if (pdev && pdev == isa_bridge)
                align = 8;
        dma_ofs = iommu_arena_alloc(arena, npages, align);
        if (dma_ofs < 0) {
                printk(KERN_WARNING "pci_map_single failed: "
                       "could not allocate dma page tables\n");
                return 0;
        }

        paddr &= PAGE_MASK;
        for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

        ret = arena->dma_base + dma_ofs * PAGE_SIZE;
        ret += (unsigned long)cpu_addr & ~PAGE_MASK;

        DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
              cpu_addr, size, npages, ret, __builtin_return_address(0));

        return ret;
}
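/* Address arithmetic example for the sg path above (hypothetical
   values): with arena->dma_base == 0x8000000, dma_ofs == 4 and 8 KB
   pages, a buffer at offset 0x123 within its page maps to
   0x8000000 + 4*0x2000 + 0x123 == 0x8008123.  The direct-map and DAC
   paths instead just add __direct_map_base or alpha_mv.pci_dac_offset
   to the physical address.  */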
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);

dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
             size_t size, int dir)
{
        int dac_allowed;

        if (dir == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
        return pci_map_single_1(pdev, (char *)page_address(page) + offset,
                                size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
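/* Minimal streaming-DMA usage sketch (hypothetical driver code, not
   from this file):

	dma_addr_t bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (!bus)
		return -ENOMEM;
	... hand `bus' to the device and wait for the transfer ...
	pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);

   The size and direction passed to pci_unmap_single() must match the
   original mapping call.  */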
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
                 int direction)
{
        unsigned long flags;
        struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
        struct pci_iommu_arena *arena;
        long dma_ofs, npages;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (dma_addr >= __direct_map_base
            && dma_addr < __direct_map_base + __direct_map_size) {
                /* Nothing to do.  */

                DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));

                return;
        }

        if (dma_addr > 0xffffffff) {
                DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
                      dma_addr, size, __builtin_return_address(0));
                return;
        }

        arena = hose->sg_pci;
        if (!arena || dma_addr < arena->dma_base)
                arena = hose->sg_isa;

        dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
        if (dma_ofs * PAGE_SIZE >= arena->size) {
                printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
                       " base %lx size %x\n", dma_addr, arena->dma_base,
                       arena->size);
                return;
                BUG();
        }

        npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

        spin_lock_irqsave(&arena->lock, flags);

        iommu_arena_free(arena, dma_ofs, npages);

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if (dma_ofs >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
              dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
               size_t size, int direction)
{
        pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);

/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        void *cpu_addr;
        long order = get_order(size);
        gfp_t gfp = GFP_ATOMIC;

try_again:
        cpu_addr = (void *)__get_free_pages(gfp, order);
        if (! cpu_addr) {
                printk(KERN_INFO "pci_alloc_consistent: "
                       "get_free_pages failed from %p\n",
                        __builtin_return_address(0));
                /* ??? Really atomic allocation?  Otherwise we could play
                   with vmalloc and sg if we can't find contiguous memory.  */
                return NULL;
        }
        memset(cpu_addr, 0, size);

        *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
        if (*dma_addrp == 0) {
                free_pages((unsigned long)cpu_addr, order);
                if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
                        return NULL;
                /* The address doesn't fit required mask and we
                   do not have iommu. Try again with GFP_DMA. */
                gfp |= GFP_DMA;
                goto try_again;
        }

        DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
              size, cpu_addr, *dma_addrp, __builtin_return_address(0));

        return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what as passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
                    dma_addr_t dma_addr)
{
        pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
        free_pages((unsigned long)cpu_addr, get_order(size));

        DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
              dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
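/* Consistent-DMA usage sketch (hypothetical driver code): allocate a
   descriptor ring that both CPU and device touch for the life of the
   driver, then release it on teardown:

	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
	if (!ring)
		return -ENOMEM;
	...
	pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);

   RING_BYTES is a made-up size; the cpu pointer and dma address handed
   back to pci_free_consistent() must be the pair that
   pci_alloc_consistent() returned.  */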
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
        unsigned long next_paddr;
        struct scatterlist *leader;
        long leader_flag, leader_length;

        leader = sg;
        leader_flag = 0;
        leader_length = leader->length;
        next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

        for (++sg; sg < end; ++sg) {
                unsigned long addr, len;
                addr = SG_ENT_PHYS_ADDRESS(sg);
                len = sg->length;

                if (next_paddr == addr) {
                        sg->dma_address = -1;
                        leader_length += len;
                } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
                        sg->dma_address = -2;
                        leader_flag = 1;
                        leader_length += len;
                } else {
                        leader->dma_address = leader_flag;
                        leader->dma_length = leader_length;
                        leader = sg;
                        leader_flag = 0;
                        leader_length = len;
                }

                next_paddr = addr + len;
        }

        leader->dma_address = leader_flag;
        leader->dma_length = leader_length;
}
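/* Classification example (hypothetical 3-entry list, 8 KB pages): if
   entry 1 starts exactly where entry 0 ends, it is marked -1 and folded
   into entry 0's dma_length; if entry 2 then starts on a page boundary
   while the run through entry 1 also ends on a page boundary (and
   virt_ok is set), it is marked -2 and entry 0 becomes a "virtually
   adjacent" leader with dma_address == 1.  With virt_ok == 0 that same
   entry 2 would start a new leader instead.  */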
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
        struct scatterlist *out, struct pci_iommu_arena *arena,
        dma_addr_t max_dma, int dac_allowed)
{
        unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
        long size = leader->dma_length;
        struct scatterlist *sg;
        unsigned long *ptes;
        long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
        /* If everything is physically contiguous, and the addresses
           fall into the direct-map window, use it.  */
        if (leader->dma_address == 0
            && paddr + size + __direct_map_base - 1 <= max_dma
            && paddr + size <= __direct_map_size) {
                out->dma_address = paddr + __direct_map_base;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }
#endif

        /* If physically contiguous and DAC is available, use it.  */
        if (leader->dma_address == 0 && dac_allowed) {
                out->dma_address = paddr + alpha_mv.pci_dac_offset;
                out->dma_length = size;

                DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
                     __va(paddr), size, out->dma_address);

                return 0;
        }

        /* Otherwise, we'll use the iommu to make the pages virtually
           contiguous.  */

        paddr &= ~PAGE_MASK;
        npages = calc_npages(paddr + size);
        dma_ofs = iommu_arena_alloc(arena, npages, 0);
        if (dma_ofs < 0) {
                /* If we attempted a direct map above but failed, die.  */
                if (leader->dma_address == 0)
                        return -1;

                /* Otherwise, break up the remaining virtually contiguous
                   hunks into individual direct maps and retry.  */
                sg_classify(leader, end, 0);
                return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
        }

        out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
        out->dma_length = size;

        DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
             __va(paddr), size, out->dma_address, npages);

        /* All virtually contiguous.  We need to find the length of each
           physically contiguous subsegment to fill in the ptes.  */
        ptes = &arena->ptes[dma_ofs];
        sg = leader;
        do {
#if DEBUG_ALLOC > 0
                struct scatterlist *last_sg = sg;
#endif

                size = sg->length;
                paddr = SG_ENT_PHYS_ADDRESS(sg);

                while (sg+1 < end && (int) sg[1].dma_address == -1) {
                        size += sg[1].length;
                        sg++;
                }

                npages = calc_npages((paddr & ~PAGE_MASK) + size);

                paddr &= PAGE_MASK;
                for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
                        *ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
                DBGA("    (%ld) [%p,%x] np %ld\n",
                     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                     last_sg->length, npages);
                while (++last_sg <= sg) {
                        DBGA("        (%ld) [%p,%x] cont\n",
                             last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
                             last_sg->length);
                }
#endif
        } while (++sg < end && (int) sg->dma_address < 0);

        return 1;
}
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
           int direction)
{
        struct scatterlist *start, *end, *out;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        dma_addr_t max_dma;
        int dac_allowed;

        if (direction == PCI_DMA_NONE)
                BUG();

        dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

        /* Fast path single entry scatterlists.  */
        if (nents == 1) {
                sg->dma_length = sg->length;
                sg->dma_address
                  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
                                     sg->length, dac_allowed);
                return sg->dma_address != 0;
        }

        start = sg;
        end = sg + nents;

        /* First, prepare information about the entries.  */
        sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

        /* Second, figure out where we're going to map things.  */
        if (alpha_mv.mv_pci_tbi) {
                hose = pdev ? pdev->sysdata : pci_isa_hose;
                max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
                arena = hose->sg_pci;
                if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                        arena = hose->sg_isa;
        } else {
                max_dma = -1;
                arena = NULL;
                hose = NULL;
        }

        /* Third, iterate over the scatterlist leaders and allocate
           dma space as needed.  */
        for (out = sg; sg < end; ++sg) {
                if ((int) sg->dma_address < 0)
                        continue;
                if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
                        goto error;
                out++;
        }

        /* Mark the end of the list for pci_unmap_sg.  */
        if (out < end)
                out->dma_length = 0;

        if (out - start == 0)
                printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
        DBGA("pci_map_sg: %ld entries\n", out - start);

        return out - start;

 error:
        printk(KERN_WARNING "pci_map_sg failed: "
               "could not allocate dma page tables\n");

        /* Some allocation failed while mapping the scatterlist
           entries.  Unmap them now.  */
        if (out > start)
                pci_unmap_sg(pdev, start, out - start, direction);
        return 0;
}
EXPORT_SYMBOL(pci_map_sg);
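/* Scatter-gather usage sketch (hypothetical driver code):

	int n = pci_map_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);
	if (!n)
		return -ENOMEM;
	... program the device with the n merged (dma_address, dma_length)
	    pairs, noting that n may be smaller than nents ...
	pci_unmap_sg(pdev, sglist, nents, PCI_DMA_FROMDEVICE);

   pci_unmap_sg() takes the original nents, not the merged count
   returned by pci_map_sg().  */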
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
             int direction)
{
        unsigned long flags;
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;
        struct scatterlist *end;
        dma_addr_t max_dma;
        dma_addr_t fbeg, fend;

        if (direction == PCI_DMA_NONE)
                BUG();

        if (! alpha_mv.mv_pci_tbi)
                return;

        hose = pdev ? pdev->sysdata : pci_isa_hose;
        max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
        arena = hose->sg_pci;
        if (!arena || arena->dma_base + arena->size - 1 > max_dma)
                arena = hose->sg_isa;

        fbeg = -1, fend = 0;

        spin_lock_irqsave(&arena->lock, flags);

        for (end = sg + nents; sg < end; ++sg) {
                dma64_addr_t addr;
                size_t size;
                long npages, ofs;
                dma_addr_t tend;

                addr = sg->dma_address;
                size = sg->dma_length;
                if (!size)
                        break;

                if (addr > 0xffffffff) {
                        /* It's a DAC address -- nothing to do.  */
                        DBGA("    (%ld) DAC [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                if (addr >= __direct_map_base
                    && addr < __direct_map_base + __direct_map_size) {
                        /* Nothing to do.  */
                        DBGA("    (%ld) direct [%lx,%lx]\n",
                             sg - end + nents, addr, size);
                        continue;
                }

                DBGA("    (%ld) sg [%lx,%lx]\n",
                     sg - end + nents, addr, size);

                npages = calc_npages((addr & ~PAGE_MASK) + size);
                ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
                iommu_arena_free(arena, ofs, npages);

                tend = addr + size - 1;
                if (fbeg > addr) fbeg = addr;
                if (fend < tend) fend = tend;
        }

        /* If we're freeing ptes above the `next_entry' pointer (they
           may have snuck back into the TLB since the last wrap flush),
           we need to flush the TLB before reallocating the latter.  */
        if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
                alpha_mv.mv_pci_tbi(hose, fbeg, fend);

        spin_unlock_irqrestore(&arena->lock, flags);

        DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);

/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
        struct pci_controller *hose;
        struct pci_iommu_arena *arena;

        /* If there exists a direct map, and the mask fits either
           the entire direct mapped space or the total system memory as
           shifted by the map base */
        if (__direct_map_size != 0
            && (__direct_map_base + __direct_map_size - 1 <= mask ||
                __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
                return 1;

        /* Check that we have a scatter-gather arena that fits.  */
        hose = pdev ? pdev->sysdata : pci_isa_hose;
        arena = hose->sg_isa;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;
        arena = hose->sg_pci;
        if (arena && arena->dma_base + arena->size - 1 <= mask)
                return 1;

        /* As last resort try ZONE_DMA.  */
        if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
                return 1;

        return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
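/* Example of the checks above (hypothetical configuration): with a
   1 GB direct-map window based at 1 GB, a device advertising the common
   32-bit mask 0xffffffff passes the first test, while a 24-bit
   ISA-style mask fails it and must instead be satisfied by an sg arena
   whose window lies entirely below 16 MB.  */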

/*
 * AGP GART extensions to the IOMMU
 */
int
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, p;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        /* Search for N empty ptes.  */
        ptes = arena->ptes;
        p = iommu_arena_find_pages(arena, pg_count, align_mask);
        if (p < 0) {
                spin_unlock_irqrestore(&arena->lock, flags);
                return -1;
        }

        /* Success.  Mark them all reserved (ie not zero and invalid)
           for the iommu tlb that could load them from under us.
           They will be filled in with valid bits by _bind() */
        for (i = 0; i < pg_count; ++i)
                ptes[p+i] = IOMMU_RESERVED_PTE;

        arena->next_entry = p + pg_count;
        spin_unlock_irqrestore(&arena->lock, flags);

        return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *ptes;
        long i;

        if (!arena) return -EINVAL;

        ptes = arena->ptes;

        /* Make sure they're all reserved first... */
        for(i = pg_start; i < pg_start + pg_count; i++)
                if (ptes[i] != IOMMU_RESERVED_PTE)
                        return -EBUSY;

        iommu_arena_free(arena, pg_start, pg_count);
        return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
           unsigned long *physaddrs)
{
        unsigned long flags;
        unsigned long *ptes;
        long i, j;

        if (!arena) return -EINVAL;

        spin_lock_irqsave(&arena->lock, flags);

        ptes = arena->ptes;

        for(j = pg_start; j < pg_start + pg_count; j++) {
                if (ptes[j] != IOMMU_RESERVED_PTE) {
                        spin_unlock_irqrestore(&arena->lock, flags);
                        return -EBUSY;
                }
        }

        for(i = 0, j = pg_start; i < pg_count; i++, j++)
                ptes[j] = mk_iommu_pte(physaddrs[i]);

        spin_unlock_irqrestore(&arena->lock, flags);

        return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
        unsigned long *p;
        long i;

        if (!arena) return -EINVAL;

        p = arena->ptes + pg_start;
        for(i = 0; i < pg_count; i++)
                p[i] = IOMMU_RESERVED_PTE;

        return 0;
}
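/* Typical GART sequence for the four helpers above (hypothetical
   caller, e.g. an AGP backend):

	long pg = iommu_reserve(arena, pg_count, align_mask);
	if (pg >= 0) {
		iommu_bind(arena, pg, pg_count, physaddrs);
		...
		iommu_unbind(arena, pg, pg_count);
		iommu_release(arena, pg, pg_count);
	}

   iommu_reserve() parks the range as IOMMU_RESERVED_PTE, _bind() fills
   in real translations, _unbind() returns the range to the reserved
   state, and _release() finally hands it back to the arena.  */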
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

static int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
        dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
        int ok = 1;

        /* If this is not set, the machine doesn't support DAC at all.  */
        if (dac_offset == 0)
                ok = 0;

        /* The device has to be able to address our DAC bit.  */
        if ((dac_offset & dev->dma_mask) != dac_offset)
                ok = 0;

        /* If both conditions above are met, we are fine. */
        DBGA("pci_dac_dma_supported %s from %p\n",
             ok ? "yes" : "no", __builtin_return_address(0));

        return ok;
}
/* Helper for generic DMA-mapping functions. */

struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
        if (dev && dev->bus == &pci_bus_type)
                return to_pci_dev(dev);

        /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
           BUG() otherwise. */
        BUG_ON(!isa_bridge);

        /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
           bridge is bus master then). */
        if (!dev || !dev->dma_mask || !*dev->dma_mask)
                return isa_bridge;

        /* For EISA bus masters, return isa_bridge (it might have smaller
           dma_mask due to wiring limitations). */
        if (*dev->dma_mask >= isa_bridge->dma_mask)
                return isa_bridge;

        /* This assumes ISA bus master with dma_mask 0xffffff. */
        return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask ||
            !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
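/* Usage sketch for the generic interface above (hypothetical driver
   probe code):

	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
		return -EIO;

   dma_set_mask() fails with -EIO when pci_dma_supported() rejects the
   requested mask for the translated PCI (or ISA bridge) device.  */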