/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#include <asm/hwrpb.h>

#include "proto.h"
#include "pci_impl.h"


#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0
#define DEBUG_FORCEDAC 0

#define ISA_DMA_MASK		0x00ffffff
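/* mk_iommu_pte() encodes a physical address as the page frame number
   shifted left by one with the low bit set; a pte of zero marks a free
   slot in the arena.  */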
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}

static inline long
calc_npages(long bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}


/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = 1UL << ceil_log2(mem);
	return max;
}
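/* A pci_iommu_arena describes one scatter-gather DMA window: its bus
   base address, its size, and the table of ptes backing it, one
   unsigned long pte per IOMMU page of the window.  */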
struct pci_iommu_arena *
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

#ifdef CONFIG_DISCONTIGMEM

	if (!NODE_DATA(nid) ||
	    (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
						 sizeof(*arena))))) {
		printk("%s: couldn't allocate arena from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena = alloc_bootmem(sizeof(*arena));
	}

	if (!NODE_DATA(nid) ||
	    (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
							 mem_size,
							 align,
							 0)))) {
		printk("%s: couldn't allocate arena ptes from node %d\n"
		       "    falling back to system-wide allocation\n",
		       __FUNCTION__, nid);
		arena->ptes = __alloc_bootmem(mem_size, align, 0);
	}

#else /* CONFIG_DISCONTIGMEM */

	arena = alloc_bootmem(sizeof(*arena));
	arena->ptes = __alloc_bootmem(mem_size, align, 0);

#endif /* CONFIG_DISCONTIGMEM */

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}

struct pci_iommu_arena *
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
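/* The mask argument below is the required slot alignment minus one,
   so (p + mask) & ~mask rounds an index up to the next aligned slot.  */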
/* Must be called with the arena lock held */
static long
iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
{
	unsigned long *ptes;
	long i, p, nent;

	/* Search forward for the first mask-aligned sequence of N free ptes */
	ptes = arena->ptes;
	nent = arena->size >> PAGE_SHIFT;
	p = (arena->next_entry + mask) & ~mask;
	i = 0;
	while (i < n && p+i < nent) {
		if (ptes[p+i])
			p = (p + i + 1 + mask) & ~mask, i = 0;
		else
			i = i + 1;
	}

	if (i < n) {
		/* Reached the end.  Flush the TLB and restart the
		   search from the beginning.  */
		alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

		p = 0, i = 0;
		while (i < n && p+i < nent) {
			if (ptes[p+i])
				p = (p + i + 1 + mask) & ~mask, i = 0;
			else
				i = i + 1;
		}

		if (i < n)
			return -1;
	}

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
	return p;
}
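/* iommu_arena_alloc and iommu_arena_free manage pte slots in an arena.
   Unlike iommu_arena_find_pages above, iommu_arena_alloc takes the
   arena lock itself; iommu_arena_free leaves locking and any required
   TLB flush to its callers.  */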
static long
iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p, mask;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	ptes = arena->ptes;
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}

static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	unsigned long *p;
	long i;

	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
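/* Note that iommu_arena_free does not flush the IOMMU TLB.  Entries
   freed below next_entry cannot be handed out again until the search
   wraps, and the wrap in iommu_arena_find_pages does a full flush
   first; only entries freed at or above next_entry must be flushed
   immediately, which the unmap paths below take care of.  */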
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */
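/* Three strategies are tried in order: the direct-map window (bus
   address = physical address + __direct_map_base), a DAC address
   (physical address + pci_dac_offset) when the caller allows it, and
   finally a scatter-gather mapping through an IOMMU arena.  */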
static dma_addr_t
pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
		 int dac_allowed)
{
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned long paddr;
	dma_addr_t ret;
	unsigned int align = 0;

	paddr = __pa(cpu_addr);

#if !DEBUG_NODIRECT
	/* First check to see if we can use the direct map window.  */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}
#endif

	/* Next, use DAC if selected earlier.  */
	if (dac_allowed) {
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

		return ret;
	}

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		static int been_here = 0; /* Only print the message once. */
		if (!been_here) {
			printk(KERN_WARNING "pci_map_single: no HW sg\n");
			been_here = 1;
		}
		return 0;
	}

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = calc_npages((paddr & ~PAGE_MASK) + size);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
		align = 8;
	dma_ofs = iommu_arena_alloc(arena, npages, align);
	if (dma_ofs < 0) {
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return 0;
	}

	paddr &= PAGE_MASK;
	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));

	return ret;
}
dma_addr_t
pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_single);
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
	     size_t size, int dir)
{
	int dac_allowed;

	if (dir == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
				size, dac_allowed);
}
EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

void
pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
		 int direction)
{
	unsigned long flags;
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {
		/* Nothing to do.  */

		DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));

		return;
	}

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
		      dma_addr, size, __builtin_return_address(0));
		return;
	}

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
		       " base %lx size %x\n", dma_addr, arena->dma_base,
		       arena->size);
		return;
		BUG();
	}

	npages = calc_npages((dma_addr & ~PAGE_MASK) + size);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
	      dma_addr, size, npages, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_unmap_single);
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
	       size_t size, int direction)
{
	pci_unmap_single(pdev, dma_addr, size, direction);
}
EXPORT_SYMBOL(pci_unmap_page);
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

void *
pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	void *cpu_addr;
	long order = get_order(size);
	gfp_t gfp = GFP_ATOMIC;

try_again:
	cpu_addr = (void *)__get_free_pages(gfp, order);
	if (! cpu_addr) {
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %p\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory.  */
		return NULL;
	}
	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == 0) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
			return NULL;
		/* The address doesn't fit required mask and we
		   do not have iommu. Try again with GFP_DMA. */
		gfp |= GFP_DMA;
		goto try_again;
	}

	DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));

	return cpu_addr;
}
EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

void
pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
		    dma_addr_t dma_addr)
{
	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
	      dma_addr, size, __builtin_return_address(0));
}
EXPORT_SYMBOL(pci_free_consistent);
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
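/* Two entries count as "virtually adjacent" only when the current run
   ends on a page boundary and the next entry starts on one, i.e.
   ((next_paddr | addr) & ~PAGE_MASK) == 0, so the IOMMU can splice
   them together with whole-page ptes.  virt_ok is zero when there is
   no IOMMU scatter-gather support and only physical merging is
   permitted.  */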
static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;

	leader = sg;
	leader_flag = 0;
	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);
		len = sg->length;

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_flag = 1;
			leader_length += len;
		} else {
			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;
			leader = sg;
			leader_flag = 0;
			leader_length = len;
		}

		next_paddr = addr + len;
	}

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
}
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	unsigned long *ptes;
	long npages, dma_ofs, i;

#if !DEBUG_NODIRECT
	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it.  */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> direct %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}
#endif

	/* If physically contiguous and DAC is available, use it.  */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA("    sg_fill: [%p,%lx] -> DAC %lx\n",
		     __va(paddr), size, out->dma_address);

		return 0;
	}

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = calc_npages(paddr + size);
	dma_ofs = iommu_arena_alloc(arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(leader, end, 0);
		return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA("    sg_fill: [%p,%lx] -> sg %lx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes.  */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
#if DEBUG_ALLOC > 0
		struct scatterlist *last_sg = sg;
#endif

		size = sg->length;
		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;
			sg++;
		}

		npages = calc_npages((paddr & ~PAGE_MASK) + size);

		paddr &= PAGE_MASK;
		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

#if DEBUG_ALLOC > 0
		DBGA("    (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA("        (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
			     last_sg->length);
		}
#endif
	} while (++sg < end && (int) sg->dma_address < 0);

	return 1;
}
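/* pci_map_sg returns the number of dma_address/dma_length pairs it
   actually filled in; when fewer than nents are used, the entry after
   the last one written gets dma_length 0 as a terminator for
   pci_unmap_sg.  */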
int
pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	   int direction)
{
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	dma_addr_t max_dma;
	int dac_allowed;

	if (direction == PCI_DMA_NONE)
		BUG();

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists.  */
	if (nents == 1) {
		sg->dma_length = sg->length;
		sg->dma_address
		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
				     sg->length, dac_allowed);
		return sg->dma_address != 0;
	}

	start = sg;
	end = sg + nents;

	/* First, prepare information about the entries.  */
	sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things.  */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;
	} else {
		max_dma = -1;
		arena = NULL;
		hose = NULL;
	}

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed.  */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
			goto error;
		out++;
	}

	/* Mark the end of the list for pci_unmap_sg.  */
	if (out < end)
		out->dma_length = 0;

	if (out - start == 0)
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
	DBGA("pci_map_sg: %ld entries\n", out - start);

	return out - start;

 error:
	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now.  */
	if (out > start)
		pci_unmap_sg(pdev, start, out - start, direction);
	return 0;
}
EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

void
pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
	     int direction)
{
	unsigned long flags;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t max_dma;
	dma_addr_t fbeg, fend;

	if (direction == PCI_DMA_NONE)
		BUG();

	if (! alpha_mv.mv_pci_tbi)
		return;

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	fbeg = -1, fend = 0;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		dma64_addr_t addr;
		size_t size;
		long npages, ofs;
		dma_addr_t tend;

		addr = sg->dma_address;
		size = sg->dma_length;
		if (!size)
			break;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do.  */
			DBGA("    (%ld) DAC [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			/* Nothing to do.  */
			DBGA("    (%ld) direct [%lx,%lx]\n",
			     sg - end + nents, addr, size);
			continue;
		}

		DBGA("    (%ld) sg [%lx,%lx]\n",
		     sg - end + nents, addr, size);

		npages = calc_npages((addr & ~PAGE_MASK) + size);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;
	}

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter.  */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
EXPORT_SYMBOL(pci_unmap_sg);
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

int
pci_dma_supported(struct pci_dev *pdev, u64 mask)
{
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits.  */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As last resort try ZONE_DMA.  */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
EXPORT_SYMBOL(pci_dma_supported);
/*
 * AGP GART extensions to the IOMMU
 */
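/* The GART interface works in two steps: iommu_reserve marks a run of
   ptes IOMMU_RESERVED_PTE so nothing else can claim them, and
   iommu_bind later fills those reserved slots with real translations.
   iommu_unbind returns bound slots to the reserved state, and
   iommu_release frees reserved slots back to the arena.  */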
long
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, p;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	ptes = arena->ptes;
	p = iommu_arena_find_pages(arena, pg_count, align_mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
int
iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *ptes;
	long i;

	if (!arena) return -EINVAL;

	ptes = arena->ptes;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)
			return -EBUSY;

	iommu_arena_free(arena, pg_start, pg_count);
	return 0;
}
int
iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	   unsigned long *physaddrs)
{
	unsigned long flags;
	unsigned long *ptes;
	long i, j;

	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	ptes = arena->ptes;

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);
			return -EBUSY;
		}
	}

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(physaddrs[i]);

	spin_unlock_irqrestore(&arena->lock, flags);

	return 0;
}
int
iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
{
	unsigned long *p;
	long i;

	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;

	return 0;
}
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */

int
pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %p\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
EXPORT_SYMBOL(pci_dac_dma_supported);
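/* With DAC, a 64-bit bus address is simply the page's physical address
   plus pci_dac_offset; the helpers below convert in both directions
   without involving the IOMMU.  */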
dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
		    unsigned long offset, int direction)
{
	return (alpha_mv.pci_dac_offset
		+ __pa(page_address(page))
		+ (dma64_addr_t) offset);
}
EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
	return virt_to_page(__va(paddr));
}
EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
	return (dma_addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(pci_dac_dma_to_offset);
/* Helper for generic DMA-mapping functions. */
struct pci_dev *
alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev->bus == &pci_bus_type)
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
EXPORT_SYMBOL(alpha_gendev_to_pci);
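/* dma_set_mask for the generic DMA API: translate the device with
   alpha_gendev_to_pci, validate the mask with pci_dma_supported, and
   store it on success.  */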
int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask ||
	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);