Allow coexistence of N build and AC build.
[tomato.git] release/src-rt-6.x/linux/linux-2.6/arch/sparc64/kernel/pci_iommu.c
blob 70d2364fdfe0b6910be668dd9e5a032752c25e4b
/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <asm/oplib.h>

#include "iommu_common.h"
#include "pci_impl.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
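/* Note: ldxa/stxa with ASI_PHYS_BYPASS_EC_E issue cache-bypassing,
 * physically addressed accesses, which is why callers hand these
 * macros the register's physical address rather than an ioremap()'d
 * pointer, and why reads can double as ordering barriers for
 * previously posted PIO writes.
 */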
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
		for (entry = 0; entry < 16; entry++) {
			pci_iommu_write(tag, 0);
			tag += 8;
		}
	}
	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
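/* Consistent IOPTEs are valid and cacheable but bypass the streaming
 * buffer, so the CPU and the device always see the same data.
 * Streaming IOPTEs additionally set IOPTE_STBUF, routing transfers
 * through the controller's streaming cache, which then has to be
 * flushed explicitly on unmap/sync (see pci_strbuf_flush below).  The
 * optional flush-context number lands in the IOPTE_CONTEXT field at
 * bit 47 and up.
 */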
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator. */
static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

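/* The arena is a plain bitmap allocator over TSB entries: the first
 * pass scans forward from the rotating hint, and only if that fails
 * does a second pass restart from entry 0, flushing the hardware IOMMU
 * TLB first so that recently freed (but possibly still cached) entries
 * are safe to hand out again.  Returns the first entry index of a run
 * of npages free entries, or -1 if no such run exists.
 */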
static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}

static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = pci_arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

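/* TSB entry i maps bus address page_table_map_base + (i << IO_PAGE_SHIFT),
 * so converting between the two is pure arithmetic.  As a worked example
 * (assuming the usual 8KB IO_PAGE_SIZE and a hypothetical map base of
 * 0xff000000), a buffer placed at entry 3 gets bus address 0xff006000,
 * and free_npages() recovers the entry index by shifting the offset
 * from the map base back down by IO_PAGE_SHIFT.
 */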
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

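/* Context 0 is reserved to mean "no context": iommu_alloc_ctx() falls
 * back to it when the bitmap is exhausted, and iommu_free_ctx() never
 * releases it.  A non-zero context lets pci_strbuf_flush() evict every
 * streaming buffer line tagged with one mapping in a handful of
 * register writes instead of flushing page by page.
 */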
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = pdev->dev.archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = pdev->dev.archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}

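/* Write back and invalidate a mapping's lines in the streaming buffer.
 * Two strategies are used: if both the IOMMU and the streaming buffer
 * support contexts, write the context number to the ctxflush register,
 * read the per-context line-match register, and re-issue the flush for
 * any lines still tagged with that context, falling back to per-page
 * flushes if lines stubbornly remain.  Otherwise, write each IO page's
 * bus address to the page-flush register.  Unless the transfer was
 * PCI_DMA_TODEVICE only, a flush-sync write is then posted and the
 * flush flag polled so that dirty data has actually reached memory
 * before the caller reuses or tears down the mapping.
 */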
static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		pci_iommu_write(flushreg, ctx);
		val = pci_iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				pci_iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = pci_iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == PCI_DMA_TODEVICE)
		return;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
				 npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

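/* Fill in the IOPTEs backing a coalesced scatterlist.  NUSED is the
 * number of DMA clusters prepare_sg() produced; for each cluster,
 * fill_sg() walks the original NELEMS entries underneath it, merging
 * physically contiguous pieces and starting a new IOPTE whenever the
 * walk crosses an IO page boundary, so one cluster may span several
 * source entries.
 */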
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output; it was hard
 * to keep this routine from spilling variables into stack slots.
 */
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4u_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;
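	/* prepare_sg() is expected to have left each coalesced cluster's
	 * dma_address as an offset from the start of the allocation and to
	 * have cleared dma_length on the entries it merged away, so the
	 * loop above both rebases the addresses onto dma_base and counts
	 * how many clusters are actually in use.
	 */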

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

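/* DMA operations for sun4u-style PCI controllers, where the kernel
 * programs the IOMMU and streaming buffer registers directly.  Systems
 * that virtualize the IOMMU behind hypervisor calls are expected to
 * install a different ops table instead of this one.
 */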
const struct pci_iommu_ops pci_sun4u_iommu_ops = {
	.alloc_consistent = pci_4u_alloc_consistent,
	.free_consistent = pci_4u_free_consistent,
	.map_single = pci_4u_map_single,
	.unmap_single = pci_4u_unmap_single,
	.map_sg = pci_4u_map_sg,
	.unmap_sg = pci_4u_unmap_sg,
	.dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu,
};

static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

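/* Check whether DEVICE_MASK (the DMA mask a driver asks for, typically
 * via pci_set_dma_mask()) is usable here.  Masks wider than 32 bits are
 * rejected because sun4u PCI DMA addresses are 32-bit, and otherwise the
 * mask must cover the controller's dma_addr_mask so the device can reach
 * every bus address the IOMMU may hand out.  The ALI M5451 audio chip is
 * special-cased through ali_sound_dma_hack() above.
 */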
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct iommu *iommu = pdev->dev.archdata.iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}