arch/sparc64/kernel/pci_iommu.c
/* $Id: pci_iommu.c,v 1.11 2000/03/10 02:42:15 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/pbm.h>

#include "iommu_common.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define pci_iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
        unsigned long tag;
        int entry;

        tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
        for (entry = 0; entry < 16; entry++) {
                pci_iommu_write(tag, 0);
                tag += 8;
        }

        /* Ensure completion of previous PIO writes. */
        (void) pci_iommu_read(iommu->write_complete_reg);

        /* Now update everyone's flush point. */
        for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
                iommu->alloc_info[entry].flush =
                        iommu->alloc_info[entry].next;
        }
}
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte, *limit;
        unsigned long cnum, ent, flush_point;

        cnum = 0;
        while ((1UL << cnum) < npages)
                cnum++;
        iopte = (iommu->page_table +
                 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

        if (cnum == 0)
                limit = (iommu->page_table +
                         iommu->lowest_consistent_map);
        else
                limit = (iopte +
                         (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

        iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
        flush_point = iommu->alloc_info[cnum].flush;

        for (;;) {
                if (iopte_val(*iopte) == 0UL) {
                        if ((iopte + (1 << cnum)) >= limit)
                                ent = 0;
                        else
                                ent = ent + 1;
                        iommu->alloc_info[cnum].next = ent;
                        if (ent == flush_point)
                                __iommu_flushall(iommu);
                        break;
                }
                iopte += (1 << cnum);
                ent++;
                if (iopte >= limit) {
                        iopte = (iommu->page_table +
                                 (cnum <<
                                  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
                        ent = 0;
                }
                if (ent == flush_point)
                        __iommu_flushall(iommu);
        }

        /* I've got your streaming cluster right here buddy boy... */
        return iopte;
}
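/* Sizing note for the allocator above: cnum is the smallest power-of-two
 * exponent that covers the request, so for example npages == 3 yields
 * cnum == 2 and the mapping is carved out of the 4-entry cluster.
 */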
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
                                   unsigned long npages, unsigned long ctx)
{
        unsigned long cnum, ent;

        cnum = 0;
        while ((1UL << cnum) < npages)
                cnum++;

        ent = (base << (32 - PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
                >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

        /* If the global flush might not have caught this entry,
         * adjust the flush point such that we will flush before
         * ever trying to reuse it.
         */
#define between(X,Y,Z)  (((Z) - (Y)) >= ((X) - (Y)))
        if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
                iommu->alloc_info[cnum].flush = ent;
#undef between
}
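/* Worked example for the adjustment above: with next == 5 and
 * flush == 9, freeing ent == 7 satisfies between(7, 5, 9) (the unsigned
 * subtraction also handles wrap-around), so the flush point is pulled
 * back to 7 and a flushall happens before entry 7 can be handed out again.
 */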
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
        iopte_t *iopte;

        iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
        while (iopte > iommu->page_table) {
                iopte--;
                if (!(iopte_val(*iopte) & IOPTE_VALID)) {
                        unsigned long tmp = npages;

                        while (--tmp) {
                                iopte--;
                                if (iopte_val(*iopte) & IOPTE_VALID)
                                        break;
                        }
                        if (tmp == 0) {
                                u32 entry = (iopte - iommu->page_table);

                                if (entry < iommu->lowest_consistent_map)
                                        iommu->lowest_consistent_map = entry;
                                return iopte;
                        }
                }
        }
        return NULL;
}
#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

#define IOPTE_INVALID   0UL
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, first_page, ctx;
        void *ret;
        int npages;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        first_page = __get_free_pages(GFP_ATOMIC, order);
        if (first_page == 0UL)
                return NULL;
        memset((char *)first_page, 0, PAGE_SIZE << order);

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;

        spin_lock_irqsave(&iommu->lock, flags);
        iopte = alloc_consistent_cluster(iommu, size >> PAGE_SHIFT);
        if (iopte == NULL) {
                spin_unlock_irqrestore(&iommu->lock, flags);
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->page_table_map_base +
                      ((iopte - iommu->page_table) << PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> PAGE_SHIFT;
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += PAGE_SIZE;
        }

        if (iommu->iommu_ctxflush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                int i;
                u32 daddr = *dma_addrp;

                npages = size >> PAGE_SHIFT;
                for (i = 0; i < npages; i++) {
                        pci_iommu_write(iommu->iommu_flush, daddr);
                        daddr += PAGE_SIZE;
                }
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
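/* Caller sketch (illustrative): a driver allocating a shared descriptor
 * ring of a hypothetical RING_BYTES bytes pairs the two interfaces as:
 *
 *      dma_addr_t dma;
 *      void *ring = pci_alloc_consistent(pdev, RING_BYTES, &dma);
 *
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ... hand "dma" to the device, access the ring through "ring" ...
 *      pci_free_consistent(pdev, RING_BYTES, ring, dma);
 */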
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        iopte_t *iopte;
        unsigned long flags, order, npages, i, ctx;

        npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        iopte = iommu->page_table +
                ((dvma - iommu->page_table_map_base) >> PAGE_SHIFT);

        spin_lock_irqsave(&iommu->lock, flags);

        if ((iopte - iommu->page_table) ==
            iommu->lowest_consistent_map) {
                iopte_t *walk = iopte + npages;
                iopte_t *limit;

                limit = (iommu->page_table +
                         (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
                while (walk < limit) {
                        if (iopte_val(*walk) != IOPTE_INVALID)
                                break;
                        walk++;
                }
                iommu->lowest_consistent_map =
                        (walk - iommu->page_table);
        }

        /* Data for consistent mappings cannot enter the streaming
         * buffers, so we only need to update the TSB.  We flush
         * the IOMMU here as well to prevent conflicts with the
         * streaming mapping deferred tlb flush scheme.
         */

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

        for (i = 0; i < npages; i++, iopte++)
                iopte_val(*iopte) = IOPTE_INVALID;

        if (iommu->iommu_ctxflush) {
                pci_iommu_write(iommu->iommu_ctxflush, ctx);
        } else {
                for (i = 0; i < npages; i++) {
                        u32 daddr = dvma + (i << PAGE_SHIFT);

                        pci_iommu_write(iommu->iommu_flush, daddr);
                }
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        if (direction == PCI_DMA_NONE)
                BUG();

        oaddr = (unsigned long)ptr;
        npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_streaming_cluster(iommu, npages);
        bus_addr = (iommu->page_table_map_base +
                    ((base - iommu->page_table) << PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~PAGE_MASK);
        base_paddr = __pa(oaddr & PAGE_MASK);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        spin_unlock_irqrestore(&iommu->lock, flags);

        return ret;
}
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, i, ctx;

        if (direction == PCI_DMA_NONE)
                BUG();

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
        if (iopte_val(*base) == IOPTE_INVALID)
                printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n", bus_addr, sz, __builtin_return_address(0));
#endif
        bus_addr &= PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                u32 vaddr = bus_addr;

                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_ctxflush &&
                    iommu->iommu_ctxflush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while (((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 2: Clear out first TSB entry. */
        iopte_val(*base) = IOPTE_INVALID;

        free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
                               npages, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
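/* Streaming sketch (illustrative): to transmit a hypothetical buffer
 * "buf" of "len" bytes, a driver maps it only for the duration of the
 * transfer:
 *
 *      dma_addr_t dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *      ... point the device at "dma" and wait for completion ...
 *      pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE);
 */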
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, unsigned long iopte_protection)
{
        struct scatterlist *dma_sg = sg;
        int i;

        for (i = 0; i < nused; i++) {
                unsigned long pteval = ~0UL;
                u32 dma_npages;

                dma_npages = ((dma_sg->dvma_address & (PAGE_SIZE - 1UL)) +
                              dma_sg->dvma_length +
                              ((u32)(PAGE_SIZE - 1UL))) >> PAGE_SHIFT;
                do {
                        unsigned long offset;
                        signed int len;

                        /* If we are here, we know we have at least one
                         * more page to map.  So walk forward until we
                         * hit a page crossing, and begin creating new
                         * mappings from that spot.
                         */
                        for (;;) {
                                unsigned long tmp;

                                tmp = (unsigned long) __pa(sg->address);
                                len = sg->length;
                                if (((tmp ^ pteval) >> PAGE_SHIFT) != 0UL) {
                                        pteval = tmp & PAGE_MASK;
                                        offset = tmp & (PAGE_SIZE - 1UL);
                                        break;
                                }
                                if (((tmp ^ (tmp + len - 1UL)) >> PAGE_SHIFT) != 0UL) {
                                        pteval = (tmp + PAGE_SIZE) & PAGE_MASK;
                                        offset = 0UL;
                                        len -= (PAGE_SIZE - (tmp & (PAGE_SIZE - 1UL)));
                                        break;
                                }
                                sg++;
                        }

                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
                        while (len > 0) {
                                *iopte++ = __iopte(pteval);
                                pteval += PAGE_SIZE;
                                len -= (PAGE_SIZE - offset);
                                offset = 0;
                                dma_npages--;
                        }

                        pteval = (pteval & IOPTE_PAGE) + len;
                        sg++;

                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
                        while ((pteval << (64 - PAGE_SHIFT)) != 0UL &&
                               pteval == __pa(sg->address) &&
                               ((pteval ^
                                 (__pa(sg->address) + sg->length - 1UL)) >> PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
                                sg++;
                        }
                        if ((pteval << (64 - PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
                dma_sg++;
        }
}
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * a hard time keeping this routine from using stack slots to hold variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx, npages, iopte_protection;
        iopte_t *base;
        u32 dma_base;
        struct scatterlist *sgtmp;
        int used;

        /* Fast path single entry scatterlists. */
        if (nelems == 1) {
                sglist->dvma_address = pci_map_single(pdev, sglist->address, sglist->length, direction);
                sglist->dvma_length = sglist->length;
                return 1;
        }

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        if (direction == PCI_DMA_NONE)
                BUG();

        /* Step 1: Prepare scatter list. */

        npages = prepare_sg(sglist, nelems);

        /* Step 2: Allocate a cluster. */

        spin_lock_irqsave(&iommu->lock, flags);

        base = alloc_streaming_cluster(iommu, npages);
        dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << PAGE_SHIFT);

        /* Step 3: Normalize DMA addresses. */
        used = nelems;

        sgtmp = sglist;
        while (used && sgtmp->dvma_length) {
                sgtmp->dvma_address += dma_base;
                sgtmp++;
                used--;
        }
        used = nelems - used;

        /* Step 4: Choose a context if necessary. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu->iommu_cur_ctx++;

        /* Step 5: Create the mappings. */
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != PCI_DMA_TODEVICE)
                iopte_protection |= IOPTE_WRITE;
        fill_sg (base, sglist, used, iopte_protection);
#ifdef VERIFY_SG
        verify_sglist(sglist, nelems, base, npages);
#endif

        spin_unlock_irqrestore(&iommu->lock, flags);

        return used;
}
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
        u32 bus_addr;

        if (direction == PCI_DMA_NONE)
                BUG();

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        bus_addr = sglist->dvma_address & PAGE_MASK;

        for (i = 1; i < nelems; i++)
                if (sglist[i].dvma_length == 0)
                        break;
        i--;
        npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;

        base = iommu->page_table +
                ((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
        if (iopte_val(*base) == IOPTE_INVALID)
                printk("pci_unmap_sg called on non-mapped region %08x,%d from %016lx\n", sglist->dvma_address, nelems, __builtin_return_address(0));
#endif

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled) {
                u32 vaddr = bus_addr;

                PCI_STC_FLUSHFLAG_INIT(strbuf);
                if (strbuf->strbuf_ctxflush &&
                    iommu->iommu_ctxflush) {
                        unsigned long matchreg, flushreg;

                        flushreg = strbuf->strbuf_ctxflush;
                        matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                        do {
                                pci_iommu_write(flushreg, ctx);
                        } while (((long)pci_iommu_read(matchreg)) < 0L);
                } else {
                        for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
                                pci_iommu_write(strbuf->strbuf_pflush, vaddr);
                }

                pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
                (void) pci_iommu_read(iommu->write_complete_reg);
                while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                        membar("#LoadLoad");
        }

        /* Step 2: Clear out first TSB entry. */
        iopte_val(*base) = IOPTE_INVALID;

        free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
                               npages, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}
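/* Scatter-gather sketch (illustrative): given a driver-built scatterlist
 * "sg" with "nents" entries whose address/length fields are filled in,
 * the mapping is used as:
 *
 *      int count = pci_map_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *      ... program the device with sg[0..count-1].dvma_address/dvma_length ...
 *      pci_unmap_sg(pdev, sg, nents, PCI_DMA_FROMDEVICE);
 *
 * Note that pci_map_sg() may return fewer DMA segments than "nents",
 * since adjacent entries can be coalesced into one mapping.
 */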
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx, npages;

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
        npages >>= PAGE_SHIFT;
        bus_addr &= PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((bus_addr - iommu->page_table_map_base)>>PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while (((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i;

                for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}
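/* For example (illustrative): a driver that keeps a receive buffer
 * mapped across multiple transfers calls
 *
 *      pci_dma_sync_single(pdev, dma, len, PCI_DMA_FROMDEVICE);
 *
 * after each device write and before the CPU reads the buffer, so any
 * data still held in the streaming cache reaches memory first.
 */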
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
        struct pcidev_cookie *pcp;
        struct pci_iommu *iommu;
        struct pci_strbuf *strbuf;
        unsigned long flags, ctx;

        pcp = pdev->sysdata;
        iommu = &pcp->pbm->parent->iommu;
        strbuf = &pcp->pbm->stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;

                iopte = iommu->page_table +
                        ((sglist[0].dvma_address - iommu->page_table_map_base) >> PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        PCI_STC_FLUSHFLAG_INIT(strbuf);
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                unsigned long matchreg, flushreg;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
                do {
                        pci_iommu_write(flushreg, ctx);
                } while (((long)pci_iommu_read(matchreg)) < 0L);
        } else {
                unsigned long i, npages;
                u32 bus_addr;

                bus_addr = sglist[0].dvma_address & PAGE_MASK;

                for (i = 1; i < nelems; i++)
                        if (!sglist[i].dvma_length)
                                break;
                i--;
                npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;
                for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
                        pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
        }

        /* Step 3: Perform flush synchronization sequence. */
        pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) pci_iommu_read(iommu->write_complete_reg);
        while (!PCI_STC_FLUSHFLAG_SET(strbuf))
                membar("#LoadLoad");

        spin_unlock_irqrestore(&iommu->lock, flags);
}
int pci_dma_supported(struct pci_dev *pdev, dma_addr_t device_mask)
{
        struct pcidev_cookie *pcp = pdev->sysdata;
        u32 dma_addr_mask;

        if (pdev == NULL) {
                dma_addr_mask = 0xffffffff;
        } else {
                struct pci_iommu *iommu = &pcp->pbm->parent->iommu;

                dma_addr_mask = iommu->dma_addr_mask;
        }

        return (device_mask & dma_addr_mask) == dma_addr_mask;
}
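/* Probe-time sketch (illustrative): a driver checks whether the bus can
 * reach its buffers before enabling DMA, e.g. for a 32-bit device:
 *
 *      if (!pci_dma_supported(pdev, 0xffffffff))
 *              return -EIO;
 *
 * On this platform the answer is derived from the IOMMU's dma_addr_mask
 * rather than from the amount of physical memory.
 */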