/* $Id: pci_iommu.c,v 1.11 2000/03/10 02:42:15 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/pbm.h>

#include "iommu_common.h"
#define PCI_STC_CTXMATCH_ADDR(STC, CTX) \
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8UL;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
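
/* Streaming mappings are carved out of power-of-two sized clusters.
 * alloc_streaming_cluster() below picks the smallest cluster order
 * (1 << cnum) covering NPAGES, then scans that cluster's IOPTEs
 * round-robin from alloc_info[cnum].next, flushing the IOMMU whenever
 * the scan catches up with the recorded flush point.
 */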
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	for (;;) {
		if (iopte_val(*iopte) == 0UL) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;
}
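
/* Freeing only recomputes the entry's index within its cluster and, if the
 * global flush may not yet have covered it, pulls the cluster's flush point
 * back so the entry is flushed before it can be reused.  The callers clear
 * the first TSB entry themselves (see pci_unmap_single/pci_unmap_sg below).
 */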
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	ent = (base << (32 - PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (!(iopte_val(*iopte) & IOPTE_VALID)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (iopte_val(*iopte) & IOPTE_VALID)
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

#define IOPTE_INVALID	0UL
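
/* For illustration, a streaming IOPTE for physical page PADDR that the
 * device may also write would be composed as:
 *
 *	iopte_val(*iopte) = IOPTE_STREAMING(ctx) | IOPTE_WRITE |
 *			    (paddr & IOPTE_PAGE);
 *
 * which is the pattern pci_map_single() and fill_sg() use below.
 */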
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += PAGE_SIZE;
	}

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		u32 daddr = *dma_addrp;

		npages = size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
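
/* Note the two IOMMU TLB flush strategies used above and below: when the
 * controller supports context flushing (iommu_ctxflush), a single write of
 * the context number flushes every mapping tagged with it; otherwise each
 * DVMA page is flushed individually through iommu_flush.
 */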
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (iopte_val(*walk) != IOPTE_INVALID)
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_val(*iopte) = IOPTE_INVALID;

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	free_pages((unsigned long)cpu, order);
}
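
/* Streaming mappings below choose their protection bits as follows: if the
 * streaming buffer is enabled the IOPTE is marked cacheable in the STC (via
 * IOPTE_STREAMING), otherwise it falls back to a consistent-style entry, and
 * IOPTE_WRITE is set unless the transfer is PCI_DMA_TODEVICE only.
 */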
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = PAGE_ALIGN(oaddr + sz) - (oaddr & PAGE_MASK);
	npages >>= PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~PAGE_MASK);
	base_paddr = __pa(oaddr & PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
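
/* Tearing down a streaming mapping follows one protocol throughout this
 * file: flush the streaming cache (by context if both the IOMMU and STC
 * support it, otherwise page by page), then write the flush-flag physical
 * address to strbuf_fsync and spin until the hardware sets the flag.
 */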
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, i, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
	npages >>= PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (iopte_val(*base) == IOPTE_INVALID)
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while (((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_val(*base) = IOPTE_INVALID;

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
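
/* fill_sg() walks the scatterlist that prepare_sg() has already coalesced
 * into dvma_address/dvma_length chunks and emits one IOPTE per DVMA page,
 * advancing through the underlying sg->address/sg->length entries as they
 * are consumed.
 */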
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg, int nused, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dvma_address & (PAGE_SIZE - 1UL)) +
			      dma_sg->dvma_length +
			      ((u32)(PAGE_SIZE - 1UL))) >> PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = (unsigned long) __pa(sg->address);
				len = sg->length;
				if (((tmp ^ pteval) >> PAGE_SHIFT) != 0UL) {
					pteval = tmp & PAGE_MASK;
					offset = tmp & (PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> PAGE_SHIFT) != 0UL) {
					pteval = (tmp + PAGE_SIZE) & PAGE_MASK;
					offset = 0UL;
					len -= (PAGE_SIZE - (tmp & (PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += PAGE_SIZE;
				len -= (PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while ((pteval << (64 - PAGE_SHIFT)) != 0UL &&
			       pteval == __pa(sg->address) &&
			       ((pteval ^
				 (__pa(sg->address) + sg->length - 1UL)) >> PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I had a
 * hard time keeping this routine from using stack slots for holding
 * variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dvma_address = pci_map_single(pdev, sglist->address, sglist->length, direction);
		sglist->dvma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dvma_length) {
		sgtmp->dvma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu->iommu_cur_ctx++;

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;
}
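
/* On unmap, the extent of the mapping is recovered from the scatterlist
 * itself: the first entry's dvma_address gives the base, and the last
 * entry with a non-zero dvma_length gives the end used to compute npages.
 */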
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dvma_address & PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dvma_length == 0)
			break;
	i--;
	npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (iopte_val(*base) == IOPTE_INVALID)
		printk("pci_unmap_sg called on non-mapped region %08x,%d from %016lx\n",
		       sglist->dvma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled) {
		u32 vaddr = bus_addr;

		PCI_STC_FLUSHFLAG_INIT(strbuf);
		if (strbuf->strbuf_ctxflush &&
		    iommu->iommu_ctxflush) {
			unsigned long matchreg, flushreg;

			flushreg = strbuf->strbuf_ctxflush;
			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
			do {
				pci_iommu_write(flushreg, ctx);
			} while (((long)pci_iommu_read(matchreg)) < 0L);
		} else {
			for (i = 0; i < npages; i++, vaddr += PAGE_SIZE)
				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
		}

		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
		(void) pci_iommu_read(iommu->write_complete_reg);
		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
			membar("#LoadLoad");
	}

	/* Step 2: Clear out first TSB entry. */
	iopte_val(*base) = IOPTE_INVALID;

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = PAGE_ALIGN(bus_addr + sz) - (bus_addr & PAGE_MASK);
	npages >>= PAGE_SHIFT;
	bus_addr &= PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx;

	pcp = pdev->sysdata;
	iommu = &pcp->pbm->parent->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dvma_address - iommu->page_table_map_base) >> PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	PCI_STC_FLUSHFLAG_INIT(strbuf);
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		unsigned long matchreg, flushreg;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
		do {
			pci_iommu_write(flushreg, ctx);
		} while (((long)pci_iommu_read(matchreg)) < 0L);
	} else {
		unsigned long i, npages;
		u32 bus_addr;

		bus_addr = sglist[0].dvma_address & PAGE_MASK;

		for (i = 1; i < nelems; i++)
			if (!sglist[i].dvma_length)
				break;
		i--;
		npages = (PAGE_ALIGN(sglist[i].dvma_address + sglist[i].dvma_length) - bus_addr) >> PAGE_SHIFT;
		for (i = 0; i < npages; i++, bus_addr += PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
	}

	/* Step 3: Perform flush synchronization sequence. */
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);
	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
		membar("#LoadLoad");

	spin_unlock_irqrestore(&iommu->lock, flags);
}
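
/* pci_dma_supported() reports whether a device's DMA mask covers every
 * address this IOMMU may hand out.  For illustration, a card limited to
 * 32-bit addressing would pass device_mask == 0xffffffff, which succeeds
 * as long as that range covers iommu->dma_addr_mask.
 */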
int pci_dma_supported(struct pci_dev *pdev, dma_addr_t device_mask)
{
	struct pcidev_cookie *pcp;
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pci_iommu *iommu;

		pcp = pdev->sysdata;
		iommu = &pcp->pbm->parent->iommu;
		dma_addr_mask = iommu->dma_addr_mask;
	}

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}