/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>
#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)
#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/* mapping types, indexing type2name[] below */
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct stack_trace stacktrace;
	unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

struct hash_bucket {
	struct list_head list;
	spinlock_t       lock;
} ____cacheline_aligned_in_smp;
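
/*
 * The per-bucket lock lives next to its list head, and the
 * ____cacheline_aligned_in_smp annotation above keeps each bucket on
 * its own cache line, so contention on one bucket does not cause
 * false sharing with neighbouring buckets.
 */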
/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */
static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
static bool global_disable __read_mostly;

/* Global error count */
static u32 error_count;

/* Global error show enable */
static u32 show_all_errors __read_mostly;
/* Number of errors to show */
static u32 show_num_errors = 1;

static u32 num_free_entries;
static u32 min_free_entries;
static u32 nr_total_entries;

/* number of preallocated entries requested by kernel cmdline */
static u32 req_entries;
/* debugfs dentry's for the stuff above */
static struct dentry *dma_debug_dent        __read_mostly;
static struct dentry *global_disable_dent   __read_mostly;
static struct dentry *error_count_dent      __read_mostly;
static struct dentry *show_all_errors_dent  __read_mostly;
static struct dentry *show_num_errors_dent  __read_mostly;
static struct dentry *num_free_entries_dent __read_mostly;
static struct dentry *min_free_entries_dent __read_mostly;
static const char *type2name[4] = { "single", "page",
				    "scatter-gather", "coherent" };

static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				   "DMA_FROM_DEVICE", "DMA_NONE" };
/*
 * The access to some variables in this macro is racy. We can't use atomic_t
 * here because all these variables are exported to debugfs. Some of them are
 * even writeable. This is also the reason why a lock won't help much. But
 * anyway, the races are no big deal. Here is why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *                is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one warning more in
 *                    the system log than the user configured. This variable
 *                    is writeable via debugfs.
 */
static inline void dump_entry_trace(struct dma_debug_entry *entry)
{
#ifdef CONFIG_STACKTRACE
	if (entry) {
		printk(KERN_WARNING "Mapped at:\n");
		print_stack_trace(&entry->stacktrace, 0);
	}
#endif
}
#define err_printk(dev, entry, format, arg...) do {		\
		error_count += 1;				\
		if (show_all_errors || show_num_errors > 0) {	\
			WARN(1, "%s %s: " format,		\
			     dev_driver_string(dev),		\
			     dev_name(dev) , ## arg);		\
			dump_entry_trace(entry);		\
		}						\
		if (!show_all_errors && show_num_errors > 0)	\
			show_num_errors -= 1;			\
	} while (0);
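
/*
 * Note: unless show_all_errors is set (the "all_errors" debugfs file),
 * only the first show_num_errors violations trigger a WARN() with a
 * backtrace; error_count still counts every violation.
 */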
/*
 * Hash related functions
 *
 * Every DMA-API request is saved into a struct dma_debug_entry. To
 * have quick access to these structs they are stored in a hash.
 */
static int hash_fn(struct dma_debug_entry *entry)
{
	/*
	 * The hash function is based on the dma address. With a shift of
	 * HASH_FN_SHIFT (13) and a 10-bit mask we use bits 13-22 as the
	 * index into the hash.
	 */
	return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
}
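
/*
 * Example: dev_addr 0x12345000 hashes to
 * (0x12345000 >> 13) & 0x3ff = 0x1a2, so all mappings whose device
 * addresses agree in bits 13-22 share a bucket.
 */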
/*
 * Request exclusive access to a hash bucket for a given dma_debug_entry.
 */
static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
					   unsigned long *flags)
{
	int idx = hash_fn(entry);
	unsigned long __flags;

	spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
	*flags = __flags;

	return &dma_entry_hash[idx];
}
/*
 * Give up exclusive access to the hash bucket
 */
static void put_hash_bucket(struct hash_bucket *bucket,
			    unsigned long *flags)
{
	unsigned long __flags = *flags;

	spin_unlock_irqrestore(&bucket->lock, __flags);
}
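
/*
 * get_hash_bucket() and put_hash_bucket() must be strictly paired: the
 * bucket lock is taken with interrupts disabled, and the saved irq
 * flags word is handed from one call to the other through *flags.
 */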
/*
 * Search a given entry in the hash bucket list
 */
static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
						struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry, *ret = NULL;
	int matches = 0, match_lvl, last_lvl = 0;

	list_for_each_entry(entry, &bucket->list, list) {
		if ((entry->dev_addr != ref->dev_addr) ||
		    (entry->dev != ref->dev))
			continue;

		/*
		 * Some drivers map the same physical address multiple
		 * times. Without a hardware IOMMU this results in the
		 * same device addresses being put into the dma-debug
		 * hash multiple times too. This can result in false
		 * positives being reported. Therefore we implement a
		 * best-fit algorithm here which returns the entry from
		 * the hash which fits best to the reference value
		 * instead of the first-fit.
		 */
		matches += 1;
		match_lvl = 0;
		entry->size      == ref->size      ? ++match_lvl : match_lvl;
		entry->type      == ref->type      ? ++match_lvl : match_lvl;
		entry->direction == ref->direction ? ++match_lvl : match_lvl;

		if (match_lvl == 3) {
			/* perfect-fit - return the result */
			return entry;
		} else if (match_lvl > last_lvl) {
			/*
			 * We found an entry that fits better than the
			 * previous one
			 */
			last_lvl = match_lvl;
			ret      = entry;
		}
	}

	/*
	 * If we have multiple matches but no perfect-fit, just return
	 * NULL.
	 */
	ret = (matches == 1) ? ret : NULL;

	return ret;
}
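
/*
 * A NULL return thus means either that no entry matches the
 * device/address pair at all, or that several entries match it but
 * none agrees exactly in size, type and direction - the callers
 * report both cases as driver bugs.
 */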
/*
 * Add an entry to a hash bucket
 */
static void hash_bucket_add(struct hash_bucket *bucket,
			    struct dma_debug_entry *entry)
{
	list_add_tail(&entry->list, &bucket->list);
}

/*
 * Remove entry from a hash bucket list
 */
static void hash_bucket_del(struct dma_debug_entry *entry)
{
	list_del(&entry->list);
}
/*
 * Dump mapping entries for debugging purposes
 */
void debug_dma_dump_mappings(struct device *dev)
{
	int idx;

	for (idx = 0; idx < HASH_SIZE; idx++) {
		struct hash_bucket *bucket = &dma_entry_hash[idx];
		struct dma_debug_entry *entry;
		unsigned long flags;

		spin_lock_irqsave(&bucket->lock, flags);

		list_for_each_entry(entry, &bucket->list, list) {
			if (!dev || dev == entry->dev) {
				dev_info(entry->dev,
					 "%s idx %d P=%Lx D=%Lx L=%Lx %s\n",
					 type2name[entry->type], idx,
					 (unsigned long long)entry->paddr,
					 entry->dev_addr, entry->size,
					 dir2name[entry->direction]);
			}
		}

		spin_unlock_irqrestore(&bucket->lock, flags);
	}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
 * Wrapper function for adding an entry to the hash.
 * This function takes care of locking itself.
 */
static void add_dma_entry(struct dma_debug_entry *entry)
{
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(entry, &flags);
	hash_bucket_add(bucket, entry);
	put_hash_bucket(bucket, &flags);
}
static struct dma_debug_entry *__dma_entry_alloc(void)
{
	struct dma_debug_entry *entry;

	entry = list_entry(free_entries.next, struct dma_debug_entry, list);
	list_del(&entry->list);
	memset(entry, 0, sizeof(*entry));

	num_free_entries -= 1;
	if (num_free_entries < min_free_entries)
		min_free_entries = num_free_entries;

	return entry;
}
/* struct dma_entry allocator
 *
 * The next two functions implement the allocator for
 * struct dma_debug_entries.
 */
static struct dma_debug_entry *dma_entry_alloc(void)
{
	struct dma_debug_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&free_entries_lock, flags);

	if (list_empty(&free_entries)) {
		printk(KERN_ERR "DMA-API: debugging out of memory "
				"- disabling\n");
		global_disable = true;
		goto out;
	}

	entry = __dma_entry_alloc();

#ifdef CONFIG_STACKTRACE
	entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
	entry->stacktrace.entries = entry->st_entries;
	entry->stacktrace.skip = 2;
	save_stack_trace(&entry->stacktrace);
#endif

out:
	spin_unlock_irqrestore(&free_entries_lock, flags);

	return entry;
}
static void dma_entry_free(struct dma_debug_entry *entry)
{
	unsigned long flags;

	/*
	 * add to beginning of the list - this way the entries are
	 * more likely cache hot when they are reallocated.
	 */
	spin_lock_irqsave(&free_entries_lock, flags);
	list_add(&entry->list, &free_entries);
	num_free_entries += 1;
	spin_unlock_irqrestore(&free_entries_lock, flags);
}
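
/*
 * Note that dma_entry_free() does not raise min_free_entries again:
 * the low-water mark only ever falls (in __dma_entry_alloc()), so the
 * debugfs file reports the worst shortage observed so far.
 */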
int dma_debug_resize_entries(u32 num_entries)
{
	int i, delta, ret = 0;
	unsigned long flags;
	struct dma_debug_entry *entry;
	LIST_HEAD(tmp);

	spin_lock_irqsave(&free_entries_lock, flags);

	if (nr_total_entries < num_entries) {
		delta = num_entries - nr_total_entries;

		spin_unlock_irqrestore(&free_entries_lock, flags);

		for (i = 0; i < delta; i++) {
			entry = kzalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				break;

			list_add_tail(&entry->list, &tmp);
		}

		spin_lock_irqsave(&free_entries_lock, flags);

		list_splice(&tmp, &free_entries);
		nr_total_entries += i;
		num_free_entries += i;
	} else {
		delta = nr_total_entries - num_entries;

		for (i = 0; i < delta && !list_empty(&free_entries); i++) {
			entry = __dma_entry_alloc();
			kfree(entry);
		}

		nr_total_entries -= i;
	}

	if (nr_total_entries != num_entries)
		ret = 1;

	spin_unlock_irqrestore(&free_entries_lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_debug_resize_entries);
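
/*
 * This lets architectures grow or shrink the preallocated pool after
 * dma_debug_init(); a non-zero return value means the pool could not
 * be resized to exactly num_entries, e.g. because kzalloc() failed
 * while growing it.
 */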
/*
 * DMA-API debugging init code
 *
 * The init code does two things:
 *   1. Initialize core data structures
 *   2. Preallocate a given number of dma_debug_entry structs
 */
static int prealloc_memory(u32 num_entries)
{
	struct dma_debug_entry *entry, *next_entry;
	int i;

	for (i = 0; i < num_entries; ++i) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out_err;

		list_add_tail(&entry->list, &free_entries);
	}

	num_free_entries = num_entries;
	min_free_entries = num_entries;

	printk(KERN_INFO "DMA-API: preallocated %d debug entries\n",
	       num_entries);

	return 0;

out_err:

	list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	return -ENOMEM;
}
static int dma_debug_fs_init(void)
{
	dma_debug_dent = debugfs_create_dir("dma-api", NULL);
	if (!dma_debug_dent) {
		printk(KERN_ERR "DMA-API: can not create debugfs directory\n");
		return -ENOMEM;
	}

	global_disable_dent = debugfs_create_bool("disabled", 0444,
			dma_debug_dent,
			(u32 *)&global_disable);
	if (!global_disable_dent)
		goto out_err;

	error_count_dent = debugfs_create_u32("error_count", 0444,
			dma_debug_dent, &error_count);
	if (!error_count_dent)
		goto out_err;

	show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
			dma_debug_dent,
			&show_all_errors);
	if (!show_all_errors_dent)
		goto out_err;

	show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
			dma_debug_dent,
			&show_num_errors);
	if (!show_num_errors_dent)
		goto out_err;

	num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
			dma_debug_dent,
			&num_free_entries);
	if (!num_free_entries_dent)
		goto out_err;

	min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
			dma_debug_dent,
			&min_free_entries);
	if (!min_free_entries_dent)
		goto out_err;

	return 0;

out_err:
	debugfs_remove_recursive(dma_debug_dent);

	return -ENOMEM;
}
void dma_debug_add_bus(struct bus_type *bus)
{
	/* FIXME: register notifier */
}
/*
 * Let the architectures decide how many entries should be preallocated.
 */
void dma_debug_init(u32 num_entries)
{
	int i;

	if (global_disable)
		return;

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
	}

	if (dma_debug_fs_init() != 0) {
		printk(KERN_ERR "DMA-API: error creating debugfs entries "
				"- disabling\n");
		global_disable = true;

		return;
	}

	if (req_entries)
		num_entries = req_entries;

	if (prealloc_memory(num_entries) != 0) {
		printk(KERN_ERR "DMA-API: debugging out of memory error "
				"- disabled\n");
		global_disable = true;

		return;
	}

	nr_total_entries = num_free_entries;

	printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n");
}
static __init int dma_debug_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (strncmp(str, "off", 3) == 0) {
		printk(KERN_INFO "DMA-API: debugging disabled on kernel "
				 "command line\n");
		global_disable = true;
	}

	return 0;
}

static __init int dma_debug_entries_cmdline(char *str)
{
	int res;

	if (!str)
		return -EINVAL;

	res = get_option(&str, &req_entries);

	if (!res)
		req_entries = 0;

	return 0;
}

__setup("dma_debug=", dma_debug_cmdline);
__setup("dma_debug_entries=", dma_debug_entries_cmdline);
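
/*
 * Example kernel command line usage:
 *   dma_debug=off            disables the checks completely
 *   dma_debug_entries=<num>  overrides the preallocation size chosen
 *                            by the architecture
 */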
static void check_unmap(struct dma_debug_entry *ref)
{
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	if (dma_mapping_error(ref->dev, ref->dev_addr)) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free an invalid DMA memory address\n");
		return;
	}

	bucket = get_hash_bucket(ref, &flags);
	entry = hash_bucket_find(bucket, ref);

	if (!entry) {
		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
			   "to free DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   ref->dev_addr, ref->size);
		goto out;
	}

	if (ref->size != entry->size) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different size "
			   "[device address=0x%016llx] [map size=%llu bytes] "
			   "[unmap size=%llu bytes]\n",
			   ref->dev_addr, entry->size, ref->size);
	}

	if (ref->type != entry->type) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with wrong function "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped as %s] [unmapped as %s]\n",
			   ref->dev_addr, ref->size,
			   type2name[entry->type], type2name[ref->type]);
	} else if ((entry->type == dma_debug_coherent) &&
		   (ref->paddr != entry->paddr)) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different CPU address "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[cpu alloc address=%p] [cpu free address=%p]",
			   ref->dev_addr, ref->size,
			   (void *)entry->paddr, (void *)ref->paddr);
	}

	if (ref->sg_call_ents && ref->type == dma_debug_sg &&
	    ref->sg_call_ents != entry->sg_call_ents) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA sg list with different entry count "
			   "[map count=%d] [unmap count=%d]\n",
			   entry->sg_call_ents, ref->sg_call_ents);
	}

	/*
	 * This may be no bug in reality - but most implementations of the
	 * DMA API don't handle this properly, so check for it here
	 */
	if (ref->direction != entry->direction) {
		err_printk(ref->dev, entry, "DMA-API: device driver frees "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [unmapped with %s]\n",
			   ref->dev_addr, ref->size,
			   dir2name[entry->direction],
			   dir2name[ref->direction]);
	}

	hash_bucket_del(entry);
	dma_entry_free(entry);

out:
	put_hash_bucket(bucket, &flags);
}
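
/*
 * check_unmap() is the common backend for the unmap and free hooks
 * below: it looks up the map-time entry, cross-checks size, type, CPU
 * address, sg entry count and direction, and finally removes and
 * frees the debug entry.
 */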
static void check_for_stack(struct device *dev, void *addr)
{
	if (object_is_on_stack(addr))
		err_printk(dev, NULL, "DMA-API: device driver maps memory "
				"from stack [addr=%p]\n", addr);
}
static inline bool overlap(void *addr, u64 size, void *start, void *end)
{
	void *addr2 = (char *)addr + size;

	return ((addr >= start && addr < end) ||
		(addr2 >= start && addr2 < end) ||
		((addr < start) && (addr2 >= end)));
}
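
/*
 * The three clauses cover a range that starts inside [start, end),
 * one that ends inside it, and one that spans it completely -
 * together they detect any overlap of [addr, addr + size) with
 * [start, end).
 */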
static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
{
	if (overlap(addr, size, _text, _etext) ||
	    overlap(addr, size, __start_rodata, __end_rodata))
		err_printk(dev, NULL, "DMA-API: device driver maps "
				"memory from kernel text or rodata "
				"[addr=%p] [size=%llu]\n", addr, size);
}
static void check_sync(struct device *dev, dma_addr_t addr,
		       u64 size, u64 offset, int direction, bool to_cpu)
{
	struct dma_debug_entry ref = {
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};
	struct dma_debug_entry *entry;
	struct hash_bucket *bucket;
	unsigned long flags;

	bucket = get_hash_bucket(&ref, &flags);

	entry = hash_bucket_find(bucket, &ref);

	if (!entry) {
		err_printk(dev, NULL, "DMA-API: device driver tries "
			   "to sync DMA memory it has not allocated "
			   "[device address=0x%016llx] [size=%llu bytes]\n",
			   (unsigned long long)addr, size);
		goto out;
	}

	if ((offset + size) > entry->size) {
		err_printk(dev, entry, "DMA-API: device driver syncs"
			   " DMA memory outside allocated range "
			   "[device address=0x%016llx] "
			   "[allocation size=%llu bytes] [sync offset=%llu] "
			   "[sync size=%llu]\n", entry->dev_addr, entry->size,
			   offset, size);
	}

	if (direction != entry->direction) {
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "DMA memory with different direction "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);
	}

	if (entry->direction == DMA_BIDIRECTIONAL)
		goto out;

	if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
	    !(direction == DMA_TO_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device read-only DMA memory for cpu "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

	if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
	    !(direction == DMA_FROM_DEVICE))
		err_printk(dev, entry, "DMA-API: device driver syncs "
			   "device write-only DMA memory to device "
			   "[device address=0x%016llx] [size=%llu bytes] "
			   "[mapped with %s] [synced with %s]\n",
			   (unsigned long long)addr, entry->size,
			   dir2name[entry->direction],
			   dir2name[direction]);

out:
	put_hash_bucket(bucket, &flags);
}
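
/*
 * The final two checks encode the ownership rules: a DMA_TO_DEVICE
 * mapping is device read-only memory, so syncing it for the cpu is
 * flagged; a DMA_FROM_DEVICE mapping is device write-only, so syncing
 * it for the device is flagged. DMA_BIDIRECTIONAL mappings are exempt
 * from both checks.
 */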
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
			size_t size, int direction, dma_addr_t dma_addr,
			bool map_single)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(dma_mapping_error(dev, dma_addr)))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->dev       = dev;
	entry->type      = dma_debug_page;
	entry->paddr     = page_to_phys(page) + offset;
	entry->dev_addr  = dma_addr;
	entry->size      = size;
	entry->direction = direction;

	if (map_single)
		entry->type = dma_debug_single;

	if (!PageHighMem(page)) {
		void *addr = ((char *)page_address(page)) + offset;
		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_map_page);
void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
			  size_t size, int direction, bool map_single)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_page,
		.dev            = dev,
		.dev_addr       = addr,
		.size           = size,
		.direction      = direction,
	};

	if (unlikely(global_disable))
		return;

	if (map_single)
		ref.type = dma_debug_single;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_unmap_page);
void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
		      int nents, int mapped_ents, int direction)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, mapped_ents, i) {
		entry = dma_entry_alloc();
		if (!entry)
			return;

		entry->type           = dma_debug_sg;
		entry->dev            = dev;
		entry->paddr          = sg_phys(s);
		entry->size           = s->length;
		entry->dev_addr       = s->dma_address;
		entry->direction      = direction;
		entry->sg_call_ents   = nents;
		entry->sg_mapped_ents = mapped_ents;

		if (!PageHighMem(sg_page(s))) {
			check_for_stack(dev, sg_virt(s));
			check_for_illegal_area(dev, sg_virt(s), s->length);
		}

		add_dma_entry(entry);
	}
}
EXPORT_SYMBOL(debug_dma_map_sg);
void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			int nelems, int dir)
{
	struct dma_debug_entry *entry;
	struct scatterlist *s;
	int mapped_ents = 0, i;
	unsigned long flags;

	if (unlikely(global_disable))
		return;

	for_each_sg(sglist, s, nelems, i) {

		struct dma_debug_entry ref = {
			.type           = dma_debug_sg,
			.dev            = dev,
			.paddr          = sg_phys(s),
			.dev_addr       = s->dma_address,
			.size           = s->length,
			.direction      = dir,
			.sg_call_ents   = 0,
		};

		if (mapped_ents && i >= mapped_ents)
			break;

		if (mapped_ents == 0) {
			struct hash_bucket *bucket;

			/*
			 * Only the first sg_entry carries the valid
			 * sg_call_ents value
			 */
			ref.sg_call_ents = nelems;
			bucket = get_hash_bucket(&ref, &flags);
			entry = hash_bucket_find(bucket, &ref);
			if (entry)
				mapped_ents = entry->sg_mapped_ents;
			put_hash_bucket(bucket, &flags);
		}

		check_unmap(&ref);
	}
}
EXPORT_SYMBOL(debug_dma_unmap_sg);
void debug_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t dma_addr, void *virt)
{
	struct dma_debug_entry *entry;

	if (unlikely(global_disable))
		return;

	if (unlikely(virt == NULL))
		return;

	entry = dma_entry_alloc();
	if (!entry)
		return;

	entry->type      = dma_debug_coherent;
	entry->dev       = dev;
	entry->paddr     = virt_to_phys(virt);
	entry->size      = size;
	entry->dev_addr  = dma_addr;
	entry->direction = DMA_BIDIRECTIONAL;

	add_dma_entry(entry);
}
EXPORT_SYMBOL(debug_dma_alloc_coherent);
void debug_dma_free_coherent(struct device *dev, size_t size,
			     void *virt, dma_addr_t addr)
{
	struct dma_debug_entry ref = {
		.type           = dma_debug_coherent,
		.dev            = dev,
		.paddr          = virt_to_phys(virt),
		.dev_addr       = addr,
		.size           = size,
		.direction      = DMA_BIDIRECTIONAL,
	};

	if (unlikely(global_disable))
		return;

	check_unmap(&ref);
}
EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
void debug_dma_sync_single_for_device(struct device *dev,
				      dma_addr_t dma_handle, size_t size,
				      int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, 0, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
void debug_dma_sync_single_range_for_cpu(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
void debug_dma_sync_single_range_for_device(struct device *dev,
					    dma_addr_t dma_handle,
					    unsigned long offset,
					    size_t size, int direction)
{
	if (unlikely(global_disable))
		return;

	check_sync(dev, dma_handle, size, offset, direction, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			       int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, true);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
				  int nelems, int direction)
{
	struct scatterlist *s;
	int i;

	if (unlikely(global_disable))
		return;

	for_each_sg(sg, s, nelems, i) {
		check_sync(dev, s->dma_address, s->dma_length, 0,
			   direction, false);
	}
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
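
/*
 * All of the sync wrappers above funnel into check_sync(); the final
 * bool argument states whether the sync moves ownership to the cpu
 * (*_for_cpu, true) or back to the device (*_for_device, false).
 */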