coredump: zap_threads: comments && use while_each_thread()
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / lib / iommu-helper.c
bloba3b8d4c3f77a5e7e466bd8f5e5591f3dcaa2c3bd
1 /*
2 * IOMMU helper functions for the free area management
3 */
5 #include <linux/module.h>
6 #include <linux/bitops.h>
/*
 * find_next_zero_area - locate a run of @nr clear bits in @map
 * @map:        bitmap to search
 * @size:       number of valid bits in @map
 * @start:      bit offset to begin the search at
 * @nr:         required length of the run of clear bits
 * @align_mask: the start of the run is rounded up so that
 *              (index & align_mask) == 0
 *
 * Returns the index of the first bit of a suitably aligned free area,
 * or -1 if no such run exists within @size bits.
 */
static unsigned long find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = (index + align_mask) & ~align_mask;

	end = index + nr;
	/*
	 * The run occupies bits [index, end), so end == size is still
	 * in range (the last bit used is size - 1); only end > size
	 * overruns the bitmap.  Using ">=" here would wrongly refuse
	 * an area ending exactly at the last valid bit.
	 */
	if (end > size)
		return -1;
	for (i = index; i < end; i++) {
		if (test_bit(i, map)) {
			/* Occupied bit inside the candidate run: restart past it. */
			start = i + 1;
			goto again;
		}
	}
	return index;
}
/*
 * set_bit_area - mark @len consecutive bits of @map, starting at @i,
 * as allocated (non-atomically).
 */
static inline void set_bit_area(unsigned long *map, unsigned long i,
				int len)
{
	unsigned long bit;

	for (bit = i; bit < i + len; bit++)
		__set_bit(bit, map);
}
/*
 * iommu_is_span_boundary - check whether an allocation would cross a
 * segment boundary.
 * @index:         first bit of the candidate area
 * @nr:            number of bits in the candidate area
 * @shift:         offset of the bitmap within the boundary space
 * @boundary_size: boundary length, must be a power of two
 *
 * Returns nonzero if the area [index, index + nr) would straddle a
 * @boundary_size boundary.
 */
int iommu_is_span_boundary(unsigned int index, unsigned int nr,
			   unsigned long shift,
			   unsigned long boundary_size)
{
	unsigned long offset;

	BUG_ON(!is_power_of_2(boundary_size));

	/* Offset of the area within its boundary segment. */
	offset = (shift + index) & (boundary_size - 1);
	return offset + nr > boundary_size;
}
/*
 * iommu_area_alloc - allocate @nr bits from @map
 * @map:           allocation bitmap
 * @size:          number of valid bits in @map
 * @start:         bit offset to begin searching at
 * @nr:            number of bits to allocate
 * @shift:         offset of the bitmap within the boundary space
 * @boundary_size: segment boundary the area must not cross
 * @align_mask:    alignment mask for the start of the area
 *
 * Finds a free, aligned area that does not span a boundary, marks it
 * allocated and returns its first bit index, or -1 on failure.
 */
unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
			       unsigned long start, unsigned int nr,
			       unsigned long shift, unsigned long boundary_size,
			       unsigned long align_mask)
{
	unsigned long index;

	for (;;) {
		index = find_next_zero_area(map, size, start, nr, align_mask);
		if (index == -1)
			return index;
		if (!iommu_is_span_boundary(index, nr, shift, boundary_size))
			break;
		/* Area straddles a boundary: retry just past it. */
		start = index + 1;
	}

	set_bit_area(map, index, nr);
	return index;
}
EXPORT_SYMBOL(iommu_area_alloc);
/*
 * iommu_area_free - release @nr bits of @map starting at bit @start,
 * making the area available for reallocation.
 */
void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
{
	unsigned long bit;

	for (bit = start; bit < start + nr; bit++)
		__clear_bit(bit, map);
}
EXPORT_SYMBOL(iommu_area_free);