/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
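
/*
 * Usage sketch (editorial illustration, not part of the original driver):
 * a caller embeds an iova_domain in its own state and initializes it once
 * before any allocations. DMA_32BIT_PFN stands for whatever 32-bit pfn
 * limit the caller derives from its DMA mask; the name is an assumption
 * here, not something this file defines.
 *
 *	static struct iova_domain domain;
 *
 *	init_iova_domain(&domain, DMA_32BIT_PFN);
 */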

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
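
/*
 * Added note: cached32_node remembers where the last 32-bit allocation
 * went, so the next request with the same dma_32bit_pfn limit resumes the
 * backwards walk just below it instead of at rb_last(). For example, if
 * the cached node covers pfns [0x9000, 0x90ff], *limit_pfn is lowered to
 * 0x8fff and the walk starts at the node preceding the cached one.
 */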

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}

/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
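
/*
 * Worked example (added for illustration): for size = 8 pfns (order 3) and
 * limit_pfn = 0x1005, pad_size = 0x1006 % 8 = 6. The caller then places
 * the range at limit_pfn - (size + pad_size) + 1 = 0xff8, which is aligned
 * on 8, instead of the unaligned 0xffe it would get with no padding.
 */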

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new iova into the domain rbtree while holding the writer lock */
	/* Add new node and rebalance tree. */
	{
		/* Start the descent at prev, the node adjacent to the free
		 * slot we just found, rather than at the root. */
		struct rb_node **entry = &((prev)), *parent = NULL;

		/* Figure out where to put the new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add the new node and rebalance the tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}

	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
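
/*
 * Added illustration of the backwards walk above: with existing ranges
 * [0x100, 0x1ff] and [0x300, 0x3ff] and limit_pfn = 0x4ff, the walk starts
 * at [0x300, 0x3ff]; a request for 0x80 pfns (not size-aligned) fits since
 * 0x3ff + 0x80 <= 0x4ff, so the new range becomes [0x480, 0x4ff].
 */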

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put the new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add the new node and rebalance the tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
 * alloc_iova - allocates an iova
 * @iovad: iova domain in question
 * @size: size of page frames to allocate
 * @limit_pfn: max limit address
 * @size_aligned: set if a size-aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching down from limit_pfn instead of up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
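
/*
 * Usage sketch (editorial illustration): allocate a size-aligned range of
 * 16 pfns below a caller-chosen ceiling and release it when done.
 * `dev_limit_pfn` is a hypothetical per-device limit, not defined here.
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(&domain, 16, dev_limit_pfn, true);
 *	if (!iova)
 *		return -ENOMEM;
 *	... map and use pfns iova->pfn_lo .. iova->pfn_hi ...
 *	__free_iova(&domain, iova);
 */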

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: iova domain in question.
 * @pfn: pfn that was previously allocated
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
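
/*
 * Usage sketch (editorial illustration): a caller that only kept the
 * starting pfn of an earlier allocation can release it by pfn alone;
 * free_iova() pairs find_iova() with __free_iova():
 *
 *	free_iova(&domain, start_pfn);
 */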

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
 * reserve_iova - reserves an iova in the given range
 * @iovad: iova domain pointer
 * @pfn_lo: lower page frame address
 * @pfn_hi: higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reservation
	 * or we need to insert the remaining non-overlapping addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
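
/*
 * Usage sketch (editorial illustration): keep a fixed window away from the
 * allocator, e.g. an interrupt/MSI hole; the pfn values are made up:
 *
 *	if (!reserve_iova(&domain, 0xfee00, 0xfeeff))
 *		printk(KERN_ERR "iova: failed to reserve MSI range\n");
 */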

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: source domain from where to copy
 * @to: destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
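
/*
 * Usage sketch (editorial illustration): when a new domain is created, the
 * global reservations can be carried over before handing the domain to a
 * device. `reserved_ranges` is a hypothetical template domain kept by the
 * caller, and DMA_32BIT_PFN is the same assumed limit as above:
 *
 *	init_iova_domain(&new_domain, DMA_32BIT_PFN);
 *	copy_reserved_iova(&reserved_ranges, &new_domain);
 */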