/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */
#include <linux/iova.h>
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	/* only move the cache when the freed iova sits at or above it */
	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
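
/*
 * Worked example (added for illustration, not in the original source):
 * for size = 4 page frames (order 2) and limit_pfn = 0x3ed, pad_size =
 * (0x3ed + 1) % 4 = 2. The allocator below then places pfn_lo at
 * 0x3ed - (4 + 2) + 1 = 0x3e8, which is a multiple of 4 as required.
 */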
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
		struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}
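
/*
 * Example trace (added for illustration, not in the original source):
 * with limit_pfn = 0x3ed, a size-4 aligned request and the rightmost
 * existing iova ending at pfn_hi = 0x0ff, the first loop iteration
 * finds 0x0ff + 4 + 2 <= 0x3ed and breaks with a free slot, so the new
 * range becomes [0x3e8, 0x3eb], directly below the (aligned) limit.
 */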
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);
		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/**
 * alloc_iova - allocates an iova
 * @iovad - iova domain in question
 * @size - size of page frames to allocate
 * @limit_pfn - max limit address
 * @size_aligned - set if size_aligned address range is required
 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
 * looking from limit_pfn instead of from IOVA_START_PFN. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
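
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a caller asking for n naturally aligned page frames below the domain's
 * 32-bit boundary, treating a NULL return as address-space exhaustion.
 */
static inline struct iova *example_alloc_aligned(struct iova_domain *iovad,
						 unsigned long n)
{
	struct iova *iova = alloc_iova(iovad, n, iovad->dma_32bit_pfn, true);

	if (!iova)
		return NULL;	/* no free range large enough */
	/* [iova->pfn_lo, iova->pfn_hi] now belongs to this caller. */
	return iova;
}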
/**
 * find_iova - finds an iova for a given pfn
 * @iovad - iova domain in question.
 * @pfn - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
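
/*
 * Illustrative pairing (added, not in the original file): the
 * single-owner pattern the comment above relies on, where the thread
 * that looked an iova up is also the one that releases it.
 */
static inline void example_lookup_and_free(struct iova_domain *iovad,
					   unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);	/* same as free_iova(iovad, pfn) */
}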
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iova's in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
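
/*
 * Lifecycle sketch (added for illustration; DMA_32BIT_PFN is an assumed
 * caller-side constant, as used by the Intel IOMMU driver):
 *
 *	struct iova_domain domain;
 *
 *	init_iova_domain(&domain, DMA_32BIT_PFN);
 *	... alloc_iova()/free_iova() traffic while the domain is live ...
 *	put_iova_domain(&domain);	releases every remaining iova
 */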
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	/* the ranges intersect iff neither lies wholly above the other */
	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);
	return iova;
}
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
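
/*
 * Worked example (added for illustration, not in the original source):
 * reserving [0x90, 0x2ff] against an existing iova [0x100, 0x1ff] first
 * stretches the iova down so its pfn_lo becomes 0x90, then bumps the
 * caller's *pfn_lo up to 0x200, leaving reserve_iova() below to insert
 * the remaining tail [0x200, 0x2ff] as a new node.
 */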
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserver node
	 * or need to insert remaining non overlap addr range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
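
/*
 * Usage sketch (added for illustration): carving a platform hole out of
 * a domain at setup time so alloc_iova() never hands it out. The pfn
 * values are assumptions for the example, not constants from this file.
 */
static inline void example_reserve_low_hole(struct iova_domain *iovad)
{
	/* keep page frames 0x0 - 0xff (the first 1MiB) unavailable */
	if (!reserve_iova(iovad, 0x0, 0xff))
		printk(KERN_ERR "example: low-memory reservation failed\n");
}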
/**
 * copy_reserved_iova - copies the reserved iova's between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iova's from one domain to
 * another domain.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
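
/*
 * Usage sketch (added for illustration): replaying the reservations of
 * an existing domain into a freshly initialized one, e.g. when a device
 * is moved to a new domain. DMA_32BIT_PFN is an assumed caller-side
 * constant, as in the earlier sketches.
 *
 *	init_iova_domain(&new_domain, DMA_32BIT_PFN);
 *	copy_reserved_iova(&old_domain, &new_domain);
 */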