/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This file is released under the GPLv2.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include "iova.h"

void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
	spin_lock_init(&iovad->iova_alloc_lock);
	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->dma_32bit_pfn = pfn_32bit;
}
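/*
 * Illustrative usage sketch (not part of the original file; DMA_32BIT_PFN is
 * an assumed constant naming the highest 32-bit-addressable pfn): a caller
 * typically sets up one iova_domain per protection domain before allocating
 * from it.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, DMA_32BIT_PFN);
 */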
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}
static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}
static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo)
		iovad->cached32_node = rb_next(&free->node);
}
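/*
 * Illustrative note (not from the original source; the dma_32bit_pfn value
 * and allocation sizes are assumptions): the cached32_node optimization
 * avoids walking from rb_last() on every 32-bit allocation. With
 * dma_32bit_pfn = 0xfffff and two 16-pfn allocations:
 *
 *	iova1 = alloc_iova(iovad, 16, 0xfffff, 1);	// gets 0xffff0..0xfffff
 *	iova2 = alloc_iova(iovad, 16, 0xfffff, 1);	// search resumes just
 *							// below iova1, not at
 *							// rb_last()
 *
 * __cached_rbnode_insert_update() remembers the most recent 32-bit node,
 * and the delete hook above advances the cache whenever a node at or above
 * the cached one is erased, so the cache never points at a freed node.
 */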
/* Computes the padding size required, to make the
 * start address naturally aligned on its size
 */
static int
iova_get_pad_size(int size, unsigned int limit_pfn)
{
	unsigned int pad_size = 0;
	unsigned int order = ilog2(size);

	if (order)
		pad_size = (limit_pfn + 1) % (1 << order);

	return pad_size;
}
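/*
 * Illustrative worked example (not from the original source): with size = 8
 * (order = 3) and limit_pfn = 0x1005, pad_size = (0x1005 + 1) % 8 = 6. The
 * allocator below then picks pfn_lo = limit_pfn - (size + pad_size) + 1 =
 * 0xff8 and pfn_hi = 0xfff, so pfn_lo is 8-aligned and the 6 padding pfns
 * 0x1000..0x1005 are simply left unused.
 */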
static int __alloc_iova_range(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, struct iova *new, bool size_aligned)
{
	struct rb_node *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return 0;
}
static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}
/**
 * alloc_iova - allocates an iova
 * @iovad - iova domain in question
 * @size - size of page frames to allocate
 * @limit_pfn - max limit address
 * @size_aligned - set if size_aligned address range is required
 * This function allocates an iova in the range IOVA_START_PFN to limit_pfn,
 * searching down from limit_pfn rather than up from IOVA_START_PFN. If the
 * size_aligned flag is set then the allocated address iova->pfn_lo will be
 * naturally aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	unsigned long flags;
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	/* If size aligned is set then round the size to
	 * the next power of two.
	 */
	if (size_aligned)
		size = __roundup_pow_of_two(size);

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	ret = __alloc_iova_range(iovad, size, limit_pfn, new_iova,
			size_aligned);

	if (ret) {
		spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
		free_iova_mem(new_iova);
		return NULL;
	}

	/* Insert the new_iova into domain rbtree by holding writer lock */
	spin_lock(&iovad->iova_rbtree_lock);
	iova_insert_rbtree(&iovad->rbroot, new_iova);
	__cached_rbnode_insert_update(iovad, limit_pfn, new_iova);
	spin_unlock(&iovad->iova_rbtree_lock);

	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);

	return new_iova;
}
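/*
 * Illustrative usage sketch (not part of the original file; nrpages,
 * IOVA_PFN() and DMA_32BIT_MASK are assumptions modeled on a typical IOMMU
 * driver): a DMA-mapping path allocates a naturally aligned pfn range below
 * 4GB, programs its page tables for iova->pfn_lo..iova->pfn_hi, and later
 * releases the range with __free_iova() or free_iova().
 *
 *	struct iova *iova;
 *
 *	iova = alloc_iova(iovad, nrpages, IOVA_PFN(DMA_32BIT_MASK), 1);
 *	if (!iova)
 *		return -ENOMEM;
 *	// ... map pages starting at iova->pfn_lo ...
 *	__free_iova(iovad, iova);
 */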
/**
 * find_iova - finds an iova for a given pfn
 * @iovad - iova domain in question.
 * @pfn - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
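/*
 * Illustrative note (not from the original source; the dma handle and the
 * pfn shift are assumptions): free_iova() is the pfn-keyed convenience
 * wrapper for callers that only kept the starting address of a mapping.
 *
 *	dma_addr_t dma = ...;			// hypothetical mapping handle
 *	free_iova(iovad, dma >> PAGE_SHIFT);	// same effect as
 *						// __free_iova(iovad,
 *						//	find_iova(iovad, pfn))
 */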
/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}
static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (!iova)
		return iova;

	iova->pfn_hi = pfn_hi;
	iova->pfn_lo = pfn_lo;
	iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}
static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
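/*
 * Illustrative worked example (not from the original source): reserving
 * [0x10, 0x30] when the tree already holds a node covering [0x20, 0x28].
 * __adjust_overlap_range() lowers the existing node's pfn_lo to 0x10
 * (absorbing the front of the request) and bumps the requested pfn_lo to
 * 0x29, so reserve_iova() below only has to insert the remaining tail
 * [0x29, 0x30] as a new node.
 */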
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this range is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
	spin_lock(&iovad->iova_rbtree_lock);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;
		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock(&iovad->iova_rbtree_lock);
	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
	return iova;
}
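/*
 * Illustrative usage sketch (not part of the original file; the IOAPIC
 * window address, size and IOVA_PFN() are assumptions): drivers carve fixed
 * ranges out of the allocator so that device-visible MMIO windows are never
 * handed out as DMA addresses.
 *
 *	#define IOAPIC_RANGE_START	0xfee00000	// hypothetical values
 *	#define IOAPIC_RANGE_END	0xfeefffff
 *
 *	reserve_iova(iovad, IOVA_PFN(IOAPIC_RANGE_START),
 *		IOVA_PFN(IOAPIC_RANGE_END));
 */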
/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * the other.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_alloc_lock, flags);
	spin_lock(&from->iova_rbtree_lock);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
				iova->pfn_lo, iova->pfn_lo);
	}
	spin_unlock(&from->iova_rbtree_lock);
	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
}
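/*
 * Illustrative usage sketch (not part of the original file; the
 * reserved_iova_list variable, the domain structure and DMA_32BIT_PFN are
 * hypothetical names modeled on a typical IOMMU driver): a newly created
 * domain inherits the globally reserved ranges before any allocations are
 * made from it.
 *
 *	static struct iova_domain reserved_iova_list;	// hypothetical
 *
 *	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
 *	copy_reserved_iova(&reserved_iova_list, &domain->iovad);
 */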