/*
 *      memrar_allocator 1.0:  An allocator for Intel RAR.
 *
 *      Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of version 2 of the GNU General
 *      Public License as published by the Free Software Foundation.
 *
 *      This program is distributed in the hope that it will be
 *      useful, but WITHOUT ANY WARRANTY; without even the implied
 *      warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 *      PURPOSE.  See the GNU General Public License for more details.
 *      You should have received a copy of the GNU General Public
 *      License along with this program; if not, write to the Free
 *      Software Foundation, Inc., 59 Temple Place - Suite 330,
 *      Boston, MA  02111-1307, USA.
 *      The full GNU General Public License is included in this
 *      distribution in the file called COPYING.
 *
 * ------------------------------------------------------------------
 *
 *      This simple allocator implementation provides a
 *      malloc()/free()-like interface for reserving space within a
 *      previously reserved block of memory.  It is not specific to
 *      any hardware, nor is it coupled with the lower level paging
 *      mechanism.
 *
 *      The primary goal of this implementation is to provide a means
 *      to partition an arbitrary block of memory without actually
 *      accessing the memory or incurring any hardware side-effects
 *      (e.g. paging).  It is, in effect, a bookkeeping mechanism for
 *      buffers.
 */
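
/*
 *      Example usage (an illustrative sketch, not taken from the RAR
 *      driver itself; the base address, capacity and block size below
 *      are made-up values):
 *
 *              struct memrar_allocator *a =
 *                      memrar_create_allocator(0x80000000,
 *                                              4 * 1024 * 1024,
 *                                              4096);
 *              unsigned long buf;
 *
 *              if (a == NULL)
 *                      return;
 *
 *              buf = memrar_allocator_alloc(a, 8192);  /* two blocks */
 *              if (buf != 0)
 *                      memrar_allocator_free(a, buf);
 *
 *              memrar_destroy_allocator(a);
 */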

#include "memrar_allocator.h"
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/kernel.h>

struct memrar_allocator *memrar_create_allocator(unsigned long base,
						 size_t capacity,
						 size_t block_size)
{
	struct memrar_allocator *allocator = NULL;
	struct memrar_address_ranges *first_node = NULL;

	/*
	 * Make sure the base address is aligned on a block_size
	 * boundary.
	 *
	 * @todo Is this necessary?
	 */
	/* base = ALIGN(base, block_size); */

	/* Validate parameters.
	 *
	 * Make sure we can allocate the entire memory space.  Zero
	 * capacity or block size are obviously invalid.
	 */
	if (base == 0
	    || capacity == 0
	    || block_size == 0
	    || ULONG_MAX - capacity < base
	    || capacity < block_size)
		return allocator;
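
	/*
	 * Note on the overflow test above: "ULONG_MAX - capacity < base"
	 * rejects any range whose end address would wrap around.  For
	 * example (made-up numbers), base = ULONG_MAX - 0x1000 with
	 * capacity = 0x2000 would make base + capacity wrap past zero,
	 * so such a range is refused.
	 */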

	/*
	 * There isn't much point in creating a memory allocator that
	 * is only capable of holding one block but we'll allow it,
	 * and issue a diagnostic.
	 */
	WARN(capacity < block_size * 2,
	     "memrar: Only one block available to allocator.\n");

	allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);

	if (allocator == NULL)
		return allocator;

	mutex_init(&allocator->lock);
	allocator->base = base;

	/* Round the capacity down to a multiple of block_size. */
	allocator->capacity = (capacity / block_size) * block_size;

	allocator->block_size = block_size;

	allocator->largest_free_area = allocator->capacity;

	/* Initialize the handle and free lists. */
	INIT_LIST_HEAD(&allocator->allocated_list.list);
	INIT_LIST_HEAD(&allocator->free_list.list);

	first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
	if (first_node == NULL) {
		/* Don't leak the allocator on failure. */
		kfree(allocator);
		allocator = NULL;
	} else {
		/* Full range of blocks is available. */
		first_node->range.begin = base;
		first_node->range.end   = base + allocator->capacity;
		list_add(&first_node->list,
			 &allocator->free_list.list);
	}

	return allocator;
}

void memrar_destroy_allocator(struct memrar_allocator *allocator)
{
	/*
	 * Assume that the memory allocator lock isn't held at this
	 * point in time.  Caller must ensure that.
	 */

	struct memrar_address_ranges *pos = NULL;
	struct memrar_address_ranges *n = NULL;

	if (allocator == NULL)
		return;

	mutex_lock(&allocator->lock);

	/* Reclaim free list resources. */
	list_for_each_entry_safe(pos,
				 n,
				 &allocator->free_list.list,
				 list) {
		list_del(&pos->list);
		kfree(pos);
	}

	mutex_unlock(&allocator->lock);

	kfree(allocator);
}

unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
				     size_t size)
{
	struct memrar_address_ranges *pos = NULL;

	size_t num_blocks;
	unsigned long reserved_bytes;

	/*
	 * Address of allocated buffer.  We assume that zero is not a
	 * valid address.
	 */
	unsigned long addr = 0;

	if (allocator == NULL || size == 0)
		return addr;

	/* Reserve enough blocks to hold the amount of bytes requested. */
	num_blocks = DIV_ROUND_UP(size, allocator->block_size);

	reserved_bytes = num_blocks * allocator->block_size;
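
	/*
	 * For example (made-up numbers): with block_size = 4096, a
	 * request of size = 5000 rounds up to num_blocks = 2 and
	 * reserved_bytes = 8192, i.e. allocations always consume
	 * whole blocks.
	 */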

	mutex_lock(&allocator->lock);

	if (reserved_bytes > allocator->largest_free_area) {
		mutex_unlock(&allocator->lock);
		return addr;
	}

	/*
	 * Iterate through the free list to find a suitably sized
	 * range of free contiguous memory blocks.
	 *
	 * We also take the opportunity to reset the size of the
	 * largest free area size statistic.
	 */
	list_for_each_entry(pos, &allocator->free_list.list, list) {
		struct memrar_address_range * const fr = &pos->range;
		size_t const curr_size = fr->end - fr->begin;

		if (curr_size >= reserved_bytes && addr == 0) {
			struct memrar_address_range *range = NULL;
			struct memrar_address_ranges * const new_node =
				kmalloc(sizeof(*new_node), GFP_KERNEL);

			if (new_node == NULL)
				break;

			list_add(&new_node->list,
				 &allocator->allocated_list.list);

			/*
			 * Carve out area of memory from end of free
			 * range.
			 */
			range        = &new_node->range;
			range->end   = fr->end;
			fr->end     -= reserved_bytes;
			range->begin = fr->end;
			addr         = range->begin;

			/*
			 * Check if largest area has decreased in
			 * size.  We'll need to continue scanning for
			 * the next largest area if it has.
			 */
			if (curr_size == allocator->largest_free_area)
				allocator->largest_free_area -=
					reserved_bytes;
			else
				break;
		}

		/*
		 * Reset largest free area size statistic as needed,
		 * but only if we've actually allocated memory.
		 */
		if (addr != 0
		    && curr_size > allocator->largest_free_area) {
			allocator->largest_free_area = curr_size;
			break;
		}
	}

	mutex_unlock(&allocator->lock);

	return addr;
}

long memrar_allocator_free(struct memrar_allocator *allocator,
			   unsigned long addr)
{
	struct list_head *pos = NULL;
	struct list_head *tmp = NULL;
	struct list_head *dst = NULL;

	struct memrar_address_ranges *allocated = NULL;
	struct memrar_address_range const *handle = NULL;

	unsigned long old_end = 0;
	unsigned long new_chunk_size = 0;

	if (allocator == NULL)
		return -EINVAL;

	if (addr == 0)
		return 0;	/* Ignore "free(0)". */

	mutex_lock(&allocator->lock);

	/* Find the corresponding handle. */
	list_for_each_entry(allocated,
			    &allocator->allocated_list.list,
			    list) {
		if (allocated->range.begin == addr) {
			handle = &allocated->range;
			break;
		}
	}

	/* No such buffer created by this allocator. */
	if (handle == NULL) {
		mutex_unlock(&allocator->lock);
		return -EFAULT;
	}
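
	/*
	 * Worked example of the coalescing below (made-up addresses):
	 * with free chunks [0x1000, 0x2000) and [0x3000, 0x4000), and
	 * addr referring to the allocated range [0x2000, 0x3000), the
	 * first chunk is extended to [0x1000, 0x3000) and then merged
	 * with its neighbor into a single free chunk [0x1000, 0x4000).
	 */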

	/*
	 * Coalesce adjacent chunks of memory if possible.
	 *
	 * @note This isn't full blown coalescing since we're only
	 *       coalescing at most three chunks of memory.
	 */

	/*
	 * Fall back on inserting at the head of the free list if no
	 * better position is found below; this keeps the list_move()
	 * at the end of this function safe when the freed chunk is
	 * the lowest-addressed one.
	 */
	dst = &allocator->free_list.list;

	list_for_each_safe(pos, tmp, &allocator->free_list.list) {
		/* @todo O(n) performance.  Optimize. */

		struct memrar_address_range * const chunk =
			&list_entry(pos,
				    struct memrar_address_ranges,
				    list)->range;

		/* Extend size of existing free adjacent chunk. */
		if (chunk->end == handle->begin) {
			/*
			 * Chunk "less than" the one we're
			 * freeing is adjacent.
			 */

			struct memrar_address_ranges const * const next =
				list_entry(pos->next,
					   struct memrar_address_ranges,
					   list);

			chunk->end = handle->end;

			/*
			 * Now check if next free chunk is adjacent to
			 * the current extended free chunk.
			 *
			 * Before:
			 *
			 * +------------+----+
			 * |   chunk    |next|
			 * +------------+----+
			 *
			 * After:
			 *
			 * +-----------------+
			 * |      chunk      |
			 * +-----------------+
			 */
			if (!list_is_singular(pos)
			    && chunk->end == next->range.begin) {
				chunk->end = next->range.end;
				list_del(pos->next);
				kfree(next);
			}

			list_del(&allocated->list);

			new_chunk_size = chunk->end - chunk->begin;

			goto exit_memrar_free;

		} else if (handle->end == chunk->begin) {
			/*
			 * Chunk "greater than" the one we're
			 * freeing is adjacent.
			 */

			struct memrar_address_ranges const * const prev =
				list_entry(pos->prev,
					   struct memrar_address_ranges,
					   list);

			chunk->begin = handle->begin;

			/*
			 * Now check if previous free chunk is
			 * adjacent to the current extended free
			 * chunk.
			 *
			 * Before:
			 *
			 * +----+------------+
			 * |prev|   chunk    |
			 * +----+------------+
			 *
			 * After:
			 *
			 * +-----------------+
			 * |      chunk      |
			 * +-----------------+
			 */
			if (!list_is_singular(pos)
			    && prev->range.end == chunk->begin) {
				chunk->begin = prev->range.begin;
				list_del(pos->prev);
				kfree(prev);
			}

			list_del(&allocated->list);

			new_chunk_size = chunk->end - chunk->begin;

			goto exit_memrar_free;

		} else if (chunk->end < handle->begin
			   && chunk->end > old_end) {
			/* Keep track of where the entry could be
			 * potentially moved from the "allocated" list
			 * to the "free" list if coalescing doesn't
			 * occur, making sure the "free" list remains
			 * sorted.
			 */
			old_end = chunk->end;
			dst = pos;
		}
	}

	/*
	 * Nothing to coalesce.
	 *
	 * Move the entry from the "allocated" list to the "free"
	 * list.
	 */
	list_move(&allocated->list, dst);
	new_chunk_size = handle->end - handle->begin;

exit_memrar_free:

	if (new_chunk_size > allocator->largest_free_area)
		allocator->largest_free_area = new_chunk_size;

	mutex_unlock(&allocator->lock);

	return 0;
}


/*
  Local Variables:
    c-file-style: "linux"
  End:
*/