/* tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/staging/memrar/memrar_allocator.c */
/*
 * memrar_allocator 1.0:  An allocator for Intel RAR.
 *
 * Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE.  See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 * The full GNU General Public License is included in this
 * distribution in the file called COPYING.
 *
 * ------------------------------------------------------------------
 *
 * This simple allocator implementation provides a
 * malloc()/free()-like interface for reserving space within a
 * previously reserved block of memory.  It is not specific to
 * any hardware, nor is it coupled with the lower level paging
 * mechanism.
 *
 * The primary goal of this implementation is to provide a means
 * to partition an arbitrary block of memory without actually
 * accessing the memory or incurring any hardware side-effects
 * (e.g. paging).  It is, in effect, a bookkeeping mechanism for
 * buffers.
 */
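
/*
 * Example usage (purely illustrative; the base address, sizes and
 * calling context below are hypothetical, only the function names
 * come from this allocator):
 *
 *        struct memrar_allocator *a;
 *        unsigned long buf;
 *
 *        a = memrar_create_allocator(0x40000000, 4 * 1024 * 1024, 4096);
 *        if (a != NULL) {
 *                buf = memrar_allocator_alloc(a, 10000);
 *                if (buf != 0)
 *                        memrar_allocator_free(a, buf);
 *                memrar_destroy_allocator(a);
 *        }
 *
 * None of these calls touch the managed memory region itself; they
 * only update the allocator's bookkeeping lists.
 */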

#include "memrar_allocator.h"
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/kernel.h>

struct memrar_allocator *memrar_create_allocator(unsigned long base,
                                                 size_t capacity,
                                                 size_t block_size)
{
        struct memrar_allocator *allocator = NULL;
        struct memrar_address_ranges *first_node = NULL;

        /*
         * Make sure the base address is aligned on a block_size
         * boundary.
         *
         * @todo Is this necessary?
         */
        /* base = ALIGN(base, block_size); */

        /* Validate parameters.
         *
         * Make sure we can allocate the entire memory space.  Zero
         * capacity or block size are obviously invalid.
         */
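        /*
         * Note: the "ULONG_MAX - capacity < base" test below rejects
         * ranges where base + capacity would wrap around the unsigned
         * long address space, without performing the overflowing
         * addition itself.
         */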
        if (base == 0
            || capacity == 0
            || block_size == 0
            || ULONG_MAX - capacity < base
            || capacity < block_size)
                return allocator;

        /*
         * There isn't much point in creating a memory allocator that
         * is only capable of holding one block but we'll allow it,
         * and issue a diagnostic.
         */
        WARN(capacity < block_size * 2,
             "memrar: Only one block available to allocator.\n");

        allocator = kmalloc(sizeof(*allocator), GFP_KERNEL);

        if (allocator == NULL)
                return allocator;

        mutex_init(&allocator->lock);
        allocator->base = base;

        /* Round the capacity down to a multiple of block_size. */
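        /*
         * Worked example (hypothetical numbers): with capacity =
         * 10485860 and block_size = 4096, capacity / block_size is
         * 2560, so the usable capacity becomes 2560 * 4096 = 10485760
         * bytes; the trailing 100 bytes are never handed out.
         */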
        allocator->capacity = (capacity / block_size) * block_size;

        allocator->block_size = block_size;

        allocator->largest_free_area = allocator->capacity;

        /* Initialize the handle and free lists. */
        INIT_LIST_HEAD(&allocator->allocated_list.list);
        INIT_LIST_HEAD(&allocator->free_list.list);

        first_node = kmalloc(sizeof(*first_node), GFP_KERNEL);
        if (first_node == NULL) {
                kfree(allocator);
                allocator = NULL;
        } else {
                /* Full range of blocks is available. */
                first_node->range.begin = base;
                first_node->range.end   = base + allocator->capacity;
                list_add(&first_node->list,
                         &allocator->free_list.list);
        }

        return allocator;
}

void memrar_destroy_allocator(struct memrar_allocator *allocator)
{
        /*
         * Assume that the memory allocator lock isn't held at this
         * point in time.  Caller must ensure that.
         */

        struct memrar_address_ranges *pos = NULL;
        struct memrar_address_ranges *n = NULL;

        if (allocator == NULL)
                return;

        mutex_lock(&allocator->lock);

        /* Reclaim free list resources. */
        list_for_each_entry_safe(pos,
                                 n,
                                 &allocator->free_list.list,
                                 list) {
                list_del(&pos->list);
                kfree(pos);
        }

        mutex_unlock(&allocator->lock);

        kfree(allocator);
}

unsigned long memrar_allocator_alloc(struct memrar_allocator *allocator,
                                     size_t size)
{
        struct memrar_address_ranges *pos = NULL;

        size_t num_blocks;
        unsigned long reserved_bytes;

        /*
         * Address of allocated buffer.  We assume that zero is not a
         * valid address.
         */
        unsigned long addr = 0;

        if (allocator == NULL || size == 0)
                return addr;

        /* Reserve enough blocks to hold the amount of bytes requested. */
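        /*
         * Worked example (hypothetical numbers): with block_size =
         * 4096 and size = 10000, DIV_ROUND_UP() gives num_blocks = 3,
         * so reserved_bytes = 12288; callers always receive whole
         * blocks, never less than they asked for.
         */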
        num_blocks = DIV_ROUND_UP(size, allocator->block_size);

        reserved_bytes = num_blocks * allocator->block_size;

        mutex_lock(&allocator->lock);

        if (reserved_bytes > allocator->largest_free_area) {
                mutex_unlock(&allocator->lock);
                return addr;
        }

        /*
         * Iterate through the free list to find a suitably sized
         * range of free contiguous memory blocks.
         *
         * We also take the opportunity to reset the size of the
         * largest free area size statistic.
         */
        list_for_each_entry(pos, &allocator->free_list.list, list) {
                struct memrar_address_range * const fr = &pos->range;
                size_t const curr_size = fr->end - fr->begin;

                if (curr_size >= reserved_bytes && addr == 0) {
                        struct memrar_address_range *range = NULL;
                        struct memrar_address_ranges * const new_node =
                                kmalloc(sizeof(*new_node), GFP_KERNEL);

                        if (new_node == NULL)
                                break;

                        list_add(&new_node->list,
                                 &allocator->allocated_list.list);

                        /*
                         * Carve out area of memory from end of free
                         * range.
                         */
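                        /*
                         * Worked example (hypothetical numbers): if
                         * the free range fr covers [0x1000, 0x5000)
                         * and reserved_bytes is 0x1000, fr shrinks to
                         * [0x1000, 0x4000) and the newly allocated
                         * range becomes [0x4000, 0x5000), i.e.
                         * addr = 0x4000.
                         */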
                        range = &new_node->range;
                        range->end = fr->end;
                        fr->end -= reserved_bytes;
                        range->begin = fr->end;
                        addr = range->begin;

                        /*
                         * Check if largest area has decreased in
                         * size.  We'll need to continue scanning for
                         * the next largest area if it has.
                         */
                        if (curr_size == allocator->largest_free_area)
                                allocator->largest_free_area -=
                                        reserved_bytes;
                        else
                                break;
                }

                /*
                 * Reset largest free area size statistic as needed,
                 * but only if we've actually allocated memory.
                 */
                if (addr != 0
                    && curr_size > allocator->largest_free_area) {
                        allocator->largest_free_area = curr_size;
                        break;
                }
        }

        mutex_unlock(&allocator->lock);

        return addr;
}

long memrar_allocator_free(struct memrar_allocator *allocator,
                           unsigned long addr)
{
        struct list_head *pos = NULL;
        struct list_head *tmp = NULL;
        struct list_head *dst = NULL;

        struct memrar_address_ranges *allocated = NULL;
        struct memrar_address_range const *handle = NULL;

        unsigned long old_end = 0;
        unsigned long new_chunk_size = 0;

        if (allocator == NULL)
                return -EINVAL;

        if (addr == 0)
                return 0;       /* Ignore "free(0)". */

        mutex_lock(&allocator->lock);

        /* Find the corresponding handle. */
        list_for_each_entry(allocated,
                            &allocator->allocated_list.list,
                            list) {
                if (allocated->range.begin == addr) {
                        handle = &allocated->range;
                        break;
                }
        }

        /* No such buffer created by this allocator. */
        if (handle == NULL) {
                mutex_unlock(&allocator->lock);
                return -EFAULT;
        }

        /*
         * Coalesce adjacent chunks of memory if possible.
         *
         * @note This isn't full blown coalescing since we're only
         *       coalescing at most three chunks of memory.
         */
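        /*
         * Worked example (hypothetical numbers): freeing the range
         * [0x3000, 0x4000) while the free list holds [0x1000, 0x3000)
         * and [0x4000, 0x6000) collapses all three into the single
         * free chunk [0x1000, 0x6000).
         */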
        list_for_each_safe(pos, tmp, &allocator->free_list.list) {
                /* @todo O(n) performance.  Optimize. */

                struct memrar_address_range * const chunk =
                        &list_entry(pos,
                                    struct memrar_address_ranges,
                                    list)->range;

                /* Extend size of existing free adjacent chunk. */
                if (chunk->end == handle->begin) {
                        /*
                         * Chunk "less than" the one we're
                         * freeing is adjacent.
                         *
                         * Before:
                         *
                         * +-----+------+
                         * |chunk|handle|
                         * +-----+------+
                         *
                         * After:
                         *
                         * +------------+
                         * |    chunk   |
                         * +------------+
                         */

                        struct memrar_address_ranges const * const next =
                                list_entry(pos->next,
                                           struct memrar_address_ranges,
                                           list);

                        chunk->end = handle->end;

                        /*
                         * Now check if next free chunk is adjacent to
                         * the current extended free chunk.
                         *
                         * Before:
                         *
                         * +------------+----+
                         * |    chunk   |next|
                         * +------------+----+
                         *
                         * After:
                         *
                         * +-----------------+
                         * |      chunk      |
                         * +-----------------+
                         */
                        if (!list_is_singular(pos)
                            && chunk->end == next->range.begin) {
                                chunk->end = next->range.end;
                                list_del(pos->next);
                                kfree(next);
                        }

                        list_del(&allocated->list);

                        new_chunk_size = chunk->end - chunk->begin;

                        goto exit_memrar_free;
                } else if (handle->end == chunk->begin) {
                        /*
                         * Chunk "greater than" the one we're
                         * freeing is adjacent.
                         *
                         * Before:
                         *
                         * +------+-----+
                         * |handle|chunk|
                         * +------+-----+
                         *
                         * After:
                         *
                         * +------------+
                         * |    chunk   |
                         * +------------+
                         */

                        struct memrar_address_ranges const * const prev =
                                list_entry(pos->prev,
                                           struct memrar_address_ranges,
                                           list);

                        chunk->begin = handle->begin;

                        /*
                         * Now check if previous free chunk is
                         * adjacent to the current extended free
                         * chunk.
                         *
                         * Before:
                         *
                         * +----+------------+
                         * |prev|    chunk   |
                         * +----+------------+
                         *
                         * After:
                         *
                         * +-----------------+
                         * |      chunk      |
                         * +-----------------+
                         */
                        if (!list_is_singular(pos)
                            && prev->range.end == chunk->begin) {
                                chunk->begin = prev->range.begin;
                                list_del(pos->prev);
                                kfree(prev);
                        }

                        list_del(&allocated->list);

                        new_chunk_size = chunk->end - chunk->begin;

                        goto exit_memrar_free;
                } else if (chunk->end < handle->begin
                           && chunk->end > old_end) {
                        /*
                         * Keep track of where the entry could
                         * potentially be moved from the "allocated"
                         * list to the "free" list if coalescing
                         * doesn't occur, making sure the "free" list
                         * remains sorted.
                         */
                        old_end = chunk->end;
                        dst = pos;
                }
        }

        /*
         * Nothing to coalesce.
         *
         * Move the entry from the "allocated" list to the "free"
         * list.
         */
        list_move(&allocated->list, dst);
        new_chunk_size = handle->end - handle->begin;
        allocated = NULL;

exit_memrar_free:

        if (new_chunk_size > allocator->largest_free_area)
                allocator->largest_free_area = new_chunk_size;

        mutex_unlock(&allocator->lock);

        kfree(allocated);

        return 0;
}

/*
  Local Variables:
    c-file-style: "linux"
  End:
*/