/* SPDX-License-Identifier: GPL-2.0-only */

#include <tests/test.h>

#include <device/device.h>
#include <device/resource.h>
#include <commonlib/helpers.h>
#include <memrange.h>

#define MEMRANGE_ALIGN (POWER_OF_2(12))
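
/* The memrange library is expected to align entry bases down and entry ends up to
   4KiB (2^12) boundaries; the helpers and assertions below rely on this. */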

enum mem_types {
	/* Avoid using 0, to verify that the UUT really sets this memory,
	   but keep the value small, as it will be used as an index in the table */
	CACHEABLE_TAG = 0x10,
	RESERVED_TAG,
	READONLY_TAG,
	INSERTED_TAG,
	HOLE_TAG,
	UNASSIGNED_TAG,
	END_OF_RESOURCES
};

/* The indices of the entries matter, since they must reflect the mem_types enum */
struct resource res_mock_1[] = {
	[UNASSIGNED_TAG] = {.base = 0x0,
			    .size = 0x8000,
			    .next = &res_mock_1[CACHEABLE_TAG],
			    .flags = IORESOURCE_MEM | IORESOURCE_PREFETCH},
	[CACHEABLE_TAG] = {.base = 0xE000,
			   .size = 0xF2000,
			   .next = &res_mock_1[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB,
			  .size = 4ULL * KiB,
			  .next = &res_mock_1[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Boundary 1 byte below 4GiB and 1 byte above 4GiB. */
struct resource res_mock_2[] = {
	[CACHEABLE_TAG] = {.base = 0x1000000,
			   .size = 4ULL * GiB - 0x1000001ULL,
			   .next = &res_mock_2[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 4ULL * GiB + 1ULL,
			  .size = 4ULL * GiB,
			  .next = &res_mock_2[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

/* Boundary crossing 4GiB. */
struct resource res_mock_3[] = {
	[CACHEABLE_TAG] = {.base = 0xD000,
			   .size = 0xF3000,
			   .next = &res_mock_3[RESERVED_TAG],
			   .flags = IORESOURCE_CACHEABLE | IORESOURCE_MEM |
				    IORESOURCE_ASSIGNED},
	[RESERVED_TAG] = {.base = 1ULL * GiB,
			  .size = 4ULL * GiB,
			  .next = &res_mock_3[READONLY_TAG],
			  .flags = IORESOURCE_RESERVE | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED},
	[READONLY_TAG] = {.base = 0xFF0000,
			  .size = 0x10000,
			  .next = NULL,
			  .flags = IORESOURCE_READONLY | IORESOURCE_MEM |
				   IORESOURCE_ASSIGNED}
};

struct device mock_device = {.enabled = 1};

/* Fake memory devices handle */
struct device *all_devices = &mock_device;
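
/* memranges_add_resources() walks the global all_devices list to collect resources,
   so each setup function below selects which res_mock table the tests will see by
   pointing mock_device.resource_list at it. */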

int setup_test_1(void **state)
{
	*state = res_mock_1;
	mock_device.resource_list = &res_mock_1[UNASSIGNED_TAG];

	return 0;
}

int setup_test_2(void **state)
{
	*state = res_mock_2;
	mock_device.resource_list = &res_mock_2[CACHEABLE_TAG];

	return 0;
}

int setup_test_3(void **state)
{
	*state = res_mock_3;
	mock_device.resource_list = &res_mock_3[CACHEABLE_TAG];

	return 0;
}
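
/* Helpers computing the entry base and end the memrange library is expected to
   produce for a given resource: the base aligned down to MEMRANGE_ALIGN, and the
   resource end (shifted by the bytes gained from aligning the base down) aligned up. */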
resource_t get_aligned_base(struct resource *res, struct range_entry *entry)
{
	return ALIGN_DOWN(res[range_entry_tag(entry)].base, MEMRANGE_ALIGN);
}

resource_t get_aligned_end(struct resource *res, struct range_entry *entry)
{
	resource_t end = res[range_entry_tag(entry)].base + res[range_entry_tag(entry)].size
			 + (res[range_entry_tag(entry)].base - range_entry_base(entry));
	return ALIGN_UP(end, MEMRANGE_ALIGN);
}

/*
 * This test verifies the memranges_init(), memranges_add_resources() and
 * memranges_teardown() functions. It covers the basic functionality of the memrange
 * library - creating a memrange structure from the resources available on the platform
 * and freeing the allocated memory.
 *
 * Example memory ranges (res_mock_1) for test_memrange_basic.
 * Ranges marked with asterisks (***) are not added to the test_memrange.
 *
 * +----***UNASSIGNED_TAG***-----+ <-0x0
 * |                             |
 * +-----------------------------+ <-0x8000
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x100000
 *
 * +-----***READONLY_TAG***------+ <-0xFF0000
 * |                             |
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_basic(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long prefetchable = IORESOURCE_PREFETCH;
	struct range_entry *ptr;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	resource_t prev_base = 0;

	memranges_init_empty(&test_memrange, NULL, 0);
	memranges_add_resources(&test_memrange, prefetchable, prefetchable, UNASSIGNED_TAG);
	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* There should be two entries, since the cacheable and reserved regions are not
	   neighbors. Besides these two, a region with an unassigned tag is defined, to
	   emulate an unmapped PCI BAR resource. This resource is not mapped into the host
	   physical address space and hence should not be picked up by
	   memranges_add_resources(). */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, RESERVED_TAG);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		/* Ranges have to be returned in increasing order */
		assert_true(prev_base <= range_entry_base(ptr));

		prev_base = range_entry_base(ptr);
		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Remove the initial memrange */
	memranges_teardown(&test_memrange);
	memranges_each_entry(ptr, &test_memrange) counter++;
	assert_int_equal(counter, 0);
}

/*
 * This test verifies the memranges_clone(), memranges_insert() and
 * memranges_update_tag() functions. All operations are performed on a cloned memrange.
 * One of the most important things to check is that memranges_insert() removes all
 * ranges which are covered by the newly inserted one.
 *
 * Example memory ranges (res_mock_1) for test_memrange_clone_insert.
 * Ranges marked with asterisks (***) are not added to the clone_memrange.
 * Ranges marked with (^) have their tag value changed during the test.
 *
 *        +--------CACHEABLE_TAG--------+ <-0xE000
 * +------|----INSERTED_TAG----------+  |      <-0xF000
 * |      |    (^READONLY_TAG^)      |  |
 * |      |                          |  |
 * |      +-----------------------------+ <-0x100000
 * +---------------------------------+        <-0x101000
 *
 *      +-----***READONLY_TAG***------+ <-0xFF0000
 *      |                             |
 *      |                             |
 *      |                             |
 *      +-----------------------------+ <-0x1000000
 *
 * +------+---------RESERVED_TAG-----+--+ <-0x100000000
 * |      |                          |  |
 * |      +-----------------------------+ <-0x100001000
 * +---------------------------------+        <-0x100002000
 */
static void test_memrange_clone_insert(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct memranges test_memrange, clone_memrange;
	struct resource *res_mock = *state;
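	/* One page (4KiB), used as the inserted range's offset into the overlapped
	   region and in the expected aligned-end calculations below. */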
	const resource_t new_range_begin_offset = 1ULL << 12;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	memranges_clone(&clone_memrange, &test_memrange);
	memranges_teardown(&test_memrange);

	/* Verify that the new memranges is really a clone */
	memranges_each_entry(ptr, &clone_memrange)
	{
		assert_in_range(range_entry_tag(ptr), CACHEABLE_TAG, END_OF_RESOURCES - 1);
		assert_int_equal(range_entry_base(ptr), get_aligned_base(res_mock, ptr));

		assert_int_equal(range_entry_end(ptr), get_aligned_end(res_mock, ptr));

		counter++;
	}
	assert_int_equal(counter, 2);
	counter = 0;

	/* Insert a new range, which will overlap with the first region. */
	memranges_insert(&clone_memrange, res_mock[CACHEABLE_TAG].base + new_range_begin_offset,
			 res_mock[CACHEABLE_TAG].size, INSERTED_TAG);
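
	/* The inserted range starts one page inside the cacheable range but keeps its
	   size, so it ends one page past it; the cacheable entry must therefore be
	   truncated rather than removed. */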

	/* Three ranges should be present - CACHEABLE (shrunk), INSERTED and RESERVED */
	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset;
			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
		counter++;
	}
	assert_int_equal(counter, 3);
	counter = 0;

	/* Update the tag of the inserted region, which will shadow the readonly range.
	   Additionally, this verifies the API for updating tags. */
	memranges_update_tag(&clone_memrange, INSERTED_TAG, READONLY_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), INSERTED_TAG);
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 res_mock[CACHEABLE_TAG].base + new_range_begin_offset);

			expected_end = res_mock[CACHEABLE_TAG].base + new_range_begin_offset
				       + res_mock[CACHEABLE_TAG].size;
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_UP(expected_end, MEMRANGE_ALIGN));
		}
	}

	/* Check that the alignment (4KiB) is properly applied, that is the begin is
	   aligned DOWN and the end UP */
	memranges_insert(&clone_memrange, res_mock[RESERVED_TAG].base + 0xAD,
			 res_mock[RESERVED_TAG].size, INSERTED_TAG);

	memranges_each_entry(ptr, &clone_memrange)
	{
		resource_t expected_end;

		assert_int_not_equal(range_entry_tag(ptr), RESERVED_TAG);
		if (range_entry_tag(ptr) == INSERTED_TAG) {
			assert_int_equal(
				range_entry_base(ptr),
				ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN));

			expected_end = ALIGN_DOWN(res_mock[RESERVED_TAG].base, MEMRANGE_ALIGN)
				       + new_range_begin_offset + res_mock[RESERVED_TAG].size;
			expected_end = ALIGN_UP(expected_end, MEMRANGE_ALIGN);

			assert_int_equal(range_entry_end(ptr), expected_end);
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	/* Free the clone */
	memranges_teardown(&clone_memrange);
}

/*
 * This test verifies the memranges_fill_holes_up_to() and memranges_create_hole()
 * functions. The idea of the test is to fill all holes, so that we end up with a
 * contiguous address space fully covered by entries. Then, holes are created on the
 * border of two different regions.
 *
 * Example memory ranges (res_mock_1) for test_memrange_holes.
 * Space marked with (/) is not covered by any region at the end of the test.
 *
 *   +--------CACHEABLE_TAG--------+ <-0xE000
 *   |                             |
 *   |                             |
 * //|/////////////////////////////| <-0xFF000
 * //+-----------HOLE_TAG----------+ <-0x100000
 * //|/////////////////////////////| <-0x101000
 *   |                             |
 *   |                             |
 *   |                             |
 *   |                             |
 *   +--------RESERVED_TAG---------+ <-0x100000000
 *   |                             |
 *   +-----------------------------+ <-0x100001000
 */
static void test_memrange_holes(void **state)
{
	int counter = 0;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct range_entry *ptr;
	struct range_entry *hole_ptr = NULL;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	int holes_found = 0;
	resource_t last_range_end = 0;
	const resource_t holes_fill_end = res_mock[RESERVED_TAG].base;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);

	/* Count holes in ranges */
	memranges_each_entry(ptr, &test_memrange)
	{
		if (!last_range_end) {
			last_range_end = range_entry_end(ptr);
			continue;
		}

		if (range_entry_base(ptr) != last_range_end) {
			holes_found++;
			last_range_end = range_entry_end(ptr);
		}

		if (range_entry_base(ptr) >= holes_fill_end)
			break;
	}

	/* Create range entries which cover a contiguous memory range
	   (but with different tags) */
	memranges_fill_holes_up_to(&test_memrange, holes_fill_end, HOLE_TAG);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == HOLE_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_UP(res_mock[CACHEABLE_TAG].base
							  + res_mock[CACHEABLE_TAG].size,
						  MEMRANGE_ALIGN));
			assert_int_equal(range_entry_end(ptr), holes_fill_end);
			/* Store a pointer to the HOLE_TAG region for future use */
			hole_ptr = ptr;
		}
		counter++;
	}
	assert_int_equal(counter, 2 + holes_found);

	/* If the test data does not have any holes in it, terminate this test */
	if (holes_found == 0)
		return;

	assert_non_null(hole_ptr);
	counter = 0;

	/* Create a hole crossing the border of two range entries */
	const resource_t new_cacheable_end = ALIGN_DOWN(
		res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size - 4 * KiB,
		MEMRANGE_ALIGN);
	const resource_t new_hole_begin =
		ALIGN_UP(range_entry_base(hole_ptr) + 4 * KiB, MEMRANGE_ALIGN);
	const resource_t ranges_diff = new_hole_begin - new_cacheable_end;
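
	/* memranges_create_hole() should punch out [new_cacheable_end, new_hole_begin):
	   the cacheable entry gets truncated at its end and the hole entry at its
	   beginning, which the loop below verifies. */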
	memranges_create_hole(&test_memrange, new_cacheable_end, ranges_diff);

	memranges_each_entry(ptr, &test_memrange)
	{
		switch (range_entry_tag(ptr)) {
		case CACHEABLE_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[CACHEABLE_TAG].base);
			assert_int_equal(range_entry_end(ptr), new_cacheable_end);
			break;
		case RESERVED_TAG:
			assert_int_equal(range_entry_base(ptr), res_mock[RESERVED_TAG].base);
			assert_int_equal(range_entry_end(ptr),
					 res_mock[RESERVED_TAG].base
						 + res_mock[RESERVED_TAG].size);
			break;
		case HOLE_TAG:
			assert_int_equal(range_entry_base(ptr), new_hole_begin);
			assert_int_equal(range_entry_end(ptr), res_mock[RESERVED_TAG].base);
			break;
		default:
			break;
		}
		counter++;
	}
	assert_int_equal(counter, 3);

	memranges_teardown(&test_memrange);
}

/*
 * This test verifies the memranges_steal() function. A simple check is done by
 * attempting to steal some memory from the top of the region with CACHEABLE_TAG
 * and some from the bottom of the region with READONLY_TAG.
 *
 * Example memory ranges (res_mock_1) for test_memrange_steal.
 * Space marked with (/) is stolen during the test.
 *
 * +--------CACHEABLE_TAG--------+ <-0xE000
 * |                             |
 * |                             |
 * |/////////////////////////////| <-stolen_base
 * +-----------------------------+ <-0x100000 <-stolen_base + 0x4000
 *
 * +--------READONLY_TAG---------+ <-0xFF0000 <-stolen_base
 * |/////////////////////////////| <-stolen_base + 0x4000
 * |                             |
 * |                             |
 * +-----------------------------+ <-0x1000000
 *
 * +--------RESERVED_TAG---------+ <-0x100000000
 * |                             |
 * +-----------------------------+ <-0x100001000
 */
static void test_memrange_steal(void **state)
{
	bool status = false;
	resource_t stolen;
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	const resource_t stolen_range_size = 0x4000;
	struct memranges test_memrange;
	struct resource *res_mock = *state;
	struct range_entry *ptr;
	size_t count = 0;

	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);
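
	/* Steal 0x4000 bytes with 4KiB (2^12) alignment from the tagged entries, using
	   the end of the reserved region as the upper limit; the final argument selects
	   whether the memory is taken from the top or the bottom of the entry. */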
	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, CACHEABLE_TAG, &stolen, true);
	assert_true(status);
	assert_in_range(stolen, res_mock[CACHEABLE_TAG].base,
			res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size);
	status = memranges_steal(&test_memrange,
				 res_mock[RESERVED_TAG].base + res_mock[RESERVED_TAG].size,
				 stolen_range_size, 12, READONLY_TAG, &stolen, false);
	assert_true(status);
	assert_in_range(stolen, res_mock[READONLY_TAG].base,
			res_mock[READONLY_TAG].base + res_mock[READONLY_TAG].size);

	memranges_each_entry(ptr, &test_memrange)
	{
		if (range_entry_tag(ptr) == CACHEABLE_TAG) {
			assert_int_equal(range_entry_end(ptr),
					 ALIGN_DOWN(ALIGN_UP(res_mock[CACHEABLE_TAG].base
							     + res_mock[CACHEABLE_TAG].size,
							     MEMRANGE_ALIGN)
						    - stolen_range_size,
						    MEMRANGE_ALIGN));
		}
		if (range_entry_tag(ptr) == READONLY_TAG) {
			assert_int_equal(range_entry_base(ptr),
					 ALIGN_DOWN(res_mock[READONLY_TAG].base, MEMRANGE_ALIGN)
						 + stolen_range_size);
		}
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	/* Check if inserting ranges into the previously stolen areas will merge them. */
	memranges_insert(&test_memrange,
			 res_mock[CACHEABLE_TAG].base + res_mock[CACHEABLE_TAG].size
				 - stolen_range_size - 0x12,
			 stolen_range_size, CACHEABLE_TAG);
	memranges_insert(&test_memrange, res_mock[READONLY_TAG].base + 0xCC, stolen_range_size,
			 READONLY_TAG);

	memranges_each_entry(ptr, &test_memrange)
	{
		const unsigned long tag = range_entry_tag(ptr);
		assert_true(tag == CACHEABLE_TAG || tag == READONLY_TAG || tag == RESERVED_TAG);
		assert_int_equal(
			range_entry_base(ptr),
			ALIGN_DOWN(res_mock[tag].base, MEMRANGE_ALIGN));
		assert_int_equal(
			range_entry_end(ptr),
			ALIGN_UP(res_mock[tag].base + res_mock[tag].size, MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(count, 3);
	count = 0;

	memranges_teardown(&test_memrange);
}

/* Utility function checking the number of entries and the alignment of their base
   and end addresses */
static void check_range_entries_count_and_alignment(struct memranges *ranges,
						    size_t ranges_count, resource_t alignment)
{
	size_t count = 0;
	struct range_entry *ptr;

	memranges_each_entry(ptr, ranges)
	{
		assert_true(IS_ALIGNED(range_entry_base(ptr), alignment));
		assert_true(IS_ALIGNED(range_entry_end(ptr), alignment));

		count++;
	}
	assert_int_equal(ranges_count, count);
}

/* This test verifies the memranges_init*() and memranges_teardown() functions.
   Added ranges are checked for the correct count and alignment. */
static void test_memrange_init_and_teardown(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	const unsigned long readonly = IORESOURCE_READONLY;
	struct memranges test_memrange;
	struct range_entry range_entries[4] = {0};

	/* Test memranges_init() correctness */
	memranges_init(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	/* Expect all entries to be aligned to 4KiB (2^12) */
	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	/* Expect the ranges list to be empty after teardown */
	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_with_alignment() correctness with an alignment of 1KiB (2^10) */
	memranges_init_with_alignment(&test_memrange, cacheable, cacheable, CACHEABLE_TAG, 10);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(10));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_empty() correctness */
	memranges_init_empty(&test_memrange, &range_entries[0], ARRAY_SIZE(range_entries));
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));

	/* Test memranges_init_empty_with_alignment() correctness with an alignment of
	   8KiB (2^13) */
	memranges_init_empty_with_alignment(&test_memrange, &range_entries[0],
					    ARRAY_SIZE(range_entries), 13);
	assert_true(memranges_is_empty(&test_memrange));

	memranges_add_resources(&test_memrange, cacheable, cacheable, CACHEABLE_TAG);
	memranges_add_resources(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources(&test_memrange, readonly, readonly, READONLY_TAG);

	check_range_entries_count_and_alignment(&test_memrange, 3, POWER_OF_2(13));

	memranges_teardown(&test_memrange);
	assert_true(memranges_is_empty(&test_memrange));
}

/* Filter function accepting only ranges with the memory resource flag */
static int memrange_filter_mem_only(struct device *dev, struct resource *res)
{
	/* Accept only memory resources */
	return res->flags & IORESOURCE_MEM;
}

/* Filter function rejecting ranges with the memory resource flag */
static int memrange_filter_non_mem(struct device *dev, struct resource *res)
{
	/* Reject memory resources */
	return !(res->flags & IORESOURCE_MEM);
}

/* This test verifies the memranges_add_resources_filter() function by providing filter
   functions which accept or reject ranges. */
static void test_memrange_add_resources_filter(void **state)
{
	const unsigned long cacheable = IORESOURCE_CACHEABLE;
	const unsigned long reserved = IORESOURCE_RESERVE;
	struct memranges test_memrange;
	struct range_entry *ptr;
	size_t count = 0;
	size_t accepted_tags[] = {CACHEABLE_TAG, RESERVED_TAG};

	/* Check if the filter accepts ranges correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_mem_only);

	/* Check if the filter accepted the desired ranges. */
	memranges_each_entry(ptr, &test_memrange)
	{
		assert_in_set(range_entry_tag(ptr), accepted_tags, ARRAY_SIZE(accepted_tags));
		assert_true(IS_ALIGNED(range_entry_base(ptr), MEMRANGE_ALIGN));
		assert_true(IS_ALIGNED(range_entry_end(ptr), MEMRANGE_ALIGN));
		count++;
	}
	assert_int_equal(2, count);
	count = 0;
	memranges_teardown(&test_memrange);

	/* Check if the filter rejects ranges correctly */
	memranges_init(&test_memrange, reserved, reserved, RESERVED_TAG);
	memranges_add_resources_filter(&test_memrange, cacheable, cacheable, CACHEABLE_TAG,
				       memrange_filter_non_mem);

	check_range_entries_count_and_alignment(&test_memrange, 1, MEMRANGE_ALIGN);

	memranges_teardown(&test_memrange);
}
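
/* The same test list is run three times, once per setup function, so every test is
   exercised against a boundary exactly on 4GiB (res_mock_1), boundaries 1 byte from
   4GiB (res_mock_2) and a range crossing the 4GiB boundary (res_mock_3). */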
int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_memrange_basic),
		cmocka_unit_test(test_memrange_clone_insert),
		cmocka_unit_test(test_memrange_holes),
		cmocka_unit_test(test_memrange_steal),
		cmocka_unit_test(test_memrange_init_and_teardown),
		cmocka_unit_test(test_memrange_add_resources_filter),
	};

	return cmocka_run_group_tests_name(__TEST_NAME__ "(Boundary on 4GiB)", tests,
					   setup_test_1, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Boundaries 1 byte from 4GiB)",
					     tests, setup_test_2, NULL)
	       + cmocka_run_group_tests_name(__TEST_NAME__ "(Range over 4GiB boundary)", tests,
					     setup_test_3, NULL);
}