/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <console/console.h>
#include <memrange.h>
#include <stdlib.h>
19 static inline void range_entry_link(struct range_entry
**prev_ptr
,
20 struct range_entry
*r
)
26 static inline void range_entry_unlink(struct range_entry
**prev_ptr
,
27 struct range_entry
*r
)
33 static inline void range_entry_unlink_and_free(struct memranges
*ranges
,
34 struct range_entry
**prev_ptr
,
35 struct range_entry
*r
)
37 range_entry_unlink(prev_ptr
, r
);
38 range_entry_link(&ranges
->free_list
, r
);
41 static struct range_entry
*alloc_range(struct memranges
*ranges
)
43 if (ranges
->free_list
!= NULL
) {
44 struct range_entry
*r
;
46 r
= ranges
->free_list
;
47 range_entry_unlink(&ranges
->free_list
, r
);
51 return malloc(sizeof(struct range_entry
));
55 static inline struct range_entry
*
56 range_list_add(struct memranges
*ranges
, struct range_entry
**prev_ptr
,
57 resource_t begin
, resource_t end
, unsigned long tag
)
59 struct range_entry
*new_entry
;
61 new_entry
= alloc_range(ranges
);
62 if (new_entry
== NULL
) {
63 printk(BIOS_ERR
, "Could not allocate range_entry!\n");
66 new_entry
->begin
= begin
;
69 range_entry_link(prev_ptr
, new_entry
);
74 static void merge_neighbor_entries(struct memranges
*ranges
)
76 struct range_entry
*cur
;
77 struct range_entry
*prev
;
80 /* Merge all neighbors and delete/free the leftover entry. */
81 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
82 /* First entry. Just set prev. */
88 /* If the previous entry merges with the current update the
89 * previous entry to cover full range and delete current from
91 if (prev
->end
+ 1 >= cur
->begin
&& prev
->tag
== cur
->tag
) {
93 range_entry_unlink_and_free(ranges
, &prev
->next
, cur
);
94 /* Set cur to prev so cur->next is valid since cur
95 * was just unlinked and free. */
104 static void remove_memranges(struct memranges
*ranges
,
105 resource_t begin
, resource_t end
,
106 unsigned long unused
)
108 struct range_entry
*cur
;
109 struct range_entry
*next
;
110 struct range_entry
**prev_ptr
;
112 prev_ptr
= &ranges
->entries
;
113 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= next
) {
116 /* Cache the next value to handle unlinks. */
119 /* No other ranges are affected. */
120 if (end
< cur
->begin
)
123 /* The removal range starts after this one. */
124 if (begin
> cur
->end
) {
125 prev_ptr
= &cur
->next
;
129 /* The removal range overlaps with the current entry either
130 * partially or fully. However, we need to adjust the removal
131 * range for any holes. */
132 if (begin
<= cur
->begin
) {
136 if (end
>= cur
->end
) {
137 begin
= cur
->end
+ 1;
138 range_entry_unlink_and_free(ranges
, prev_ptr
,
144 /* prev_ptr can be set now that the unlink path wasn't taken. */
145 prev_ptr
= &cur
->next
;
147 /* Clip the end fragment to do proper splitting. */
152 /* Hole punched in middle of entry. */
153 if (begin
> cur
->begin
&& tmp_end
< cur
->end
) {
154 range_list_add(ranges
, &cur
->next
, end
+ 1, cur
->end
,
156 cur
->end
= begin
- 1;
160 /* Removal at beginning. */
161 if (begin
== cur
->begin
)
162 cur
->begin
= tmp_end
+ 1;
164 /* Removal at end. */
165 if (tmp_end
== cur
->end
)
166 cur
->end
= begin
- 1;
170 static void merge_add_memranges(struct memranges
*ranges
,
171 resource_t begin
, resource_t end
,
174 struct range_entry
*cur
;
175 struct range_entry
**prev_ptr
;
177 prev_ptr
= &ranges
->entries
;
179 /* Remove all existing entries covered by the range. */
180 remove_memranges(ranges
, begin
, end
, -1);
182 /* Find the entry to place the new entry after. Since
183 * remove_memranges() was called above there is a guaranteed
184 * spot for this new entry. */
185 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
186 /* Found insertion spot before current entry. */
187 if (end
< cur
->begin
)
190 /* Keep track of previous entry to insert new entry after it. */
191 prev_ptr
= &cur
->next
;
193 /* The new entry starts after this one. */
194 if (begin
> cur
->end
)
199 /* Add new entry and merge with neighbors. */
200 range_list_add(ranges
, prev_ptr
, begin
, end
, tag
);
201 merge_neighbor_entries(ranges
);
204 void memranges_update_tag(struct memranges
*ranges
, unsigned long old_tag
,
205 unsigned long new_tag
)
207 struct range_entry
*r
;
209 memranges_each_entry(r
, ranges
) {
210 if (range_entry_tag(r
) == old_tag
)
211 range_entry_update_tag(r
, new_tag
);
214 merge_neighbor_entries(ranges
);
217 typedef void (*range_action_t
)(struct memranges
*ranges
,
218 resource_t begin
, resource_t end
,
221 static void do_action(struct memranges
*ranges
,
222 resource_t base
, resource_t size
, unsigned long tag
,
223 range_action_t action
)
231 /* The addresses are aligned to 4096 bytes: the begin address is
232 * aligned down while the end address is aligned up to be conservative
233 * about the full range covered. */
234 begin
= ALIGN_DOWN(base
, 4096);
235 end
= begin
+ size
+ (base
- begin
);
236 end
= ALIGN_UP(end
, 4096) - 1;
237 action(ranges
, begin
, end
, tag
);
240 void memranges_create_hole(struct memranges
*ranges
,
241 resource_t base
, resource_t size
)
243 do_action(ranges
, base
, size
, -1, remove_memranges
);
246 void memranges_insert(struct memranges
*ranges
,
247 resource_t base
, resource_t size
, unsigned long tag
)
249 do_action(ranges
, base
, size
, tag
, merge_add_memranges
);
252 struct collect_context
{
253 struct memranges
*ranges
;
255 memrange_filter_t filter
;
258 static void collect_ranges(void *gp
, struct device
*dev
, struct resource
*res
)
260 struct collect_context
*ctx
= gp
;
265 if (ctx
->filter
== NULL
|| ctx
->filter(dev
, res
))
266 memranges_insert(ctx
->ranges
, res
->base
, res
->size
, ctx
->tag
);
269 void memranges_add_resources_filter(struct memranges
*ranges
,
270 unsigned long mask
, unsigned long match
,
272 memrange_filter_t filter
)
274 struct collect_context context
;
276 /* Only deal with MEM resources. */
277 mask
|= IORESOURCE_MEM
;
278 match
|= IORESOURCE_MEM
;
280 context
.ranges
= ranges
;
282 context
.filter
= filter
;
283 search_global_resources(mask
, match
, collect_ranges
, &context
);
286 void memranges_add_resources(struct memranges
*ranges
,
287 unsigned long mask
, unsigned long match
,
290 memranges_add_resources_filter(ranges
, mask
, match
, tag
, NULL
);
293 void memranges_init_empty(struct memranges
*ranges
, struct range_entry
*to_free
,
298 ranges
->entries
= NULL
;
299 ranges
->free_list
= NULL
;
301 for (i
= 0; i
< num_free
; i
++)
302 range_entry_link(&ranges
->free_list
, &to_free
[i
]);
305 void memranges_init(struct memranges
*ranges
,
306 unsigned long mask
, unsigned long match
,
309 memranges_init_empty(ranges
, NULL
, 0);
310 memranges_add_resources(ranges
, mask
, match
, tag
);
313 void memranges_teardown(struct memranges
*ranges
)
315 while (ranges
->entries
!= NULL
) {
316 range_entry_unlink_and_free(ranges
, &ranges
->entries
,
321 void memranges_fill_holes_up_to(struct memranges
*ranges
,
322 resource_t limit
, unsigned long tag
)
324 struct range_entry
*cur
;
325 struct range_entry
*prev
;
328 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
329 /* First entry. Just set prev. */
335 /* If the previous entry does not directly precede the current
336 * entry then add a new entry just after the previous one. */
337 if (range_entry_end(prev
) != cur
->begin
) {
340 end
= cur
->begin
- 1;
343 range_list_add(ranges
, &prev
->next
,
344 range_entry_end(prev
), end
, tag
);
349 /* Hit the requested range limit. No other entries after this
351 if (cur
->begin
>= limit
)
355 /* Handle the case where the limit was never reached. A new entry needs
356 * to be added to cover the range up to the limit. */
357 if (prev
!= NULL
&& range_entry_end(prev
) < limit
)
358 range_list_add(ranges
, &prev
->next
, range_entry_end(prev
),
361 /* Merge all entries that were newly added. */
362 merge_neighbor_entries(ranges
);
/* Returns the entry following r in the range list. The function body
 * continues beyond this chunk and is not visible here. */
365 struct range_entry
*memranges_next_entry(struct memranges
*ranges
,
366 const struct range_entry
*r
)