/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdlib.h>
#include <console/console.h>
/* Coreboot doesn't have a free() function. Therefore, keep a cache of
 * free'd entries so they can be reused by alloc_range(). */
static struct range_entry *free_list;
27 static inline void range_entry_link(struct range_entry
**prev_ptr
,
28 struct range_entry
*r
)
34 static inline void range_entry_unlink(struct range_entry
**prev_ptr
,
35 struct range_entry
*r
)
41 static inline void range_entry_unlink_and_free(struct range_entry
**prev_ptr
,
42 struct range_entry
*r
)
44 range_entry_unlink(prev_ptr
, r
);
45 range_entry_link(&free_list
, r
);
48 static struct range_entry
*alloc_range(void)
50 if (free_list
!= NULL
) {
51 struct range_entry
*r
;
54 range_entry_unlink(&free_list
, r
);
57 return malloc(sizeof(struct range_entry
));
60 static inline struct range_entry
*
61 range_list_add(struct range_entry
**prev_ptr
, resource_t begin
, resource_t end
,
64 struct range_entry
*new_entry
;
66 new_entry
= alloc_range();
67 if (new_entry
== NULL
) {
68 printk(BIOS_ERR
, "Could not allocate range_entry!\n");
71 new_entry
->begin
= begin
;
74 range_entry_link(prev_ptr
, new_entry
);
79 static void merge_neighbor_entries(struct memranges
*ranges
)
81 struct range_entry
*cur
;
82 struct range_entry
*prev
;
85 /* Merge all neighbors and delete/free the leftover entry. */
86 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
87 /* First entry. Just set prev. */
93 /* If the previous entry merges with the current update the
94 * previous entry to cover full range and delete current from
96 if (prev
->end
+ 1 >= cur
->begin
&& prev
->tag
== cur
->tag
) {
98 range_entry_unlink_and_free(&prev
->next
, cur
);
99 /* Set cur to prev so cur->next is valid since cur
100 * was just unlinked and free. */
109 static void remove_memranges(struct memranges
*ranges
,
110 resource_t begin
, resource_t end
,
111 unsigned long unused
)
113 struct range_entry
*cur
;
114 struct range_entry
*next
;
115 struct range_entry
**prev_ptr
;
117 prev_ptr
= &ranges
->entries
;
118 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= next
) {
121 /* Cache the next value to handle unlinks. */
124 /* No other ranges are affected. */
125 if (end
< cur
->begin
)
128 /* The removal range starts after this one. */
129 if (begin
> cur
->end
) {
130 prev_ptr
= &cur
->next
;
134 /* The removal range overlaps with the current entry either
135 * partially or fully. However, we need to adjust the removal
136 * range for any holes. */
137 if (begin
<= cur
->begin
) {
141 if (end
>= cur
->end
) {
142 begin
= cur
->end
+ 1;
143 range_entry_unlink_and_free(prev_ptr
, cur
);
148 /* prev_ptr can be set now that the unlink path wasn't taken. */
149 prev_ptr
= &cur
->next
;
151 /* Clip the end fragment to do proper splitting. */
156 /* Hole punched in middle of entry. */
157 if (begin
> cur
->begin
&& tmp_end
< cur
->end
) {
158 range_list_add(&cur
->next
, end
+ 1, cur
->end
, cur
->tag
);
159 cur
->end
= begin
- 1;
163 /* Removal at beginning. */
164 if (begin
== cur
->begin
)
165 cur
->begin
= tmp_end
+ 1;
167 /* Removal at end. */
168 if (tmp_end
== cur
->end
)
169 cur
->end
= begin
- 1;
173 static void merge_add_memranges(struct memranges
*ranges
,
174 resource_t begin
, resource_t end
,
177 struct range_entry
*cur
;
178 struct range_entry
**prev_ptr
;
180 prev_ptr
= &ranges
->entries
;
182 /* Remove all existing entries covered by the range. */
183 remove_memranges(ranges
, begin
, end
, -1);
185 /* Find the entry to place the new entry after. Since
186 * remove_memranges() was called above there is a guaranteed
187 * spot for this new entry. */
188 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
189 /* Found insertion spot before current entry. */
190 if (end
< cur
->begin
)
193 /* Keep track of previous entry to insert new entry after it. */
194 prev_ptr
= &cur
->next
;
196 /* The new entry starts after this one. */
197 if (begin
> cur
->end
)
202 /* Add new entry and merge with neighbors. */
203 range_list_add(prev_ptr
, begin
, end
, tag
);
204 merge_neighbor_entries(ranges
);
207 void memranges_update_tag(struct memranges
*ranges
, unsigned long old_tag
,
208 unsigned long new_tag
)
210 struct range_entry
*r
;
212 memranges_each_entry(r
, ranges
) {
213 if (range_entry_tag(r
) == old_tag
)
214 range_entry_update_tag(r
, new_tag
);
217 merge_neighbor_entries(ranges
);
220 typedef void (*range_action_t
)(struct memranges
*ranges
,
221 resource_t begin
, resource_t end
,
224 static void do_action(struct memranges
*ranges
,
225 resource_t base
, resource_t size
, unsigned long tag
,
226 range_action_t action
)
231 /* The addresses are aligned to 4096 bytes: the begin address is
232 * aligned down while the end address is aligned up to be conservative
233 * about the full range covered. */
234 begin
= ALIGN_DOWN(base
, 4096);
235 end
= begin
+ size
+ (base
- begin
);
236 end
= ALIGN_UP(end
, 4096) - 1;
237 action(ranges
, begin
, end
, tag
);
240 void memranges_create_hole(struct memranges
*ranges
,
241 resource_t base
, resource_t size
)
243 do_action(ranges
, base
, size
, -1, remove_memranges
);
246 void memranges_insert(struct memranges
*ranges
,
247 resource_t base
, resource_t size
, unsigned long tag
)
249 do_action(ranges
, base
, size
, tag
, merge_add_memranges
);
/* Context threaded through search_global_resources() into collect_ranges():
 * the destination range set and the tag to apply to collected resources. */
struct collect_context {
	struct memranges *ranges;
	unsigned long tag;
};
257 static void collect_ranges(void *gp
, struct device
*dev
, struct resource
*res
)
259 struct collect_context
*ctx
= gp
;
264 memranges_insert(ctx
->ranges
, res
->base
, res
->size
, ctx
->tag
);
267 void memranges_add_resources(struct memranges
*ranges
,
268 unsigned long mask
, unsigned long match
,
271 struct collect_context context
;
273 /* Only deal with MEM resources. */
274 mask
|= IORESOURCE_MEM
;
275 match
|= IORESOURCE_MEM
;
277 context
.ranges
= ranges
;
279 search_global_resources(mask
, match
, collect_ranges
, &context
);
282 void memranges_init(struct memranges
*ranges
,
283 unsigned long mask
, unsigned long match
,
286 ranges
->entries
= NULL
;
287 memranges_add_resources(ranges
, mask
, match
, tag
);
290 void memranges_teardown(struct memranges
*ranges
)
292 while (ranges
->entries
!= NULL
) {
293 range_entry_unlink_and_free(&ranges
->entries
, ranges
->entries
);
297 void memranges_fill_holes_up_to(struct memranges
*ranges
,
298 resource_t limit
, unsigned long tag
)
300 struct range_entry
*cur
;
301 struct range_entry
*prev
;
304 for (cur
= ranges
->entries
; cur
!= NULL
; cur
= cur
->next
) {
305 /* First entry. Just set prev. */
311 /* If the previous entry does not directly precede the current
312 * entry then add a new entry just after the previous one. */
313 if (range_entry_end(prev
) != cur
->begin
) {
316 end
= cur
->begin
- 1;
319 range_list_add(&prev
->next
, range_entry_end(prev
),
325 /* Hit the requested range limit. No other entries after this
327 if (cur
->begin
>= limit
)
331 /* Handle the case where the limit was never reached. A new entry needs
332 * to be added to cover the range up to the limit. */
333 if (prev
!= NULL
&& range_entry_end(prev
) < limit
)
334 range_list_add(&prev
->next
, range_entry_end(prev
),
337 /* Merge all entries that were newly added. */
338 merge_neighbor_entries(ranges
);
341 struct range_entry
*memranges_next_entry(struct memranges
*ranges
,
342 const struct range_entry
*r
)