CBMEM: Change some types to uintptr_t
src/lib/dynamic_cbmem.c

/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <bootstate.h>
#include <bootmem.h>
#include <console/console.h>
#include <cbmem.h>
#include <string.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if CONFIG_HAVE_ACPI_RESUME && !defined(__PRE_RAM__)
#include <arch/acpi.h>
#endif

#ifndef UINT_MAX
#define UINT_MAX 4294967295U
#endif

/*
 * The dynamic cbmem code uses a root region. The root region boundary
 * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
 * the address returned by cbmem_top() is a pointer that points to the
 * root data structure. The root data structure provides the bookkeeping
 * for each large entry.
 */

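/*
 * A rough sketch of the layout described above (illustrative only; exact
 * offsets depend on DYN_CBMEM_ALIGN_SIZE and the platform's cbmem_top()):
 *
 *   high addresses
 *   +----------------------------+ <- cbmem_top(), aligned down
 *   | struct cbmem_root_pointer  |
 *   +----------------------------+
 *   | entries[] of cbmem_root    |   (array grows up toward the pointer)
 *   +----------------------------+
 *   | struct cbmem_root          | <- root_addr: ROOT_MIN_SIZE below the
 *   +----------------------------+    top, aligned down
 *   | allocations grow downward  |
 *   +----------------------------+ <- cbmem_base(), lowest entry start
 *   low addresses
 */
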
/* The root region is at least DYN_CBMEM_ALIGN_SIZE. */
#define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
#define CBMEM_POINTER_MAGIC 0xc0389479
#define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)

/* The cbmem_root_pointer structure lives just below the address returned
 * by cbmem_top(). It points to the root data structure that
 * maintains the entries. */
struct cbmem_root_pointer {
        u32 magic;
        u32 root;
} __attribute__((packed));

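/* Each allocation is tracked by a cbmem_entry record: a magic value for
 * validation, the 32-bit start address of the region, its size in bytes,
 * and the id it was registered under. */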
struct cbmem_entry {
        u32 magic;
        u32 start;
        u32 size;
        u32 id;
} __attribute__((packed));

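/* The root tracks how many entry slots exist (max_entries), how many are
 * in use (num_entries), whether further changes are allowed (locked), the
 * total size of the root region, and the entry records themselves. */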
struct cbmem_root {
        u32 max_entries;
        u32 num_entries;
        u32 locked;
        u32 size;
        struct cbmem_entry entries[0];
} __attribute__((packed));

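/* Return cbmem_top(), caching the value in ramstage where a static
 * variable is available; pre-RAM stages call cbmem_top() every time. */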
static inline void *cbmem_top_cached(void)
{
#if !defined(__PRE_RAM__)
        static void *cached_cbmem_top;

        if (cached_cbmem_top == NULL)
                cached_cbmem_top = cbmem_top();

        return cached_cbmem_top;
#else
        return cbmem_top();
#endif
}

static inline uintptr_t get_top_aligned(void)
{
        uintptr_t top;

        /* Align down what is returned from cbmem_top(). */
        top = (uintptr_t)cbmem_top_cached();
        top &= ~(DYN_CBMEM_ALIGN_SIZE - 1);

        return top;
}

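/* Locate the root structure through the root pointer that sits just below
 * the aligned top. Returns NULL when the pointer's magic does not match,
 * i.e. no cbmem area has been set up or it has been corrupted. */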
static inline void *get_root(void)
{
        uintptr_t pointer_addr;
        struct cbmem_root_pointer *pointer;

        pointer_addr = get_top_aligned();
        pointer_addr -= sizeof(struct cbmem_root_pointer);

        pointer = (void *)pointer_addr;
        if (pointer->magic != CBMEM_POINTER_MAGIC)
                return NULL;

        pointer_addr = pointer->root;
        return (void *)pointer_addr;
}

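/* Fill in an entry record (cbmem_entry_assign) and append the next record
 * to the root's entry array (cbmem_entry_append). No bounds check is done
 * here; callers must ensure a free slot exists. */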
static inline void cbmem_entry_assign(struct cbmem_entry *entry,
                                      u32 id, u32 start, u32 size)
{
        entry->magic = CBMEM_ENTRY_MAGIC;
        entry->start = start;
        entry->size = size;
        entry->id = id;
}

static inline const struct cbmem_entry *
cbmem_entry_append(struct cbmem_root *root, u32 id, u32 start, u32 size)
{
        struct cbmem_entry *cbmem_entry;

        cbmem_entry = &root->entries[root->num_entries];
        root->num_entries++;

        cbmem_entry_assign(cbmem_entry, id, start, size);

        return cbmem_entry;
}

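/* Create a brand new cbmem area: place the root pointer and an empty root
 * just below the aligned top, add an entry covering the root region itself,
 * and run the init hooks. Any previous contents are ignored. */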
void cbmem_initialize_empty(void)
{
        uintptr_t pointer_addr;
        uintptr_t root_addr;
        unsigned long max_entries;
        struct cbmem_root *root;
        struct cbmem_root_pointer *pointer;

        /* Place the root pointer and the root. The number of entries is
         * dictated by the difference between the root address (aligned down
         * to DYN_CBMEM_ALIGN_SIZE) and the pointer, which falls just below
         * the address returned by get_top_aligned(). */
        pointer_addr = get_top_aligned();
        root_addr = pointer_addr - ROOT_MIN_SIZE;
        root_addr &= ~(DYN_CBMEM_ALIGN_SIZE - 1);
        pointer_addr -= sizeof(struct cbmem_root_pointer);

        max_entries = (pointer_addr - (root_addr + sizeof(*root))) /
                      sizeof(struct cbmem_entry);

        pointer = (void *)pointer_addr;
        pointer->magic = CBMEM_POINTER_MAGIC;
        pointer->root = root_addr;

        root = (void *)root_addr;
        root->max_entries = max_entries;
        root->num_entries = 0;
        root->locked = 0;
        root->size = pointer_addr - root_addr +
                     sizeof(struct cbmem_root_pointer);

        /* Add an entry covering the root region. */
        cbmem_entry_append(root, CBMEM_ID_ROOT, root_addr, root->size);

        printk(BIOS_DEBUG, "CBMEM: root @ %p %d entries.\n",
               root, root->max_entries);

        /* Complete migration to CBMEM. */
        cbmem_run_init_hooks();
}

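/* Recovery failed: start over with an empty cbmem, signal the failure via
 * cbmem_fail_resume(), and return 1 so callers can propagate the error. */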
static inline int cbmem_fail_recovery(void)
{
        cbmem_initialize_empty();
        cbmem_fail_resume();
        return 1;
}

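/* Check every entry in the root: the magic must match, the start must be
 * aligned to DYN_CBMEM_ALIGN_SIZE, and each region must end exactly where
 * the previous entry begins. Returns 0 if the chain is intact, -1 otherwise. */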
static int validate_entries(struct cbmem_root *root)
{
        unsigned int i;
        uintptr_t current_end;

        current_end = get_top_aligned();

        printk(BIOS_DEBUG, "CBMEM: recovering %d/%d entries from root @ %p\n",
               root->num_entries, root->max_entries, root);

        /* Check that all regions are properly aligned and are just below
         * the previous entry. */
        for (i = 0; i < root->num_entries; i++) {
                struct cbmem_entry *entry = &root->entries[i];

                if (entry->magic != CBMEM_ENTRY_MAGIC)
                        return -1;

                if (entry->start & (DYN_CBMEM_ALIGN_SIZE - 1))
                        return -1;

                if (entry->start + entry->size != current_end)
                        return -1;

                current_end = entry->start;
        }

        return 0;
}

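/* Recover an existing cbmem area, e.g. on the S3 resume path. Any failed
 * sanity check falls back to cbmem_fail_recovery(). Returns 0 on successful
 * recovery, 1 otherwise. */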
int cbmem_initialize(void)
{
        struct cbmem_root *root;
        uintptr_t top_according_to_root;

        root = get_root();

        /* No recovery possible since root couldn't be recovered. */
        if (root == NULL)
                return cbmem_fail_recovery();

        /* Sanity check the root. */
        top_according_to_root = (root->size + (uintptr_t)root);
        if (get_top_aligned() != top_according_to_root)
                return cbmem_fail_recovery();

        if (root->num_entries > root->max_entries)
                return cbmem_fail_recovery();

        if ((root->max_entries * sizeof(struct cbmem_entry)) >
            (root->size - sizeof(struct cbmem_root_pointer) - sizeof(*root)))
                return cbmem_fail_recovery();

        /* Validate current entries. */
        if (validate_entries(root))
                return cbmem_fail_recovery();

#if defined(__PRE_RAM__)
        /* Lock the root in the romstage on a recovery. The assumption is that
         * recovery is called during romstage on the S3 resume path. */
        root->locked = 1;
#endif

        /* Complete migration to CBMEM. */
        cbmem_run_init_hooks();

        /* Recovery successful. */
        return 0;
}

int cbmem_recovery(int is_wakeup)
{
        int rv = 0;
        if (!is_wakeup)
                cbmem_initialize_empty();
        else
                rv = cbmem_initialize();
        return rv;
}

static uintptr_t cbmem_base(void)
{
        struct cbmem_root *root;
        uintptr_t low_addr;

        root = get_root();

        if (root == NULL)
                return 0;

        low_addr = (uintptr_t)root;
        /* Entry start addresses are stored as u32, so stay in 32-bit space. */
        low_addr &= 0xffffffff;

        /* Assume the lowest address is the last one added. */
        if (root->num_entries > 0) {
                low_addr = root->entries[root->num_entries - 1].start;
        }

        return low_addr;
}

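/* Add a new entry below the current lowest allocation, rounding the size
 * up to DYN_CBMEM_ALIGN_SIZE. If an entry with the given id already exists
 * it is returned instead of adding a new one. Returns NULL when the size
 * is too large, the root is missing or locked, or no entry slots remain. */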
const struct cbmem_entry *cbmem_entry_add(u32 id, u64 size64)
{
        struct cbmem_root *root;
        const struct cbmem_entry *entry;
        uintptr_t base;
        u32 size;
        u32 aligned_size;

        entry = cbmem_entry_find(id);

        if (entry != NULL)
                return entry;

        /* Only handle sizes <= UINT_MAX internally. */
        if (size64 > (u64)UINT_MAX)
                return NULL;

        size = size64;

        root = get_root();

        if (root == NULL)
                return NULL;

        /* Nothing can be added once it is locked down. */
        if (root->locked)
                return NULL;

        if (root->max_entries == root->num_entries)
                return NULL;

        aligned_size = ALIGN(size, DYN_CBMEM_ALIGN_SIZE);
        base = cbmem_base();
        base -= aligned_size;

        return cbmem_entry_append(root, id, base, aligned_size);
}

void *cbmem_add(u32 id, u64 size)
{
        const struct cbmem_entry *entry;

        entry = cbmem_entry_add(id, size);

        if (entry == NULL)
                return NULL;

        return cbmem_entry_start(entry);
}

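/*
 * A minimal usage sketch (illustrative only; CBMEM_ID_CONSOLE is used as an
 * example id from <cbmem.h> and the size is arbitrary):
 *
 *	void *buf = cbmem_add(CBMEM_ID_CONSOLE, 8 * 1024);
 *	if (buf == NULL)
 *		printk(BIOS_ERR, "CBMEM: allocation failed\n");
 *
 * Adding the same id twice returns the original region rather than a second
 * allocation, so callers can treat cbmem_add() as idempotent.
 */
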
/* Retrieve a region provided a given id. */
const struct cbmem_entry *cbmem_entry_find(u32 id)
{
        struct cbmem_root *root;
        const struct cbmem_entry *entry;
        unsigned int i;

        root = get_root();

        if (root == NULL)
                return NULL;

        entry = NULL;

        for (i = 0; i < root->num_entries; i++) {
                if (root->entries[i].id == id) {
                        entry = &root->entries[i];
                        break;
                }
        }

        return entry;
}

void *cbmem_find(u32 id)
{
        const struct cbmem_entry *entry;

        entry = cbmem_entry_find(id);

        if (entry == NULL)
                return NULL;

        return cbmem_entry_start(entry);
}

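/*
 * Lookup sketch (illustrative; CBMEM_ID_CONSOLE mirrors the cbmem_add()
 * example above):
 *
 *	void *buf = cbmem_find(CBMEM_ID_CONSOLE);
 *	if (buf == NULL)
 *		printk(BIOS_DEBUG, "CBMEM: console region not found\n");
 *
 * cbmem_find() never allocates; it only returns regions added earlier,
 * which is what makes it safe to use on the resume path.
 */
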
/* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
 * cannot be removed unless it was the last one added. */
int cbmem_entry_remove(const struct cbmem_entry *entry)
{
        unsigned long entry_num;
        struct cbmem_root *root;

        root = get_root();

        if (root == NULL)
                return -1;

        if (root->num_entries == 0)
                return -1;

        /* Nothing can be removed once the root is locked. */
        if (root->locked)
                return -1;

        entry_num = entry - &root->entries[0];

        /* If the entry is the last one in the root it can be removed. */
        if (entry_num == (root->num_entries - 1)) {
                root->num_entries--;
                return 0;
        }

        return -1;
}

u64 cbmem_entry_size(const struct cbmem_entry *entry)
{
        return entry->size;
}

void *cbmem_entry_start(const struct cbmem_entry *entry)
{
        uintptr_t addr = entry->start;
        return (void *)addr;
}

#if !defined(__PRE_RAM__)
/* Selected cbmem regions can be initialized early in ramstage. Additionally,
 * that means the cbmem console can be reinitialized early as well. The
 * post_device function is empty since cbmem was initialized early in
 * ramstage. */
static void init_cbmem_pre_device(void *unused)
{
        cbmem_initialize();
}

BOOT_STATE_INIT_ENTRIES(cbmem_bscb) = {
        BOOT_STATE_INIT_ENTRY(BS_PRE_DEVICE, BS_ON_ENTRY,
                              init_cbmem_pre_device, NULL),
};

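/* Report the entire cbmem region, from the lowest allocation up to the
 * aligned top, to the bootmem allocator as table memory. */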
void cbmem_add_bootmem(void)
{
        uintptr_t base;
        uintptr_t top;

        base = cbmem_base();
        top = get_top_aligned();
        bootmem_add_range(base, top - base, LB_MEM_TABLE);
}

void cbmem_list(void)
{
        unsigned int i;
        struct cbmem_root *root;

        root = get_root();

        if (root == NULL)
                return;

        for (i = 0; i < root->num_entries; i++) {
                struct cbmem_entry *entry;

                entry = &root->entries[i];

                cbmem_print_entry(i, entry->id, entry->start, entry->size);
        }
}
#endif /* __PRE_RAM__ */