/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <bootstate.h>
#include <bootmem.h>
#include <cbmem.h>
#include <console/console.h>
#include <stdlib.h>
#include <arch/early_variables.h>
#if CONFIG_HAVE_ACPI_RESUME && !defined(__PRE_RAM__)
#include <arch/acpi.h>
#endif

#ifndef UINT_MAX
#define UINT_MAX 4294967295U
#endif
/*
 * The dynamic cbmem code uses a root region. The root region boundary
 * addresses are determined by cbmem_top() and ROOT_MIN_SIZE. Just below
 * the address returned by cbmem_top() is a pointer that points to the
 * root data structure. The root data structure provides the book keeping
 * for each large entry.
 */
43 /* The root region is at least DYN_CBMEM_ALIGN_SIZE . */
44 #define ROOT_MIN_SIZE DYN_CBMEM_ALIGN_SIZE
45 #define CBMEM_POINTER_MAGIC 0xc0389479
46 #define CBMEM_ENTRY_MAGIC ~(CBMEM_POINTER_MAGIC)
48 /* The cbmem_root_pointer structure lives just below address returned
49 * from cbmem_top(). It points to the root data structure that
50 * maintains the entries. */
51 struct cbmem_root_pointer
{
54 } __attribute__((packed
));
61 } __attribute__((packed
));
68 struct cbmem_entry entries
[0];
69 } __attribute__((packed
));
72 static inline void *cbmem_top_cached(void)
74 #if !defined(__PRE_RAM__)
75 static void *cached_cbmem_top
;
77 if (cached_cbmem_top
== NULL
)
78 cached_cbmem_top
= cbmem_top();
80 return cached_cbmem_top
;
86 static inline uintptr_t get_top_aligned(void)
90 /* Align down what is returned from cbmem_top(). */
91 top
= (uintptr_t)cbmem_top_cached();
92 top
&= ~(DYN_CBMEM_ALIGN_SIZE
- 1);
97 static inline void *get_root(void)
99 uintptr_t pointer_addr
;
100 struct cbmem_root_pointer
*pointer
;
102 pointer_addr
= get_top_aligned();
103 pointer_addr
-= sizeof(struct cbmem_root_pointer
);
105 pointer
= (void *)pointer_addr
;
106 if (pointer
->magic
!= CBMEM_POINTER_MAGIC
)
109 pointer_addr
= pointer
->root
;
110 return (void *)pointer_addr
;
113 static inline void cbmem_entry_assign(struct cbmem_entry
*entry
,
114 u32 id
, u32 start
, u32 size
)
116 entry
->magic
= CBMEM_ENTRY_MAGIC
;
117 entry
->start
= start
;
122 static inline const struct cbmem_entry
*
123 cbmem_entry_append(struct cbmem_root
*root
, u32 id
, u32 start
, u32 size
)
125 struct cbmem_entry
*cbmem_entry
;
127 cbmem_entry
= &root
->entries
[root
->num_entries
];
130 cbmem_entry_assign(cbmem_entry
, id
, start
, size
);
135 void cbmem_initialize_empty(void)
137 uintptr_t pointer_addr
;
139 unsigned long max_entries
;
140 struct cbmem_root
*root
;
141 struct cbmem_root_pointer
*pointer
;
143 /* Place the root pointer and the root. The number of entries is
144 * dictated by difference between the root address and the pointer
145 * where the root address is aligned down to
146 * DYN_CBMEM_ALIGN_SIZE. The pointer falls just below the
147 * address returned by get_top_aligned(). */
148 pointer_addr
= get_top_aligned();
149 root_addr
= pointer_addr
- ROOT_MIN_SIZE
;
150 root_addr
&= ~(DYN_CBMEM_ALIGN_SIZE
- 1);
151 pointer_addr
-= sizeof(struct cbmem_root_pointer
);
153 max_entries
= (pointer_addr
- (root_addr
+ sizeof(*root
))) /
154 sizeof(struct cbmem_entry
);
156 pointer
= (void *)pointer_addr
;
157 pointer
->magic
= CBMEM_POINTER_MAGIC
;
158 pointer
->root
= root_addr
;
160 root
= (void *)root_addr
;
161 root
->max_entries
= max_entries
;
162 root
->num_entries
= 0;
164 root
->size
= pointer_addr
- root_addr
+
165 sizeof(struct cbmem_root_pointer
);
167 /* Add an entry covering the root region. */
168 cbmem_entry_append(root
, CBMEM_ID_ROOT
, root_addr
, root
->size
);
170 printk(BIOS_DEBUG
, "CBMEM: root @ %p %d entries.\n",
171 root
, root
->max_entries
);
173 /* Complete migration to CBMEM. */
174 cbmem_run_init_hooks();
177 static inline int cbmem_fail_recovery(void)
179 cbmem_initialize_empty();
184 static int validate_entries(struct cbmem_root
*root
)
187 uintptr_t current_end
;
189 current_end
= get_top_aligned();
191 printk(BIOS_DEBUG
, "CBMEM: recovering %d/%d entries from root @ %p\n",
192 root
->num_entries
, root
->max_entries
, root
);
194 /* Check that all regions are properly aligned and are just below
195 * the previous entry */
196 for (i
= 0; i
< root
->num_entries
; i
++) {
197 struct cbmem_entry
*entry
= &root
->entries
[i
];
199 if (entry
->magic
!= CBMEM_ENTRY_MAGIC
)
202 if (entry
->start
& (DYN_CBMEM_ALIGN_SIZE
- 1))
205 if (entry
->start
+ entry
->size
!= current_end
)
208 current_end
= entry
->start
;
214 int cbmem_initialize(void)
216 struct cbmem_root
*root
;
217 uintptr_t top_according_to_root
;
221 /* No recovery possible since root couldn't be recovered. */
223 return cbmem_fail_recovery();
225 /* Sanity check the root. */
226 top_according_to_root
= (root
->size
+ (uintptr_t)root
);
227 if (get_top_aligned() != top_according_to_root
)
228 return cbmem_fail_recovery();
230 if (root
->num_entries
> root
->max_entries
)
231 return cbmem_fail_recovery();
233 if ((root
->max_entries
* sizeof(struct cbmem_entry
)) >
234 (root
->size
- sizeof(struct cbmem_root_pointer
) - sizeof(*root
)))
235 return cbmem_fail_recovery();
237 /* Validate current entries. */
238 if (validate_entries(root
))
239 return cbmem_fail_recovery();
241 #if defined(__PRE_RAM__)
242 /* Lock the root in the romstage on a recovery. The assumption is that
243 * recovery is called during romstage on the S3 resume path. */
247 /* Complete migration to CBMEM. */
248 cbmem_run_init_hooks();
250 /* Recovery successful. */
254 int cbmem_recovery(int is_wakeup
)
258 cbmem_initialize_empty();
260 rv
= cbmem_initialize();
264 static uintptr_t cbmem_base(void)
266 struct cbmem_root
*root
;
274 low_addr
= (uintptr_t)root
;
275 /* a low address is low. */
276 low_addr
&= 0xffffffff;
278 /* Assume the lowest address is the last one added. */
279 if (root
->num_entries
> 0) {
280 low_addr
= root
->entries
[root
->num_entries
- 1].start
;
287 const struct cbmem_entry
*cbmem_entry_add(u32 id
, u64 size64
)
289 struct cbmem_root
*root
;
290 const struct cbmem_entry
*entry
;
295 entry
= cbmem_entry_find(id
);
300 /* Only handle sizes <= UINT_MAX internally. */
301 if (size64
> (u64
)UINT_MAX
)
311 /* Nothing can be added once it is locked down. */
315 if (root
->max_entries
== root
->num_entries
)
318 aligned_size
= ALIGN(size
, DYN_CBMEM_ALIGN_SIZE
);
320 base
-= aligned_size
;
322 return cbmem_entry_append(root
, id
, base
, aligned_size
);
325 void *cbmem_add(u32 id
, u64 size
)
327 const struct cbmem_entry
*entry
;
329 entry
= cbmem_entry_add(id
, size
);
334 return cbmem_entry_start(entry
);
337 /* Retrieve a region provided a given id. */
338 const struct cbmem_entry
*cbmem_entry_find(u32 id
)
340 struct cbmem_root
*root
;
341 const struct cbmem_entry
*entry
;
351 for (i
= 0; i
< root
->num_entries
; i
++) {
352 if (root
->entries
[i
].id
== id
) {
353 entry
= &root
->entries
[i
];
361 void *cbmem_find(u32 id
)
363 const struct cbmem_entry
*entry
;
365 entry
= cbmem_entry_find(id
);
370 return cbmem_entry_start(entry
);
373 /* Remove a reserved region. Returns 0 on success, < 0 on error. Note: A region
374 * cannot be removed unless it was the last one added. */
375 int cbmem_entry_remove(const struct cbmem_entry
*entry
)
377 unsigned long entry_num
;
378 struct cbmem_root
*root
;
385 if (root
->num_entries
== 0)
388 /* Nothing can be removed. */
392 entry_num
= entry
- &root
->entries
[0];
394 /* If the entry is the last one in the root it can be removed. */
395 if (entry_num
== (root
->num_entries
- 1)) {
403 u64
cbmem_entry_size(const struct cbmem_entry
*entry
)
408 void *cbmem_entry_start(const struct cbmem_entry
*entry
)
410 uintptr_t addr
= entry
->start
;
415 #if !defined(__PRE_RAM__)
416 /* selected cbmem can be initialized early in ramstage. Additionally, that
417 * means cbmem console can be reinitialized early as well. The post_device
418 * function is empty since cbmem was initialized early in ramstage. */
419 static void init_cbmem_pre_device(void *unused
)
424 BOOT_STATE_INIT_ENTRIES(cbmem_bscb
) = {
425 BOOT_STATE_INIT_ENTRY(BS_PRE_DEVICE
, BS_ON_ENTRY
,
426 init_cbmem_pre_device
, NULL
),
429 void cbmem_add_bootmem(void)
435 top
= get_top_aligned();
436 bootmem_add_range(base
, top
- base
, LB_MEM_TABLE
);
439 void cbmem_list(void)
442 struct cbmem_root
*root
;
449 for (i
= 0; i
< root
->num_entries
; i
++) {
450 struct cbmem_entry
*entry
;
452 entry
= &root
->entries
[i
];
454 cbmem_print_entry(i
, entry
->id
, entry
->start
, entry
->size
);
457 #endif /* __PRE_RAM__ */