/*
 *   Copyright (C) 1999-2004 Samuel Rydh (samuel@ibrium.se)
 *   Copyright (C) 2004 Stefan Reinauer
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation
 *
 */
#include "libopenbios/bindings.h"
#include "libopenbios/ofmem.h"
/*
 * define OFMEM_FILL_RANGE to claim any unclaimed virtual and
 * physical memory in the range for ofmem_map
 *
 * TODO: remove this macro and wrapped code if not needed by implementations
 */
//#define OFMEM_FILL_RANGE
static inline size_t ALIGN_SIZE( size_t x, size_t a )
{
    return (x + a - 1) & ~(a - 1);
}
static ucell
get_ram_size( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    return ofmem->ramsize;
}
/************************************************************************/
/* debug */
/************************************************************************/
static void
print_range( range_t *r, char *str )
{
    printk("--- Range %s ---\n", str );
    for( ; r; r = r->next )
        printk("%08lx - %08lx\n", r->start, r->start + r->size - 1 );
}
static void
print_phys_range( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    print_range( ofmem->phys_range, "phys" );
}

static void
print_virt_range( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    print_range( ofmem->virt_range, "virt" );
}

static void
print_trans( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t = ofmem->trans;

    printk("--- Translations ---\n");
    for( ; t; t = t->next )
        printk("%08lx -> %08lx [size %lx]\n", t->virt, t->phys, t->size );
}
/************************************************************************/
/* OF private allocations */
/************************************************************************/
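/*
 * Implementation note (added for clarity): the private heap is a simple
 * bump allocator.  ofmem->next_malloc grows from ofmem_arch_get_malloc_base()
 * towards ofmem_arch_get_heap_top(), and every block carries an alloc_desc_t
 * header in front of the returned pointer.  Freed blocks are never handed
 * back to the bump pointer; they go onto the size-sorted ofmem->mfree list
 * and may be reused by later allocations of a similar size.
 */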
void* ofmem_malloc( size_t size )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    alloc_desc_t *d, **pp;
    char *ret;
    ucell top;

    if( !size )
        return NULL;

    if( !ofmem->next_malloc )
        ofmem->next_malloc = (char*)ofmem_arch_get_malloc_base();

    size = ALIGN_SIZE(size + sizeof(alloc_desc_t), CONFIG_OFMEM_MALLOC_ALIGN);

    /* look in the freelist */
    for( pp = &ofmem->mfree; *pp && (**pp).size < size; pp = &(**pp).next ) {
    }

    /* waste at most 4K by taking an entry from the freelist */
    if( *pp && (**pp).size < size + 0x1000 ) {
        ret = (char*)*pp + sizeof(alloc_desc_t);
        memset( ret, 0, (**pp).size - sizeof(alloc_desc_t) );

        /* unlink the reused descriptor from the freelist */
        *pp = (**pp).next;
        return ret;
    }

    top = ofmem_arch_get_heap_top();

    if( (ucell)ofmem->next_malloc + size > top ) {
        printk("out of malloc memory (%x)!\n", size );
        return NULL;
    }

    d = (alloc_desc_t*) ofmem->next_malloc;
    ofmem->next_malloc += size;
    d->next = NULL;
    d->size = size;

    ret = (char*)d + sizeof(alloc_desc_t);
    memset( ret, 0, size - sizeof(alloc_desc_t) );

    return ret;
}
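/*
 * Note (added): ofmem_free() relies on the alloc_desc_t header written by
 * ofmem_malloc() still being intact; it simply threads the block back onto
 * the size-sorted freelist so the next ofmem_malloc() can stop at the first
 * entry that is large enough.
 */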
void ofmem_free( void *ptr )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    alloc_desc_t **pp, *d;

    /* it is legal to free NULL pointers (size zero allocations) */
    if( !ptr )
        return;

    d = (alloc_desc_t*)((char *)ptr - sizeof(alloc_desc_t));
    d->next = ofmem->mfree;

    /* insert in the (sorted) freelist */
    for( pp = &ofmem->mfree; *pp && (**pp).size < d->size; pp = &(**pp).next ) {
    }

    d->next = *pp;
    *pp = d;
}
void* ofmem_realloc( void *ptr, size_t size )
{
    alloc_desc_t *d = (alloc_desc_t*)((char *)ptr - sizeof(alloc_desc_t));
    void *p;

    if( !ptr )
        return malloc( size );
    if( !size ) {
        free( ptr );
        return NULL;
    }

    p = malloc( size );
    memcpy( p, ptr, MIN(d->size - sizeof(alloc_desc_t), size) );
    free( ptr );

    return p;
}
/************************************************************************/
/* "translations" and "available" property tracking */
/************************************************************************/
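/*
 * Note (added): the helpers below keep the standard Open Firmware
 * "available" property (on the /memory node and the MMU package) and the
 * "translations" property (on the MMU package) in sync with the internal
 * range and translation lists.  The phandles are supplied by
 * ofmem_register() at the bottom of this file; until then the update
 * routines silently do nothing.
 */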
static phandle_t s_phandle_memory = 0;
static phandle_t s_phandle_mmu = 0;
static void ofmem_update_mmu_translations( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t;
    int ncells;
    ucell *props;

    if (s_phandle_mmu == 0)
        return;

    /* count translation list entries */
    for( t = ofmem->trans, ncells = 0; t; t = t->next, ncells++ ) {
    }

    props = malloc(ncells * sizeof(ucell) * ofmem_arch_get_translation_entry_size());
    if (props == NULL)
        return;

    /* Call architecture-specific routines to generate translation entries */
    for( t = ofmem->trans, ncells = 0; t; t = t->next ) {
        ofmem_arch_create_translation_entry(&props[ncells], t);
        ncells += ofmem_arch_get_translation_entry_size();
    }

    set_property(s_phandle_mmu, "translations",
                 (char*)props, ncells * sizeof(props[0]));

    free(props);
}
static void ofmem_update_memory_available( phandle_t ph, range_t *range,
                                           u64 top_address )
{
    range_t *r;
    int ncells;
    ucell *props;
    ucell start, size;

    if (s_phandle_memory == 0)
        return;

    /* count phys_range list entries */
    for( r = range, ncells = 0; r; r = r->next, ncells++ ) {
    }

    /* inverse of phys_range list could take 2 more cells for the tail */
    props = malloc((ncells + 1) * sizeof(ucell) * 2);
    if (props == NULL)
        return;

    /* invert the claimed ranges into (start, size) pairs of free memory */
    start = 0;
    ncells = 0;

    for (r = range; r; r = r->next) {
        if (r->start >= top_address) {
            break;
        }

        size = r->start - start;
        if (size) {
            props[ncells++] = start;
            props[ncells++] = size;
        }

        start = r->start + r->size;
    }

    /* tail: free space above the last claimed range */
    if (start < top_address) {
        props[ncells++] = start;
        props[ncells++] = top_address - start;
    }

    set_property(ph, "available",
                 (char*)props, ncells * sizeof(props[0]));

    free(props);
}
static void ofmem_update_translations( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    ofmem_update_memory_available(s_phandle_memory,
            ofmem->phys_range, get_ram_size());
    ofmem_update_memory_available(s_phandle_mmu,
            ofmem->virt_range, -1ULL);
    ofmem_update_mmu_translations();
}
/************************************************************************/
/* client interface */
/************************************************************************/
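/*
 * Illustrative usage sketch (added, not part of the original source):
 * a client that needs a 16 KiB page-aligned buffer would typically do
 *
 *     ucell phys = ofmem_claim_phys( 0, 0x4000, 0x1000 );
 *     ucell virt = ofmem_claim_virt( 0, 0x4000, 0x1000 );
 *     ofmem_map( phys, virt, 0x4000, -1 );    (-1 selects the default mode)
 *
 * or call ofmem_claim( 0, 0x4000, 0x1000 ), which performs the same three
 * steps in one go.
 */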
static int is_free( ucell ea, ucell size, range_t *r )
{
    for( ; r; r = r->next ) {
        if( r->start + r->size - 1 >= ea && r->start <= ea )
            return 0;
        if( r->start >= ea && r->start <= ea + size - 1 )
            return 0;
    }
    return 1;
}
static void add_entry_( ucell ea, ucell size, range_t **r )
{
    range_t *nr;

    for( ; *r && (**r).start < ea; r = &(**r).next ) {
    }

    nr = (range_t*)malloc( sizeof(range_t) );
    nr->next = *r;
    nr->start = ea;
    nr->size = size;
    *r = nr;
}
static int add_entry( ucell ea, ucell size, range_t **r )
{
    if( !is_free( ea, size, *r ) ) {
        OFMEM_TRACE("add_entry: range not free!\n");
        return -1;
    }
    add_entry_( ea, size, r );
    return 0;
}
#if defined(OFMEM_FILL_RANGE)
static void join_ranges( range_t **rr )
{
    range_t *n, *r = *rr;

    while( r ) {
        if( !(n = r->next) )
            break;

        if( r->start + r->size - 1 >= n->start - 1 ) {
            int s = n->size + (n->start - r->start - r->size);
            if( s > 0 )
                r->size += s;
            r->next = n->next;
            free( n );
            continue;
        }
        r = r->next;
    }
}

static void fill_range( ucell ea, ucell size, range_t **rr )
{
    add_entry_( ea, size, rr );
    join_ranges( rr );
}
#endif
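/*
 * Note (added): find_area() scans for a base address between min and max
 * that satisfies the (power-of-two) alignment and does not overlap any
 * entry on the given range list.  With reverse != 0 the search starts at
 * the top and walks the list backwards, which is how ofmem_claim() makes
 * aligned allocations come from the top of RAM.
 */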
static ucell
find_area( ucell align, ucell size, range_t *r,
           ucell min, ucell max, int reverse )
{
    ucell base;
    range_t *r2;

    if( (align & (align - 1)) ) {
        OFMEM_TRACE("bad alignment " FMT_ucell "\n", align);
        align = 0x1000;
    }
    if( !align )
        align = 0x1000;

    base = reverse ? max - size : min;
    r2 = reverse ? NULL : r;

    for( ;; ) {
        if( !reverse ) {
            base = (base + align - 1) & ~(align - 1);

            /* out of space */
            if( base + size - 1 >= max - 1 )
                break;
        } else {
            if( base > max - size )
                base = max - size;
            base -= base & (align - 1);
        }

        if( is_free( base, size, r ) )
            return base;

        if( !reverse ) {
            if( !r2 )
                break;
            base = r2->start + r2->size;
            r2 = r2->next;
        } else {
            range_t *rp;

            for( rp = r; rp && rp->next != r2; rp = rp->next ) {
            }

            r2 = rp;
            if( !r2 )
                break;
            base = r2->start - size;
        }
    }
    return -1;
}
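/*
 * Note (added): ofmem_claim_phys_() and ofmem_claim_virt_() share the same
 * pattern: with align == 0 the exact address is taken if and only if it is
 * still free, otherwise find_area() picks a suitable address.  Both return
 * -1 on failure, which callers must check before using the result.
 */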
static ucell
ofmem_claim_phys_( ucell phys, ucell size, ucell align,
                   ucell min, ucell max, int reverse )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    if( !align ) {
        if( !is_free( phys, size, ofmem->phys_range ) ) {
            OFMEM_TRACE("Non-free physical memory claimed!\n");
            return -1;
        }
        add_entry( phys, size, &ofmem->phys_range );
    } else {
        phys = find_area( align, size, ofmem->phys_range, min, max, reverse );
        if( phys == -1 ) {
            printk("ofmem_claim_phys - out of space (failed request for " FMT_ucellx " bytes)\n", size);
            return -1;
        }
        add_entry( phys, size, &ofmem->phys_range );
    }

    ofmem_update_translations();

    return phys;
}
/* if align != 0, phys is ignored. Returns -1 on error */
ucell
ofmem_claim_phys( ucell phys, ucell size, ucell align )
{
    OFMEM_TRACE("ofmem_claim phys=" FMT_ucellx " size=" FMT_ucellx
                " align=" FMT_ucellx "\n",
                phys, size, align);

    return ofmem_claim_phys_( phys, size, align, 0, get_ram_size(), 0 );
}
static ucell
ofmem_claim_virt_( ucell virt, ucell size, ucell align,
                   ucell min, ucell max, int reverse )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    if( !align ) {
        if( !is_free( virt, size, ofmem->virt_range ) ) {
            OFMEM_TRACE("Non-free virtual memory claimed!\n");
            return -1;
        }
        add_entry( virt, size, &ofmem->virt_range );
    } else {
        virt = find_area( align, size, ofmem->virt_range, min, max, reverse );
        if( virt == -1 ) {
            printk("ofmem_claim_virt - out of space (failed request for " FMT_ucellx " bytes)\n", size);
            return -1;
        }
        add_entry( virt, size, &ofmem->virt_range );
    }

    return virt;
}
ucell
ofmem_claim_virt( ucell virt, ucell size, ucell align )
{
    OFMEM_TRACE("ofmem_claim_virt virt=" FMT_ucellx " size=" FMT_ucellx
                " align=" FMT_ucellx "\n",
                virt, size, align);

    /* printk("+ ofmem_claim virt %08lx %lx %ld\n", virt, size, align ); */
    return ofmem_claim_virt_( virt, size, align,
            get_ram_size(), ofmem_arch_get_virt_top(), 0 );
}
/* if align != 0, phys is ignored. Returns -1 on error */
ucell
ofmem_retain( ucell phys, ucell size, ucell align )
{
    retain_t *retained = ofmem_arch_get_retained();
    ucell retain_phys;

    OFMEM_TRACE("ofmem_retain phys=" FMT_ucellx " size=" FMT_ucellx
                " align=" FMT_ucellx "\n",
                phys, size, align);

    retain_phys = ofmem_claim_phys_( phys, size, align, 0, get_ram_size(), 0 );

    /* Add to the retain_phys_range list */
    retained->retain_phys_range[retained->numentries].next = NULL;
    retained->retain_phys_range[retained->numentries].start = retain_phys;
    retained->retain_phys_range[retained->numentries].size = size;
    retained->numentries++;

    return retain_phys;
}
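/*
 * Note (added): ofmem_claim() below preserves the low 12 bits of the
 * requested address in its return value (offs) while the underlying
 * mapping is rounded out to whole 4 KiB pages.
 */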
/* allocate both physical and virtual space and add a translation */
ucell
ofmem_claim( ucell addr, ucell size, ucell align )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    ucell virt, phys;
    ucell offs = addr & 0xfff;

    OFMEM_TRACE("ofmem_claim " FMT_ucellx " " FMT_ucellx " " FMT_ucellx "\n", addr, size, align);

    if( !align ) {
        if( is_free(addr, size, ofmem->virt_range) &&
            is_free(addr, size, ofmem->phys_range) ) {
            ofmem_claim_phys_( addr, size, 0, 0, 0, 0 );
            ofmem_claim_virt_( addr, size, 0, 0, 0, 0 );
            virt = phys = addr;
        } else {
            OFMEM_TRACE("**** ofmem_claim failure ***!\n");
            return -1;
        }
    } else {
        if( align < 0x1000 )
            align = 0x1000;
        phys = ofmem_claim_phys_( addr, size, align, 0, get_ram_size(), 1 /* reverse */ );
        virt = ofmem_claim_virt_( addr, size, align, 0, get_ram_size(), 1 /* reverse */ );
        if( phys == -1 || virt == -1 ) {
            OFMEM_TRACE("ofmem_claim failed\n");
            return -1;
        }
        /* printk("...phys = %08lX, virt = %08lX, size = %08lX\n", phys, virt, size ); */
    }

    /* align the mapping to page boundaries */
    if( phys & 0xfff ) {
        size += (phys & 0xfff);
        virt -= (phys & 0xfff);
        phys &= ~0xfff;
    }
    size = (size + 0xfff) & ~0xfff;

    /* printk("...free memory found... phys: %08lX, virt: %08lX, size %lX\n", phys, virt, size ); */
    ofmem_map( phys, virt, size, -1 );

    return virt + offs;
}
/************************************************************************/
/* keep track of ea -> phys translations */
/************************************************************************/
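/*
 * Note (added): the translation list (ofmem->trans) is kept sorted by
 * virtual address and its entries never overlap.  split_trans() cuts an
 * existing entry in two at the given boundary so that the map and unmap
 * routines can always operate on whole entries.
 */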
static void split_trans( ucell virt )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t, *t2;

    for( t = ofmem->trans; t; t = t->next ) {
        if( virt > t->virt && virt < t->virt + t->size - 1 ) {
            t2 = (translation_t*)malloc( sizeof(translation_t) );
            t2->virt = virt;
            t2->size = t->size - (virt - t->virt);
            t->size = virt - t->virt;
            t2->phys = t->phys + t->size;
            t2->mode = t->mode;
            t2->next = t->next;
            t->next = t2;
        }
    }
}
int ofmem_map_page_range( ucell phys, ucell virt, ucell size, ucell mode )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t, **tt;

    OFMEM_TRACE("ofmem_map_page_range " FMT_ucellx
                " -> " FMT_ucellx " " FMT_ucellx " mode " FMT_ucellx "\n",
                virt, phys, size, mode );

    split_trans( virt );
    split_trans( virt + size );

    /* detect remappings */
    for( t = ofmem->trans; t; ) {
        if( virt == t->virt || (virt < t->virt && virt + size > t->virt) ) {
            if( t->phys + virt - t->virt != phys ) {
                OFMEM_TRACE("mapping altered virt=" FMT_ucellx ")\n", t->virt );
            } else if( t->mode != mode ) {
                OFMEM_TRACE("mapping mode altered virt=" FMT_ucellx
                            " old mode=" FMT_ucellx " new mode=" FMT_ucellx "\n",
                            t->virt, t->mode, mode);
            }

            /* unlink the old entry */
            for( tt = &ofmem->trans; *tt != t; tt = &(**tt).next ) {
            }
            *tt = t->next;

            /* really unmap these pages */
            ofmem_arch_unmap_pages(t->virt, t->size);

            free((char*)t);

            /* restart the scan since the list changed */
            t = ofmem->trans;
            continue;
        }
        t = t->next;
    }

    /* add the new translation, keeping the list sorted by virt */
    for( tt = &ofmem->trans; *tt && (**tt).virt < virt; tt = &(**tt).next ) {
    }

    t = (translation_t*)malloc( sizeof(translation_t) );
    t->virt = virt;
    t->phys = phys;
    t->size = size;
    t->mode = mode;
    t->next = *tt;
    *tt = t;

    ofmem_update_translations();

    return 0;
}
static int unmap_page_range( ucell virt, ucell size )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t **plink;

    /* make sure there is exactly one matching translation entry */
    split_trans( virt );
    split_trans( virt + size );

    /* find and unlink entries in range */
    plink = &ofmem->trans;

    while (*plink && (*plink)->virt < virt + size) {
        translation_t **plinkentry = plink;
        translation_t *t = *plink;

        /* move ahead */
        plink = &t->next;

        if (t->virt >= virt && t->virt + t->size <= virt + size) {

            /* unlink entry and continue from its predecessor link */
            *plinkentry = t->next;
            plink = plinkentry;

            OFMEM_TRACE("unmap_page_range found "
                        FMT_ucellx " -> " FMT_ucellx " " FMT_ucellx
                        " mode " FMT_ucellx "\n",
                        t->virt, t->phys, t->size, t->mode );

            /* really unmap these pages */
            ofmem_arch_unmap_pages(t->virt, t->size);

            free((char*)t);
        }
    }

    ofmem_update_translations();

    return 0;
}
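/*
 * Note (added): ofmem_map()/ofmem_unmap() form the page-granular public
 * interface.  Addresses and sizes are expected to be multiples of 4 KiB
 * (0x1000) and are forcibly rounded if they are not; a mode of -1 asks the
 * architecture code for its default translation mode.
 */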
int ofmem_map( ucell phys, ucell virt, ucell size, ucell mode )
{
    /* printk("+ofmem_map: %08lX --> %08lX (size %08lX, mode 0x%02X)\n",
       virt, phys, size, mode ); */

    if( (phys & 0xfff) || (virt & 0xfff) || (size & 0xfff) ) {

        OFMEM_TRACE("ofmem_map: Bad parameters ("
                    FMT_ucellX " " FMT_ucellX " " FMT_ucellX ")\n",
                    phys, virt, size );

        phys &= ~0xfff;
        virt &= ~0xfff;
        size = (size + 0xfff) & ~0xfff;
    }

#if defined(OFMEM_FILL_RANGE)
    {
        ofmem_t *ofmem = ofmem_arch_get_private();
        /* claim any unclaimed virtual memory in the range */
        fill_range( virt, size, &ofmem->virt_range );
        /* hmm... we better claim the physical range too */
        fill_range( phys, size, &ofmem->phys_range );
    }
#endif

    if( mode == -1 ) {
        mode = ofmem_arch_default_translation_mode(phys);
    }

    /* install translations */
    ofmem_map_page_range(phys, virt, size, mode);

    /* allow arch to install mappings early, e.g. for locked mappings */
    ofmem_arch_early_map_pages(phys, virt, size, mode);

    return 0;
}
int ofmem_unmap( ucell virt, ucell size )
{
    OFMEM_TRACE("ofmem_unmap " FMT_ucellx " " FMT_ucellx "\n",
                virt, size);

    if( (virt & 0xfff) || (size & 0xfff) ) {
        /* printk("ofmem_unmap: Bad parameters (%08lX %08lX)\n",
           virt, size ); */
        virt &= ~0xfff;
        size = (size + 0xfff) & ~0xfff;
    }

    /* remove translations and unmap pages */
    unmap_page_range(virt, size);

    return 0;
}
/* virtual -> physical. */
ucell
ofmem_translate( ucell virt, ucell *mode )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t;
    ucell offs;

    for( t = ofmem->trans; t && t->virt <= virt; t = t->next ) {
        if( t->virt + t->size - 1 < virt )
            continue;
        offs = virt - t->virt;
        *mode = t->mode;
        return t->phys + offs;
    }

    /*printk("ofmem_translate: no translation defined (%08lx)\n", virt);*/
    return -1;
}
/* release memory allocated by ofmem_claim_phys */
void ofmem_release_phys( ucell phys, ucell size )
{
    OFMEM_TRACE("ofmem_release_phys addr=" FMT_ucellx
                " size=" FMT_ucellx "\n",
                phys, size);

    OFMEM_TRACE("ofmem_release_phys not implemented");
}
/* release memory allocated by ofmem_claim_virt */
void ofmem_release_virt( ucell virt, ucell size )
{
    OFMEM_TRACE("ofmem_release_virt addr=" FMT_ucellx
                " size=" FMT_ucellx "\n",
                virt, size);

    OFMEM_TRACE("ofmem_release_virt not implemented");
}
/************************************************************************/
/* init */
/************************************************************************/
void ofmem_register( phandle_t ph_memory, phandle_t ph_mmu )
{
    s_phandle_memory = ph_memory;
    s_phandle_mmu = ph_mmu;

    ofmem_update_translations();
}
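/*
 * Note (added): ofmem_register() is presumably called once by the
 * architecture setup code with the phandles of the /memory node and the
 * MMU package; from then on claims and mapping changes refresh the
 * "available" and "translations" properties via ofmem_update_translations().
 */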