Fix warnings from GCC 4.6.0
[openbios.git] / libopenbios / ofmem_common.c
/*
 *   <ofmem_common.c>
 *
 *   OF Memory manager
 *
 *   Copyright (C) 1999-2004 Samuel Rydh (samuel@ibrium.se)
 *   Copyright (C) 2004 Stefan Reinauer
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation
 *
 */

#include "config.h"
#include "libopenbios/bindings.h"
#include "libopenbios/ofmem.h"

/*
 * define OFMEM_FILL_RANGE to claim any unclaimed virtual and
 * physical memory in the range for ofmem_map
 *
 * TODO: remove this macro and wrapped code if not needed by implementations
 */
//#define OFMEM_FILL_RANGE

static inline size_t ALIGN_SIZE(size_t x, size_t a)
{
    return (x + a - 1) & ~(a-1);
}

static ucell get_ram_size( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    return ofmem->ramsize;
}

/************************************************************************/
/* debug */
/************************************************************************/

#if 0
static void
print_range( range_t *r, char *str )
{
    printk("--- Range %s ---\n", str );
    for( ; r; r=r->next )
        printk("%08lx - %08lx\n", r->start, r->start + r->size -1 );
    printk("\n");
}

static void
print_phys_range()
{
    print_range( ofmem.phys_range, "phys" );
}

static void
print_virt_range()
{
    print_range( ofmem.virt_range, "virt" );
}

static void
print_trans( void )
{
    translation_t *t = ofmem.trans;

    printk("--- Translations ---\n");
    for( ; t; t=t->next )
        printk("%08lx -> %08lx [size %lx]\n", t->virt, t->phys, t->size );
    printk("\n");
}
#endif

/************************************************************************/
/* OF private allocations */
/************************************************************************/

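/*
 * Private heap used by OFMEM itself (e.g. for the range and translation
 * records below).  Blocks are carved from a simple bump allocator that
 * grows from ofmem_arch_get_malloc_base() towards ofmem_arch_get_heap_top();
 * each block is preceded by an alloc_desc_t header recording its size, and
 * freed blocks are kept on a size-sorted freelist for reuse.  Returned
 * memory is always zeroed.
 */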
void* ofmem_malloc( size_t size )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    alloc_desc_t *d, **pp;
    char *ret;
    ucell top;

    if( !size )
        return NULL;

    if( !ofmem->next_malloc )
        ofmem->next_malloc = (char*)ofmem_arch_get_malloc_base();

    size = ALIGN_SIZE(size + sizeof(alloc_desc_t), CONFIG_OFMEM_MALLOC_ALIGN);

    /* look in the freelist */
    for( pp=&ofmem->mfree; *pp && (**pp).size < size; pp = &(**pp).next ) {
    }

    /* waste at most 4K by taking an entry from the freelist */
    if( *pp && (**pp).size < size + 0x1000 ) {
        ret = (char*)*pp + sizeof(alloc_desc_t);
        memset( ret, 0, (**pp).size - sizeof(alloc_desc_t) );
        *pp = (**pp).next;
        return ret;
    }

    top = ofmem_arch_get_heap_top();

    if( (ucell)ofmem->next_malloc + size > top ) {
        printk("out of malloc memory (%x)!\n", size );
        return NULL;
    }

    d = (alloc_desc_t*) ofmem->next_malloc;
    ofmem->next_malloc += size;

    d->next = NULL;
    d->size = size;

    ret = (char*)d + sizeof(alloc_desc_t);
    memset( ret, 0, size - sizeof(alloc_desc_t) );

    return ret;
}

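/*
 * Return a block to the size-sorted freelist.  The bump pointer is never
 * rewound, so the private heap only grows; freed blocks are simply made
 * available for reuse by later ofmem_malloc() calls of a similar size.
 */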
void ofmem_free( void *ptr )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    alloc_desc_t **pp, *d;

    /* it is legal to free NULL pointers (size zero allocations) */
    if( !ptr )
        return;

    d = (alloc_desc_t*)((char *)ptr - sizeof(alloc_desc_t));
    d->next = ofmem->mfree;

    /* insert in the (sorted) freelist */
    for( pp=&ofmem->mfree; *pp && (**pp).size < d->size ; pp = &(**pp).next ) {
    }

    d->next = *pp;
    *pp = d;
}

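/*
 * Resize an allocation by grabbing a new block and copying over
 * MIN(old payload size, new size) bytes.  The old payload size is taken
 * from the alloc_desc_t header stored just in front of the block.
 */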
void* ofmem_realloc( void *ptr, size_t size )
{
    alloc_desc_t *d = (alloc_desc_t*)((char *)ptr - sizeof(alloc_desc_t));
    char *p;

    if( !ptr )
        return malloc( size );
    if( !size ) {
        free( ptr );
        return NULL;
    }
    p = malloc( size );
    memcpy( p, ptr, MIN(d->size - sizeof(alloc_desc_t), size) );
    free( ptr );
    return p;
}

/************************************************************************/
/* "translations" and "available" property tracking */
/************************************************************************/

static phandle_t s_phandle_memory = 0;
static phandle_t s_phandle_mmu = 0;

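/*
 * Rebuild the "translations" property on the MMU node from the current
 * translation list.  The entry format is architecture specific: the arch
 * code reports the per-entry cell count and encodes each translation_t
 * via ofmem_arch_create_translation_entry().
 */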
static void ofmem_update_mmu_translations( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t;
    int ncells;
    ucell *props;

    if (s_phandle_mmu == 0)
        return;

    for( t = ofmem->trans, ncells = 0; t ; t=t->next, ncells++ ) {
    }

    props = malloc(ncells * sizeof(ucell) * ofmem_arch_get_translation_entry_size());

    if (props == NULL)
        return;

    /* Call architecture-specific routines to generate translation entries */
    for( t = ofmem->trans, ncells = 0 ; t ; t=t->next ) {
        ofmem_arch_create_translation_entry(&props[ncells], t);
        ncells += ofmem_arch_get_translation_entry_size();
    }

    set_property(s_phandle_mmu, "translations",
            (char*)props, ncells * sizeof(props[0]));

    free(props);
}

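/*
 * Publish the complement of a claimed-range list as an "available"
 * property: (start, size) cell pairs covering every gap below top_address.
 * Illustrative example (values made up): with claimed ranges
 * [0x0000, 0x1000) and [0x8000, 0xa000) and top_address 0x10000, the
 * property becomes { 0x1000, 0x7000, 0xa000, 0x6000 }.
 */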
static void ofmem_update_memory_available( phandle_t ph, range_t *range,
        u64 top_address )
{
    range_t *r;
    int ncells;
    ucell *props;

    ucell start, size;

    if (s_phandle_memory == 0)
        return;

    /* count phys_range list entries */
    for( r = range, ncells = 0; r ; r=r->next, ncells++ ) {
    }

    /* inverse of phys_range list could take 2 more cells for the tail */
    props = malloc((ncells+1) * sizeof(ucell) * 2);

    if (props == NULL) {
        /* out of memory! */
        return;
    }

    start = 0;
    ncells = 0;

    for (r = range; r; r=r->next) {
        if (r->start >= top_address) {
            break;
        }

        size = r->start - start;
        if (size) {
            props[ncells++] = start;
            props[ncells++] = size;
        }
        start = r->start + r->size;
    }

    /* tail */
    if (start < top_address) {
        props[ncells++] = start;
        props[ncells++] = top_address - start;
    }

    set_property(ph, "available",
            (char*)props, ncells * sizeof(props[0]));

    free(props);
}

static void ofmem_update_translations( void )
{
    ofmem_t *ofmem = ofmem_arch_get_private();

    ofmem_update_memory_available(s_phandle_memory,
            ofmem->phys_range, get_ram_size());
    ofmem_update_memory_available(s_phandle_mmu,
            ofmem->virt_range, -1ULL);
    ofmem_update_mmu_translations();
}

/************************************************************************/
/* client interface */
/************************************************************************/

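/*
 * Claimed physical and virtual memory is tracked in range_t lists kept
 * sorted by start address.  is_free() reports whether [ea, ea+size)
 * overlaps any existing entry, and add_entry_() inserts a new entry at
 * the position that keeps the list sorted.
 */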
static int is_free( ucell ea, ucell size, range_t *r )
{
    if( size == 0 )
        return 1;
    for( ; r ; r=r->next ) {
        if( r->start + r->size - 1 >= ea && r->start <= ea )
            return 0;
        if( r->start >= ea && r->start <= ea + size - 1 )
            return 0;
    }
    return 1;
}

static void add_entry_( ucell ea, ucell size, range_t **r )
{
    range_t *nr;

    for( ; *r && (**r).start < ea; r=&(**r).next ) {
    }

    nr = (range_t*)malloc( sizeof(range_t) );
    nr->next = *r;
    nr->start = ea;
    nr->size = size;
    *r = nr;
}

static int add_entry( ucell ea, ucell size, range_t **r )
{
    if( !is_free( ea, size, *r ) ) {
        OFMEM_TRACE("add_entry: range not free!\n");
        return -1;
    }
    add_entry_( ea, size, r );
    return 0;
}

#if defined(OFMEM_FILL_RANGE)
static void join_ranges( range_t **rr )
{
    range_t *n, *r = *rr;
    while( r ) {
        if( !(n=r->next) )
            break;

        if( r->start + r->size - 1 >= n->start -1 ) {
            int s = n->size + (n->start - r->start - r->size);
            if( s > 0 )
                r->size += s;
            r->next = n->next;
            free( n );
            continue;
        }
        r=r->next;
    }
}

static void fill_range( ucell ea, ucell size, range_t **rr )
{
    add_entry_( ea, size, rr );
    join_ranges( rr );
}
#endif

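/*
 * Search the claimed-range list for a hole large enough for an aligned
 * allocation.  The forward search walks upward from min, taking the next
 * candidate just past each claimed range; with reverse set, candidates
 * are tried downward from max instead.  Alignment must be a power of two
 * (anything else falls back to 0x1000).  Returns -1 if nothing fits.
 */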
static ucell find_area( ucell align, ucell size, range_t *r,
        ucell min, ucell max, int reverse )
{
    ucell base = min;
    range_t *r2;

    if( (align & (align-1)) ) {
        OFMEM_TRACE("bad alignment " FMT_ucell "\n", align);
        align = 0x1000;
    }
    if( !align )
        align = 0x1000;

    base = reverse ? max - size : min;
    r2 = reverse ? NULL : r;

    for( ;; ) {
        if( !reverse ) {
            base = (base + align - 1) & ~(align-1);
            if( base < min )
                base = min;
            if( base + size - 1 >= max -1 )
                break;
        } else {
            if( base > max - size )
                base = max - size;
            base -= base & (align-1);
        }
        if( is_free( base, size, r ) )
            return base;

        if( !reverse ) {
            if( !r2 )
                break;
            base = r2->start + r2->size;
            r2 = r2->next;
        } else {
            range_t *rp;

            for( rp=r; rp && rp->next != r2 ; rp=rp->next ) {
            }

            r2 = rp;
            if( !r2 )
                break;
            base = r2->start - size;
        }
    }
    return -1;
}

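/*
 * Claim physical memory.  With align == 0 the exact range [phys, phys+size)
 * is claimed and must be free; otherwise phys is ignored and a suitably
 * aligned hole between min and max is searched for.  After a successful
 * aligned allocation the device-tree properties are refreshed via
 * ofmem_update_translations().
 */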
static ucell ofmem_claim_phys_( ucell phys, ucell size, ucell align,
        ucell min, ucell max, int reverse )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    if( !align ) {
        if( !is_free( phys, size, ofmem->phys_range ) ) {
            OFMEM_TRACE("Non-free physical memory claimed!\n");
            return -1;
        }
        add_entry( phys, size, &ofmem->phys_range );
        return phys;
    }
    phys = find_area( align, size, ofmem->phys_range, min, max, reverse );
    if( phys == -1 ) {
        printk("ofmem_claim_phys - out of space (failed request for " FMT_ucellx " bytes)\n", size);
        return -1;
    }
    add_entry( phys, size, &ofmem->phys_range );

    ofmem_update_translations();

    return phys;
}

/* if align != 0, phys is ignored. Returns -1 on error */
ucell ofmem_claim_phys( ucell phys, ucell size, ucell align )
{
    OFMEM_TRACE("ofmem_claim phys=" FMT_ucellx " size=" FMT_ucellx
            " align=" FMT_ucellx "\n",
            phys, size, align);

    return ofmem_claim_phys_( phys, size, align, 0, get_ram_size(), 0 );
}

static ucell ofmem_claim_virt_( ucell virt, ucell size, ucell align,
        ucell min, ucell max, int reverse )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    if( !align ) {
        if( !is_free( virt, size, ofmem->virt_range ) ) {
            OFMEM_TRACE("Non-free virtual memory claimed!\n");
            return -1;
        }
        add_entry( virt, size, &ofmem->virt_range );
        return virt;
    }

    virt = find_area( align, size, ofmem->virt_range, min, max, reverse );
    if( virt == -1 ) {
        printk("ofmem_claim_virt - out of space (failed request for " FMT_ucellx " bytes)\n", size);
        return -1;
    }
    add_entry( virt, size, &ofmem->virt_range );
    return virt;
}

ucell ofmem_claim_virt( ucell virt, ucell size, ucell align )
{
    OFMEM_TRACE("ofmem_claim_virt virt=" FMT_ucellx " size=" FMT_ucellx
            " align=" FMT_ucellx "\n",
            virt, size, align);

    /* printk("+ ofmem_claim virt %08lx %lx %ld\n", virt, size, align ); */
    return ofmem_claim_virt_( virt, size, align,
            get_ram_size(), ofmem_arch_get_virt_top(), 0 );
}

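/*
 * Like ofmem_claim_phys(), but the claimed range is also recorded in the
 * arch-maintained retain list (ofmem_arch_get_retained()).  Presumably
 * this lets architecture code preserve these allocations, e.g. across a
 * reset; the exact consumer is arch specific.
 */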
/* if align != 0, phys is ignored. Returns -1 on error */
ucell ofmem_retain( ucell phys, ucell size, ucell align )
{
    retain_t *retained = ofmem_arch_get_retained();
    ucell retain_phys;

    OFMEM_TRACE("ofmem_retain phys=" FMT_ucellx " size=" FMT_ucellx
            " align=" FMT_ucellx "\n",
            phys, size, align);

    retain_phys = ofmem_claim_phys_( phys, size, align, 0, get_ram_size(), 0 );

    /* Add to the retain_phys_range list */
    retained->retain_phys_range[retained->numentries].next = NULL;
    retained->retain_phys_range[retained->numentries].start = retain_phys;
    retained->retain_phys_range[retained->numentries].size = size;
    retained->numentries++;

    return retain_phys;
}

/* allocate both physical and virtual space and add a translation */
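/*
 * With align == 0 the caller asks for the fixed address addr, which must
 * be free both physically and virtually; otherwise an alignment of at
 * least one page is used and space is taken from the top of RAM (reverse
 * search).  The mapping is installed with ofmem_map() using the arch
 * default mode, and the sub-page offset of the original request is
 * preserved in the returned virtual address.
 */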
ucell ofmem_claim( ucell addr, ucell size, ucell align )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    ucell virt, phys;
    ucell offs = addr & 0xfff;

    OFMEM_TRACE("ofmem_claim " FMT_ucellx " " FMT_ucellx " " FMT_ucellx "\n", addr, size, align );
    virt = phys = 0;
    if( !align ) {
        if( is_free(addr, size, ofmem->virt_range) &&
            is_free(addr, size, ofmem->phys_range) ) {
            ofmem_claim_phys_( addr, size, 0, 0, 0, 0 );
            ofmem_claim_virt_( addr, size, 0, 0, 0, 0 );
            virt = phys = addr;
        } else {
            OFMEM_TRACE("**** ofmem_claim failure ***!\n");
            return -1;
        }
    } else {
        if( align < 0x1000 )
            align = 0x1000;
        phys = ofmem_claim_phys_( addr, size, align, 0, get_ram_size(), 1 /* reverse */ );
        virt = ofmem_claim_virt_( addr, size, align, 0, get_ram_size(), 1 /* reverse */ );
        if( phys == -1 || virt == -1 ) {
            OFMEM_TRACE("ofmem_claim failed\n");
            return -1;
        }
        /* printk("...phys = %08lX, virt = %08lX, size = %08lX\n", phys, virt, size ); */
    }

    /* align */
    if( phys & 0xfff ) {
        size += (phys & 0xfff);
        virt -= (phys & 0xfff);
        phys &= ~0xfff;
    }
    if( size & 0xfff )
        size = (size + 0xfff) & ~0xfff;

    /* printk("...free memory found... phys: %08lX, virt: %08lX, size %lX\n", phys, virt, size ); */
    ofmem_map( phys, virt, size, -1 );
    return virt + offs;
}

/************************************************************************/
/* keep track of ea -> phys translations */
/************************************************************************/

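/*
 * Translations are kept in a list sorted by virtual address.
 * split_trans() splits the entry containing virt (if any) into two
 * adjacent entries meeting exactly at virt, so that subsequent map and
 * unmap operations always deal with whole entries.
 */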
static void split_trans( ucell virt )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t, *t2;

    for( t=ofmem->trans; t; t=t->next ) {
        if( virt > t->virt && virt < t->virt + t->size-1 ) {
            t2 = (translation_t*)malloc( sizeof(translation_t) );
            t2->virt = virt;
            t2->size = t->size - (virt - t->virt);
            t->size = virt - t->virt;
            t2->phys = t->phys + t->size;
            t2->mode = t->mode;
            t2->next = t->next;
            t->next = t2;
        }
    }
}

int ofmem_map_page_range( ucell phys, ucell virt, ucell size, ucell mode )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t, **tt;

    OFMEM_TRACE("ofmem_map_page_range " FMT_ucellx
            " -> " FMT_ucellx " " FMT_ucellx " mode " FMT_ucellx "\n",
            virt, phys, size, mode );

    split_trans( virt );
    split_trans( virt + size );

    /* detect remappings */
    for( t=ofmem->trans; t; ) {
        if( virt == t->virt || (virt < t->virt && virt + size > t->virt )) {
            if( t->phys + virt - t->virt != phys ) {
                OFMEM_TRACE("mapping altered (virt=" FMT_ucellx ")\n", t->virt );
            } else if( t->mode != mode ){
                OFMEM_TRACE("mapping mode altered virt=" FMT_ucellx
                        " old mode=" FMT_ucellx " new mode=" FMT_ucellx "\n",
                        t->virt, t->mode, mode);
            }

            for( tt=&ofmem->trans; *tt != t ; tt=&(**tt).next ) {
            }

            *tt = t->next;

            /* really unmap these pages */
            ofmem_arch_unmap_pages(t->virt, t->size);

            free((char*)t);

            t=ofmem->trans;
            continue;
        }
        t=t->next;
    }

    /* add mapping */
    for( tt=&ofmem->trans; *tt && (**tt).virt < virt ; tt=&(**tt).next ) {
    }

    t = (translation_t*)malloc( sizeof(translation_t) );
    t->virt = virt;
    t->phys = phys;
    t->size = size;
    t->mode = mode;
    t->next = *tt;
    *tt = t;

    ofmem_update_translations();

    return 0;
}

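/*
 * Remove every translation that lies completely inside [virt, virt+size)
 * and ask the arch code to tear down the hardware mappings.  split_trans()
 * is called first so that entries straddling the boundaries are split and
 * only the covered parts are removed.
 */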
static int unmap_page_range( ucell virt, ucell size )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t **plink;

    /* make sure there is exactly one matching translation entry */

    split_trans( virt );
    split_trans( virt + size );

    /* find and unlink entries in range */
    plink = &ofmem->trans;

    while (*plink && (*plink)->virt < virt+size) {
        translation_t **plinkentry = plink;
        translation_t *t = *plink;

        /* move ahead */
        plink = &t->next;

        if (t->virt >= virt && t->virt + t->size <= virt+size) {

            /* unlink entry */
            *plinkentry = t->next;

            OFMEM_TRACE("unmap_page_range found "
                    FMT_ucellx " -> " FMT_ucellx " " FMT_ucellx
                    " mode " FMT_ucellx "\n",
                    t->virt, t->phys, t->size, t->mode );

            /* really unmap these pages */
            ofmem_arch_unmap_pages(t->virt, t->size);

            free((char*)t);
        }
    }

    ofmem_update_translations();

    return 0;
}

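/*
 * Public mapping entry point.  Arguments are expected to be page aligned;
 * unaligned values are reported and rounded.  A mode of -1 selects the
 * architecture's default translation mode for the given physical address.
 */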
int ofmem_map( ucell phys, ucell virt, ucell size, ucell mode )
{
    /* printk("+ofmem_map: %08lX --> %08lX (size %08lX, mode 0x%02X)\n",
       virt, phys, size, mode ); */

    if( (phys & 0xfff) || (virt & 0xfff) || (size & 0xfff) ) {

        OFMEM_TRACE("ofmem_map: Bad parameters ("
                FMT_ucellX " " FMT_ucellX " " FMT_ucellX ")\n",
                phys, virt, size );

        phys &= ~0xfff;
        virt &= ~0xfff;
        size = (size + 0xfff) & ~0xfff;
    }

#if defined(OFMEM_FILL_RANGE)
    {
        ofmem_t *ofmem = ofmem_arch_get_private();
        /* claim any unclaimed virtual memory in the range */
        fill_range( virt, size, &ofmem->virt_range );
        /* hmm... we better claim the physical range too */
        fill_range( phys, size, &ofmem->phys_range );
    }
#endif

    if (mode==-1) {
        mode = ofmem_arch_default_translation_mode(phys);
    }

    /* install translations */
    ofmem_map_page_range(phys, virt, size, mode);

    /* allow arch to install mappings early, e.g. for locked mappings */
    ofmem_arch_early_map_pages(phys, virt, size, mode);

    return 0;
}

int ofmem_unmap( ucell virt, ucell size )
{
    OFMEM_TRACE("ofmem_unmap " FMT_ucellx " " FMT_ucellx "\n",
            virt, size );

    if( (virt & 0xfff) || (size & 0xfff) ) {
        /* printk("ofmem_unmap: Bad parameters (%08lX %08lX)\n",
           virt, size ); */
        virt &= ~0xfff;
        size = (size + 0xfff) & ~0xfff;
    }

    /* remove translations and unmap pages */
    unmap_page_range(virt, size);

    return 0;
}

/* virtual -> physical. */
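/* Walks the sorted translation list; on a hit the translation mode is
   stored in *mode and the translated physical address is returned,
   otherwise -1 is returned. */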
ucell ofmem_translate( ucell virt, ucell *mode )
{
    ofmem_t *ofmem = ofmem_arch_get_private();
    translation_t *t;

    for( t=ofmem->trans; t && t->virt <= virt ; t=t->next ) {
        ucell offs;
        if( t->virt + t->size - 1 < virt )
            continue;
        offs = virt - t->virt;
        *mode = t->mode;
        return t->phys + offs;
    }

    /*printk("ofmem_translate: no translation defined (%08lx)\n", virt);*/
    /*print_trans();*/
    return -1;
}

/* release memory allocated by ofmem_claim_phys */
void ofmem_release_phys( ucell phys, ucell size )
{
    OFMEM_TRACE("ofmem_release_phys addr=" FMT_ucellx " size=" FMT_ucellx "\n",
            phys, size);

    OFMEM_TRACE("ofmem_release_phys not implemented");
}

/* release memory allocated by ofmem_claim_virt */
void ofmem_release_virt( ucell virt, ucell size )
{
    OFMEM_TRACE("ofmem_release_virt addr=" FMT_ucellx " size=" FMT_ucellx "\n",
            virt, size);

    OFMEM_TRACE("ofmem_release_virt not implemented");
}

/************************************************************************/
/* init / cleanup */
/************************************************************************/

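/*
 * Record the phandles of the memory and MMU nodes and publish the initial
 * "available" and "translations" properties.  Until this is called the
 * property updates above are silently skipped, so arch code is expected
 * to invoke it once those nodes exist.
 */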
void ofmem_register( phandle_t ph_memory, phandle_t ph_mmu )
{
    s_phandle_memory = ph_memory;
    s_phandle_mmu = ph_mmu;

    ofmem_update_translations();
}