3 * Dynamic memory manager
5 * This is a lightweight replacement for the standard C library malloc().
7 * If you want to use the standard C library malloc() instead, define
8 * MEM_LIBC_MALLOC to 1 in your lwipopts.h
10 * To let mem_malloc() use pools (prevents fragmentation and is much faster than
11 * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
12 * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
13 * of pools like this (more pools can be added between _START and _END):
15 * Define three pools with sizes 256, 512, and 1512 bytes
16 * LWIP_MALLOC_MEMPOOL_START
17 * LWIP_MALLOC_MEMPOOL(20, 256)
18 * LWIP_MALLOC_MEMPOOL(10, 512)
19 * LWIP_MALLOC_MEMPOOL(5, 1512)
20 * LWIP_MALLOC_MEMPOOL_END
24 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
25 * All rights reserved.
27 * Redistribution and use in source and binary forms, with or without modification,
28 * are permitted provided that the following conditions are met:
30 * 1. Redistributions of source code must retain the above copyright notice,
31 * this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright notice,
33 * this list of conditions and the following disclaimer in the documentation
34 * and/or other materials provided with the distribution.
35 * 3. The name of the author may not be used to endorse or promote products
36 * derived from this software without specific prior written permission.
38 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
39 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
40 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
41 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
42 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
43 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
44 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
45 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
46 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
49 * This file is part of the lwIP TCP/IP stack.
51 * Author: Adam Dunkels <adam@sics.se>
58 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
63 #include "lwip/stats.h"
68 /* lwIP heap implemented with different sized pools */
71 * This structure is used to save the pool one element came from.
79 * Allocate memory: determine the smallest pool that is big enough
80 * to contain an element of 'size' and get an element from that pool.
82 * @param size the size in bytes of the memory needed
83 * @return a pointer to the allocated memory or NULL if the pool is empty
/* NOTE(review): fragmentary excerpt — the return type line, braces, the
 * declaration of 'poolnr', the loop's exit ('break'), and the return
 * statements are not visible here; verify against the full source. */
86 mem_malloc(mem_size_t size
)
88 struct mem_helper
*element
;
/* walk the pools from smallest to largest until one can hold the request */
91 for (poolnr
= MEMP_POOL_FIRST
; poolnr
<= MEMP_POOL_LAST
; poolnr
++) {
92 /* is this pool big enough to hold an element of the required size
93 plus a struct mem_helper that saves the pool this element came from? */
94 if ((size
+ sizeof(struct mem_helper
)) <= memp_sizes
[poolnr
]) {
/* no pool was big enough: assert and (presumably) return NULL */
98 if (poolnr
> MEMP_POOL_LAST
) {
99 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
102 element
= (struct mem_helper
*)memp_malloc(poolnr
);
103 if (element
== NULL
) {
104 /* No need to DEBUGF or ASSERT: This error is already
105 taken care of in memp.c */
106 /** @todo: we could try a bigger pool if this one is empty! */
110 /* save the pool number this element came from */
111 element
->poolnr
= poolnr
;
112 /* and return a pointer to the memory directly after the struct mem_helper */
119 * Free memory previously allocated by mem_malloc. Loads the pool number
120 * and calls memp_free with that pool number to put the element back into
123 * @param rmem the memory element to free
/* NOTE(review): fragmentary excerpt — the function signature line and the
 * statement that presumably steps 'hmem' back to the struct mem_helper
 * stored in front of 'rmem' are not visible here. */
128 struct mem_helper
*hmem
= (struct mem_helper
*)rmem
;
/* sanity-check the caller-supplied pointer before touching it */
130 LWIP_ASSERT("rmem != NULL", (rmem
!= NULL
));
131 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem
== LWIP_MEM_ALIGN(rmem
)));
133 /* get the original struct mem_helper */
136 LWIP_ASSERT("hmem != NULL", (hmem
!= NULL
));
137 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem
== LWIP_MEM_ALIGN(hmem
)));
138 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem
->poolnr
< MEMP_MAX
));
140 /* and put it in the pool we saved earlier */
141 memp_free(hmem
->poolnr
, hmem
);
144 #else /* MEM_USE_POOLS */
145 /* lwIP replacement for your libc malloc() */
148 * The heap is made up as a list of structs of this type.
149 * This does not have to be aligned since for getting its size,
150 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
153 /** index (-> ram[next]) of the next struct */
155 /** index (-> ram[prev]) of the previous struct */
157 /** 1: this area is used; 0: this area is unused */
161 /** All allocated blocks will be MIN_SIZE bytes big, at least!
162 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
163 * larger values could prevent too small blocks to fragment the RAM too much. */
166 #endif /* MIN_SIZE */
167 /* some alignment macros: we define them here for better source code layout */
168 #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
169 #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
170 #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
172 /** the heap. we need one struct mem at the end and some room for alignment */
173 static u8_t ram_heap
[MEM_SIZE_ALIGNED
+ (2*SIZEOF_STRUCT_MEM
) + MEM_ALIGNMENT
];
174 /** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
176 /** the last entry, always unused! */
177 static struct mem
*ram_end
;
178 /** pointer to the lowest free block, this is used for faster search */
179 static struct mem
*lfree
;
180 /** concurrent access protection */
181 static sys_sem_t mem_sem
;
184 * "Plug holes" by combining adjacent empty struct mems.
185 * After this function is through, there should not exist
186 * one empty struct mem pointing to another empty struct mem.
188 * @param mem this points to a struct mem which just has been freed
189 * @internal this function is only called by mem_free() and mem_realloc()
191 * This assumes access to the heap is protected by the calling function
/* NOTE(review): fragmentary excerpt — the return type, braces and the
 * local declarations of 'nmem'/'pmem' are not visible here. */
195 plug_holes(struct mem
*mem
)
/* 'mem' must lie inside the heap and must already be marked free */
200 LWIP_ASSERT("plug_holes: mem >= ram", (u8_t
*)mem
>= ram
);
201 LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t
*)mem
< (u8_t
*)ram_end
);
202 LWIP_ASSERT("plug_holes: mem->used == 0", mem
->used
== 0);
204 /* plug hole forward */
205 LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem
->next
<= MEM_SIZE_ALIGNED
);
207 nmem
= (struct mem
*)&ram
[mem
->next
];
208 if (mem
!= nmem
&& nmem
->used
== 0 && (u8_t
*)nmem
!= (u8_t
*)ram_end
) {
209 /* if mem->next is unused and not end of ram, combine mem and mem->next */
213 mem
->next
= nmem
->next
;
/* re-link the block after the absorbed one back to 'mem' */
214 ((struct mem
*)&ram
[nmem
->next
])->prev
= (u8_t
*)mem
- ram
;
217 /* plug hole backward */
218 pmem
= (struct mem
*)&ram
[mem
->prev
];
219 if (pmem
!= mem
&& pmem
->used
== 0) {
220 /* if mem->prev is unused, combine mem and mem->prev */
224 pmem
->next
= mem
->next
;
/* re-link the block after 'mem' back to the absorbing predecessor */
225 ((struct mem
*)&ram
[mem
->next
])->prev
= (u8_t
*)pmem
- ram
;
230 * Zero the heap and initialize start, end and lowest-free
/* NOTE(review): fragmentary excerpt — the mem_init() signature, the local
 * 'mem' declaration and several initializing assignments (mem->prev,
 * mem->used, ram_end->used, the MEM_STATS #if guard) are not visible. */
237 LWIP_ASSERT("Sanity check alignment",
238 (SIZEOF_STRUCT_MEM
& (MEM_ALIGNMENT
-1)) == 0);
/* align the raw byte array; 'ram' is the aligned base used everywhere else */
241 ram
= LWIP_MEM_ALIGN(ram_heap
);
242 /* initialize the start of the heap */
243 mem
= (struct mem
*)ram
;
/* one free block spanning the whole (aligned) heap */
244 mem
->next
= MEM_SIZE_ALIGNED
;
247 /* initialize the end of the heap */
248 ram_end
= (struct mem
*)&ram
[MEM_SIZE_ALIGNED
];
/* sentinel: points to itself so list walks terminate here */
250 ram_end
->next
= MEM_SIZE_ALIGNED
;
251 ram_end
->prev
= MEM_SIZE_ALIGNED
;
/* binary semaphore (initial count 1) serializing all heap operations */
253 mem_sem
= sys_sem_new(1);
255 /* initialize the lowest-free pointer to the start of the heap */
256 lfree
= (struct mem
*)ram
;
259 lwip_stats
.mem
.avail
= MEM_SIZE_ALIGNED
;
260 #endif /* MEM_STATS */
264 * Put a struct mem back on the heap
266 * @param rmem is the data portion of a struct mem as returned by a previous
267 * call to mem_malloc()
/* NOTE(review): fragmentary excerpt — the signature, the NULL early
 * return, the 'mem->used = 0' / lfree update / plug_holes() call and the
 * #if MEM_STATS guard lines are not visible here. */
275 LWIP_DEBUGF(MEM_DEBUG
| LWIP_DBG_TRACE
| 2, ("mem_free(p == NULL) was called.\n"));
278 LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t
)rmem
) & (MEM_ALIGNMENT
-1)) == 0);
280 /* protect the heap from concurrent access */
281 sys_arch_sem_wait(mem_sem
, 0);
283 LWIP_ASSERT("mem_free: legal memory", (u8_t
*)rmem
>= (u8_t
*)ram
&&
284 (u8_t
*)rmem
< (u8_t
*)ram_end
);
/* reject pointers outside the heap instead of corrupting it */
286 if ((u8_t
*)rmem
< (u8_t
*)ram
|| (u8_t
*)rmem
>= (u8_t
*)ram_end
) {
287 LWIP_DEBUGF(MEM_DEBUG
| 3, ("mem_free: illegal memory\n"));
289 ++lwip_stats
.mem
.err
;
290 #endif /* MEM_STATS */
/* release the semaphore before bailing out */
291 sys_sem_signal(mem_sem
);
294 /* Get the corresponding struct mem ... */
295 mem
= (struct mem
*)((u8_t
*)rmem
- SIZEOF_STRUCT_MEM
);
296 /* ... which has to be in a used state ... */
297 LWIP_ASSERT("mem_free: mem->used", mem
->used
);
298 /* ... and is now unused. */
302 /* the newly freed struct is now the lowest */
307 lwip_stats
.mem
.used
-= mem
->next
- ((u8_t
*)mem
- ram
);
308 #endif /* MEM_STATS */
310 /* finally, see if prev or next are free also */
312 sys_sem_signal(mem_sem
);
316 * In contrast to its name, mem_realloc can only shrink memory, not expand it.
317 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
318 * this shouldn't be a problem!
320 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
321 * @param newsize required size after shrinking (needs to be smaller than or
322 * equal to the previous size)
323 * @return for compatibility reasons: is always == rmem, at the moment
/* NOTE(review): fragmentary excerpt — the return type, braces, the 'size'
 * local declaration, several early returns and some of the mem2 field
 * assignments are not visible here; verify against the full source. */
326 mem_realloc(void *rmem
, mem_size_t newsize
)
329 mem_size_t ptr
, ptr2
;
330 struct mem
*mem
, *mem2
;
332 /* Expand the size of the allocated memory region so that we can
333 adjust for alignment. */
334 newsize
= LWIP_MEM_ALIGN_SIZE(newsize
);
336 if(newsize
< MIN_SIZE_ALIGNED
) {
337 /* every data block must be at least MIN_SIZE_ALIGNED long */
338 newsize
= MIN_SIZE_ALIGNED
;
/* a size larger than the whole heap can never be valid */
341 if (newsize
> MEM_SIZE_ALIGNED
) {
345 LWIP_ASSERT("mem_realloc: legal memory", (u8_t
*)rmem
>= (u8_t
*)ram
&&
346 (u8_t
*)rmem
< (u8_t
*)ram_end
);
/* reject pointers outside the heap */
348 if ((u8_t
*)rmem
< (u8_t
*)ram
|| (u8_t
*)rmem
>= (u8_t
*)ram_end
) {
349 LWIP_DEBUGF(MEM_DEBUG
| 3, ("mem_realloc: illegal memory\n"));
352 /* Get the corresponding struct mem ... */
353 mem
= (struct mem
*)((u8_t
*)rmem
- SIZEOF_STRUCT_MEM
);
354 /* ... and its offset pointer */
355 ptr
= (u8_t
*)mem
- ram
;
/* current user-data size of the block */
357 size
= mem
->next
- ptr
- SIZEOF_STRUCT_MEM
;
358 LWIP_ASSERT("mem_realloc can only shrink memory", newsize
<= size
);
359 if (newsize
> size
) {
363 if (newsize
== size
) {
364 /* No change in size, simply return */
368 /* protect the heap from concurrent access */
369 sys_arch_sem_wait(mem_sem
, 0);
372 lwip_stats
.mem
.used
-= (size
- newsize
);
373 #endif /* MEM_STATS */
375 mem2
= (struct mem
*)&ram
[mem
->next
];
376 if(mem2
->used
== 0) {
377 /* The next struct is unused, we can simply move it a little */
379 /* remember the old next pointer */
381 /* create new struct mem which is moved directly after the shrunk mem */
382 ptr2
= ptr
+ SIZEOF_STRUCT_MEM
+ newsize
;
/* keep 'lfree' pointing at the lowest free block (presumably guarded by
 * a comparison not visible in this excerpt) */
384 lfree
= (struct mem
*)&ram
[ptr2
];
386 mem2
= (struct mem
*)&ram
[ptr2
];
388 /* restore the next pointer */
390 /* link it back to mem */
394 /* last thing to restore linked list: as we have moved mem2,
395 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
396 * the end of the heap */
397 if (mem2
->next
!= MEM_SIZE_ALIGNED
) {
398 ((struct mem
*)&ram
[mem2
->next
])->prev
= ptr2
;
400 /* no need to plug holes, we've already done that */
401 } else if (newsize
+ SIZEOF_STRUCT_MEM
+ MIN_SIZE_ALIGNED
<= size
) {
402 /* Next struct is used but there's room for another struct mem with
403 * at least MIN_SIZE_ALIGNED of data.
404 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
405 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
406 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
407 * region that couldn't hold data, but when mem->next gets freed,
408 * the 2 regions would be combined, resulting in more free memory */
409 ptr2
= ptr
+ SIZEOF_STRUCT_MEM
+ newsize
;
410 mem2
= (struct mem
*)&ram
[ptr2
];
415 mem2
->next
= mem
->next
;
418 if (mem2
->next
!= MEM_SIZE_ALIGNED
) {
419 ((struct mem
*)&ram
[mem2
->next
])->prev
= ptr2
;
421 /* the original mem->next is used, so no need to plug holes! */
424 next struct mem is used but size between mem and mem2 is not big enough
425 to create another struct mem
426 -> don't do anything.
427 -> the remaining space stays unused since it is too small
429 sys_sem_signal(mem_sem
);
434 * Adam's mem_malloc() plus solution for bug #17922
435 * Allocate a block of memory with a minimum of 'size' bytes.
437 * @param size is the minimum size of the requested block in bytes.
438 * @return pointer to allocated memory or NULL if no free memory was found.
440 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
/* NOTE(review): fragmentary excerpt — the return type, braces, the
 * 'mem->used' / 'mem2->used' assignments, some 'prev' links and the
 * #if MEM_STATS guard lines are not visible here. */
443 mem_malloc(mem_size_t size
)
445 mem_size_t ptr
, ptr2
;
446 struct mem
*mem
, *mem2
;
452 /* Expand the size of the allocated memory region so that we can
453 adjust for alignment. */
454 size
= LWIP_MEM_ALIGN_SIZE(size
);
456 if(size
< MIN_SIZE_ALIGNED
) {
457 /* every data block must be at least MIN_SIZE_ALIGNED long */
458 size
= MIN_SIZE_ALIGNED
;
/* a request larger than the whole heap can never be satisfied */
461 if (size
> MEM_SIZE_ALIGNED
) {
465 /* protect the heap from concurrent access */
466 sys_arch_sem_wait(mem_sem
, 0);
468 /* Scan through the heap searching for a free block that is big enough,
469 * beginning with the lowest free block.
471 for (ptr
= (u8_t
*)lfree
- ram
; ptr
< MEM_SIZE_ALIGNED
- size
;
472 ptr
= ((struct mem
*)&ram
[ptr
])->next
) {
473 mem
= (struct mem
*)&ram
[ptr
];
/* NOTE(review): the first half of this condition (presumably the
 * '!mem->used &&' test) is not visible in this excerpt */
476 (mem
->next
- (ptr
+ SIZEOF_STRUCT_MEM
)) >= size
) {
477 /* mem is not used and at least perfect fit is possible:
478 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
480 if (mem
->next
- (ptr
+ SIZEOF_STRUCT_MEM
) >= (size
+ SIZEOF_STRUCT_MEM
+ MIN_SIZE_ALIGNED
)) {
481 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
482 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
483 * -> split large block, create empty remainder,
484 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
485 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
486 * struct mem would fit in but no data between mem2 and mem2->next
487 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
488 * region that couldn't hold data, but when mem->next gets freed,
489 * the 2 regions would be combined, resulting in more free memory
491 ptr2
= ptr
+ SIZEOF_STRUCT_MEM
+ size
;
492 /* create mem2 struct */
493 mem2
= (struct mem
*)&ram
[ptr2
];
495 mem2
->next
= mem
->next
;
497 /* and insert it between mem and mem->next */
/* fix the back-link of the block after mem2, unless mem2 is last */
501 if (mem2
->next
!= MEM_SIZE_ALIGNED
) {
502 ((struct mem
*)&ram
[mem2
->next
])->prev
= ptr2
;
505 lwip_stats
.mem
.used
+= (size
+ SIZEOF_STRUCT_MEM
);
506 if (lwip_stats
.mem
.max
< lwip_stats
.mem
.used
) {
507 lwip_stats
.mem
.max
= lwip_stats
.mem
.used
;
509 #endif /* MEM_STATS */
511 /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
512 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
513 * taken care of this).
514 * -> near fit or exact fit: do not split, no mem2 creation
515 * also can't move mem->next directly behind mem, since mem->next
516 * will always be used at this point!
520 lwip_stats
.mem
.used
+= mem
->next
- ((u8_t
*)mem
- ram
);
521 if (lwip_stats
.mem
.max
< lwip_stats
.mem
.used
) {
522 lwip_stats
.mem
.max
= lwip_stats
.mem
.used
;
524 #endif /* MEM_STATS */
528 /* Find next free block after mem and update lowest free pointer */
529 while (lfree
->used
&& lfree
!= ram_end
) {
530 lfree
= (struct mem
*)&ram
[lfree
->next
];
532 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree
== ram_end
) || (!lfree
->used
)));
534 sys_sem_signal(mem_sem
);
535 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
536 (mem_ptr_t
)mem
+ SIZEOF_STRUCT_MEM
+ size
<= (mem_ptr_t
)ram_end
);
537 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
538 (unsigned long)((u8_t
*)mem
+ SIZEOF_STRUCT_MEM
) % MEM_ALIGNMENT
== 0);
539 LWIP_ASSERT("mem_malloc: sanity check alignment",
540 (((mem_ptr_t
)mem
) & (MEM_ALIGNMENT
-1)) == 0);
/* the user data starts directly behind the struct mem header */
542 return (u8_t
*)mem
+ SIZEOF_STRUCT_MEM
;
/* the scan found no fitting block: report the failure */
545 LWIP_DEBUGF(MEM_DEBUG
| 2, ("mem_malloc: could not allocate %"S16_F
" bytes\n", (s16_t
)size
));
547 ++lwip_stats
.mem
.err
;
548 #endif /* MEM_STATS */
549 sys_sem_signal(mem_sem
);
553 #endif /* MEM_USE_POOLS */
555 * Contiguously allocates enough space for count objects that are size bytes
556 * of memory each and returns a pointer to the allocated memory.
558 * The allocated memory is filled with bytes of value zero.
560 * @param count number of objects to allocate
561 * @param size size of the objects to allocate
562 * @return pointer to allocated memory / NULL pointer if there is an error
564 void *mem_calloc(mem_size_t count
, mem_size_t size
)
568 /* allocate 'count' objects of size 'size' */
569 p
= mem_malloc(count
* size
);
571 /* zero the memory */
572 memset(p
, 0, count
* size
);
577 #endif /* !MEM_LIBC_MALLOC */