1 /* Copyright (c) 2008-2010, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
/**
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 **/
16 /** All returned pointers should be aligned to the nearest multiple of this
18 #define MEMAREA_ALIGN SIZEOF_VOID_P
20 #if MEMAREA_ALIGN == 4
21 #define MEMAREA_ALIGN_MASK 3lu
22 #elif MEMAREA_ALIGN == 8
23 #define MEMAREA_ALIGN_MASK 7lu
25 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
28 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
30 realign_pointer(void *ptr
)
32 uintptr_t x
= (uintptr_t)ptr
;
33 x
= (x
+MEMAREA_ALIGN_MASK
) & ~MEMAREA_ALIGN_MASK
;
34 tor_assert(((void*)x
) >= ptr
); // XXXX021 remove this once bug 930 is solved
/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in u.mem, total? */
  char *next_mem; /**< Next position in u.mem to allocate data at. If it's
                   * greater than or equal to mem+mem_size, this chunk is
                   * full. */
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *_void_for_alignment; /**< Dummy; used to make sure mem is aligned. */
  } u;
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)

/** Default size, in bytes, for the chunks we allocate. */
#define CHUNK_SIZE 4096
58 /** A memarea_t is an allocation region for a set of small memory requests
59 * that will all be freed at once. */
61 memarea_chunk_t
*first
; /**< Top of the chunk stack: never NULL. */
64 /** How many chunks will we put into the freelist before freeing them? */
65 #define MAX_FREELIST_LEN 4
66 /** The number of memarea chunks currently in our freelist. */
67 static int freelist_len
=0;
68 /** A linked list of unused memory area chunks. Used to prevent us from
69 * spinning in malloc/free loops. */
70 static memarea_chunk_t
*freelist
= NULL
;
72 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
73 static memarea_chunk_t
*
74 alloc_chunk(size_t sz
, int freelist_ok
)
76 if (freelist
&& freelist_ok
) {
77 memarea_chunk_t
*res
= freelist
;
78 freelist
= res
->next_chunk
;
79 res
->next_chunk
= NULL
;
83 size_t chunk_size
= freelist_ok
? CHUNK_SIZE
: sz
;
84 memarea_chunk_t
*res
= tor_malloc_roundup(&chunk_size
);
85 res
->next_chunk
= NULL
;
86 res
->mem_size
= chunk_size
- CHUNK_HEADER_SIZE
;
87 res
->next_mem
= res
->u
.mem
;
88 tor_assert(res
->next_mem
+res
->mem_size
== ((char*)res
)+chunk_size
);
89 tor_assert(realign_pointer(res
->next_mem
) == res
->next_mem
);
94 /** Release <b>chunk</b> from a memarea, either by adding it to the freelist
95 * or by freeing it if the freelist is already too big. */
97 chunk_free(memarea_chunk_t
*chunk
)
99 if (freelist_len
< MAX_FREELIST_LEN
) {
101 chunk
->next_chunk
= freelist
;
103 chunk
->next_mem
= chunk
->u
.mem
;
109 /** Allocate and return new memarea. */
113 memarea_t
*head
= tor_malloc(sizeof(memarea_t
));
114 head
->first
= alloc_chunk(CHUNK_SIZE
, 1);
118 /** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
119 * and friends for this area */
121 memarea_drop_all(memarea_t
*area
)
123 memarea_chunk_t
*chunk
, *next
;
124 for (chunk
= area
->first
; chunk
; chunk
= next
) {
125 next
= chunk
->next_chunk
;
128 area
->first
= NULL
; /*fail fast on */
132 /** Forget about having allocated anything in <b>area</b>, and free some of
133 * the backing storage associated with it, as appropriate. Invalidates all
134 * pointers returned from memarea_alloc() for this area. */
136 memarea_clear(memarea_t
*area
)
138 memarea_chunk_t
*chunk
, *next
;
139 if (area
->first
->next_chunk
) {
140 for (chunk
= area
->first
->next_chunk
; chunk
; chunk
= next
) {
141 next
= chunk
->next_chunk
;
144 area
->first
->next_chunk
= NULL
;
146 area
->first
->next_mem
= area
->first
->u
.mem
;
149 /** Remove all unused memarea chunks from the internal freelist. */
151 memarea_clear_freelist(void)
153 memarea_chunk_t
*chunk
, *next
;
155 for (chunk
= freelist
; chunk
; chunk
= next
) {
156 next
= chunk
->next_chunk
;
162 /** Return true iff <b>p</b> is in a range that has been returned by an
163 * allocation from <b>area</b>. */
165 memarea_owns_ptr(const memarea_t
*area
, const void *p
)
167 memarea_chunk_t
*chunk
;
169 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
170 if (ptr
>= chunk
->u
.mem
&& ptr
< chunk
->next_mem
)
176 /** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
177 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
178 * size, though we can deal if it isn't. */
180 memarea_alloc(memarea_t
*area
, size_t sz
)
182 memarea_chunk_t
*chunk
= area
->first
;
187 if (chunk
->next_mem
+sz
> chunk
->u
.mem
+chunk
->mem_size
) {
188 if (sz
+CHUNK_HEADER_SIZE
>= CHUNK_SIZE
) {
189 /* This allocation is too big. Stick it in a special chunk, and put
190 * that chunk second in the list. */
191 memarea_chunk_t
*new_chunk
= alloc_chunk(sz
+CHUNK_HEADER_SIZE
, 0);
192 new_chunk
->next_chunk
= chunk
->next_chunk
;
193 chunk
->next_chunk
= new_chunk
;
196 memarea_chunk_t
*new_chunk
= alloc_chunk(CHUNK_SIZE
, 1);
197 new_chunk
->next_chunk
= chunk
;
198 area
->first
= chunk
= new_chunk
;
200 tor_assert(chunk
->mem_size
>= sz
);
202 result
= chunk
->next_mem
;
203 chunk
->next_mem
= chunk
->next_mem
+ sz
;
204 // XXXX021 remove these once bug 930 is solved.
205 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
206 tor_assert(chunk
->next_mem
<= chunk
->u
.mem
+chunk
->mem_size
);
207 chunk
->next_mem
= realign_pointer(chunk
->next_mem
);
211 /** As memarea_alloc(), but clears the memory it returns. */
213 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
215 void *result
= memarea_alloc(area
, sz
);
216 memset(result
, 0, sz
);
220 /** As memdup, but returns the memory from <b>area</b>. */
222 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
224 char *result
= memarea_alloc(area
, n
);
225 memcpy(result
, s
, n
);
229 /** As strdup, but returns the memory from <b>area</b>. */
231 memarea_strdup(memarea_t
*area
, const char *s
)
233 return memarea_memdup(area
, s
, strlen(s
)+1);
236 /** As strndup, but returns the memory from <b>area</b>. */
238 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
242 const char *cp
, *end
= s
+n
;
243 for (cp
= s
; cp
< end
&& *cp
; ++cp
)
245 /* cp now points to s+n, or to the 0 in the string. */
247 result
= memarea_alloc(area
, ln
+1);
248 memcpy(result
, s
, ln
);
253 /** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
254 * and <b>used_out</b> to the number of bytes currently used. */
256 memarea_get_stats(memarea_t
*area
, size_t *allocated_out
, size_t *used_out
)
259 memarea_chunk_t
*chunk
;
260 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
261 a
+= CHUNK_HEADER_SIZE
+ chunk
->mem_size
;
262 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
263 u
+= CHUNK_HEADER_SIZE
+ (chunk
->next_mem
- chunk
->u
.mem
);
269 /** Assert that <b>area</b> is okay. */
271 memarea_assert_ok(memarea_t
*area
)
273 memarea_chunk_t
*chunk
;
274 tor_assert(area
->first
);
276 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
277 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
278 tor_assert(chunk
->next_mem
<=
279 (char*) realign_pointer(chunk
->u
.mem
+chunk
->mem_size
));