/* Copyright (c) 2008-2011, The Tor Project, Inc. */
/* See LICENSE for licensing information */
/**
 * \file memarea.c
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */
16 /** All returned pointers should be aligned to the nearest multiple of this
18 #define MEMAREA_ALIGN SIZEOF_VOID_P
20 #if MEMAREA_ALIGN == 4
21 #define MEMAREA_ALIGN_MASK 3lu
22 #elif MEMAREA_ALIGN == 8
23 #define MEMAREA_ALIGN_MASK 7lu
25 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
28 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
30 realign_pointer(void *ptr
)
32 uintptr_t x
= (uintptr_t)ptr
;
33 x
= (x
+MEMAREA_ALIGN_MASK
) & ~MEMAREA_ALIGN_MASK
;
34 tor_assert(((void*)x
) >= ptr
); // XXXX021 remove this once bug 930 is solved
/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in u.mem, total? */
  char *next_mem; /**< Next position in u.mem to allocate data at.  If it's
                   * greater than or equal to mem+mem_size, this chunk is
                   * full. */
  /* The union guarantees that mem starts at a pointer-aligned offset, so
   * the first allocation in each chunk needs no realignment. */
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *_void_for_alignment; /**< Dummy; used to make sure mem is aligned. */
  } u;
} memarea_chunk_t;

/** How many bytes are needed for a memarea_chunk_t before its memory
 * space begins. */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)
56 #define CHUNK_SIZE 4096
58 /** A memarea_t is an allocation region for a set of small memory requests
59 * that will all be freed at once. */
61 memarea_chunk_t
*first
; /**< Top of the chunk stack: never NULL. */
64 /** How many chunks will we put into the freelist before freeing them? */
65 #define MAX_FREELIST_LEN 4
66 /** The number of memarea chunks currently in our freelist. */
67 static int freelist_len
=0;
68 /** A linked list of unused memory area chunks. Used to prevent us from
69 * spinning in malloc/free loops. */
70 static memarea_chunk_t
*freelist
= NULL
;
72 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
73 static memarea_chunk_t
*
74 alloc_chunk(size_t sz
, int freelist_ok
)
76 tor_assert(sz
< SIZE_T_CEILING
);
77 if (freelist
&& freelist_ok
) {
78 memarea_chunk_t
*res
= freelist
;
79 freelist
= res
->next_chunk
;
80 res
->next_chunk
= NULL
;
84 size_t chunk_size
= freelist_ok
? CHUNK_SIZE
: sz
;
85 memarea_chunk_t
*res
= tor_malloc_roundup(&chunk_size
);
86 res
->next_chunk
= NULL
;
87 res
->mem_size
= chunk_size
- CHUNK_HEADER_SIZE
;
88 res
->next_mem
= res
->u
.mem
;
89 tor_assert(res
->next_mem
+res
->mem_size
== ((char*)res
)+chunk_size
);
90 tor_assert(realign_pointer(res
->next_mem
) == res
->next_mem
);
95 /** Release <b>chunk</b> from a memarea, either by adding it to the freelist
96 * or by freeing it if the freelist is already too big. */
98 chunk_free(memarea_chunk_t
*chunk
)
100 if (freelist_len
< MAX_FREELIST_LEN
) {
102 chunk
->next_chunk
= freelist
;
104 chunk
->next_mem
= chunk
->u
.mem
;
110 /** Allocate and return new memarea. */
114 memarea_t
*head
= tor_malloc(sizeof(memarea_t
));
115 head
->first
= alloc_chunk(CHUNK_SIZE
, 1);
119 /** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
120 * and friends for this area */
122 memarea_drop_all(memarea_t
*area
)
124 memarea_chunk_t
*chunk
, *next
;
125 for (chunk
= area
->first
; chunk
; chunk
= next
) {
126 next
= chunk
->next_chunk
;
129 area
->first
= NULL
; /*fail fast on */
133 /** Forget about having allocated anything in <b>area</b>, and free some of
134 * the backing storage associated with it, as appropriate. Invalidates all
135 * pointers returned from memarea_alloc() for this area. */
137 memarea_clear(memarea_t
*area
)
139 memarea_chunk_t
*chunk
, *next
;
140 if (area
->first
->next_chunk
) {
141 for (chunk
= area
->first
->next_chunk
; chunk
; chunk
= next
) {
142 next
= chunk
->next_chunk
;
145 area
->first
->next_chunk
= NULL
;
147 area
->first
->next_mem
= area
->first
->u
.mem
;
150 /** Remove all unused memarea chunks from the internal freelist. */
152 memarea_clear_freelist(void)
154 memarea_chunk_t
*chunk
, *next
;
156 for (chunk
= freelist
; chunk
; chunk
= next
) {
157 next
= chunk
->next_chunk
;
163 /** Return true iff <b>p</b> is in a range that has been returned by an
164 * allocation from <b>area</b>. */
166 memarea_owns_ptr(const memarea_t
*area
, const void *p
)
168 memarea_chunk_t
*chunk
;
170 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
171 if (ptr
>= chunk
->u
.mem
&& ptr
< chunk
->next_mem
)
177 /** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
178 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
179 * size, though we can deal if it isn't. */
181 memarea_alloc(memarea_t
*area
, size_t sz
)
183 memarea_chunk_t
*chunk
= area
->first
;
186 tor_assert(sz
< SIZE_T_CEILING
);
189 if (chunk
->next_mem
+sz
> chunk
->u
.mem
+chunk
->mem_size
) {
190 if (sz
+CHUNK_HEADER_SIZE
>= CHUNK_SIZE
) {
191 /* This allocation is too big. Stick it in a special chunk, and put
192 * that chunk second in the list. */
193 memarea_chunk_t
*new_chunk
= alloc_chunk(sz
+CHUNK_HEADER_SIZE
, 0);
194 new_chunk
->next_chunk
= chunk
->next_chunk
;
195 chunk
->next_chunk
= new_chunk
;
198 memarea_chunk_t
*new_chunk
= alloc_chunk(CHUNK_SIZE
, 1);
199 new_chunk
->next_chunk
= chunk
;
200 area
->first
= chunk
= new_chunk
;
202 tor_assert(chunk
->mem_size
>= sz
);
204 result
= chunk
->next_mem
;
205 chunk
->next_mem
= chunk
->next_mem
+ sz
;
206 // XXXX021 remove these once bug 930 is solved.
207 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
208 tor_assert(chunk
->next_mem
<= chunk
->u
.mem
+chunk
->mem_size
);
209 chunk
->next_mem
= realign_pointer(chunk
->next_mem
);
213 /** As memarea_alloc(), but clears the memory it returns. */
215 memarea_alloc_zero(memarea_t
*area
, size_t sz
)
217 void *result
= memarea_alloc(area
, sz
);
218 memset(result
, 0, sz
);
222 /** As memdup, but returns the memory from <b>area</b>. */
224 memarea_memdup(memarea_t
*area
, const void *s
, size_t n
)
226 char *result
= memarea_alloc(area
, n
);
227 memcpy(result
, s
, n
);
231 /** As strdup, but returns the memory from <b>area</b>. */
233 memarea_strdup(memarea_t
*area
, const char *s
)
235 return memarea_memdup(area
, s
, strlen(s
)+1);
238 /** As strndup, but returns the memory from <b>area</b>. */
240 memarea_strndup(memarea_t
*area
, const char *s
, size_t n
)
244 const char *cp
, *end
= s
+n
;
245 tor_assert(n
< SIZE_T_CEILING
);
246 for (cp
= s
; cp
< end
&& *cp
; ++cp
)
248 /* cp now points to s+n, or to the 0 in the string. */
250 result
= memarea_alloc(area
, ln
+1);
251 memcpy(result
, s
, ln
);
256 /** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
257 * and <b>used_out</b> to the number of bytes currently used. */
259 memarea_get_stats(memarea_t
*area
, size_t *allocated_out
, size_t *used_out
)
262 memarea_chunk_t
*chunk
;
263 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
264 a
+= CHUNK_HEADER_SIZE
+ chunk
->mem_size
;
265 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
266 u
+= CHUNK_HEADER_SIZE
+ (chunk
->next_mem
- chunk
->u
.mem
);
272 /** Assert that <b>area</b> is okay. */
274 memarea_assert_ok(memarea_t
*area
)
276 memarea_chunk_t
*chunk
;
277 tor_assert(area
->first
);
279 for (chunk
= area
->first
; chunk
; chunk
= chunk
->next_chunk
) {
280 tor_assert(chunk
->next_mem
>= chunk
->u
.mem
);
281 tor_assert(chunk
->next_mem
<=
282 (char*) realign_pointer(chunk
->u
.mem
+chunk
->mem_size
));