/* Copyright (c) 2008-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/** \file memarea.c
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */

#include "orconfig.h"
#include <stdlib.h>
#include "memarea.h"
#include "util.h"
#include "compat.h"
#include "torlog.h"

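/* A minimal usage sketch of the public API declared in memarea.h; this is
 * illustrative only, not part of the implementation:
 *
 *   memarea_t *area = memarea_new();
 *   char *copy = memarea_strdup(area, "hello");  // lives in the area
 *   void *buf = memarea_alloc(area, 512);        // also lives in the area
 *   ... use copy and buf; no individual free() calls ...
 *   memarea_drop_all(area);  // releases copy, buf, and the area itself
 */
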
/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK 3lu
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK 7lu
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
#define U_MEM mem
#else
#define U_MEM u.mem
#endif

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif

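/* Layout sketch of a single chunk when sentinels are enabled (illustrative,
 * not normative):
 *
 *   [ header: next_chunk/mem_size/next_mem ][ mem: mem_size bytes ][ sentinel ]
 *
 * CHECK_SENTINEL() fails its assertion if anything overwrote the trailing
 * SENTINEL_VAL, which catches writes that run past mem_size. */
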
/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static INLINE void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}

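/* Worked example, assuming MEMAREA_ALIGN == 8 (so MEMAREA_ALIGN_MASK == 7):
 *   realign_pointer((void*)0x1003) -> (0x1003 + 7) & ~7 == 0x1008
 *   realign_pointer((void*)0x1008) -> 0x1008 (already aligned; unchanged)
 */
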
/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at. If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Memory space in this chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u;
#endif
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, U_MEM)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096

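/* Concretely (sizes illustrative): a standard chunk is allocated as
 * CHUNK_SIZE + SENTINEL_LEN bytes, so with CHUNK_SIZE 4096 its usable
 * payload (mem_size) works out to 4096 - CHUNK_HEADER_SIZE bytes; see
 * alloc_chunk() below. */
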
/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** How many chunks will we put into the freelist before freeing them? */
#define MAX_FREELIST_LEN 4
/** The number of memarea chunks currently in our freelist. */
static int freelist_len=0;
/** A linked list of unused memory area chunks. Used to prevent us from
 * spinning in malloc/free loops. */
static memarea_chunk_t *freelist = NULL;

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz, int freelist_ok)
{
  tor_assert(sz < SIZE_T_CEILING);
  if (freelist && freelist_ok) {
    memarea_chunk_t *res = freelist;
    freelist = res->next_chunk;
    res->next_chunk = NULL;
    --freelist_len;
    CHECK_SENTINEL(res);
    return res;
  } else {
    size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
    memarea_chunk_t *res;
    chunk_size += SENTINEL_LEN;
    res = tor_malloc(chunk_size);
    res->next_chunk = NULL;
    res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
    res->next_mem = res->U_MEM;
    tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
               ((char*)res)+chunk_size);
    tor_assert(realign_pointer(res->next_mem) == res->next_mem);
    SET_SENTINEL(res);
    return res;
  }
}

/** Release <b>chunk</b> from a memarea, either by adding it to the freelist
 * or by freeing it if the freelist is already too big. */
static void
chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  if (freelist_len < MAX_FREELIST_LEN) {
    ++freelist_len;
    chunk->next_chunk = freelist;
    freelist = chunk;
    chunk->next_mem = chunk->U_MEM;
  } else {
    tor_free(chunk);
  }
}

/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE, 1);
  return head;
}

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* fail fast on use-after-free */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}

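/* Note the contrast with memarea_drop_all(): memarea_clear() keeps the
 * first chunk and the memarea_t itself around for reuse, whereas
 * memarea_drop_all() releases everything, including the area. */
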
/** Remove all unused memarea chunks from the internal freelist. */
void
memarea_clear_freelist(void)
{
  memarea_chunk_t *chunk, *next;
  freelist_len = 0;
  for (chunk = freelist; chunk; chunk = next) {
    next = chunk->next_chunk;
    tor_free(chunk);
  }
  freelist = NULL;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}

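/* Example of the oversize path above (sizes illustrative): with CHUNK_SIZE
 * 4096, a request for 16384 bytes gets a dedicated chunk, which is linked
 * in *second* so that the current first chunk's remaining space still
 * serves later small requests. */
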
/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);

  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
          (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}