Note that Tor requires Automake 1.7. Fixes bug 2305
[tor/rransom.git] / src / common / memarea.c
blobac26c5fd90312273d41d935de2e37e08fa652781
1 /* Copyright (c) 2008-2011, The Tor Project, Inc. */
2 /* See LICENSE for licensing information */
4 /** \file memarea.c
5 * \brief Implementation for memarea_t, an allocator for allocating lots of
6 * small objects that will be freed all at once.
7 */
9 #include "orconfig.h"
10 #include <stdlib.h>
11 #include "memarea.h"
12 #include "util.h"
13 #include "compat.h"
14 #include "log.h"
16 /** All returned pointers should be aligned to the nearest multiple of this
17 * value. */
18 #define MEMAREA_ALIGN SIZEOF_VOID_P
20 #if MEMAREA_ALIGN == 4
21 #define MEMAREA_ALIGN_MASK 3lu
22 #elif MEMAREA_ALIGN == 8
23 #define MEMAREA_ALIGN_MASK 7lu
24 #else
25 #error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
26 #endif
28 /** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
29 static INLINE void *
30 realign_pointer(void *ptr)
32 uintptr_t x = (uintptr_t)ptr;
33 x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
34 tor_assert(((void*)x) >= ptr); // XXXX021 remove this once bug 930 is solved
35 return (void*)x;
/** Implements part of a memarea.  New memory is carved off from chunk->mem
 * in increasing order until a request is too big, at which point a new chunk
 * is allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in u.mem, total? */
  char *next_mem; /**< Next position in u.mem to allocate data at.  If it's
                   * greater than or equal to mem+mem_size, this chunk is
                   * full. */
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *_void_for_alignment; /**< Dummy; used to make sure mem is aligned. */
  } u;
} memarea_chunk_t;

/** Number of bytes of per-chunk overhead before the usable memory begins. */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)

/** Size, in bytes, that we target for each ordinary chunk allocation. */
#define CHUNK_SIZE 4096
58 /** A memarea_t is an allocation region for a set of small memory requests
59 * that will all be freed at once. */
60 struct memarea_t {
61 memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
64 /** How many chunks will we put into the freelist before freeing them? */
65 #define MAX_FREELIST_LEN 4
66 /** The number of memarea chunks currently in our freelist. */
67 static int freelist_len=0;
68 /** A linked list of unused memory area chunks. Used to prevent us from
69 * spinning in malloc/free loops. */
70 static memarea_chunk_t *freelist = NULL;
72 /** Helper: allocate a new memarea chunk of around <b>chunk_size</b> bytes. */
73 static memarea_chunk_t *
74 alloc_chunk(size_t sz, int freelist_ok)
76 tor_assert(sz < SIZE_T_CEILING);
77 if (freelist && freelist_ok) {
78 memarea_chunk_t *res = freelist;
79 freelist = res->next_chunk;
80 res->next_chunk = NULL;
81 --freelist_len;
82 return res;
83 } else {
84 size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
85 memarea_chunk_t *res = tor_malloc_roundup(&chunk_size);
86 res->next_chunk = NULL;
87 res->mem_size = chunk_size - CHUNK_HEADER_SIZE;
88 res->next_mem = res->u.mem;
89 tor_assert(res->next_mem+res->mem_size == ((char*)res)+chunk_size);
90 tor_assert(realign_pointer(res->next_mem) == res->next_mem);
91 return res;
95 /** Release <b>chunk</b> from a memarea, either by adding it to the freelist
96 * or by freeing it if the freelist is already too big. */
97 static void
98 chunk_free(memarea_chunk_t *chunk)
100 if (freelist_len < MAX_FREELIST_LEN) {
101 ++freelist_len;
102 chunk->next_chunk = freelist;
103 freelist = chunk;
104 chunk->next_mem = chunk->u.mem;
105 } else {
106 tor_free(chunk);
110 /** Allocate and return new memarea. */
111 memarea_t *
112 memarea_new(void)
114 memarea_t *head = tor_malloc(sizeof(memarea_t));
115 head->first = alloc_chunk(CHUNK_SIZE, 1);
116 return head;
119 /** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
120 * and friends for this area */
121 void
122 memarea_drop_all(memarea_t *area)
124 memarea_chunk_t *chunk, *next;
125 for (chunk = area->first; chunk; chunk = next) {
126 next = chunk->next_chunk;
127 chunk_free(chunk);
129 area->first = NULL; /*fail fast on */
130 tor_free(area);
133 /** Forget about having allocated anything in <b>area</b>, and free some of
134 * the backing storage associated with it, as appropriate. Invalidates all
135 * pointers returned from memarea_alloc() for this area. */
136 void
137 memarea_clear(memarea_t *area)
139 memarea_chunk_t *chunk, *next;
140 if (area->first->next_chunk) {
141 for (chunk = area->first->next_chunk; chunk; chunk = next) {
142 next = chunk->next_chunk;
143 chunk_free(chunk);
145 area->first->next_chunk = NULL;
147 area->first->next_mem = area->first->u.mem;
150 /** Remove all unused memarea chunks from the internal freelist. */
151 void
152 memarea_clear_freelist(void)
154 memarea_chunk_t *chunk, *next;
155 freelist_len = 0;
156 for (chunk = freelist; chunk; chunk = next) {
157 next = chunk->next_chunk;
158 tor_free(chunk);
160 freelist = NULL;
163 /** Return true iff <b>p</b> is in a range that has been returned by an
164 * allocation from <b>area</b>. */
166 memarea_owns_ptr(const memarea_t *area, const void *p)
168 memarea_chunk_t *chunk;
169 const char *ptr = p;
170 for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
171 if (ptr >= chunk->u.mem && ptr < chunk->next_mem)
172 return 1;
174 return 0;
177 /** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
178 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
179 * size, though we can deal if it isn't. */
180 void *
181 memarea_alloc(memarea_t *area, size_t sz)
183 memarea_chunk_t *chunk = area->first;
184 char *result;
185 tor_assert(chunk);
186 tor_assert(sz < SIZE_T_CEILING);
187 if (sz == 0)
188 sz = 1;
189 if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
190 if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
191 /* This allocation is too big. Stick it in a special chunk, and put
192 * that chunk second in the list. */
193 memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
194 new_chunk->next_chunk = chunk->next_chunk;
195 chunk->next_chunk = new_chunk;
196 chunk = new_chunk;
197 } else {
198 memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
199 new_chunk->next_chunk = chunk;
200 area->first = chunk = new_chunk;
202 tor_assert(chunk->mem_size >= sz);
204 result = chunk->next_mem;
205 chunk->next_mem = chunk->next_mem + sz;
206 // XXXX021 remove these once bug 930 is solved.
207 tor_assert(chunk->next_mem >= chunk->u.mem);
208 tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size);
209 chunk->next_mem = realign_pointer(chunk->next_mem);
210 return result;
213 /** As memarea_alloc(), but clears the memory it returns. */
214 void *
215 memarea_alloc_zero(memarea_t *area, size_t sz)
217 void *result = memarea_alloc(area, sz);
218 memset(result, 0, sz);
219 return result;
222 /** As memdup, but returns the memory from <b>area</b>. */
223 void *
224 memarea_memdup(memarea_t *area, const void *s, size_t n)
226 char *result = memarea_alloc(area, n);
227 memcpy(result, s, n);
228 return result;
231 /** As strdup, but returns the memory from <b>area</b>. */
232 char *
233 memarea_strdup(memarea_t *area, const char *s)
235 return memarea_memdup(area, s, strlen(s)+1);
238 /** As strndup, but returns the memory from <b>area</b>. */
239 char *
240 memarea_strndup(memarea_t *area, const char *s, size_t n)
242 size_t ln;
243 char *result;
244 const char *cp, *end = s+n;
245 tor_assert(n < SIZE_T_CEILING);
246 for (cp = s; cp < end && *cp; ++cp)
248 /* cp now points to s+n, or to the 0 in the string. */
249 ln = cp-s;
250 result = memarea_alloc(area, ln+1);
251 memcpy(result, s, ln);
252 result[ln]='\0';
253 return result;
256 /** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
257 * and <b>used_out</b> to the number of bytes currently used. */
258 void
259 memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
261 size_t a = 0, u = 0;
262 memarea_chunk_t *chunk;
263 for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
264 a += CHUNK_HEADER_SIZE + chunk->mem_size;
265 tor_assert(chunk->next_mem >= chunk->u.mem);
266 u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->u.mem);
268 *allocated_out = a;
269 *used_out = u;
272 /** Assert that <b>area</b> is okay. */
273 void
274 memarea_assert_ok(memarea_t *area)
276 memarea_chunk_t *chunk;
277 tor_assert(area->first);
279 for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
280 tor_assert(chunk->next_mem >= chunk->u.mem);
281 tor_assert(chunk->next_mem <=
282 (char*) realign_pointer(chunk->u.mem+chunk->mem_size));