2 * Various storage structures (pool allocation, vector, hash table)
4 * Copyright (C) 1993, Eric Youngdale.
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
25 #include "wine/debug.h"
27 #include "dbghelp_private.h"
32 WINE_DEFAULT_DEBUG_CHANNEL(dbghelp
);
41 void pool_init(struct pool
* a
, size_t arena_size
)
43 list_init( &a
->arena_list
);
44 list_init( &a
->arena_full
);
45 a
->arena_size
= arena_size
;
48 void pool_destroy(struct pool
* pool
)
50 struct pool_arena
* arena
;
51 struct pool_arena
* next
;
54 size_t alloc
, used
, num
;
56 alloc
= used
= num
= 0;
57 LIST_FOR_EACH_ENTRY( arena
, &pool
->arena_list
, struct pool_arena
, entry
)
59 alloc
+= arena
->end
- (char *)arena
;
60 used
+= arena
->current
- (char*)arena
;
63 LIST_FOR_EACH_ENTRY( arena
, &pool
->arena_full
, struct pool_arena
, entry
)
65 alloc
+= arena
->end
- (char *)arena
;
66 used
+= arena
->current
- (char*)arena
;
69 if (alloc
== 0) alloc
= 1; /* avoid division by zero */
70 FIXME("STATS: pool %p has allocated %u kbytes, used %u kbytes in %u arenas, non-allocation ratio: %.2f%%\n",
71 pool
, (unsigned)(alloc
>> 10), (unsigned)(used
>> 10), (unsigned)num
,
72 100.0 - (float)used
/ (float)alloc
* 100.0);
75 LIST_FOR_EACH_ENTRY_SAFE( arena
, next
, &pool
->arena_list
, struct pool_arena
, entry
)
77 list_remove( &arena
->entry
);
78 HeapFree(GetProcessHeap(), 0, arena
);
80 LIST_FOR_EACH_ENTRY_SAFE( arena
, next
, &pool
->arena_full
, struct pool_arena
, entry
)
82 list_remove( &arena
->entry
);
83 HeapFree(GetProcessHeap(), 0, arena
);
87 void* pool_alloc(struct pool
* pool
, size_t len
)
89 struct pool_arena
* arena
;
93 len
= (len
+ 3) & ~3; /* round up size on DWORD boundary */
95 LIST_FOR_EACH_ENTRY( arena
, &pool
->arena_list
, struct pool_arena
, entry
)
97 if (arena
->end
- arena
->current
>= len
)
100 arena
->current
+= len
;
101 if (arena
->current
+ 16 >= arena
->end
)
103 list_remove( &arena
->entry
);
104 list_add_tail( &pool
->arena_full
, &arena
->entry
);
110 size
= max( pool
->arena_size
, len
);
111 arena
= HeapAlloc(GetProcessHeap(), 0, size
+ sizeof(struct pool_arena
));
112 if (!arena
) return NULL
;
115 arena
->current
= (char*)ret
+ len
;
116 arena
->end
= (char*)ret
+ size
;
117 if (arena
->current
+ 16 >= arena
->end
)
118 list_add_tail( &pool
->arena_full
, &arena
->entry
);
120 list_add_head( &pool
->arena_list
, &arena
->entry
);
/* Duplicate str into pool-owned storage.  Returns NULL if the pool
 * cannot satisfy the allocation. */
char* pool_strdup(struct pool* pool, const char* str)
{
    char* copy = pool_alloc(pool, strlen(str) + 1);

    if (copy) strcpy(copy, str);
    return copy;
}
131 void vector_init(struct vector
* v
, unsigned esz
, unsigned bucket_sz
)
134 /* align size on DWORD boundaries */
135 v
->elt_size
= (esz
+ 3) & ~3;
138 case 2: v
->shift
= 1; break;
139 case 4: v
->shift
= 2; break;
140 case 8: v
->shift
= 3; break;
141 case 16: v
->shift
= 4; break;
142 case 32: v
->shift
= 5; break;
143 case 64: v
->shift
= 6; break;
144 case 128: v
->shift
= 7; break;
145 case 256: v
->shift
= 8; break;
146 case 512: v
->shift
= 9; break;
147 case 1024: v
->shift
= 10; break;
151 v
->buckets_allocated
= 0;
155 unsigned vector_length(const struct vector
* v
)
160 void* vector_at(const struct vector
* v
, unsigned pos
)
164 if (pos
>= v
->num_elts
) return NULL
;
165 o
= pos
& ((1 << v
->shift
) - 1);
166 return (char*)v
->buckets
[pos
>> v
->shift
] + o
* v
->elt_size
;
169 void* vector_add(struct vector
* v
, struct pool
* pool
)
171 unsigned ncurr
= v
->num_elts
++;
173 /* check that we don't wrap around */
174 assert(v
->num_elts
> ncurr
);
175 if (ncurr
== (v
->num_buckets
<< v
->shift
))
177 if(v
->num_buckets
== v
->buckets_allocated
)
179 /* Double the bucket cache, so it scales well with big vectors.*/
180 unsigned new_reserved
;
183 new_reserved
= 2*v
->buckets_allocated
;
184 if(new_reserved
== 0) new_reserved
= 1;
186 /* Don't even try to resize memory.
187 Pool datastructure is very inefficient with reallocs. */
188 new = pool_alloc(pool
, new_reserved
* sizeof(void*));
189 memcpy(new, v
->buckets
, v
->buckets_allocated
* sizeof(void*));
191 v
->buckets_allocated
= new_reserved
;
193 v
->buckets
[v
->num_buckets
] = pool_alloc(pool
, v
->elt_size
<< v
->shift
);
194 return v
->buckets
[v
->num_buckets
++];
196 return vector_at(v
, ncurr
);
199 /* We construct the sparse array as two vectors (of equal size)
200 * The first vector (key2index) is the lookup table between the key and
201 * an index in the second vector (elements)
202 * When inserting an element, it's always appended in second vector (and
203 * never moved in memory later on), only the first vector is reordered
211 void sparse_array_init(struct sparse_array
* sa
, unsigned elt_sz
, unsigned bucket_sz
)
213 vector_init(&sa
->key2index
, sizeof(struct key2index
), bucket_sz
);
214 vector_init(&sa
->elements
, elt_sz
, bucket_sz
);
217 /******************************************************************
218 * sparse_array_lookup
 * Returns the first index whose key is >= the passed key
222 static struct key2index
* sparse_array_lookup(const struct sparse_array
* sa
,
223 ULONG_PTR key
, unsigned* idx
)
225 struct key2index
* pk2i
;
228 if (!sa
->elements
.num_elts
)
233 high
= sa
->elements
.num_elts
;
234 pk2i
= vector_at(&sa
->key2index
, high
- 1);
240 if (pk2i
->key
== key
)
246 pk2i
= vector_at(&sa
->key2index
, low
);
247 if (pk2i
->key
>= key
)
252 /* now we have: sa(lowest key) < key < sa(highest key) */
255 *idx
= (low
+ high
) / 2;
256 pk2i
= vector_at(&sa
->key2index
, *idx
);
257 if (pk2i
->key
> key
) high
= *idx
;
258 else if (pk2i
->key
< key
) low
= *idx
+ 1;
261 /* binary search could return exact item, we search for highest one
265 pk2i
= vector_at(&sa
->key2index
, ++(*idx
));
269 void* sparse_array_find(const struct sparse_array
* sa
, ULONG_PTR key
)
272 struct key2index
* pk2i
;
274 if ((pk2i
= sparse_array_lookup(sa
, key
, &idx
)) && pk2i
->key
== key
)
275 return vector_at(&sa
->elements
, pk2i
->index
);
279 void* sparse_array_add(struct sparse_array
* sa
, ULONG_PTR key
,
283 struct key2index
* pk2i
;
284 struct key2index
* to
;
286 pk2i
= sparse_array_lookup(sa
, key
, &idx
);
287 if (pk2i
&& pk2i
->key
== key
)
289 FIXME("re-adding an existing key\n");
292 to
= vector_add(&sa
->key2index
, pool
);
295 /* we need to shift vector's content... */
296 /* let's do it brute force... (FIXME) */
297 assert(sa
->key2index
.num_elts
>= 2);
298 for (i
= sa
->key2index
.num_elts
- 1; i
> idx
; i
--)
300 pk2i
= vector_at(&sa
->key2index
, i
- 1);
307 to
->index
= sa
->elements
.num_elts
;
309 return vector_add(&sa
->elements
, pool
);
312 unsigned sparse_array_length(const struct sparse_array
* sa
)
314 return sa
->elements
.num_elts
;
/* Jenkins' one-at-a-time hash of name, reduced modulo num_buckets.
 * NOTE(review): name bytes go through plain char, so sign extension of
 * non-ASCII bytes is platform-dependent — confirm if that ever matters. */
static unsigned hash_table_hash(const char* name, unsigned num_buckets)
{
    unsigned    hash = 0;

    for (; *name; name++)
    {
        hash += *name;
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }
    /* final avalanche */
    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash % num_buckets;
}
332 void hash_table_init(struct pool
* pool
, struct hash_table
* ht
, unsigned num_buckets
)
335 ht
->num_buckets
= num_buckets
;
/* Tear down a hash table.  All storage came from the pool, so nothing is
 * freed here; with USE_STATS defined, bucket-distribution statistics are
 * dumped instead. */
void hash_table_destroy(struct hash_table* ht)
{
#if defined(USE_STATS)
    int                         i;
    unsigned                    len;
    unsigned                    min = 0xffffffff, max = 0, sq = 0;
    struct hash_table_elt*      elt;
    double                      mean, variance;

    /* gather chain-length statistics over all buckets */
    for (i = 0; i < ht->num_buckets; i++)
    {
        /* NOTE(review): looks like this stats code predates the
         * hash_table_bucket struct used by hash_table_add; it likely
         * should read ht->buckets[i].first — confirm before enabling */
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len < min) min = len;
        if (len > max) max = len;
        sq += len * len;
    }
    mean = (double)ht->num_elts / ht->num_buckets;
    variance = (double)sq / ht->num_buckets - mean * mean;
    FIXME("STATS: elts[num:%-4u size:%u mean:%f] buckets[min:%-4u variance:%+f max:%-4u]\n",
          ht->num_elts, ht->num_buckets, mean, min, variance, max);

    /* dump the contents of (one of) the longest bucket(s) */
    for (i = 0; i < ht->num_buckets; i++)
    {
        for (len = 0, elt = ht->buckets[i]; elt; elt = elt->next) len++;
        if (len == max)
        {
            FIXME("Longest bucket:\n");
            for (elt = ht->buckets[i]; elt; elt = elt->next)
                FIXME("\t%s\n", elt->name);
            break;
        }
    }
#endif
}
376 void hash_table_add(struct hash_table
* ht
, struct hash_table_elt
* elt
)
378 unsigned hash
= hash_table_hash(elt
->name
, ht
->num_buckets
);
382 ht
->buckets
= pool_alloc(ht
->pool
, ht
->num_buckets
* sizeof(struct hash_table_bucket
));
384 memset(ht
->buckets
, 0, ht
->num_buckets
* sizeof(struct hash_table_bucket
));
387 /* in some cases, we need to get back the symbols of same name in the order
388 * in which they've been inserted. So insert new elements at the end of the list.
390 if (!ht
->buckets
[hash
].first
)
392 ht
->buckets
[hash
].first
= elt
;
396 ht
->buckets
[hash
].last
->next
= elt
;
398 ht
->buckets
[hash
].last
= elt
;
403 void hash_table_iter_init(const struct hash_table
* ht
,
404 struct hash_table_iter
* hti
, const char* name
)
409 hti
->last
= hash_table_hash(name
, ht
->num_buckets
);
410 hti
->index
= hti
->last
- 1;
414 hti
->last
= ht
->num_buckets
- 1;
420 void* hash_table_iter_up(struct hash_table_iter
* hti
)
422 if (!hti
->ht
->buckets
) return NULL
;
424 if (hti
->element
) hti
->element
= hti
->element
->next
;
425 while (!hti
->element
&& hti
->index
< hti
->last
)
426 hti
->element
= hti
->ht
->buckets
[++hti
->index
].first
;