1 /* This file is included by symbol.c */
6 #define ID_TABLE_DEBUG 0
9 #if ID_TABLE_DEBUG == 0
13 #include "ruby_assert.h"
15 typedef rb_id_serial_t id_key_t
;
20 return rb_id_serial_to_id(key
);
23 static inline id_key_t
26 return rb_id_to_serial(id
);
29 /* simple open addressing with quadratic probing.
30 uses mark-bit on collisions - need extra 1 bit,
31 ID is strictly 3 bits larger than rb_id_serial_t */
33 typedef struct rb_id_item
{
49 #define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
50 #define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
51 #define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
52 #define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
54 ITEM_SET_KEY(struct rb_id_table
*tbl
, int i
, id_key_t key
)
56 tbl
->items
[i
].key
= key
;
59 #define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
60 #define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
61 #define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
62 #define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
64 ITEM_SET_KEY(struct rb_id_table
*tbl
, int i
, id_key_t key
)
66 tbl
->items
[i
].key
= (key
<< 1) | ITEM_COLLIDED(tbl
, i
);
/* Round a requested capacity up to 4 * 2^k (minimum 4): smear the high bit
 * of capa/4 rightward, then add one and scale back up.
 * NOTE(review): only the return statement is visible in this chunk; the
 * bit-smearing body was reconstructed — confirm against the upstream file. */
static inline int
round_capa(int capa)
{
    /* minsize is 4 */
    capa >>= 2;
    capa |= capa >> 1;
    capa |= capa >> 2;
    capa |= capa >> 4;
    capa |= capa >> 8;
    capa |= capa >> 16;
    return (capa + 1) << 2;
}
83 static struct rb_id_table
*
84 rb_id_table_init(struct rb_id_table
*tbl
, int capa
)
86 MEMZERO(tbl
, struct rb_id_table
, 1);
88 capa
= round_capa(capa
);
89 tbl
->capa
= (int)capa
;
90 tbl
->items
= ZALLOC_N(item_t
, capa
);
95 MJIT_FUNC_EXPORTED
struct rb_id_table
*
96 rb_id_table_create(size_t capa
)
98 struct rb_id_table
*tbl
= ALLOC(struct rb_id_table
);
99 return rb_id_table_init(tbl
, (int)capa
);
103 rb_id_table_free(struct rb_id_table
*tbl
)
110 rb_id_table_clear(struct rb_id_table
*tbl
)
114 MEMZERO(tbl
->items
, item_t
, tbl
->capa
);
118 rb_id_table_size(const struct rb_id_table
*tbl
)
120 return (size_t)tbl
->num
;
124 rb_id_table_memsize(const struct rb_id_table
*tbl
)
126 return sizeof(item_t
) * tbl
->capa
+ sizeof(struct rb_id_table
);
130 hash_table_index(struct rb_id_table
* tbl
, id_key_t key
)
133 int mask
= tbl
->capa
- 1;
136 while (key
!= ITEM_GET_KEY(tbl
, ix
)) {
137 if (!ITEM_COLLIDED(tbl
, ix
))
139 ix
= (ix
+ d
) & mask
;
148 hash_table_raw_insert(struct rb_id_table
*tbl
, id_key_t key
, VALUE val
)
150 int mask
= tbl
->capa
- 1;
154 while (ITEM_KEY_ISSET(tbl
, ix
)) {
155 ITEM_SET_COLLIDED(tbl
, ix
);
156 ix
= (ix
+ d
) & mask
;
160 if (!ITEM_COLLIDED(tbl
, ix
)) {
163 ITEM_SET_KEY(tbl
, ix
, key
);
164 tbl
->items
[ix
].val
= val
;
168 hash_delete_index(struct rb_id_table
*tbl
, int ix
)
171 if (!ITEM_COLLIDED(tbl
, ix
)) {
175 ITEM_SET_KEY(tbl
, ix
, 0);
176 tbl
->items
[ix
].val
= 0;
185 hash_table_extend(struct rb_id_table
* tbl
)
187 if (tbl
->used
+ (tbl
->used
>> 1) >= tbl
->capa
) {
188 int new_cap
= round_capa(tbl
->num
+ (tbl
->num
>> 1));
191 struct rb_id_table tmp_tbl
= {0, 0, 0};
192 if (new_cap
< tbl
->capa
) {
193 new_cap
= round_capa(tbl
->used
+ (tbl
->used
>> 1));
195 tmp_tbl
.capa
= new_cap
;
196 tmp_tbl
.items
= ZALLOC_N(item_t
, new_cap
);
197 for (i
= 0; i
< tbl
->capa
; i
++) {
198 id_key_t key
= ITEM_GET_KEY(tbl
, i
);
200 hash_table_raw_insert(&tmp_tbl
, key
, tbl
->items
[i
].val
);
#if ID_TABLE_DEBUG && 0
/* Debug dump of every occupied slot to stderr. Dead code: the `&& 0` keeps
 * it from ever compiling. NOTE(review): it reads tbl->keys, a field the
 * current struct does not have — this helper is stale and would need
 * updating before being re-enabled. The closing #endif was missing from
 * this chunk and has been restored. */
static void
hash_table_show(struct rb_id_table *tbl)
{
    const id_key_t *keys = tbl->keys;
    const int capa = tbl->capa;
    int i;

    fprintf(stderr, "tbl: %p (capa: %d, num: %d, used: %d)\n", tbl, tbl->capa, tbl->num, tbl->used);
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            fprintf(stderr, " -> [%d] %s %d\n", i, rb_id2name(key2id(keys[i])), (int)keys[i]);
        }
    }
}
#endif
226 MJIT_FUNC_EXPORTED
int
227 rb_id_table_lookup(struct rb_id_table
*tbl
, ID id
, VALUE
*valp
)
229 id_key_t key
= id2key(id
);
230 int index
= hash_table_index(tbl
, key
);
233 *valp
= tbl
->items
[index
].val
;
242 rb_id_table_insert_key(struct rb_id_table
*tbl
, const id_key_t key
, const VALUE val
)
244 const int index
= hash_table_index(tbl
, key
);
247 tbl
->items
[index
].val
= val
;
250 hash_table_extend(tbl
);
251 hash_table_raw_insert(tbl
, key
, val
);
256 MJIT_FUNC_EXPORTED
int
257 rb_id_table_insert(struct rb_id_table
*tbl
, ID id
, VALUE val
)
259 return rb_id_table_insert_key(tbl
, id2key(id
), val
);
263 rb_id_table_delete(struct rb_id_table
*tbl
, ID id
)
265 const id_key_t key
= id2key(id
);
266 int index
= hash_table_index(tbl
, key
);
267 return hash_delete_index(tbl
, index
);
271 rb_id_table_foreach_with_replace(struct rb_id_table
*tbl
, rb_id_table_foreach_func_t
*func
, rb_id_table_update_callback_func_t
*replace
, void *data
)
273 int i
, capa
= tbl
->capa
;
275 for (i
=0; i
<capa
; i
++) {
276 if (ITEM_KEY_ISSET(tbl
, i
)) {
277 enum rb_id_table_iterator_result ret
= (*func
)((ID
)0, tbl
->items
[i
].val
, data
);
278 assert(ITEM_GET_KEY(tbl
, i
));
280 if (ret
== ID_TABLE_REPLACE
) {
281 VALUE val
= tbl
->items
[i
].val
;
282 ret
= (*replace
)(NULL
, &val
, data
, TRUE
);
283 tbl
->items
[i
].val
= val
;
285 else if (ret
== ID_TABLE_STOP
)
292 rb_id_table_foreach(struct rb_id_table
*tbl
, rb_id_table_foreach_func_t
*func
, void *data
)
294 int i
, capa
= tbl
->capa
;
296 for (i
=0; i
<capa
; i
++) {
297 if (ITEM_KEY_ISSET(tbl
, i
)) {
298 const id_key_t key
= ITEM_GET_KEY(tbl
, i
);
299 enum rb_id_table_iterator_result ret
= (*func
)(key2id(key
), tbl
->items
[i
].val
, data
);
302 if (ret
== ID_TABLE_DELETE
)
303 hash_delete_index(tbl
, i
);
304 else if (ret
== ID_TABLE_STOP
)
311 rb_id_table_foreach_values(struct rb_id_table
*tbl
, rb_id_table_foreach_values_func_t
*func
, void *data
)
313 int i
, capa
= tbl
->capa
;
315 for (i
=0; i
<capa
; i
++) {
316 if (ITEM_KEY_ISSET(tbl
, i
)) {
317 enum rb_id_table_iterator_result ret
= (*func
)(tbl
->items
[i
].val
, data
);
319 if (ret
== ID_TABLE_DELETE
)
320 hash_delete_index(tbl
, i
);
321 else if (ret
== ID_TABLE_STOP
)