nasmlib/raa.c
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 1996-2018 The NASM Authors - All Rights Reserved
 *   See the file AUTHORS included with the NASM distribution for
 *   the specific copyright holders.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following
 *   conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------- */
#include "nasmlib.h"
#include "raa.h"
#include "ilog2.h"
/*
 * Routines to manage a dynamic random access array of int64_ts which
 * may grow in size to be more than the largest single malloc'able
 * chunk.
 */
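/*
 * Usage sketch (illustrative only; these entry points are declared in
 * raa.h). The write functions may reallocate the root, so the caller
 * must always store the returned pointer back:
 *
 *     struct RAA *r = NULL;
 *     r = raa_write(r, 1000000, 42);     -- grows the tree as needed
 *     int64_t v = raa_read(r, 1000000);  -- v == 42
 *     raa_free(r);
 */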
#define RAA_LAYERSHIFT  11      /* 2^this many items per layer */
#define RAA_LAYERSIZE   ((size_t)1 << RAA_LAYERSHIFT)
#define RAA_LAYERMASK   (RAA_LAYERSIZE-1)
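/*
 * An index is consumed RAA_LAYERSHIFT bits at a time, most significant
 * digit first. For example (illustrative), with the default shift of
 * 11, index 0x123456 decomposes into a level-1 branch slot of
 * (0x123456 >> 11) & 0x7ff == 0x246 and a leaf slot of
 * 0x123456 & 0x7ff == 0x456.
 */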
typedef struct RAA RAA;
typedef union RAA_UNION RAA_UNION;
typedef struct RAA_LEAF RAA_LEAF;
typedef struct RAA_BRANCH RAA_BRANCH;
struct RAA {
    /* Last position in this RAA */
    raaindex endposn;

    /*
     * Number of layers below this one to get to the real data. 0
     * means this structure is a leaf, holding RAA_LAYERSIZE real
     * data items; 1 and above mean it's a branch, holding
     * RAA_LAYERSIZE pointers to the next level branch or leaf
     * structures.
     */
    unsigned int layers;

    /*
     * Log2 of the number of real data items spanned by one position
     * in the `data' array at this level. This is trivially 0 for a
     * leaf (level 0); for a level n branch it is n*RAA_LAYERSHIFT.
     */
    unsigned int shift;

    /*
     * The actual data
     */
    union RAA_UNION {
        struct RAA_LEAF {
            union intorptr data[RAA_LAYERSIZE];
        } l;
        struct RAA_BRANCH {
            struct RAA *data[RAA_LAYERSIZE];
        } b;
    } u;
};
#define LEAFSIZ   (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_LEAF))
#define BRANCHSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_BRANCH))
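/*
 * LEAFSIZ and BRANCHSIZ are the allocation sizes for a leaf and a
 * branch respectively: the size of the RAA header plus only the union
 * member actually in use. On platforms where a pointer is smaller than
 * union intorptr, a branch allocation is smaller than a leaf's.
 */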
static struct RAA *raa_init_layer(raaindex posn, unsigned int layers)
{
    struct RAA *r;
    raaindex posmask;

    r = nasm_zalloc((layers == 0) ? LEAFSIZ : BRANCHSIZ);
    r->shift = layers * RAA_LAYERSHIFT;
    r->layers = layers;
    posmask = ((raaindex)RAA_LAYERSIZE << r->shift) - 1;
    r->endposn = posn | posmask;    /* Last index this subtree can hold */
    return r;
}
void raa_free(struct RAA *r)
{
    if (!r)
        return;

    if (r->layers) {
        struct RAA **p = r->u.b.data;
        size_t i;
        for (i = 0; i < RAA_LAYERSIZE; i++)
            raa_free(*p++);
    }
    nasm_free(r);
}
static const union intorptr *real_raa_read(struct RAA *r, raaindex posn)
{
    nasm_assert(posn <= (~(raaindex)0 >> 1));

    if (unlikely(!r || posn > r->endposn))
        return NULL;            /* Beyond the end */

    while (r->layers) {
        size_t l = (posn >> r->shift) & RAA_LAYERMASK;
        r = r->u.b.data[l];
        if (!r)
            return NULL;        /* Not present */
    }

    return &r->u.l.data[posn & RAA_LAYERMASK];
}
int64_t raa_read(struct RAA *r, raaindex pos)
{
    const union intorptr *ip;

    ip = real_raa_read(r, pos);
    return ip ? ip->i : 0;
}
void *raa_read_ptr(struct RAA *r, raaindex pos)
{
    const union intorptr *ip;

    ip = real_raa_read(r, pos);
    return ip ? ip->p : NULL;
}
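/*
 * Positions that have never been written read back as 0 (or NULL for
 * the pointer flavour): real_raa_read() returns NULL both for indices
 * beyond the end of the array and for unallocated interior chunks, and
 * the wrappers above map that to the zero value.
 */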
static struct RAA *
real_raa_write(struct RAA *r, raaindex posn, union intorptr value)
{
    struct RAA *result;

    nasm_assert(posn <= (~(raaindex)0 >> 1));

    if (unlikely(!r)) {
        /* Create a new top-level RAA */
        r = raa_init_layer(posn, ilog2_64(posn)/RAA_LAYERSHIFT);
    } else {
        while (unlikely(r->endposn < posn)) {
            /* We need to add layers to an existing RAA */
            struct RAA *s = raa_init_layer(r->endposn, r->layers + 1);
            s->u.b.data[0] = r;     /* Old root becomes the first child */
            r = s;
        }
    }

    result = r;

    while (r->layers) {
        struct RAA **s;
        size_t l = (posn >> r->shift) & RAA_LAYERMASK;
        s = &r->u.b.data[l];
        if (unlikely(!*s))
            *s = raa_init_layer(posn, r->layers - 1);
        r = *s;
    }
    r->u.l.data[posn & RAA_LAYERMASK] = value;

    return result;
}
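/*
 * Public wrappers. Writing past the current bounds may allocate a new
 * root (or a whole new RAA), so these return the new root pointer;
 * callers must use the return value in place of the pointer passed in.
 */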
struct RAA *raa_write(struct RAA *r, raaindex posn, int64_t value)
{
    union intorptr ip;

    ip.i = value;
    return real_raa_write(r, posn, ip);
}
struct RAA *raa_write_ptr(struct RAA *r, raaindex posn, void *value)
{
    union intorptr ip;

    ip.p = value;
    return real_raa_write(r, posn, ip);
}
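/*
 * Minimal self-test sketch (hypothetical; the RAA_TEST guard and this
 * driver are illustrative only and not part of the NASM build).
 */
#ifdef RAA_TEST
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    struct RAA *r = NULL;
    raaindex i;

    /* Write sparse entries far enough apart to force multiple layers */
    for (i = 0; i < 10000000; i += 999983)
        r = raa_write(r, i, (int64_t)(i + 1));

    /* Read them back; unwritten positions would read as 0 */
    for (i = 0; i < 10000000; i += 999983)
        printf("raa[%" PRIu64 "] = %" PRId64 "\n",
               (uint64_t)i, raa_read(r, i));

    raa_free(r);
    return 0;
}
#endif /* RAA_TEST */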