output: bin -- Use nasm_error helpers
[nasm.git] / nasmlib / raa.c
blobfeb86970350269126223ebbde53eb200fb4be6a9
1 /* ----------------------------------------------------------------------- *
3 * Copyright 1996-2018 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * ----------------------------------------------------------------------- */
34 #include "nasmlib.h"
35 #include "raa.h"
36 #include "ilog2.h"
/*
 * Routines to manage a dynamic random access array of int64_ts which
 * may grow in size to be more than the largest single malloc'able
 * chunk.
 */
44 #define RAA_LAYERSHIFT 11 /* 2^this many items per layer */
45 #define RAA_LAYERSIZE ((size_t)1 << RAA_LAYERSHIFT)
46 #define RAA_LAYERMASK (RAA_LAYERSIZE-1)
48 typedef struct RAA RAA;
49 typedef union RAA_UNION RAA_UNION;
50 typedef struct RAA_LEAF RAA_LEAF;
51 typedef struct RAA_BRANCH RAA_BRANCH;
53 union intorptr {
54 int64_t i;
55 void *p;
58 struct RAA {
59 /* Last position in this RAA */
60 raaindex endposn;
63 * Number of layers below this one to get to the real data. 0
64 * means this structure is a leaf, holding RAA_LAYERSIZE real
65 * data items; 1 and above mean it's a branch, holding
66 * RAA_LAYERSIZE pointers to the next level branch or leaf
67 * structures.
69 unsigned int layers;
72 * Number of real data items spanned by one position in the
73 * `data' array at this level. This number is 0 trivially, for
74 * a leaf (level 0): for a level n branch it should be
75 * n*RAA_LAYERSHIFT.
77 unsigned int shift;
80 * The actual data
82 union RAA_UNION {
83 struct RAA_LEAF {
84 union intorptr data[RAA_LAYERSIZE];
85 } l;
86 struct RAA_BRANCH {
87 struct RAA *data[RAA_LAYERSIZE];
88 } b;
89 } u;
92 #define LEAFSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_LEAF))
93 #define BRANCHSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_BRANCH))
95 static struct RAA *raa_init_layer(raaindex posn, unsigned int layers)
97 struct RAA *r;
98 raaindex posmask;
100 r = nasm_zalloc((layers == 0) ? LEAFSIZ : BRANCHSIZ);
101 r->shift = layers * RAA_LAYERSHIFT;
102 r->layers = layers;
103 posmask = ((raaindex)RAA_LAYERSIZE << r->shift) - 1;
104 r->endposn = posn | posmask;
105 return r;
108 void raa_free(struct RAA *r)
110 if (!r)
111 return;
113 if (r->layers) {
114 struct RAA **p = r->u.b.data;
115 size_t i;
116 for (i = 0; i < RAA_LAYERSIZE; i++)
117 raa_free(*p++);
119 nasm_free(r);
122 static const union intorptr *real_raa_read(struct RAA *r, raaindex posn)
124 nasm_assert(posn <= (~(raaindex)0 >> 1));
126 if (unlikely(!r || posn > r->endposn))
127 return NULL; /* Beyond the end */
129 while (r->layers) {
130 size_t l = (posn >> r->shift) & RAA_LAYERMASK;
131 r = r->u.b.data[l];
132 if (!r)
133 return NULL; /* Not present */
135 return &r->u.l.data[posn & RAA_LAYERMASK];
138 int64_t raa_read(struct RAA *r, raaindex pos)
140 const union intorptr *ip;
142 ip = real_raa_read(r, pos);
143 return ip ? ip->i : 0;
146 void *raa_read_ptr(struct RAA *r, raaindex pos)
148 const union intorptr *ip;
150 ip = real_raa_read(r, pos);
151 return ip ? ip->p : NULL;
155 static struct RAA *
156 real_raa_write(struct RAA *r, raaindex posn, union intorptr value)
158 struct RAA *result;
160 nasm_assert(posn <= (~(raaindex)0 >> 1));
162 if (unlikely(!r)) {
163 /* Create a new top-level RAA */
164 r = raa_init_layer(posn, ilog2_64(posn)/RAA_LAYERSHIFT);
165 } else {
166 while (unlikely(r->endposn < posn)) {
167 /* We need to add layers to an existing RAA */
168 struct RAA *s = raa_init_layer(r->endposn, r->layers + 1);
169 s->u.b.data[0] = r;
170 r = s;
174 result = r;
176 while (r->layers) {
177 struct RAA **s;
178 size_t l = (posn >> r->shift) & RAA_LAYERMASK;
179 s = &r->u.b.data[l];
180 if (unlikely(!*s))
181 *s = raa_init_layer(posn, r->layers - 1);
182 r = *s;
184 r->u.l.data[posn & RAA_LAYERMASK] = value;
186 return result;
189 struct RAA *raa_write(struct RAA *r, raaindex posn, int64_t value)
191 union intorptr ip;
193 ip.i = value;
194 return real_raa_write(r, posn, ip);
197 struct RAA *raa_write_ptr(struct RAA *r, raaindex posn, void *value)
199 union intorptr ip;
201 ip.p = value;
202 return real_raa_write(r, posn, ip);