/* ----------------------------------------------------------------------- *
 *
 *   Copyright 1996-2018 The NASM Authors - All Rights Reserved
 *   See the file AUTHORS included with the NASM distribution for
 *   the specific copyright holders.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following
 *   conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 *     CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 *     INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *     MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 *     CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 *     NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 *     HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *     OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 *     EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ----------------------------------------------------------------------- */

#include "nasmlib.h"
#include "raa.h"
#include "ilog2.h"

/*
 * Routines to manage a dynamic random access array of int64_ts which
 * may grow in size to be more than the largest single malloc'able
 * chunk.
 */

H. Peter Anvin903ea542018-06-14 19:21:12 -070044#define RAA_LAYERSHIFT 11 /* 2^this many items per layer */
45#define RAA_LAYERSIZE ((size_t)1 << RAA_LAYERSHIFT)
46#define RAA_LAYERMASK (RAA_LAYERSIZE-1)
H. Peter Anvin82a30822016-02-16 17:46:18 -080047
48typedef struct RAA RAA;
49typedef union RAA_UNION RAA_UNION;
50typedef struct RAA_LEAF RAA_LEAF;
51typedef struct RAA_BRANCH RAA_BRANCH;
52
53struct RAA {
H. Peter Anvin903ea542018-06-14 19:21:12 -070054 /* Last position in this RAA */
55 raaindex endposn;
56
H. Peter Anvin82a30822016-02-16 17:46:18 -080057 /*
58 * Number of layers below this one to get to the real data. 0
H. Peter Anvin903ea542018-06-14 19:21:12 -070059 * means this structure is a leaf, holding RAA_LAYERSIZE real
H. Peter Anvin82a30822016-02-16 17:46:18 -080060 * data items; 1 and above mean it's a branch, holding
61 * RAA_LAYERSIZE pointers to the next level branch or leaf
62 * structures.
63 */
H. Peter Anvin903ea542018-06-14 19:21:12 -070064 unsigned int layers;
H. Peter Anvin82a30822016-02-16 17:46:18 -080065
66 /*
67 * Number of real data items spanned by one position in the
68 * `data' array at this level. This number is 0 trivially, for
H. Peter Anvin903ea542018-06-14 19:21:12 -070069 * a leaf (level 0): for a level n branch it should be
70 * n*RAA_LAYERSHIFT.
H. Peter Anvin82a30822016-02-16 17:46:18 -080071 */
H. Peter Anvin903ea542018-06-14 19:21:12 -070072 unsigned int shift;
H. Peter Anvin82a30822016-02-16 17:46:18 -080073
H. Peter Anvin903ea542018-06-14 19:21:12 -070074 /*
75 * The actual data
76 */
H. Peter Anvin82a30822016-02-16 17:46:18 -080077 union RAA_UNION {
78 struct RAA_LEAF {
H. Peter Anvin903ea542018-06-14 19:21:12 -070079 union intorptr data[RAA_LAYERSIZE];
H. Peter Anvin82a30822016-02-16 17:46:18 -080080 } l;
81 struct RAA_BRANCH {
82 struct RAA *data[RAA_LAYERSIZE];
83 } b;
84 } u;
85};
86
H. Peter Anvinfcb89092008-06-09 17:40:16 -070087#define LEAFSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_LEAF))
88#define BRANCHSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_BRANCH))
89
H. Peter Anvin903ea542018-06-14 19:21:12 -070090static struct RAA *raa_init_layer(raaindex posn, unsigned int layers)
H. Peter Anvinfcb89092008-06-09 17:40:16 -070091{
92 struct RAA *r;
H. Peter Anvin903ea542018-06-14 19:21:12 -070093 raaindex posmask;
H. Peter Anvinfcb89092008-06-09 17:40:16 -070094
H. Peter Anvin903ea542018-06-14 19:21:12 -070095 r = nasm_zalloc((layers == 0) ? LEAFSIZ : BRANCHSIZ);
96 r->shift = layers * RAA_LAYERSHIFT;
97 r->layers = layers;
98 posmask = ((raaindex)RAA_LAYERSIZE << r->shift) - 1;
99 r->endposn = posn | posmask;
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700100 return r;
101}
102
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700103void raa_free(struct RAA *r)
104{
H. Peter Anvin903ea542018-06-14 19:21:12 -0700105 if (!r)
106 return;
107
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700108 if (r->layers) {
H. Peter Anvin903ea542018-06-14 19:21:12 -0700109 struct RAA **p = r->u.b.data;
110 size_t i;
111 for (i = 0; i < RAA_LAYERSIZE; i++)
112 raa_free(*p++);
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700113 }
114 nasm_free(r);
115}
116
H. Peter Anvin903ea542018-06-14 19:21:12 -0700117static const union intorptr *real_raa_read(struct RAA *r, raaindex posn)
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700118{
H. Peter Anvin903ea542018-06-14 19:21:12 -0700119 nasm_assert(posn <= (~(raaindex)0 >> 1));
120
121 if (unlikely(!r || posn > r->endposn))
H. Peter Anvine39202c2018-06-14 16:05:34 -0700122 return NULL; /* Beyond the end */
H. Peter Anvin903ea542018-06-14 19:21:12 -0700123
124 while (r->layers) {
125 size_t l = (posn >> r->shift) & RAA_LAYERMASK;
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700126 r = r->u.b.data[l];
127 if (!r)
H. Peter Anvine39202c2018-06-14 16:05:34 -0700128 return NULL; /* Not present */
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700129 }
H. Peter Anvin903ea542018-06-14 19:21:12 -0700130 return &r->u.l.data[posn & RAA_LAYERMASK];
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700131}
132
H. Peter Anvin903ea542018-06-14 19:21:12 -0700133int64_t raa_read(struct RAA *r, raaindex pos)
H. Peter Anvine39202c2018-06-14 16:05:34 -0700134{
135 const union intorptr *ip;
136
137 ip = real_raa_read(r, pos);
138 return ip ? ip->i : 0;
139}
140
H. Peter Anvin903ea542018-06-14 19:21:12 -0700141void *raa_read_ptr(struct RAA *r, raaindex pos)
H. Peter Anvine39202c2018-06-14 16:05:34 -0700142{
143 const union intorptr *ip;
144
145 ip = real_raa_read(r, pos);
146 return ip ? ip->p : NULL;
147}
148
149
150static struct RAA *
H. Peter Anvin903ea542018-06-14 19:21:12 -0700151real_raa_write(struct RAA *r, raaindex posn, union intorptr value)
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700152{
153 struct RAA *result;
154
H. Peter Anvin903ea542018-06-14 19:21:12 -0700155 nasm_assert(posn <= (~(raaindex)0 >> 1));
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700156
H. Peter Anvin903ea542018-06-14 19:21:12 -0700157 if (unlikely(!r)) {
158 /* Create a new top-level RAA */
159 r = raa_init_layer(posn, ilog2_64(posn)/RAA_LAYERSHIFT);
160 } else {
161 while (unlikely(r->endposn < posn)) {
162 /* We need to add layers to an existing RAA */
163 struct RAA *s = raa_init_layer(r->endposn, r->layers + 1);
164 s->u.b.data[0] = r;
165 r = s;
166 }
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700167 }
168
169 result = r;
170
H. Peter Anvin903ea542018-06-14 19:21:12 -0700171 while (r->layers) {
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700172 struct RAA **s;
H. Peter Anvin903ea542018-06-14 19:21:12 -0700173 size_t l = (posn >> r->shift) & RAA_LAYERMASK;
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700174 s = &r->u.b.data[l];
H. Peter Anvin903ea542018-06-14 19:21:12 -0700175 if (unlikely(!*s))
176 *s = raa_init_layer(posn, r->layers - 1);
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700177 r = *s;
178 }
H. Peter Anvin903ea542018-06-14 19:21:12 -0700179 r->u.l.data[posn & RAA_LAYERMASK] = value;
H. Peter Anvinfcb89092008-06-09 17:40:16 -0700180
181 return result;
182}
H. Peter Anvine39202c2018-06-14 16:05:34 -0700183
H. Peter Anvin903ea542018-06-14 19:21:12 -0700184struct RAA *raa_write(struct RAA *r, raaindex posn, int64_t value)
H. Peter Anvine39202c2018-06-14 16:05:34 -0700185{
186 union intorptr ip;
187
188 ip.i = value;
189 return real_raa_write(r, posn, ip);
190}
191
H. Peter Anvin903ea542018-06-14 19:21:12 -0700192struct RAA *raa_write_ptr(struct RAA *r, raaindex posn, void *value)
H. Peter Anvine39202c2018-06-14 16:05:34 -0700193{
194 union intorptr ip;
195
196 ip.p = value;
197 return real_raa_write(r, posn, ip);
198}