H. Peter Anvin | 9e6747c | 2009-06-28 17:13:04 -0700 | [diff] [blame] | 1 | /* ----------------------------------------------------------------------- * |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 2 | * |
| 3 | * Copyright 1996-2018 The NASM Authors - All Rights Reserved |
H. Peter Anvin | 9e6747c | 2009-06-28 17:13:04 -0700 | [diff] [blame] | 4 | * See the file AUTHORS included with the NASM distribution for |
| 5 | * the specific copyright holders. |
| 6 | * |
| 7 | * Redistribution and use in source and binary forms, with or without |
| 8 | * modification, are permitted provided that the following |
| 9 | * conditions are met: |
| 10 | * |
| 11 | * * Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * * Redistributions in binary form must reproduce the above |
| 14 | * copyright notice, this list of conditions and the following |
| 15 | * disclaimer in the documentation and/or other materials provided |
| 16 | * with the distribution. |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 17 | * |
H. Peter Anvin | 9e6747c | 2009-06-28 17:13:04 -0700 | [diff] [blame] | 18 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND |
| 19 | * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, |
| 20 | * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
| 21 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
| 22 | * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
| 23 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 24 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| 25 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 26 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 28 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR |
| 29 | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, |
| 30 | * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | * ----------------------------------------------------------------------- */ |
| 33 | |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 34 | #include "nasmlib.h" |
| 35 | #include "raa.h" |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 36 | #include "ilog2.h" |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 37 | |
H. Peter Anvin | 82a3082 | 2016-02-16 17:46:18 -0800 | [diff] [blame] | 38 | /* |
| 39 | * Routines to manage a dynamic random access array of int64_ts which |
| 40 | * may grow in size to be more than the largest single malloc'able |
| 41 | * chunk. |
| 42 | */ |
| 43 | |
#define RAA_LAYERSHIFT 11       /* 2^this many items per layer */
#define RAA_LAYERSIZE ((size_t)1 << RAA_LAYERSHIFT)     /* items per layer */
#define RAA_LAYERMASK (RAA_LAYERSIZE-1)                 /* in-layer index mask */
H. Peter Anvin | 82a3082 | 2016-02-16 17:46:18 -0800 | [diff] [blame] | 47 | |
typedef struct RAA RAA;
typedef union RAA_UNION RAA_UNION;
typedef struct RAA_LEAF RAA_LEAF;
typedef struct RAA_BRANCH RAA_BRANCH;

/*
 * One node of the RAA tree.  The same structure serves as both a
 * branch (interior node) and a leaf, distinguished by `layers'.
 */
struct RAA {
    /* Last position in this RAA */
    raaindex endposn;

    /*
     * Number of layers below this one to get to the real data. 0
     * means this structure is a leaf, holding RAA_LAYERSIZE real
     * data items; 1 and above mean it's a branch, holding
     * RAA_LAYERSIZE pointers to the next level branch or leaf
     * structures.
     */
    unsigned int layers;

    /*
     * Number of real data items spanned by one position in the
     * `data' array at this level. This number is 0 trivially, for
     * a leaf (level 0): for a level n branch it should be
     * n*RAA_LAYERSHIFT.
     */
    unsigned int shift;

    /*
     * The actual data: an array of items for a leaf, an array of
     * (possibly NULL) subtree pointers for a branch.
     */
    union RAA_UNION {
        struct RAA_LEAF {
            union intorptr data[RAA_LAYERSIZE];
        } l;
        struct RAA_BRANCH {
            struct RAA *data[RAA_LAYERSIZE];
        } b;
    } u;
};

/* Allocation sizes: a struct RAA with only the relevant union arm */
#define LEAFSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_LEAF))
#define BRANCHSIZ (sizeof(RAA)-sizeof(RAA_UNION)+sizeof(RAA_BRANCH))
| 89 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 90 | static struct RAA *raa_init_layer(raaindex posn, unsigned int layers) |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 91 | { |
| 92 | struct RAA *r; |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 93 | raaindex posmask; |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 94 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 95 | r = nasm_zalloc((layers == 0) ? LEAFSIZ : BRANCHSIZ); |
| 96 | r->shift = layers * RAA_LAYERSHIFT; |
| 97 | r->layers = layers; |
| 98 | posmask = ((raaindex)RAA_LAYERSIZE << r->shift) - 1; |
| 99 | r->endposn = posn | posmask; |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 100 | return r; |
| 101 | } |
| 102 | |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 103 | void raa_free(struct RAA *r) |
| 104 | { |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 105 | if (!r) |
| 106 | return; |
| 107 | |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 108 | if (r->layers) { |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 109 | struct RAA **p = r->u.b.data; |
| 110 | size_t i; |
| 111 | for (i = 0; i < RAA_LAYERSIZE; i++) |
| 112 | raa_free(*p++); |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 113 | } |
| 114 | nasm_free(r); |
| 115 | } |
| 116 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 117 | static const union intorptr *real_raa_read(struct RAA *r, raaindex posn) |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 118 | { |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 119 | nasm_assert(posn <= (~(raaindex)0 >> 1)); |
| 120 | |
| 121 | if (unlikely(!r || posn > r->endposn)) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 122 | return NULL; /* Beyond the end */ |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 123 | |
| 124 | while (r->layers) { |
| 125 | size_t l = (posn >> r->shift) & RAA_LAYERMASK; |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 126 | r = r->u.b.data[l]; |
| 127 | if (!r) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 128 | return NULL; /* Not present */ |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 129 | } |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 130 | return &r->u.l.data[posn & RAA_LAYERMASK]; |
H. Peter Anvin | fcb8909 | 2008-06-09 17:40:16 -0700 | [diff] [blame] | 131 | } |
| 132 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 133 | int64_t raa_read(struct RAA *r, raaindex pos) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 134 | { |
| 135 | const union intorptr *ip; |
| 136 | |
| 137 | ip = real_raa_read(r, pos); |
| 138 | return ip ? ip->i : 0; |
| 139 | } |
| 140 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 141 | void *raa_read_ptr(struct RAA *r, raaindex pos) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 142 | { |
| 143 | const union intorptr *ip; |
| 144 | |
| 145 | ip = real_raa_read(r, pos); |
| 146 | return ip ? ip->p : NULL; |
| 147 | } |
| 148 | |
| 149 | |
/*
 * Store `value' at index `posn', growing the tree as needed.
 * Returns the (possibly new) root pointer; the caller must use the
 * returned pointer from then on, since adding layers replaces the
 * root.
 */
static struct RAA *
real_raa_write(struct RAA *r, raaindex posn, union intorptr value)
{
    struct RAA *result;

    /* Index must fit in a non-negative raaindex */
    nasm_assert(posn <= (~(raaindex)0 >> 1));

    if (unlikely(!r)) {
        /* Create a new top-level RAA */
        r = raa_init_layer(posn, ilog2_64(posn)/RAA_LAYERSHIFT);
    } else {
        while (unlikely(r->endposn < posn)) {
            /* We need to add layers to an existing RAA */
            /* The old root becomes child 0 of each new layer,
             * preserving all previously stored indices. */
            struct RAA *s = raa_init_layer(r->endposn, r->layers + 1);
            s->u.b.data[0] = r;
            r = s;
        }
    }

    /* Remember the (possibly new) root to return to the caller */
    result = r;

    /* Descend to the leaf, allocating missing intermediate nodes */
    while (r->layers) {
        struct RAA **s;
        size_t l = (posn >> r->shift) & RAA_LAYERMASK;
        s = &r->u.b.data[l];
        if (unlikely(!*s))
            *s = raa_init_layer(posn, r->layers - 1);
        r = *s;
    }
    /* r is now the leaf containing posn; store the value */
    r->u.l.data[posn & RAA_LAYERMASK] = value;

    return result;
}
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 183 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 184 | struct RAA *raa_write(struct RAA *r, raaindex posn, int64_t value) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 185 | { |
| 186 | union intorptr ip; |
| 187 | |
| 188 | ip.i = value; |
| 189 | return real_raa_write(r, posn, ip); |
| 190 | } |
| 191 | |
H. Peter Anvin | 903ea54 | 2018-06-14 19:21:12 -0700 | [diff] [blame] | 192 | struct RAA *raa_write_ptr(struct RAA *r, raaindex posn, void *value) |
H. Peter Anvin | e39202c | 2018-06-14 16:05:34 -0700 | [diff] [blame] | 193 | { |
| 194 | union intorptr ip; |
| 195 | |
| 196 | ip.p = value; |
| 197 | return real_raa_write(r, posn, ip); |
| 198 | } |