#ifndef PLS_H
#define PLS_H

/**
 * Copyright (c) 2019, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *    * Neither the name of The Linux Foundation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdlib.h>
#include "AEEStdDef.h"
#include "AEEatomic.h"
#include "verify.h"
#include "HAP_farf.h"

struct PLS;

struct plskey {
   uintptr_t type;
   uintptr_t key;
};

struct PLS {
   struct PLS* next;
   struct plskey key;
   void (*dtor)(void* data);
   uint64_t data[1];
};

struct pls_table {
   struct PLS* lst;
   uint32_t uRefs;
   uint32_t primThread;
};

/**
 * Initialize PLS on every thread, and install the pls_thread_deinit
 * function into the thread's TLS.
 */
static __inline int pls_thread_init(struct pls_table* me, uintptr_t tid) {
   if(tid == me->primThread) {
      return 0;
   }
   if(0 == atomic_CompareOrAdd(&me->uRefs, 0, 1)) {
      return -1;
   }
   return 0;
}

/* Call this constructor before the first thread is created, passing the
 * first thread's ID.
 */
static __inline void pls_ctor(struct pls_table* me, uintptr_t primThread) {
   me->uRefs = 1;
   me->primThread = primThread;
}

/**
 * Drop a thread's reference; when the last reference is released, run
 * every destructor and free the list.
 */
static __inline struct pls_table* pls_thread_deinit(struct pls_table* me) {
   if(me && 0 != me->uRefs && 0 == atomic_Add(&me->uRefs, -1)) {
      struct PLS* lst, *next;
      lst = me->lst;
      me->lst = 0;
      while(lst) {
         next = lst->next;
         if(lst->dtor) {
            FARF(HIGH, "pls dtor %p", lst->dtor);
            lst->dtor((void*)lst->data);
         }
         free(lst);
         lst = next;
      }
      return me;
   }
   return 0;
}
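
/*
 * A minimal lifecycle sketch (illustrative only): the g_pls table, the
 * hypothetical thread_self() helper, and the error handling below are
 * assumptions, not part of this header.
 *
 *   static struct pls_table g_pls;
 *
 *   // once, on the primary thread, before any other thread exists
 *   pls_ctor(&g_pls, thread_self());
 *
 *   // on every other thread, before that thread uses the table
 *   if(0 != pls_thread_init(&g_pls, thread_self())) {
 *      // table already shut down; do not use it
 *   }
 *
 *   // at thread exit; the last reference out runs all the destructors
 *   pls_thread_deinit(&g_pls);
 */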

/**
 * adds a new key to the local storage, overriding
 * any previous value at the key. Overriding the key
 * does not cause the destructor to run.
 *
 * @param type, type part of the key to be used for lookup;
 *              these should be static addresses.
 * @param key, the key to be used for lookup
 * @param size, the size of the data
 * @param ctor, constructor that takes a context and memory of size
 * @param ctx, constructor context passed as the first argument to ctor
 * @param dtor, destructor to run at pls shutdown
 * @param ppo, output data
 * @retval, 0 for success
 */

static __inline int pls_add(struct pls_table* me, uintptr_t type, uintptr_t key, int size, int (*ctor)(void* ctx, void* data), void* ctx, void (*dtor)(void* data), void** ppo) {
   int nErr = 0;
   struct PLS* pls = 0;
   struct PLS* prev;
   VERIFY(me->uRefs != 0);
   VERIFY(0 != (pls = (struct PLS*)calloc(1, size + sizeof(*pls) - sizeof(pls->data))));
   if(ctor) {
      VERIFY(0 == ctor(ctx, (void*)pls->data));
   }
   pls->dtor = dtor;
   pls->key.type = type;
   pls->key.key = key;
   do {
      pls->next = me->lst;
      prev = (struct PLS*)atomic_CompareAndExchangeUP((uintptr_t*)&me->lst, (uintptr_t)pls, (uintptr_t)pls->next);
   } while(prev != pls->next);
   if(ppo) {
      *ppo = (void*)pls->data;
   }
   FARF(HIGH, "pls added %p", dtor);
bail:
   if(nErr && pls) {
      free(pls);
   }
   return nErr;
}
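
/*
 * A usage sketch for pls_add (illustrative only): struct module_ctx, the
 * module_init/module_free helpers, module_key, and g_pls are assumptions,
 * not part of this header. The address of a static is used as the type
 * key, as the comment above recommends.
 *
 *   struct module_ctx { int fd; };
 *   static int module_key;
 *
 *   static int module_init(void* ctx, void* data) {
 *      struct module_ctx* m = (struct module_ctx*)data;
 *      m->fd = -1;
 *      (void)ctx;
 *      return 0;
 *   }
 *   static void module_free(void* data) {
 *      // release whatever module_init acquired
 *   }
 *
 *   struct module_ctx* m = 0;
 *   if(0 == pls_add(&g_pls, (uintptr_t)&module_key, 0, sizeof(*m),
 *                   module_init, 0, module_free, (void**)&m)) {
 *      // m now points at storage owned by the pls table
 *   }
 */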

static __inline int pls_lookup(struct pls_table* me, uintptr_t type, uintptr_t key, void** ppo);

/**
 * like pls_add, but adds only one item even if two threads race to add
 * at the same time; returns the existing item if it is already there,
 * otherwise tries to add it. Note that ctor may be called twice.
 * Callers should avoid calling pls_add with the same key, which would
 * override the singleton.
 */
static __inline int pls_add_lookup_singleton(struct pls_table* me, uintptr_t type, uintptr_t key, int size, int (*ctor)(void* ctx, void* data), void* ctx, void (*dtor)(void* data), void** ppo) {
   int nErr = 0;
   struct PLS* pls = 0;
   struct PLS* prev;
   if(0 == pls_lookup(me, type, key, ppo)) {
      return 0;
   }
   VERIFY(me->uRefs != 0);
   VERIFY(0 != (pls = (struct PLS*)calloc(1, size + sizeof(*pls) - sizeof(pls->data))));
   if(ctor) {
      VERIFY(0 == ctor(ctx, (void*)pls->data));
   }
   pls->dtor = dtor;
   pls->key.type = type;
   pls->key.key = key;
   do {
      pls->next = me->lst;
      if(0 == pls_lookup(me, type, key, ppo)) {
         if(pls->dtor) {
            pls->dtor((void*)pls->data);
         }
         free(pls);
         return 0;
      }
      prev = (struct PLS*)atomic_CompareAndExchangeUP((uintptr_t*)&me->lst, (uintptr_t)pls, (uintptr_t)pls->next);
   } while(prev != pls->next);
   if(ppo) {
      *ppo = (void*)pls->data;
   }
   FARF(HIGH, "pls added %p", dtor);
bail:
   if(nErr && pls) {
      free(pls);
   }
   return nErr;
}
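
/*
 * A minimal singleton sketch (illustrative only): logger_init,
 * logger_deinit, logger_key, and g_pls are assumptions, not part of this
 * header. Every caller passes the same ctor/dtor; only one add wins and
 * later callers get the already-stored instance back.
 *
 *   static int logger_key;
 *   void* logger = 0;
 *   if(0 == pls_add_lookup_singleton(&g_pls, (uintptr_t)&logger_key, 0,
 *                                    128, logger_init, 0, logger_deinit,
 *                                    &logger)) {
 *      // logger is the single shared instance for this table
 *   }
 */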

/**
 * finds the last data pointer added for key to the local storage
 *
 * @param key, the key to be used for lookup
 * @param ppo, output data
 * @retval, 0 for success
 */

static __inline int pls_lookup(struct pls_table* me, uintptr_t type, uintptr_t key, void** ppo) {
   struct PLS* lst;
   for(lst = me->lst; me->uRefs != 0 && lst != 0; lst = lst->next) {
      if(lst->key.type == type && lst->key.key == key) {
         if(ppo) {
            *ppo = (void*)lst->data;
         }
         return 0;
      }
   }
   return -1;
}
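
/*
 * A lookup sketch (illustrative only): module_key, struct module_ctx, and
 * g_pls are the same assumed names used in the pls_add sketch above.
 *
 *   struct module_ctx* m = 0;
 *   if(0 == pls_lookup(&g_pls, (uintptr_t)&module_key, 0, (void**)&m)) {
 *      // m is the most recently added value for this key
 *   } else {
 *      // no value stored for this key
 *   }
 */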

#endif //PLS_H