/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <linux/bit_spinlock.h>

#include <linux/rhashtable-types.h>

/*
 * Objects in an rhashtable have an embedded struct rhash_head
 * which is linked into a hash chain from the hash table - or one
 * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
 * the least significant bit set but otherwise stores the address of
 * the hash bucket.  This allows us to be sure we've found the end
 * of the right list.
 * The value stored in the hash bucket has BIT(1) used as a lock bit.
 * This bit must be atomically set before any changes are made to
 * the chain.  To avoid dereferencing this pointer without clearing
 * the bit first, we use an opaque 'struct rhash_lock_head *' for the
 * pointer stored in the bucket.  This struct needs to be defined so
 * that rcu_dereference() works on it, but it has no content so a
 * cast is needed for it to be useful.  This ensures it isn't
 * used by mistake without clearing the lock bit first.
 */
struct rhash_lock_head {};

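/*
 * A minimal sketch of the resulting bucket-pointer encoding (illustrative
 * only, not part of the API).  Assuming at least 4-byte alignment, the two
 * low bits of a real rhash_head pointer are zero, so a bucket word can be:
 *
 *	0b....00  pointer to the first rhash_head in the chain
 *	0b.....1  bit 0 set: nulls marker, end of chain
 *	0b...1x.  BIT(1) set: bucket currently locked by a writer
 *
 * Readers strip BIT(1) with rht_ptr() before walking the chain; writers
 * set it via bit_spin_lock() in rht_lock() below.
 */
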
/* Maximum chain length before rehash
 *
 * The maximum (not average) chain length grows with the size of the hash
 * table, at a rate of (log N)/(log log N).
 *
 * The value of 16 is selected so that even if the hash table grew to
 * 2^32 you would not expect the maximum chain length to exceed it
 * unless we are under attack (or extremely unlucky).
 *
 * As this limit is only to detect attacks, we don't need to set it to a
 * lower value as you'd need the chain length to vastly exceed 16 to have
 * any real effect on the system.
 */
#define RHT_ELASTICITY	16u

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @nest: Number of bits of first-level nested table.
 * @hash_rnd: Random seed to fold into hash
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @dep_map: Lockdep map for the per-bucket bit spinlocks
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		nest;
	u32			hash_rnd;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct lockdep_map	dep_map;

	struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};

/*
 * NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
 * the msb.
 * We give it an address, in which the bottom 2 bits are
 * always 0, and the msb might be significant.
 * So we shift the address down one bit to align with
 * expectations and avoid losing a significant bit.
 */
#define	RHT_NULLS_MARKER(ptr)	\
	((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr)	\
	((ptr) = RHT_NULLS_MARKER(&(ptr)))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

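/*
 * Sketch of the nulls-marker round trip (illustrative only):
 *
 *	struct rhash_head *head;
 *
 *	INIT_RHT_NULLS_HEAD(head);	// head = RHT_NULLS_MARKER(&head)
 *	WARN_ON(!rht_is_a_nulls(head));	// bit 0 is set, so end-of-chain
 *
 * Because the marker encodes the bucket's own address, a lockless lookup
 * that walks off the end of a chain can verify it ended on the bucket it
 * started from, and retry if the object migrated during a resize (see
 * __rhashtable_lookup() below).
 */
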
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return hash & (tbl->size - 1);
}

static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
	const void *key, const struct rhashtable_params params,
	unsigned int hash_rnd)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32), hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, hash_rnd);
		else
			hash = jhash(key, key_len, hash_rnd);
	}

	return hash;
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd);

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							     ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

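/*
 * Sketch of how constant params pick a hasher at compile time, using a
 * hypothetical object type (my_obj and my_params are not part of this API):
 *
 *	struct my_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params my_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct my_obj, key),
 *		.head_offset	= offsetof(struct my_obj, node),
 *	};
 *
 * Here params.key_len is a compile-time constant and a multiple of
 * sizeof(u32), and no custom hashfn is supplied, so rht_key_get_hash()
 * collapses to a single jhash2() call with no runtime branches.
 */
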
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

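/*
 * Worked example for the two thresholds above: with tbl->size == 1024,
 * growth is scheduled once nelems exceeds 768 (75% load) and shrinking
 * once nelems falls below 307 (30% load), subject to p.max_size and
 * p.min_size respectively.
 */
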
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
		(!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) >= ht->max_elems;
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj);

void rhashtable_walk_enter(struct rhashtable *ht,
			   struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU);

static inline void rhashtable_walk_start(struct rhashtable_iter *iter)
{
	(void)rhashtable_walk_start_check(iter);
}

void *rhashtable_walk_next(struct rhashtable_iter *iter);
void *rhashtable_walk_peek(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
						 unsigned int hash);
struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
						   unsigned int hash);
struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
							struct bucket_table *tbl,
							unsigned int hash);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

static inline struct rhash_lock_head __rcu *const *rht_bucket(
	const struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_var(
	struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
				     &tbl->buckets[hash];
}

static inline struct rhash_lock_head __rcu **rht_bucket_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
				     &tbl->buckets[hash];
}

/*
 * We lock a bucket by setting BIT(1) in the pointer - this is always
 * zero in real pointers and in the nulls marker.
 * bit_spin_locks do not handle contention well, but the whole point
 * of the hashtable design is to achieve minimum per-bucket contention.
 * A nested hash table might not have a bucket pointer.  In that case
 * we cannot get a lock.  For remove and replace the bucket cannot be
 * interesting (its chain must be empty) and doesn't need locking.
 * For insert we allocate the bucket if this is the last bucket_table,
 * and then take the lock.
 * Sometimes we unlock a bucket by writing a new pointer there.  In that
 * case we don't need to unlock, but we do need to reset state such as
 * local_bh.  For that we have rht_assign_unlock().  As rcu_assign_pointer()
 * provides the same release semantics that bit_spin_unlock() provides,
 * this is safe.
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */

static inline void rht_lock(struct bucket_table *tbl,
			    struct rhash_lock_head **bkt)
{
	local_bh_disable();
	bit_spin_lock(1, (unsigned long *)bkt);
	lock_map_acquire(&tbl->dep_map);
}

static inline void rht_lock_nested(struct bucket_table *tbl,
				   struct rhash_lock_head **bucket,
				   unsigned int subclass)
{
	local_bh_disable();
	bit_spin_lock(1, (unsigned long *)bucket);
	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
}

static inline void rht_unlock(struct bucket_table *tbl,
			      struct rhash_lock_head **bkt)
{
	lock_map_release(&tbl->dep_map);
	bit_spin_unlock(1, (unsigned long *)bkt);
	local_bh_enable();
}

/*
 * Where 'bkt' is a bucket and might be locked:
 *   rht_ptr() dereferences that pointer and clears the lock bit.
 *   rht_ptr_exclusive() dereferences in a context where exclusive
 *            access is guaranteed, such as when destroying the table.
 */
static inline struct rhash_head *rht_ptr(
	struct rhash_lock_head __rcu * const *bkt,
	struct bucket_table *tbl,
	unsigned int hash)
{
	const struct rhash_lock_head *p =
		rht_dereference_bucket_rcu(*bkt, tbl, hash);

	return (void *)(((unsigned long)p) & ~BIT(1));
}

static inline struct rhash_head *rht_ptr_exclusive(
	struct rhash_lock_head __rcu * const *bkt)
{
	const struct rhash_lock_head *p =
		rcu_dereference_protected(*bkt, 1);

	return (void *)(((unsigned long)p) & ~BIT(1));
}

static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;

	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(1)));
}

static inline void rht_assign_unlock(struct bucket_table *tbl,
				     struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;

	lock_map_release(&tbl->dep_map);
	rcu_assign_pointer(*p, obj);
	preempt_enable();
	__release(bitlock);
	local_bh_enable();
}

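/*
 * Putting the primitives above together, a typical writer does roughly
 * the following (a sketch of the insert fast path below, not a public API):
 *
 *	bkt = rht_bucket_insert(ht, tbl, hash);
 *	rht_lock(tbl, bkt);			// set BIT(1), disable BH
 *	head = rht_ptr(bkt, tbl, hash);		// read chain, lock bit masked
 *	RCU_INIT_POINTER(obj->next, head);
 *	rht_assign_unlock(tbl, bkt, obj);	// publish + unlock in one store
 */
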
/**
 * rht_for_each_from - iterate over hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_from(pos, head, tbl, hash) \
	for (pos = head; \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash),  \
			  tbl, hash)

/**
 * rht_for_each_entry_from - iterate over hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member)	\
	for (pos = head;						\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_from(tpos, pos,				\
				rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
				tbl, hash, member)

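/*
 * Illustrative use of rht_for_each_entry(), reusing the hypothetical
 * my_obj from the earlier sketch (the caller must hold the bucket lock
 * or otherwise exclude concurrent mutation):
 *
 *	struct my_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rht_for_each_entry(obj, pos, tbl, hash, node)
 *		pr_info("key %u\n", obj->key);
 */
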
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	      \
	for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash),		      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	      \
	     pos = next,						      \
	     next = !rht_is_a_nulls(pos) ?				      \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_from - iterate over rcu hash chain from given head
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_from(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = head;						\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)			\
	for (({barrier(); }),					\
	     pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash);	\
	     !rht_is_a_nulls(pos);				\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the &struct rhash_head to start from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = head;						    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		   \
	rht_for_each_entry_rcu_from(tpos, pos,				   \
				    rht_ptr(rht_bucket(tbl, hash),	   \
					    tbl, hash),			   \
				    tbl, hash, member)

/**
 * rhl_for_each_rcu - iterate over rcu hash table list
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_rcu(pos, list)					\
	for (pos = list; pos; pos = rcu_dereference_raw(pos->next))

/**
 * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhlist_head to use as a loop cursor.
 * @list:	the head of the list
 * @member:	name of the &struct rhlist_head within the hashable struct.
 *
 * This hash chain list-traversal primitive should be used on the
 * list returned by rhltable_lookup.
 */
#define rhl_for_each_entry_rcu(tpos, pos, list, member)			\
	for (pos = list; pos && rht_entry(tpos, pos, member);		\
	     pos = rcu_dereference_raw(pos->next))

static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/* Internal function, do not use. */
static inline struct rhash_head *__rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu * const *bkt;
	struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	bkt = rht_bucket(tbl, hash);
	do {
		rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
			if (params.obj_cmpfn ?
			    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
			    rhashtable_compare(&arg, rht_obj(ht, he)))
				continue;
			return he;
		}
		/* An object might have been moved to a different hash chain,
		 * while we walk along it - better check and retry.
		 */
	} while (he != RHT_NULLS_MARKER(bkt));

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;

	return NULL;
}

/**
 * rhashtable_lookup - search hash table
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(ht, key, params);

	return he ? rht_obj(ht, he) : NULL;
}

/**
 * rhashtable_lookup_fast - search hash table, without caller holding
 *			    the RCU read lock
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Only use this function when you have other mechanisms guaranteeing
 * that the object won't go away after the RCU read lock is released.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	void *obj;

	rcu_read_lock();
	obj = rhashtable_lookup(ht, key, params);
	rcu_read_unlock();

	return obj;
}

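/*
 * Illustrative lookup, again with the hypothetical my_obj/my_params
 * (my_ht is an assumed struct rhashtable initialised elsewhere):
 *
 *	u32 key = 42;
 *	struct my_obj *obj;
 *
 *	obj = rhashtable_lookup_fast(&my_ht, &key, my_params);
 *	if (obj)
 *		pr_info("found key %u\n", obj->key);
 */
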
/**
 * rhltable_lookup - search hash list table
 * @hlt:	hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. All matching entries are returned
 * in a list.
 *
 * This must only be called under the RCU read lock.
 *
 * Returns the list of entries that match the given key.
 */
static inline struct rhlist_head *rhltable_lookup(
	struct rhltable *hlt, const void *key,
	const struct rhashtable_params params)
{
	struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);

	return he ? container_of(he, struct rhlist_head, rhead) : NULL;
}

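/*
 * Sketch of a duplicate-key lookup with an rhltable (my_dup_obj and
 * my_hlt are hypothetical; 'node' embeds a struct rhlist_head):
 *
 *	struct rhlist_head *list, *pos;
 *	struct my_dup_obj *obj;
 *
 *	rcu_read_lock();
 *	list = rhltable_lookup(&my_hlt, &key, my_params);
 *	rhl_for_each_entry_rcu(obj, pos, list, node)
 *		pr_info("match: %u\n", obj->key);
 *	rcu_read_unlock();
 */
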
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in the hash table if there
 * is a clash, otherwise it returns an error via ERR_PTR().
 */
static inline void *__rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params, bool rhlist)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_lock_head __rcu **bkt;
	struct rhash_head __rcu **pprev;
	struct bucket_table *tbl;
	struct rhash_head *head;
	unsigned int hash;
	int elasticity;
	void *data;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = rht_head_hashfn(ht, tbl, obj, params);
	elasticity = RHT_ELASTICITY;
	bkt = rht_bucket_insert(ht, tbl, hash);
	data = ERR_PTR(-ENOMEM);
	if (!bkt)
		goto out;
	pprev = NULL;
	rht_lock(tbl, bkt);

	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
		rht_unlock(tbl, bkt);
		rcu_read_unlock();
		return rhashtable_insert_slow(ht, key, obj);
	}

	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *plist;
		struct rhlist_head *list;

		elasticity--;
		if (!key ||
		    (params.obj_cmpfn ?
		     params.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		data = rht_obj(ht, head);

		if (!rhlist)
			goto out_unlock;

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev) {
			rcu_assign_pointer(*pprev, obj);
			rht_unlock(tbl, bkt);
		} else
			rht_assign_unlock(tbl, bkt, obj);
		data = NULL;
		goto out;
	}

	if (elasticity <= 0)
		goto slow_path;

	data = ERR_PTR(-E2BIG);
	if (unlikely(rht_grow_above_max(ht, tbl)))
		goto out_unlock;

	if (unlikely(rht_grow_above_100(ht, tbl)))
		goto slow_path;

	/* Inserting at head of list makes unlocking free. */
	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	atomic_inc(&ht->nelems);
	rht_assign_unlock(tbl, bkt, obj);

	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	data = NULL;
out:
	rcu_read_unlock();

	return data;

out_unlock:
	rht_unlock(tbl, bkt);
	goto out;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	void *ret;

	ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	return ret == NULL ? 0 : -EEXIST;
}

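/*
 * Illustrative insertion with the hypothetical my_obj/my_params:
 *
 *	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
 *	int err;
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, my_params);
 *	if (err == -EEXIST)
 *		kfree(obj);	// an object with this key already exists
 *
 * -E2BIG and -ENOMEM are also possible; see __rhashtable_insert_fast().
 */
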
/**
 * rhltable_insert_key - insert object into hash list table
 * @hlt:	hash list table
 * @key:	the pointer to the key
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert_key(
	struct rhltable *hlt, const void *key, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
						params, true));
}

/**
 * rhltable_insert - insert object into hash list table
 * @hlt:	hash list table
 * @list:	pointer to hash list head inside object
 * @params:	hash table parameters
 *
 * Will take the per bucket bitlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if residency in the
 * table grows beyond 75%.
 */
static inline int rhltable_insert(
	struct rhltable *hlt, struct rhlist_head *list,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(&hlt->ht, &list->rhead);

	key += params.key_offset;

	return rhltable_insert_key(hlt, key, list, params);
}

| 870 | /** |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 871 | * rhashtable_lookup_insert_fast - lookup and insert object into hash table |
| 872 | * @ht: hash table |
| 873 | * @obj: pointer to hash head inside object |
| 874 | * @params: hash table parameters |
| 875 | * |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 876 | * This lookup function may only be used for fixed key hash table (key_len |
| 877 | * parameter set). It will BUG() if used inappropriately. |
| 878 | * |
| 879 | * It is safe to call this function from atomic context. |
| 880 | * |
NeilBrown | 0c6f69a | 2018-04-24 08:29:13 +1000 | [diff] [blame] | 881 | * Will trigger an automatic deferred table resizing if residency in the |
| 882 | * table grows beyond 70%. |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 883 | */ |
| 884 | static inline int rhashtable_lookup_insert_fast( |
| 885 | struct rhashtable *ht, struct rhash_head *obj, |
| 886 | const struct rhashtable_params params) |
| 887 | { |
| 888 | const char *key = rht_obj(ht, obj); |
Pablo Neira Ayuso | 5ca8cc5 | 2016-08-24 12:31:31 +0200 | [diff] [blame] | 889 | void *ret; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 890 | |
| 891 | BUG_ON(ht->p.obj_hashfn); |
| 892 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 893 | ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, |
| 894 | false); |
Pablo Neira Ayuso | 5ca8cc5 | 2016-08-24 12:31:31 +0200 | [diff] [blame] | 895 | if (IS_ERR(ret)) |
| 896 | return PTR_ERR(ret); |
| 897 | |
| 898 | return ret == NULL ? 0 : -EEXIST; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 899 | } |
| 900 | |
| 901 | /** |
Andreas Gruenbacher | f9fe1c1 | 2017-03-18 00:36:15 +0100 | [diff] [blame] | 902 | * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table |
| 903 | * @ht: hash table |
| 904 | * @obj: pointer to hash head inside object |
| 905 | * @params: hash table parameters |
| 906 | * |
| 907 | * Just like rhashtable_lookup_insert_fast(), but this function returns the |
| 908 | * existing object if one is present, NULL if there was none and the |
| 909 | * insertion succeeded, or an ERR_PTR on failure. |
| 910 | */ |
| 911 | static inline void *rhashtable_lookup_get_insert_fast( |
| 912 | struct rhashtable *ht, struct rhash_head *obj, |
| 913 | const struct rhashtable_params params) |
| 914 | { |
| 915 | const char *key = rht_obj(ht, obj); |
| 916 | |
| 917 | BUG_ON(ht->p.obj_hashfn); |
| 918 | |
| 919 | return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params, |
| 920 | false); |
| 921 | } |
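/*
 * A sketch of handling the three possible outcomes (hypothetical
 * object type "struct entry", params as in the earlier sketches):
 *
 *	struct entry *old;
 *
 *	old = rhashtable_lookup_get_insert_fast(&ht, &e->node, entry_params);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// e.g. -ENOMEM or -EBUSY
 *	if (old) {
 *		kfree(e);		// an existing object won; reuse it
 *		e = old;
 *	}
 *	// "e" is now the object hashed under this key
 */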
| 922 | |
| 923 | /** |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 924 | * rhashtable_lookup_insert_key - search and insert object into hash table |
| 925 | * with explicit key |
| 926 | * @ht: hash table |
| 927 | * @key: key |
| 928 | * @obj: pointer to hash head inside object |
| 929 | * @params: hash table parameters |
| 930 | * |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 931 | * Lookups may occur in parallel with hashtable mutations and resizing. |
| 932 | * |
NeilBrown | 0c6f69a | 2018-04-24 08:29:13 +1000 | [diff] [blame] | 933 | * Will trigger an automatic deferred table resizing if residency in the |
| 934 | * table grows beyond 70%. |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 935 | * |
| 936 | * Returns zero on success, -EEXIST if an object with an identical key |
| | * is already hashed, or another negative errno on failure. |
| 937 | */ |
| 938 | static inline int rhashtable_lookup_insert_key( |
| 939 | struct rhashtable *ht, const void *key, struct rhash_head *obj, |
| 940 | const struct rhashtable_params params) |
| 941 | { |
Pablo Neira Ayuso | 5ca8cc5 | 2016-08-24 12:31:31 +0200 | [diff] [blame] | 942 | void *ret; |
| 943 | |
| 944 | BUG_ON(!ht->p.obj_hashfn || !key); |
| 945 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 946 | ret = __rhashtable_insert_fast(ht, key, obj, params, false); |
Pablo Neira Ayuso | 5ca8cc5 | 2016-08-24 12:31:31 +0200 | [diff] [blame] | 947 | if (IS_ERR(ret)) |
| 948 | return PTR_ERR(ret); |
| 949 | |
| 950 | return ret == NULL ? 0 : -EEXIST; |
| 951 | } |
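/*
 * A sketch of a table keyed by a variable-length string, which is the
 * typical reason to use an explicit key: key_len is unset and
 * obj_hashfn/obj_cmpfn are provided instead.  All names below are
 * hypothetical; svc_params would set .obj_hashfn = svc_hashfn,
 * .obj_cmpfn = svc_cmpfn, and a .hashfn that hashes bare keys
 * consistently with obj_hashfn.
 *
 *	static u32 svc_hashfn(const void *data, u32 len, u32 seed)
 *	{
 *		const struct service *svc = data;
 *
 *		return jhash(svc->name, strlen(svc->name), seed);
 *	}
 *
 *	static int svc_cmpfn(struct rhashtable_compare_arg *arg,
 *			     const void *obj)
 *	{
 *		const struct service *svc = obj;
 *
 *		return strcmp(arg->key, svc->name);
 *	}
 *
 *	err = rhashtable_lookup_insert_key(&ht, svc->name, &svc->node,
 *					   svc_params);
 */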
| 952 | |
| 953 | /** |
| 954 | * rhashtable_lookup_get_insert_key - lookup and insert object into hash table |
| 955 | * @ht: hash table |
| 956 | * @key: key |
| 957 | * @obj: pointer to hash head inside object |
| 958 | * @params: hash table parameters |
| 959 | * |
| 960 | * Just like rhashtable_lookup_insert_key(), but this function returns the |
| 961 | * existing object if one is present, NULL if there was none and the |
| 962 | * insertion succeeded, or an ERR_PTR on failure. |
| 963 | */ |
| 964 | static inline void *rhashtable_lookup_get_insert_key( |
| 965 | struct rhashtable *ht, const void *key, struct rhash_head *obj, |
| 966 | const struct rhashtable_params params) |
| 967 | { |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 968 | BUG_ON(!ht->p.obj_hashfn || !key); |
| 969 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 970 | return __rhashtable_insert_fast(ht, key, obj, params, false); |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 971 | } |
| 972 | |
Thomas Graf | ac833bd | 2015-03-24 14:18:18 +0100 | [diff] [blame] | 973 | /* Internal function, please use rhashtable_remove_fast() instead */ |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 974 | static inline int __rhashtable_remove_fast_one( |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 975 | struct rhashtable *ht, struct bucket_table *tbl, |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 976 | struct rhash_head *obj, const struct rhashtable_params params, |
| 977 | bool rhlist) |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 978 | { |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 979 | struct rhash_lock_head __rcu **bkt; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 980 | struct rhash_head __rcu **pprev; |
| 981 | struct rhash_head *he; |
Thomas Graf | 299e5c3 | 2015-03-24 14:18:17 +0100 | [diff] [blame] | 982 | unsigned int hash; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 983 | int err = -ENOENT; |
| 984 | |
| 985 | hash = rht_head_hashfn(ht, tbl, obj, params); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 986 | bkt = rht_bucket_var(tbl, hash); |
| 987 | if (!bkt) |
| 988 | return -ENOENT; |
| 989 | pprev = NULL; |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 990 | rht_lock(tbl, bkt); |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 991 | |
NeilBrown | adc6a3a | 2019-04-12 11:52:08 +1000 | [diff] [blame] | 992 | rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 993 | struct rhlist_head *list; |
| 994 | |
| 995 | list = container_of(he, struct rhlist_head, rhead); |
| 996 | |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 997 | if (he != obj) { |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 998 | struct rhlist_head __rcu **lpprev; |
| 999 | |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1000 | pprev = &he->next; |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1001 | |
| 1002 | if (!rhlist) |
| 1003 | continue; |
| 1004 | |
| 1005 | do { |
| 1006 | lpprev = &list->next; |
| 1007 | list = rht_dereference_bucket(list->next, |
| 1008 | tbl, hash); |
| 1009 | } while (list && obj != &list->rhead); |
| 1010 | |
| 1011 | if (!list) |
| 1012 | continue; |
| 1013 | |
| 1014 | list = rht_dereference_bucket(list->next, tbl, hash); |
| 1015 | RCU_INIT_POINTER(*lpprev, list); |
| 1016 | err = 0; |
| 1017 | break; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1018 | } |
| 1019 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1020 | obj = rht_dereference_bucket(obj->next, tbl, hash); |
| 1021 | err = 1; |
| 1022 | |
| 1023 | if (rhlist) { |
| 1024 | list = rht_dereference_bucket(list->next, tbl, hash); |
| 1025 | if (list) { |
| 1026 | RCU_INIT_POINTER(list->rhead.next, obj); |
| 1027 | obj = &list->rhead; |
| 1028 | err = 0; |
| 1029 | } |
| 1030 | } |
| 1031 | |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1032 | if (pprev) { |
| 1033 | rcu_assign_pointer(*pprev, obj); |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1034 | rht_unlock(tbl, bkt); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1035 | } else { |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1036 | rht_assign_unlock(tbl, bkt, obj); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1037 | } |
| 1038 | goto unlocked; |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1039 | } |
| 1040 | |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1041 | rht_unlock(tbl, bkt); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1042 | unlocked: |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1043 | if (err > 0) { |
| 1044 | atomic_dec(&ht->nelems); |
| 1045 | if (unlikely(ht->p.automatic_shrinking && |
| 1046 | rht_shrink_below_30(ht, tbl))) |
| 1047 | schedule_work(&ht->run_work); |
| 1048 | err = 0; |
| 1049 | } |
| 1050 | |
| 1051 | return err; |
| 1052 | } |
| 1053 | |
| 1054 | /* Internal function, please use rhashtable_remove_fast() instead */ |
| 1055 | static inline int __rhashtable_remove_fast( |
| 1056 | struct rhashtable *ht, struct rhash_head *obj, |
| 1057 | const struct rhashtable_params params, bool rhlist) |
| 1058 | { |
| 1059 | struct bucket_table *tbl; |
| 1060 | int err; |
| 1061 | |
| 1062 | rcu_read_lock(); |
| 1063 | |
| 1064 | tbl = rht_dereference_rcu(ht->tbl, ht); |
| 1065 | |
| 1066 | /* Because we have already taken (and released) the bucket |
| 1067 | * lock in old_tbl, if we find that future_tbl is not yet |
| 1068 | * visible then we are guaranteed that the entry, if it |
| 1069 | * exists, is still in the old tbl. |
| 1070 | */ |
| 1071 | while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params, |
| 1072 | rhlist)) && |
| 1073 | (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) |
| 1074 | ; |
| 1075 | |
| 1076 | rcu_read_unlock(); |
| 1077 | |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1078 | return err; |
| 1079 | } |
| 1080 | |
| 1081 | /** |
| 1082 | * rhashtable_remove_fast - remove object from hash table |
| 1083 | * @ht: hash table |
| 1084 | * @obj: pointer to hash head inside object |
| 1085 | * @params: hash table parameters |
| 1086 | * |
| 1087 | * Since the hash chain is singly linked, the removal operation needs to |
| 1088 | * walk the bucket chain. Removal is thus considerably slow if the |
| 1089 | * hash table is not correctly sized. |
| 1090 | * |
NeilBrown | 0c6f69a | 2018-04-24 08:29:13 +1000 | [diff] [blame] | 1091 | * Will automatically shrink the table if permitted when residency drops |
| 1092 | * below 30%. |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1093 | * |
| 1094 | * Returns zero on success, -ENOENT if the entry could not be found. |
| 1095 | */ |
| 1096 | static inline int rhashtable_remove_fast( |
| 1097 | struct rhashtable *ht, struct rhash_head *obj, |
| 1098 | const struct rhashtable_params params) |
| 1099 | { |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1100 | return __rhashtable_remove_fast(ht, obj, params, false); |
| 1101 | } |
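/*
 * Removal sketch: because concurrent lookups may still be traversing
 * the object under RCU, it must not be freed until a grace period has
 * elapsed.  "struct entry" (with an embedded rcu_head) is hypothetical.
 *
 *	struct entry {
 *		int			id;
 *		struct rhash_head	node;
 *		struct rcu_head		rcu;
 *	};
 *
 *	if (rhashtable_remove_fast(&ht, &e->node, entry_params) == 0)
 *		kfree_rcu(e, rcu);
 */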
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1102 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1103 | /** |
| 1104 | * rhltable_remove - remove object from hash list table |
| 1105 | * @hlt: hash list table |
| 1106 | * @list: pointer to hash list head inside object |
| 1107 | * @params: hash table parameters |
| 1108 | * |
| 1109 | * Since the hash chain is singly linked, the removal operation needs to |
| 1110 | * walk the bucket chain. Removal is thus considerably slow if the |
| 1111 | * hash table is not correctly sized. |
| 1112 | * |
NeilBrown | 0c6f69a | 2018-04-24 08:29:13 +1000 | [diff] [blame] | 1113 | * Will automatically shrink the table if permitted when residency drops |
| 1114 | * below 30%. |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1115 | * |
| 1116 | * Returns zero on success, -ENOENT if the entry could not be found. |
| 1117 | */ |
| 1118 | static inline int rhltable_remove( |
| 1119 | struct rhltable *hlt, struct rhlist_head *list, |
| 1120 | const struct rhashtable_params params) |
| 1121 | { |
| 1122 | return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true); |
Herbert Xu | 02fd97c | 2015-03-20 21:57:00 +1100 | [diff] [blame] | 1123 | } |
| 1124 | |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1125 | /* Internal function, please use rhashtable_replace_fast() instead */ |
| 1126 | static inline int __rhashtable_replace_fast( |
| 1127 | struct rhashtable *ht, struct bucket_table *tbl, |
| 1128 | struct rhash_head *obj_old, struct rhash_head *obj_new, |
| 1129 | const struct rhashtable_params params) |
| 1130 | { |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1131 | struct rhash_lock_head __rcu **bkt; |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1132 | struct rhash_head __rcu **pprev; |
| 1133 | struct rhash_head *he; |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1134 | unsigned int hash; |
| 1135 | int err = -ENOENT; |
| 1136 | |
| 1137 | /* Minimally, the old and new objects must have the same hash |
| 1138 | * (which should mean the identifiers are the same). |
| 1139 | */ |
| 1140 | hash = rht_head_hashfn(ht, tbl, obj_old, params); |
| 1141 | if (hash != rht_head_hashfn(ht, tbl, obj_new, params)) |
| 1142 | return -EINVAL; |
| 1143 | |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1144 | bkt = rht_bucket_var(tbl, hash); |
| 1145 | if (!bkt) |
| 1146 | return -ENOENT; |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1147 | |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1148 | pprev = NULL; |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1149 | rht_lock(tbl, bkt); |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1150 | |
NeilBrown | adc6a3a | 2019-04-12 11:52:08 +1000 | [diff] [blame] | 1151 | rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) { |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1152 | if (he != obj_old) { |
| 1153 | pprev = &he->next; |
| 1154 | continue; |
| 1155 | } |
| 1156 | |
| 1157 | rcu_assign_pointer(obj_new->next, obj_old->next); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1158 | if (pprev) { |
| 1159 | rcu_assign_pointer(*pprev, obj_new); |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1160 | rht_unlock(tbl, bkt); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1161 | } else { |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1162 | rht_assign_unlock(tbl, bkt, obj_new); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1163 | } |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1164 | err = 0; |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1165 | goto unlocked; |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1166 | } |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1167 | |
NeilBrown | 149212f | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1168 | rht_unlock(tbl, bkt); |
NeilBrown | 8f0db01 | 2019-04-02 10:07:45 +1100 | [diff] [blame] | 1169 | |
| 1170 | unlocked: |
Tom Herbert | 3502cad | 2015-12-15 15:41:36 -0800 | [diff] [blame] | 1171 | return err; |
| 1172 | } |
| 1173 | |
| 1174 | /** |
| 1175 | * rhashtable_replace_fast - replace an object in hash table |
| 1176 | * @ht: hash table |
| 1177 | * @obj_old: pointer to hash head inside object being replaced |
| 1178 | * @obj_new: pointer to hash head inside object which is new |
| 1179 | * @params: hash table parameters |
| 1180 | * |
| 1181 | * Replacing an object doesn't affect the number of elements in the hash table |
| 1182 | * or bucket, so we don't need to worry about shrinking or expanding the |
| 1183 | * table here. |
| 1184 | * |
| 1185 | * Returns zero on success, -ENOENT if the entry could not be found, |
| 1186 | * or -EINVAL if the hash is not the same for the old and new objects. |
| 1187 | */ |
| 1188 | static inline int rhashtable_replace_fast( |
| 1189 | struct rhashtable *ht, struct rhash_head *obj_old, |
| 1190 | struct rhash_head *obj_new, |
| 1191 | const struct rhashtable_params params) |
| 1192 | { |
| 1193 | struct bucket_table *tbl; |
| 1194 | int err; |
| 1195 | |
| 1196 | rcu_read_lock(); |
| 1197 | |
| 1198 | tbl = rht_dereference_rcu(ht->tbl, ht); |
| 1199 | |
| 1200 | /* Because we have already taken (and released) the bucket |
| 1201 | * lock in old_tbl, if we find that future_tbl is not yet |
| 1202 | * visible then we are guaranteed that the entry, if it |
| 1203 | * exists, is still in the old tbl. |
| 1204 | */ |
| 1205 | while ((err = __rhashtable_replace_fast(ht, tbl, obj_old, |
| 1206 | obj_new, params)) && |
| 1207 | (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) |
| 1208 | ; |
| 1209 | |
| 1210 | rcu_read_unlock(); |
| 1211 | |
| 1212 | return err; |
| 1213 | } |
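/*
 * Replacement sketch: swap an object for an updated copy that hashes
 * identically, i.e. the key fields are unchanged.  Names are
 * hypothetical; the old object is freed only after an RCU grace
 * period, since readers may still hold a reference to it.
 *
 *	new = kmemdup(old, sizeof(*old), GFP_ATOMIC);
 *	if (!new)
 *		return -ENOMEM;
 *	new->payload = update;		// non-key field only
 *	err = rhashtable_replace_fast(&ht, &old->node, &new->node,
 *				      entry_params);
 *	if (!err)
 *		kfree_rcu(old, rcu);
 */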
| 1214 | |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1215 | /** |
| 1216 | * rhltable_walk_enter - Initialise an iterator |
| 1217 | * @hlt: Table to walk over |
| 1218 | * @iter: Hash table iterator |
| 1219 | * |
| 1220 | * This function prepares a hash table walk. |
| 1221 | * |
| 1222 | * Note that if you restart a walk after rhashtable_walk_stop() you |
| 1223 | * may see the same object twice. Also, you may miss objects if |
| 1224 | * there are removals in between rhashtable_walk_stop() and the next |
| 1225 | * call to rhashtable_walk_start(). |
| 1226 | * |
| 1227 | * For a completely stable walk you should construct your own data |
| 1228 | * structure outside the hash table. |
| 1229 | * |
NeilBrown | 82266e9 | 2018-04-24 08:29:13 +1000 | [diff] [blame] | 1230 | * This function may be called from any process context, including |
| 1231 | * non-preemptible context, but cannot be called from softirq or |
| 1232 | * hardirq context. |
Herbert Xu | ca26893 | 2016-09-19 19:00:09 +0800 | [diff] [blame] | 1233 | * |
| 1234 | * You must call rhashtable_walk_exit() after this function returns. |
| 1235 | */ |
| 1236 | static inline void rhltable_walk_enter(struct rhltable *hlt, |
| 1237 | struct rhashtable_iter *iter) |
| 1238 | { |
| 1239 | return rhashtable_walk_enter(&hlt->ht, iter); |
| 1240 | } |
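/*
 * A typical walk over an rhltable (the same pattern works for a plain
 * rhashtable via rhashtable_walk_enter()); "flow_table" and "struct
 * flow" are the hypothetical names from the earlier sketch.  An
 * -EAGAIN from rhashtable_walk_next() signals a concurrent resize:
 * the walk may continue, but objects may be seen twice or missed.
 *
 *	struct rhashtable_iter iter;
 *	struct flow *f;
 *
 *	rhltable_walk_enter(&flow_table, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((f = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(f)) {
 *			if (PTR_ERR(f) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		// ... inspect f; do not sleep while the walk is started ...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */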
| 1241 | |
| 1242 | /** |
| 1243 | * rhltable_free_and_destroy - free elements and destroy hash list table |
| 1244 | * @hlt: the hash list table to destroy |
| 1245 | * @free_fn: callback to release resources of element |
| 1246 | * @arg: pointer passed to free_fn |
| 1247 | * |
| 1248 | * See documentation for rhashtable_free_and_destroy. |
| 1249 | */ |
| 1250 | static inline void rhltable_free_and_destroy(struct rhltable *hlt, |
| 1251 | void (*free_fn)(void *ptr, |
| 1252 | void *arg), |
| 1253 | void *arg) |
| 1254 | { |
| 1255 | return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg); |
| 1256 | } |
| 1257 | |
| 1258 | static inline void rhltable_destroy(struct rhltable *hlt) |
| 1259 | { |
| 1260 | return rhltable_free_and_destroy(hlt, NULL, NULL); |
| 1261 | } |
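/*
 * Teardown sketch for kmalloc()ed elements ("free_flow" and
 * "flow_table" are hypothetical): free_fn is invoked on every element
 * still in the table.
 *
 *	static void free_flow(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhltable_free_and_destroy(&flow_table, free_flow, NULL);
 *
 * If the table is already known to be empty, rhltable_destroy() is
 * sufficient.
 */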
| 1262 | |
Thomas Graf | 7e1e776 | 2014-08-02 11:47:44 +0200 | [diff] [blame] | 1263 | #endif /* _LINUX_RHASHTABLE_H */ |