/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}

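/*
 * Illustrative sketch (not part of the original header): a small integer
 * round-trips through the value-entry helpers above.  The local variable
 * names here are hypothetical.
 *
 *	void *entry = xa_mk_value(42);
 *
 *	if (xa_is_value(entry))
 *		size = xa_to_value(entry);	(size is now 42)
 *
 * Because one bit is used as the type tag, only values that fit in
 * BITS_PER_XA_VALUE bits can be stored this way.
 */
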
/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries. Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}

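/*
 * Illustrative sketch (not part of the original header): tagging a pointer
 * on its way into the array and recovering both pieces later.  "p" and
 * "entry" are hypothetical local variables.
 *
 *	void *entry = xa_tag_pointer(p, 1);
 *	...
 *	p = xa_untag_pointer(entry);
 *	tag = xa_pointer_tag(entry);	(tag is 1)
 *
 * This only works for pointers with at least 4-byte alignment, since the
 * tag lives in the bottom two bits of the entry.
 */
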
/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error. This function tells you whether an error
 * occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno. This function extracts the
 * errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}

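/*
 * Illustrative sketch (not part of the original header): checking a store
 * for failure.  xa_store() returns either the previous entry or an error
 * entry, so the two helpers above distinguish the cases.  "xa", "index"
 * and "item" are hypothetical.
 *
 *	void *old = xa_store(&xa, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);	(e.g. -ENOMEM)
 */
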
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry. If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {				\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
	.xa_flags = flags,					\
	.xa_head = NULL,					\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name and flags. It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compiletime instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)				\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name. It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compiletime instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

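/*
 * Illustrative sketch (not part of the original header): a file-scope array
 * defined with the macro above and used through the normal API declared
 * below.  The identifiers are hypothetical.
 *
 *	DEFINE_XARRAY(foo_array);
 *
 *	err = xa_err(xa_store(&foo_array, 5, foo, GFP_KERNEL));
 *	foo = xa_load(&foo_array, 5);
 *	xa_erase(&foo_array, 5);
 */
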
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_cmpxchg(struct xarray *, unsigned long index,
			void *old, void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase(struct xarray *xa, unsigned long index)
{
	return xa_store(xa, index, NULL, 0);
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * If you would rather see the existing entry in the array, use xa_cmpxchg().
 * This function is for users who don't care what the entry is, only that
 * one is present.
 *
 * Context: Process context. Takes and releases the xa_lock.
 *	    May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int xa_insert(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
	if (!curr)
		return 0;
	if (xa_is_err(curr))
		return xa_err(curr);
	return -EEXIST;
}

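/*
 * Illustrative sketch (not part of the original header): reserving an index
 * with xa_insert() and reacting to the two possible failures.  "xa", "id"
 * and "item" are hypothetical.
 *
 *	int err = xa_insert(&xa, id, item, GFP_KERNEL);
 *
 *	if (err == -EEXIST)
 *		(somebody else already claimed this index)
 *	else if (err == -ENOMEM)
 *		(allocation failed; retry or give up)
 */
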
#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

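/*
 * Illustrative sketch (not part of the original header): using the xa_lock
 * to keep a private counter in sync with the array via the __xa_* variants
 * declared above.  GFP_ATOMIC keeps the store from sleeping while the lock
 * is held; the names are hypothetical.
 *
 *	xa_lock(&xa);
 *	old = __xa_store(&xa, index, item, GFP_ATOMIC);
 *	if (!xa_is_err(old) && !old)
 *		nr_items++;
 *	xa_unlock(&xa);
 */
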
/**
 * __xa_insert() - Store this entry in the XArray unless another entry is
 *			already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * If you would rather see the existing entry in the array, use __xa_cmpxchg().
 * This function is for users who don't care what the entry is, only that
 * one is present.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EEXIST if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __xa_insert(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp);
	if (!curr)
		return 0;
	if (xa_is_err(curr))
		return xa_err(curr);
	return -EEXIST;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * This function is the equivalent of calling xa_store() with %NULL as
 * the third argument. The XArray does not need to allocate memory, so
 * the user does not need to provide GFP flags.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {					\
		if (x) {					\
			xa_dump(xa);				\
			BUG();					\
		}						\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {				\
		if (x) {					\
			if (node) xa_dump_node(node);		\
			BUG();					\
		}						\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 *	    Implementations should not drop the xa_lock, nor re-enable
 *	    interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
};

/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL				\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)				\
	struct xa_state name = __XA_STATE(array, index, 0, 0)

/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)

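/*
 * Illustrative sketch (not part of the original header): with
 * CONFIG_XARRAY_MULTI enabled, the declaration below prepares an operation
 * on an order-3 entry, i.e. one entry occupying the eight indices 64-71.
 * The names are hypothetical.
 *
 *	XA_STATE_ORDER(xas, &xa, 64, 3);
 */
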
#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)

/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}

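/*
 * Illustrative sketch (not part of the original header): a lockless lookup
 * that restarts when it races with a node being reallocated.  "xa" and
 * "index" are hypothetical.
 *
 *	XA_STATE(xas, &xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */
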
void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);

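/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for xas_store() when memory may need to be allocated.  xas_nomem()
 * allocates outside the lock and asks for a retry if the previous attempt
 * failed with -ENOMEM.  The names are hypothetical.
 *
 *	XA_STATE(xas, &xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_set_mark(&xas, XA_MARK_0);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */
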
/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value. This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid. If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (node)
		return xa_entry(xas->xa, node, xas->xa_offset);
	return xa_head(xas->xa);
}

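/*
 * Illustrative sketch (not part of the original header): the "look up,
 * lock, re-check" pattern described above, with lock_object() and
 * unlock_object() standing in for whatever pins the object (e.g. a page
 * lock).
 *
 *	rcu_read_lock();
 *	obj = xas_load(&xas);
 *	if (obj && lock_object(obj)) {
 *		if (obj != xas_reload(&xas)) {
 *			unlock_object(obj);	(it moved; start again)
 *			obj = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 */
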
/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index. This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

#endif /* _LINUX_XARRAY_H */