Thomas Gleixner | 1a59d1b8 | 2019-05-27 08:55:05 +0200 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | Red Black Trees |
| 4 | (C) 1999 Andrea Arcangeli <andrea@suse.de> |
| 5 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6 | |
| 7 | linux/include/linux/rbtree.h |
| 8 | |
  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids the need for callbacks, which would drastically hurt
  performance.  It's not the cleanest way, but it is what it takes in C
  (as opposed to C++) to get both performance and genericity...
| 13 | |
Michel Lespinasse | 1457d28 | 2012-10-08 16:30:28 -0700 | [diff] [blame] | 14 | See Documentation/rbtree.txt for documentation and samples. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 15 | */ |
| 16 | |
| 17 | #ifndef _LINUX_RBTREE_H |
| 18 | #define _LINUX_RBTREE_H |
| 19 | |
| 20 | #include <linux/kernel.h> |
| 21 | #include <linux/stddef.h> |
Peter Zijlstra | d72da4a | 2015-05-27 11:09:36 +0930 | [diff] [blame] | 22 | #include <linux/rcupdate.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 23 | |
/*
 * A node embedded in the user's own structure (use rb_entry() to get
 * back to the container).
 *
 * __rb_parent_color packs the parent pointer together with the node's
 * color in the low bits: rb_parent() masks them off with ~3, which is
 * only safe because this struct is forced to sizeof(long) alignment.
 */
struct rb_node {
	unsigned long __rb_parent_color;	/* parent pointer | color bits */
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 30 | |
/* Root of an rbtree: just the topmost node, NULL for an empty tree. */
struct rb_root {
	struct rb_node *rb_node;
};
| 34 | |
/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node based on footprint
 * size vs number of potential users that could benefit
 * from O(1) rb_last(). Just not worth it, users that want
 * this feature can always implement the logic explicitly.
 * Furthermore, users that want to cache both pointers may
 * find it a bit asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;	/* cached first node; NULL when empty */
};
David Woodhouse | 55a9810 | 2006-04-21 13:35:51 +0100 | [diff] [blame] | 49 | |
/*
 * Recover the parent pointer by masking off the low color bits of
 * __rb_parent_color (valid because nodes are long-aligned, so the low
 * two bits of a genuine node address are always zero).
 */
#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

/* Static initializers for empty trees. */
#define RB_ROOT	(struct rb_root) { NULL, }
#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
/* Obtain the containing object from its embedded rb_node @member. */
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)

/* READ_ONCE() guards against torn/refetched reads of the root pointer. */
#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
/* An 'empty' node has its parent slot pointing at itself (see below). */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
/* Mark @node as not inserted in any tree. */
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
Michel Lespinasse | 4c199a9 | 2012-10-08 16:30:32 -0700 | [diff] [blame] | 63 | |
John Stultz | 88d19cf | 2011-01-03 18:59:43 -0800 | [diff] [blame] | 64 | |
/* Rebalance after the caller has linked (rb_link_node) / before unlinking. */
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/*
 * Variants for leftmost-cached trees.  NOTE(review): the bool presumably
 * tells rb_insert_color_cached() whether the new node is the new leftmost
 * -- confirm against lib/rbtree.c.
 */
extern void rb_insert_color_cached(struct rb_node *,
				   struct rb_root_cached *, bool);
extern void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);
/*
 * rb_first_cached() - same as rb_first(), but O(1): return the cached
 * leftmost node of @root (NULL when the tree is empty, per RB_ROOT_CACHED).
 * Expansion fully parenthesized so it composes safely in any expression;
 * the result is still an lvalue, so existing assignment users are fine.
 */
#define rb_first_cached(root) ((root)->rb_leftmost)
| 80 | |
/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
/* As above, for trees with concurrent RCU readers. */
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
				struct rb_root *root);
/* As above, for leftmost-cached trees. */
extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new,
				   struct rb_root_cached *root);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 92 | |
Peter Zijlstra | d72da4a | 2015-05-27 11:09:36 +0930 | [diff] [blame] | 93 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
| 94 | struct rb_node **rb_link) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 95 | { |
Michel Lespinasse | bf7ad8e | 2012-10-08 16:30:37 -0700 | [diff] [blame] | 96 | node->__rb_parent_color = (unsigned long)parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 97 | node->rb_left = node->rb_right = NULL; |
| 98 | |
| 99 | *rb_link = node; |
| 100 | } |
| 101 | |
/**
 * rb_link_node_rcu() - rb_link_node() for trees with concurrent RCU readers.
 * @node: new node to insert.
 * @parent: future parent of @node (NULL when inserting the root).
 * @rb_link: the child slot of @parent that @node is published into.
 *
 * Statement order matters here: @node's fields are fully initialized
 * *before* rcu_assign_pointer() publishes it, so an RCU reader that
 * sees the new pointer never observes a half-initialized node.
 */
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
				    struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	rcu_assign_pointer(*rb_link, node);
}
| 110 | |
/*
 * rb_entry_safe() - rb_entry() that tolerates a NULL @ptr, yielding NULL
 * instead of a bogus container pointer.  @ptr is evaluated exactly once.
 */
#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})
| 115 | |
/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 *
 * (The successor @n is computed before the body runs, which is what makes
 * freeing @pos inside the body safe.)
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
Cody P Schafer | 2b529089 | 2013-09-11 14:25:11 -0700 | [diff] [blame] | 138 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 139 | #endif /* _LINUX_RBTREE_H */ |