/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
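
/*
 * An illustrative sketch, assuming lockdep_states.h lists the HARDIRQ
 * and SOFTIRQ states: the X-macro above expands to
 *
 *	LOCK_USED_IN_HARDIRQ,
 *	LOCK_USED_IN_HARDIRQ_READ,
 *	LOCK_ENABLED_HARDIRQ,
 *	LOCK_ENABLED_HARDIRQ_READ,
 *	LOCK_USED_IN_SOFTIRQ,
 *	...
 *
 * followed by LOCK_USED and LOCK_USAGE_STATES, so every state consumes
 * four consecutive usage bits.
 */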

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK 2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
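
/*
 * These masks decompose a usage bit number according to the enum
 * layout above: bit 0 flags a read acquisition, bit 1 distinguishes
 * ENABLED from USED_IN, and the remaining bits select the state.
 * A sketch, assuming HARDIRQ is the first state:
 *
 *	LOCK_ENABLED_HARDIRQ_READ (== 3):
 *		bit & LOCK_USAGE_READ_MASK	-> 1 (a read usage)
 *		bit & LOCK_USAGE_DIR_MASK	-> 2 (an ENABLED usage)
 *		bit & LOCK_USAGE_STATE_MASK	-> 0 (the HARDIRQ state)
 */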

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)						\
	__LOCKF(USED_IN_##__STATE)					\
	__LOCKF(USED_IN_##__STATE##_READ)				\
	__LOCKF(ENABLED_##__STATE)					\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
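
/*
 * Continuing the sketch above: each __LOCKF() maps a usage bit to a
 * mask bit of the same name, e.g.
 *
 *	LOCKF_USED_IN_HARDIRQ = (1 << LOCK_USED_IN_HARDIRQ),
 */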

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system
 * boot-up problems. So, reduce the static allocations for the
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to each currently held lock's own
 * dependency table (if it's not there yet), and we check them for
 * lock-order conflicts and deadlocks.
 */
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
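/*
 * The factor of five above presumably budgets the storage backing the
 * lock chains for an average of five held locks per chain.
 */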

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
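
/*
 * A rough sketch of the sizing above: get_usage_chars() emits two
 * characters per state (write and read usage, each one of '.', '-',
 * '+' or '?') plus a terminating NUL, hence 1 + LOCK_USAGE_STATES/2.
 */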

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-CPU, as they are often accessed in the fast path
 * and we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_inc(lockdep_stats.ptr);		\
}

#define debug_atomic_dec(ptr)			{	\
	WARN_ON_ONCE(!irqs_disabled());			\
	__this_cpu_dec(lockdep_stats.ptr);		\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
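
/*
 * A hypothetical usage sketch: the inc/dec helpers above touch only
 * the local CPU's counter (hence the cheap fast path), while
 * debug_atomic_read() folds every possible CPU's count into one total:
 *
 *	debug_atomic_inc(hardirqs_on_events);
 *	...
 *	seq_printf(m, "%llu\n", debug_atomic_read(hardirqs_on_events));
 */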

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)		0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif