/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kernel/lockdep_internals.h
 *
 * Runtime locking correctness validator
 *
 * lockdep subsystem internal functions and variables.
 */

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit {
#define LOCKDEP_STATE(__STATE)		\
	LOCK_USED_IN_##__STATE,		\
	LOCK_USED_IN_##__STATE##_READ,	\
	LOCK_ENABLED_##__STATE,		\
	LOCK_ENABLED_##__STATE##_READ,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	LOCK_USED,
	LOCK_USAGE_STATES
};
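
/*
 * For illustration only (not part of the build): assuming lockdep_states.h
 * lists LOCKDEP_STATE(HARDIRQ) and LOCKDEP_STATE(SOFTIRQ), the enum above
 * expands to:
 *
 *	LOCK_USED_IN_HARDIRQ      = 0,
 *	LOCK_USED_IN_HARDIRQ_READ = 1,
 *	LOCK_ENABLED_HARDIRQ      = 2,
 *	LOCK_ENABLED_HARDIRQ_READ = 3,
 *	LOCK_USED_IN_SOFTIRQ      = 4,
 *	...
 *	LOCK_USED                 = 8,
 *
 * i.e. each state occupies four consecutive bit numbers.
 */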

#define LOCK_USAGE_READ_MASK 1
#define LOCK_USAGE_DIR_MASK  2
#define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
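
/*
 * Illustrative sketch, given the four-bits-per-state layout above: a usage
 * bit decomposes as
 *
 *	bit & LOCK_USAGE_READ_MASK	- set for the _READ variants
 *	bit & LOCK_USAGE_DIR_MASK	- distinguishes ENABLED_* from USED_IN_*
 *	bit & LOCK_USAGE_STATE_MASK	- selects the state (HARDIRQ, SOFTIRQ, ...)
 *
 * e.g. LOCK_ENABLED_HARDIRQ_READ (3) has both the READ and DIR bits set.
 */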

/*
 * Usage-state bitmasks:
 */
#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),

enum {
#define LOCKDEP_STATE(__STATE)			\
	__LOCKF(USED_IN_##__STATE)		\
	__LOCKF(USED_IN_##__STATE##_READ)	\
	__LOCKF(ENABLED_##__STATE)		\
	__LOCKF(ENABLED_##__STATE##_READ)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	__LOCKF(USED)
};
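
/*
 * For example (again assuming the HARDIRQ/SOFTIRQ states), this yields
 * LOCKF_USED_IN_HARDIRQ = 0x01, LOCKF_ENABLED_HARDIRQ = 0x04,
 * LOCKF_USED_IN_SOFTIRQ = 0x10, ... - one bit per usage state, suitable
 * for the combined mask tests below.
 */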

#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_ENABLED_IRQ_READ \
		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)

/*
 * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text,
 * .data and .bss to fit within the required 32MB limit for the kernel.
 * With CONFIG_LOCKDEP we could exceed this limit and cause system
 * boot-up problems. So, reduce the static allocations for the
 * lockdep-related structures so that everything fits within the
 * current size limit.
 */
#ifdef CONFIG_LOCKDEP_SMALL
/*
 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
 * we track.
 *
 * We use the per-lock dependency maps in two ways: we grow them by
 * adding every to-be-taken lock to all currently held locks' own
 * dependency tables (if it's not there yet), and we check them for
 * lock order conflicts and deadlocks.
 */
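/*
 * For example: if a task acquires B while holding A, the dependency
 * (A -> B) is recorded in A's list; should any context ever record the
 * reverse dependency (B -> A), the validator can report the potential
 * deadlock before it happens at runtime.
 */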
#define MAX_LOCKDEP_ENTRIES	16384UL
#define MAX_LOCKDEP_CHAINS_BITS	15
#define MAX_STACK_TRACE_ENTRIES	262144UL
#else
#define MAX_LOCKDEP_ENTRIES	32768UL

#define MAX_LOCKDEP_CHAINS_BITS	16

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the hash_lock.
 */
#define MAX_STACK_TRACE_ENTRIES	524288UL
#endif

#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)

#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
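
/*
 * Worked out for the default (non-small) configuration:
 * MAX_LOCKDEP_CHAINS = 1UL << 16 = 65536 lock chains, and
 * MAX_LOCKDEP_CHAIN_HLOCKS = 65536 * 5 = 327680 held-lock entries
 * shared among those chains.
 */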

extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];

#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)

extern void get_usage_chars(struct lock_class *class,
			    char usage[LOCK_USAGE_CHARS]);
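
/*
 * Sketch of the output format (legend per Documentation/locking/
 * lockdep-design): each state contributes one character for write usage
 * and one for read usage, followed by a terminating NUL - with the two
 * IRQ states that is four characters plus NUL (LOCK_USAGE_CHARS == 5),
 * e.g. "+.+.", where:
 *
 *	'.'	acquired while irqs disabled and not in irq context
 *	'-'	acquired in irq context
 *	'+'	acquired with irqs enabled
 *	'?'	acquired in irq context with irqs enabled
 */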

extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);

struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);

extern unsigned long nr_lock_classes;
extern unsigned long nr_list_entries;
long lockdep_next_lockchain(long i);
unsigned long lock_chain_count(void);
extern int nr_chain_hlocks;
extern unsigned long nr_stack_trace_entries;

extern unsigned int nr_hardirq_chains;
extern unsigned int nr_softirq_chains;
extern unsigned int nr_process_chains;
extern unsigned int max_lockdep_depth;
extern unsigned int max_recursion_depth;

extern unsigned int max_bfs_queue_depth;

#ifdef CONFIG_PROVE_LOCKING
extern unsigned long lockdep_count_forward_deps(struct lock_class *);
extern unsigned long lockdep_count_backward_deps(struct lock_class *);
#else
static inline unsigned long
lockdep_count_forward_deps(struct lock_class *class)
{
	return 0;
}
static inline unsigned long
lockdep_count_backward_deps(struct lock_class *class)
{
	return 0;
}
#endif

#ifdef CONFIG_DEBUG_LOCKDEP

#include <asm/local.h>
/*
 * Various lockdep statistics.
 * We want them per-cpu as they are often accessed in the fast path and
 * we want to avoid too much cache bouncing.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_redundant_checks;
	int	nr_redundant;
	int	nr_cyclic_checks;
	int	nr_cyclic_check_recursions;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_forwards_recursions;
	int	nr_find_usage_backwards_checks;
	int	nr_find_usage_backwards_recursions;

	/*
	 * Per lock class locking operation stat counts
	 */
	unsigned long lock_class_ops[MAX_LOCKDEP_KEYS];
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

#define __debug_atomic_inc(ptr)					\
	this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
}

#define debug_atomic_dec(ptr)			{		\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_dec(lockdep_stats.ptr);			\
}

#define debug_atomic_read(ptr)		({				\
	struct lockdep_stats *__cpu_lockdep_stats;			\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu) {					\
		__cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu);	\
		__total += __cpu_lockdep_stats->ptr;			\
	}								\
	__total;							\
})
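
/*
 * Usage sketch (illustrative): updates happen on the local cpu with IRQs
 * disabled, e.g.
 *
 *	debug_atomic_inc(chain_lookup_hits);
 *
 * while debug_atomic_read() is a statement expression that sums the
 * counter across all possible cpus for reporting, as the lockdep proc
 * code does:
 *
 *	seq_printf(m, "%llu\n", debug_atomic_read(chain_lookup_hits));
 */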

static inline void debug_class_ops_inc(struct lock_class *class)
{
	int idx;

	idx = class - lock_classes;
	__debug_atomic_inc(lock_class_ops[idx]);
}

static inline unsigned long debug_class_ops_read(struct lock_class *class)
{
	int idx, cpu;
	unsigned long ops = 0;

	idx = class - lock_classes;
	for_each_possible_cpu(cpu)
		ops += per_cpu(lockdep_stats.lock_class_ops[idx], cpu);
	return ops;
}

#else
# define __debug_atomic_inc(ptr)	do { } while (0)
# define debug_atomic_inc(ptr)		do { } while (0)
# define debug_atomic_dec(ptr)		do { } while (0)
# define debug_atomic_read(ptr)	0
# define debug_class_ops_inc(ptr)	do { } while (0)
#endif
Ingo Molnarfbb9ce952006-07-03 00:24:50 -0700219#endif