/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/ktime.h>

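/*
 * Tiny RCU runs on exactly one CPU and does not track dyntick-idle state,
 * so there is nothing for a dynticks snapshot to report; the stub below
 * therefore returns a constant.
 */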
struct rcu_dynticks;
static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return 0;
}

/* Never flag non-existent other CPUs! */
static inline bool rcu_eqs_special_set(int cpu) { return false; }

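/*
 * Since the 2018 consolidation of the RCU flavors, Tiny RCU provides a
 * single grace-period mechanism, so the RCU-sched and RCU-bh interfaces
 * below are thin wrappers around their plain-RCU equivalents.
 */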
static inline void synchronize_sched(void)
{
	synchronize_rcu();
}

static inline unsigned long get_state_synchronize_rcu(void)
{
	return 0;
}

static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline unsigned long get_state_synchronize_sched(void)
{
	return 0;
}

static inline void cond_synchronize_sched(unsigned long oldstate)
{
	might_sleep();
}
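
/*
 * Illustrative sketch of the polled grace-period API above (hypothetical
 * caller code, not part of this header).  A caller snapshots the
 * grace-period state, does other work, and then waits only for whatever
 * part of a grace period has not already elapsed:
 *
 *	unsigned long old = get_state_synchronize_rcu();
 *
 *	remove_element_from_reader_visible_structure(e);
 *	do_something_else_for_a_while();
 *	cond_synchronize_rcu(old);	(might sleep)
 *	kfree(e);
 *
 * On Tiny RCU the cookie is always zero and cond_synchronize_rcu() never
 * blocks: with one CPU and no preemption, reaching a point where sleeping
 * is legal implies that a grace period has already elapsed, so only the
 * might_sleep() check remains.
 */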

extern void rcu_barrier(void);

static inline void rcu_barrier_sched(void)
{
	rcu_barrier();	/* Only one CPU, so only one list of callbacks! */
}

static inline void rcu_barrier_bh(void)
{
	rcu_barrier();
}

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}

static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}
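
/*
 * kfree_call_rcu() is normally reached via the kfree_rcu() macro, which
 * encodes the offset of the rcu_head within the enclosing structure as
 * the "function" argument.  Illustrative sketch (the structure and
 * release_foo() are hypothetical, not part of this header):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void release_foo(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rh);	(frees fp after a grace period)
 *	}
 *
 * On Tiny RCU this ends up on the one and only call_rcu() callback list.
 */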

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

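/*
 * A context switch is a quiescent state for both RCU and RCU-tasks, so
 * record one for each; rcu_tasks_qs() compiles to a no-op unless the
 * Tasks-RCU implementation is configured in.
 */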
#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current); \
	} while (0)

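/*
 * Tell the tick layer that RCU needs nothing from this CPU: returning 0
 * with *nextevt set to KTIME_MAX allows the tick to be stopped
 * indefinitely when the CPU goes idle.
 */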
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline void rcu_idle_enter(void) { }
static inline void rcu_idle_exit(void) { }
static inline void rcu_irq_enter(void) { }
static inline void rcu_irq_exit_irqson(void) { }
static inline void rcu_irq_enter_irqson(void) { }
static inline void rcu_irq_exit(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
#ifdef CONFIG_SRCU
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_SRCU */
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifdef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_is_watching(void) { return true; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/*
 * RCUtree hotplug events: defining these to NULL tells the CPU-hotplug
 * core that Tiny RCU needs no callback at these states.
 */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */