blob: 84f9745365ff2efea37d2994f1ca5957daac912a [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0 */
Peter Zijlstra (Intel)13b35682016-02-19 09:46:37 +01002#ifndef _LINUX_SWAIT_H
3#define _LINUX_SWAIT_H
4
5#include <linux/list.h>
6#include <linux/stddef.h>
7#include <linux/spinlock.h>
Sebastian Andrzej Siewiora59a68f2018-05-04 12:42:24 +02008#include <linux/wait.h>
Peter Zijlstra (Intel)13b35682016-02-19 09:46:37 +01009#include <asm/current.h>
10
/*
 * Simple wait queues
 *
 * While these are very similar to regular wait queues (wait.h) the most
 * important difference is that the simple waitqueue allows for deterministic
 * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
 *
 * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
 * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
 * priority task a chance to run.
 *
 * Secondly, we had to drop a fair number of features of the other waitqueue
 * code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the exclusive mode; because this requires preserving the list order
 *    and this is hard.
 *
 *  - custom wake callback functions; because you cannot give any guarantees
 *    about random code. This also allows swait to be used in RT, such that
 *    raw spinlock can be used for the swait queue head.
 *
 * As a side effect of these; the data structures are slimmer albeit more ad-hoc.
 * For all the above, note that simple wait queues should _only_ be used under
 * very specific realtime constraints -- it is best to stick with the regular
 * wait queues in most cases.
 */
41
struct task_struct;

/*
 * Head of a simple wait queue.
 *
 * @lock uses raw_spinlock_t (not spinlock_t) so the queue can be used on
 * PREEMPT_RT (see the file header comment); @task_list anchors the list
 * of struct swait_queue waiter entries.
 */
struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};
48
/*
 * A single waiter entry: @task is the sleeping task, @task_list links the
 * entry into swait_queue_head::task_list. Typically lives on the waiting
 * task's stack (see ___swait_event() / DECLARE_SWAITQUEUE()).
 */
struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};
53
/*
 * Static initializer for a struct swait_queue owned by the current task;
 * @name must be the variable being initialized so the list head can be
 * self-linked.
 */
#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

/* Declare and initialize an swait_queue entry for the current task. */
#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)
61
/*
 * Static initializer for a struct swait_queue_head: an unlocked raw
 * spinlock plus an empty waiter list.
 */
#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

/* Declare and statically initialize a simple waitqueue head. */
#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)
69
extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

/*
 * Run-time initialization of a swait_queue_head.  The static
 * lock_class_key gives every call site its own lockdep class, and the
 * stringified #q serves as the lock name for debugging.
 */
#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;		\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)
78
/*
 * On-stack declaration: with CONFIG_LOCKDEP the head is initialized at
 * run time via init_swait_queue_head() (so it gets a proper lock class);
 * otherwise the plain static initializer suffices.
 */
#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)			\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif
88
/**
 * swait_active -- locklessly test for waiters on the queue
 * @wq: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * NOTE2: this function has the same above implications as regular waitqueues.
 *
 * Use either while holding swait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (swait_active(wq_head))        if (@cond)
 *        wake_up(wq_head);                 break;
 *                                        schedule();
 *                                      }
 *                                      finish_swait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * swait_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 * This, in turn, can trigger missing wakeups.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int swait_active(struct swait_queue_head *wq)
{
	return !list_empty(&wq->task_list);
}
126
/**
 * swq_has_sleeper - check if there are any waiting processes
 * @wq: the waitqueue to test for waiters
 *
 * Returns true if @wq has waiting processes
 *
 * Please refer to the comment for swait_active.
 */
static inline bool swq_has_sleeper(struct swait_queue_head *wq)
{
	/*
	 * We need to be sure we are in sync with the list_add()
	 * modifications to the wait queue (task_list).
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return swait_active(wq);
}
147
/* Wakeup primitives; the _locked variant is the q->lock-held flavour. */
extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

/* Queue a waiter entry before sleeping (used by the swait_event* macros). */
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

/* Unlink the waiter entry and restore the task state after waiting. */
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
158
/*
 * As per ___wait_event() but for swait, therefore "exclusive == 0"
 * (swait has no exclusive mode; see the file header comment).
 *
 * @wq:        swait_queue_head to wait on
 * @condition: expression re-checked after every wakeup; ends the loop
 *             once it evaluates true
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE, ...)
 * @ret:       initial value of the statement-expression result (__ret)
 * @cmd:       statement performing the actual sleep, e.g. schedule()
 *
 * Evaluates to @ret, or -- when @state is interruptible and
 * prepare_to_swait_event() returns non-zero -- to that non-zero value,
 * aborting the wait.
 */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
	__ret;								\
})
182
/* Sleep in TASK_UNINTERRUPTIBLE until @condition becomes true. */
#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			    schedule())

/*
 * swait_event - sleep (uninterruptibly) until @condition becomes true.
 * The @condition check up front avoids waitqueue setup when it already
 * holds.
 */
#define swait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)
193
#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_UNINTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

/*
 * swait_event_timeout - sleep (uninterruptibly) until @condition or the
 * @timeout (in jiffies) elapses.  Result follows ___wait_cond_timeout():
 * 0 if @condition stayed false past the timeout, otherwise a non-zero
 * value (remaining jiffies, at least 1).
 */
#define swait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
206
#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		       schedule())

/*
 * swait_event_interruptible - sleep in TASK_INTERRUPTIBLE until
 * @condition becomes true.  Returns 0 on success, or the non-zero value
 * from prepare_to_swait_event() if the wait was interrupted.
 */
#define swait_event_interruptible(wq, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})
218
#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_INTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

/*
 * swait_event_interruptible_timeout - interruptible sleep bounded by
 * @timeout jiffies.  Returns 0 if the timeout elapsed with @condition
 * false, a positive value (remaining jiffies, at least 1) if @condition
 * became true, or the non-zero prepare_to_swait_event() value if
 * interrupted.
 */
#define swait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
232
/* Sleep in TASK_IDLE (no loadavg contribution) until @condition is true. */
#define __swait_event_idle(wq, condition)				\
	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())

/**
 * swait_event_idle - wait without system load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 */
#define swait_event_idle(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event_idle(wq, condition);				\
} while (0)
254
/* Timed variant of __swait_event_idle(); see swait_event_idle_timeout(). */
#define __swait_event_idle_timeout(wq, condition, timeout)		\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_IDLE, timeout,				\
		       __ret = schedule_timeout(__ret))

/**
 * swait_event_idle_timeout - wait up to timeout without load contribution
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout at which we'll give up in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the @condition evaluates to
 * true. The @condition is checked each time the waitqueue @wq is woken up.
 *
 * This function is mostly used when a kthread or workqueue waits for some
 * condition and doesn't want to contribute to system load. Signals are
 * ignored.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define swait_event_idle_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_idle_timeout(wq,			\
						   condition, timeout);	\
	__ret;								\
})
287
Peter Zijlstra (Intel)13b35682016-02-19 09:46:37 +0100288#endif /* _LINUX_SWAIT_H */