blob: ad0af4df1b9d6bbbe1d27c7bcc55878f2c591fc9 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Fast Userspace Mutexes (which I call "Futexes!").
3 * (C) Rusty Russell, IBM 2002
4 *
5 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7 *
8 * Removed page pinning, fix privately mapped COW pages and other cleanups
9 * (C) Copyright 2003, 2004 Jamie Lokier
10 *
Ingo Molnar0771dfe2006-03-27 01:16:22 -080011 * Robust futex support started by Ingo Molnar
12 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14 *
Ingo Molnarc87e2832006-06-27 02:54:58 -070015 * PI-futex support started by Ingo Molnar and Thomas Gleixner
16 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18 *
Eric Dumazet34f01cc2007-05-09 02:35:04 -070019 * PRIVATE futexes by Eric Dumazet
20 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21 *
Darren Hart52400ba2009-04-03 13:40:49 -070022 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23 * Copyright (C) IBM Corporation, 2009
24 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
25 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27 * enough at me, Linus for the original (flawed) idea, Matthew
28 * Kirkwood for proof-of-concept implementation.
29 *
30 * "The futexes are also cursed."
31 * "But they come in a choice of three flavours!"
32 *
33 * This program is free software; you can redistribute it and/or modify
34 * it under the terms of the GNU General Public License as published by
35 * the Free Software Foundation; either version 2 of the License, or
36 * (at your option) any later version.
37 *
38 * This program is distributed in the hope that it will be useful,
39 * but WITHOUT ANY WARRANTY; without even the implied warranty of
40 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 * GNU General Public License for more details.
42 *
43 * You should have received a copy of the GNU General Public License
44 * along with this program; if not, write to the Free Software
45 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
46 */
47#include <linux/slab.h>
48#include <linux/poll.h>
49#include <linux/fs.h>
50#include <linux/file.h>
51#include <linux/jhash.h>
52#include <linux/init.h>
53#include <linux/futex.h>
54#include <linux/mount.h>
55#include <linux/pagemap.h>
56#include <linux/syscalls.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070057#include <linux/signal.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040058#include <linux/export.h>
Andrey Mirkinfd5eea42007-10-16 23:30:13 -070059#include <linux/magic.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070060#include <linux/pid.h>
61#include <linux/nsproxy.h>
Kees Cookbdbb7762012-03-19 16:12:53 -070062#include <linux/ptrace.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060063#include <linux/sched/rt.h>
Ingo Molnar84f001e2017-02-01 16:36:40 +010064#include <linux/sched/wake_q.h>
Ingo Molnar6e84f312017-02-08 18:51:29 +010065#include <linux/sched/mm.h>
Zhang Yi13d60f42013-06-25 21:19:31 +080066#include <linux/hugetlb.h>
Colin Cross88c80042013-05-01 18:35:05 -070067#include <linux/freezer.h>
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -080068#include <linux/bootmem.h>
Davidlohr Buesoab51fba2015-06-29 23:26:02 -070069#include <linux/fault-inject.h>
Pavel Emelyanovb4888932007-10-18 23:40:14 -070070
Jakub Jelinek4732efbe2005-09-06 15:16:25 -070071#include <asm/futex.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070072
Peter Zijlstra1696a8b2013-10-31 18:18:19 +010073#include "locking/rtmutex_common.h"
Ingo Molnarc87e2832006-06-27 02:54:58 -070074
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080075/*
Davidlohr Buesod7e8af12014-04-09 11:55:07 -070076 * READ this before attempting to hack on futexes!
77 *
78 * Basic futex operation and ordering guarantees
79 * =============================================
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080080 *
81 * The waiter reads the futex value in user space and calls
82 * futex_wait(). This function computes the hash bucket and acquires
83 * the hash bucket lock. After that it reads the futex user space value
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080084 * again and verifies that the data has not changed. If it has not changed
85 * it enqueues itself into the hash bucket, releases the hash bucket lock
86 * and schedules.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080087 *
88 * The waker side modifies the user space value of the futex and calls
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080089 * futex_wake(). This function computes the hash bucket and acquires the
90 * hash bucket lock. Then it looks for waiters on that futex in the hash
91 * bucket and wakes them.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080092 *
Davidlohr Buesob0c29f72014-01-12 15:31:25 -080093 * In futex wake up scenarios where no tasks are blocked on a futex, taking
94 * the hb spinlock can be avoided and simply return. In order for this
95 * optimization to work, ordering guarantees must exist so that the waiter
96 * being added to the list is acknowledged when the list is concurrently being
97 * checked by the waker, avoiding scenarios like the following:
Thomas Gleixner99b60ce2014-01-12 15:31:24 -080098 *
99 * CPU 0 CPU 1
100 * val = *futex;
101 * sys_futex(WAIT, futex, val);
102 * futex_wait(futex, val);
103 * uval = *futex;
104 * *futex = newval;
105 * sys_futex(WAKE, futex);
106 * futex_wake(futex);
107 * if (queue_empty())
108 * return;
109 * if (uval == val)
110 * lock(hash_bucket(futex));
111 * queue();
112 * unlock(hash_bucket(futex));
113 * schedule();
114 *
115 * This would cause the waiter on CPU 0 to wait forever because it
116 * missed the transition of the user space value from val to newval
117 * and the waker did not find the waiter in the hash bucket queue.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800118 *
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800119 * The correct serialization ensures that a waiter either observes
120 * the changed user space value before blocking or is woken by a
121 * concurrent waker:
122 *
123 * CPU 0 CPU 1
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800124 * val = *futex;
125 * sys_futex(WAIT, futex, val);
126 * futex_wait(futex, val);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800127 *
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700128 * waiters++; (a)
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800129 * smp_mb(); (A) <-- paired with -.
130 * |
131 * lock(hash_bucket(futex)); |
132 * |
133 * uval = *futex; |
134 * | *futex = newval;
135 * | sys_futex(WAKE, futex);
136 * | futex_wake(futex);
137 * |
138 * `--------> smp_mb(); (B)
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800139 * if (uval == val)
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800140 * queue();
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800141 * unlock(hash_bucket(futex));
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800142 * schedule(); if (waiters)
143 * lock(hash_bucket(futex));
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700144 * else wake_waiters(futex);
145 * waiters--; (b) unlock(hash_bucket(futex));
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800146 *
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700147 * Where (A) orders the waiters increment and the futex value read through
148 * atomic operations (see hb_waiters_inc) and where (B) orders the write
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700149 * to futex and the waiters read -- this is done by the barriers for both
150 * shared and private futexes in get_futex_key_refs().
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800151 *
152 * This yields the following case (where X:=waiters, Y:=futex):
153 *
154 * X = Y = 0
155 *
156 * w[X]=1 w[Y]=1
157 * MB MB
158 * r[Y]=y r[X]=x
159 *
160 * Which guarantees that x==0 && y==0 is impossible; which translates back into
161 * the guarantee that we cannot both miss the futex variable change and the
162 * enqueue.
Davidlohr Buesod7e8af12014-04-09 11:55:07 -0700163 *
164 * Note that a new waiter is accounted for in (a) even when it is possible that
165 * the wait call can return error, in which case we backtrack from it in (b).
166 * Refer to the comment in queue_lock().
167 *
168 * Similarly, in order to account for waiters being requeued on another
169 * address we always increment the waiters for the destination bucket before
170 * acquiring the lock. It then decrements them again after releasing it -
171 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
172 * will do the additional required waiter count housekeeping. This is done for
173 * double_lock_hb() and double_unlock_hb(), respectively.
Thomas Gleixner99b60ce2014-01-12 15:31:24 -0800174 */
175
Heiko Carstens03b8c7b2014-03-02 13:09:47 +0100176#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800177int __read_mostly futex_cmpxchg_enabled;
Heiko Carstens03b8c7b2014-03-02 13:09:47 +0100178#endif
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800179
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180/*
Darren Hartb41277d2010-11-08 13:10:09 -0800181 * Futex flags used to encode options to functions and preserve them across
182 * restarts.
183 */
Thomas Gleixner784bdf32016-07-29 16:32:30 +0200184#ifdef CONFIG_MMU
185# define FLAGS_SHARED 0x01
186#else
187/*
188 * NOMMU does not have per process address space. Let the compiler optimize
189 * code away.
190 */
191# define FLAGS_SHARED 0x00
192#endif
Darren Hartb41277d2010-11-08 13:10:09 -0800193#define FLAGS_CLOCKRT 0x02
194#define FLAGS_HAS_TIMEOUT 0x04
195
196/*
Ingo Molnarc87e2832006-06-27 02:54:58 -0700197 * Priority Inheritance state:
198 */
199struct futex_pi_state {
200 /*
201 * list of 'owned' pi_state instances - these have to be
202 * cleaned up in do_exit() if the task exits prematurely:
203 */
204 struct list_head list;
205
206 /*
207 * The PI object:
208 */
209 struct rt_mutex pi_mutex;
210
211 struct task_struct *owner;
212 atomic_t refcount;
213
214 union futex_key key;
Kees Cook3859a272016-10-28 01:22:25 -0700215} __randomize_layout;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700216
Darren Hartd8d88fb2009-09-21 22:30:30 -0700217/**
218 * struct futex_q - The hashed futex queue entry, one per waiting task
Randy Dunlapfb62db22010-10-13 11:02:34 -0700219 * @list: priority-sorted list of tasks waiting on this futex
Darren Hartd8d88fb2009-09-21 22:30:30 -0700220 * @task: the task waiting on the futex
221 * @lock_ptr: the hash bucket lock
222 * @key: the key the futex is hashed on
223 * @pi_state: optional priority inheritance state
224 * @rt_waiter: rt_waiter storage for use with requeue_pi
225 * @requeue_pi_key: the requeue_pi target futex key
226 * @bitset: bitset for the optional bitmasked wakeup
227 *
Ingo Molnarac6424b2017-06-20 12:06:13 +0200228 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229 * we can wake only the relevant ones (hashed queues may be shared).
230 *
231 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
Pierre Peifferec92d082007-05-09 02:35:00 -0700232 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
Randy Dunlapfb62db22010-10-13 11:02:34 -0700233 * The order of wakeup is always to make the first condition true, then
Darren Hartd8d88fb2009-09-21 22:30:30 -0700234 * the second.
235 *
236 * PI futexes are typically woken before they are removed from the hash list via
237 * the rt_mutex code. See unqueue_me_pi().
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238 */
239struct futex_q {
Pierre Peifferec92d082007-05-09 02:35:00 -0700240 struct plist_node list;
Darren Hartd8d88fb2009-09-21 22:30:30 -0700241
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +0200242 struct task_struct *task;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 spinlock_t *lock_ptr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 union futex_key key;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700245 struct futex_pi_state *pi_state;
Darren Hart52400ba2009-04-03 13:40:49 -0700246 struct rt_mutex_waiter *rt_waiter;
Darren Hart84bc4af2009-08-13 17:36:53 -0700247 union futex_key *requeue_pi_key;
Thomas Gleixnercd689982008-02-01 17:45:14 +0100248 u32 bitset;
Kees Cook3859a272016-10-28 01:22:25 -0700249} __randomize_layout;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250
Darren Hart5bdb05f2010-11-08 13:40:28 -0800251static const struct futex_q futex_q_init = {
252 /* list gets initialized in queue_me()*/
253 .key = FUTEX_KEY_INIT,
254 .bitset = FUTEX_BITSET_MATCH_ANY
255};
256
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257/*
Darren Hartb2d09942009-03-12 00:55:37 -0700258 * Hash buckets are shared by all the futex_keys that hash to the same
259 * location. Each key may have multiple futex_q structures, one for each task
260 * waiting on a futex.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 */
262struct futex_hash_bucket {
Linus Torvalds11d46162014-03-20 22:11:17 -0700263 atomic_t waiters;
Pierre Peifferec92d082007-05-09 02:35:00 -0700264 spinlock_t lock;
265 struct plist_head chain;
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800266} ____cacheline_aligned_in_smp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267
Rasmus Villemoesac742d32015-09-09 23:36:40 +0200268/*
269 * The base of the bucket array and its size are always used together
270 * (after initialization only in hash_futex()), so ensure that they
271 * reside in the same cacheline.
272 */
273static struct {
274 struct futex_hash_bucket *queues;
275 unsigned long hashsize;
276} __futex_data __read_mostly __aligned(2*sizeof(long));
277#define futex_queues (__futex_data.queues)
278#define futex_hashsize (__futex_data.hashsize)
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700281/*
282 * Fault injections for futexes.
283 */
284#ifdef CONFIG_FAIL_FUTEX
285
286static struct {
287 struct fault_attr attr;
288
Viresh Kumar621a5f72015-09-26 15:04:07 -0700289 bool ignore_private;
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700290} fail_futex = {
291 .attr = FAULT_ATTR_INITIALIZER,
Viresh Kumar621a5f72015-09-26 15:04:07 -0700292 .ignore_private = false,
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700293};
294
295static int __init setup_fail_futex(char *str)
296{
297 return setup_fault_attr(&fail_futex.attr, str);
298}
299__setup("fail_futex=", setup_fail_futex);
300
kbuild test robot5d285a72015-07-21 01:40:45 +0800301static bool should_fail_futex(bool fshared)
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700302{
303 if (fail_futex.ignore_private && !fshared)
304 return false;
305
306 return should_fail(&fail_futex.attr, 1);
307}
308
309#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
310
311static int __init fail_futex_debugfs(void)
312{
313 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
314 struct dentry *dir;
315
316 dir = fault_create_debugfs_attr("fail_futex", NULL,
317 &fail_futex.attr);
318 if (IS_ERR(dir))
319 return PTR_ERR(dir);
320
321 if (!debugfs_create_bool("ignore-private", mode, dir,
322 &fail_futex.ignore_private)) {
323 debugfs_remove_recursive(dir);
324 return -ENOMEM;
325 }
326
327 return 0;
328}
329
330late_initcall(fail_futex_debugfs);
331
332#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
333
334#else
335static inline bool should_fail_futex(bool fshared)
336{
337 return false;
338}
339#endif /* CONFIG_FAIL_FUTEX */
340
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800341static inline void futex_get_mm(union futex_key *key)
342{
Vegard Nossumf1f10072017-02-27 14:30:07 -0800343 mmgrab(key->private.mm);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800344 /*
345 * Ensure futex_get_mm() implies a full barrier such that
346 * get_futex_key() implies a full barrier. This is relied upon
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800347 * as smp_mb(); (B), see the ordering comment above.
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800348 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100349 smp_mb__after_atomic();
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800350}
351
Linus Torvalds11d46162014-03-20 22:11:17 -0700352/*
353 * Reflects a new waiter being added to the waitqueue.
354 */
355static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800356{
357#ifdef CONFIG_SMP
Linus Torvalds11d46162014-03-20 22:11:17 -0700358 atomic_inc(&hb->waiters);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800359 /*
Linus Torvalds11d46162014-03-20 22:11:17 -0700360 * Full barrier (A), see the ordering comment above.
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800361 */
Peter Zijlstra4e857c52014-03-17 18:06:10 +0100362 smp_mb__after_atomic();
Linus Torvalds11d46162014-03-20 22:11:17 -0700363#endif
364}
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800365
Linus Torvalds11d46162014-03-20 22:11:17 -0700366/*
367 * Reflects a waiter being removed from the waitqueue by wakeup
368 * paths.
369 */
370static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
371{
372#ifdef CONFIG_SMP
373 atomic_dec(&hb->waiters);
374#endif
375}
376
377static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
378{
379#ifdef CONFIG_SMP
380 return atomic_read(&hb->waiters);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800381#else
Linus Torvalds11d46162014-03-20 22:11:17 -0700382 return 1;
Davidlohr Buesob0c29f72014-01-12 15:31:25 -0800383#endif
384}
385
Thomas Gleixnere8b61b32016-06-01 10:43:29 +0200386/**
387 * hash_futex - Return the hash bucket in the global hash
388 * @key: Pointer to the futex key for which the hash is calculated
389 *
390 * We hash on the keys returned from get_futex_key (see below) and return the
391 * corresponding hash bucket in the global hash.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392 */
393static struct futex_hash_bucket *hash_futex(union futex_key *key)
394{
395 u32 hash = jhash2((u32*)&key->both.word,
396 (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
397 key->both.offset);
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -0800398 return &futex_queues[hash & (futex_hashsize - 1)];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399}
400
Thomas Gleixnere8b61b32016-06-01 10:43:29 +0200401
402/**
403 * match_futex - Check whether two futex keys are equal
404 * @key1: Pointer to key1
405 * @key2: Pointer to key2
406 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 * Return 1 if two futex_keys are equal, 0 otherwise.
408 */
409static inline int match_futex(union futex_key *key1, union futex_key *key2)
410{
Darren Hart2bc87202009-10-14 10:12:39 -0700411 return (key1 && key2
412 && key1->both.word == key2->both.word
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 && key1->both.ptr == key2->both.ptr
414 && key1->both.offset == key2->both.offset);
415}
416
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200417/*
418 * Take a reference to the resource addressed by a key.
419 * Can be called while holding spinlocks.
420 *
421 */
422static void get_futex_key_refs(union futex_key *key)
423{
424 if (!key->both.ptr)
425 return;
426
Thomas Gleixner784bdf32016-07-29 16:32:30 +0200427 /*
428 * On MMU less systems futexes are always "private" as there is no per
429 * process address space. We need the smp wmb nevertheless - yes,
430 * arch/blackfin has MMU less SMP ...
431 */
432 if (!IS_ENABLED(CONFIG_MMU)) {
433 smp_mb(); /* explicit smp_mb(); (B) */
434 return;
435 }
436
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200437 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
438 case FUT_OFF_INODE:
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800439 ihold(key->shared.inode); /* implies smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200440 break;
441 case FUT_OFF_MMSHARED:
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800442 futex_get_mm(key); /* implies smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200443 break;
Catalin Marinas76835b0e2014-10-17 17:38:49 +0100444 default:
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700445 /*
446 * Private futexes do not hold reference on an inode or
447 * mm, therefore the only purpose of calling get_futex_key_refs
448 * is because we need the barrier for the lockless waiter check.
449 */
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800450 smp_mb(); /* explicit smp_mb(); (B) */
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200451 }
452}
453
454/*
455 * Drop a reference to the resource addressed by a key.
Davidlohr Bueso993b2ff2014-10-23 20:27:00 -0700456 * The hash bucket spinlock must not be held. This is
457 * a no-op for private futexes, see comment in the get
458 * counterpart.
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200459 */
460static void drop_futex_key_refs(union futex_key *key)
461{
Darren Hart90621c42008-12-29 19:43:21 -0800462 if (!key->both.ptr) {
463 /* If we're here then we tried to put a key we failed to get */
464 WARN_ON_ONCE(1);
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200465 return;
Darren Hart90621c42008-12-29 19:43:21 -0800466 }
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200467
Thomas Gleixner784bdf32016-07-29 16:32:30 +0200468 if (!IS_ENABLED(CONFIG_MMU))
469 return;
470
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200471 switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
472 case FUT_OFF_INODE:
473 iput(key->shared.inode);
474 break;
475 case FUT_OFF_MMSHARED:
476 mmdrop(key->private.mm);
477 break;
478 }
479}
480
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700481/**
Darren Hartd96ee562009-09-21 22:30:22 -0700482 * get_futex_key() - Get parameters which are the keys for a futex
483 * @uaddr: virtual address of the futex
484 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
485 * @key: address where result is stored.
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500486 * @rw: mapping needs to be read/write (values: VERIFY_READ,
487 * VERIFY_WRITE)
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700488 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -0800489 * Return: a negative error code or 0
490 *
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -0300491 * The key words are stored in @key on success.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 *
Al Viro6131ffa2013-02-27 16:59:05 -0500493 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494 * offset_within_page). For private mappings, it's (uaddr, current->mm).
495 * We can usually work out the index without swapping in the page.
496 *
Darren Hartb2d09942009-03-12 00:55:37 -0700497 * lock_page() might sleep, the caller should not hold a spinlock.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700498 */
Thomas Gleixner64d13042009-05-18 21:20:10 +0200499static int
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500500get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501{
Ingo Molnare2970f22006-06-27 02:54:47 -0700502 unsigned long address = (unsigned long)uaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503 struct mm_struct *mm = current->mm;
Mel Gorman077fa7a2016-06-08 14:25:22 +0100504 struct page *page, *tail;
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800505 struct address_space *mapping;
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500506 int err, ro = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507
508 /*
509 * The futex address must be "naturally" aligned.
510 */
Ingo Molnare2970f22006-06-27 02:54:47 -0700511 key->both.offset = address % PAGE_SIZE;
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700512 if (unlikely((address % sizeof(u32)) != 0))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 return -EINVAL;
Ingo Molnare2970f22006-06-27 02:54:47 -0700514 address -= key->both.offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515
Linus Torvalds5cdec2d2013-12-12 09:53:51 -0800516 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
517 return -EFAULT;
518
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700519 if (unlikely(should_fail_futex(fshared)))
520 return -EFAULT;
521
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 /*
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700523 * PROCESS_PRIVATE futexes are fast.
524 * As the mm cannot disappear under us and the 'key' only needs
525 * virtual address, we dont even have to find the underlying vma.
526 * Note : We do have to check 'uaddr' is a valid user address,
527 * but access_ok() should be faster than find_vma()
528 */
529 if (!fshared) {
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700530 key->private.mm = mm;
531 key->private.address = address;
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -0800532 get_futex_key_refs(key); /* implies smp_mb(); (B) */
Eric Dumazet34f01cc2007-05-09 02:35:04 -0700533 return 0;
534 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200536again:
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700537 /* Ignore any VERIFY_READ mapping (futex common case) */
538 if (unlikely(should_fail_futex(fshared)))
539 return -EFAULT;
540
KOSAKI Motohiro7485d0d2010-01-05 16:32:43 +0900541 err = get_user_pages_fast(address, 1, 1, &page);
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500542 /*
543 * If write access is not required (eg. FUTEX_WAIT), try
544 * and get read-only access.
545 */
546 if (err == -EFAULT && rw == VERIFY_READ) {
547 err = get_user_pages_fast(address, 1, 0, &page);
548 ro = 1;
549 }
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200550 if (err < 0)
551 return err;
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500552 else
553 err = 0;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200554
Mel Gorman65d8fc72016-02-09 11:15:14 -0800555 /*
556 * The treatment of mapping from this point on is critical. The page
557 * lock protects many things but in this context the page lock
558 * stabilizes mapping, prevents inode freeing in the shared
559 * file-backed region case and guards against movement to swap cache.
560 *
561 * Strictly speaking the page lock is not needed in all cases being
562 * considered here and page lock forces unnecessarily serialization
563 * From this point on, mapping will be re-verified if necessary and
564 * page lock will be acquired only if it is unavoidable
Mel Gorman077fa7a2016-06-08 14:25:22 +0100565 *
566 * Mapping checks require the head page for any compound page so the
567 * head page and mapping is looked up now. For anonymous pages, it
568 * does not matter if the page splits in the future as the key is
569 * based on the address. For filesystem-backed pages, the tail is
570 * required as the index of the page determines the key. For
571 * base pages, there is no tail page and tail == page.
Mel Gorman65d8fc72016-02-09 11:15:14 -0800572 */
Mel Gorman077fa7a2016-06-08 14:25:22 +0100573 tail = page;
Mel Gorman65d8fc72016-02-09 11:15:14 -0800574 page = compound_head(page);
575 mapping = READ_ONCE(page->mapping);
576
Hugh Dickinse6780f72011-12-31 11:44:01 -0800577 /*
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800578 * If page->mapping is NULL, then it cannot be a PageAnon
Hugh Dickinse6780f72011-12-31 11:44:01 -0800579 * page; but it might be the ZERO_PAGE or in the gate area or
580 * in a special mapping (all cases which we are happy to fail);
581 * or it may have been a good file page when get_user_pages_fast
582 * found it, but truncated or holepunched or subjected to
583 * invalidate_complete_page2 before we got the page lock (also
584 * cases which we are happy to fail). And we hold a reference,
585 * so refcount care in invalidate_complete_page's remove_mapping
586 * prevents drop_caches from setting mapping to NULL beneath us.
587 *
588 * The case we do have to guard against is when memory pressure made
589 * shmem_writepage move it from filecache to swapcache beneath us:
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800590 * an unlikely race, but we do need to retry for page->mapping.
Hugh Dickinse6780f72011-12-31 11:44:01 -0800591 */
Mel Gorman65d8fc72016-02-09 11:15:14 -0800592 if (unlikely(!mapping)) {
593 int shmem_swizzled;
594
595 /*
596 * Page lock is required to identify which special case above
597 * applies. If this is really a shmem page then the page lock
598 * will prevent unexpected transitions.
599 */
600 lock_page(page);
601 shmem_swizzled = PageSwapCache(page) || page->mapping;
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800602 unlock_page(page);
603 put_page(page);
Mel Gorman65d8fc72016-02-09 11:15:14 -0800604
Hugh Dickinse6780f72011-12-31 11:44:01 -0800605 if (shmem_swizzled)
606 goto again;
Mel Gorman65d8fc72016-02-09 11:15:14 -0800607
Hugh Dickinse6780f72011-12-31 11:44:01 -0800608 return -EFAULT;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610
611 /*
612 * Private mappings are handled in a simple way.
613 *
Mel Gorman65d8fc72016-02-09 11:15:14 -0800614 * If the futex key is stored on an anonymous page, then the associated
615 * object is the mm which is implicitly pinned by the calling process.
616 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
618 * it's a read-only handle, it's expected that futexes attach to
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200619 * the object not the particular process.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 */
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800621 if (PageAnon(page)) {
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500622 /*
623 * A RO anonymous page will never change and thus doesn't make
624 * sense for futex operations.
625 */
Davidlohr Buesoab51fba2015-06-29 23:26:02 -0700626 if (unlikely(should_fail_futex(fshared)) || ro) {
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500627 err = -EFAULT;
628 goto out;
629 }
630
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200631 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 key->private.mm = mm;
Ingo Molnare2970f22006-06-27 02:54:47 -0700633 key->private.address = address;
Mel Gorman65d8fc72016-02-09 11:15:14 -0800634
635 get_futex_key_refs(key); /* implies smp_mb(); (B) */
636
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200637 } else {
Mel Gorman65d8fc72016-02-09 11:15:14 -0800638 struct inode *inode;
639
640 /*
641 * The associated futex object in this case is the inode and
642 * the page->mapping must be traversed. Ordinarily this should
643 * be stabilised under page lock but it's not strictly
644 * necessary in this case as we just want to pin the inode, not
645 * update the radix tree or anything like that.
646 *
647 * The RCU read lock is taken as the inode is finally freed
648 * under RCU. If the mapping still matches expectations then the
649 * mapping->host can be safely accessed as being a valid inode.
650 */
651 rcu_read_lock();
652
653 if (READ_ONCE(page->mapping) != mapping) {
654 rcu_read_unlock();
655 put_page(page);
656
657 goto again;
658 }
659
660 inode = READ_ONCE(mapping->host);
661 if (!inode) {
662 rcu_read_unlock();
663 put_page(page);
664
665 goto again;
666 }
667
668 /*
669 * Take a reference unless it is about to be freed. Previously
670 * this reference was taken by ihold under the page lock
671 * pinning the inode in place so i_lock was unnecessary. The
672 * only way for this check to fail is if the inode was
673 * truncated in parallel so warn for now if this happens.
674 *
675 * We are not calling into get_futex_key_refs() in file-backed
676 * cases, therefore a successful atomic_inc return below will
677 * guarantee that get_futex_key() will still imply smp_mb(); (B).
678 */
679 if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
680 rcu_read_unlock();
681 put_page(page);
682
683 goto again;
684 }
685
686 /* Should be impossible but lets be paranoid for now */
687 if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
688 err = -EFAULT;
689 rcu_read_unlock();
690 iput(inode);
691
692 goto out;
693 }
694
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200695 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
Mel Gorman65d8fc72016-02-09 11:15:14 -0800696 key->shared.inode = inode;
Mel Gorman077fa7a2016-06-08 14:25:22 +0100697 key->shared.pgoff = basepage_index(tail);
Mel Gorman65d8fc72016-02-09 11:15:14 -0800698 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699 }
700
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500701out:
Kirill A. Shutemov14d27ab2016-01-15 16:53:00 -0800702 put_page(page);
Shawn Bohrer9ea71502011-06-30 11:21:32 -0500703 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704}
705
Thomas Gleixnerae791a22010-11-10 13:30:36 +0100706static inline void put_futex_key(union futex_key *key)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707{
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200708 drop_futex_key_refs(key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709}
710
Darren Hartd96ee562009-09-21 22:30:22 -0700711/**
712 * fault_in_user_writeable() - Fault in user address and verify RW access
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200713 * @uaddr: pointer to faulting user space address
714 *
715 * Slow path to fixup the fault we just took in the atomic write
716 * access to @uaddr.
717 *
Randy Dunlapfb62db22010-10-13 11:02:34 -0700718 * We have no generic implementation of a non-destructive write to the
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200719 * user address. We know that we faulted in the atomic pagefault
720 * disabled section so we can as well avoid the #PF overhead by
721 * calling get_user_pages() right away.
722 */
723static int fault_in_user_writeable(u32 __user *uaddr)
724{
Andi Kleen722d0172009-12-08 13:19:42 +0100725 struct mm_struct *mm = current->mm;
726 int ret;
727
728 down_read(&mm->mmap_sem);
Benjamin Herrenschmidt2efaca92011-07-25 17:12:32 -0700729 ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
Dominik Dingel4a9e1cd2016-01-15 16:57:04 -0800730 FAULT_FLAG_WRITE, NULL);
Andi Kleen722d0172009-12-08 13:19:42 +0100731 up_read(&mm->mmap_sem);
732
Thomas Gleixnerd0725992009-06-11 23:15:43 +0200733 return ret < 0 ? ret : 0;
734}
735
Darren Hart4b1c4862009-04-03 13:39:42 -0700736/**
737 * futex_top_waiter() - Return the highest priority waiter on a futex
Darren Hartd96ee562009-09-21 22:30:22 -0700738 * @hb: the hash bucket the futex_q's reside in
739 * @key: the futex key (to distinguish it from other futex futex_q's)
Darren Hart4b1c4862009-04-03 13:39:42 -0700740 *
741 * Must be called with the hb lock held.
742 */
743static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
744 union futex_key *key)
745{
746 struct futex_q *this;
747
748 plist_for_each_entry(this, &hb->chain, list) {
749 if (match_futex(&this->key, key))
750 return this;
751 }
752 return NULL;
753}
754
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800755static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
756 u32 uval, u32 newval)
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700757{
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800758 int ret;
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700759
760 pagefault_disable();
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800761 ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700762 pagefault_enable();
763
Michel Lespinasse37a9d912011-03-10 18:48:51 -0800764 return ret;
Thomas Gleixner36cf3b52007-07-15 23:41:20 -0700765}
766
767static int get_futex_value_locked(u32 *dest, u32 __user *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768{
769 int ret;
770
Peter Zijlstraa8663742006-12-06 20:32:20 -0800771 pagefault_disable();
Linus Torvaldsbd28b142016-05-22 17:21:27 -0700772 ret = __get_user(*dest, from);
Peter Zijlstraa8663742006-12-06 20:32:20 -0800773 pagefault_enable();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774
775 return ret ? -EFAULT : 0;
776}
777
Ingo Molnarc87e2832006-06-27 02:54:58 -0700778
779/*
780 * PI code:
781 */
782static int refill_pi_state_cache(void)
783{
784 struct futex_pi_state *pi_state;
785
786 if (likely(current->pi_state_cache))
787 return 0;
788
Burman Yan4668edc2006-12-06 20:38:51 -0800789 pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700790
791 if (!pi_state)
792 return -ENOMEM;
793
Ingo Molnarc87e2832006-06-27 02:54:58 -0700794 INIT_LIST_HEAD(&pi_state->list);
795 /* pi_mutex gets initialized later */
796 pi_state->owner = NULL;
797 atomic_set(&pi_state->refcount, 1);
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200798 pi_state->key = FUTEX_KEY_INIT;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700799
800 current->pi_state_cache = pi_state;
801
802 return 0;
803}
804
Peter Zijlstrabf92cf32017-03-22 11:35:53 +0100805static struct futex_pi_state *alloc_pi_state(void)
Ingo Molnarc87e2832006-06-27 02:54:58 -0700806{
807 struct futex_pi_state *pi_state = current->pi_state_cache;
808
809 WARN_ON(!pi_state);
810 current->pi_state_cache = NULL;
811
812 return pi_state;
813}
814
Peter Zijlstrabf92cf32017-03-22 11:35:53 +0100815static void get_pi_state(struct futex_pi_state *pi_state)
816{
817 WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount));
818}
819
Brian Silverman30a6b802014-10-25 20:20:37 -0400820/*
Thomas Gleixner29e9ee52015-12-19 20:07:39 +0000821 * Drops a reference to the pi_state object and frees or caches it
822 * when the last reference is gone.
823 *
Brian Silverman30a6b802014-10-25 20:20:37 -0400824 * Must be called with the hb lock held.
825 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +0000826static void put_pi_state(struct futex_pi_state *pi_state)
Ingo Molnarc87e2832006-06-27 02:54:58 -0700827{
Brian Silverman30a6b802014-10-25 20:20:37 -0400828 if (!pi_state)
829 return;
830
Ingo Molnarc87e2832006-06-27 02:54:58 -0700831 if (!atomic_dec_and_test(&pi_state->refcount))
832 return;
833
834 /*
835 * If pi_state->owner is NULL, the owner is most probably dying
836 * and has cleaned up the pi_state already
837 */
838 if (pi_state->owner) {
Thomas Gleixner1d615482009-11-17 14:54:03 +0100839 raw_spin_lock_irq(&pi_state->owner->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700840 list_del_init(&pi_state->list);
Thomas Gleixner1d615482009-11-17 14:54:03 +0100841 raw_spin_unlock_irq(&pi_state->owner->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700842
843 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
844 }
845
846 if (current->pi_state_cache)
847 kfree(pi_state);
848 else {
849 /*
850 * pi_state->list is already empty.
851 * clear pi_state->owner.
852 * refcount is at 0 - put it back to 1.
853 */
854 pi_state->owner = NULL;
855 atomic_set(&pi_state->refcount, 1);
856 current->pi_state_cache = pi_state;
857 }
858}
859
860/*
861 * Look up the task based on what TID userspace gave us.
862 * We dont trust it.
863 */
Peter Zijlstrabf92cf32017-03-22 11:35:53 +0100864static struct task_struct *futex_find_get_task(pid_t pid)
Ingo Molnarc87e2832006-06-27 02:54:58 -0700865{
866 struct task_struct *p;
867
Oleg Nesterovd359b542006-09-29 02:00:55 -0700868 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -0700869 p = find_task_by_vpid(pid);
Michal Hocko7a0ea092010-06-30 09:51:19 +0200870 if (p)
871 get_task_struct(p);
Thomas Gleixnera06381f2007-06-23 11:48:40 +0200872
Oleg Nesterovd359b542006-09-29 02:00:55 -0700873 rcu_read_unlock();
Ingo Molnarc87e2832006-06-27 02:54:58 -0700874
875 return p;
876}
877
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -0400878#ifdef CONFIG_FUTEX_PI
879
Ingo Molnarc87e2832006-06-27 02:54:58 -0700880/*
881 * This task is holding PI mutexes at exit time => bad.
882 * Kernel cleans up PI-state, but userspace is likely hosed.
883 * (Robust-futex cleanup is separate and might save the day for userspace.)
884 */
885void exit_pi_state_list(struct task_struct *curr)
886{
Ingo Molnarc87e2832006-06-27 02:54:58 -0700887 struct list_head *next, *head = &curr->pi_state_list;
888 struct futex_pi_state *pi_state;
Ingo Molnar627371d2006-07-29 05:16:20 +0200889 struct futex_hash_bucket *hb;
Peter Zijlstra38d47c12008-09-26 19:32:20 +0200890 union futex_key key = FUTEX_KEY_INIT;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700891
Thomas Gleixnera0c1e902008-02-23 15:23:57 -0800892 if (!futex_cmpxchg_enabled)
893 return;
Ingo Molnarc87e2832006-06-27 02:54:58 -0700894 /*
895 * We are a ZOMBIE and nobody can enqueue itself on
896 * pi_state_list anymore, but we have to be careful
Ingo Molnar627371d2006-07-29 05:16:20 +0200897 * versus waiters unqueueing themselves:
Ingo Molnarc87e2832006-06-27 02:54:58 -0700898 */
Thomas Gleixner1d615482009-11-17 14:54:03 +0100899 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700900 while (!list_empty(head)) {
901
902 next = head->next;
903 pi_state = list_entry(next, struct futex_pi_state, list);
904 key = pi_state->key;
Ingo Molnar627371d2006-07-29 05:16:20 +0200905 hb = hash_futex(&key);
Thomas Gleixner1d615482009-11-17 14:54:03 +0100906 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700907
Ingo Molnarc87e2832006-06-27 02:54:58 -0700908 spin_lock(&hb->lock);
909
Thomas Gleixner1d615482009-11-17 14:54:03 +0100910 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +0200911 /*
912 * We dropped the pi-lock, so re-check whether this
913 * task still owns the PI-state:
914 */
Ingo Molnarc87e2832006-06-27 02:54:58 -0700915 if (head->next != next) {
916 spin_unlock(&hb->lock);
917 continue;
918 }
919
Ingo Molnarc87e2832006-06-27 02:54:58 -0700920 WARN_ON(pi_state->owner != curr);
Ingo Molnar627371d2006-07-29 05:16:20 +0200921 WARN_ON(list_empty(&pi_state->list));
922 list_del_init(&pi_state->list);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700923 pi_state->owner = NULL;
Thomas Gleixner1d615482009-11-17 14:54:03 +0100924 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700925
Peter Zijlstra16ffa122017-03-22 11:35:55 +0100926 get_pi_state(pi_state);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700927 spin_unlock(&hb->lock);
928
Peter Zijlstra16ffa122017-03-22 11:35:55 +0100929 rt_mutex_futex_unlock(&pi_state->pi_mutex);
930 put_pi_state(pi_state);
931
Thomas Gleixner1d615482009-11-17 14:54:03 +0100932 raw_spin_lock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700933 }
Thomas Gleixner1d615482009-11-17 14:54:03 +0100934 raw_spin_unlock_irq(&curr->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -0700935}
936
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -0400937#endif
938
Thomas Gleixner54a21782014-06-03 12:27:08 +0000939/*
940 * We need to check the following states:
941 *
942 * Waiter | pi_state | pi->owner | uTID | uODIED | ?
943 *
944 * [1] NULL | --- | --- | 0 | 0/1 | Valid
945 * [2] NULL | --- | --- | >0 | 0/1 | Valid
946 *
947 * [3] Found | NULL | -- | Any | 0/1 | Invalid
948 *
949 * [4] Found | Found | NULL | 0 | 1 | Valid
950 * [5] Found | Found | NULL | >0 | 1 | Invalid
951 *
952 * [6] Found | Found | task | 0 | 1 | Valid
953 *
954 * [7] Found | Found | NULL | Any | 0 | Invalid
955 *
956 * [8] Found | Found | task | ==taskTID | 0/1 | Valid
957 * [9] Found | Found | task | 0 | 0 | Invalid
958 * [10] Found | Found | task | !=taskTID | 0/1 | Invalid
959 *
960 * [1] Indicates that the kernel can acquire the futex atomically. We
961 * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
962 *
963 * [2] Valid, if TID does not belong to a kernel thread. If no matching
964 * thread is found then it indicates that the owner TID has died.
965 *
966 * [3] Invalid. The waiter is queued on a non PI futex
967 *
968 * [4] Valid state after exit_robust_list(), which sets the user space
969 * value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
970 *
971 * [5] The user space value got manipulated between exit_robust_list()
972 * and exit_pi_state_list()
973 *
974 * [6] Valid state after exit_pi_state_list() which sets the new owner in
975 * the pi_state but cannot access the user space value.
976 *
977 * [7] pi_state->owner can only be NULL when the OWNER_DIED bit is set.
978 *
979 * [8] Owner and user space value match
980 *
981 * [9] There is no transient state which sets the user space TID to 0
982 * except exit_robust_list(), but this is indicated by the
983 * FUTEX_OWNER_DIED bit. See [4]
984 *
985 * [10] There is no transient state which leaves owner and user space
986 * TID out of sync.
Peter Zijlstra734009e2017-03-22 11:35:52 +0100987 *
988 *
989 * Serialization and lifetime rules:
990 *
991 * hb->lock:
992 *
993 * hb -> futex_q, relation
994 * futex_q -> pi_state, relation
995 *
996 * (cannot be raw because hb can contain arbitrary amount
997 * of futex_q's)
998 *
999 * pi_mutex->wait_lock:
1000 *
1001 * {uval, pi_state}
1002 *
1003 * (and pi_mutex 'obviously')
1004 *
1005 * p->pi_lock:
1006 *
1007 * p->pi_state_list -> pi_state->list, relation
1008 *
1009 * pi_state->refcount:
1010 *
1011 * pi_state lifetime
1012 *
1013 *
1014 * Lock order:
1015 *
1016 * hb->lock
1017 * pi_mutex->wait_lock
1018 * p->pi_lock
1019 *
Thomas Gleixner54a21782014-06-03 12:27:08 +00001020 */
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001021
1022/*
1023 * Validate that the existing waiter has a pi_state and sanity check
1024 * the pi_state against the user space value. If correct, attach to
1025 * it.
1026 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01001027static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
1028 struct futex_pi_state *pi_state,
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001029 struct futex_pi_state **ps)
1030{
1031 pid_t pid = uval & FUTEX_TID_MASK;
Peter Zijlstra94ffac52017-04-07 09:04:07 +02001032 u32 uval2;
1033 int ret;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001034
1035 /*
1036 * Userspace might have messed up non-PI and PI futexes [3]
1037 */
1038 if (unlikely(!pi_state))
1039 return -EINVAL;
1040
Peter Zijlstra734009e2017-03-22 11:35:52 +01001041 /*
1042 * We get here with hb->lock held, and having found a
1043 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
1044 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
1045 * which in turn means that futex_lock_pi() still has a reference on
1046 * our pi_state.
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001047 *
1048 * The waiter holding a reference on @pi_state also protects against
1049 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
1050 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
1051 * free pi_state before we can take a reference ourselves.
Peter Zijlstra734009e2017-03-22 11:35:52 +01001052 */
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001053 WARN_ON(!atomic_read(&pi_state->refcount));
1054
1055 /*
Peter Zijlstra734009e2017-03-22 11:35:52 +01001056 * Now that we have a pi_state, we can acquire wait_lock
1057 * and do the state validation.
1058 */
1059 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
1060
1061 /*
1062 * Since {uval, pi_state} is serialized by wait_lock, and our current
1063 * uval was read without holding it, it can have changed. Verify it
1064 * still is what we expect it to be, otherwise retry the entire
1065 * operation.
1066 */
1067 if (get_futex_value_locked(&uval2, uaddr))
1068 goto out_efault;
1069
1070 if (uval != uval2)
1071 goto out_eagain;
1072
1073 /*
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001074 * Handle the owner died case:
1075 */
1076 if (uval & FUTEX_OWNER_DIED) {
1077 /*
1078 * exit_pi_state_list sets owner to NULL and wakes the
1079 * topmost waiter. The task which acquires the
1080 * pi_state->rt_mutex will fixup owner.
1081 */
1082 if (!pi_state->owner) {
1083 /*
1084 * No pi state owner, but the user space TID
1085 * is not 0. Inconsistent state. [5]
1086 */
1087 if (pid)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001088 goto out_einval;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001089 /*
1090 * Take a ref on the state and return success. [4]
1091 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01001092 goto out_attach;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001093 }
1094
1095 /*
1096 * If TID is 0, then either the dying owner has not
1097 * yet executed exit_pi_state_list() or some waiter
1098 * acquired the rtmutex in the pi state, but did not
1099 * yet fixup the TID in user space.
1100 *
1101 * Take a ref on the state and return success. [6]
1102 */
1103 if (!pid)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001104 goto out_attach;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001105 } else {
1106 /*
1107 * If the owner died bit is not set, then the pi_state
1108 * must have an owner. [7]
1109 */
1110 if (!pi_state->owner)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001111 goto out_einval;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001112 }
1113
1114 /*
1115 * Bail out if user space manipulated the futex value. If pi
1116 * state exists then the owner TID must be the same as the
1117 * user space TID. [9/10]
1118 */
1119 if (pid != task_pid_vnr(pi_state->owner))
Peter Zijlstra734009e2017-03-22 11:35:52 +01001120 goto out_einval;
1121
1122out_attach:
Peter Zijlstrabf92cf32017-03-22 11:35:53 +01001123 get_pi_state(pi_state);
Peter Zijlstra734009e2017-03-22 11:35:52 +01001124 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001125 *ps = pi_state;
1126 return 0;
Peter Zijlstra734009e2017-03-22 11:35:52 +01001127
1128out_einval:
1129 ret = -EINVAL;
1130 goto out_error;
1131
1132out_eagain:
1133 ret = -EAGAIN;
1134 goto out_error;
1135
1136out_efault:
1137 ret = -EFAULT;
1138 goto out_error;
1139
1140out_error:
1141 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1142 return ret;
Thomas Gleixnere60cbc52014-06-11 20:45:39 +00001143}
1144
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001145/*
1146 * Lookup the task for the TID provided from user space and attach to
1147 * it after doing proper sanity checks.
1148 */
1149static int attach_to_pi_owner(u32 uval, union futex_key *key,
1150 struct futex_pi_state **ps)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001151{
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001152 pid_t pid = uval & FUTEX_TID_MASK;
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001153 struct futex_pi_state *pi_state;
1154 struct task_struct *p;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001155
1156 /*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001157 * We are the first waiter - try to look up the real owner and attach
Thomas Gleixner54a21782014-06-03 12:27:08 +00001158 * the new pi_state to it, but bail out when TID = 0 [1]
Ingo Molnarc87e2832006-06-27 02:54:58 -07001159 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001160 if (!pid)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02001161 return -ESRCH;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001162 p = futex_find_get_task(pid);
Michal Hocko7a0ea092010-06-30 09:51:19 +02001163 if (!p)
1164 return -ESRCH;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001165
Oleg Nesterova2129462015-02-02 15:05:36 +01001166 if (unlikely(p->flags & PF_KTHREAD)) {
Thomas Gleixnerf0d71b32014-05-12 20:45:35 +00001167 put_task_struct(p);
1168 return -EPERM;
1169 }
1170
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001171 /*
1172 * We need to look at the task state flags to figure out,
1173 * whether the task is exiting. To protect against the do_exit
1174 * change of the task flags, we do this protected by
1175 * p->pi_lock:
1176 */
Thomas Gleixner1d615482009-11-17 14:54:03 +01001177 raw_spin_lock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001178 if (unlikely(p->flags & PF_EXITING)) {
1179 /*
1180 * The task is on the way out. When PF_EXITPIDONE is
1181 * set, we know that the task has finished the
1182 * cleanup:
1183 */
1184 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
1185
Thomas Gleixner1d615482009-11-17 14:54:03 +01001186 raw_spin_unlock_irq(&p->pi_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001187 put_task_struct(p);
1188 return ret;
1189 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001190
Thomas Gleixner54a21782014-06-03 12:27:08 +00001191 /*
1192 * No existing pi state. First waiter. [2]
Peter Zijlstra734009e2017-03-22 11:35:52 +01001193 *
1194 * This creates pi_state, we have hb->lock held, this means nothing can
1195 * observe this state, wait_lock is irrelevant.
Thomas Gleixner54a21782014-06-03 12:27:08 +00001196 */
Ingo Molnarc87e2832006-06-27 02:54:58 -07001197 pi_state = alloc_pi_state();
1198
1199 /*
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001200 * Initialize the pi_mutex in locked state and make @p
Ingo Molnarc87e2832006-06-27 02:54:58 -07001201 * the owner of it:
1202 */
1203 rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
1204
1205 /* Store the key for possible exit cleanups: */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001206 pi_state->key = *key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001207
Ingo Molnar627371d2006-07-29 05:16:20 +02001208 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001209 list_add(&pi_state->list, &p->pi_state_list);
1210 pi_state->owner = p;
Thomas Gleixner1d615482009-11-17 14:54:03 +01001211 raw_spin_unlock_irq(&p->pi_lock);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001212
1213 put_task_struct(p);
1214
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07001215 *ps = pi_state;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001216
1217 return 0;
1218}
1219
Peter Zijlstra734009e2017-03-22 11:35:52 +01001220static int lookup_pi_state(u32 __user *uaddr, u32 uval,
1221 struct futex_hash_bucket *hb,
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001222 union futex_key *key, struct futex_pi_state **ps)
1223{
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001224 struct futex_q *top_waiter = futex_top_waiter(hb, key);
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001225
1226 /*
1227 * If there is a waiter on that futex, validate it and
1228 * attach to the pi_state when the validation succeeds.
1229 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001230 if (top_waiter)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001231 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
Thomas Gleixner04e1b2e2014-06-11 20:45:40 +00001232
1233 /*
1234 * We are the first waiter - try to look up the owner based on
1235 * @uval and attach to it.
1236 */
1237 return attach_to_pi_owner(uval, key, ps);
1238}
1239
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001240static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
1241{
1242 u32 uninitialized_var(curval);
1243
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001244 if (unlikely(should_fail_futex(true)))
1245 return -EFAULT;
1246
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001247 if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
1248 return -EFAULT;
1249
Peter Zijlstra734009e2017-03-22 11:35:52 +01001250 /* If user space value changed, let the caller retry */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001251 return curval != uval ? -EAGAIN : 0;
1252}
1253
Darren Hart1a520842009-04-03 13:39:52 -07001254/**
Darren Hartd96ee562009-09-21 22:30:22 -07001255 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
Darren Hartbab5bc92009-04-07 23:23:50 -07001256 * @uaddr: the pi futex user address
1257 * @hb: the pi futex hash bucket
1258 * @key: the futex key associated with uaddr and hb
1259 * @ps: the pi_state pointer where we store the result of the
1260 * lookup
1261 * @task: the task to perform the atomic lock work for. This will
1262 * be "current" except in the case of requeue pi.
1263 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart1a520842009-04-03 13:39:52 -07001264 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001265 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001266 * - 0 - ready to wait;
1267 * - 1 - acquired the lock;
1268 * - <0 - error
Darren Hart1a520842009-04-03 13:39:52 -07001269 *
1270 * The hb->lock and futex_key refs shall be held by the caller.
1271 */
1272static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
1273 union futex_key *key,
1274 struct futex_pi_state **ps,
Darren Hartbab5bc92009-04-07 23:23:50 -07001275 struct task_struct *task, int set_waiters)
Darren Hart1a520842009-04-03 13:39:52 -07001276{
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001277 u32 uval, newval, vpid = task_pid_vnr(task);
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001278 struct futex_q *top_waiter;
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001279 int ret;
Darren Hart1a520842009-04-03 13:39:52 -07001280
1281 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001282 * Read the user space value first so we can validate a few
1283 * things before proceeding further.
Darren Hart1a520842009-04-03 13:39:52 -07001284 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001285 if (get_futex_value_locked(&uval, uaddr))
Darren Hart1a520842009-04-03 13:39:52 -07001286 return -EFAULT;
1287
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001288 if (unlikely(should_fail_futex(true)))
1289 return -EFAULT;
1290
Darren Hart1a520842009-04-03 13:39:52 -07001291 /*
1292 * Detect deadlocks.
1293 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001294 if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
Darren Hart1a520842009-04-03 13:39:52 -07001295 return -EDEADLK;
1296
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001297 if ((unlikely(should_fail_futex(true))))
1298 return -EDEADLK;
1299
Darren Hart1a520842009-04-03 13:39:52 -07001300 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001301 * Lookup existing state first. If it exists, try to attach to
1302 * its pi_state.
Darren Hart1a520842009-04-03 13:39:52 -07001303 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01001304 top_waiter = futex_top_waiter(hb, key);
1305 if (top_waiter)
Peter Zijlstra734009e2017-03-22 11:35:52 +01001306 return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001307
1308 /*
1309 * No waiter and user TID is 0. We are here because the
1310 * waiters or the owner died bit is set or called from
1311 * requeue_cmp_pi or for whatever reason something took the
1312 * syscall.
1313 */
1314 if (!(uval & FUTEX_TID_MASK)) {
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001315 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001316 * We take over the futex. No other waiters and the user space
1317 * TID is 0. We preserve the owner died bit.
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001318 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001319 newval = uval & FUTEX_OWNER_DIED;
1320 newval |= vpid;
1321
1322 /* The futex requeue_pi code can enforce the waiters bit */
1323 if (set_waiters)
1324 newval |= FUTEX_WAITERS;
1325
1326 ret = lock_pi_update_atomic(uaddr, uval, newval);
1327 /* If the take over worked, return 1 */
1328 return ret < 0 ? ret : 1;
Thomas Gleixnerb3eaa9f2014-06-03 12:27:06 +00001329 }
Darren Hart1a520842009-04-03 13:39:52 -07001330
Darren Hart1a520842009-04-03 13:39:52 -07001331 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001332 * First waiter. Set the waiters bit before attaching ourself to
1333 * the owner. If owner tries to unlock, it will be forced into
1334 * the kernel and blocked on hb->lock.
Darren Hart1a520842009-04-03 13:39:52 -07001335 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001336 newval = uval | FUTEX_WAITERS;
1337 ret = lock_pi_update_atomic(uaddr, uval, newval);
1338 if (ret)
1339 return ret;
Darren Hart1a520842009-04-03 13:39:52 -07001340 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001341 * If the update of the user space value succeeded, we try to
1342 * attach to the owner. If that fails, no harm done, we only
1343 * set the FUTEX_WAITERS bit in the user space variable.
Darren Hart1a520842009-04-03 13:39:52 -07001344 */
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001345 return attach_to_pi_owner(uval, key, ps);
Darren Hart1a520842009-04-03 13:39:52 -07001346}
1347
Lai Jiangshan2e129782010-12-22 14:18:50 +08001348/**
1349 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
1350 * @q: The futex_q to unqueue
1351 *
1352 * The q->lock_ptr must not be NULL and must be held by the caller.
1353 */
1354static void __unqueue_futex(struct futex_q *q)
1355{
1356 struct futex_hash_bucket *hb;
1357
Steven Rostedt29096202011-03-17 15:21:07 -04001358 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
1359 || WARN_ON(plist_node_empty(&q->list)))
Lai Jiangshan2e129782010-12-22 14:18:50 +08001360 return;
1361
1362 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
1363 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001364 hb_waiters_dec(hb);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001365}
1366
Ingo Molnarc87e2832006-06-27 02:54:58 -07001367/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 * The hash bucket lock must be held when this is called.
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001369 * Afterwards, the futex_q must not be accessed. Callers
1370 * must ensure to later call wake_up_q() for the actual
1371 * wakeups to occur.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001373static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374{
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001375 struct task_struct *p = q->task;
1376
Darren Hartaa109902012-11-26 16:29:56 -08001377 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
1378 return;
1379
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001380 /*
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001381 * Queue the task for later wakeup for after we've released
1382 * the hb->lock. wake_q_add() grabs reference to p.
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001383 */
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001384 wake_q_add(wake_q, p);
Lai Jiangshan2e129782010-12-22 14:18:50 +08001385 __unqueue_futex(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386 /*
Darren Hart (VMware)38fcd062017-04-14 15:31:38 -07001387 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
1388 * is written, without taking any locks. This is possible in the event
1389 * of a spurious wakeup, for example. A memory barrier is required here
1390 * to prevent the following store to lock_ptr from getting ahead of the
1391 * plist_del in __unqueue_futex().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392 */
Peter Zijlstra1b367ec2017-03-22 11:35:49 +01001393 smp_store_release(&q->lock_ptr, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394}
1395
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001396/*
1397 * Caller must hold a reference on @pi_state.
1398 */
1399static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
Ingo Molnarc87e2832006-06-27 02:54:58 -07001400{
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03001401 u32 uninitialized_var(curval), newval;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001402 struct task_struct *new_owner;
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001403 bool postunlock = false;
Waiman Long194a6b52016-11-17 11:46:38 -05001404 DEFINE_WAKE_Q(wake_q);
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001405 int ret = 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001406
Ingo Molnarc87e2832006-06-27 02:54:58 -07001407 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01001408 if (WARN_ON_ONCE(!new_owner)) {
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001409 /*
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01001410 * As per the comment in futex_unlock_pi() this should not happen.
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001411 *
1412 * When this happens, give up our locks and try again, giving
1413 * the futex_lock_pi() instance time to complete, either by
1414 * waiting on the rtmutex or removing itself from the futex
1415 * queue.
1416 */
1417 ret = -EAGAIN;
1418 goto out_unlock;
Peter Zijlstra73d786b2017-03-22 11:35:54 +01001419 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07001420
1421 /*
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001422 * We pass it to the next owner. The WAITERS bit is always kept
1423 * enabled while there is PI state around. We cleanup the owner
1424 * died bit, because we are the owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07001425 */
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001426 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07001427
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001428 if (unlikely(should_fail_futex(true)))
1429 ret = -EFAULT;
1430
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02001431 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
Thomas Gleixner13fbca42014-06-03 12:27:07 +00001432 ret = -EFAULT;
Peter Zijlstra734009e2017-03-22 11:35:52 +01001433
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02001434 } else if (curval != uval) {
1435 /*
1436 * If a unconditional UNLOCK_PI operation (user space did not
1437 * try the TID->0 transition) raced with a waiter setting the
1438 * FUTEX_WAITERS flag between get_user() and locking the hash
1439 * bucket lock, retry the operation.
1440 */
1441 if ((FUTEX_TID_MASK & curval) == uval)
1442 ret = -EAGAIN;
1443 else
1444 ret = -EINVAL;
1445 }
Peter Zijlstra734009e2017-03-22 11:35:52 +01001446
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001447 if (ret)
1448 goto out_unlock;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001449
Peter Zijlstra94ffac52017-04-07 09:04:07 +02001450 /*
1451 * This is a point of no return; once we modify the uval there is no
1452 * going back and subsequent operations must not fail.
1453 */
1454
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001455 raw_spin_lock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001456 WARN_ON(list_empty(&pi_state->list));
1457 list_del_init(&pi_state->list);
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001458 raw_spin_unlock(&pi_state->owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001459
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001460 raw_spin_lock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001461 WARN_ON(!list_empty(&pi_state->list));
Ingo Molnarc87e2832006-06-27 02:54:58 -07001462 list_add(&pi_state->list, &new_owner->pi_state_list);
1463 pi_state->owner = new_owner;
Thomas Gleixnerb4abf912016-01-13 11:25:38 +01001464 raw_spin_unlock(&new_owner->pi_lock);
Ingo Molnar627371d2006-07-29 05:16:20 +02001465
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001466 postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001467
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001468out_unlock:
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001469 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01001470
Peter Zijlstraaa2bfe52017-03-23 15:56:10 +01001471 if (postunlock)
1472 rt_mutex_postunlock(&wake_q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07001473
Peter Zijlstra16ffa122017-03-22 11:35:55 +01001474 return ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07001475}
1476
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477/*
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001478 * Express the locking dependencies for lockdep:
1479 */
1480static inline void
1481double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1482{
1483 if (hb1 <= hb2) {
1484 spin_lock(&hb1->lock);
1485 if (hb1 < hb2)
1486 spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
1487 } else { /* hb1 > hb2 */
1488 spin_lock(&hb2->lock);
1489 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
1490 }
1491}
1492
Darren Hart5eb3dc62009-03-12 00:55:52 -07001493static inline void
1494double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
1495{
Darren Hartf061d352009-03-12 15:11:18 -07001496 spin_unlock(&hb1->lock);
Ingo Molnar88f502f2009-03-13 10:32:07 +01001497 if (hb1 != hb2)
1498 spin_unlock(&hb2->lock);
Darren Hart5eb3dc62009-03-12 00:55:52 -07001499}
1500
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001501/*
Darren Hartb2d09942009-03-12 00:55:37 -07001502 * Wake up waiters matching bitset queued on this futex (uaddr).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 */
Darren Hartb41277d2010-11-08 13:10:09 -08001504static int
1505futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506{
Ingo Molnare2970f22006-06-27 02:54:47 -07001507 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 struct futex_q *this, *next;
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001509 union futex_key key = FUTEX_KEY_INIT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 int ret;
Waiman Long194a6b52016-11-17 11:46:38 -05001511 DEFINE_WAKE_Q(wake_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512
Thomas Gleixnercd689982008-02-01 17:45:14 +01001513 if (!bitset)
1514 return -EINVAL;
1515
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001516 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 if (unlikely(ret != 0))
1518 goto out;
1519
Ingo Molnare2970f22006-06-27 02:54:47 -07001520 hb = hash_futex(&key);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001521
1522 /* Make sure we really have tasks to wakeup */
1523 if (!hb_waiters_pending(hb))
1524 goto out_put_key;
1525
Ingo Molnare2970f22006-06-27 02:54:47 -07001526 spin_lock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527
Jason Low0d00c7b2014-01-12 15:31:22 -08001528 plist_for_each_entry_safe(this, next, &hb->chain, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (match_futex (&this->key, &key)) {
Darren Hart52400ba2009-04-03 13:40:49 -07001530 if (this->pi_state || this->rt_waiter) {
Ingo Molnared6f7b12006-07-01 04:35:46 -07001531 ret = -EINVAL;
1532 break;
1533 }
Thomas Gleixnercd689982008-02-01 17:45:14 +01001534
1535 /* Check if one of the bits is set in both bitsets */
1536 if (!(this->bitset & bitset))
1537 continue;
1538
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001539 mark_wake_futex(&wake_q, this);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 if (++ret >= nr_wake)
1541 break;
1542 }
1543 }
1544
Ingo Molnare2970f22006-06-27 02:54:47 -07001545 spin_unlock(&hb->lock);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001546 wake_up_q(&wake_q);
Davidlohr Buesob0c29f72014-01-12 15:31:25 -08001547out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001548 put_futex_key(&key);
Darren Hart42d35d42008-12-29 15:49:53 -08001549out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 return ret;
1551}
1552
1553/*
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001554 * Wake up all waiters hashed on the physical page that is mapped
1555 * to this virtual address:
1556 */
Ingo Molnare2970f22006-06-27 02:54:47 -07001557static int
Darren Hartb41277d2010-11-08 13:10:09 -08001558futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
Ingo Molnare2970f22006-06-27 02:54:47 -07001559 int nr_wake, int nr_wake2, int op)
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001560{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001561 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Ingo Molnare2970f22006-06-27 02:54:47 -07001562 struct futex_hash_bucket *hb1, *hb2;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001563 struct futex_q *this, *next;
Darren Harte4dc5b72009-03-12 00:56:13 -07001564 int ret, op_ret;
Waiman Long194a6b52016-11-17 11:46:38 -05001565 DEFINE_WAKE_Q(wake_q);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001566
Darren Harte4dc5b72009-03-12 00:56:13 -07001567retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001568 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001569 if (unlikely(ret != 0))
1570 goto out;
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001571 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001572 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001573 goto out_put_key1;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001574
Ingo Molnare2970f22006-06-27 02:54:47 -07001575 hb1 = hash_futex(&key1);
1576 hb2 = hash_futex(&key2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001577
Darren Harte4dc5b72009-03-12 00:56:13 -07001578retry_private:
Thomas Gleixnereaaea802009-10-04 09:34:17 +02001579 double_lock_hb(hb1, hb2);
Ingo Molnare2970f22006-06-27 02:54:47 -07001580 op_ret = futex_atomic_op_inuser(op, uaddr2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001581 if (unlikely(op_ret < 0)) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001582
Darren Hart5eb3dc62009-03-12 00:55:52 -07001583 double_unlock_hb(hb1, hb2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001584
David Howells7ee1dd32006-01-06 00:11:44 -08001585#ifndef CONFIG_MMU
Ingo Molnare2970f22006-06-27 02:54:47 -07001586 /*
1587 * we don't get EFAULT from MMU faults if we don't have an MMU,
1588 * but we might get them from range checking
1589 */
David Howells7ee1dd32006-01-06 00:11:44 -08001590 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001591 goto out_put_keys;
David Howells7ee1dd32006-01-06 00:11:44 -08001592#endif
1593
David Gibson796f8d92005-11-07 00:59:33 -08001594 if (unlikely(op_ret != -EFAULT)) {
1595 ret = op_ret;
Darren Hart42d35d42008-12-29 15:49:53 -08001596 goto out_put_keys;
David Gibson796f8d92005-11-07 00:59:33 -08001597 }
1598
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001599 ret = fault_in_user_writeable(uaddr2);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001600 if (ret)
Darren Hartde87fcc2009-03-12 00:55:46 -07001601 goto out_put_keys;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001602
Darren Hartb41277d2010-11-08 13:10:09 -08001603 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001604 goto retry_private;
1605
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001606 put_futex_key(&key2);
1607 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001608 goto retry;
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001609 }
1610
Jason Low0d00c7b2014-01-12 15:31:22 -08001611 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001612 if (match_futex (&this->key, &key1)) {
Darren Hartaa109902012-11-26 16:29:56 -08001613 if (this->pi_state || this->rt_waiter) {
1614 ret = -EINVAL;
1615 goto out_unlock;
1616 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001617 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001618 if (++ret >= nr_wake)
1619 break;
1620 }
1621 }
1622
1623 if (op_ret > 0) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001624 op_ret = 0;
Jason Low0d00c7b2014-01-12 15:31:22 -08001625 plist_for_each_entry_safe(this, next, &hb2->chain, list) {
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001626 if (match_futex (&this->key, &key2)) {
Darren Hartaa109902012-11-26 16:29:56 -08001627 if (this->pi_state || this->rt_waiter) {
1628 ret = -EINVAL;
1629 goto out_unlock;
1630 }
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001631 mark_wake_futex(&wake_q, this);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001632 if (++op_ret >= nr_wake2)
1633 break;
1634 }
1635 }
1636 ret += op_ret;
1637 }
1638
Darren Hartaa109902012-11-26 16:29:56 -08001639out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07001640 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001641 wake_up_q(&wake_q);
Darren Hart42d35d42008-12-29 15:49:53 -08001642out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001643 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08001644out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001645 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08001646out:
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07001647 return ret;
1648}
1649
Darren Hart9121e472009-04-03 13:40:31 -07001650/**
1651 * requeue_futex() - Requeue a futex_q from one hb to another
1652 * @q: the futex_q to requeue
1653 * @hb1: the source hash_bucket
1654 * @hb2: the target hash_bucket
1655 * @key2: the new key for the requeued futex_q
1656 */
1657static inline
1658void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1659 struct futex_hash_bucket *hb2, union futex_key *key2)
1660{
1661
1662 /*
1663 * If key1 and key2 hash to the same bucket, no need to
1664 * requeue.
1665 */
1666 if (likely(&hb1->chain != &hb2->chain)) {
1667 plist_del(&q->list, &hb1->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07001668 hb_waiters_dec(hb1);
Linus Torvalds11d46162014-03-20 22:11:17 -07001669 hb_waiters_inc(hb2);
Davidlohr Buesofe1bce92016-04-20 20:09:24 -07001670 plist_add(&q->list, &hb2->chain);
Darren Hart9121e472009-04-03 13:40:31 -07001671 q->lock_ptr = &hb2->lock;
Darren Hart9121e472009-04-03 13:40:31 -07001672 }
1673 get_futex_key_refs(key2);
1674 q->key = *key2;
1675}
1676
Darren Hart52400ba2009-04-03 13:40:49 -07001677/**
1678 * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
Darren Hartd96ee562009-09-21 22:30:22 -07001679 * @q: the futex_q
1680 * @key: the key of the requeue target futex
1681 * @hb: the hash_bucket of the requeue target futex
Darren Hart52400ba2009-04-03 13:40:49 -07001682 *
1683 * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1684 * target futex if it is uncontended or via a lock steal. Set the futex_q key
1685 * to the requeue target futex so the waiter can detect the wakeup on the right
1686 * futex, but remove it from the hb and NULL the rt_waiter so it can detect
Darren Hartbeda2c72009-08-09 15:34:39 -07001687 * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
1688 * to protect access to the pi_state to fixup the owner later. Must be called
1689 * with both q->lock_ptr and hb->lock held.
Darren Hart52400ba2009-04-03 13:40:49 -07001690 */
1691static inline
Darren Hartbeda2c72009-08-09 15:34:39 -07001692void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1693 struct futex_hash_bucket *hb)
Darren Hart52400ba2009-04-03 13:40:49 -07001694{
Darren Hart52400ba2009-04-03 13:40:49 -07001695 get_futex_key_refs(key);
1696 q->key = *key;
1697
Lai Jiangshan2e129782010-12-22 14:18:50 +08001698 __unqueue_futex(q);
Darren Hart52400ba2009-04-03 13:40:49 -07001699
1700 WARN_ON(!q->rt_waiter);
1701 q->rt_waiter = NULL;
1702
Darren Hartbeda2c72009-08-09 15:34:39 -07001703 q->lock_ptr = &hb->lock;
Darren Hartbeda2c72009-08-09 15:34:39 -07001704
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02001705 wake_up_state(q->task, TASK_NORMAL);
Darren Hart52400ba2009-04-03 13:40:49 -07001706}
1707
1708/**
1709 * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
Darren Hartbab5bc92009-04-07 23:23:50 -07001710 * @pifutex: the user address of the to futex
1711 * @hb1: the from futex hash bucket, must be locked by the caller
1712 * @hb2: the to futex hash bucket, must be locked by the caller
1713 * @key1: the from futex key
1714 * @key2: the to futex key
1715 * @ps: address to store the pi_state pointer
1716 * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
Darren Hart52400ba2009-04-03 13:40:49 -07001717 *
1718 * Try and get the lock on behalf of the top waiter if we can do it atomically.
Darren Hartbab5bc92009-04-07 23:23:50 -07001719 * Wake the top waiter if we succeed. If the caller specified set_waiters,
1720 * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1721 * hb1 and hb2 must be held by the caller.
Darren Hart52400ba2009-04-03 13:40:49 -07001722 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001723 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001724 * - 0 - failed to acquire the lock atomically;
1725 * - >0 - acquired the lock, return value is vpid of the top_waiter
1726 * - <0 - error
Darren Hart52400ba2009-04-03 13:40:49 -07001727 */
1728static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1729 struct futex_hash_bucket *hb1,
1730 struct futex_hash_bucket *hb2,
1731 union futex_key *key1, union futex_key *key2,
Darren Hartbab5bc92009-04-07 23:23:50 -07001732 struct futex_pi_state **ps, int set_waiters)
Darren Hart52400ba2009-04-03 13:40:49 -07001733{
Darren Hartbab5bc92009-04-07 23:23:50 -07001734 struct futex_q *top_waiter = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07001735 u32 curval;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001736 int ret, vpid;
Darren Hart52400ba2009-04-03 13:40:49 -07001737
1738 if (get_futex_value_locked(&curval, pifutex))
1739 return -EFAULT;
1740
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07001741 if (unlikely(should_fail_futex(true)))
1742 return -EFAULT;
1743
Darren Hartbab5bc92009-04-07 23:23:50 -07001744 /*
1745 * Find the top_waiter and determine if there are additional waiters.
1746 * If the caller intends to requeue more than 1 waiter to pifutex,
1747 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1748 * as we have means to handle the possible fault. If not, don't set
1749 * the bit unecessarily as it will force the subsequent unlock to enter
1750 * the kernel.
1751 */
Darren Hart52400ba2009-04-03 13:40:49 -07001752 top_waiter = futex_top_waiter(hb1, key1);
1753
1754 /* There are no waiters, nothing for us to do. */
1755 if (!top_waiter)
1756 return 0;
1757
Darren Hart84bc4af2009-08-13 17:36:53 -07001758 /* Ensure we requeue to the expected futex. */
1759 if (!match_futex(top_waiter->requeue_pi_key, key2))
1760 return -EINVAL;
1761
Darren Hart52400ba2009-04-03 13:40:49 -07001762 /*
Darren Hartbab5bc92009-04-07 23:23:50 -07001763 * Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
1764 * the contended case or if set_waiters is 1. The pi_state is returned
1765 * in ps in contended cases.
Darren Hart52400ba2009-04-03 13:40:49 -07001766 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00001767 vpid = task_pid_vnr(top_waiter->task);
Darren Hartbab5bc92009-04-07 23:23:50 -07001768 ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1769 set_waiters);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001770 if (ret == 1) {
Darren Hartbeda2c72009-08-09 15:34:39 -07001771 requeue_pi_wake_futex(top_waiter, key2, hb2);
Thomas Gleixner866293e2014-05-12 20:45:34 +00001772 return vpid;
1773 }
Darren Hart52400ba2009-04-03 13:40:49 -07001774 return ret;
1775}
1776
1777/**
1778 * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
Randy Dunlapfb62db22010-10-13 11:02:34 -07001779 * @uaddr1: source futex user address
Darren Hartb41277d2010-11-08 13:10:09 -08001780 * @flags: futex flags (FLAGS_SHARED, etc.)
Randy Dunlapfb62db22010-10-13 11:02:34 -07001781 * @uaddr2: target futex user address
1782 * @nr_wake: number of waiters to wake (must be 1 for requeue_pi)
1783 * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1784 * @cmpval: @uaddr1 expected value (or %NULL)
1785 * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
Darren Hartb41277d2010-11-08 13:10:09 -08001786 * pi futex (pi to pi requeue is not supported)
Darren Hart52400ba2009-04-03 13:40:49 -07001787 *
1788 * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1789 * uaddr2 atomically on behalf of the top waiter.
1790 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08001791 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03001792 * - >=0 - on success, the number of tasks requeued or woken;
1793 * - <0 - on error
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 */
Darren Hartb41277d2010-11-08 13:10:09 -08001795static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1796 u32 __user *uaddr2, int nr_wake, int nr_requeue,
1797 u32 *cmpval, int requeue_pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798{
Peter Zijlstra38d47c12008-09-26 19:32:20 +02001799 union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
Darren Hart52400ba2009-04-03 13:40:49 -07001800 int drop_count = 0, task_count = 0, ret;
1801 struct futex_pi_state *pi_state = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07001802 struct futex_hash_bucket *hb1, *hb2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 struct futex_q *this, *next;
Waiman Long194a6b52016-11-17 11:46:38 -05001804 DEFINE_WAKE_Q(wake_q);
Darren Hart52400ba2009-04-03 13:40:49 -07001805
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04001806 /*
1807 * When PI not supported: return -ENOSYS if requeue_pi is true,
1808 * consequently the compiler knows requeue_pi is always false past
1809 * this point which will optimize away all the conditional code
1810 * further down.
1811 */
1812 if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi)
1813 return -ENOSYS;
1814
Darren Hart52400ba2009-04-03 13:40:49 -07001815 if (requeue_pi) {
1816 /*
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001817 * Requeue PI only works on two distinct uaddrs. This
1818 * check is only valid for private futexes. See below.
1819 */
1820 if (uaddr1 == uaddr2)
1821 return -EINVAL;
1822
1823 /*
Darren Hart52400ba2009-04-03 13:40:49 -07001824 * requeue_pi requires a pi_state, try to allocate it now
1825 * without any locks in case it fails.
1826 */
1827 if (refill_pi_state_cache())
1828 return -ENOMEM;
1829 /*
1830 * requeue_pi must wake as many tasks as it can, up to nr_wake
1831 * + nr_requeue, since it acquires the rt_mutex prior to
1832 * returning to userspace, so as to not leave the rt_mutex with
1833 * waiters and no owner. However, second and third wake-ups
1834 * cannot be predicted as they involve race conditions with the
1835 * first wake and a fault while looking up the pi_state. Both
1836 * pthread_cond_signal() and pthread_cond_broadcast() should
1837 * use nr_wake=1.
1838 */
1839 if (nr_wake != 1)
1840 return -EINVAL;
1841 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
Darren Hart42d35d42008-12-29 15:49:53 -08001843retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001844 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845 if (unlikely(ret != 0))
1846 goto out;
Shawn Bohrer9ea71502011-06-30 11:21:32 -05001847 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
1848 requeue_pi ? VERIFY_WRITE : VERIFY_READ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08001850 goto out_put_key1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00001852 /*
1853 * The check above which compares uaddrs is not sufficient for
1854 * shared futexes. We need to compare the keys:
1855 */
1856 if (requeue_pi && match_futex(&key1, &key2)) {
1857 ret = -EINVAL;
1858 goto out_put_keys;
1859 }
1860
Ingo Molnare2970f22006-06-27 02:54:47 -07001861 hb1 = hash_futex(&key1);
1862 hb2 = hash_futex(&key2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Darren Harte4dc5b72009-03-12 00:56:13 -07001864retry_private:
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001865 hb_waiters_inc(hb2);
Ingo Molnar8b8f3192006-07-03 00:25:05 -07001866 double_lock_hb(hb1, hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
Ingo Molnare2970f22006-06-27 02:54:47 -07001868 if (likely(cmpval != NULL)) {
1869 u32 curval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870
Ingo Molnare2970f22006-06-27 02:54:47 -07001871 ret = get_futex_value_locked(&curval, uaddr1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872
1873 if (unlikely(ret)) {
Darren Hart5eb3dc62009-03-12 00:55:52 -07001874 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001875 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876
Darren Harte4dc5b72009-03-12 00:56:13 -07001877 ret = get_user(curval, uaddr1);
1878 if (ret)
1879 goto out_put_keys;
1880
Darren Hartb41277d2010-11-08 13:10:09 -08001881 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07001882 goto retry_private;
1883
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001884 put_futex_key(&key2);
1885 put_futex_key(&key1);
Darren Harte4dc5b72009-03-12 00:56:13 -07001886 goto retry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887 }
Ingo Molnare2970f22006-06-27 02:54:47 -07001888 if (curval != *cmpval) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 ret = -EAGAIN;
1890 goto out_unlock;
1891 }
1892 }
1893
Darren Hart52400ba2009-04-03 13:40:49 -07001894 if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
Darren Hartbab5bc92009-04-07 23:23:50 -07001895 /*
1896 * Attempt to acquire uaddr2 and wake the top waiter. If we
1897 * intend to requeue waiters, force setting the FUTEX_WAITERS
1898 * bit. We force this here where we are able to easily handle
1899 * faults rather in the requeue loop below.
1900 */
Darren Hart52400ba2009-04-03 13:40:49 -07001901 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
Darren Hartbab5bc92009-04-07 23:23:50 -07001902 &key2, &pi_state, nr_requeue);
Darren Hart52400ba2009-04-03 13:40:49 -07001903
1904 /*
1905 * At this point the top_waiter has either taken uaddr2 or is
1906 * waiting on it. If the former, then the pi_state will not
1907 * exist yet, look it up one more time to ensure we have a
Thomas Gleixner866293e2014-05-12 20:45:34 +00001908 * reference to it. If the lock was taken, ret contains the
1909 * vpid of the top waiter task.
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001910 * If the lock was not taken, we have pi_state and an initial
1911 * refcount on it. In case of an error we have nothing.
Darren Hart52400ba2009-04-03 13:40:49 -07001912 */
Thomas Gleixner866293e2014-05-12 20:45:34 +00001913 if (ret > 0) {
Darren Hart52400ba2009-04-03 13:40:49 -07001914 WARN_ON(pi_state);
Darren Hart89061d32009-10-15 15:30:48 -07001915 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07001916 task_count++;
Thomas Gleixner866293e2014-05-12 20:45:34 +00001917 /*
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001918 * If we acquired the lock, then the user space value
1919 * of uaddr2 should be vpid. It cannot be changed by
1920 * the top waiter as it is blocked on hb2 lock if it
1921 * tries to do so. If something fiddled with it behind
1922 * our back the pi state lookup might unearth it. So
1923 * we rather use the known value than rereading and
1924 * handing potential crap to lookup_pi_state.
1925 *
1926 * If that call succeeds then we have pi_state and an
1927 * initial refcount on it.
Thomas Gleixner866293e2014-05-12 20:45:34 +00001928 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01001929 ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07001930 }
1931
1932 switch (ret) {
1933 case 0:
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00001934 /* We hold a reference on the pi state. */
Darren Hart52400ba2009-04-03 13:40:49 -07001935 break;
Thomas Gleixner4959f2d2015-12-19 20:07:40 +00001936
1937 /* If the above failed, then pi_state is NULL */
Darren Hart52400ba2009-04-03 13:40:49 -07001938 case -EFAULT:
1939 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001940 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001941 put_futex_key(&key2);
1942 put_futex_key(&key1);
Thomas Gleixnerd0725992009-06-11 23:15:43 +02001943 ret = fault_in_user_writeable(uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07001944 if (!ret)
1945 goto retry;
1946 goto out;
1947 case -EAGAIN:
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00001948 /*
1949 * Two reasons for this:
1950 * - Owner is exiting and we just wait for the
1951 * exit to complete.
1952 * - The user space value changed.
1953 */
Darren Hart52400ba2009-04-03 13:40:49 -07001954 double_unlock_hb(hb1, hb2);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07001955 hb_waiters_dec(hb2);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01001956 put_futex_key(&key2);
1957 put_futex_key(&key1);
Darren Hart52400ba2009-04-03 13:40:49 -07001958 cond_resched();
1959 goto retry;
1960 default:
1961 goto out_unlock;
1962 }
1963 }
1964
Jason Low0d00c7b2014-01-12 15:31:22 -08001965 plist_for_each_entry_safe(this, next, &hb1->chain, list) {
Darren Hart52400ba2009-04-03 13:40:49 -07001966 if (task_count - nr_wake >= nr_requeue)
1967 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Darren Hart52400ba2009-04-03 13:40:49 -07001969 if (!match_futex(&this->key, &key1))
1970 continue;
1971
Darren Hart392741e2009-08-07 15:20:48 -07001972 /*
1973 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1974 * be paired with each other and no other futex ops.
Darren Hartaa109902012-11-26 16:29:56 -08001975 *
1976 * We should never be requeueing a futex_q with a pi_state,
1977 * which is awaiting a futex_unlock_pi().
Darren Hart392741e2009-08-07 15:20:48 -07001978 */
1979 if ((requeue_pi && !this->rt_waiter) ||
Darren Hartaa109902012-11-26 16:29:56 -08001980 (!requeue_pi && this->rt_waiter) ||
1981 this->pi_state) {
Darren Hart392741e2009-08-07 15:20:48 -07001982 ret = -EINVAL;
1983 break;
1984 }
Darren Hart52400ba2009-04-03 13:40:49 -07001985
1986 /*
1987 * Wake nr_wake waiters. For requeue_pi, if we acquired the
1988 * lock, we already woke the top_waiter. If not, it will be
1989 * woken by futex_unlock_pi().
1990 */
1991 if (++task_count <= nr_wake && !requeue_pi) {
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07001992 mark_wake_futex(&wake_q, this);
Darren Hart52400ba2009-04-03 13:40:49 -07001993 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 }
Darren Hart52400ba2009-04-03 13:40:49 -07001995
Darren Hart84bc4af2009-08-13 17:36:53 -07001996 /* Ensure we requeue to the expected futex for requeue_pi. */
1997 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1998 ret = -EINVAL;
1999 break;
2000 }
2001
Darren Hart52400ba2009-04-03 13:40:49 -07002002 /*
2003 * Requeue nr_requeue waiters and possibly one more in the case
2004 * of requeue_pi if we couldn't acquire the lock atomically.
2005 */
2006 if (requeue_pi) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002007 /*
2008 * Prepare the waiter to take the rt_mutex. Take a
2009 * refcount on the pi_state and store the pointer in
2010 * the futex_q object of the waiter.
2011 */
Peter Zijlstrabf92cf32017-03-22 11:35:53 +01002012 get_pi_state(pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07002013 this->pi_state = pi_state;
2014 ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
2015 this->rt_waiter,
Thomas Gleixnerc051b212014-05-22 03:25:50 +00002016 this->task);
Darren Hart52400ba2009-04-03 13:40:49 -07002017 if (ret == 1) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002018 /*
2019 * We got the lock. We do neither drop the
2020 * refcount on pi_state nor clear
2021 * this->pi_state because the waiter needs the
2022 * pi_state for cleaning up the user space
2023 * value. It will drop the refcount after
2024 * doing so.
2025 */
Darren Hartbeda2c72009-08-09 15:34:39 -07002026 requeue_pi_wake_futex(this, &key2, hb2);
Darren Hart89061d32009-10-15 15:30:48 -07002027 drop_count++;
Darren Hart52400ba2009-04-03 13:40:49 -07002028 continue;
2029 } else if (ret) {
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002030 /*
2031 * rt_mutex_start_proxy_lock() detected a
2032 * potential deadlock when we tried to queue
2033 * that waiter. Drop the pi_state reference
2034 * which we took above and remove the pointer
2035 * to the state from the waiters futex_q
2036 * object.
2037 */
Darren Hart52400ba2009-04-03 13:40:49 -07002038 this->pi_state = NULL;
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002039 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00002040 /*
2041 * We stop queueing more waiters and let user
2042 * space deal with the mess.
2043 */
2044 break;
Darren Hart52400ba2009-04-03 13:40:49 -07002045 }
2046 }
2047 requeue_futex(this, hb1, hb2, &key2);
2048 drop_count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 }
2050
Thomas Gleixnerecb38b72015-12-19 20:07:39 +00002051 /*
2052 * We took an extra initial reference to the pi_state either
2053 * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We
2054 * need to drop it here again.
2055 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002056 put_pi_state(pi_state);
Thomas Gleixner885c2cb2015-12-19 20:07:41 +00002057
2058out_unlock:
Darren Hart5eb3dc62009-03-12 00:55:52 -07002059 double_unlock_hb(hb1, hb2);
Davidlohr Bueso1d0dcb32015-05-01 08:27:51 -07002060 wake_up_q(&wake_q);
Linus Torvalds69cd9eb2014-04-08 15:30:07 -07002061 hb_waiters_dec(hb2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Darren Hartcd84a422009-04-02 14:19:38 -07002063 /*
2064 * drop_futex_key_refs() must be called outside the spinlocks. During
2065 * the requeue we moved futex_q's from the hash bucket at key1 to the
2066 * one at key2 and updated their key pointer. We no longer need to
2067 * hold the references to key1.
2068 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 while (--drop_count >= 0)
Rusty Russell9adef582007-05-08 00:26:42 -07002070 drop_futex_key_refs(&key1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
Darren Hart42d35d42008-12-29 15:49:53 -08002072out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002073 put_futex_key(&key2);
Darren Hart42d35d42008-12-29 15:49:53 -08002074out_put_key1:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002075 put_futex_key(&key1);
Darren Hart42d35d42008-12-29 15:49:53 -08002076out:
Darren Hart52400ba2009-04-03 13:40:49 -07002077 return ret ? ret : task_count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078}
2079
2080/* The key must be already stored in q->key. */
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002081static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002082 __acquires(&hb->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
Ingo Molnare2970f22006-06-27 02:54:47 -07002084 struct futex_hash_bucket *hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
Ingo Molnare2970f22006-06-27 02:54:47 -07002086 hb = hash_futex(&q->key);
Linus Torvalds11d46162014-03-20 22:11:17 -07002087
2088 /*
2089 * Increment the counter before taking the lock so that
2090 * a potential waker won't miss a to-be-slept task that is
2091 * waiting for the spinlock. This is safe as all queue_lock()
2092 * users end up calling queue_me(). Similarly, for housekeeping,
2093 * decrement the counter at queue_unlock() when some error has
2094 * occurred and we don't end up adding the task to the list.
2095 */
2096 hb_waiters_inc(hb);
2097
Ingo Molnare2970f22006-06-27 02:54:47 -07002098 q->lock_ptr = &hb->lock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
Davidlohr Bueso8ad7b372016-02-09 11:15:13 -08002100 spin_lock(&hb->lock); /* implies smp_mb(); (A) */
Ingo Molnare2970f22006-06-27 02:54:47 -07002101 return hb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102}
2103
Darren Hartd40d65c2009-09-21 22:30:15 -07002104static inline void
Jason Low0d00c7b2014-01-12 15:31:22 -08002105queue_unlock(struct futex_hash_bucket *hb)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002106 __releases(&hb->lock)
Darren Hartd40d65c2009-09-21 22:30:15 -07002107{
2108 spin_unlock(&hb->lock);
Linus Torvalds11d46162014-03-20 22:11:17 -07002109 hb_waiters_dec(hb);
Darren Hartd40d65c2009-09-21 22:30:15 -07002110}
2111
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002112static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113{
Pierre Peifferec92d082007-05-09 02:35:00 -07002114 int prio;
2115
2116 /*
2117 * The priority used to register this element is
2118 * - either the real thread-priority for the real-time threads
2119 * (i.e. threads with a priority lower than MAX_RT_PRIO)
2120 * - or MAX_RT_PRIO for non-RT threads.
2121 * Thus, all RT-threads are woken first in priority order, and
2122 * the others are woken last, in FIFO order.
2123 */
2124 prio = min(current->normal_prio, MAX_RT_PRIO);
2125
2126 plist_node_init(&q->list, prio);
Pierre Peifferec92d082007-05-09 02:35:00 -07002127 plist_add(&q->list, &hb->chain);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002128 q->task = current;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002129}
2130
2131/**
2132 * queue_me() - Enqueue the futex_q on the futex_hash_bucket
2133 * @q: The futex_q to enqueue
2134 * @hb: The destination hash bucket
2135 *
2136 * The hb->lock must be held by the caller, and is released here. A call to
2137 * queue_me() is typically paired with exactly one call to unqueue_me(). The
2138 * exceptions involve the PI related operations, which may use unqueue_me_pi()
2139 * or nothing if the unqueue is done as part of the wake process and the unqueue
2140 * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
2141 * an example).
2142 */
2143static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
2144 __releases(&hb->lock)
2145{
2146 __queue_me(q, hb);
Ingo Molnare2970f22006-06-27 02:54:47 -07002147 spin_unlock(&hb->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148}
2149
Darren Hartd40d65c2009-09-21 22:30:15 -07002150/**
2151 * unqueue_me() - Remove the futex_q from its futex_hash_bucket
2152 * @q: The futex_q to unqueue
2153 *
2154 * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
2155 * be paired with exactly one earlier call to queue_me().
2156 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002157 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002158 * - 1 - if the futex_q was still queued (and we removed unqueued it);
2159 * - 0 - if the futex_q was already removed by the waking thread
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161static int unqueue_me(struct futex_q *q)
2162{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 spinlock_t *lock_ptr;
Ingo Molnare2970f22006-06-27 02:54:47 -07002164 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 /* In the common case we don't take the spinlock, which is nice. */
Darren Hart42d35d42008-12-29 15:49:53 -08002167retry:
Jianyu Zhan29b75eb2016-03-07 09:32:24 +08002168 /*
2169 * q->lock_ptr can change between this read and the following spin_lock.
2170 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
2171 * optimizing lock_ptr out of the logic below.
2172 */
2173 lock_ptr = READ_ONCE(q->lock_ptr);
Stephen Hemmingerc80544d2007-10-18 03:07:05 -07002174 if (lock_ptr != NULL) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 spin_lock(lock_ptr);
2176 /*
2177 * q->lock_ptr can change between reading it and
2178 * spin_lock(), causing us to take the wrong lock. This
2179 * corrects the race condition.
2180 *
2181 * Reasoning goes like this: if we have the wrong lock,
2182 * q->lock_ptr must have changed (maybe several times)
2183 * between reading it and the spin_lock(). It can
2184 * change again after the spin_lock() but only if it was
2185 * already changed before the spin_lock(). It cannot,
2186 * however, change back to the original value. Therefore
2187 * we can detect whether we acquired the correct lock.
2188 */
2189 if (unlikely(lock_ptr != q->lock_ptr)) {
2190 spin_unlock(lock_ptr);
2191 goto retry;
2192 }
Lai Jiangshan2e129782010-12-22 14:18:50 +08002193 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002194
2195 BUG_ON(q->pi_state);
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 spin_unlock(lock_ptr);
2198 ret = 1;
2199 }
2200
Rusty Russell9adef582007-05-08 00:26:42 -07002201 drop_futex_key_refs(&q->key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 return ret;
2203}
2204
Ingo Molnarc87e2832006-06-27 02:54:58 -07002205/*
2206 * PI futexes can not be requeued and must remove themself from the
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002207 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
2208 * and dropped here.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002209 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002210static void unqueue_me_pi(struct futex_q *q)
Namhyung Kim15e408c2010-09-14 21:43:48 +09002211 __releases(q->lock_ptr)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002212{
Lai Jiangshan2e129782010-12-22 14:18:50 +08002213 __unqueue_futex(q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002214
2215 BUG_ON(!q->pi_state);
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00002216 put_pi_state(q->pi_state);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002217 q->pi_state = NULL;
2218
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002219 spin_unlock(q->lock_ptr);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002220}
2221
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002222/*
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002223 * Fixup the pi_state owner with the new owner.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002224 *
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002225 * Must be called with hash bucket lock held and mm->sem held for non
2226 * private futexes.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002227 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002228static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002229 struct task_struct *newowner)
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002230{
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002231 u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002232 struct futex_pi_state *pi_state = q->pi_state;
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03002233 u32 uval, uninitialized_var(curval), newval;
Peter Zijlstra734009e2017-03-22 11:35:52 +01002234 struct task_struct *oldowner;
Darren Harte4dc5b72009-03-12 00:56:13 -07002235 int ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002236
Peter Zijlstra734009e2017-03-22 11:35:52 +01002237 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
2238
2239 oldowner = pi_state->owner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002240 /* Owner died? */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002241 if (!pi_state->owner)
2242 newtid |= FUTEX_OWNER_DIED;
2243
2244 /*
2245 * We are here either because we stole the rtmutex from the
Lai Jiangshan81612392011-01-14 17:09:41 +08002246 * previous highest priority waiter or we are the highest priority
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002247 * waiter but have failed to get the rtmutex the first time.
2248 *
Lai Jiangshan81612392011-01-14 17:09:41 +08002249 * We have to replace the newowner TID in the user space variable.
2250 * This must be atomic as we have to preserve the owner died bit here.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002251 *
Darren Hartb2d09942009-03-12 00:55:37 -07002252 * Note: We write the user space value _before_ changing the pi_state
2253 * because we can fault here. Imagine swapped out pages or a fork
2254 * that marked all the anonymous memory readonly for cow.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002255 *
Peter Zijlstra734009e2017-03-22 11:35:52 +01002256 * Modifying pi_state _before_ the user space value would leave the
2257 * pi_state in an inconsistent state when we fault here, because we
2258 * need to drop the locks to handle the fault. This might be observed
2259 * in the PID check in lookup_pi_state.
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002260 */
2261retry:
2262 if (get_futex_value_locked(&uval, uaddr))
2263 goto handle_fault;
2264
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002265 for (;;) {
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002266 newval = (uval & FUTEX_OWNER_DIED) | newtid;
2267
Michel Lespinasse37a9d912011-03-10 18:48:51 -08002268 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002269 goto handle_fault;
2270 if (curval == uval)
2271 break;
2272 uval = curval;
2273 }
2274
2275 /*
2276 * We fixed up user space. Now we need to fix the pi_state
2277 * itself.
2278 */
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002279 if (pi_state->owner != NULL) {
Peter Zijlstra734009e2017-03-22 11:35:52 +01002280 raw_spin_lock(&pi_state->owner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002281 WARN_ON(list_empty(&pi_state->list));
2282 list_del_init(&pi_state->list);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002283 raw_spin_unlock(&pi_state->owner->pi_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002284 }
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002285
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002286 pi_state->owner = newowner;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002287
Peter Zijlstra734009e2017-03-22 11:35:52 +01002288 raw_spin_lock(&newowner->pi_lock);
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002289 WARN_ON(!list_empty(&pi_state->list));
Thomas Gleixnercdf71a12008-01-08 19:47:38 +01002290 list_add(&pi_state->list, &newowner->pi_state_list);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002291 raw_spin_unlock(&newowner->pi_lock);
2292 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2293
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002294 return 0;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002295
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002296 /*
Peter Zijlstra734009e2017-03-22 11:35:52 +01002297 * To handle the page fault we need to drop the locks here. That gives
2298 * the other task (either the highest priority waiter itself or the
2299 * task which stole the rtmutex) the chance to try the fixup of the
2300 * pi_state. So once we are back from handling the fault we need to
2301 * check the pi_state after reacquiring the locks and before trying to
2302 * do another fixup. When the fixup has been done already we simply
2303 * return.
2304 *
2305 * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely
2306 * drop hb->lock since the caller owns the hb -> futex_q relation.
2307 * Dropping the pi_mutex->wait_lock requires the state revalidate.
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002308 */
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002309handle_fault:
Peter Zijlstra734009e2017-03-22 11:35:52 +01002310 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002311 spin_unlock(q->lock_ptr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002312
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002313 ret = fault_in_user_writeable(uaddr);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002314
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002315 spin_lock(q->lock_ptr);
Peter Zijlstra734009e2017-03-22 11:35:52 +01002316 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002317
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002318 /*
2319 * Check if someone else fixed it for us:
2320 */
Peter Zijlstra734009e2017-03-22 11:35:52 +01002321 if (pi_state->owner != oldowner) {
2322 ret = 0;
2323 goto out_unlock;
2324 }
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002325
2326 if (ret)
Peter Zijlstra734009e2017-03-22 11:35:52 +01002327 goto out_unlock;
Thomas Gleixner1b7558e2008-06-23 11:21:58 +02002328
2329 goto retry;
Peter Zijlstra734009e2017-03-22 11:35:52 +01002330
2331out_unlock:
2332 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
2333 return ret;
Pierre Peifferd0aa7a72007-05-09 02:35:02 -07002334}
2335
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002336static long futex_wait_restart(struct restart_block *restart);
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07002337
Darren Hartca5f9522009-04-03 13:39:33 -07002338/**
Darren Hartdd973992009-04-03 13:40:02 -07002339 * fixup_owner() - Post lock pi_state and corner case management
2340 * @uaddr: user address of the futex
Darren Hartdd973992009-04-03 13:40:02 -07002341 * @q: futex_q (contains pi_state and access to the rt_mutex)
2342 * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0)
2343 *
2344 * After attempting to lock an rt_mutex, this function is called to cleanup
2345 * the pi_state owner as well as handle race conditions that may allow us to
2346 * acquire the lock. Must be called with the hb lock held.
2347 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002348 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002349 * - 1 - success, lock taken;
2350 * - 0 - success, lock not taken;
2351 * - <0 - on error (-EFAULT)
Darren Hartdd973992009-04-03 13:40:02 -07002352 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002353static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
Darren Hartdd973992009-04-03 13:40:02 -07002354{
Darren Hartdd973992009-04-03 13:40:02 -07002355 int ret = 0;
2356
2357 if (locked) {
2358 /*
2359 * Got the lock. We might not be the anticipated owner if we
2360 * did a lock-steal - fix up the PI-state in that case:
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002361 *
2362 * We can safely read pi_state->owner without holding wait_lock
2363 * because we now own the rt_mutex, only the owner will attempt
2364 * to change it.
Darren Hartdd973992009-04-03 13:40:02 -07002365 */
2366 if (q->pi_state->owner != current)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002367 ret = fixup_pi_state_owner(uaddr, q, current);
Darren Hartdd973992009-04-03 13:40:02 -07002368 goto out;
2369 }
2370
2371 /*
Darren Hartdd973992009-04-03 13:40:02 -07002372 * Paranoia check. If we did not take the lock, then we should not be
Lai Jiangshan81612392011-01-14 17:09:41 +08002373 * the owner of the rt_mutex.
Darren Hartdd973992009-04-03 13:40:02 -07002374 */
Peter Zijlstra73d786b2017-03-22 11:35:54 +01002375 if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
Darren Hartdd973992009-04-03 13:40:02 -07002376 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
2377 "pi-state %p\n", ret,
2378 q->pi_state->pi_mutex.owner,
2379 q->pi_state->owner);
Peter Zijlstra73d786b2017-03-22 11:35:54 +01002380 }
Darren Hartdd973992009-04-03 13:40:02 -07002381
2382out:
2383 return ret ? ret : locked;
2384}
2385
2386/**
Darren Hartca5f9522009-04-03 13:39:33 -07002387 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
2388 * @hb: the futex hash bucket, must be locked by the caller
2389 * @q: the futex_q to queue up on
2390 * @timeout: the prepared hrtimer_sleeper, or null for no timeout
Darren Hartca5f9522009-04-03 13:39:33 -07002391 */
2392static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002393 struct hrtimer_sleeper *timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002394{
Darren Hart9beba3c2009-09-24 11:54:47 -07002395 /*
2396 * The task state is guaranteed to be set before another task can
Peter Zijlstrab92b8b32015-05-12 10:51:55 +02002397 * wake it. set_current_state() is implemented using smp_store_mb() and
Darren Hart9beba3c2009-09-24 11:54:47 -07002398 * queue_me() calls spin_unlock() upon completion, both serializing
2399 * access to the hash list and forcing another memory barrier.
2400 */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002401 set_current_state(TASK_INTERRUPTIBLE);
Darren Hart0729e192009-09-21 22:30:38 -07002402 queue_me(q, hb);
Darren Hartca5f9522009-04-03 13:39:33 -07002403
2404 /* Arm the timer */
Thomas Gleixner2e4b0d32015-04-14 21:09:13 +00002405 if (timeout)
Darren Hartca5f9522009-04-03 13:39:33 -07002406 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002407
2408 /*
Darren Hart0729e192009-09-21 22:30:38 -07002409 * If we have been removed from the hash list, then another task
2410 * has tried to wake us, and we can skip the call to schedule().
Darren Hartca5f9522009-04-03 13:39:33 -07002411 */
2412 if (likely(!plist_node_empty(&q->list))) {
2413 /*
2414 * If the timer has already expired, current will already be
2415 * flagged for rescheduling. Only call schedule if there
2416 * is no timeout, or if it has yet to expire.
2417 */
2418 if (!timeout || timeout->task)
Colin Cross88c80042013-05-01 18:35:05 -07002419 freezable_schedule();
Darren Hartca5f9522009-04-03 13:39:33 -07002420 }
2421 __set_current_state(TASK_RUNNING);
2422}
2423
Darren Hartf8010732009-04-03 13:40:40 -07002424/**
2425 * futex_wait_setup() - Prepare to wait on a futex
2426 * @uaddr: the futex userspace address
2427 * @val: the expected value
Darren Hartb41277d2010-11-08 13:10:09 -08002428 * @flags: futex flags (FLAGS_SHARED, etc.)
Darren Hartf8010732009-04-03 13:40:40 -07002429 * @q: the associated futex_q
2430 * @hb: storage for hash_bucket pointer to be returned to caller
2431 *
2432 * Setup the futex_q and locate the hash_bucket. Get the futex value and
2433 * compare it with the expected value. Handle atomic faults internally.
2434 * Return with the hb lock held and a q.key reference on success, and unlocked
2435 * with no q.key reference on failure.
2436 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002437 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002438 * - 0 - uaddr contains val and hb has been locked;
2439 * - <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
Darren Hartf8010732009-04-03 13:40:40 -07002440 */
Darren Hartb41277d2010-11-08 13:10:09 -08002441static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
Darren Hartf8010732009-04-03 13:40:40 -07002442 struct futex_q *q, struct futex_hash_bucket **hb)
2443{
2444 u32 uval;
2445 int ret;
2446
2447 /*
2448 * Access the page AFTER the hash-bucket is locked.
2449 * Order is important:
2450 *
2451 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
2452 * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); }
2453 *
2454 * The basic logical guarantee of a futex is that it blocks ONLY
2455 * if cond(var) is known to be true at the time of blocking, for
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002456 * any cond. If we locked the hash-bucket after testing *uaddr, that
2457 * would open a race condition where we could block indefinitely with
Darren Hartf8010732009-04-03 13:40:40 -07002458 * cond(var) false, which would violate the guarantee.
2459 *
Michel Lespinasse8fe8f542011-03-06 18:07:50 -08002460 * On the other hand, we insert q and release the hash-bucket only
2461 * after testing *uaddr. This guarantees that futex_wait() will NOT
2462 * absorb a wakeup if *uaddr does not match the desired values
2463 * while the syscall executes.
Darren Hartf8010732009-04-03 13:40:40 -07002464 */
2465retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002466 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
Darren Hartf8010732009-04-03 13:40:40 -07002467 if (unlikely(ret != 0))
Darren Harta5a2a0c2009-04-10 09:50:05 -07002468 return ret;
Darren Hartf8010732009-04-03 13:40:40 -07002469
2470retry_private:
2471 *hb = queue_lock(q);
2472
2473 ret = get_futex_value_locked(&uval, uaddr);
2474
2475 if (ret) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002476 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002477
2478 ret = get_user(uval, uaddr);
2479 if (ret)
2480 goto out;
2481
Darren Hartb41277d2010-11-08 13:10:09 -08002482 if (!(flags & FLAGS_SHARED))
Darren Hartf8010732009-04-03 13:40:40 -07002483 goto retry_private;
2484
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002485 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002486 goto retry;
2487 }
2488
2489 if (uval != val) {
Jason Low0d00c7b2014-01-12 15:31:22 -08002490 queue_unlock(*hb);
Darren Hartf8010732009-04-03 13:40:40 -07002491 ret = -EWOULDBLOCK;
2492 }
2493
2494out:
2495 if (ret)
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002496 put_futex_key(&q->key);
Darren Hartf8010732009-04-03 13:40:40 -07002497 return ret;
2498}
2499
Darren Hartb41277d2010-11-08 13:10:09 -08002500static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
2501 ktime_t *abs_time, u32 bitset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502{
Darren Hartca5f9522009-04-03 13:39:33 -07002503 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002504 struct restart_block *restart;
Ingo Molnare2970f22006-06-27 02:54:47 -07002505 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002506 struct futex_q q = futex_q_init;
Ingo Molnare2970f22006-06-27 02:54:47 -07002507 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508
Thomas Gleixnercd689982008-02-01 17:45:14 +01002509 if (!bitset)
2510 return -EINVAL;
Thomas Gleixnercd689982008-02-01 17:45:14 +01002511 q.bitset = bitset;
Darren Hartca5f9522009-04-03 13:39:33 -07002512
2513 if (abs_time) {
2514 to = &timeout;
2515
Darren Hartb41277d2010-11-08 13:10:09 -08002516 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
2517 CLOCK_REALTIME : CLOCK_MONOTONIC,
2518 HRTIMER_MODE_ABS);
Darren Hartca5f9522009-04-03 13:39:33 -07002519 hrtimer_init_sleeper(to, current);
2520 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2521 current->timer_slack_ns);
2522 }
2523
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002524retry:
Darren Hart7ada8762010-10-17 08:35:04 -07002525 /*
2526 * Prepare to wait on uaddr. On success, holds hb lock and increments
2527 * q.key refs.
2528 */
Darren Hartb41277d2010-11-08 13:10:09 -08002529 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Darren Hartf8010732009-04-03 13:40:40 -07002530 if (ret)
Darren Hart42d35d42008-12-29 15:49:53 -08002531 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532
Darren Hartca5f9522009-04-03 13:39:33 -07002533 /* queue_me and wait for wakeup, timeout, or a signal. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02002534 futex_wait_queue_me(hb, &q, to);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
2536 /* If we were woken (and unqueued), we succeeded, whatever. */
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002537 ret = 0;
Darren Hart7ada8762010-10-17 08:35:04 -07002538 /* unqueue_me() drops q.key ref */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 if (!unqueue_me(&q))
Darren Hart7ada8762010-10-17 08:35:04 -07002540 goto out;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002541 ret = -ETIMEDOUT;
Darren Hartca5f9522009-04-03 13:39:33 -07002542 if (to && !to->task)
Darren Hart7ada8762010-10-17 08:35:04 -07002543 goto out;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002544
Ingo Molnare2970f22006-06-27 02:54:47 -07002545 /*
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002546 * We expect signal_pending(current), but we might be the
2547 * victim of a spurious wakeup as well.
Ingo Molnare2970f22006-06-27 02:54:47 -07002548 */
Darren Hart7ada8762010-10-17 08:35:04 -07002549 if (!signal_pending(current))
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002550 goto retry;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002551
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002552 ret = -ERESTARTSYS;
Pierre Peifferc19384b2007-05-09 02:35:02 -07002553 if (!abs_time)
Darren Hart7ada8762010-10-17 08:35:04 -07002554 goto out;
Steven Rostedtce6bd422007-12-05 15:46:09 +01002555
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002556 restart = &current->restart_block;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002557 restart->fn = futex_wait_restart;
Namhyung Kima3c74c52010-09-14 21:43:47 +09002558 restart->futex.uaddr = uaddr;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002559 restart->futex.val = val;
Thomas Gleixner2456e852016-12-25 11:38:40 +01002560 restart->futex.time = *abs_time;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002561 restart->futex.bitset = bitset;
Darren Hart0cd9c642011-04-14 15:41:57 -07002562 restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
Peter Zijlstra2fff78c2009-02-11 18:10:10 +01002563
2564 ret = -ERESTART_RESTARTBLOCK;
2565
Darren Hart42d35d42008-12-29 15:49:53 -08002566out:
Darren Hartca5f9522009-04-03 13:39:33 -07002567 if (to) {
2568 hrtimer_cancel(&to->timer);
2569 destroy_hrtimer_on_stack(&to->timer);
2570 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002571 return ret;
2572}
2573
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002574
2575static long futex_wait_restart(struct restart_block *restart)
2576{
Namhyung Kima3c74c52010-09-14 21:43:47 +09002577 u32 __user *uaddr = restart->futex.uaddr;
Darren Harta72188d2009-04-03 13:40:22 -07002578 ktime_t t, *tp = NULL;
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002579
Darren Harta72188d2009-04-03 13:40:22 -07002580 if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
Thomas Gleixner2456e852016-12-25 11:38:40 +01002581 t = restart->futex.time;
Darren Harta72188d2009-04-03 13:40:22 -07002582 tp = &t;
2583 }
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002584 restart->fn = do_no_restart_syscall;
Darren Hartb41277d2010-11-08 13:10:09 -08002585
2586 return (long)futex_wait(uaddr, restart->futex.flags,
2587 restart->futex.val, tp, restart->futex.bitset);
Nick Piggin72c1bbf2007-05-08 00:26:43 -07002588}
2589
2590
Ingo Molnarc87e2832006-06-27 02:54:58 -07002591/*
2592 * Userspace tried a 0 -> TID atomic transition of the futex value
2593 * and failed. The kernel side here does the whole locking operation:
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002594 * if there are waiters then it will block as a consequence of relying
2595 * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
2596 * a 0 value of the futex too.).
2597 *
2598 * Also serves as futex trylock_pi()'ing, and due semantics.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002599 */
Michael Kerrisk996636d2015-01-16 20:28:06 +01002600static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
Darren Hartb41277d2010-11-08 13:10:09 -08002601 ktime_t *time, int trylock)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002602{
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002603 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002604 struct futex_pi_state *pi_state = NULL;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002605 struct rt_mutex_waiter rt_waiter;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002606 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08002607 struct futex_q q = futex_q_init;
Darren Hartdd973992009-04-03 13:40:02 -07002608 int res, ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002609
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04002610 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2611 return -ENOSYS;
2612
Ingo Molnarc87e2832006-06-27 02:54:58 -07002613 if (refill_pi_state_cache())
2614 return -ENOMEM;
2615
Pierre Peifferc19384b2007-05-09 02:35:02 -07002616 if (time) {
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002617 to = &timeout;
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002618 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
2619 HRTIMER_MODE_ABS);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002620 hrtimer_init_sleeper(to, current);
Arjan van de Vencc584b22008-09-01 15:02:30 -07002621 hrtimer_set_expires(&to->timer, *time);
Thomas Gleixnerc5780e92006-09-08 09:47:15 -07002622 }
2623
Darren Hart42d35d42008-12-29 15:49:53 -08002624retry:
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002625 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002626 if (unlikely(ret != 0))
Darren Hart42d35d42008-12-29 15:49:53 -08002627 goto out;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002628
Darren Harte4dc5b72009-03-12 00:56:13 -07002629retry_private:
Eric Sesterhenn82af7ac2008-01-25 10:40:46 +01002630 hb = queue_lock(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002631
Darren Hartbab5bc92009-04-07 23:23:50 -07002632 ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002633 if (unlikely(ret)) {
Davidlohr Bueso767f5092015-06-29 23:26:01 -07002634 /*
2635 * Atomic work succeeded and we got the lock,
2636 * or failed. Either way, we do _not_ block.
2637 */
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002638 switch (ret) {
Darren Hart1a520842009-04-03 13:39:52 -07002639 case 1:
2640 /* We got the lock. */
2641 ret = 0;
2642 goto out_unlock_put_key;
2643 case -EFAULT:
2644 goto uaddr_faulted;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002645 case -EAGAIN:
2646 /*
Thomas Gleixneraf54d6a2014-06-11 20:45:41 +00002647 * Two reasons for this:
2648 * - Task is exiting and we just wait for the
2649 * exit to complete.
2650 * - The user space value changed.
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002651 */
Jason Low0d00c7b2014-01-12 15:31:22 -08002652 queue_unlock(hb);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002653 put_futex_key(&q.key);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002654 cond_resched();
2655 goto retry;
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002656 default:
Darren Hart42d35d42008-12-29 15:49:53 -08002657 goto out_unlock_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002658 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002659 }
2660
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002661 WARN_ON(!q.pi_state);
2662
Ingo Molnarc87e2832006-06-27 02:54:58 -07002663 /*
2664 * Only actually queue now that the atomic ops are done:
2665 */
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002666 __queue_me(&q, hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002667
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002668 if (trylock) {
Peter Zijlstra5293c2e2017-03-22 11:35:51 +01002669 ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002670 /* Fixup the trylock return value: */
2671 ret = ret ? 0 : -EWOULDBLOCK;
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002672 goto no_block;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002673 }
2674
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002675 rt_mutex_init_waiter(&rt_waiter);
Peter Zijlstra56222b22017-03-22 11:36:00 +01002676
2677 /*
2678 * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
2679 * hold it while doing rt_mutex_start_proxy(), because then it will
2680 * include hb->lock in the blocking chain, even through we'll not in
2681 * fact hold it while blocking. This will lead it to report -EDEADLK
2682 * and BUG when futex_unlock_pi() interleaves with this.
2683 *
2684 * Therefore acquire wait_lock while holding hb->lock, but drop the
2685 * latter before calling rt_mutex_start_proxy_lock(). This still fully
2686 * serializes against futex_unlock_pi() as that does the exact same
2687 * lock handoff sequence.
2688 */
2689 raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
2690 spin_unlock(q.lock_ptr);
2691 ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
2692 raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
2693
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002694 if (ret) {
2695 if (ret == 1)
2696 ret = 0;
2697
Peter Zijlstra56222b22017-03-22 11:36:00 +01002698 spin_lock(q.lock_ptr);
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002699 goto no_block;
2700 }
2701
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002702
2703 if (unlikely(to))
2704 hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
2705
2706 ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
2707
Vernon Mauerya99e4e42006-07-01 04:35:42 -07002708 spin_lock(q.lock_ptr);
Darren Hartdd973992009-04-03 13:40:02 -07002709 /*
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002710 * If we failed to acquire the lock (signal/timeout), we must
2711 * first acquire the hb->lock before removing the lock from the
2712 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
2713 * wait lists consistent.
Peter Zijlstra56222b22017-03-22 11:36:00 +01002714 *
2715 * In particular; it is important that futex_unlock_pi() can not
2716 * observe this inconsistency.
Peter Zijlstracfafcd12017-03-22 11:35:58 +01002717 */
2718 if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
2719 ret = 0;
2720
2721no_block:
2722 /*
Darren Hartdd973992009-04-03 13:40:02 -07002723 * Fixup the pi_state owner and possibly acquire the lock if we
2724 * haven't already.
2725 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002726 res = fixup_owner(uaddr, &q, !ret);
Darren Hartdd973992009-04-03 13:40:02 -07002727 /*
2728 * If fixup_owner() returned an error, proprogate that. If it acquired
2729 * the lock, clear our -ETIMEDOUT or -EINTR.
2730 */
2731 if (res)
2732 ret = (res < 0) ? res : 0;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002733
Darren Harte8f63862009-03-12 00:56:06 -07002734 /*
Darren Hartdd973992009-04-03 13:40:02 -07002735 * If fixup_owner() faulted and was unable to handle the fault, unlock
2736 * it and return the fault to userspace.
Darren Harte8f63862009-03-12 00:56:06 -07002737 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002738 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
2739 pi_state = q.pi_state;
2740 get_pi_state(pi_state);
2741 }
Darren Harte8f63862009-03-12 00:56:06 -07002742
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002743 /* Unqueue and drop the lock */
2744 unqueue_me_pi(&q);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002745
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002746 if (pi_state) {
2747 rt_mutex_futex_unlock(&pi_state->pi_mutex);
2748 put_pi_state(pi_state);
2749 }
2750
Mikael Pettersson5ecb01c2010-01-23 22:36:29 +01002751 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002752
Darren Hart42d35d42008-12-29 15:49:53 -08002753out_unlock_put_key:
Jason Low0d00c7b2014-01-12 15:31:22 -08002754 queue_unlock(hb);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002755
Darren Hart42d35d42008-12-29 15:49:53 -08002756out_put_key:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002757 put_futex_key(&q.key);
Darren Hart42d35d42008-12-29 15:49:53 -08002758out:
Thomas Gleixner97181f92017-04-10 18:03:36 +02002759 if (to) {
2760 hrtimer_cancel(&to->timer);
Thomas Gleixner237fc6e2008-04-30 00:55:04 -07002761 destroy_hrtimer_on_stack(&to->timer);
Thomas Gleixner97181f92017-04-10 18:03:36 +02002762 }
Darren Hartdd973992009-04-03 13:40:02 -07002763 return ret != -EINTR ? ret : -ERESTARTNOINTR;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002764
Darren Hart42d35d42008-12-29 15:49:53 -08002765uaddr_faulted:
Jason Low0d00c7b2014-01-12 15:31:22 -08002766 queue_unlock(hb);
Alexey Kuznetsov778e9a92007-06-08 13:47:00 -07002767
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002768 ret = fault_in_user_writeable(uaddr);
Darren Harte4dc5b72009-03-12 00:56:13 -07002769 if (ret)
2770 goto out_put_key;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002771
Darren Hartb41277d2010-11-08 13:10:09 -08002772 if (!(flags & FLAGS_SHARED))
Darren Harte4dc5b72009-03-12 00:56:13 -07002773 goto retry_private;
2774
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002775 put_futex_key(&q.key);
Darren Harte4dc5b72009-03-12 00:56:13 -07002776 goto retry;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002777}
2778
2779/*
Ingo Molnarc87e2832006-06-27 02:54:58 -07002780 * Userspace attempted a TID -> 0 atomic transition, and failed.
2781 * This is the in-kernel slowpath: we look up the PI state (if any),
2782 * and do the rt-mutex unlock.
2783 */
Darren Hartb41277d2010-11-08 13:10:09 -08002784static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002785{
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002786 u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
Peter Zijlstra38d47c12008-09-26 19:32:20 +02002787 union futex_key key = FUTEX_KEY_INIT;
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002788 struct futex_hash_bucket *hb;
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01002789 struct futex_q *top_waiter;
Darren Harte4dc5b72009-03-12 00:56:13 -07002790 int ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002791
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04002792 if (!IS_ENABLED(CONFIG_FUTEX_PI))
2793 return -ENOSYS;
2794
Ingo Molnarc87e2832006-06-27 02:54:58 -07002795retry:
2796 if (get_user(uval, uaddr))
2797 return -EFAULT;
2798 /*
2799 * We release only a lock we actually own:
2800 */
Thomas Gleixnerc0c9ed12011-03-11 11:51:22 +01002801 if ((uval & FUTEX_TID_MASK) != vpid)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002802 return -EPERM;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002803
Shawn Bohrer9ea71502011-06-30 11:21:32 -05002804 ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002805 if (ret)
2806 return ret;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002807
2808 hb = hash_futex(&key);
2809 spin_lock(&hb->lock);
2810
Ingo Molnarc87e2832006-06-27 02:54:58 -07002811 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002812 * Check waiters first. We do not trust user space values at
2813 * all and we at least want to know if user space fiddled
2814 * with the futex value instead of blindly unlocking.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002815 */
Peter Zijlstra499f5ac2017-03-22 11:35:48 +01002816 top_waiter = futex_top_waiter(hb, &key);
2817 if (top_waiter) {
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002818 struct futex_pi_state *pi_state = top_waiter->pi_state;
2819
2820 ret = -EINVAL;
2821 if (!pi_state)
2822 goto out_unlock;
2823
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002824 /*
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002825 * If current does not own the pi_state then the futex is
2826 * inconsistent and user space fiddled with the futex value.
2827 */
2828 if (pi_state->owner != current)
2829 goto out_unlock;
2830
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002831 get_pi_state(pi_state);
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01002832 /*
Peter Zijlstrabebe5b52017-03-22 11:35:59 +01002833 * By taking wait_lock while still holding hb->lock, we ensure
2834 * there is no point where we hold neither; and therefore
2835 * wake_futex_pi() must observe a state consistent with what we
2836 * observed.
2837 */
2838 raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002839 spin_unlock(&hb->lock);
2840
2841 ret = wake_futex_pi(uaddr, uval, pi_state);
2842
2843 put_pi_state(pi_state);
2844
2845 /*
2846 * Success, we're done! No tricky corner cases.
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002847 */
2848 if (!ret)
2849 goto out_putkey;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002850 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002851 * The atomic access to the futex value generated a
2852 * pagefault, so retry the user-access and the wakeup:
Ingo Molnarc87e2832006-06-27 02:54:58 -07002853 */
2854 if (ret == -EFAULT)
2855 goto pi_faulted;
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002856 /*
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02002857 * A unconditional UNLOCK_PI op raced against a waiter
2858 * setting the FUTEX_WAITERS bit. Try again.
2859 */
2860 if (ret == -EAGAIN) {
Sebastian Andrzej Siewior89e9e662016-04-15 14:35:39 +02002861 put_futex_key(&key);
2862 goto retry;
2863 }
2864 /*
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002865 * wake_futex_pi has detected invalid state. Tell user
2866 * space.
2867 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002868 goto out_putkey;
Ingo Molnarc87e2832006-06-27 02:54:58 -07002869 }
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002870
Ingo Molnarc87e2832006-06-27 02:54:58 -07002871 /*
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002872 * We have no kernel internal state, i.e. no waiters in the
2873 * kernel. Waiters which are about to queue themselves are stuck
2874 * on hb->lock. So we can safely ignore them. We do neither
2875 * preserve the WAITERS bit not the OWNER_DIED one. We are the
2876 * owner.
Ingo Molnarc87e2832006-06-27 02:54:58 -07002877 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002878 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) {
2879 spin_unlock(&hb->lock);
Thomas Gleixner13fbca42014-06-03 12:27:07 +00002880 goto pi_faulted;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002881 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07002882
Thomas Gleixnerccf9e6a2014-06-11 20:45:38 +00002883 /*
2884 * If uval has changed, let user space handle it.
2885 */
2886 ret = (curval == uval) ? 0 : -EAGAIN;
2887
Ingo Molnarc87e2832006-06-27 02:54:58 -07002888out_unlock:
2889 spin_unlock(&hb->lock);
Sebastian Andrzej Siewior802ab582015-06-17 10:33:50 +02002890out_putkey:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002891 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002892 return ret;
2893
2894pi_faulted:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01002895 put_futex_key(&key);
Ingo Molnarc87e2832006-06-27 02:54:58 -07002896
Thomas Gleixnerd0725992009-06-11 23:15:43 +02002897 ret = fault_in_user_writeable(uaddr);
Darren Hartb5686362008-12-18 15:06:34 -08002898 if (!ret)
Ingo Molnarc87e2832006-06-27 02:54:58 -07002899 goto retry;
2900
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 return ret;
2902}
2903
Darren Hart52400ba2009-04-03 13:40:49 -07002904/**
2905 * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2906 * @hb: the hash_bucket futex_q was original enqueued on
2907 * @q: the futex_q woken while waiting to be requeued
2908 * @key2: the futex_key of the requeue target futex
2909 * @timeout: the timeout associated with the wait (NULL if none)
2910 *
2911 * Detect if the task was woken on the initial futex as opposed to the requeue
2912 * target futex. If so, determine if it was a timeout or a signal that caused
2913 * the wakeup and return the appropriate error code to the caller. Must be
2914 * called with the hb lock held.
2915 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002916 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002917 * - 0 = no early wakeup detected;
2918 * - <0 = -ETIMEDOUT or -ERESTARTNOINTR
Darren Hart52400ba2009-04-03 13:40:49 -07002919 */
2920static inline
2921int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2922 struct futex_q *q, union futex_key *key2,
2923 struct hrtimer_sleeper *timeout)
2924{
2925 int ret = 0;
2926
2927 /*
2928 * With the hb lock held, we avoid races while we process the wakeup.
2929 * We only need to hold hb (and not hb2) to ensure atomicity as the
2930 * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2931 * It can't be requeued from uaddr2 to something else since we don't
2932 * support a PI aware source futex for requeue.
2933 */
2934 if (!match_futex(&q->key, key2)) {
2935 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2936 /*
2937 * We were woken prior to requeue by a timeout or a signal.
2938 * Unqueue the futex_q and determine which it was.
2939 */
Lai Jiangshan2e129782010-12-22 14:18:50 +08002940 plist_del(&q->list, &hb->chain);
Linus Torvalds11d46162014-03-20 22:11:17 -07002941 hb_waiters_dec(hb);
Darren Hart52400ba2009-04-03 13:40:49 -07002942
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002943 /* Handle spurious wakeups gracefully */
Thomas Gleixner11df6dd2009-10-28 20:26:48 +01002944 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07002945 if (timeout && !timeout->task)
2946 ret = -ETIMEDOUT;
Thomas Gleixnerd58e6572009-10-13 20:40:43 +02002947 else if (signal_pending(current))
Thomas Gleixner1c840c12009-05-20 09:22:40 +02002948 ret = -ERESTARTNOINTR;
Darren Hart52400ba2009-04-03 13:40:49 -07002949 }
2950 return ret;
2951}
2952
2953/**
2954 * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
Darren Hart56ec1602009-09-21 22:29:59 -07002955 * @uaddr: the futex we initially wait on (non-pi)
Darren Hartb41277d2010-11-08 13:10:09 -08002956 * @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07002957 * the same type, no requeueing from private to shared, etc.
Darren Hart52400ba2009-04-03 13:40:49 -07002958 * @val: the expected value of uaddr
2959 * @abs_time: absolute timeout
Darren Hart56ec1602009-09-21 22:29:59 -07002960 * @bitset: 32 bit wakeup bitset set by userspace, defaults to all
Darren Hart52400ba2009-04-03 13:40:49 -07002961 * @uaddr2: the pi futex we will take prior to returning to user-space
2962 *
2963 * The caller will wait on uaddr and will be requeued by futex_requeue() to
Darren Hart6f7b0a22012-07-20 11:53:31 -07002964 * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
2965 * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
2966 * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
2967 * without one, the pi logic would not know which task to boost/deboost, if
2968 * there was a need to.
Darren Hart52400ba2009-04-03 13:40:49 -07002969 *
2970 * We call schedule in futex_wait_queue_me() when we enqueue and return there
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002971 * via the following--
Darren Hart52400ba2009-04-03 13:40:49 -07002972 * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
Darren Hartcc6db4e2009-07-31 16:20:10 -07002973 * 2) wakeup on uaddr2 after a requeue
2974 * 3) signal
2975 * 4) timeout
Darren Hart52400ba2009-04-03 13:40:49 -07002976 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002977 * If 3, cleanup and return -ERESTARTNOINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07002978 *
2979 * If 2, we may then block on trying to take the rt_mutex and return via:
2980 * 5) successful lock
2981 * 6) signal
2982 * 7) timeout
2983 * 8) other lock acquisition failure
2984 *
Darren Hartcc6db4e2009-07-31 16:20:10 -07002985 * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
Darren Hart52400ba2009-04-03 13:40:49 -07002986 *
2987 * If 4 or 7, we cleanup and return with -ETIMEDOUT.
2988 *
Randy Dunlap6c23cbb2013-03-05 10:00:24 -08002989 * Return:
Mauro Carvalho Chehab7b4ff1a2017-05-11 10:17:45 -03002990 * - 0 - On success;
2991 * - <0 - On error
Darren Hart52400ba2009-04-03 13:40:49 -07002992 */
Darren Hartb41277d2010-11-08 13:10:09 -08002993static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
Darren Hart52400ba2009-04-03 13:40:49 -07002994 u32 val, ktime_t *abs_time, u32 bitset,
Darren Hartb41277d2010-11-08 13:10:09 -08002995 u32 __user *uaddr2)
Darren Hart52400ba2009-04-03 13:40:49 -07002996{
2997 struct hrtimer_sleeper timeout, *to = NULL;
Peter Zijlstra16ffa122017-03-22 11:35:55 +01002998 struct futex_pi_state *pi_state = NULL;
Darren Hart52400ba2009-04-03 13:40:49 -07002999 struct rt_mutex_waiter rt_waiter;
Darren Hart52400ba2009-04-03 13:40:49 -07003000 struct futex_hash_bucket *hb;
Darren Hart5bdb05f2010-11-08 13:40:28 -08003001 union futex_key key2 = FUTEX_KEY_INIT;
3002 struct futex_q q = futex_q_init;
Darren Hart52400ba2009-04-03 13:40:49 -07003003 int res, ret;
Darren Hart52400ba2009-04-03 13:40:49 -07003004
Nicolas Pitrebc2eecd2017-08-01 00:31:32 -04003005 if (!IS_ENABLED(CONFIG_FUTEX_PI))
3006 return -ENOSYS;
3007
Darren Hart6f7b0a22012-07-20 11:53:31 -07003008 if (uaddr == uaddr2)
3009 return -EINVAL;
3010
Darren Hart52400ba2009-04-03 13:40:49 -07003011 if (!bitset)
3012 return -EINVAL;
3013
3014 if (abs_time) {
3015 to = &timeout;
Darren Hartb41277d2010-11-08 13:10:09 -08003016 hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
3017 CLOCK_REALTIME : CLOCK_MONOTONIC,
3018 HRTIMER_MODE_ABS);
Darren Hart52400ba2009-04-03 13:40:49 -07003019 hrtimer_init_sleeper(to, current);
3020 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
3021 current->timer_slack_ns);
3022 }
3023
3024 /*
3025 * The waiter is allocated on our stack, manipulated by the requeue
3026 * code while we sleep on uaddr.
3027 */
Peter Zijlstra50809352017-03-22 11:35:56 +01003028 rt_mutex_init_waiter(&rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003029
Shawn Bohrer9ea71502011-06-30 11:21:32 -05003030 ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
Darren Hart52400ba2009-04-03 13:40:49 -07003031 if (unlikely(ret != 0))
3032 goto out;
3033
Darren Hart84bc4af2009-08-13 17:36:53 -07003034 q.bitset = bitset;
3035 q.rt_waiter = &rt_waiter;
3036 q.requeue_pi_key = &key2;
3037
Darren Hart7ada8762010-10-17 08:35:04 -07003038 /*
3039 * Prepare to wait on uaddr. On success, increments q.key (key1) ref
3040 * count.
3041 */
Darren Hartb41277d2010-11-08 13:10:09 -08003042 ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02003043 if (ret)
3044 goto out_key2;
Darren Hart52400ba2009-04-03 13:40:49 -07003045
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00003046 /*
3047 * The check above which compares uaddrs is not sufficient for
3048 * shared futexes. We need to compare the keys:
3049 */
3050 if (match_futex(&q.key, &key2)) {
Thomas Gleixner13c42c22014-09-11 23:44:35 +02003051 queue_unlock(hb);
Thomas Gleixnere9c243a2014-06-03 12:27:06 +00003052 ret = -EINVAL;
3053 goto out_put_keys;
3054 }
3055
Darren Hart52400ba2009-04-03 13:40:49 -07003056 /* Queue the futex_q, drop the hb lock, wait for wakeup. */
Thomas Gleixnerf1a11e02009-05-05 19:21:40 +02003057 futex_wait_queue_me(hb, &q, to);
Darren Hart52400ba2009-04-03 13:40:49 -07003058
3059 spin_lock(&hb->lock);
3060 ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
3061 spin_unlock(&hb->lock);
3062 if (ret)
3063 goto out_put_keys;
3064
3065 /*
3066 * In order for us to be here, we know our q.key == key2, and since
3067 * we took the hb->lock above, we also know that futex_requeue() has
3068 * completed and we no longer have to concern ourselves with a wakeup
Darren Hart7ada8762010-10-17 08:35:04 -07003069 * race with the atomic proxy lock acquisition by the requeue code. The
3070 * futex_requeue dropped our key1 reference and incremented our key2
3071 * reference count.
Darren Hart52400ba2009-04-03 13:40:49 -07003072 */
3073
3074 /* Check if the requeue code acquired the second futex for us. */
3075 if (!q.rt_waiter) {
3076 /*
3077 * Got the lock. We might not be the anticipated owner if we
3078 * did a lock-steal - fix up the PI-state in that case.
3079 */
3080 if (q.pi_state && (q.pi_state->owner != current)) {
3081 spin_lock(q.lock_ptr);
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003082 ret = fixup_pi_state_owner(uaddr2, &q, current);
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003083 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3084 pi_state = q.pi_state;
3085 get_pi_state(pi_state);
3086 }
Thomas Gleixnerfb75a422015-12-19 20:07:38 +00003087 /*
3088 * Drop the reference to the pi state which
3089 * the requeue_pi() code acquired for us.
3090 */
Thomas Gleixner29e9ee52015-12-19 20:07:39 +00003091 put_pi_state(q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07003092 spin_unlock(q.lock_ptr);
3093 }
3094 } else {
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003095 struct rt_mutex *pi_mutex;
3096
Darren Hart52400ba2009-04-03 13:40:49 -07003097 /*
3098 * We have been woken up by futex_unlock_pi(), a timeout, or a
3099 * signal. futex_unlock_pi() will not destroy the lock_ptr nor
3100 * the pi_state.
3101 */
Darren Hartf27071c2012-07-20 11:53:30 -07003102 WARN_ON(!q.pi_state);
Darren Hart52400ba2009-04-03 13:40:49 -07003103 pi_mutex = &q.pi_state->pi_mutex;
Peter Zijlstra38d589f2017-03-22 11:35:57 +01003104 ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003105
3106 spin_lock(q.lock_ptr);
Peter Zijlstra38d589f2017-03-22 11:35:57 +01003107 if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
3108 ret = 0;
3109
3110 debug_rt_mutex_free_waiter(&rt_waiter);
Darren Hart52400ba2009-04-03 13:40:49 -07003111 /*
3112 * Fixup the pi_state owner and possibly acquire the lock if we
3113 * haven't already.
3114 */
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003115 res = fixup_owner(uaddr2, &q, !ret);
Darren Hart52400ba2009-04-03 13:40:49 -07003116 /*
3117 * If fixup_owner() returned an error, proprogate that. If it
Darren Hart56ec1602009-09-21 22:29:59 -07003118 * acquired the lock, clear -ETIMEDOUT or -EINTR.
Darren Hart52400ba2009-04-03 13:40:49 -07003119 */
3120 if (res)
3121 ret = (res < 0) ? res : 0;
3122
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003123 /*
3124 * If fixup_pi_state_owner() faulted and was unable to handle
3125 * the fault, unlock the rt_mutex and return the fault to
3126 * userspace.
3127 */
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003128 if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
3129 pi_state = q.pi_state;
3130 get_pi_state(pi_state);
3131 }
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003132
Darren Hart52400ba2009-04-03 13:40:49 -07003133 /* Unqueue and drop the lock. */
3134 unqueue_me_pi(&q);
3135 }
3136
Peter Zijlstra16ffa122017-03-22 11:35:55 +01003137 if (pi_state) {
3138 rt_mutex_futex_unlock(&pi_state->pi_mutex);
3139 put_pi_state(pi_state);
3140 }
3141
Peter Zijlstrac236c8e2017-03-04 10:27:18 +01003142 if (ret == -EINTR) {
Darren Hart52400ba2009-04-03 13:40:49 -07003143 /*
Darren Hartcc6db4e2009-07-31 16:20:10 -07003144 * We've already been requeued, but cannot restart by calling
3145 * futex_lock_pi() directly. We could restart this syscall, but
3146 * it would detect that the user space "val" changed and return
3147 * -EWOULDBLOCK. Save the overhead of the restart and return
3148 * -EWOULDBLOCK directly.
Darren Hart52400ba2009-04-03 13:40:49 -07003149 */
Thomas Gleixner20708872009-05-19 23:04:59 +02003150 ret = -EWOULDBLOCK;
Darren Hart52400ba2009-04-03 13:40:49 -07003151 }
3152
3153out_put_keys:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003154 put_futex_key(&q.key);
Thomas Gleixnerc8b15a72009-05-20 09:18:50 +02003155out_key2:
Thomas Gleixnerae791a22010-11-10 13:30:36 +01003156 put_futex_key(&key2);
Darren Hart52400ba2009-04-03 13:40:49 -07003157
3158out:
3159 if (to) {
3160 hrtimer_cancel(&to->timer);
3161 destroy_hrtimer_on_stack(&to->timer);
3162 }
3163 return ret;
3164}
3165
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003166/*
3167 * Support for robust futexes: the kernel cleans up held futexes at
3168 * thread exit time.
3169 *
3170 * Implementation: user-space maintains a per-thread list of locks it
3171 * is holding. Upon do_exit(), the kernel carefully walks this list,
3172 * and marks all locks that are owned by this thread with the
Ingo Molnarc87e2832006-06-27 02:54:58 -07003173 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003174 * always manipulated with the lock held, so the list is private and
3175 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
3176 * field, to allow the kernel to clean up if the thread dies after
3177 * acquiring the lock, but just before it could have added itself to
3178 * the list. There can only be one such pending lock.
3179 */
3180
3181/**
Darren Hartd96ee562009-09-21 22:30:22 -07003182 * sys_set_robust_list() - Set the robust-futex list head of a task
3183 * @head: pointer to the list-head
3184 * @len: length of the list-head, as userspace expects
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003185 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01003186SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
3187 size_t, len)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003188{
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003189 if (!futex_cmpxchg_enabled)
3190 return -ENOSYS;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003191 /*
3192 * The kernel knows only one size for now:
3193 */
3194 if (unlikely(len != sizeof(*head)))
3195 return -EINVAL;
3196
3197 current->robust_list = head;
3198
3199 return 0;
3200}
3201
3202/**
Darren Hartd96ee562009-09-21 22:30:22 -07003203 * sys_get_robust_list() - Get the robust-futex list head of a task
3204 * @pid: pid of the process [zero for current task]
3205 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
3206 * @len_ptr: pointer to a length field, the kernel fills in the header size
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003207 */
Heiko Carstens836f92a2009-01-14 14:14:33 +01003208SYSCALL_DEFINE3(get_robust_list, int, pid,
3209 struct robust_list_head __user * __user *, head_ptr,
3210 size_t __user *, len_ptr)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003211{
Al Viroba46df92006-10-10 22:46:07 +01003212 struct robust_list_head __user *head;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003213 unsigned long ret;
Kees Cookbdbb7762012-03-19 16:12:53 -07003214 struct task_struct *p;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003215
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003216 if (!futex_cmpxchg_enabled)
3217 return -ENOSYS;
3218
Kees Cookbdbb7762012-03-19 16:12:53 -07003219 rcu_read_lock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003220
Kees Cookbdbb7762012-03-19 16:12:53 -07003221 ret = -ESRCH;
3222 if (!pid)
3223 p = current;
3224 else {
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07003225 p = find_task_by_vpid(pid);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003226 if (!p)
3227 goto err_unlock;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003228 }
3229
Kees Cookbdbb7762012-03-19 16:12:53 -07003230 ret = -EPERM;
Jann Horncaaee622016-01-20 15:00:04 -08003231 if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
Kees Cookbdbb7762012-03-19 16:12:53 -07003232 goto err_unlock;
3233
3234 head = p->robust_list;
3235 rcu_read_unlock();
3236
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003237 if (put_user(sizeof(*head), len_ptr))
3238 return -EFAULT;
3239 return put_user(head, head_ptr);
3240
3241err_unlock:
Oleg Nesterovaaa2a972006-09-29 02:00:55 -07003242 rcu_read_unlock();
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003243
3244 return ret;
3245}
3246
3247/*
3248 * Process a futex-list entry, check whether it's owned by the
3249 * dying task, and do notification if so:
3250 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003251int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003252{
Vitaliy Ivanov7cfdaf32011-07-07 15:10:31 +03003253 u32 uval, uninitialized_var(nval), mval;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003254
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08003255retry:
3256 if (get_user(uval, uaddr))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003257 return -1;
3258
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003259 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003260 /*
3261 * Ok, this dying thread is truly holding a futex
3262 * of interest. Set the OWNER_DIED bit atomically
3263 * via cmpxchg, and if the value had FUTEX_WAITERS
3264 * set, wake up a waiter (if any). (We have to do a
3265 * futex_wake() even if OWNER_DIED is already set -
3266 * to handle the rare but possible case of recursive
3267 * thread-death.) The rest of the cleanup is done in
3268 * userspace.
3269 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003270 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
Thomas Gleixner6e0aa9f2011-03-14 10:34:35 +01003271 /*
3272 * We are not holding a lock here, but we want to have
3273 * the pagefault_disable/enable() protection because
3274 * we want to handle the fault gracefully. If the
3275 * access fails we try to fault in the futex with R/W
3276 * verification via get_user_pages. get_user() above
3277 * does not guarantee R/W access. If that fails we
3278 * give up and leave the futex locked.
3279 */
3280 if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
3281 if (fault_in_user_writeable(uaddr))
3282 return -1;
3283 goto retry;
3284 }
Ingo Molnarc87e2832006-06-27 02:54:58 -07003285 if (nval != uval)
Ingo Molnar8f17d3a2006-03-27 01:16:27 -08003286 goto retry;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003287
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003288 /*
3289 * Wake robust non-PI futexes here. The wakeup of
3290 * PI futexes happens in exit_pi_state():
3291 */
Thomas Gleixner36cf3b52007-07-15 23:41:20 -07003292 if (!pi && (uval & FUTEX_WAITERS))
Peter Zijlstrac2f9f202008-09-26 19:32:23 +02003293 futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003294 }
3295 return 0;
3296}
3297
3298/*
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003299 * Fetch a robust-list pointer. Bit 0 signals PI futexes:
3300 */
3301static inline int fetch_robust_entry(struct robust_list __user **entry,
Al Viroba46df92006-10-10 22:46:07 +01003302 struct robust_list __user * __user *head,
Namhyung Kim1dcc41b2010-09-14 21:43:46 +09003303 unsigned int *pi)
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003304{
3305 unsigned long uentry;
3306
Al Viroba46df92006-10-10 22:46:07 +01003307 if (get_user(uentry, (unsigned long __user *)head))
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003308 return -EFAULT;
3309
Al Viroba46df92006-10-10 22:46:07 +01003310 *entry = (void __user *)(uentry & ~1UL);
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003311 *pi = uentry & 1;
3312
3313 return 0;
3314}
3315
3316/*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003317 * Walk curr->robust_list (very carefully, it's a userspace list!)
3318 * and mark any locks found there dead, and notify any waiters.
3319 *
3320 * We silently return on any sign of list-walking problem.
3321 */
3322void exit_robust_list(struct task_struct *curr)
3323{
3324 struct robust_list_head __user *head = curr->robust_list;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003325 struct robust_list __user *entry, *next_entry, *pending;
Darren Hart4c115e92010-11-04 15:00:00 -04003326 unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
3327 unsigned int uninitialized_var(next_pi);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003328 unsigned long futex_offset;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003329 int rc;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003330
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003331 if (!futex_cmpxchg_enabled)
3332 return;
3333
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003334 /*
3335 * Fetch the list head (which was registered earlier, via
3336 * sys_set_robust_list()):
3337 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003338 if (fetch_robust_entry(&entry, &head->list.next, &pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003339 return;
3340 /*
3341 * Fetch the relative futex offset:
3342 */
3343 if (get_user(futex_offset, &head->futex_offset))
3344 return;
3345 /*
3346 * Fetch any possibly pending lock-add first, and handle it
3347 * if it exists:
3348 */
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003349 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003350 return;
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003351
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003352 next_entry = NULL; /* avoid warning with gcc */
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003353 while (entry != &head->list) {
3354 /*
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003355 * Fetch the next entry in the list before calling
3356 * handle_futex_death:
3357 */
3358 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
3359 /*
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003360 * A pending lock might already be on the list, so
Ingo Molnarc87e2832006-06-27 02:54:58 -07003361 * don't process it twice:
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003362 */
3363 if (entry != pending)
Al Viroba46df92006-10-10 22:46:07 +01003364 if (handle_futex_death((void __user *)entry + futex_offset,
Ingo Molnare3f2dde2006-07-29 05:17:57 +02003365 curr, pi))
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003366 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003367 if (rc)
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003368 return;
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003369 entry = next_entry;
3370 pi = next_pi;
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003371 /*
3372 * Avoid excessively long or circular lists:
3373 */
3374 if (!--limit)
3375 break;
3376
3377 cond_resched();
3378 }
Martin Schwidefsky9f96cb12007-10-01 01:20:13 -07003379
3380 if (pending)
3381 handle_futex_death((void __user *)pending + futex_offset,
3382 curr, pip);
Ingo Molnar0771dfe2006-03-27 01:16:22 -08003383}
3384
Pierre Peifferc19384b2007-05-09 02:35:02 -07003385long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
Ingo Molnare2970f22006-06-27 02:54:47 -07003386 u32 __user *uaddr2, u32 val2, u32 val3)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387{
Thomas Gleixner81b40532012-02-15 12:17:09 +01003388 int cmd = op & FUTEX_CMD_MASK;
Darren Hartb41277d2010-11-08 13:10:09 -08003389 unsigned int flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003391 if (!(op & FUTEX_PRIVATE_FLAG))
Darren Hartb41277d2010-11-08 13:10:09 -08003392 flags |= FLAGS_SHARED;
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003393
Darren Hartb41277d2010-11-08 13:10:09 -08003394 if (op & FUTEX_CLOCK_REALTIME) {
3395 flags |= FLAGS_CLOCKRT;
Darren Hart337f1302015-12-18 13:36:37 -08003396 if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && \
3397 cmd != FUTEX_WAIT_REQUEUE_PI)
Darren Hartb41277d2010-11-08 13:10:09 -08003398 return -ENOSYS;
3399 }
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003400
3401 switch (cmd) {
Thomas Gleixner59263b52012-02-15 12:08:34 +01003402 case FUTEX_LOCK_PI:
3403 case FUTEX_UNLOCK_PI:
3404 case FUTEX_TRYLOCK_PI:
3405 case FUTEX_WAIT_REQUEUE_PI:
3406 case FUTEX_CMP_REQUEUE_PI:
3407 if (!futex_cmpxchg_enabled)
3408 return -ENOSYS;
3409 }
3410
3411 switch (cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003412 case FUTEX_WAIT:
Thomas Gleixnercd689982008-02-01 17:45:14 +01003413 val3 = FUTEX_BITSET_MATCH_ANY;
3414 case FUTEX_WAIT_BITSET:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003415 return futex_wait(uaddr, flags, val, timeout, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 case FUTEX_WAKE:
Thomas Gleixnercd689982008-02-01 17:45:14 +01003417 val3 = FUTEX_BITSET_MATCH_ANY;
3418 case FUTEX_WAKE_BITSET:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003419 return futex_wake(uaddr, flags, val, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 case FUTEX_REQUEUE:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003421 return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 case FUTEX_CMP_REQUEUE:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003423 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
Jakub Jelinek4732efbe2005-09-06 15:16:25 -07003424 case FUTEX_WAKE_OP:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003425 return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003426 case FUTEX_LOCK_PI:
Michael Kerrisk996636d2015-01-16 20:28:06 +01003427 return futex_lock_pi(uaddr, flags, timeout, 0);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003428 case FUTEX_UNLOCK_PI:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003429 return futex_unlock_pi(uaddr, flags);
Ingo Molnarc87e2832006-06-27 02:54:58 -07003430 case FUTEX_TRYLOCK_PI:
Michael Kerrisk996636d2015-01-16 20:28:06 +01003431 return futex_lock_pi(uaddr, flags, NULL, 1);
Darren Hart52400ba2009-04-03 13:40:49 -07003432 case FUTEX_WAIT_REQUEUE_PI:
3433 val3 = FUTEX_BITSET_MATCH_ANY;
Thomas Gleixner81b40532012-02-15 12:17:09 +01003434 return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
3435 uaddr2);
Darren Hart52400ba2009-04-03 13:40:49 -07003436 case FUTEX_CMP_REQUEUE_PI:
Thomas Gleixner81b40532012-02-15 12:17:09 +01003437 return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438 }
Thomas Gleixner81b40532012-02-15 12:17:09 +01003439 return -ENOSYS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440}
3441
3442
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003443SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
3444 struct timespec __user *, utime, u32 __user *, uaddr2,
3445 u32, val3)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003446{
Pierre Peifferc19384b2007-05-09 02:35:02 -07003447 struct timespec ts;
3448 ktime_t t, *tp = NULL;
Ingo Molnare2970f22006-06-27 02:54:47 -07003449 u32 val2 = 0;
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003450 int cmd = op & FUTEX_CMD_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003451
Thomas Gleixnercd689982008-02-01 17:45:14 +01003452 if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
Darren Hart52400ba2009-04-03 13:40:49 -07003453 cmd == FUTEX_WAIT_BITSET ||
3454 cmd == FUTEX_WAIT_REQUEUE_PI)) {
Davidlohr Buesoab51fba2015-06-29 23:26:02 -07003455 if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
3456 return -EFAULT;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003457 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003458 return -EFAULT;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003459 if (!timespec_valid(&ts))
Thomas Gleixner9741ef962006-03-31 02:31:32 -08003460 return -EINVAL;
Pierre Peifferc19384b2007-05-09 02:35:02 -07003461
3462 t = timespec_to_ktime(ts);
Eric Dumazet34f01cc2007-05-09 02:35:04 -07003463 if (cmd == FUTEX_WAIT)
Thomas Gleixner5a7780e2008-02-13 09:20:43 +01003464 t = ktime_add_safe(ktime_get(), t);
Pierre Peifferc19384b2007-05-09 02:35:02 -07003465 tp = &t;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003466 }
3467 /*
Darren Hart52400ba2009-04-03 13:40:49 -07003468 * requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*.
Andreas Schwabf54f0982007-07-31 00:38:51 -07003469 * number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 */
Andreas Schwabf54f0982007-07-31 00:38:51 -07003471 if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
Darren Hartba9c22f2009-04-20 22:22:22 -07003472 cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
Ingo Molnare2970f22006-06-27 02:54:47 -07003473 val2 = (u32) (unsigned long) utime;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003474
Pierre Peifferc19384b2007-05-09 02:35:02 -07003475 return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476}
3477
Heiko Carstens03b8c7b2014-03-02 13:09:47 +01003478static void __init futex_detect_cmpxchg(void)
3479{
3480#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
3481 u32 curval;
3482
3483 /*
3484 * This will fail and we want it. Some arch implementations do
3485 * runtime detection of the futex_atomic_cmpxchg_inatomic()
3486 * functionality. We want to know that before we call in any
3487 * of the complex code paths. Also we want to prevent
3488 * registration of robust lists in that case. NULL is
3489 * guaranteed to fault and we get -EFAULT on functional
3490 * implementation, the non-functional ones will return
3491 * -ENOSYS.
3492 */
3493 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
3494 futex_cmpxchg_enabled = 1;
3495#endif
3496}
3497
Benjamin Herrenschmidtf6d107f2008-03-27 14:52:15 +11003498static int __init futex_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499{
Heiko Carstens63b1a812014-01-16 14:54:50 +01003500 unsigned int futex_shift;
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -08003501 unsigned long i;
3502
3503#if CONFIG_BASE_SMALL
3504 futex_hashsize = 16;
3505#else
3506 futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus());
3507#endif
3508
3509 futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues),
3510 futex_hashsize, 0,
3511 futex_hashsize < 256 ? HASH_SMALL : 0,
Heiko Carstens63b1a812014-01-16 14:54:50 +01003512 &futex_shift, NULL,
3513 futex_hashsize, futex_hashsize);
3514 futex_hashsize = 1UL << futex_shift;
Heiko Carstens03b8c7b2014-03-02 13:09:47 +01003515
3516 futex_detect_cmpxchg();
Thomas Gleixnera0c1e902008-02-23 15:23:57 -08003517
Davidlohr Buesoa52b89e2014-01-12 15:31:23 -08003518 for (i = 0; i < futex_hashsize; i++) {
Linus Torvalds11d46162014-03-20 22:11:17 -07003519 atomic_set(&futex_queues[i].waiters, 0);
Dima Zavin732375c2011-07-07 17:27:59 -07003520 plist_head_init(&futex_queues[i].chain);
Thomas Gleixner3e4ab742008-02-23 15:23:55 -08003521 spin_lock_init(&futex_queues[i].lock);
3522 }
3523
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 return 0;
3525}
Yang Yang25f71d12016-12-30 16:17:55 +08003526core_initcall(futex_init);