/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
 * bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 */
#define MUTEX_FLAG_WAITERS	0x01

#define MUTEX_FLAGS		0x03

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
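
/*
 * Illustrative sketch (commentary only, not used by the code below): how a
 * raw owner word read from lock->owner decomposes with the helpers above.
 * Because any task_struct is sufficiently aligned, the low MUTEX_FLAGS bits
 * never collide with the pointer value.
 *
 *	unsigned long owner = atomic_long_read(&lock->owner);
 *	struct task_struct *holder = __owner_task(owner);
 *	bool has_waiters = __owner_flags(owner) & MUTEX_FLAG_WAITERS;
 *
 * A NULL 'holder' means the mutex is unlocked; a set MUTEX_FLAG_WAITERS bit
 * tells the unlock path that it must issue a wakeup.
 */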

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old;

		if (__owner_task(owner))
			return false;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner,
						  curr | __owner_flags(owner));
		if (old == owner)
			return true;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

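/*
 * Usage sketch (illustrative only, not part of this file's logic): the
 * canonical pattern callers of the API above are expected to follow. The
 * mutex and the data it protects are hypothetical names.
 *
 *	static DEFINE_MUTEX(example_lock);
 *	static int example_count;
 *
 *	void example_inc(void)
 *	{
 *		mutex_lock(&example_lock);
 *		example_count++;
 *		mutex_unlock(&example_lock);
 *	}
 *
 * mutex_lock() may sleep, so this only works in process context, and the
 * task that took the lock must be the one to release it.
 */
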
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock with the fastpath, or when we lost out in the
 * contested slowpath, set ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. A contended waiter will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire the wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if the lock is contended; if not, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in the fastpath, so wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring lock in the slowpath set ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take an
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined; only
			 * by acquiring the wait_lock is there a guarantee
			 * that they are valid when read.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = __mutex_owner(lock);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (__mutex_trylock(lock)) {
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

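/*
 * Usage sketch (illustrative only): acquiring two w/w mutexes of the same
 * class under one acquire context, with the -EDEADLK backoff that the
 * debug checks in ww_mutex_lock_acquired() assume. The class and function
 * names below are hypothetical, and error handling other than the deadlock
 * backoff is elided.
 *
 *	static DEFINE_WW_CLASS(example_ww_class);
 *
 *	void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &example_ww_class);
 *
 *		ww_mutex_lock(a, &ctx);
 *		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		... work on the data protected by a and b ...
 *
 *		ww_mutex_unlock(b);
 *		ww_mutex_unlock(a);
 *		ww_acquire_fini(&ctx);
 *	}
 *
 * Note that both locks are dropped before ww_acquire_fini(), matching the
 * rule in the ww_mutex_unlock() kerneldoc above.
 */
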
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) || mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx) {
			struct ww_mutex *ww;
			ww = container_of(lock, struct ww_mutex, base);

			ww_mutex_set_context_fastpath(ww, ww_ctx);
		}
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (list_first_entry(&lock->wait_list, struct mutex_waiter, list) == &waiter)
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		if (__mutex_trylock(lock))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAG_WAITERS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	unsigned long owner, flags;
	WAKE_Q(wake_q);

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock
	 * such that other contenders can get on with things ASAP.
	 */
	owner = atomic_long_fetch_and_release(MUTEX_FLAGS, &lock->owner);
	if (!__owner_flags(owner))
		return;

	spin_lock_mutex(&lock->wait_lock, flags);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

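/*
 * Usage sketch (illustrative only): a sleeping acquisition that a signal may
 * abort, as described in the kerneldoc above. The names are hypothetical.
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	long example_ioctl_step(void)
 *	{
 *		if (mutex_lock_interruptible(&example_lock))
 *			return -ERESTARTSYS;
 *
 *		... touch the data protected by example_lock ...
 *
 *		mutex_unlock(&example_lock);
 *		return 0;
 *	}
 *
 * Returning -ERESTARTSYS lets the signal code transparently restart the
 * syscall where that is permitted; propagating the -EINTR from the lock
 * call directly is also common.
 */
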
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

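/*
 * Usage sketch (illustrative only): opportunistic work that is skipped
 * rather than waited for, using the 1 == acquired convention documented
 * above. The names are hypothetical.
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 *	void example_flush_if_idle(void)
 *	{
 *		if (!mutex_trylock(&example_lock))
 *			return;
 *
 *		... do the optional work ...
 *
 *		mutex_unlock(&example_lock);
 *	}
 */
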
#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
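
/*
 * Usage sketch (illustrative only): the "drop the last reference under a
 * lock" pattern this helper exists for. The object type, list and names
 * are hypothetical.
 *
 *	static DEFINE_MUTEX(example_list_lock);
 *
 *	void example_put(struct example_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock))
 *			return;
 *
 *		list_del(&obj->node);
 *		mutex_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 *
 * The mutex is only taken on the final reference drop, so the common path
 * stays a single atomic operation.
 */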