/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 *          Peter Zijlstra <peterz@infradead.org>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. The paper below provides a good description of this kind
 * of lock.
 *
 * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
 *
 * This queued spinlock implementation is based on the MCS lock; however,
 * to fit the 4 bytes we assume spinlock_t to be and to preserve its
 * existing API, we must modify the scheme.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these,
 * {tail, next->locked}, into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq and nmi. As there are
 * at most 4 nesting levels, the level can be encoded in a 2-bit number, and
 * the tail can be encoded by combining this 2-bit nesting level with the cpu
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is needed. Even though we only need 1 bit for the lock, we
 * extend it to a full byte to achieve better performance on architectures
 * that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 *      atomic operations on 8-bit and 16-bit data types.
 */
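
/*
 * For reference, a sketch of the 32-bit lock word layout, assuming the
 * _Q_PENDING_BITS == 8 configuration (the authoritative offsets and masks
 * live in include/asm-generic/qspinlock_types.h):
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending byte
 *	bits 16-17: tail index (nesting level of the queue node)
 *	bits 18-31: tail cpu (+1, so that a zero tail means "no tail")
 */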

#include "mcs_spinlock.h"

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 */
static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[4]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */

static inline u32 encode_tail(int cpu, int idx)
{
        u32 tail;

#ifdef CONFIG_DEBUG_SPINLOCK
        BUG_ON(idx > 3);
#endif
        tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
        tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */

        return tail;
}

static inline struct mcs_spinlock *decode_tail(u32 tail)
{
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;

        return per_cpu_ptr(&mcs_nodes[idx], cpu);
}
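
/*
 * Worked example (a sketch, assuming the _Q_PENDING_BITS == 8 layout with
 * _Q_TAIL_IDX_OFFSET == 16 and _Q_TAIL_CPU_OFFSET == 18):
 *
 *	encode_tail(cpu = 2, idx = 1)
 *		= (2 + 1) << 18 | 1 << 16 = 0xc0000 | 0x10000 = 0xd0000
 *
 *	decode_tail(0xd0000) recovers per_cpu_ptr(&mcs_nodes[1], 2),
 *	i.e. the second-level (idx 1) queue node of cpu 2.
 */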

#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)

/*
 * By using the whole 2nd least significant byte for the pending bit, we
 * can allow better optimization of the lock acquisition for the pending
 * bit holder.
 *
 * This internal structure is also used by the set_locked function which
 * is not restricted to _Q_PENDING_BITS == 8.
 */
struct __qspinlock {
        union {
                atomic_t val;
#ifdef __LITTLE_ENDIAN
                struct {
                        u8      locked;
                        u8      pending;
                };
                struct {
                        u16     locked_pending;
                        u16     tail;
                };
#else
                struct {
                        u16     tail;
                        u16     locked_pending;
                };
                struct {
                        u8      reserved[2];
                        u8      pending;
                        u8      locked;
                };
#endif
        };
};
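
/*
 * Layout note: on a little-endian machine the overlay above means byte 0 of
 * ->val is the locked byte, byte 1 is the pending byte, and bytes 2-3 are
 * the tail. A 16-bit store to ->locked_pending therefore updates the locked
 * and pending bytes in one go, which is what clear_pending_set_locked()
 * below relies on when _Q_PENDING_BITS == 8.
 */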

#if _Q_PENDING_BITS == 8
/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        struct __qspinlock *l = (void *)lock;

        return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
}
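
/*
 * Note: with _Q_PENDING_BITS == 8 the tail occupies exactly the upper 16
 * bits of the lock word (_Q_TAIL_OFFSET is 16 in the assumed layout), so a
 * single 16-bit xchg() swaps the tail without disturbing the locked and
 * pending bytes below it.
 */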

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
        atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
        u32 old, new, val = atomic_read(&lock->val);

        for (;;) {
                new = (val & _Q_LOCKED_PENDING_MASK) | tail;
                old = atomic_cmpxchg(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
        return old;
}
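
/*
 * Note: this generic variant emulates the 16-bit xchg() above with a
 * cmpxchg() loop; each retry rebuilds "new" from the freshly observed
 * value so that the locked and pending bits are preserved while the tail
 * field is replaced.
 */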
#endif /* _Q_PENDING_BITS == 8 */

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
        struct __qspinlock *l = (void *)lock;

        WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
}

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'   :
 *   queue               :         ^--'                              :
 */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
        struct mcs_spinlock *prev, *next, *node;
        u32 new, old, tail;
        int idx;

        BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

        /*
         * wait for in-progress pending->locked hand-overs
         *
         * 0,1,0 -> 0,0,1
         */
        if (val == _Q_PENDING_VAL) {
                while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
                        cpu_relax();
        }

        /*
         * trylock || pending
         *
         * 0,0,0 -> 0,0,1 ; trylock
         * 0,0,1 -> 0,1,1 ; pending
         */
        for (;;) {
                /*
                 * If we observe any contention; queue.
                 */
                if (val & ~_Q_LOCKED_MASK)
                        goto queue;

                new = _Q_LOCKED_VAL;
                if (val == new)
                        new |= _Q_PENDING_VAL;

                old = atomic_cmpxchg(&lock->val, val, new);
                if (old == val)
                        break;

                val = old;
        }
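
        /*
         * Here "new" is the value we successfully installed: either
         * _Q_LOCKED_VAL (the trylock above won the lock outright) or
         * _Q_LOCKED_VAL | _Q_PENDING_VAL (the lock was held, so we only
         * claimed the pending slot); the checks below branch on that.
         */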

        /*
         * we won the trylock
         */
        if (new == _Q_LOCKED_VAL)
                return;

        /*
         * we're pending, wait for the owner to go away.
         *
         * *,1,1 -> *,1,0
         *
         * this wait loop must be a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because not all clear_pending_set_locked()
         * implementations imply full barriers.
         */
        while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
                cpu_relax();

        /*
         * take ownership and clear the pending bit.
         *
         * *,1,0 -> *,0,1
         */
        clear_pending_set_locked(lock);
        return;

        /*
         * End of pending bit optimistic spinning and beginning of MCS
         * queuing.
         */
queue:
        node = this_cpu_ptr(&mcs_nodes[0]);
        idx = node->count++;
        tail = encode_tail(smp_processor_id(), idx);

        node += idx;
        node->locked = 0;
        node->next = NULL;
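
        /*
         * Note: mcs_nodes[0].count tracks how many of this cpu's 4 nodes
         * are in use; each nesting context gets its own node. For example,
         * if an interrupt arrives while the task-level context is queued
         * here and its handler also contends on a qspinlock, the handler
         * picks up idx 1 and uses the next node in the array.
         */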

        /*
         * We touched a (possibly) cold cacheline in the per-cpu queue node;
         * attempt the trylock once more in the hope someone let go while we
         * weren't watching.
         */
        if (queued_spin_trylock(lock))
                goto release;

        /*
         * We have already touched the queueing cacheline; don't bother with
         * pending stuff.
         *
         * p,*,* -> n,*,*
         */
        old = xchg_tail(lock, tail);

        /*
         * if there was a previous node; link it and wait until reaching the
         * head of the waitqueue.
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);
                WRITE_ONCE(prev->next, node);

                arch_mcs_spin_lock_contended(&node->locked);
        }

        /*
         * we're at the head of the waitqueue, wait for the owner & pending to
         * go away.
         *
         * *,x,y -> *,0,0
         *
         * this wait loop must use a load-acquire such that we match the
         * store-release that clears the locked bit and create lock
         * sequentiality; this is because the set_locked() function below
         * does not imply a full barrier.
         */
        while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
                cpu_relax();

        /*
         * claim the lock:
         *
         * n,0,0 -> 0,0,1 : lock, uncontended
         * *,0,0 -> *,0,1 : lock, contended
         *
         * If the queue head is the only one in the queue (lock value == tail),
         * clear the tail code and grab the lock. Otherwise, we only need
         * to grab the lock.
         */
        for (;;) {
                if (val != tail) {
                        set_locked(lock);
                        break;
                }
                old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
                if (old == val)
                        goto release;   /* No contention */

                val = old;
        }

        /*
         * contended path; wait for next, release.
         */
        while (!(next = READ_ONCE(node->next)))
                cpu_relax();

        arch_mcs_spin_unlock_contended(&next->locked);

release:
        /*
         * release the node
         */
        this_cpu_dec(mcs_nodes[0].count);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
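
/*
 * For reference, the fast path in include/asm-generic/qspinlock.h hands off
 * to this slowpath roughly as follows (a sketch; see that header for the
 * authoritative version):
 *
 *	static __always_inline void queued_spin_lock(struct qspinlock *lock)
 *	{
 *		u32 val;
 *
 *		val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
 *		if (likely(val == 0))
 *			return;
 *		queued_spin_lock_slowpath(lock, val);
 *	}
 *
 * i.e. the slowpath is entered only when the uncontended 0,0,0 -> 0,0,1
 * cmpxchg fails, and @val carries the lock word value that was observed.
 */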