/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

#ifndef atomic_read_ctrl
static inline int atomic_read_ctrl(const atomic_t *v)
{
	int val = atomic_read(v);
	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif

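/*
 * Illustrative sketch, not part of this header: atomic_read_ctrl() is meant
 * for callers that order a later store after the read via a control
 * dependency. The device structure, its fields and DEV_BUSY below are
 * hypothetical.
 *
 *	if (atomic_read_ctrl(&dev->users))
 *		WRITE_ONCE(dev->state, DEV_BUSY);
 *
 * The conditional store to dev->state is then ordered after the read of
 * dev->users by the control dependency.
 */
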
/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

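/*
 * Illustrative sketch, not part of this header: a toy test-and-set lock
 * built from the acquire/release variants. The _acquire on the cmpxchg
 * keeps the critical section from leaking before the lock is taken, and
 * the _release on the store keeps it from leaking past the unlock.
 * toy_lock()/toy_unlock() are hypothetical names.
 *
 *	static inline void toy_lock(atomic_t *l)
 *	{
 *		while (atomic_cmpxchg_acquire(l, 0, 1) != 0)
 *			cpu_relax();
 *	}
 *
 *	static inline void toy_unlock(atomic_t *l)
 *	{
 *		atomic_set_release(l, 0);
 *	}
 */
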
#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

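/*
 * Illustrative expansion (assumption: an architecture that provides only
 * atomic_add_return_relaxed). With the fallbacks below, a call such as
 *
 *	atomic_add_return(i, v)
 *
 * ends up roughly as:
 *
 *	({
 *		int __ret;
 *		smp_mb__before_atomic();
 *		__ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 */
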
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment unless the number is zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() takes a hint of the probable
 * value of the atomic. This lets the processor skip reading the memory
 * before the atomic read/modify/write cycle, lowering the number of bus
 * transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif

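/*
 * Illustrative use, not part of this header: when an object's reference
 * count is almost always 1 at lookup time, passing 1 as the hint lets the
 * first cmpxchg attempt the 1 -> 2 transition without a preceding load.
 * The obj structure and its refs field are hypothetical; on failure the
 * object is already going away and the lookup bails out.
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refs, 1))
 *		return NULL;
 */
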
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;
	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;
	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;
	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif

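/*
 * Illustrative use, not part of this header: a try-acquire on a counted
 * resource whose count must never drop below zero. The pool structure and
 * its available field are hypothetical.
 *
 *	if (atomic_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;
 */
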
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_read_ctrl
static inline long long atomic64_read_ctrl(const atomic64_t *v)
{
	long long val = atomic64_read(v);
	smp_read_barrier_depends(); /* Enforce control dependency. */
	return val;
}
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */