/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The least significant 2 bits of the owner value have the following
 * meanings when set.
 *  - RWSEM_READER_OWNED (bit 0): The rwsem is owned by readers
 *  - RWSEM_ANONYMOUSLY_OWNED (bit 1): The rwsem is anonymously owned,
 *    i.e. the owner(s) cannot be readily determined. It can be reader
 *    owned or the owning writer is indeterminate.
 *
 * When a writer acquires a rwsem, it puts its task_struct pointer
 * into the owner field. It is cleared after an unlock.
 *
 * When a reader acquires a rwsem, it will also put its task_struct
 * pointer into the owner field with both the RWSEM_READER_OWNED and
 * RWSEM_ANONYMOUSLY_OWNED bits set. On unlock, the owner field will
 * largely be left untouched. So for a free or reader-owned rwsem,
 * the owner value may contain information about the last reader that
 * acquired the rwsem. The anonymous bit is set because that particular
 * reader may or may not still own the lock.
 *
 * That information may be helpful in debugging cases where the system
 * seems to hang on a reader-owned rwsem, especially if only one reader
 * is involved. Ideally we would like to track all the readers that own
 * a rwsem, but the overhead is simply too big.
 */
#define RWSEM_READER_OWNED	(1UL << 0)
#define RWSEM_ANONYMOUSLY_OWNED	(1UL << 1)
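/*
 * For illustration, with a hypothetical task_struct at 0xffff888012345000
 * (an assumed address; task_struct is at least 4-byte aligned, so the two
 * low bits are free for flags):
 *
 *	writer-owned:	owner == 0xffff888012345000	(both bits clear)
 *	reader-owned:	owner == 0xffff888012345003	(both bits set)
 *	unowned:	owner == NULL
 */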

#ifdef CONFIG_DEBUG_RWSEMS
# define DEBUG_RWSEMS_WARN_ON(c, sem)	do {			\
	if (WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
		#c, atomic_long_read(&(sem)->count),		\
		(long)((sem)->owner), (long)current,		\
		list_empty(&(sem)->wait_list) ? "" : "not "))	\
			debug_locks_off();			\
	} while (0)
#else
# define DEBUG_RWSEMS_WARN_ON(c, sem)
#endif

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

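/*
 * A sketch of how these biases combine in the count field (assuming
 * CONFIG_64BIT; the 32-bit case is analogous with a 16-bit active mask):
 *
 *	0x0000000000000000	unlocked, no waiters
 *	0x000000000000000N	N readers active, no waiters
 *	0xffffffff00000001	one writer active, no waiters
 *				(== RWSEM_ACTIVE_WRITE_BIAS)
 *	0xffffffff00000000	free, but waiters are queued
 *				(== RWSEM_WAITING_BIAS)
 *
 * A negative count thus signals an active writer and/or queued waiters.
 */
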
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * All writes to owner are protected by WRITE_ONCE() to make sure that
 * store tearing can't happen as optimistic spinners may read and use
 * the owner value concurrently without taking the lock. Reads from
 * owner, however, may not need READ_ONCE() as long as the pointer
 * value is only used for comparison and isn't being dereferenced.
 */
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, current);
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
	WRITE_ONCE(sem->owner, NULL);
}

/*
 * The task_struct pointer of the last owning reader will be left in
 * the owner field.
 *
 * Note that the owner value just indicates the task has owned the rwsem
 * previously; it may not be the real owner or one of the real owners
 * anymore when that field is examined, so take it with a grain of salt.
 */
static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner | RWSEM_READER_OWNED
						 | RWSEM_ANONYMOUSLY_OWNED;

	WRITE_ONCE(sem->owner, (struct task_struct *)val);
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
	__rwsem_set_reader_owned(sem, current);
}

/*
 * Return true if a rwsem waiter can spin on the rwsem's owner
 * and steal the lock, i.e. the lock is not anonymously owned.
 * N.B. !owner is considered spinnable.
 */
static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
{
	return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}
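
/*
 * For example, a NULL owner (e.g. the rwsem was just released by a
 * writer) has the anonymous bit clear and is thus reported as
 * spinnable, matching the N.B. above.
 */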

/*
 * Return true if the rwsem is owned by an anonymous writer or readers.
 */
static inline bool rwsem_has_anonymous_owner(struct task_struct *owner)
{
	return (unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED;
}

#ifdef CONFIG_DEBUG_RWSEMS
/*
 * With CONFIG_DEBUG_RWSEMS configured, this makes sure that if there
 * is a task pointer in the owner field of a reader-owned rwsem, it is
 * the real owner or one of the real owners. The only exception is when
 * the unlock is done by up_read_non_owner().
 */
#define rwsem_clear_reader_owned rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
	unsigned long val = (unsigned long)current | RWSEM_READER_OWNED
						   | RWSEM_ANONYMOUSLY_OWNED;
	if (READ_ONCE(sem->owner) == (struct task_struct *)val)
		cmpxchg_relaxed((unsigned long *)&sem->owner, val,
				RWSEM_READER_OWNED | RWSEM_ANONYMOUSLY_OWNED);
}
#endif

#else
static inline void rwsem_set_owner(struct rw_semaphore *sem)
{
}

static inline void rwsem_clear_owner(struct rw_semaphore *sem)
{
}

static inline void __rwsem_set_reader_owned(struct rw_semaphore *sem,
					    struct task_struct *owner)
{
}

static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
{
}
#endif

#ifndef rwsem_clear_reader_owned
static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
{
}
#endif

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		rwsem_down_read_failed(sem);
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
}
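
/*
 * Worked arithmetic (a sketch, assuming CONFIG_64BIT): on an unlocked
 * rwsem (count == 0) the increment returns 1, which is > 0, so the
 * fast path is taken. If a writer holds the lock (count ==
 * RWSEM_ACTIVE_WRITE_BIAS, i.e. -RWSEM_ACTIVE_MASK), the increment
 * returns a negative value and the reader falls into the slow path.
 */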

static inline int __down_read_killable(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner &
					RWSEM_READER_OWNED), sem);
	} else {
		rwsem_set_reader_owned(sem);
	}
	return 0;
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	/*
	 * Optimize for the case when the rwsem is not locked at all.
	 */
	long tmp = RWSEM_UNLOCKED_VALUE;

	do {
		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
					tmp + RWSEM_ACTIVE_READ_BIAS)) {
			rwsem_set_reader_owned(sem);
			return 1;
		}
	} while (tmp >= 0);
	return 0;
}
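
/*
 * On a failed cmpxchg, atomic_long_try_cmpxchg_acquire() updates tmp
 * with the current count, so the loop retries as long as tmp >= 0,
 * i.e. while only readers (or nobody) hold the lock; a negative count
 * means an active writer or queued waiters, and the trylock gives up.
 */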

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
	rwsem_set_owner(sem);
}
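
/*
 * The result equals RWSEM_ACTIVE_WRITE_BIAS only if the count was
 * exactly RWSEM_UNLOCKED_VALUE (0) before the add, i.e. no readers,
 * no writer and no waiters; any other prior state diverts the writer
 * to the slow path.
 */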

static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	rwsem_set_owner(sem);
	return 0;
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_ACTIVE_WRITE_BIAS);
	if (tmp == RWSEM_UNLOCKED_VALUE) {
		rwsem_set_owner(sem);
		return true;
	}
	return false;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	DEBUG_RWSEMS_WARN_ON(!((unsigned long)sem->owner & RWSEM_READER_OWNED),
				sem);
	rwsem_clear_reader_owned(sem);
	tmp = atomic_long_dec_return_release(&sem->count);
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}
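
/*
 * Wake-up condition, worked through (a sketch, assuming CONFIG_64BIT):
 * if the last active reader leaves while waiters are queued, the count
 * drops from RWSEM_WAITING_BIAS + 1 to RWSEM_WAITING_BIAS
 * (0xffffffff00000000), which is < -1 with a zero active mask, so
 * rwsem_wake() is called. A reader leaving while other readers are
 * still active keeps the active mask non-zero and skips the wakeup.
 */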

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	rwsem_clear_owner(sem);
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	DEBUG_RWSEMS_WARN_ON(sem->owner != current, sem);
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
	rwsem_set_reader_owned(sem);
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
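
/*
 * Downgrade arithmetic, worked through (a sketch, assuming CONFIG_64BIT):
 * an uncontended write lock has count == RWSEM_ACTIVE_WRITE_BIAS
 * (0xffffffff00000001); adding -RWSEM_WAITING_BIAS (+0x100000000)
 * yields 1, i.e. exactly one active reader, and the non-negative
 * result skips the wakeup. If waiters were queued, the extra
 * RWSEM_WAITING_BIAS keeps the result negative and
 * rwsem_downgrade_wake() wakes any readers at the head of the queue.
 */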