blob: fe450ee58d553a3d89564e26f4e85a76cf5922f9 [file] [log] [blame]
/*
 * Hardware spinlock public header
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17
18#ifndef __LINUX_HWSPINLOCK_H
19#define __LINUX_HWSPINLOCK_H
20
21#include <linux/err.h>
22#include <linux/sched.h>
23
/*
 * hwspinlock mode argument: selects how the local CPU is protected by the
 * __hwspin_* helpers while the hardware lock is held.
 */
#define HWLOCK_IRQSTATE	0x01 /* Disable interrupts, save state */
#define HWLOCK_IRQ	0x02 /* Disable interrupts, don't save state */
#define HWLOCK_RAW	0x03 /* No irq handling; caller serializes (may sleep) */
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080028
/* Forward declarations: this header only passes pointers to these types,
 * so the full definitions are not needed here. */
struct device;
struct device_node;
struct hwspinlock;
struct hwspinlock_device;
struct hwspinlock_ops;
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -080034
/**
 * struct hwspinlock_pdata - platform data for hwspinlock drivers
 * @base_id: base id for this hwspinlock device
 *
 * hwspinlock devices provide system-wide hardware locks that are used
 * by remote processors that have no other way to achieve synchronization.
 *
 * To achieve that, each physical lock must have a system-wide id number
 * that is agreed upon, otherwise remote processors can't possibly assume
 * they're using the same hardware lock.
 *
 * Usually boards have a single hwspinlock device, which provides several
 * hwspinlocks, and in this case, they can be trivially numbered 0 to
 * (num-of-locks - 1).
 *
 * In case boards have several hwspinlocks devices, a different base id
 * should be used for each hwspinlock device (they can't all use 0 as
 * a starting id!).
 *
 * This platform data structure should be used to provide the base id
 * for each device (which is trivially 0 when only a single hwspinlock
 * device exists). It can be shared between different platforms, hence
 * its location.
 */
struct hwspinlock_pdata {
	int base_id;
};
62
#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)

/* API provided by the hwspinlock core when the framework is enabled */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks);
int hwspin_lock_unregister(struct hwspinlock_device *bank);
struct hwspinlock *hwspin_lock_request(void);
struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
int hwspin_lock_free(struct hwspinlock *hwlock);
int of_hwspin_lock_get_id(struct device_node *np, int index);
int hwspin_lock_get_id(struct hwspinlock *hwlock);
int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
			  unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
77
78#else /* !CONFIG_HWSPINLOCK */
79
/*
 * We don't want these functions to fail if CONFIG_HWSPINLOCK is not
 * enabled. We prefer to silently succeed in this case, and let the
 * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not
 * required on a given setup, code using it will still work.
 *
 * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which
 * we _do_ want users to fail (no point in registering hwspinlock instances if
 * the framework is not available).
 *
 * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
 * users. Others, which care, can still check this with IS_ERR.
 */
/*
 * CONFIG_HWSPINLOCK disabled: return ERR_PTR(-ENODEV), which NULL-checking
 * callers treat as success while IS_ERR() users can detect the absence.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}
97
/* CONFIG_HWSPINLOCK disabled: ERR_PTR(-ENODEV) still passes NULL checks. */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}
102
/* CONFIG_HWSPINLOCK disabled: freeing is a silent, successful no-op. */
static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}
107
/* CONFIG_HWSPINLOCK disabled: "locking" always succeeds immediately. */
static inline
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
			  int mode, unsigned long *flags)
{
	return 0;
}
114
/* CONFIG_HWSPINLOCK disabled: trylock always reports success. */
static inline
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	return 0;
}
120
/* CONFIG_HWSPINLOCK disabled: nothing was taken, so nothing to release. */
static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
125
/* CONFIG_HWSPINLOCK disabled: report lock id 0. */
static inline int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	return 0;
}
130
/* CONFIG_HWSPINLOCK disabled: report lock id 0. */
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}
135
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -0800136#endif /* !CONFIG_HWSPINLOCK */
137
/**
 * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 * @flags: a pointer to where the caller's interrupt state will be saved at
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled (previous interrupts state is saved at @flags),
 * so the caller must not sleep, and is advised to release the hwspinlock
 * as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline
int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}
159
/**
 * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock the underlying hwspinlock, and will
 * immediately fail if the hwspinlock is already locked.
 *
 * Upon a successful return from this function, preemption and local
 * interrupts are disabled, so the caller must not sleep, and is advised
 * to release the hwspinlock as soon as possible.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}
178
/**
 * hwspin_trylock_raw() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 *
 * This function attempts to lock an hwspinlock, and will immediately fail
 * if the hwspinlock is already taken.
 *
 * Caution: the raw variant does no irq handling on the local CPU, so the
 * caller must serialize access to the hardware lock itself (e.g. with a
 * mutex or spinlock) to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations under the hardware lock.
 *
 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
 */
static inline int hwspin_trylock_raw(struct hwspinlock *hwlock)
{
	return __hwspin_trylock(hwlock, HWLOCK_RAW, NULL);
}
197
198/**
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -0800199 * hwspin_trylock() - attempt to lock a specific hwspinlock
200 * @hwlock: an hwspinlock which we want to trylock
201 *
202 * This function attempts to lock an hwspinlock, and will immediately fail
203 * if the hwspinlock is already taken.
204 *
205 * Upon a successful return from this function, preemption is disabled,
206 * so the caller must not sleep, and is advised to release the hwspinlock
207 * as soon as possible. This is required in order to minimize remote cores
208 * polling on the hardware interconnect.
209 *
210 * Returns 0 if we successfully locked the hwspinlock, -EBUSY if
211 * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid.
212 */
213static inline int hwspin_trylock(struct hwspinlock *hwlock)
214{
215 return __hwspin_trylock(hwlock, 0, NULL);
216}
217
/**
 * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled (plus previous interrupt state is saved), so the caller must
 * not sleep, and is advised to release the hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock,
				unsigned int to, unsigned long *flags)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags);
}
241
/**
 * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption and local interrupts
 * are disabled so the caller must not sleep, and is advised to release the
 * hwspinlock as soon as possible.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL);
}
264
/**
 * hwspin_lock_timeout_raw() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 *
 * This function locks the underlying @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up when @to msecs have elapsed.
 *
 * Caution: the raw variant does no irq handling on the local CPU, so the
 * caller must serialize access to the hardware lock itself (e.g. with a
 * mutex or spinlock) to avoid deadlock; in return, the caller may perform
 * time-consuming or sleepable operations under the hardware lock.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
static inline
int hwspin_lock_timeout_raw(struct hwspinlock *hwlock, unsigned int to)
{
	return __hwspin_lock_timeout(hwlock, to, HWLOCK_RAW, NULL);
}
287
288/**
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -0800289 * hwspin_lock_timeout() - lock an hwspinlock with timeout limit
290 * @hwlock: the hwspinlock to be locked
291 * @to: timeout value in msecs
292 *
293 * This function locks the underlying @hwlock. If the @hwlock
294 * is already taken, the function will busy loop waiting for it to
295 * be released, but give up when @timeout msecs have elapsed.
296 *
297 * Upon a successful return from this function, preemption is disabled
298 * so the caller must not sleep, and is advised to release the hwspinlock
299 * as soon as possible.
300 * This is required in order to minimize remote cores polling on the
301 * hardware interconnect.
302 *
303 * Returns 0 when the @hwlock was successfully taken, and an appropriate
304 * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still
305 * busy after @timeout msecs). The function will never sleep.
306 */
307static inline
308int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to)
309{
310 return __hwspin_lock_timeout(hwlock, to, 0, NULL);
311}
312
/**
 * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @flags: previous caller's interrupt state to restore
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * restore the previous state of the local interrupts. It should be used
 * to undo, e.g., hwspin_trylock_irqsave().
 *
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 */
static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock,
							unsigned long *flags)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags);
}
330
/**
 * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * enable local interrupts. Should be used to undo hwspin_trylock_irq() or
 * hwspin_lock_timeout_irq().
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_irq(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_IRQ, NULL);
}
346
/**
 * hwspin_unlock_raw() - unlock hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 *
 * This function will unlock a specific hwspinlock.
 *
 * @hwlock must be already locked (e.g. by hwspin_trylock_raw()) before
 * calling this function: it is a bug to call unlock on a @hwlock that is
 * already unlocked.
 */
static inline void hwspin_unlock_raw(struct hwspinlock *hwlock)
{
	__hwspin_unlock(hwlock, HWLOCK_RAW, NULL);
}
361
362/**
Ohad Ben-Cohenbd9a4c72011-02-17 09:52:03 -0800363 * hwspin_unlock() - unlock hwspinlock
364 * @hwlock: a previously-acquired hwspinlock which we want to unlock
365 *
366 * This function will unlock a specific hwspinlock and enable preemption
367 * back.
368 *
369 * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling
370 * this function: it is a bug to call unlock on a @hwlock that is already
371 * unlocked.
372 */
373static inline void hwspin_unlock(struct hwspinlock *hwlock)
374{
375 __hwspin_unlock(hwlock, 0, NULL);
376}
377
378#endif /* __LINUX_HWSPINLOCK_H */