/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rtc.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
{
	if (rtc->range_min != rtc->range_max) {
		time64_t time = rtc_tm_to_time64(tm);

		if (time < rtc->range_min || time > rtc->range_max)
			return -ERANGE;
	}

	return 0;
}

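/*
 * Illustrative sketch (not part of this file): a driver would populate the
 * supported window at probe/registration time so that rtc_valid_range()
 * can reject out-of-window times; the numeric limits below are made-up
 * example values, not a real device's range.
 *
 *	rtc->range_min = 0;		// e.g. 1970-01-01 00:00:00 UTC
 *	rtc->range_max = 0x7fffffff;	// e.g. a 32-bit seconds counter
 */
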
static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;
	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->read_time)
		err = -EINVAL;
	else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->dev.parent, tm);
		if (err < 0) {
			dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
				err);
			return err;
		}

		err = rtc_valid_tm(tm);
		if (err < 0)
			dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
	}
	return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	err = __rtc_read_time(rtc, tm);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

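/*
 * Illustrative usage sketch for the accessor above (kernel consumer code,
 * not part of this file); note that struct rtc_time keeps tm_year relative
 * to 1900 and tm_mon zero-based:
 *
 *	struct rtc_time tm;
 *	int err = rtc_read_time(rtc, &tm);
 *
 *	if (!err)
 *		pr_info("RTC time: %d-%02d-%02d %02d:%02d:%02d\n",
 *			tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 *			tm.tm_hour, tm.tm_min, tm.tm_sec);
 */
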
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, tm);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else if (rtc->ops->set_mmss64) {
		time64_t secs64 = rtc_tm_to_time64(tm);

		err = rtc->ops->set_mmss64(rtc->dev.parent, secs64);
	} else if (rtc->ops->set_mmss) {
		time64_t secs64 = rtc_tm_to_time64(tm);
		err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
	} else
		err = -EINVAL;

	pm_stay_awake(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);

	trace_rtc_set_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

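/*
 * Note on the fallback order in rtc_set_time() above: .set_time is
 * preferred, then .set_mmss64 (64-bit seconds), then the legacy 32-bit
 * .set_mmss. A minimal driver sketch (hypothetical foo_* names):
 *
 *	static const struct rtc_class_ops foo_rtc_ops = {
 *		.read_time	= foo_rtc_read_time,
 *		.set_time	= foo_rtc_set_time,
 *	};
 */
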
static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		alarm->enabled = 0;
		alarm->pending = 0;
		alarm->time.tm_sec = -1;
		alarm->time.tm_min = -1;
		alarm->time.tm_hour = -1;
		alarm->time.tm_mday = -1;
		alarm->time.tm_mon = -1;
		alarm->time.tm_year = -1;
		alarm->time.tm_wday = -1;
		alarm->time.tm_yday = -1;
		alarm->time.tm_isdst = -1;
		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
	}

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time before, now;
	int first_time = 1;
	time64_t t_now, t_alm;
	enum { none, day, month, year } missing = none;
	unsigned days;

	/* The lower level RTC driver may return -1 in some fields,
	 * creating invalid alarm->time values, for reasons like:
	 *
	 * - The hardware may not be capable of filling them in;
	 *   many alarms match only on time-of-day fields, not
	 *   day/month/year calendar data.
	 *
	 * - Some hardware uses illegal values as "wildcard" match
	 *   values, which non-Linux firmware (like a BIOS) may try
	 *   to set up as e.g. "alarm 15 minutes after each hour".
	 *   Linux uses only oneshot alarms.
	 *
	 * When we see that here, we deal with it by using values from
	 * a current RTC timestamp for any missing (-1) values.  The
	 * RTC driver prevents "periodic alarm" modes.
	 *
	 * But this can be racy, because some fields of the RTC timestamp
	 * may have wrapped in the interval since we read the RTC alarm,
	 * which would lead to us inserting inconsistent values in place
	 * of the -1 fields.
	 *
	 * Reading the alarm and timestamp in the reverse sequence
	 * would have the same race condition, and not solve the issue.
	 *
	 * So, we must first read the RTC timestamp,
	 * then read the RTC alarm value,
	 * and then read a second RTC timestamp.
	 *
	 * If any fields of the second timestamp have changed
	 * when compared with the first timestamp, then we know
	 * our timestamp may be inconsistent with that used by
	 * the low-level rtc_read_alarm_internal() function.
	 *
	 * So, when the two timestamps disagree, we just loop and do
	 * the process again to get a fully consistent set of values.
	 *
	 * This could all instead be done in the lower level driver,
	 * but since more than one lower level RTC implementation needs it,
	 * it's probably best to do it here instead of there.
	 */

	/* Get the "before" timestamp */
	err = rtc_read_time(rtc, &before);
	if (err < 0)
		return err;
	do {
		if (!first_time)
			memcpy(&before, &now, sizeof(struct rtc_time));
		first_time = 0;

		/* get the RTC alarm values, which may be incomplete */
		err = rtc_read_alarm_internal(rtc, alarm);
		if (err)
			return err;

		/* full-function RTCs won't have such missing fields */
		if (rtc_valid_tm(&alarm->time) == 0)
			return 0;

		/* get the "after" timestamp, to detect wrapped fields */
		err = rtc_read_time(rtc, &now);
		if (err < 0)
			return err;

		/* note that tm_sec is a "don't care" value here: */
	} while (before.tm_min != now.tm_min
		 || before.tm_hour != now.tm_hour
		 || before.tm_mon != now.tm_mon
		 || before.tm_year != now.tm_year);

	/* Fill in the missing alarm fields using the timestamp; we
	 * know there's at least one since alarm->time is invalid.
	 */
	if (alarm->time.tm_sec == -1)
		alarm->time.tm_sec = now.tm_sec;
	if (alarm->time.tm_min == -1)
		alarm->time.tm_min = now.tm_min;
	if (alarm->time.tm_hour == -1)
		alarm->time.tm_hour = now.tm_hour;

	/* For simplicity, only support date rollover for now */
	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
		alarm->time.tm_mday = now.tm_mday;
		missing = day;
	}
	if ((unsigned)alarm->time.tm_mon >= 12) {
		alarm->time.tm_mon = now.tm_mon;
		if (missing == none)
			missing = month;
	}
	if (alarm->time.tm_year == -1) {
		alarm->time.tm_year = now.tm_year;
		if (missing == none)
			missing = year;
	}

	/* Can't proceed if alarm is still invalid after replacing
	 * missing fields.
	 */
	err = rtc_valid_tm(&alarm->time);
	if (err)
		goto done;

	/* with luck, no rollover is needed */
	t_now = rtc_tm_to_time64(&now);
	t_alm = rtc_tm_to_time64(&alarm->time);
	if (t_now < t_alm)
		goto done;

	switch (missing) {

	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
	 * will trigger at 5am will do so at 5am Tuesday, which
	 * could also be in the next month or year.  This is a common
	 * case, especially for PCs.
	 */
	case day:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
		t_alm += 24 * 60 * 60;
		rtc_time64_to_tm(t_alm, &alarm->time);
		break;

	/* Month rollover ... if it's the 31st, an alarm on the 3rd will
	 * be next month.  An alarm matching on the 30th, 29th, or 28th
	 * may end up in the month after that!  Many newer PCs support
	 * this type of alarm.
	 */
	case month:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
		do {
			if (alarm->time.tm_mon < 11)
				alarm->time.tm_mon++;
			else {
				alarm->time.tm_mon = 0;
				alarm->time.tm_year++;
			}
			days = rtc_month_days(alarm->time.tm_mon,
					alarm->time.tm_year);
		} while (days < alarm->time.tm_mday);
		break;

	/* Year rollover ... easy except for leap years! */
	case year:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
		do {
			alarm->time.tm_year++;
		} while (!is_leap_year(alarm->time.tm_year + 1900)
			&& rtc_valid_tm(&alarm->time) != 0);
		break;

	default:
		dev_warn(&rtc->dev, "alarm rollover not handled\n");
	}

	err = rtc_valid_tm(&alarm->time);

done:
	if (err) {
		dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
			alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
			alarm->time.tm_mday, alarm->time.tm_hour,
			alarm->time.tm_min, alarm->time.tm_sec);
	}

	return err;
}

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->ops == NULL)
		err = -ENODEV;
	else if (!rtc->ops->read_alarm)
		err = -EINVAL;
	else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		alarm->enabled = rtc->aie_timer.enabled;
		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
	}
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	struct rtc_time tm;
	time64_t now, scheduled;
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err)
		return err;
	scheduled = rtc_tm_to_time64(&alarm->time);

	/* Make sure we're not setting alarms in the past */
	err = __rtc_read_time(rtc, &tm);
	if (err)
		return err;
	now = rtc_tm_to_time64(&tm);
	if (scheduled <= now)
		return -ETIME;
	/*
	 * XXX - We just checked to make sure the alarm time is not
	 * in the past, but there is still a race window where if
	 * the alarm is set for the next second and the second ticks
	 * over right here, before we set the alarm.
	 */

	if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->set_alarm)
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, &alarm->time);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->aie_timer.enabled)
		rtc_timer_remove(rtc, &rtc->aie_timer);

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;
	if (alarm->enabled)
		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);

	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

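/*
 * Illustrative usage sketch (not part of this file): arm a one-shot alarm
 * 60 seconds from the current RTC time, assuming "rtc" was obtained from
 * rtc_class_open() or a driver; error handling omitted for brevity.
 *
 *	struct rtc_wkalrm alrm = { .enabled = 1 };
 *	struct rtc_time tm;
 *
 *	rtc_read_time(rtc, &tm);
 *	rtc_time64_to_tm(rtc_tm_to_time64(&tm) + 60, &alrm.time);
 *	rtc_set_alarm(rtc, &alrm);
 */
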
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time now;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_read_time(rtc, &now);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;

	/* Alarm has to be enabled & in the future for us to enqueue it */
	if (alarm->enabled && (rtc_tm_to_ktime(now) <
			 rtc->aie_timer.node.expires)) {

		rtc->aie_timer.enabled = 1;
		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
		trace_rtc_timer_enqueue(&rtc->aie_timer);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->aie_timer.enabled != enabled) {
		if (enabled)
			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
		else
			rtc_timer_remove(rtc, &rtc->aie_timer);
	}

	if (err)
		/* nothing */;
	else if (!rtc->ops)
		err = -ENODEV;
	else if (!rtc->ops->alarm_irq_enable)
		err = -EINVAL;
	else
		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_alarm_irq_enable(enabled, err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	if (enabled == 0 && rtc->uie_irq_active) {
		mutex_unlock(&rtc->ops_lock);
		return rtc_dev_update_irq_enable_emul(rtc, 0);
	}
#endif
	/* make sure we're changing state */
	if (rtc->uie_rtctimer.enabled == enabled)
		goto out;

	if (rtc->uie_unsupported) {
		err = -EINVAL;
		goto out;
	}

	if (enabled) {
		struct rtc_time tm;
		ktime_t now, onesec;

		__rtc_read_time(rtc, &tm);
		onesec = ktime_set(1, 0);
		now = rtc_tm_to_ktime(tm);
		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
		rtc->uie_rtctimer.period = ktime_set(1, 0);
		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
	} else
		rtc_timer_remove(rtc, &rtc->uie_rtctimer);

out:
	mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	/*
	 * Enable emulation if the driver did not provide
	 * the update_irq_enable function pointer, or if it returned
	 * -EINVAL to signal that it has been configured without
	 * interrupts or that they are not available at the moment.
	 */
	if (err == -EINVAL)
		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

David Brownelld728b1e2006-11-25 11:09:28 -0800528/**
John Stultz6610e082010-09-23 15:07:34 -0700529 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
530 * @rtc: pointer to the rtc device
531 *
532 * This function is called when an AIE, UIE or PIE mode interrupt
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300533 * has occurred (or been emulated).
John Stultz6610e082010-09-23 15:07:34 -0700534 *
535 * Triggers the registered irq_task function callback.
536 */
John Stultz456d66e2011-02-11 18:15:23 -0800537void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
John Stultz6610e082010-09-23 15:07:34 -0700538{
539 unsigned long flags;
540
541 /* mark one irq of the appropriate mode */
542 spin_lock_irqsave(&rtc->irq_lock, flags);
543 rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
544 spin_unlock_irqrestore(&rtc->irq_lock, flags);
545
546 /* call the task func */
547 spin_lock_irqsave(&rtc->irq_task_lock, flags);
548 if (rtc->irq_task)
549 rtc->irq_task->func(rtc->irq_task->private_data);
550 spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
551
552 wake_up_interruptible(&rtc->irq_queue);
553 kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
554}
555
556
/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}


/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @private: pointer to the rtc_device
 *
 * This function is called when the uie_timer expires.
 */
void rtc_uie_update_irq(void *private)
{
	struct rtc_device *rtc = (struct rtc_device *)private;
	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}


/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. This function is called when the periodic
 * hrtimer expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
	struct rtc_device *rtc;
	ktime_t period;
	int count;
	rtc = container_of(timer, struct rtc_device, pie_timer);

	period = NSEC_PER_SEC / rtc->irq_freq;
	count = hrtimer_forward_now(timer, period);

	rtc_handle_legacy_irq(rtc, count, RTC_PF);

	return HRTIMER_RESTART;
}

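/*
 * Worked example for the period computation above: with irq_freq = 64 Hz,
 * period = NSEC_PER_SEC / 64 = 15625000 ns, so the hrtimer fires every
 * 15.625 ms and hrtimer_forward_now() returns how many periods have
 * elapsed, which is passed on as the irq count.
 */
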
/**
 * rtc_update_irq - Triggered when an RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 * Context: any
 */
void rtc_update_irq(struct rtc_device *rtc,
		unsigned long num, unsigned long events)
{
	if (IS_ERR_OR_NULL(rtc))
		return;

	pm_stay_awake(rtc->dev.parent);
	schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

static int __rtc_match(struct device *dev, const void *data)
{
	const char *name = data;

	if (strcmp(dev_name(dev), name) == 0)
		return 1;
	return 0;
}

struct rtc_device *rtc_class_open(const char *name)
{
	struct device *dev;
	struct rtc_device *rtc = NULL;

	dev = class_find_device(rtc_class, NULL, name, __rtc_match);
	if (dev)
		rtc = to_rtc_device(dev);

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			put_device(dev);
			rtc = NULL;
		}
	}

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

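/*
 * Illustrative usage sketch (not part of this file): pairing
 * rtc_class_open()/rtc_class_close() around the calls a kernel user needs;
 * "rtc0" is only an example device name and do_something_with() is a
 * hypothetical helper.
 *
 *	struct rtc_device *rtc = rtc_class_open("rtc0");
 *
 *	if (rtc) {
 *		struct rtc_time tm;
 *
 *		if (!rtc_read_time(rtc, &tm))
 *			do_something_with(&tm);
 *		rtc_class_close(rtc);
 *	}
 */
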
int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
	int retval = -EBUSY;

	if (task == NULL || task->func == NULL)
		return -EINVAL;

	/* Cannot register while the char dev is in use */
	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
		return -EBUSY;

	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == NULL) {
		rtc->irq_task = task;
		retval = 0;
	}
	spin_unlock_irq(&rtc->irq_task_lock);

	clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);

	return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
	spin_lock_irq(&rtc->irq_task_lock);
	if (rtc->irq_task == task)
		rtc->irq_task = NULL;
	spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
	/*
	 * We always cancel the timer here first, because otherwise
	 * we could run into BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
	 * when we manage to start the timer before the callback
	 * returns HRTIMER_RESTART.
	 *
	 * We cannot use hrtimer_cancel() here as a running callback
	 * could be blocked on rtc->irq_task_lock and hrtimer_cancel()
	 * would spin forever.
	 */
	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
		return -1;

	if (enabled) {
		ktime_t period = NSEC_PER_SEC / rtc->irq_freq;

		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
	}
	return 0;
}

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of periodic IRQ task->func() callbacks.
 */
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task, int enabled)
{
	int err = 0;
	unsigned long flags;

retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	else if (rtc->irq_task != task)
		err = -EACCES;
	else {
		if (rtc_update_hrtimer(rtc, enabled) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
		rtc->pie_enabled = enabled;
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	trace_rtc_irq_set_state(enabled, err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency for IRQ
 * @rtc: the rtc device
 * @task: currently registered with rtc_irq_register()
 * @freq: positive frequency with which task->func() will be called
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
	int err = 0;
	unsigned long flags;

	if (freq <= 0 || freq > RTC_MAX_FREQ)
		return -EINVAL;
retry:
	spin_lock_irqsave(&rtc->irq_task_lock, flags);
	if (rtc->irq_task != NULL && task == NULL)
		err = -EBUSY;
	else if (rtc->irq_task != task)
		err = -EACCES;
	else {
		rtc->irq_freq = freq;
		if (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0) {
			spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

	trace_rtc_irq_set_freq(freq, err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);

/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	struct rtc_time tm;
	ktime_t now;

	timer->enabled = 1;
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);

	/* Skip over expired timers */
	while (next) {
		if (next->expires >= now)
			break;
		next = timerqueue_iterate_next(next);
	}

	timerqueue_add(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_enqueue(timer);
	if (!next || ktime_before(timer->node.expires, next->expires)) {
		struct rtc_wkalrm alarm;
		int err;
		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		} else if (err) {
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}

static void rtc_alarm_disable(struct rtc_device *rtc)
{
	if (!rtc->ops || !rtc->ops->alarm_irq_enable)
		return;

	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
	trace_rtc_alarm_irq_enable(0, 0);
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of timerqueue
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	timerqueue_del(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_dequeue(timer);
	timer->enabled = 0;
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;
		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		}
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: work item embedded in the rtc_device as irqwork
 *
 * Expires rtc timers. Reprograms next alarm event if needed.
 * Called via worktask.
 *
 * Serializes access to timerqueue via ops_lock mutex
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires > now)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		trace_rtc_timer_dequeue(timer);
		timer->enabled = 0;
		if (timer->task.func)
			timer->task.func(timer->task.private_data);

		trace_rtc_timer_fired(timer);
		/* Re-add/fwd periodic timers */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_enqueue(timer);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		int retry = 3;

		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
reprogram:
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME)
			goto again;
		else if (err) {
			if (retry-- > 0)
				goto reprogram;

			timer = container_of(next, struct rtc_timer, node);
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
			goto again;
		}
	} else
		rtc_alarm_disable(rtc);

	pm_relax(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
}


/* rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when timer fires
 * @data: private data passed to function pointer
 *
 * Kernel interface to initializing an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(void *p), void *data)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->task.func = f;
	timer->task.private_data = data;
}

/* rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which to expire the timer
 * @period: period that the timer will recur
 *
 * Kernel interface to set an rtc_timer
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
			ktime_t expires, ktime_t period)
{
	int ret = 0;
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

/* rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being set
 *
 * Kernel interface to cancel an rtc_timer
 */
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
{
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
}

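/*
 * Illustrative usage sketch (not part of this file): a periodic rtc_timer
 * firing every 10 seconds, where my_timer_fn/my_data are hypothetical and
 * "now" is a struct rtc_time previously read with rtc_read_time().
 *
 *	static struct rtc_timer my_timer;
 *
 *	rtc_timer_init(&my_timer, my_timer_fn, my_data);
 *	rtc_timer_start(rtc, &my_timer,
 *			ktime_add(rtc_tm_to_ktime(now), ktime_set(10, 0)),
 *			ktime_set(10, 0));
 *	...
 *	rtc_timer_cancel(rtc, &my_timer);
 */
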
/**
 * rtc_read_offset - Read the amount of rtc offset in parts per billion
 * @rtc: rtc device to be used
 * @offset: the offset in parts per billion
 *
 * See rtc_set_offset() below for details.
 *
 * Kernel interface to read rtc clock offset
 * Returns 0 on success, or a negative number on error.
 * If read_offset() is not implemented for the rtc, return -EINVAL
 */
int rtc_read_offset(struct rtc_device *rtc, long *offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->read_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->read_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_offset(*offset, ret);
	return ret;
}

/**
 * rtc_set_offset - Adjusts the duration of the average second
 * @rtc: rtc device to be used
 * @offset: the offset in parts per billion
 *
 * Some rtc's allow an adjustment to the average duration of a second
 * to compensate for differences in the actual clock rate due to temperature,
 * the crystal, capacitor, etc.
 *
 * The adjustment applied is as follows:
 *   t = t0 * (1 + offset * 1e-9)
 * where t0 is the measured length of 1 RTC second with offset = 0
 *
 * Kernel interface to adjust an rtc clock offset.
 * Return 0 on success, or a negative number on error.
 * If the rtc offset is not settable (or not implemented), return -EINVAL
 */
int rtc_set_offset(struct rtc_device *rtc, long offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->set_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->set_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_set_offset(offset, ret);
	return ret;
}
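
/*
 * Worked example for the formula documented above: an RTC measured to run
 * 10 ppm fast produces seconds that are roughly 10 us too short, so they
 * must be lengthened; offset = +10000 ppb gives
 * t = t0 * (1 + 10000 * 1e-9) = t0 * 1.00001, i.e. each second is
 * stretched by 10 ppm to compensate.
 */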