/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/work.h>
#include <reef/timer.h>
#include <reef/list.h>
#include <reef/clock.h>
#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/lock.h>
#include <reef/notifier.h>
#include <reef/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>
#include <limits.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. It's expected that most users will use
 * the system work queue, as private work queues depend on the availability of
 * architecture timers.
 *
 * Work on the system work queue should be of short duration and must not
 * delay any other work on the queue. Longer duration work (such as audio
 * processing) should use a private work queue instead.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts remain constant regardless of CPU
 * frequency changes.
 */
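
/*
 * A minimal usage sketch (illustrative only, not compiled into this file).
 * The callback contract is inferred from run_work(): the callback receives
 * its private data plus the delay (in microseconds) past the requested
 * timeout, and returns the number of microseconds until it should run
 * again, or 0 to be removed from the queue. The direct field assignments
 * below are an assumption based on the struct work fields used in this
 * file; <reef/work.h> may provide an init helper instead.
 *
 *	static uint64_t example_work(void *data, uint64_t delay)
 *	{
 *		// short work only on the system queue, then run again in 1ms
 *		return 1000;
 *	}
 *
 *	static struct work ework;
 *
 *	static void example_start(void)
 *	{
 *		ework.cb = example_work;
 *		ework.cb_data = NULL;
 *		ework.flags = WORK_SYNC;	// drift-free fixed period
 *		work_schedule_default(&ework, 1000);	// first run in 1000us
 *	}
 */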

struct work_queue {
	struct list_item work;		/* list of work */
	uint64_t timeout;		/* timeout for next queue run */
	uint32_t window_size;		/* window size for pending work */
	spinlock_t lock;
	struct notifier notifier;	/* notify CPU freq changes */
	struct work_queue_timesource *ts; /* time source for work queue */
	uint32_t ticks_per_usec;	/* ticks per usec */
	uint64_t run_ticks;		/* ticks when last run */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline int work_set_timer(struct work_queue *queue, uint64_t ticks)
{
	int ret;

	ret = queue->ts->timer_set(&queue->ts->timer, ticks);
	timer_enable(&queue->ts->timer);

	return ret;
}

static inline void work_clear_timer(struct work_queue *queue)
{
	queue->ts->timer_clear(&queue->ts->timer);
	timer_disable(&queue->ts->timer);
}

static inline uint64_t work_get_timer(struct work_queue *queue)
{
	return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window? */
static int is_work_pending(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t win_end;
	uint64_t win_start;
	int pending_count = 0;

	/* get the current valid window of work */
	win_end = work_get_timer(queue);
	win_start = win_end - queue->window_size;

	/* correct the pending flag window for overflow */
	if (win_end > win_start) {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout >= win_start && work->timeout <= win_end) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	} else {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout <= win_end ||
			    (work->timeout >= win_start &&
			     work->timeout < ULONG_LONG_MAX)) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	}

	return pending_count;
}

static inline void work_next_timeout(struct work_queue *queue,
	struct work *work, uint64_t reschedule_usecs)
{
	/* reschedule work */
	if (work->flags & WORK_SYNC) {
		work->timeout += queue->ticks_per_usec * reschedule_usecs;
	} else {
		/* calc next run based on work request */
		work->timeout = queue->ticks_per_usec *
			reschedule_usecs + queue->run_ticks;
	}
}

/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
	struct list_item *wlist;
	struct list_item *tlist;
	struct work *work;
	uint64_t reschedule_usecs;
	uint64_t udelay;

	/* check each work item in queue for pending */
	list_for_item_safe(wlist, tlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		/* run work if it's pending and remove it from the queue */
		if (work->pending) {

			udelay = (work_get_timer(queue) - work->timeout) /
				queue->ticks_per_usec;

			/* work can run in non-atomic context */
			spin_unlock_irq(&queue->lock, *flags);
			reschedule_usecs = work->cb(work->cb_data, udelay);
			spin_lock_irq(&queue->lock, *flags);

			/* do we need to reschedule this work? */
			if (reschedule_usecs == 0)
				list_item_del(&work->list);
			else {
				/* get next work timeout */
				work_next_timeout(queue, work, reschedule_usecs);
			}
		}
	}
}

static inline uint64_t calc_delta_ticks(uint64_t current, uint64_t work)
{
	uint64_t max = ULONG_LONG_MAX;

	/* does work run in the next timer cycle, i.e. after counter wrap? */
	if (work < current) {
		max -= current;
		max += work;
		return max;
	} else
		return work - current;
}
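
/*
 * Worked example of the wrap-around branch above (illustrative, using a
 * hypothetical 8-bit counter with max = 255 in place of the 64-bit one):
 * with current = 250 and work = 5 the work item is due shortly after the
 * counter wraps, so the remaining distance is (255 - 250) + 5 = 10 ticks.
 */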

/* calculate next timeout */
static void queue_get_next_timeout(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t delta = ULONG_LONG_MAX;
	uint64_t current;
	uint64_t d;
	uint64_t ticks;

	/* only recalc if work list not empty */
	if (list_is_empty(&queue->work)) {
		queue->timeout = 0;
		return;
	}

	ticks = current = work_get_timer(queue);

	/* find time for next work */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		d = calc_delta_ticks(current, work->timeout);

		/* is this work next? */
		if (d < delta) {
			ticks = work->timeout;
			delta = d;
		}
	}

	queue->timeout = ticks;
}

/* re-calculate timers for queue after CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
	struct clock_notify_data *clk_data)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t delta_ticks;
	uint64_t delta_usecs;
	uint64_t current;

	/* get current time */
	current = work_get_timer(queue);

	/* re-calculate timers for each work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		delta_ticks = calc_delta_ticks(current, work->timeout);
		delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

		/* rescale the timeout to the new clock; if the work is due
		 * within the next microsecond then schedule it very shortly */
		if (delta_usecs > 0)
			work->timeout = current + queue->ticks_per_usec * delta_usecs;
		else
			work->timeout = current + (queue->ticks_per_usec >> 3);
	}
}

static void queue_reschedule(struct work_queue *queue)
{
	queue_get_next_timeout(queue);

	if (queue->timeout)
		work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
	struct work_queue *queue = (struct work_queue *)data;
	uint32_t flags;

	/* clear interrupt */
	work_clear_timer(queue);

	spin_lock_irq(&queue->lock, flags);

	queue->run_ticks = work_get_timer(queue);

	/*
	 * Work can take a variable time to complete, so we re-check the
	 * queue after running all the pending work to make sure no new
	 * work is pending.
	 */
	while (is_work_pending(queue))
		run_work(queue, &flags);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
	struct work_queue *queue = (struct work_queue *)data;
	struct clock_notify_data *clk_data =
		(struct clock_notify_data *)event_data;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* we need to re-calculate timer when CPU frequency changes */
	if (message == CLOCK_NOTIFY_POST) {

		/* CPU frequency update complete */
		/* scale the window size to clock speed */
		queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
		queue->window_size =
			queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
		queue_recalc_timers(queue, clk_data);
		queue_reschedule(queue);
	} else if (message == CLOCK_NOTIFY_PRE) {
		/* CPU frequency update pending */
	}

	spin_unlock_irq(&queue->lock, flags);
}
333
Liam Girdwood488f02d2017-09-13 23:17:17 +0100334void work_schedule(struct work_queue *queue, struct work *w, uint64_t timeout)
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100335{
336 struct work *work;
337 struct list_item *wlist;
338 uint32_t flags;
339
340 spin_lock_irq(&queue->lock, flags);
341
342 /* check to see if we are already scheduled ? */
343 list_for_item(wlist, &queue->work) {
344 work = container_of(wlist, struct work, list);
345
346 /* keep original timeout */
347 if (work == w)
348 goto out;
349 }
350
Keyon Jied8dd7572017-03-09 14:09:20 +0800351 /* convert timeout micro seconds to CPU clock ticks */
Liam Girdwoodc0dfb4e2016-09-21 15:57:22 +0100352 w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);
353
354 /* insert work into list */
355 list_item_prepend(&w->list, &queue->work);
356
357 /* re-calc timer and re-arm */
358 queue_reschedule(queue);
359
360out:
361 spin_unlock_irq(&queue->lock, flags);
362}

void work_schedule_default(struct work *w, uint64_t timeout)
{
	work_schedule(queue_, w, timeout);
}

static void reschedule(struct work_queue *queue, struct work *w, uint64_t time)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check if the work is already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* found it */
		if (work == w)
			goto found;
	}

	/* not found, insert work into list */
	list_item_prepend(&w->list, &queue->work);

found:
	/* re-calc timer and re-arm */
	w->timeout = time;
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_reschedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue->ticks_per_usec * timeout + work_get_timer(queue);

	reschedule(queue, w, time);
}

void work_reschedule_default(struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

	reschedule(queue_, w, time);
}

void work_reschedule_default_at(struct work *w, uint64_t time)
{
	reschedule(queue_, w, time);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_cancel_default(struct work *w)
{
	work_cancel(queue_, w);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
	struct work_queue *queue;

	/* init work queue */
	queue = rmalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*queue));

	list_init(&queue->work);
	spinlock_init(&queue->lock);
	queue->ts = ts;
	queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
	queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

	/* notification of clk changes */
	queue->notifier.cb = work_notify;
	queue->notifier.cb_data = queue;
	queue->notifier.id = ts->notifier;
	notifier_register(&queue->notifier);

	/* register system timer */
	timer_register(&queue->ts->timer, queue_run, queue);

	return queue;
}

void init_system_workq(struct work_queue_timesource *ts)
{
	queue_ = work_new_queue(ts);
}
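
/*
 * A minimal bring-up sketch (illustrative only): platform initialisation
 * is expected to provide a struct work_queue_timesource backed by a real
 * hardware timer and clock, then create the system queue from it. The
 * platform_generic_queue name below is hypothetical; the real descriptor
 * and the point at which this is called live in the platform code.
 *
 *	void platform_init_workq_example(void)
 *	{
 *		// system queue used by work_schedule_default() and friends
 *		init_system_workq(&platform_generic_queue);
 *	}
 *
 * Longer-running work (e.g. audio processing) can get its own queue via
 * work_new_queue() with a dedicated timesource, as noted at the top of
 * this file.
 */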