/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <sof/work.h>
#include <sof/timer.h>
#include <sof/list.h>
#include <sof/clock.h>
#include <sof/alloc.h>
#include <sof/sof.h>
#include <sof/lock.h>
#include <sof/notifier.h>
#include <sof/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>
#include <limits.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. Most users are expected to use the
 * system work queue, since private work queues depend on the availability of
 * architecture timers.
 *
 * Work on the system work queue should be short duration and must not delay
 * any other work on this queue. If you have longer duration work (like audio
 * processing) then use a private work queue.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts remain constant regardless of CPU
 * frequency changes.
 */
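
/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * self-rescheduling work item on the system queue. The callback signature
 * and the struct work fields (cb, cb_data, flags) match their use in
 * run_work() below; my_task_cb, my_ctx and process() are hypothetical.
 *
 *	static uint64_t my_task_cb(void *data, uint64_t udelay)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		// udelay is how late we ran, in microseconds
 *		process(ctx);
 *
 *		// a non-zero return reschedules us in that many usecs
 *		return 1000;
 *	}
 *
 *	static struct work my_work;
 *
 *	my_work.cb = my_task_cb;
 *	my_work.cb_data = ctx;
 *	my_work.flags = WORK_SYNC;	// drift-free 1 ms cadence
 *	work_schedule_default(&my_work, 1000);
 */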

struct work_queue {
	struct list_item work;		/* list of work */
	uint64_t timeout;		/* timeout for next queue run */
	uint32_t window_size;		/* window size for pending work */
	spinlock_t lock;
	struct notifier notifier;	/* notify CPU freq changes */
	struct work_queue_timesource *ts;	/* time source for work queue */
	uint32_t ticks_per_usec;	/* ticks per usec */
	uint32_t ticks_per_msec;	/* ticks per msec */
	uint64_t run_ticks;		/* ticks when last run */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline int work_set_timer(struct work_queue *queue, uint64_t ticks)
{
	int ret;

	ret = queue->ts->timer_set(&queue->ts->timer, ticks);
	timer_enable(&queue->ts->timer);

	return ret;
}

static inline void work_clear_timer(struct work_queue *queue)
{
	queue->ts->timer_clear(&queue->ts->timer);
	timer_disable(&queue->ts->timer);
}

static inline uint64_t work_get_timer(struct work_queue *queue)
{
	return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window ? */
static int is_work_pending(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t win_end;
	uint64_t win_start;
	int pending_count = 0;

	/* get the current valid window of work */
	win_end = work_get_timer(queue);
	win_start = win_end - queue->window_size;

	/* correct the pending flag window for overflow */
	if (win_end > win_start) {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout >= win_start &&
			    work->timeout <= win_end) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	} else {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout <= win_end ||
			    (work->timeout >= win_start &&
			     work->timeout < ULONG_LONG_MAX)) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	}

	return pending_count;
}
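
/*
 * Worked example for the overflow branch above (illustrative numbers,
 * scaled down from a 64-bit timer): with window_size == 100 and a counter
 * that has just wrapped to win_end == 30, win_start underflows to
 * ULONG_LONG_MAX - 69. Work stamped just before the wrap
 * (timeout >= win_start) or just after it (timeout <= win_end) falls
 * inside the window and is marked pending; everything else waits.
 */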

static inline void work_next_timeout(struct work_queue *queue,
				     struct work *work,
				     uint64_t reschedule_usecs)
{
	/* reschedule work */
	uint64_t next_d = 0;

	if (reschedule_usecs % 1000)
		next_d = queue->ticks_per_usec * reschedule_usecs;
	else
		next_d = queue->ticks_per_msec * (reschedule_usecs / 1000);

	if (work->flags & WORK_SYNC) {
		work->timeout += next_d;
	} else {
		/* calc next run based on work request */
		work->timeout = next_d + queue->run_ticks;
	}
}
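
/*
 * A note on the two cases above (a reading of the code, not a documented
 * contract): WORK_SYNC advances the deadline from the previous deadline,
 * so a periodic item keeps a drift-free cadence even when its callback
 * runs late; the default case measures from run_ticks (when this queue
 * run started), so any lateness shifts the following runs.
 */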

/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
	struct list_item *wlist;
	struct list_item *tlist;
	struct work *work;
	uint64_t reschedule_usecs;
	uint64_t udelay;

	/* check each work item in queue for pending */
	list_for_item_safe(wlist, tlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		/* run work if it's pending and remove it from the queue */
		if (work->pending) {

			udelay = (work_get_timer(queue) - work->timeout) /
				queue->ticks_per_usec;

			/* work can run in non atomic context */
			spin_unlock_irq(&queue->lock, *flags);
			reschedule_usecs = work->cb(work->cb_data, udelay);
			spin_lock_irq(&queue->lock, *flags);

			/* do we need to reschedule this work ? */
			if (reschedule_usecs == 0) {
				list_item_del(&work->list);
			} else {
				/* get next work timeout */
				work_next_timeout(queue, work,
						  reschedule_usecs);
			}
		}
	}
}

static inline uint64_t calc_delta_ticks(uint64_t current, uint64_t work)
{
	uint64_t max = ULONG_LONG_MAX;

	/* does work run in next cycle ? */
	if (work < current) {
		max -= current;
		max += work;
		return max;
	} else {
		return work - current;
	}
}
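
/*
 * Worked example for calc_delta_ticks() (illustrative numbers): with the
 * counter at current == ULONG_LONG_MAX - 10 and a work item stamped at
 * work == 20, i.e. past the wrap, the function returns
 * (ULONG_LONG_MAX - current) + work == 10 + 20 == 30 ticks rather than
 * the huge value a naive work - current subtraction would give.
 */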

/* calculate next timeout */
static void queue_get_next_timeout(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t delta = ULONG_LONG_MAX;
	uint64_t current;
	uint64_t d;
	uint64_t ticks;

	/* only recalculate if work list is not empty */
	if (list_is_empty(&queue->work)) {
		queue->timeout = 0;
		return;
	}

	ticks = current = work_get_timer(queue);

	/* find time for next work */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		d = calc_delta_ticks(current, work->timeout);

		/* is this work next ? */
		if (d < delta) {
			ticks = work->timeout;
			delta = d;
		}
	}

	queue->timeout = ticks;
}

/* recalculate timers for queue after a CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
				struct clock_notify_data *clk_data)
{
	struct list_item *wlist;
	struct work *work;
	uint64_t delta_ticks;
	uint64_t delta_usecs;
	uint64_t current;

	/* get current time */
	current = work_get_timer(queue);

	/* recalculate timers for each work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		delta_ticks = calc_delta_ticks(current, work->timeout);
		delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

		/* rescale into the new clock rate; if work is already due,
		 * schedule it to run very soon */
		if (delta_usecs > 0)
			work->timeout = current +
				queue->ticks_per_usec * delta_usecs;
		else
			work->timeout = current + (queue->ticks_per_usec >> 3);
	}
}
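
/*
 * Worked example for queue_recalc_timers() (illustrative numbers): a work
 * item 500 us away at the old clock rate (delta_ticks /
 * old_ticks_per_usec == 500) is re-stamped as
 * current + 500 * ticks_per_usec, where ticks_per_usec has already been
 * updated for the new frequency in work_notify() below, so the remaining
 * wall-clock delay is preserved across the change.
 */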

static void queue_reschedule(struct work_queue *queue)
{
	queue_get_next_timeout(queue);

	if (queue->timeout)
		work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
	struct work_queue *queue = (struct work_queue *)data;
	uint32_t flags;

	/* clear interrupt */
	work_clear_timer(queue);

	spin_lock_irq(&queue->lock, flags);

	queue->run_ticks = work_get_timer(queue);

	/*
	 * Work can take a variable time to complete, so re-check the queue
	 * after running all the pending work to make sure no new work is
	 * pending.
	 */
	while (is_work_pending(queue))
		run_work(queue, &flags);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
	struct work_queue *queue = (struct work_queue *)data;
	struct clock_notify_data *clk_data =
		(struct clock_notify_data *)event_data;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* we need to recalculate timers when the CPU frequency changes */
	if (message == CLOCK_NOTIFY_POST) {

		/* CPU frequency update complete */
		/* scale the window size to clock speed */
		queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
		queue->window_size =
			queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
		queue_recalc_timers(queue, clk_data);
		queue_reschedule(queue);
	} else if (message == CLOCK_NOTIFY_PRE) {
		/* CPU frequency update pending */
	}

	spin_unlock_irq(&queue->lock, flags);
}
void work_schedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	if (timeout % 1000)
		w->timeout = queue->ticks_per_usec * timeout +
			work_get_timer(queue);
	else
		w->timeout = queue->ticks_per_msec * (timeout / 1000) +
			work_get_timer(queue);

	/* insert work into list */
	list_item_prepend(&w->list, &queue->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

out:
	spin_unlock_irq(&queue->lock, flags);
}
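
/*
 * A note on the us/ms split above (an assumption about intent, based on
 * the arithmetic): ticks_per_usec is an integer, so for clocks that are
 * not an exact multiple of 1 MHz it is rounded and the error grows with
 * the timeout. Millisecond-aligned timeouts therefore use ticks_per_msec,
 * which loses no precision at the millisecond scale.
 */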

void work_schedule_default(struct work *w, uint64_t timeout)
{
	work_schedule(queue_, w, timeout);
}

static void reschedule(struct work_queue *queue, struct work *w, uint64_t time)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* found it */
		if (work == w)
			goto found;
	}

	/* not found, insert work into list */
	list_item_prepend(&w->list, &queue->work);

found:
	/* re-calc timer and re-arm */
	w->timeout = time;
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_reschedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue->ticks_per_usec * timeout + work_get_timer(queue);

	reschedule(queue, w, time);
}

void work_reschedule_default(struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

	reschedule(queue_, w, time);
}

void work_reschedule_default_at(struct work *w, uint64_t time)
{
	reschedule(queue_, w, time);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_cancel_default(struct work *w)
{
	work_cancel(queue_, w);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
	struct work_queue *queue;

	/* init work queue */
	queue = rmalloc(RZONE_SYS, SOF_MEM_CAPS_RAM, sizeof(*queue));

	list_init(&queue->work);
	spinlock_init(&queue->lock);
	queue->ts = ts;
	queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
	queue->ticks_per_msec = clock_ms_to_ticks(queue->ts->clk, 1);
	queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

	/* notification of clk changes */
	queue->notifier.cb = work_notify;
	queue->notifier.cb_data = queue;
	queue->notifier.id = ts->notifier;
	notifier_register(&queue->notifier);

	/* register system timer */
	timer_register(&queue->ts->timer, queue_run, queue);

	return queue;
}
480void init_system_workq(struct work_queue_timesource *ts)
481{
482 queue_ = work_new_queue(ts);
483}