/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/work.h>
#include <reef/timer.h>
#include <reef/list.h>
#include <reef/clock.h>
#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/lock.h>
#include <reef/notifier.h>
#include <reef/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. Most users are expected to use the
 * system work queue, since private work queues depend on the availability of
 * architecture timers.
 *
 * Work on the system work queue should be of short duration and must not
 * delay any other work on this queue. Longer duration work (like audio
 * processing) should use a private work queue.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts remain constant regardless of CPU
 * frequency changes.
 */
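
/*
 * Illustrative usage sketch only: struct work and its initialiser live in
 * <reef/work.h> (not shown here), so the callback shape, the work_init()
 * helper and the flag value below are assumptions based on how this file
 * invokes work->cb(), not a confirmed interface.
 *
 *	static uint32_t my_task(void *data, uint32_t delay)
 *	{
 *		do_some_short_processing(data);
 *		return 1000;	// run again in 1000 microseconds, 0 = one shot
 *	}
 *
 *	work_init(&my_work, my_task, &my_context, 0);	// assumed initialiser
 *	work_schedule_default(&my_work, 1000);		// first run in ~1000 us
 *	...
 *	work_cancel_default(&my_work);			// remove from system queue
 */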

struct work_queue {
	struct list_item work;		/* list of work */
	uint64_t timeout;		/* timeout for next queue run */
	uint32_t window_size;		/* window size for pending work */
	spinlock_t lock;
	struct notifier notifier;	/* notify CPU freq changes */
	struct work_queue_timesource *ts;	/* time source for work queue */
	uint32_t ticks_per_usec;	/* ticks per usec */
	uint64_t run_ticks;		/* ticks when last run */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline int work_set_timer(struct work_queue *queue, uint64_t ticks)
{
	int ret;

	ret = queue->ts->timer_set(&queue->ts->timer, ticks);
	timer_enable(&queue->ts->timer);

	return ret;
}

static inline void work_clear_timer(struct work_queue *queue)
{
	queue->ts->timer_clear(&queue->ts->timer);
	timer_disable(&queue->ts->timer);
}

static inline uint64_t work_get_timer(struct work_queue *queue)
{
	return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window ? */
static int is_work_pending(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t win_end;
	uint32_t win_start;
	int pending_count = 0;

	/* get the current valid window of work */
	win_end = work_get_timer(queue);
	win_start = win_end - queue->window_size;

	/* correct the pending flag window for overflow */
	if (win_end > win_start) {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout >= win_start &&
			    work->timeout <= win_end) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	} else {

		/* the window has wrapped the timer - check both halves */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout <= win_end ||
			    (work->timeout >= win_start &&
			     work->timeout < MAX_INT)) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	}

	return pending_count;
}

static inline void work_next_timeout(struct work_queue *queue,
	struct work *work, uint32_t reschedule_usecs)
{
	/* reschedule work */
	if (work->flags & WORK_SYNC) {
		/* synchronous work keeps its phase relative to the last deadline */
		work->timeout += queue->ticks_per_usec * reschedule_usecs;
	} else {
		/* calc next run relative to the time the queue last ran */
		work->timeout = queue->ticks_per_usec *
			reschedule_usecs + queue->run_ticks;
	}
}

/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
	struct list_item *wlist;
	struct list_item *tlist;
	struct work *work;
	uint32_t reschedule_usecs;
	uint32_t udelay;

	/* check each work item in queue for pending */
	list_for_item_safe(wlist, tlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		/* run work if it's pending and remove it from the queue */
		if (work->pending) {

			udelay = (work_get_timer(queue) - work->timeout) /
				queue->ticks_per_usec;

			/* work can run in non atomic context */
			spin_unlock_irq(&queue->lock, *flags);
			reschedule_usecs = work->cb(work->cb_data, udelay);
			spin_lock_irq(&queue->lock, *flags);

			/* do we need to reschedule this work ? */
			if (reschedule_usecs == 0) {
				list_item_del(&work->list);
			} else {
				/* get next work timeout */
				work_next_timeout(queue, work,
					reschedule_usecs);
			}
		}
	}
}

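/*
 * Worked example for the wrap-around handling below (illustrative only,
 * assuming the timer counts up to MAX_INT and then wraps): with a 32-bit
 * counter, current = 0xFFFFFF00 and a work deadline of 0x00000100, the
 * deadline lies in the next timer cycle, so the delta is the ticks left
 * before the counter reaches MAX_INT (MAX_INT - current) plus the ticks
 * from the wrap to the deadline (work).
 */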
static inline uint32_t calc_delta_ticks(uint32_t current, uint32_t work)
{
	uint32_t max = MAX_INT;

	/* does work run in next cycle ? */
	if (work < current) {
		max -= current;
		max += work;
		return max;
	} else {
		return work - current;
	}
}

/* calculate next timeout */
static void queue_get_next_timeout(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta = MAX_INT;
	uint32_t current;
	uint32_t d;
	uint32_t ticks;

	/* only recalc if work list not empty */
	if (list_is_empty(&queue->work)) {
		queue->timeout = 0;
		return;
	}

	ticks = current = work_get_timer(queue);

	/* find time for next work */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		d = calc_delta_ticks(current, work->timeout);

		/* is work next ? */
		if (d < delta) {
			ticks = work->timeout;
			delta = d;
		}
	}

	queue->timeout = ticks;
}

/* re-calculate timers for queue after CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
	struct clock_notify_data *clk_data)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta_ticks;
	uint32_t delta_usecs;
	uint32_t current;

	/* get current time */
	current = work_get_timer(queue);

	/* re-calculate timers for each work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		delta_ticks = calc_delta_ticks(current, work->timeout);
		delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

		/* rescale the remaining delay to the new clock rate; if work
		   is due within the next microsecond then schedule it to run
		   almost immediately */
		if (delta_usecs > 0)
			work->timeout = current +
				queue->ticks_per_usec * delta_usecs;
		else
			work->timeout = current + (queue->ticks_per_usec >> 3);
	}
}

static void queue_reschedule(struct work_queue *queue)
{
	queue_get_next_timeout(queue);

	if (queue->timeout)
		work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
	struct work_queue *queue = (struct work_queue *)data;
	uint32_t flags;

	/* clear interrupt */
	work_clear_timer(queue);

	spin_lock_irq(&queue->lock, flags);

	queue->run_ticks = work_get_timer(queue);

	/*
	 * Work can take a variable time to complete, so we re-check the
	 * queue after running all the pending work to make sure no new work
	 * is pending.
	 */
	while (is_work_pending(queue))
		run_work(queue, &flags);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
	struct work_queue *queue = (struct work_queue *)data;
	struct clock_notify_data *clk_data =
		(struct clock_notify_data *)event_data;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* we need to re-calculate timers when the CPU frequency changes */
	if (message == CLOCK_NOTIFY_POST) {

		/* CPU frequency update complete */
		/* scale the window size to clock speed */
		queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
		queue->window_size =
			queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
		queue_recalc_timers(queue, clk_data);
		queue_reschedule(queue);
	} else if (message == CLOCK_NOTIFY_PRE) {
		/* CPU frequency update pending */
	}

	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);

	/* insert work into list */
	list_item_prepend(&w->list, &queue->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

out:
	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule_default(struct work *w, uint64_t timeout)
{
	work_schedule(queue_, w, timeout);
}

static void reschedule(struct work_queue *queue, struct work *w, uint64_t time)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* found it */
		if (work == w)
			goto found;
	}

	/* not found, insert work into list */
	list_item_prepend(&w->list, &queue->work);

found:
	/* re-calc timer and re-arm */
	w->timeout = time;
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_reschedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue->ticks_per_usec * timeout + work_get_timer(queue);

	reschedule(queue, w, time);
}

void work_reschedule_default(struct work *w, uint64_t timeout)
{
	uint64_t time;

	/* convert timeout microseconds to CPU clock ticks */
	time = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

	reschedule(queue_, w, time);
}

void work_reschedule_default_at(struct work *w, uint64_t time)
{
	reschedule(queue_, w, time);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_cancel_default(struct work *w)
{
	work_cancel(queue_, w);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
	struct work_queue *queue;

	/* init work queue */
	queue = rmalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*queue));

	list_init(&queue->work);
	spinlock_init(&queue->lock);
	queue->ts = ts;
	queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
	queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

	/* notification of clk changes */
	queue->notifier.cb = work_notify;
	queue->notifier.cb_data = queue;
	queue->notifier.id = ts->notifier;
	notifier_register(&queue->notifier);

	/* register system timer */
	timer_register(&queue->ts->timer, queue_run, queue);

	return queue;
}

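/*
 * Illustrative platform setup sketch: the timesource fields below are taken
 * from how this file uses struct work_queue_timesource, but the initialiser
 * values (clock id, notifier id and timer callbacks) are assumptions that a
 * real platform would define in its own platform code.
 *
 *	static struct work_queue_timesource platform_ts = {
 *		.clk = CLK_CPU,				// assumed clock id
 *		.notifier = NOTIFIER_ID_CPU_FREQ,	// assumed notifier id
 *		.timer_set = platform_timer_set,	// assumed callbacks
 *		.timer_clear = platform_timer_clear,
 *		.timer_get = platform_timer_get,
 *	};
 *
 *	init_system_workq(&platform_ts);
 */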
void init_system_workq(struct work_queue_timesource *ts)
{
	queue_ = work_new_queue(ts);
}