/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/work.h>
#include <reef/timer.h>
#include <reef/list.h>
#include <reef/clock.h>
#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/lock.h>
#include <reef/notifier.h>
#include <reef/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. Most users are expected to use the
 * system work queue, since private work queues depend on the availability of
 * architecture timers.
 *
 * Work run on the system work queue should be of short duration and must not
 * delay any other work on this queue. If you have longer-duration work (like
 * audio processing) then use a private work queue.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts will remain constant regardless of CPU
 * frequency changes.
 */

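/*
 * Example usage - a minimal sketch only. The callback signature is inferred
 * from run_work() below; work_init() and the my_* names are illustrative
 * assumptions, so check <reef/work.h> for the exact initialiser and flags:
 *
 *	static uint32_t my_work_cb(void *data, uint32_t udelay)
 *	{
 *		(do a short piece of work; udelay = usecs we ran late)
 *		return 1000;	(re-run in 1000 usecs, return 0 to stop)
 *	}
 *
 *	static struct work my_work;
 *
 *	work_init(&my_work, my_work_cb, &my_data, WORK_SYNC);
 *	work_schedule_default(&my_work, 1000);	(first run in 1000 usecs)
 */
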
struct work_queue {
	struct list_item work;			/* list of work */
	uint64_t timeout;			/* timeout for next queue run */
	uint32_t window_size;			/* window size for pending work */
	spinlock_t lock;
	struct notifier notifier;		/* notify CPU freq changes */
	struct work_queue_timesource *ts;	/* time source for work queue */
	uint32_t ticks_per_usec;		/* ticks per usec */
	uint64_t run_ticks;			/* ticks when last run */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline int work_set_timer(struct work_queue *queue, uint64_t ticks)
{
	int ret;

	ret = queue->ts->timer_set(&queue->ts->timer, ticks);
	timer_enable(&queue->ts->timer);

	return ret;
}

static inline void work_clear_timer(struct work_queue *queue)
{
	queue->ts->timer_clear(&queue->ts->timer);
	timer_disable(&queue->ts->timer);
}

static inline uint64_t work_get_timer(struct work_queue *queue)
{
	return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window? */
static int is_work_pending(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t win_end, win_start;
	int pending_count = 0;

	/* get the current valid window of work */
	win_end = work_get_timer(queue);
	win_start = win_end - queue->window_size;

	/* correct the pending flag window for overflow */
	if (win_end > win_start) {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout >= win_start &&
			    work->timeout <= win_end) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	} else {

		/* the window wraps around the counter maximum */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout <= win_end ||
			    (work->timeout >= win_start &&
			     work->timeout < MAX_INT)) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	}

	return pending_count;
}

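/*
 * Worked example of the window check above (numbers are illustrative and
 * assume a tick counter that wraps at MAX_INT):
 *
 *	normal case:	win_start = 1000, win_end = 1500
 *			work->timeout = 1200  -> pending (inside window)
 *			work->timeout = 2000  -> not pending (in the future)
 *
 *	wrapped case:	win_start = MAX_INT - 200, win_end = 300
 *			work->timeout = MAX_INT - 100  -> pending (before wrap)
 *			work->timeout = 250            -> pending (after wrap)
 *			work->timeout = 1000           -> not pending
 */
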
static inline void work_next_timeout(struct work_queue *queue,
	struct work *work, uint32_t reschedule_usecs)
{
	/* reschedule work */
	if (work->flags & WORK_SYNC) {
		/* synchronous work runs relative to its previous timeout,
		 * so the period does not drift */
		work->timeout += queue->ticks_per_usec * reschedule_usecs;
	} else {
		/* calc next run relative to the time this queue run started */
		work->timeout = queue->ticks_per_usec *
			reschedule_usecs + queue->run_ticks;
	}
}

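/*
 * For example, with ticks_per_usec = 10, reschedule_usecs = 1000 and a
 * callback that ran 50 usecs late (illustrative numbers):
 *
 *	WORK_SYNC:	timeout = old timeout + 10000 ticks
 *			(period stays exactly 1000 usecs, lateness is absorbed)
 *
 *	async:		timeout = run_ticks + 10000 ticks
 *			(period is measured from when this queue run started,
 *			 so the schedule slips by the lateness instead)
 */
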
/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
	struct list_item *wlist, *tlist;
	struct work *work;
	uint32_t reschedule_usecs, udelay;

	/* check each work item in queue for pending */
	list_for_item_safe(wlist, tlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		/* run work if it's pending and remove it from the queue */
		if (work->pending) {

			udelay = (work_get_timer(queue) - work->timeout) /
				queue->ticks_per_usec;

			/* work can run in non-atomic context */
			spin_unlock_irq(&queue->lock, *flags);
			reschedule_usecs = work->cb(work->cb_data, udelay);
			spin_lock_irq(&queue->lock, *flags);

			/* do we need to reschedule this work ? */
			if (reschedule_usecs == 0) {
				list_item_del(&work->list);
			} else {
				/* get next work timeout */
				work_next_timeout(queue, work,
					reschedule_usecs);
			}
		}
	}
}

static inline uint32_t calc_delta_ticks(uint32_t current, uint32_t work)
{
	uint32_t max = MAX_INT;

	/* does work run in the next timer cycle, i.e. after counter wrap ? */
	if (work < current) {
		max -= current;
		max += work;
		return max;
	} else {
		return work - current;
	}
}

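/*
 * Worked example of the wrap handling above (illustrative numbers, with
 * MAX_INT as the counter maximum): current = MAX_INT - 100, work = 50.
 * Since work < current, delta = (MAX_INT - current) + work = 100 + 50 = 150
 * ticks, i.e. the work is due shortly after the counter wraps rather than
 * being treated as almost a full counter period in the past.
 */
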
/* calculate next timeout */
static void queue_get_next_timeout(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta = MAX_INT, current, d, ticks;

	/* only recalc if work list not empty */
	if (list_is_empty(&queue->work)) {
		queue->timeout = 0;
		return;
	}

	ticks = current = work_get_timer(queue);

	/* find the time of the next work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		d = calc_delta_ticks(current, work->timeout);

		/* is this work item next ? */
		if (d < delta) {
			ticks = work->timeout;
			delta = d;
		}
	}

	queue->timeout = ticks;
}

/* re-calculate timers for queue after a CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
	struct clock_notify_data *clk_data)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta_ticks, delta_usecs, current;

	/* get current time */
	current = work_get_timer(queue);

	/* re-calculate timers for each work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		delta_ticks = calc_delta_ticks(current, work->timeout);
		delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

		/* rescale the remaining time at the new clock rate; work
		 * due in under a usec is scheduled almost immediately */
		if (delta_usecs > 0)
			work->timeout = current +
				queue->ticks_per_usec * delta_usecs;
		else
			work->timeout = current + (queue->ticks_per_usec >> 3);
	}
}

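/*
 * Worked example of the rescale above (illustrative numbers): a work item is
 * 500 usecs away when the clock drops from 100 to 25 ticks/usec. The delta of
 * 50000 old ticks is first converted back to 500 usecs using
 * old_ticks_per_usec, then re-expressed as 500 * 25 = 12500 new ticks from
 * now, so the work still fires 500 usecs later in wall-clock time.
 */
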
static void queue_reschedule(struct work_queue *queue)
{
	queue_get_next_timeout(queue);

	if (queue->timeout)
		work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
	struct work_queue *queue = (struct work_queue *)data;
	uint32_t flags;

	/* clear interrupt */
	work_clear_timer(queue);

	spin_lock_irq(&queue->lock, flags);

	queue->run_ticks = work_get_timer(queue);

	/* work can take a variable time to complete, so re-check the queue
	 * after running all the pending work to make sure no new work is
	 * pending */
	while (is_work_pending(queue))
		run_work(queue, &flags);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
	struct work_queue *queue = (struct work_queue *)data;
	struct clock_notify_data *clk_data =
		(struct clock_notify_data *)event_data;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* we need to re-calculate the timers when the CPU frequency changes */
	if (message == CLOCK_NOTIFY_POST) {

		/* CPU frequency update complete */
		/* scale the window size to the new clock speed */
		queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
		queue->window_size =
			queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
		queue_recalc_timers(queue, clk_data);
		queue_reschedule(queue);
	} else if (message == CLOCK_NOTIFY_PRE) {
		/* CPU frequency update pending */
	}

	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule(struct work_queue *queue, struct work *w, uint64_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout if already scheduled */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);

	/* insert work into list */
	list_item_prepend(&w->list, &queue->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

out:
	spin_unlock_irq(&queue->lock, flags);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule_default(struct work *w, uint64_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue_->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue_->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout if already scheduled */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	w->timeout = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

	/* insert work into list */
	list_item_prepend(&w->list, &queue_->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue_);

out:
	spin_unlock_irq(&queue_->lock, flags);
}

void work_cancel_default(struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue_->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue_);

	spin_unlock_irq(&queue_->lock, flags);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
	struct work_queue *queue;

	/* init work queue */
	queue = rmalloc(RZONE_SYS, RFLAGS_NONE, sizeof(*queue));

	list_init(&queue->work);
	spinlock_init(&queue->lock);
	queue->ts = ts;
	queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
	queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

	/* notification of clk changes */
	queue->notifier.cb = work_notify;
	queue->notifier.cb_data = queue;
	queue->notifier.id = ts->notifier;
	notifier_register(&queue->notifier);

	/* register the timesource timer to run this queue */
	timer_register(&queue->ts->timer, queue_run, queue);

	return queue;
}

void init_system_workq(struct work_queue_timesource *ts)
{
	queue_ = work_new_queue(ts);
}
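
/*
 * A minimal sketch of platform bring-up, assuming only the timesource fields
 * this file actually uses (timer, timer_set/get/clear, clk, notifier); the
 * exact struct layout and the platform_* and CLK_*/NOTIFIER_* names below
 * are illustrative assumptions:
 *
 *	static struct work_queue_timesource platform_ts = {
 *		.timer		= { ... },	(platform timer instance)
 *		.timer_set	= platform_timer_set,
 *		.timer_get	= platform_timer_get,
 *		.timer_clear	= platform_timer_clear,
 *		.clk		= CLK_CPU,
 *		.notifier	= NOTIFIER_ID_CPU_FREQ,
 *	};
 *
 *	init_system_workq(&platform_ts);	(system queue is now live)
 */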