/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/work.h>
#include <reef/timer.h>
#include <reef/list.h>
#include <reef/clock.h>
#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/lock.h>
#include <reef/notifier.h>
#include <reef/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. Most users are expected to use the
 * system work queue, since private work queues depend on the availability of
 * architecture timers.
 *
 * Work run on the system work queue should be of short duration and must not
 * delay any other work on that queue. For longer-running work (such as audio
 * processing) use a private work queue.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts remain constant regardless of CPU
 * frequency changes.
 */

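/*
 * Example usage (sketch, not part of the original file): a client schedules a
 * periodic job on the system work queue. The callback signature and the
 * struct work fields (cb, cb_data, flags) follow their usage in this file;
 * reef/work.h may provide an initialiser helper instead of the direct field
 * assignment shown here.
 *
 *	static uint32_t my_work_handler(void *data, uint32_t udelay)
 *	{
 *		// udelay is how late we are running, in microseconds;
 *		// do a short piece of work, then ask to run again in 1000us
 *		return 1000;
 *	}
 *
 *	static struct work my_work;
 *
 *	static void my_init(void *ctx)
 *	{
 *		my_work.cb = my_work_handler;
 *		my_work.cb_data = ctx;
 *		my_work.flags = WORK_SYNC;	// keep a fixed 1000us cadence
 *		work_schedule_default(&my_work, 1000);
 *	}
 */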
struct work_queue {
        struct list_item work;          /* list of work */
        uint32_t timeout;               /* timeout for next queue run */
        uint32_t window_size;           /* window size for pending work */
        spinlock_t lock;
        struct notifier notifier;       /* notify CPU freq changes */
        struct work_queue_timesource *ts;       /* time source for work queue */
        uint32_t ticks_per_usec;        /* ticks per microsecond */
        uint32_t run_ticks;             /* ticks when the queue last ran */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline void work_set_timer(struct work_queue *queue, uint32_t ticks)
{
        queue->ts->timer_set(&queue->ts->timer, ticks);
        timer_enable(&queue->ts->timer);
}

static inline void work_clear_timer(struct work_queue *queue)
{
        queue->ts->timer_clear(&queue->ts->timer);
        timer_disable(&queue->ts->timer);
}

static inline uint32_t work_get_timer(struct work_queue *queue)
{
        return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window? */
static int is_work_pending(struct work_queue *queue)
{
        struct list_item *wlist;
        struct work *work;
        uint32_t win_end, win_start;
        int pending_count = 0;

        /* get the current valid window of work */
        win_end = work_get_timer(queue);
        win_start = win_end - queue->window_size;

        /* correct the pending flag window for overflow */
        if (win_end > win_start) {

                /* mark each valid work item in this time window as pending */
                list_for_item(wlist, &queue->work) {

                        work = container_of(wlist, struct work, list);

                        /* if the work has timed out then mark it as pending to run */
                        if (work->timeout >= win_start && work->timeout <= win_end) {
                                work->pending = 1;
                                pending_count++;
                        } else {
                                work->pending = 0;
                        }
                }
        } else {

                /* the window has wrapped around the end of the timer range */
                list_for_item(wlist, &queue->work) {

                        work = container_of(wlist, struct work, list);

                        /* if the work has timed out then mark it as pending to run */
                        if (work->timeout <= win_end ||
                                (work->timeout >= win_start && work->timeout < MAX_INT)) {
                                work->pending = 1;
                                pending_count++;
                        } else {
                                work->pending = 0;
                        }
                }
        }

        return pending_count;
}
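
/*
 * Example of the wrap-around case above (illustrative values only): with a
 * 32-bit timer, win_end = 0x100 and window_size = 0x200 give
 * win_start = 0xFFFFFF00, so win_end < win_start and the second branch runs.
 * Work with timeout 0xFFFFFF80 (just before the wrap) or 0x80 (just after it)
 * is still inside the window and is correctly marked pending.
 */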

static inline void work_next_timeout(struct work_queue *queue,
        struct work *work, uint32_t reschedule_usecs)
{
        /* reschedule the work */
        if (work->flags & WORK_SYNC) {
                /* synchronous work keeps a fixed cadence relative to its
                   previous deadline, even if the callback ran late */
                work->timeout += queue->ticks_per_usec * reschedule_usecs;
        } else {
                /* otherwise the next run is relative to the time this
                   queue run started */
                work->timeout = queue->ticks_per_usec *
                        reschedule_usecs + queue->run_ticks;
        }
}
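
/*
 * Worked example (illustrative values only): with ticks_per_usec = 1 and a
 * work item that was due at tick 1000 and asks to be rescheduled every
 * 1000us, if the queue actually ran it at tick 1150 then WORK_SYNC work is
 * next due at tick 2000 (1000 + 1000), while non-sync work is next due at
 * tick 2150 (run_ticks 1150 + 1000).
 */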

/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
        struct list_item *wlist, *tlist;
        struct work *work;
        uint32_t reschedule_usecs, udelay;

        /* check each work item in the queue for pending work */
        list_for_item_safe(wlist, tlist, &queue->work) {

                work = container_of(wlist, struct work, list);

                /* run the work if it is pending; remove it from the queue
                   unless it asks to be rescheduled */
                if (work->pending) {

                        /* how late is this work running, in microseconds? */
                        udelay = (work_get_timer(queue) - work->timeout) /
                                queue->ticks_per_usec;

                        /* the work callback can run in a non-atomic context */
                        spin_unlock_irq(&queue->lock, *flags);
                        reschedule_usecs = work->cb(work->cb_data, udelay);
                        spin_lock_irq(&queue->lock, *flags);

                        /* do we need to reschedule this work? */
                        if (reschedule_usecs == 0)
                                list_item_del(&work->list);
                        else {
                                /* get the next work timeout */
                                work_next_timeout(queue, work, reschedule_usecs);
                        }
                }
        }
}

static inline uint32_t calc_delta_ticks(uint32_t current, uint32_t work)
{
        uint32_t max = MAX_INT;

        /* does the work run in the next timer cycle, i.e. after the timer wraps? */
        if (work < current) {
                max -= current;
                max += work;
                return max;
        } else
                return work - current;
}
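
/*
 * Example for calc_delta_ticks() (illustrative values only): with a 32-bit
 * timer where MAX_INT is 0xFFFFFFFF, current = 0xFFFFFFF0 and a work timeout
 * of 0x10 means the deadline lies just past the wrap point, so the delta is
 * computed as (0xFFFFFFFF - 0xFFFFFFF0) + 0x10 = 0x1F ticks rather than the
 * huge value a plain subtraction would return.
 */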

/* calculate the next timeout for the queue */
static void queue_get_next_timeout(struct work_queue *queue)
{
        struct list_item *wlist;
        struct work *work;
        uint32_t delta = MAX_INT, current, d, ticks;

        /* only recalculate if the work list is not empty */
        if (list_is_empty(&queue->work)) {
                queue->timeout = 0;
                return;
        }

        ticks = current = work_get_timer(queue);

        /* find the time of the next work item */
        list_for_item(wlist, &queue->work) {

                work = container_of(wlist, struct work, list);

                d = calc_delta_ticks(current, work->timeout);

                /* is this work the next to run? */
                if (d < delta) {
                        ticks = work->timeout;
                        delta = d;
                }
        }

        queue->timeout = ticks;
}

/* re-calculate the timers for the queue after a CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
        struct clock_notify_data *clk_data)
{
        struct list_item *wlist;
        struct work *work;
        uint32_t delta_ticks, delta_usecs, current;

        /* get current time */
        current = work_get_timer(queue);

        /* re-calculate the timer for each work item */
        list_for_item(wlist, &queue->work) {

                work = container_of(wlist, struct work, list);

                delta_ticks = calc_delta_ticks(current, work->timeout);
                delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

                /* rescale the remaining delay to the new clock; if the work
                   was due in less than a microsecond, schedule it to run
                   almost immediately */
                if (delta_usecs > 0)
                        work->timeout = current + queue->ticks_per_usec * delta_usecs;
                else
                        work->timeout = current + (queue->ticks_per_usec >> 3);
        }
}
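
/*
 * Example for queue_recalc_timers() (illustrative values only): if the old
 * clock ran at 100 ticks/us and a work item was due in 5000 old ticks (50us),
 * then after a switch to 400 ticks/us its timeout is rewritten to
 * current + 400 * 50 = current + 20000 new ticks, so the 50us delay is
 * preserved across the frequency change.
 */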

static void queue_reschedule(struct work_queue *queue)
{
        queue_get_next_timeout(queue);

        if (queue->timeout)
                work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
        struct work_queue *queue = (struct work_queue *)data;
        uint32_t flags;

        /* clear interrupt */
        work_clear_timer(queue);

        spin_lock_irq(&queue->lock, flags);

        queue->run_ticks = work_get_timer(queue);

        /* work can take variable time to complete so we re-check the
           queue after running all the pending work to make sure no new
           work is pending */
        while (is_work_pending(queue))
                run_work(queue, &flags);

        /* re-calc timer and re-arm */
        queue_reschedule(queue);

        spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
        struct work_queue *queue = (struct work_queue *)data;
        struct clock_notify_data *clk_data =
                (struct clock_notify_data *)event_data;
        uint32_t flags;

        spin_lock_irq(&queue->lock, flags);

        /* we need to re-calculate the timers when the CPU frequency changes */
        if (message == CLOCK_NOTIFY_POST) {

                /* CPU frequency update complete */
                /* scale the tick rate and window size to the new clock speed */
                queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
                queue->window_size =
                        queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
                queue_recalc_timers(queue, clk_data);
                queue_reschedule(queue);
        } else if (message == CLOCK_NOTIFY_PRE) {
                /* CPU frequency update pending - nothing to do yet */
        }

        spin_unlock_irq(&queue->lock, flags);
}

void work_schedule(struct work_queue *queue, struct work *w, uint32_t timeout)
{
        struct work *work;
        struct list_item *wlist;
        uint32_t flags;

        spin_lock_irq(&queue->lock, flags);

        /* check to see if the work is already scheduled */
        list_for_item(wlist, &queue->work) {
                work = container_of(wlist, struct work, list);

                /* if so, keep the original timeout */
                if (work == w)
                        goto out;
        }

        /* convert the timeout in microseconds to CPU clock ticks */
        w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);

        /* insert work into list */
        list_item_prepend(&w->list, &queue->work);

        /* re-calc timer and re-arm */
        queue_reschedule(queue);

out:
        spin_unlock_irq(&queue->lock, flags);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
        uint32_t flags;

        spin_lock_irq(&queue->lock, flags);

        /* remove work from list */
        list_item_del(&w->list);

        /* re-calc timer and re-arm */
        queue_reschedule(queue);

        spin_unlock_irq(&queue->lock, flags);
}

void work_schedule_default(struct work *w, uint32_t timeout)
{
        struct work *work;
        struct list_item *wlist;
        uint32_t flags;

        spin_lock_irq(&queue_->lock, flags);

        /* check to see if the work is already scheduled */
        list_for_item(wlist, &queue_->work) {
                work = container_of(wlist, struct work, list);

                /* if so, keep the original timeout */
                if (work == w)
                        goto out;
        }

        /* convert the timeout in microseconds to CPU clock ticks */
        w->timeout = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

        /* insert work into list */
        list_item_prepend(&w->list, &queue_->work);

        /* re-calc timer and re-arm */
        queue_reschedule(queue_);

out:
        spin_unlock_irq(&queue_->lock, flags);
}

void work_cancel_default(struct work *w)
{
        uint32_t flags;

        spin_lock_irq(&queue_->lock, flags);

        /* remove work from list */
        list_item_del(&w->list);

        /* re-calc timer and re-arm */
        queue_reschedule(queue_);

        spin_unlock_irq(&queue_->lock, flags);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
        struct work_queue *queue;

        /* init work queue */
        queue = rmalloc(RZONE_DEV, RMOD_SYS, sizeof(*queue));

        list_init(&queue->work);
        spinlock_init(&queue->lock);
        queue->ts = ts;
        queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
        queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

        /* register for notification of clock changes */
        queue->notifier.cb = work_notify;
        queue->notifier.cb_data = queue;
        queue->notifier.id = ts->notifier;
        notifier_register(&queue->notifier);

        /* register the timer that drives this queue */
        timer_register(&queue->ts->timer, queue_run, queue);

        return queue;
}
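
/*
 * Example (sketch, not part of the original file): a platform creates a
 * private work queue by passing a timesource that binds a hardware timer,
 * its clock and a notifier id. The field names below follow their usage in
 * this file; the timer id, clock id, notifier id and callback names are
 * hypothetical placeholders for whatever the platform actually provides.
 *
 *	static struct work_queue_timesource platform_dsp_ts = {
 *		.timer		= { .id = PLATFORM_DSP_TIMER },	// hypothetical timer id
 *		.clk		= PLATFORM_DSP_CLK,		// hypothetical clock id
 *		.notifier	= PLATFORM_DSP_CLK_NOTIFIER,	// hypothetical notifier id
 *		.timer_set	= platform_timer_set,		// hypothetical platform callbacks
 *		.timer_clear	= platform_timer_clear,
 *		.timer_get	= platform_timer_get,
 *	};
 *
 *	struct work_queue *dsp_queue = work_new_queue(&platform_dsp_ts);
 */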

void init_system_workq(struct work_queue_timesource *ts)
{
        queue_ = work_new_queue(ts);
}