/*
 * Copyright (c) 2016, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *   * Neither the name of the Intel Corporation nor the
 *     names of its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

#include <reef/work.h>
#include <reef/timer.h>
#include <reef/list.h>
#include <reef/clock.h>
#include <reef/alloc.h>
#include <reef/reef.h>
#include <reef/lock.h>
#include <reef/notifier.h>
#include <reef/debug.h>
#include <platform/clk.h>
#include <platform/platform.h>

/*
 * Generic delayed work queue support.
 *
 * Work can be queued to run after a microsecond timeout on either the system
 * work queue or a private work queue. It's expected that most users will use
 * the system work queue, since private work queues depend on the availability
 * of architecture timers.
 *
 * Work on the system work queue should be of short duration and must not
 * delay any other work on the queue. Longer-duration work (like audio
 * processing) should use a private work queue.
 *
 * The generic work queues are intended to stay in time synchronisation with
 * any CPU clock changes, i.e. timeouts remain constant regardless of CPU
 * frequency changes.
 */

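/*
 * A minimal usage sketch (illustrative only: my_task() and my_task_data are
 * hypothetical, and the struct work fields are assigned directly based on
 * how they are used in this file; reef/work.h may provide an init helper
 * instead):
 *
 *	static int my_task_data;
 *
 *	static uint32_t my_task(void *data, uint32_t udelay)
 *	{
 *		// ... short, non-blocking processing ...
 *		return 1000;	// re-run in 1000us, return 0 to stop
 *	}
 *
 *	static struct work w = {
 *		.cb = my_task,
 *		.cb_data = &my_task_data,
 *		.flags = WORK_SYNC,	// drift-free rescheduling
 *	};
 *
 *	work_schedule_default(&w, 1000);	// first run in ~1000us
 */
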
struct work_queue {
	struct list_item work;		/* list of work */
	uint32_t timeout;		/* timeout for next queue run */
	uint32_t window_size;		/* window size for pending work */
	spinlock_t lock;
	struct notifier notifier;	/* notify CPU freq changes */
	struct work_queue_timesource *ts;	/* time source for work queue */
	uint32_t ticks_per_usec;	/* ticks per microsecond */
	uint32_t run_ticks;		/* ticks when the queue last ran */
};

/* generic system work queue */
static struct work_queue *queue_;

static inline void work_set_timer(struct work_queue *queue, uint32_t ticks)
{
	queue->ts->timer_set(&queue->ts->timer, ticks);
}

static inline void work_clear_timer(struct work_queue *queue)
{
	queue->ts->timer_clear(&queue->ts->timer);
}

static inline uint32_t work_get_timer(struct work_queue *queue)
{
	return queue->ts->timer_get(&queue->ts->timer);
}

/* is there any work pending in the current time window ? */
static int is_work_pending(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t win_end, win_start;
	int pending_count = 0;

	/* get the current valid window of work */
	win_end = work_get_timer(queue);
	win_start = win_end - queue->window_size;

	/* the pending check depends on whether the window has wrapped */
	if (win_end > win_start) {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout >= win_start && work->timeout <= win_end) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	} else {

		/* mark each valid work item in this time period as pending */
		list_for_item(wlist, &queue->work) {

			work = container_of(wlist, struct work, list);

			/* if work has timed out then mark it as pending to run */
			if (work->timeout <= win_end ||
				(work->timeout >= win_start && work->timeout < MAX_INT)) {
				work->pending = 1;
				pending_count++;
			} else {
				work->pending = 0;
			}
		}
	}

	return pending_count;
}

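/*
 * WORK_SYNC work advances its deadline from the previous deadline, keeping a
 * drift-free period; other work is rescheduled relative to when the queue
 * last ran. For example (illustrative numbers), with reschedule_usecs == 1000
 * and a queue run that starts 20us late, a WORK_SYNC item still fires on the
 * original 1000us grid while a non-sync item slips by the 20us.
 */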
static inline void work_next_timeout(struct work_queue *queue,
	struct work *work, uint32_t reschedule_usecs)
{
	/* reschedule work */
	if (work->flags & WORK_SYNC) {
		work->timeout += queue->ticks_per_usec * reschedule_usecs;
	} else {
		/* calc next run based on work request */
		work->timeout = queue->ticks_per_usec *
			reschedule_usecs + queue->run_ticks;
	}
}

/* run all pending work */
static void run_work(struct work_queue *queue, uint32_t *flags)
{
	struct list_item *wlist, *tlist;
	struct work *work;
	uint32_t reschedule_usecs, udelay;

	/* check each work item in queue for pending */
	list_for_item_safe(wlist, tlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		/* run work if it's pending and remove it from the queue */
		if (work->pending) {

			udelay = (work_get_timer(queue) - work->timeout) /
				queue->ticks_per_usec;

			/* work can run in non-atomic context */
			spin_unlock_irq(&queue->lock, *flags);
			reschedule_usecs = work->cb(work->cb_data, udelay);
			spin_lock_irq(&queue->lock, *flags);

			/* do we need to reschedule this work ? */
			if (reschedule_usecs == 0)
				list_item_del(&work->list);
			else {
				/* get next work timeout */
				work_next_timeout(queue, work, reschedule_usecs);
			}
		}
	}
}

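/*
 * Delta in ticks from 'current' to 'work' on a free-running counter,
 * allowing for one wrap. Worked example (assuming a 32-bit MAX_INT of
 * 0xffffffff): current == 0xfffffff0 and work == 0x10 give
 * (0xffffffff - 0xfffffff0) + 0x10 == 0x1f ticks.
 */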
static inline uint32_t calc_delta_ticks(uint32_t current, uint32_t work)
{
	uint32_t max = MAX_INT;

	/* does work run in next cycle ? */
	if (work < current) {
		max -= current;
		max += work;
		return max;
	} else
		return work - current;
}

/* calculate next timeout */
static void queue_get_next_timeout(struct work_queue *queue)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta = MAX_INT, current, d, ticks;

	/* only recalc if work list not empty */
	if (list_is_empty(&queue->work)) {
		queue->timeout = 0;
		return;
	}

	ticks = current = work_get_timer(queue);

	/* find time for next work */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		d = calc_delta_ticks(current, work->timeout);

		/* is work next ? */
		if (d < delta) {
			ticks = work->timeout;
			delta = d;
		}
	}

	queue->timeout = ticks;
}

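/*
 * Rescaling example for the recalculation below (illustrative numbers):
 * a work item 4800 ticks away at an old rate of 48 ticks/us is 100us from
 * now, so after a clock change to 19 ticks/us its new timeout becomes
 * current + 100 * 19 ticks, preserving the wall-clock deadline.
 */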
/* re-calculate timers for queue after CPU frequency change */
static void queue_recalc_timers(struct work_queue *queue,
	struct clock_notify_data *clk_data)
{
	struct list_item *wlist;
	struct work *work;
	uint32_t delta_ticks, delta_usecs, current;

	/* get current time */
	current = work_get_timer(queue);

	/* re-calculate timers for each work item */
	list_for_item(wlist, &queue->work) {

		work = container_of(wlist, struct work, list);

		delta_ticks = calc_delta_ticks(current, work->timeout);
		delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;

		/* work due within the next microsecond is scheduled almost
		   immediately */
		if (delta_usecs > 0)
			work->timeout = current + queue->ticks_per_usec * delta_usecs;
		else
			work->timeout = current + (queue->ticks_per_usec >> 3);
	}
}

static void queue_reschedule(struct work_queue *queue)
{
	queue_get_next_timeout(queue);

	if (queue->timeout)
		work_set_timer(queue, queue->timeout);
}

/* run the work queue */
static void queue_run(void *data)
{
	struct work_queue *queue = (struct work_queue *)data;
	uint32_t flags;

	/* clear interrupt */
	work_clear_timer(queue);

	spin_lock_irq(&queue->lock, flags);

	queue->run_ticks = work_get_timer(queue);

	/* work can take a variable time to complete, so re-check the queue
	   after running all the pending work to make sure no new work is
	   pending */
	while (is_work_pending(queue))
		run_work(queue, &flags);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

/* notification of CPU frequency changes - atomic PRE and POST sequence */
static void work_notify(int message, void *data, void *event_data)
{
	struct work_queue *queue = (struct work_queue *)data;
	struct clock_notify_data *clk_data =
		(struct clock_notify_data *)event_data;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* we need to re-calculate timers when the CPU frequency changes */
	if (message == CLOCK_NOTIFY_POST) {

		/* CPU frequency update complete */
		/* scale the window size to the clock speed */
		queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
		queue->window_size =
			queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
		queue_recalc_timers(queue, clk_data);
		queue_reschedule(queue);
		timer_enable(&queue->ts->timer);
	} else if (message == CLOCK_NOTIFY_PRE) {
		/* CPU frequency update pending */
		timer_disable(&queue->ts->timer);
	}

	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule(struct work_queue *queue, struct work *w, uint32_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);

	/* insert work into list */
	list_item_prepend(&w->list, &queue->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

out:
	spin_unlock_irq(&queue->lock, flags);
}

void work_cancel(struct work_queue *queue, struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue);

	spin_unlock_irq(&queue->lock, flags);
}

void work_schedule_default(struct work *w, uint32_t timeout)
{
	struct work *work;
	struct list_item *wlist;
	uint32_t flags;

	spin_lock_irq(&queue_->lock, flags);

	/* check to see if we are already scheduled */
	list_for_item(wlist, &queue_->work) {
		work = container_of(wlist, struct work, list);

		/* keep the original timeout */
		if (work == w)
			goto out;
	}

	/* convert timeout microseconds to CPU clock ticks */
	w->timeout = queue_->ticks_per_usec * timeout + work_get_timer(queue_);

	/* insert work into list */
	list_item_prepend(&w->list, &queue_->work);

	/* re-calc timer and re-arm */
	queue_reschedule(queue_);

out:
	spin_unlock_irq(&queue_->lock, flags);
}

void work_cancel_default(struct work *w)
{
	uint32_t flags;

	spin_lock_irq(&queue_->lock, flags);

	/* remove work from list */
	list_item_del(&w->list);

	/* re-calc timer and re-arm */
	queue_reschedule(queue_);

	spin_unlock_irq(&queue_->lock, flags);
}

struct work_queue *work_new_queue(struct work_queue_timesource *ts)
{
	struct work_queue *queue;

	/* init work queue */
	queue = rmalloc(RZONE_DEV, RMOD_SYS, sizeof(*queue));

	list_init(&queue->work);
	spinlock_init(&queue->lock);
	queue->ts = ts;
	queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
	queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;

	/* notification of clk changes */
	queue->notifier.cb = work_notify;
	queue->notifier.cb_data = queue;
	queue->notifier.id = ts->notifier;
	notifier_register(&queue->notifier);

	/* register system timer */
	timer_register(&queue->ts->timer, queue_run, queue);

	return queue;
}

void init_system_workq(struct work_queue_timesource *ts)
{
	queue_ = work_new_queue(ts);
}