core: initial import of open source DSP firmware
This project provides an open source audio firmware infrastructure for audio
DSPs found on many modern devices. The intention is to allow developers to
create their own codecs, audio processing algorithms and pipelines using
the infrastructure and audio components provided by this project.
The project currently supports the Intel Baytrail and Cherrytrail audio DSP
platforms which use the Xtensa architecture.
The firmware source code is released under the BSD 3-clause licence.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
diff --git a/src/lib/work.c b/src/lib/work.c
new file mode 100644
index 0000000..e421da4
--- /dev/null
+++ b/src/lib/work.c
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2016, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Intel Corporation nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ * Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+#include <reef/work.h>
+#include <reef/timer.h>
+#include <reef/list.h>
+#include <reef/clock.h>
+#include <reef/alloc.h>
+#include <reef/reef.h>
+#include <reef/lock.h>
+#include <reef/notifier.h>
+#include <reef/debug.h>
+#include <platform/clk.h>
+#include <platform/platform.h>
+
+/*
+ * Generic delayed work queue support.
+ *
+ * Work can be queued to run after a microsecond timeout on either the system
+ * work queue or a private work queue. Most users are expected to use the
+ * system work queue, since private work queues depend on the availability of
+ * architecture timers.
+ *
+ * Work on the system work queue should be of short duration so that it does
+ * not delay any other work on the queue. Longer duration work (like audio
+ * processing) should use a private work queue.
+ *
+ * The generic work queues stay in step with any CPU clock changes, i.e.
+ * timeouts remain constant in wall clock time regardless of CPU frequency
+ * changes.
+ */
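+
+/*
+ * Illustrative usage sketch (not part of this file): the work callback
+ * receives its private data plus how late it ran in microseconds, and
+ * returns the next timeout in microseconds, or 0 to stop. The names used
+ * here are hypothetical:
+ *
+ *   static uint32_t my_work(void *data, uint32_t udelay)
+ *   {
+ *           do_something(data);
+ *           return 1000;  // run again in 1000 microseconds
+ *   }
+ *
+ *   w.cb = my_work;
+ *   w.cb_data = &my_data;
+ *   work_schedule_default(&w, 1000);  // first run in ~1000 usecs
+ */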
+
+struct work_queue {
+ struct list_item work; /* list of work */
+ uint32_t timeout; /* timeout for next queue run */
+ uint32_t window_size; /* window size for pending work */
+ spinlock_t lock;
+ struct notifier notifier; /* notify CPU freq changes */
+ struct work_queue_timesource *ts; /* time source for work queue */
+ uint32_t ticks_per_usec; /* ticks per microsecond */
+ uint32_t run_ticks; /* ticks when last run */
+};
+
+/* generic system work queue */
+static struct work_queue *queue_;
+
+static inline void work_set_timer(struct work_queue *queue, uint32_t ticks)
+{
+ queue->ts->timer_set(&queue->ts->timer, ticks);
+}
+
+static inline void work_clear_timer(struct work_queue *queue)
+{
+ queue->ts->timer_clear(&queue->ts->timer);
+}
+
+static inline uint32_t work_get_timer(struct work_queue *queue)
+{
+ return queue->ts->timer_get(&queue->ts->timer);
+}
+
+/* is there any work pending in the current time window? */
+static int is_work_pending(struct work_queue *queue)
+{
+ struct list_item *wlist;
+ struct work *work;
+ uint32_t win_end, win_start;
+ int pending_count = 0;
+
+ /* get the current valid window of work */
+ win_end = work_get_timer(queue);
+ win_start = win_end - queue->window_size;
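+ /* worked example (illustrative): with win_end = 0x10 and a window
+ * size of 0x100, win_start = 0xffffff10, i.e. win_end < win_start,
+ * so the window wraps the 32 bit timer and the second branch below
+ * is taken */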
+
+ /* split the check on whether the time window wraps the 32 bit timer */
+ if (win_end > win_start) {
+
+ /* mark each valid work item in this time period as pending */
+ list_for_item(wlist, &queue->work) {
+
+ work = container_of(wlist, struct work, list);
+
+ /* if work has timed out then mark it as pending to run */
+ if (work->timeout >= win_start && work->timeout <= win_end) {
+ work->pending = 1;
+ pending_count++;
+ } else {
+ work->pending = 0;
+ }
+ }
+ } else {
+
+ /* mark each valid work item in this time period as pending */
+ list_for_item(wlist, &queue->work) {
+
+ work = container_of(wlist, struct work, list);
+
+ /* if work has timed out then mark it as pending to run */
+ if (work->timeout <= win_end ||
+ (work->timeout >= win_start && work->timeout <= MAX_INT)) {
+ work->pending = 1;
+ pending_count++;
+ } else {
+ work->pending = 0;
+ }
+ }
+ }
+
+ return pending_count;
+}
+
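+/*
+ * WORK_SYNC work keeps a fixed cadence by advancing its deadline from the
+ * previous timeout, so it does not drift if a callback runs late; other
+ * work is rescheduled relative to run_ticks, the time this queue run
+ * started.
+ */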
+static inline void work_next_timeout(struct work_queue *queue,
+ struct work *work, uint32_t reschedule_usecs)
+{
+ /* reschedule work */
+ if (work->flags & WORK_SYNC) {
+ work->timeout += queue->ticks_per_usec * reschedule_usecs;
+ } else {
+ /* calc next run based on work request */
+ work->timeout = queue->ticks_per_usec *
+ reschedule_usecs + queue->run_ticks;
+ }
+}
+
+/* run all pending work */
+static void run_work(struct work_queue *queue, uint32_t *flags)
+{
+ struct list_item *wlist, *tlist;
+ struct work *work;
+ uint32_t reschedule_usecs, udelay;
+
+ /* check each work item in queue for pending */
+ list_for_item_safe(wlist, tlist, &queue->work) {
+
+ work = container_of(wlist, struct work, list);
+
+ /* run work if it's pending, then remove or reschedule it */
+ if (work->pending) {
+
+ udelay = (work_get_timer(queue) - work->timeout) /
+ queue->ticks_per_usec;
+
+ /* work can run in non atomic context */
+ spin_unlock_irq(&queue->lock, *flags);
+ reschedule_usecs = work->cb(work->cb_data, udelay);
+ spin_lock_irq(&queue->lock, *flags);
+
+ /* do we need to reschedule this work? */
+ if (reschedule_usecs == 0) {
+ list_item_del(&work->list);
+ } else {
+ /* get next work timeout */
+ work_next_timeout(queue, work, reschedule_usecs);
+ }
+ }
+ }
+}
+
+static inline uint32_t calc_delta_ticks(uint32_t current, uint32_t work)
+{
+ uint32_t max = MAX_INT;
+
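+ /* worked example (illustrative, assuming MAX_INT is 0xffffffff):
+ * current = 0xfffffff0 and work = 0x10 means the deadline is after
+ * the timer wraps, giving (0xffffffff - 0xfffffff0) + 0x10 = 0x1f
+ * ticks */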
+ /* does the work run in the next timer cycle? */
+ if (work < current) {
+ max -= current;
+ max += work;
+ return max;
+ } else {
+ return work - current;
+ }
+}
+
+/* calculate next timeout */
+static void queue_get_next_timeout(struct work_queue *queue)
+{
+ struct list_item *wlist;
+ struct work *work;
+ uint32_t delta = MAX_INT, current, d, ticks;
+
+ /* only recalc if work list not empty */
+ if (list_is_empty(&queue->work)) {
+ queue->timeout = 0;
+ return;
+ }
+
+ ticks = current = work_get_timer(queue);
+
+ /* find time for next work */
+ list_for_item(wlist, &queue->work) {
+
+ work = container_of(wlist, struct work, list);
+
+ d = calc_delta_ticks(current, work->timeout);
+
+ /* is this work the next to run? */
+ if (d < delta) {
+ ticks = work->timeout;
+ delta = d;
+ }
+ }
+
+ queue->timeout = ticks;
+}
+
+/* recalculate timers for the queue after a CPU frequency change */
+static void queue_recalc_timers(struct work_queue *queue,
+ struct clock_notify_data *clk_data)
+{
+ struct list_item *wlist;
+ struct work *work;
+ uint32_t delta_ticks, delta_usecs, current;
+
+ /* get current time */
+ current = work_get_timer(queue);
+
+ /* recalculate the timeout for each work item */
+ list_for_item(wlist, &queue->work) {
+
+ work = container_of(wlist, struct work, list);
+
+ delta_ticks = calc_delta_ticks(current, work->timeout);
+ delta_usecs = delta_ticks / clk_data->old_ticks_per_usec;
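+ /* e.g. (illustrative): 1900 ticks left at an old rate of 19
+ * ticks/usec is 100 usecs, which is converted back to ticks at
+ * the new rate below */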
+
+ /* rescale the remaining time to the new clock rate; work due in
+ * under a microsecond is scheduled to run almost immediately */
+ if (delta_usecs > 0)
+ work->timeout = current + queue->ticks_per_usec * delta_usecs;
+ else
+ work->timeout = current + (queue->ticks_per_usec >> 3);
+ }
+}
+
+static void queue_reschedule(struct work_queue *queue)
+{
+ queue_get_next_timeout(queue);
+
+ if (queue->timeout)
+ work_set_timer(queue, queue->timeout);
+}
+
+/* run the work queue */
+static void queue_run(void *data)
+{
+ struct work_queue *queue = (struct work_queue *)data;
+ uint32_t flags;
+
+ /* clear interrupt */
+ work_clear_timer(queue);
+
+ spin_lock_irq(&queue->lock, flags);
+
+ queue->run_ticks = work_get_timer(queue);
+
+ /* work can take variable time to complete so we re-check the
+ * queue after running all the pending work to make sure no new
+ * work is pending */
+ while (is_work_pending(queue))
+ run_work(queue, &flags);
+
+ /* re-calc timer and re-arm */
+ queue_reschedule(queue);
+
+ spin_unlock_irq(&queue->lock, flags);
+}
+
+/* notification of CPU frequency changes - atomic PRE and POST sequence */
+static void work_notify(int message, void *data, void *event_data)
+{
+ struct work_queue *queue = (struct work_queue *)data;
+ struct clock_notify_data *clk_data =
+ (struct clock_notify_data *)event_data;
+ uint32_t flags;
+
+ spin_lock_irq(&queue->lock, flags);
+
+ /* we need to recalculate timers when the CPU frequency changes */
+ if (message == CLOCK_NOTIFY_POST) {
+
+ /* CPU frequency update complete */
+ /* scale the window size to clock speed */
+ queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
+ queue->window_size =
+ queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
+ queue_recalc_timers(queue, clk_data);
+ queue_reschedule(queue);
+ timer_enable(&queue->ts->timer);
+ } else if (message == CLOCK_NOTIFY_PRE) {
+ /* CPU frequency update pending */
+ timer_disable(&queue->ts->timer);
+ }
+
+ spin_unlock_irq(&queue->lock, flags);
+}
+
+void work_schedule(struct work_queue *queue, struct work *w, uint32_t timeout)
+{
+ struct work *work;
+ struct list_item *wlist;
+ uint32_t flags;
+
+ spin_lock_irq(&queue->lock, flags);
+
+ /* check if the work is already scheduled */
+ list_for_item(wlist, &queue->work) {
+ work = container_of(wlist, struct work, list);
+
+ /* keep original timeout */
+ if (work == w)
+ goto out;
+ }
+
+ /* convert timeout microsecs to CPU clock ticks */
+ w->timeout = queue->ticks_per_usec * timeout + work_get_timer(queue);
+
+ /* insert work into list */
+ list_item_prepend(&w->list, &queue->work);
+
+ /* re-calc timer and re-arm */
+ queue_reschedule(queue);
+
+out:
+ spin_unlock_irq(&queue->lock, flags);
+}
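+
+/*
+ * Illustrative sketch only: longer duration work can use a private queue
+ * created with work_new_queue() below (my_timesource is hypothetical):
+ *
+ *   struct work_queue *q = work_new_queue(&my_timesource);
+ *   work_schedule(q, &w, 1000);  // first run in ~1000 microseconds
+ */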
+
+void work_cancel(struct work_queue *queue, struct work *w)
+{
+ uint32_t flags;
+
+ spin_lock_irq(&queue->lock, flags);
+
+ /* remove work from list */
+ list_item_del(&w->list);
+
+ /* re-calc timer and re-arm */
+ queue_reschedule(queue);
+
+ spin_unlock_irq(&queue->lock, flags);
+}
+
+void work_schedule_default(struct work *w, uint32_t timeout)
+{
+ struct work *work;
+ struct list_item *wlist;
+ uint32_t flags;
+
+ spin_lock_irq(&queue_->lock, flags);
+
+ /* check if the work is already scheduled */
+ list_for_item(wlist, &queue_->work) {
+ work = container_of(wlist, struct work, list);
+
+ /* keep original timeout */
+ if (work == w)
+ goto out;
+ }
+
+ /* convert timeout microsecs to CPU clock ticks */
+ w->timeout = queue_->ticks_per_usec * timeout + work_get_timer(queue_);
+
+ /* insert work into list */
+ list_item_prepend(&w->list, &queue_->work);
+
+ /* re-calc timer and re-arm */
+ queue_reschedule(queue_);
+
+out:
+ spin_unlock_irq(&queue_->lock, flags);
+}
+
+void work_cancel_default(struct work *w)
+{
+ uint32_t flags;
+
+ spin_lock_irq(&queue_->lock, flags);
+
+ /* remove work from list */
+ list_item_del(&w->list);
+
+ /* re-calc timer and re-arm */
+ queue_reschedule(queue_);
+
+ spin_unlock_irq(&queue_->lock, flags);
+}
+
+struct work_queue *work_new_queue(struct work_queue_timesource *ts)
+{
+ struct work_queue *queue;
+
+ /* init work queue */
+ queue = rmalloc(RZONE_DEV, RMOD_SYS, sizeof(*queue));
+
+ list_init(&queue->work);
+ spinlock_init(&queue->lock);
+ queue->ts = ts;
+ queue->ticks_per_usec = clock_us_to_ticks(queue->ts->clk, 1);
+ queue->window_size = queue->ticks_per_usec * PLATFORM_WORKQ_WINDOW;
+
+ /* notification of clk changes */
+ queue->notifier.cb = work_notify;
+ queue->notifier.cb_data = queue;
+ queue->notifier.id = ts->notifier;
+ notifier_register(&queue->notifier);
+
+ /* register system timer */
+ timer_register(&queue->ts->timer, queue_run, queue);
+
+ return queue;
+}
+
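+/*
+ * Illustrative sketch only: the platform supplies a work_queue_timesource
+ * wired to a hardware timer. Field names are inferred from their use
+ * above (the real definition lives in <reef/work.h>) and the values here
+ * are hypothetical:
+ *
+ *   static struct work_queue_timesource platform_ts = {
+ *           .timer = { ... },
+ *           .clk = CLK_CPU,
+ *           .notifier = NOTIFIER_ID_CPU_FREQ,
+ *           .timer_set = arch_timer_set,
+ *           .timer_clear = arch_timer_clear,
+ *           .timer_get = arch_timer_get,
+ *   };
+ *
+ *   init_system_workq(&platform_ts);
+ */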
+void init_system_workq(struct work_queue_timesource *ts)
+{
+ queue_ = work_new_queue(ts);
+}