/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

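/*
 * Illustrative usage (a sketch, not part of this file; init_irq_work()
 * comes from <linux/irq_work.h>, and my_func/my_work are made-up names):
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		... runs later from hardirq context, IRQs disabled ...
 *	}
 *
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	irq_work_queue(&my_work);	(safe even from NMI context)
 */
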
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

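/*
 * For example, claiming a free entry means flipping flags from 0 to
 * IRQ_WORK_FLAGS (PENDING | BUSY) with a single cmpxchg(); see
 * irq_work_claim() below.
 */
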
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise, but only trust any
	 * flag value after the cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

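/*
 * Architectures with a way to raise a self-interrupt (x86, for instance,
 * sends a self-IPI) override the weak stub above; the rest pick pending
 * work up from the timer tick instead.
 */
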
/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

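	/*
	 * Disable preemption so that the llist_add() and the raised
	 * self-interrupt both target this CPU's irq_work_list.
	 */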
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

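/*
 * Tells the nohz code whether this CPU still has pending irq work and
 * therefore must keep its timer tick alive (the tick is the fallback
 * that runs the work on architectures using the weak stub above).
 */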
bool irq_work_needs_cpu(void)
{
	struct llist_head *this_list;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return false;

	return true;
}

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

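	/*
	 * Atomically detach the whole list: anything enqueued from now on
	 * sees an empty list and raises a fresh self-interrupt, so no
	 * entry is lost while we walk the detached nodes.
	 */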
	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit; after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		xchg(&work->flags, IRQ_WORK_BUSY);
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @work: ensure the entry is not
 * currently in use.
 */
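/*
 * Note: this simply spins until the BUSY bit clears, so it must not be
 * called with IRQs disabled (hence the WARN_ON_ONCE below). A typical
 * use is waiting for the callback to finish before freeing the object
 * that embeds the irq_work.
 */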
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);