#ifndef IRQ_POLL_H
#define IRQ_POLL_H

struct irq_poll;
typedef int (irq_poll_fn)(struct irq_poll *, int);

struct irq_poll {
	struct list_head list;
	unsigned long state;
	unsigned long data;
	int weight;
	int max;
	irq_poll_fn *poll;
};

enum {
	IRQ_POLL_F_SCHED	= 0,
	IRQ_POLL_F_DISABLE	= 1,
};

/*
 * Returns 0 if we successfully set the IRQ_POLL_F_SCHED bit, indicating
 * that we were the first to acquire this iop for scheduling. If this iop
 * is currently disabled, return "failure".
 */
static inline int irq_poll_sched_prep(struct irq_poll *iop)
{
	if (!test_bit(IRQ_POLL_F_DISABLE, &iop->state))
		return test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state);

	return 1;
}
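
/*
 * Illustrative usage sketch (an assumption, not part of this header): a
 * driver's hard interrupt handler would typically try to claim the iop
 * with irq_poll_sched_prep() and, when that returns 0, queue the softirq
 * poll with irq_poll_sched():
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct foo_queue *q = data;	// hypothetical driver structure
 *
 *		if (!irq_poll_sched_prep(&q->iop))
 *			irq_poll_sched(&q->iop);
 *		return IRQ_HANDLED;
 *	}
 *
 * foo_irq_handler(), struct foo_queue and its 'iop' member are made-up
 * names used only for illustration.
 */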

static inline int irq_poll_disable_pending(struct irq_poll *iop)
{
	return test_bit(IRQ_POLL_F_DISABLE, &iop->state);
}

extern void irq_poll_sched(struct irq_poll *);
extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);
extern void irq_poll_complete(struct irq_poll *);
extern void __irq_poll_complete(struct irq_poll *);
extern void irq_poll_enable(struct irq_poll *);
extern void irq_poll_disable(struct irq_poll *);
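
/*
 * Illustrative sketch of the poll callback contract (assumed here, not
 * spelled out in this header): the irq_poll_fn registered through
 * irq_poll_init() is handed a budget, returns how many completions it
 * processed, and calls irq_poll_complete() once it has drained its queue
 * below that budget:
 *
 *	static int foo_poll(struct irq_poll *iop, int budget)
 *	{
 *		struct foo_queue *q = container_of(iop, struct foo_queue, iop);
 *		int done = foo_process_completions(q, budget);	// hypothetical helper
 *
 *		if (done < budget)
 *			irq_poll_complete(iop);
 *		return done;
 *	}
 *
 *	irq_poll_init(&q->iop, FOO_POLL_WEIGHT, foo_poll);
 *
 * struct foo_queue, foo_process_completions() and FOO_POLL_WEIGHT are
 * hypothetical names.
 */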

#endif