#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	run_work;
	struct delayed_work	delay_work;
	cpumask_var_t		cpumask;
	int			next_cpu;
	int			next_cpu_batch;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	void			*driver_data;

	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;
	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;

	unsigned int		wait_index;

	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	atomic_t		nr_active;

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};
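
/*
 * Illustrative sketch, not part of this header: drivers typically attach
 * their own per-hw-queue state via ->driver_data from their init_hctx
 * callback, where the void *data argument is the tag set's driver_data.
 * All "my_*" names below are hypothetical.
 *
 *	static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *				unsigned int index)
 *	{
 *		struct my_device *dev = data;
 *
 *		hctx->driver_data = &dev->hw_queues[index];
 *		return 0;
 *	}
 */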

struct blk_mq_tag_set {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
	void			*driver_data;

	struct blk_mq_tags	**tags;

	struct mutex		tag_list_lock;
	struct list_head	tag_list;
};
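
/*
 * Illustrative sketch: a driver describes its queues to blk-mq by filling
 * in a tag set before handing it to blk_mq_alloc_tag_set() and
 * blk_mq_init_queue(). The "my_*" names are hypothetical.
 *
 *	static struct blk_mq_tag_set my_tag_set = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 */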

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
		unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
		unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;

	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
	exit_request_fn		*exit_request;
};
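
/*
 * Illustrative sketch: a minimal driver wires its callbacks into an ops
 * table; only queue_rq and map_queue are strictly required, and the core
 * provides blk_mq_map_queue() as a default mapping. The "my_*" names are
 * hypothetical.
 *
 *	static struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.map_queue	= blk_mq_map_queue,
 *		.init_request	= my_init_request,
 *		.complete	= my_complete_rq,
 *	};
 */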

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
	BLK_MQ_F_TAG_SHARED	= 1 << 2,

	BLK_MQ_S_STOPPED	= 0,
	BLK_MQ_S_TAG_ACTIVE	= 1,

	BLK_MQ_MAX_DEPTH	= 2048,

	BLK_MQ_CPU_WORK_BATCH	= 8,
};
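
/*
 * Illustrative sketch: a queue_rq implementation reports one of the
 * BLK_MQ_RQ_QUEUE_* codes back to the core; returning _BUSY makes the
 * core hold on to the request and retry it later. The "my_*" names are
 * hypothetical.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		if (!my_hw_has_room(hctx->driver_data))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *		my_hw_submit(hctx->driver_data, cmd);
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */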

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
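
/*
 * Illustrative sketch of the usual bring-up order: allocate the tag set,
 * create the queue from it, then add the disk (which registers the mq
 * sysfs entries via blk_mq_register_disk()). "my_*" is hypothetical.
 *
 *	ret = blk_mq_alloc_tag_set(&my_tag_set);
 *	if (ret)
 *		return ret;
 *	q = blk_mq_init_queue(&my_tag_set);
 *	if (IS_ERR(q)) {
 *		blk_mq_free_tag_set(&my_tag_set);
 *		return PTR_ERR(q);
 *	}
 *	my_disk->queue = q;
 *	add_disk(my_disk);
 */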

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
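
/*
 * Illustrative sketch: requests can also be allocated directly, e.g. for
 * driver-internal commands, and must be freed (or ended) when done. The
 * allocation may return NULL if no tag could be obtained.
 *
 *	struct request *rq;
 *
 *	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	...
 *	blk_mq_free_request(rq);
 */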
Jens Axboe320ae512013-10-24 09:20:05 +0100160
161struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
Christoph Hellwig24d2f902014-04-15 14:14:00 -0600162struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
Jens Axboe320ae512013-10-24 09:20:05 +0100163void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
164
Christoph Hellwig63151a42014-04-16 09:44:52 +0200165void blk_mq_end_io(struct request *rq, int error);
166void __blk_mq_end_io(struct request *rq, int error);
Jens Axboe320ae512013-10-24 09:20:05 +0100167
Christoph Hellwiged0791b2014-04-16 09:44:57 +0200168void blk_mq_requeue_request(struct request *rq);
169
Christoph Hellwig30a91cb2014-02-10 03:24:38 -0800170void blk_mq_complete_request(struct request *rq);
171
Jens Axboe320ae512013-10-24 09:20:05 +0100172void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
173void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
Christoph Hellwig280d45f2013-10-25 14:45:58 +0100174void blk_mq_stop_hw_queues(struct request_queue *q);
Christoph Hellwig2f268552014-04-16 09:44:56 +0200175void blk_mq_start_hw_queues(struct request_queue *q);
Christoph Hellwig1b4a3252014-04-16 09:44:54 +0200176void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
Christoph Hellwig70f4db62014-04-16 10:48:08 -0600177void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
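
/*
 * Illustrative sketch: a common pattern pairs BLK_MQ_RQ_QUEUE_BUSY with
 * stopping the hardware queue, then restarting it once the device has
 * drained. The "my_*" names are hypothetical.
 *
 *	In queue_rq:
 *
 *	if (my_hw_full(hctx->driver_data)) {
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *	}
 *
 *	In the completion/IRQ path, once room is available again:
 *
 *	blk_mq_start_stopped_hw_queues(q, true);
 */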

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, and add request size to get to
 * the PDU.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
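
/*
 * Illustrative sketch: with .cmd_size = sizeof(struct my_cmd) in the tag
 * set, the two helpers above convert between a request and its PDU in
 * either direction. "struct my_cmd" is hypothetical.
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *	And from a completion path that only has the PDU:
 *
 *	struct request *rq = blk_mq_rq_from_pdu(cmd);
 */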

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
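
/*
 * Illustrative sketch: the iterators above walk a queue's hardware
 * contexts and per-CPU software contexts like ordinary for loops.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u on node %u\n", i, hctx->numa_node);
 */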
Jens Axboe320ae512013-10-24 09:20:05 +0100203
204#define blk_ctx_sum(q, sum) \
205({ \
206 struct blk_mq_ctx *__x; \
207 unsigned int __ret = 0, __i; \
208 \
209 queue_for_each_ctx((q), __x, __i) \
210 __ret += sum; \
211 __ret; \
212})
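
/*
 * Illustrative sketch: blk_ctx_sum() evaluates "sum" once per software
 * context, with __x bound to each ctx in turn, and accumulates the
 * results. Assuming a hypothetical per-ctx counter field "my_count":
 *
 *	unsigned int total = blk_ctx_sum(q, __x->my_count);
 */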

#endif